#ifndef mia_core_splineparzenmi_hh
#define mia_core_splineparzenmi_hh

#include <boost/concept/requires.hpp>
#include <boost/concept_check.hpp>

// standard headers needed by the code shown below
#include <algorithm>
#include <cassert>
#include <stdexcept>
#include <utility>
#include <vector>
// ... (further MIA includes, NS_MIA_BEGIN, and the opening of the
//      CSplineParzenMI class declaration with its constructor are elided
//      in this fragment)

        /// Fill the internal joint and marginal histograms from a moving and a
        /// reference intensity range of equal length.
        template <typename MovIterator, typename RefIterator>
        BOOST_CONCEPT_REQUIRES(((::boost::ForwardIterator<MovIterator>))
                               ((::boost::ForwardIterator<RefIterator>)),
                               (void))
        fill(MovIterator mov_begin, MovIterator mov_end,
             RefIterator ref_begin, RefIterator ref_end);
        /// Fill the internal histograms as above, but only from pixels whose
        /// corresponding mask value is non-zero; an empty mask range means
        /// that all pixels are used.
        template <typename MovIterator, typename RefIterator, typename MaskIterator>
        void fill(MovIterator mov_begin, MovIterator mov_end,
                  RefIterator ref_begin, RefIterator ref_end,
                  MaskIterator mask_begin, MaskIterator mask_end);
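        /*
           Usage sketch (illustrative only, not taken verbatim from the MIA
           sources): given the constructor
           CSplineParzenMI(rbins, rkernel, mbins, mkernel, cut_histogram) and
           assuming a kernel factory such as produce_spline_kernel("bspline:d=3"),
           the two fill() overloads could be driven like this:

           \code
           std::vector<float> moving    = load_moving_intensities();    // hypothetical helpers
           std::vector<float> reference = load_reference_intensities();
           std::vector<unsigned char> mask = load_mask();               // same length as the images

           CSplineParzenMI mi(64, produce_spline_kernel("bspline:d=3"),
                              64, produce_spline_kernel("bspline:d=3"), 0.0);

           // every pixel pair contributes to the joint histogram
           mi.fill(moving.begin(), moving.end(), reference.begin(), reference.end());

           // only pixel pairs with a non-zero mask value contribute
           mi.fill(moving.begin(), moving.end(), reference.begin(), reference.end(),
                   mask.begin(), mask.end());
           \endcode
        */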
        void fill_histograms(const std::vector<double>& values,
                             double rmin, double rmax,
                             double mmin, double mmax);

        double scale_moving(double x) const;
        double scale_reference(double x) const;
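        /* A plausible reading of the two scaling helpers above (their
           definitions are not part of this fragment): with m_mov_scale set to
           (m_mov_bins - 1) / (m_mov_max - m_mov_min) in fill() below,
           scale_moving(x) presumably clamps x to [m_mov_min, m_mov_max] and
           returns (x - m_mov_min) * m_mov_scale, i.e. a continuous bin
           coordinate in [0, m_mov_bins - 1]; scale_reference() mirrors this
           for the reference parameters. */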
        void evaluate_histograms();
        void evaluate_log_cache();
        size_t m_ref_real_bins;
        // ... (reference-side parameters m_ref_bins, m_ref_border, m_ref_kernel,
        //      m_ref_min, m_ref_max, and m_ref_scale used below are elided here)
        size_t m_mov_real_bins;
        // ... (the corresponding moving-side parameters are elided as well)
        std::vector<double> m_joined_histogram; // joint histogram, m_mov_real_bins values per reference bin
        std::vector<double> m_ref_histogram;    // marginal histogram of the reference image
        std::vector<double> m_mov_histogram;    // marginal histogram of the moving image
        std::vector<std::vector<double>> m_pdfLogCache;
        double m_cut_histogram;                 // percentage cut off the intensity range tails
        template <typename Iterator>
        std::pair<double, double> get_reduced_range(Iterator begin, Iterator end) const;

        template <typename DataIterator, typename MaskIterator>
        std::pair<double, double> get_reduced_range_masked(DataIterator dbegin,
                                                           DataIterator dend,
                                                           MaskIterator mbegin) const;
// ... (end of the CSplineParzenMI class declaration elided)

template <typename MovIterator, typename RefIterator>
BOOST_CONCEPT_REQUIRES(((::boost::ForwardIterator<MovIterator>))
                       ((::boost::ForwardIterator<RefIterator>)),
                       (void))
CSplineParzenMI::fill(MovIterator mov_begin, MovIterator mov_end,
                      RefIterator ref_begin, RefIterator ref_end)
{
        std::fill(m_joined_histogram.begin(), m_joined_histogram.end(), 0.0);
        assert(mov_begin != mov_end);
        assert(ref_begin != ref_end);

        // if no moving intensity range was set, derive it from the data
        if (m_mov_max < m_mov_min) {
                auto mov_range = get_reduced_range(mov_begin, mov_end);

                if (mov_range.second == mov_range.first)
                        throw std::invalid_argument("relevant moving image intensity range is zero");

                m_mov_min = mov_range.first;
                m_mov_max = mov_range.second;
                m_mov_scale = (m_mov_bins - 1) / (m_mov_max - m_mov_min);
                cvdebug() << "Mov Range = [" << m_mov_min << ", " << m_mov_max << "]\n";
        }

        // likewise for the reference intensity range
        if (m_ref_max < m_ref_min) {
                auto ref_range = get_reduced_range(ref_begin, ref_end);

                if (ref_range.second == ref_range.first)
                        throw std::invalid_argument("relevant reference image intensity range is zero");

                m_ref_min = ref_range.first;
                m_ref_max = ref_range.second;
                m_ref_scale = (m_ref_bins - 1) / (m_ref_max - m_ref_min);
                cvdebug() << "Ref Range = [" << m_ref_min << ", " << m_ref_max << "]\n";
        }

        std::vector<double> mweights(m_mov_kernel->size());
        std::vector<double> rweights(m_ref_kernel->size());
        size_t N = 0;

        while (ref_begin != ref_end && mov_begin != mov_end) {
                const double mov = scale_moving(*mov_begin);
                const double ref = scale_reference(*ref_begin);
                const int mov_start = m_mov_kernel->get_start_idx_and_value_weights(mov, mweights) + m_mov_border;
                const int ref_start = m_ref_kernel->get_start_idx_and_value_weights(ref, rweights) + m_ref_border;

                // distribute this sample over the joint histogram with the
                // separable B-spline Parzen weights
                for (size_t r = 0; r < m_ref_kernel->size(); ++r) {
                        auto inbeg = m_joined_histogram.begin() +
                                     m_mov_real_bins * (ref_start + r) + mov_start;
                        double rw = rweights[r];
                        std::transform(mweights.begin(), mweights.end(), inbeg, inbeg,
                                       [rw](double mw, double jhvalue) {
                                               return mw * rw + jhvalue;
                                       });
                }

                ++mov_begin;
                ++ref_begin;
                ++N;
        }

        cvdebug() << "CSplineParzenMI::fill: counted " << N << " pixels\n";

        // normalize the joint histogram to a probability distribution
        const double nscale = 1.0 / N;
        transform(m_joined_histogram.begin(), m_joined_histogram.end(), m_joined_histogram.begin(),
                  [&nscale](double jhvalue) {
                          return jhvalue * nscale;
                  });

        evaluate_histograms();
        evaluate_log_cache();
}
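/*
   For orientation: once the joint histogram p(r,m) has been normalized (as at
   the end of fill() above) and the marginals p(r), p(m) have presumably been
   accumulated by evaluate_histograms(), mutual information takes the usual
   Parzen-window form MI = sum_{r,m} p(r,m) * log( p(r,m) / (p(r) * p(m)) ).
   A minimal free-standing sketch of that reduction follows; it is not the
   member implementation (which is not part of this fragment) and assumes the
   same row-major layout as the indexing above (one row of mov-bins per
   reference bin):

   \code
   #include <cmath>
   #include <cstddef>
   #include <vector>

   double parzen_mi_from_histograms(const std::vector<double>& joined,   // ref_bins * mov_bins entries
                                    const std::vector<double>& ref_hist, // marginal over reference bins
                                    const std::vector<double>& mov_hist) // marginal over moving bins
   {
           double mi = 0.0;
           const std::size_t mov_bins = mov_hist.size();
           for (std::size_t r = 0; r < ref_hist.size(); ++r) {
                   for (std::size_t m = 0; m < mov_bins; ++m) {
                           const double prm = joined[r * mov_bins + m];
                           const double denom = ref_hist[r] * mov_hist[m];
                           if (prm > 0.0 && denom > 0.0)
                                   mi += prm * std::log(prm / denom);
                   }
           }
           return mi;
   }
   \endcode
*/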
template <typename MovIterator, typename RefIterator, typename MaskIterator>
void CSplineParzenMI::fill(MovIterator mov_begin, MovIterator mov_end,
                           RefIterator ref_begin, RefIterator ref_end,
                           MaskIterator mask_begin, MaskIterator mask_end)
{
        std::fill(m_joined_histogram.begin(), m_joined_histogram.end(), 0.0);
        assert(mov_begin != mov_end);
        assert(ref_begin != ref_end);

        // an empty mask range means "use all pixels": delegate to the
        // unmasked version and return, so that the loop below never
        // dereferences an empty mask range
        if (mask_begin == mask_end) {
                fill(mov_begin, mov_end, ref_begin, ref_end);
                return;
        }

        if (m_mov_max < m_mov_min) {
                auto mov_range = get_reduced_range_masked(mov_begin, mov_end, mask_begin);

                if (mov_range.second == mov_range.first)
                        throw std::invalid_argument("relevant moving image intensity range is zero");

                m_mov_min = mov_range.first;
                m_mov_max = mov_range.second;
                m_mov_scale = (m_mov_bins - 1) / (m_mov_max - m_mov_min);
                cvdebug() << "Mov Range = [" << m_mov_min << ", " << m_mov_max << "]\n";
        }

        if (m_ref_max < m_ref_min) {
                auto ref_range = get_reduced_range_masked(ref_begin, ref_end, mask_begin);

                if (ref_range.second == ref_range.first)
                        throw std::invalid_argument("relevant reference image intensity range is zero");

                m_ref_min = ref_range.first;
                m_ref_max = ref_range.second;
                m_ref_scale = (m_ref_bins - 1) / (m_ref_max - m_ref_min);
                cvdebug() << "Ref Range = [" << m_ref_min << ", " << m_ref_max << "]\n";
        }

        std::vector<double> mweights(m_mov_kernel->size());
        std::vector<double> rweights(m_ref_kernel->size());
        size_t N = 0;

        while (ref_begin != ref_end && mov_begin != mov_end) {
                // only pixels with a non-zero mask value contribute
                if (*mask_begin) {
                        const double mov = scale_moving(*mov_begin);
                        const double ref = scale_reference(*ref_begin);
                        const int mov_start = m_mov_kernel->get_start_idx_and_value_weights(mov, mweights) + m_mov_border;
                        const int ref_start = m_ref_kernel->get_start_idx_and_value_weights(ref, rweights) + m_ref_border;

                        for (size_t r = 0; r < m_ref_kernel->size(); ++r) {
                                auto inbeg = m_joined_histogram.begin() +
                                             m_mov_real_bins * (ref_start + r) + mov_start;
                                double rw = rweights[r];
                                std::transform(mweights.begin(), mweights.end(), inbeg, inbeg,
                                               [rw](double mw, double jhvalue) {
                                                       return mw * rw + jhvalue;
                                               });
                        }

                        ++N;
                }

                ++mov_begin;
                ++ref_begin;
                ++mask_begin;
        }

        cvdebug() << "CSplineParzenMI::fill: counted " << N << " pixels\n";

        const double nscale = 1.0 / N;
        transform(m_joined_histogram.begin(), m_joined_histogram.end(), m_joined_histogram.begin(),
                  [&nscale](double jhvalue) {
                          return jhvalue * nscale;
                  });

        evaluate_histograms();
        evaluate_log_cache();
}
template <typename Iterator>
std::pair<double, double> CSplineParzenMI::get_reduced_range(Iterator begin, Iterator end) const
{
        auto range = std::minmax_element(begin, end);

        // ... (construction of the histogram 'h' over [*range.first, *range.second]
        //      elided in this fragment)

        h.push_range(begin, end);
        auto reduced_range = h.get_reduced_range(m_cut_histogram);
        cvinfo() << "CSplineParzenMI: reduce range by " << m_cut_histogram
                 << "% from [" << *range.first << ", " << *range.second
                 << "] to [" << reduced_range.first << ", " << reduced_range.second << "]\n";
        return std::make_pair(reduced_range.first, reduced_range.second);
}
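/*
   The range reduction above delegates to THistogram::get_reduced_range(),
   whose implementation is not part of this fragment. Conceptually it discards
   the extreme intensity tails so that a few outliers do not stretch the
   histogram domain. A rough, free-standing approximation, assuming the cut
   percentage is split evenly between the two tails (an assumption, not taken
   from the MIA sources):

   \code
   #include <algorithm>
   #include <cstddef>
   #include <utility>
   #include <vector>

   std::pair<double, double> reduced_range_by_quantiles(std::vector<double> values,
                                                        double cut_percent)
   {
           // assumes a non-empty input and cut_percent in [0, 100)
           std::sort(values.begin(), values.end());
           const double tail = cut_percent / 200.0;   // half of the cut on each side
           const std::size_t lo = static_cast<std::size_t>(tail * (values.size() - 1));
           const std::size_t hi = static_cast<std::size_t>((1.0 - tail) * (values.size() - 1));
           return std::make_pair(values[lo], values[hi]);
   }
   \endcode
*/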
template <typename DataIterator, typename MaskIterator>
std::pair<double, double>
CSplineParzenMI::get_reduced_range_masked(DataIterator dbegin,
                                          DataIterator dend,
                                          MaskIterator mbegin) const
{
        auto ib = dbegin;
        auto im = mbegin;

        // skip leading pixels that are masked out; check the bounds before
        // dereferencing the mask iterator
        while (ib != dend && !*im) {
                ++ib;
                ++im;
        }

        if (ib == dend)
                throw std::runtime_error("CSplineParzenMI: empty mask");

        double range_max = *ib;
        double range_min = *ib;

        // ... (scan over the remaining masked pixels that updates range_min and
        //      range_max and fills the histogram 'h' elided in this fragment)

        auto reduced_range = h.get_reduced_range(m_cut_histogram);
        cvinfo() << "CSplineParzenMI: reduce range by " << m_cut_histogram
                 << "% from [" << range_min << ", " << range_max
                 << "] to [" << reduced_range.first << ", " << reduced_range.second << "]\n";
        return std::make_pair(reduced_range.first, reduced_range.second);
}