// This is core/vnl/vnl_sparse_lst_sqr_function.cxx
//:
// \file
// \author Matt Leotta (Brown)
// \date April 13, 2005
#include "vnl_sparse_lst_sqr_function.h"

#include <iostream>
#include <cassert>
#include <vnl/vnl_vector_ref.h>
12 
13 void vnl_sparse_lst_sqr_function::dim_warning(unsigned int nr_of_unknowns,
14  unsigned int nr_of_residuals)
15 {
16  if (nr_of_unknowns > nr_of_residuals)
17  std::cerr << "vnl_sparse_lst_sqr_function: WARNING: "
18  << "unknowns(" << nr_of_unknowns << ") > "
19  << "residuals("<< nr_of_residuals << ")\n";
20 }
21 
22 //: Construct vnl_sparse_lst_sqr_function.
23 // Assumes A consists of \p num_a parameters each of size \p num_params_per_a
24 // Assumes B consists of \p num_b parameters each of size \p num_params_per_b
25 // Assumes C consists of \p num_params_c parameters
26 // Assumes there is a residual x_ij for all i and j, each of size \p num_residuals_per_e
27 // The optional argument should be no_gradient if the gradf function has not
28 // been implemented. Default is use_gradient.
30  unsigned int num_a,
31  unsigned int num_params_per_a,
32  unsigned int num_b,
33  unsigned int num_params_per_b,
34  unsigned int num_params_c,
35  unsigned int num_residuals_per_e,
36  UseGradient g,
37  UseWeights w)
38  : failure(false),
39  residual_indices_(),
40  indices_a_(num_a+1,0),
41  indices_b_(num_b+1,0),
42  num_params_c_(num_params_c),
43  indices_e_(num_a*num_b+1,0),
44  use_gradient_(g == use_gradient),
45  use_weights_(w == use_weights)
46 {
47  unsigned int k = num_params_per_a;
48  for (unsigned int i=1; i<indices_a_.size(); ++i, k+=num_params_per_a)
49  indices_a_[i] = k;
50 
51  k = num_params_per_b;
52  for (unsigned int i=1; i<indices_b_.size(); ++i, k+=num_params_per_b)
53  indices_b_[i] = k;
54 
55  k = num_residuals_per_e;
56  for (unsigned int i=1; i<indices_e_.size(); ++i, k+=num_residuals_per_e)
57  indices_e_[i] = k;
58 }
59 
60 //: Construct vnl_sparse_lst_sqr_function.
61 // Assumes A consists of \p num_a parameters each of size \p num_params_per_a
62 // Assumes B consists of \p num_b parameters each of size \p num_params_per_b
63 // Assumes C consists of \p num_params_c parameters
64 // \p xmask is a mask for residual availability. residual e_ij exists only if mask[i][j]==true
65 // Assumes each available residual has size \p num_residuals_per_e
66 // The optional argument should be no_gradient if the gradf function has not
67 // been implemented. Default is use_gradient.
69  unsigned int num_a,
70  unsigned int num_params_per_a,
71  unsigned int num_b,
72  unsigned int num_params_per_b,
73  unsigned int num_params_c,
74  const std::vector<std::vector<bool> >& xmask,
75  unsigned int num_residuals_per_e,
76  UseGradient g,
77  UseWeights w)
78  : failure(false),
79  residual_indices_(xmask),
80  indices_a_(num_a+1,0),
81  indices_b_(num_b+1,0),
82  num_params_c_(num_params_c),
83  indices_e_(residual_indices_.num_non_zero()+1,0),
84  use_gradient_(g == use_gradient),
85  use_weights_(w == use_weights)
86 {
87  unsigned int k = num_params_per_a;
88  for (unsigned int i=1; i<indices_a_.size(); ++i, k+=num_params_per_a)
89  indices_a_[i] = k;
90 
91  k = num_params_per_b;
92  for (unsigned int i=1; i<indices_b_.size(); ++i, k+=num_params_per_b)
93  indices_b_[i] = k;
94 
95  k = num_residuals_per_e;
96  for (unsigned int i=1; i<indices_e_.size(); ++i, k+=num_residuals_per_e)
97  indices_e_[i] = k;
98 
99  dim_warning(num_a*num_params_per_a + num_b*num_params_per_b + num_params_c, k);
100 }
101 
102 
103 //: Construct vnl_sparse_lst_sqr_function.
104 // This constructor is the most general
105 // \param a_sizes is a vector describing the number of parameters for each a_i
106 // \param b_sizes is a vector describing the number of parameters for each b_j
107 // \param num_params_c is the number of C parameters
108 // \param e_sizes is a vector describing the number of parameters for each residual e_ij
109 // \param xmask is a mask for residual availability. residual e_ij exists only if mask[i][j]==true
110 // xmask must be a_sizes.size() by b_sizes.size() and contain e_sizes.size() true entries
111 // The optional argument should be no_gradient if the gradf function has not
112 // been implemented. Default is use_gradient.
114  const std::vector<unsigned int>& a_sizes,
115  const std::vector<unsigned int>& b_sizes,
116  unsigned int num_params_c,
117  const std::vector<unsigned int>& e_sizes,
118  const std::vector<std::vector<bool> >& xmask,
119  UseGradient g,
120  UseWeights w)
121  : failure(false),
122  residual_indices_(xmask),
123  indices_a_(a_sizes.size()+1,0),
124  indices_b_(b_sizes.size()+1,0),
125  num_params_c_(num_params_c),
126  indices_e_(e_sizes.size()+1,0),
127  use_gradient_(g == use_gradient),
128  use_weights_(w == use_weights)
129 {
130  assert(residual_indices_.num_non_zero() == (int)e_sizes.size());
131  assert(residual_indices_.num_rows() == (int)a_sizes.size());
132  assert(residual_indices_.num_cols() == (int)b_sizes.size());
133 
134  for (unsigned int i=0; i<a_sizes.size(); ++i)
135  indices_a_[i+1] = indices_a_[i]+a_sizes[i];
136 
137  for (unsigned int i=0; i<b_sizes.size(); ++i)
138  indices_b_[i+1] = indices_b_[i]+b_sizes[i];
139 
140  for (unsigned int i=0; i<e_sizes.size(); ++i)
141  indices_e_[i+1] = indices_e_[i]+e_sizes[i];
142 
143  dim_warning(indices_a_.back() + indices_b_.back() + num_params_c,
144  indices_e_.back());
145 }
146 
147 
148 //: Compute all residuals.
149 // Given the parameter vectors a, b, and c, compute the vector of residuals f.
150 // f has been sized appropriately before the call.
151 // The default implementation computes f by calling fij for each valid
152 // pair of i and j. You do not need to overload this method unless you
153 // want to provide a more efficient implementation for your problem.
154 void
156  vnl_vector<double> const& b,
157  vnl_vector<double> const& c,
159 {
160  for (unsigned int i=0; i<number_of_a(); ++i)
161  {
162  // This is semi const incorrect - there is no vnl_vector_ref_const
164  const_cast<double*>(a.data_block())+index_a(i));
165 
167  for (auto & r_itr : row)
168  {
169  unsigned int j = r_itr.second;
170  unsigned int k = r_itr.first;
171  // This is semi const incorrect - there is no vnl_vector_ref_const
173  const_cast<double*>(b.data_block())+index_b(j));
175  fij(i,j,ai,bj,c,eij); // compute residual vector e_ij
176  }
177  }
178 }
179 
180 
181 //: Compute the sparse Jacobian in block form.
182 // Given the parameter vectors a, b, and c, compute the set of block
183 // Jacobians Aij, Bij, and Cij.
184 // All Aij, Bij, and Cij have been sized appropriately before the call.
185 // The default implementation computes A, B, and C by calling
186 // jac_Aij, jac_Bij, and jac_Cij for each valid pair of i and j.
187 // You do not need to overload this method unless you want to provide
188 // a more efficient implementation for your problem.
189 void
191  vnl_vector<double> const& b,
192  vnl_vector<double> const& c,
193  std::vector<vnl_matrix<double> >& A,
194  std::vector<vnl_matrix<double> >& B,
195  std::vector<vnl_matrix<double> >& C)
196 {
197  for (unsigned int i=0; i<number_of_a(); ++i)
198  {
199  // This is semi const incorrect - there is no vnl_vector_ref_const
201  const_cast<double*>(a.data_block())+index_a(i));
202 
204  for (auto & r_itr : row)
205  {
206  unsigned int j = r_itr.second;
207  unsigned int k = r_itr.first;
208  // This is semi const incorrect - there is no vnl_vector_ref_const
210  const_cast<double*>(b.data_block())+index_b(j));
211 
212  jac_Aij(i,j,ai,bj,c,A[k]); // compute Jacobian A_ij
213  jac_Bij(i,j,ai,bj,c,B[k]); // compute Jacobian B_ij
214  jac_Cij(i,j,ai,bj,c,C[k]); // compute Jacobian C_ij
215  }
216  }
217 }
218 
219 
220 //: Compute the sparse Jacobian in block form using a finite difference approximation.
221 // Given the parameter vectors a, b and c, compute the set of block Jacobians
222 // Aij, Bij, and Cij. The finite difference approximation is done independently
223 // at each block. All Aij, Bij, and Cij have been sized appropriately before the call.
224 // The default implementation computes A, B, and C by calling
225 // jac_Aij, jac_Bij, and jac_Cij for each valid pair of i and j.
226 // You do not need to overload this method unless you want to provide
227 // a more efficient implementation for your problem.
228 void
230  vnl_vector<double> const& b,
231  vnl_vector<double> const& c,
232  std::vector<vnl_matrix<double> >& A,
233  std::vector<vnl_matrix<double> >& B,
234  std::vector<vnl_matrix<double> >& C,
235  double stepsize)
236 {
237  for (unsigned int i=0; i<number_of_a(); ++i)
238  {
239  // This is semi const incorrect - there is no vnl_vector_ref_const
241  const_cast<double*>(a.data_block())+index_a(i));
242 
244  for (auto & r_itr : row)
245  {
246  unsigned int j = r_itr.second;
247  unsigned int k = r_itr.first;
248  // This is semi const incorrect - there is no vnl_vector_ref_const
250  const_cast<double*>(b.data_block())+index_b(j));
251 
252  fd_jac_Aij(i,j,ai,bj,c,A[k],stepsize); // compute Jacobian A_ij with finite differences
253  fd_jac_Bij(i,j,ai,bj,c,B[k],stepsize); // compute Jacobian B_ij with finite differences
254  fd_jac_Cij(i,j,ai,bj,c,C[k],stepsize); // compute Jacobian C_ij with finite differences
255  }
256  }
257 }
258 
259 
260 //: If using weighted least squares, compute the weights for each i and j.
261 // Return the weights in \a weights.
262 // The default implementation computes \a weights by calling
263 // compute_weight_ij for each valid pair of i and j.
264 // You do not need to overload this method unless you want to provide
265 // a more specialized implementation for your problem.
266 void
268  vnl_vector<double> const& b,
269  vnl_vector<double> const& c,
270  vnl_vector<double> const& e,
271  vnl_vector<double>& weights)
272 {
273  for (unsigned int i=0; i<number_of_a(); ++i)
274  {
275  // This is semi const incorrect - there is no vnl_vector_ref_const
277  const_cast<double*>(a.data_block())+index_a(i));
278 
280  for (auto & r_itr : row)
281  {
282  unsigned int j = r_itr.second;
283  unsigned int k = r_itr.first;
284  // This is semi const incorrect - there is no vnl_vector_ref_const
286  const_cast<double*>(b.data_block())+index_b(j));
288  const_cast<double*>(e.data_block()+index_e(k)));
289  compute_weight_ij(i,j,ai,bj,c,eij,weights[k]);
290  }
291  }
292 }
293 
294 
295 //: If using weighted least squares, apply the weights to residuals f.
296 // The default implementation applies \a weights by calling
297 // apply_weight_ij for each valid pair of i and j.
298 // You do not need to overload this method unless you want to provide
299 // a more specialized implementation for your problem.
300 void
303 {
304  for (unsigned int i=0; i<number_of_a(); ++i)
305  {
307  for (auto & r_itr : row)
308  {
309  unsigned int j = r_itr.second;
310  unsigned int k = r_itr.first;
312  apply_weight_ij(i,j,weights[k],eij);
313  }
314  }
315 }
316 
317 
318 //: If using weighted least squares, apply the weights to residuals A, B, C.
319 // The default implementation applies \a weights by calling
320 // apply_weight_ij for each valid pair of i and j.
321 // You do not need to overload this method unless you want to provide
322 // a more specialized implementation for your problem.
323 void
325  std::vector<vnl_matrix<double> >& A,
326  std::vector<vnl_matrix<double> >& B,
327  std::vector<vnl_matrix<double> >& C)
328 {
329  for (unsigned int i=0; i<number_of_a(); ++i)
330  {
332  for (auto & r_itr : row)
333  {
334  unsigned int j = r_itr.second;
335  unsigned int k = r_itr.first;
336  apply_weight_ij(i,j,weights[k],A[k],B[k],C[k]);
337  }
338  }
339 }
340 
341 
342 //: Compute the residuals from the ith component of a, the jth component of b.
343 // Given the parameter vectors ai, bj, and c, compute the vector of residuals fij.
344 // fij has been sized appropriately before the call.
345 void
346 vnl_sparse_lst_sqr_function::fij(int /*i*/, int /*j*/,
347  vnl_vector<double> const& /*ai*/,
348  vnl_vector<double> const& /*bj*/,
349  vnl_vector<double> const& /*c*/,
350  vnl_vector<double> & /*f_i_j*/)
351 {
352  std::cerr << "Warning: fij() called but not implemented in derived class\n";
353 }
354 
355 //: Calculate the Jacobian A_ij, given the parameter vectors a_i, b_j, and c.
356 void
357 vnl_sparse_lst_sqr_function::jac_Aij(unsigned int /*i*/, unsigned int /*j*/,
358  vnl_vector<double> const& /*ai*/,
359  vnl_vector<double> const& /*bj*/,
360  vnl_vector<double> const& /*c*/,
361  vnl_matrix<double> & /*Aij*/)
362 {
363  std::cerr << "Warning: jac_Aij() called but not implemented in derived class\n";
364 }
365 
366 //: Calculate the Jacobian B_ij, given the parameter vectors a_i, b_j, and c.
367 void
368 vnl_sparse_lst_sqr_function::jac_Bij(unsigned int /*i*/, unsigned int /*j*/,
369  vnl_vector<double> const& /*ai*/,
370  vnl_vector<double> const& /*bj*/,
371  vnl_vector<double> const& /*c*/,
372  vnl_matrix<double> & /*Bij*/)
373 {
374  std::cerr << "Warning: jac_Bij() called but not implemented in derived class\n";
375 }
376 
377 //: Calculate the Jacobian C_ij, given the parameter vectors a_i, b_j, and c.
378 void
379 vnl_sparse_lst_sqr_function::jac_Cij(unsigned int /*i*/, unsigned int /*j*/,
380  vnl_vector<double> const& /*ai*/,
381  vnl_vector<double> const& /*bj*/,
382  vnl_vector<double> const& /*c*/,
383  vnl_matrix<double> & /*Cij*/)
384 {
385  if (num_params_c_ > 0)
386  std::cerr << "Warning: jac_Cij() called but not implemented in derived class\n";
387 }
388 
389 //: Use this to compute a finite-difference Jacobian A_ij
390 void
392  vnl_vector<double> const& ai,
393  vnl_vector<double> const& bj,
394  vnl_vector<double> const& c,
395  vnl_matrix<double> & Aij,
396  double stepsize)
397 {
398  const unsigned int dim = ai.size();
399  const unsigned int n = Aij.rows();
400  assert(dim == number_of_params_a(i));
401  assert(n == number_of_residuals(i,j));
402  assert(dim == Aij.columns());
403 
404  vnl_vector<double> tai = ai;
405  vnl_vector<double> fplus(n);
406  vnl_vector<double> fminus(n);
407  // note: i and j are indices for the macro problem
408  // while ii and jj are indices for subproblem jacobian Aij
409  for (unsigned int ii = 0; ii < dim; ++ii)
410  {
411  // calculate f just to the right of ai[ii]
412  double tplus = tai[ii] = ai[ii] + stepsize;
413  this->fij(i,j,tai,bj,c,fplus);
414 
415  // calculate f just to the left of ai[ii]
416  double tminus = tai[ii] = ai[ii] - stepsize;
417  this->fij(i,j,tai,bj,c,fminus);
418 
419  double h = 1.0 / (tplus - tminus);
420  for (unsigned int jj = 0; jj < n; ++jj)
421  Aij(jj,ii) = (fplus[jj] - fminus[jj]) * h;
422 
423  // restore tai
424  tai[ii] = ai[ii];
425  }
426 }
427 
428 
429 //: Use this to compute a finite-difference Jacobian B_ij
430 void
432  vnl_vector<double> const& ai,
433  vnl_vector<double> const& bj,
434  vnl_vector<double> const& c,
435  vnl_matrix<double> & Bij,
436  double stepsize)
437 {
438  const unsigned int dim = bj.size();
439  const unsigned int n = Bij.rows();
440  assert(dim == number_of_params_b(j));
441  assert(n == number_of_residuals(i,j));
442  assert(dim == Bij.columns());
443 
444  vnl_vector<double> tbj = bj;
445  vnl_vector<double> fplus(n);
446  vnl_vector<double> fminus(n);
447  // note: i and j are indices for the macro problem
448  // while ii and jj are indices for subproblem jacobian Bij
449  for (unsigned int ii = 0; ii < dim; ++ii)
450  {
451  // calculate f just to the right of bj[ii]
452  double tplus = tbj[ii] = bj[ii] + stepsize;
453  this->fij(i,j,ai,tbj,c,fplus);
454 
455  // calculate f just to the left of bj[ii]
456  double tminus = tbj[ii] = bj[ii] - stepsize;
457  this->fij(i,j,ai,tbj,c,fminus);
458 
459  double h = 1.0 / (tplus - tminus);
460  for (unsigned int jj = 0; jj < n; ++jj)
461  Bij(jj,ii) = (fplus[jj] - fminus[jj]) * h;
462 
463  // restore tbj
464  tbj[ii] = bj[ii];
465  }
466 }
467 
468 
469 //: Use this to compute a finite-difference Jacobian C_ij
470 void
472  vnl_vector<double> const& ai,
473  vnl_vector<double> const& bj,
474  vnl_vector<double> const& c,
475  vnl_matrix<double> & Cij,
476  double stepsize)
477 {
478  const unsigned int dim = c.size();
479  assert(dim == number_of_params_c());
480  // if there are no C parameters, return early
481  if(dim == 0)
482  return;
483 
484  const unsigned int n = Cij.rows();
485  assert(n == number_of_residuals(i,j));
486  assert(dim == Cij.columns());
487 
488  vnl_vector<double> tc = c;
489  vnl_vector<double> fplus(n);
490  vnl_vector<double> fminus(n);
491  // note: i and j are indices for the macro problem
492  // while ii and jj are indices for subproblem jacobian Cij
493  for (unsigned int ii = 0; ii < dim; ++ii)
494  {
495  // calculate f just to the right of c[ii]
496  double tplus = tc[ii] = c[ii] + stepsize;
497  this->fij(i,j,ai,bj,tc,fplus);
498 
499  // calculate f just to the left of c[ii]
500  double tminus = tc[ii] = c[ii] - stepsize;
501  this->fij(i,j,ai,bj,tc,fminus);
502 
503  double h = 1.0 / (tplus - tminus);
504  for (unsigned int jj = 0; jj < n; ++jj)
505  Cij(jj,ii) = (fplus[jj] - fminus[jj]) * h;
506 
507  // restore tc
508  tc[ii] = c[ii];
509  }
510 }
511 
512 
513 //: If using weighted least squares, compute the weight.
514 // Return the weight in \a weight.
515 // The default implementation sets weight = 1
516 void
518  vnl_vector<double> const& /*ai*/,
519  vnl_vector<double> const& /*bj*/,
520  vnl_vector<double> const& /*c*/,
521  vnl_vector<double> const& /*fij*/,
522  double& weight)
523 {
524  weight = 1.0;
525 }
526 
527 
528 //: If using weighted least squares, apply the weight to fij.
529 // The default implementation multiplies fij by weight.
530 void
532  double const& weight,
533  vnl_vector<double>& fij)
534 {
535  fij *= weight;
536 }
537 
538 
539 //: If using weighted least squares, apply the weight to Aij, Bij, Cij.
540 // The default implementation multiplies each matrix by weight.
541 void
543  double const& weight,
544  vnl_matrix<double>& Aij,
545  vnl_matrix<double>& Bij,
546  vnl_matrix<double>& Cij)
547 {
548  Aij *= weight;
549  Bij *= weight;
550  Cij *= weight;
551 }
552 
553 
554 void vnl_sparse_lst_sqr_function::trace(int /* iteration */,
555  vnl_vector<double> const& /*a*/,
556  vnl_vector<double> const& /*b*/,
557  vnl_vector<double> const& /*c*/,
558  vnl_vector<double> const& /*e*/)
559 {
560  // This default implementation is empty; overloaded in derived class.
561 }
unsigned int number_of_params_a(int i) const
Return the number of parameters of a_j.
unsigned int index_a(int i) const
return the index of aj in a.
virtual void jac_Aij(unsigned int i, unsigned int j, vnl_vector< double > const &ai, vnl_vector< double > const &bj, vnl_vector< double > const &c, vnl_matrix< double > &Aij)
Calculate the Jacobian A_ij, given the parameter vectors a_i, b_j, and c.
unsigned int number_of_params_c() const
Return the number of parameters of c.
Abstract base for sparse least squares functions.
std::vector< unsigned int > indices_e_
unsigned int number_of_residuals(int k) const
Return the number of residuals in the kth residual vector.
unsigned int index_b(int j) const
return the index of bj in b.
int num_rows() const
number of rows in the sparse matrix.
Definition: vnl_crs_index.h:42
vnl_vector using user-supplied storage
vnl_vector using user-supplied storage.
Definition: vnl_fwd.h:17
void dim_warning(unsigned int n_unknowns, unsigned int n_residuals)
unsigned int number_of_a() const
Return the number of subsets in a.
virtual void compute_weights(vnl_vector< double > const &a, vnl_vector< double > const &b, vnl_vector< double > const &c, vnl_vector< double > const &f, vnl_vector< double > &weights)
If using weighted least squares, compute the weights for each i and j.
size_t size() const
Return the length, number of elements, dimension of this vector.
Definition: vnl_vector.h:126
T const * data_block() const
Access the contiguous block storing the elements in the vector. O(1).
Definition: vnl_vector.h:230
unsigned int index_e(int k) const
return the index of ek in e.
void fd_jac_Cij(int i, int j, vnl_vector< double > const &ai, vnl_vector< double > const &bj, vnl_vector< double > const &c, vnl_matrix< double > &Cij, double stepsize)
Use this to compute a finite-difference Jacobian C_ij.
virtual void trace(int iteration, vnl_vector< double > const &a, vnl_vector< double > const &b, vnl_vector< double > const &c, vnl_vector< double > const &e)
Called after each LM iteration to print debugging etc.
virtual void jac_Bij(unsigned int i, unsigned int j, vnl_vector< double > const &ai, vnl_vector< double > const &bj, vnl_vector< double > const &c, vnl_matrix< double > &Bij)
Calculate the Jacobian B_ij, given the parameter vectors a_i, b_j, and c.
sparse_vector sparse_row(int i) const
returns row i as a vector of index-column pairs.
void fd_jac_Aij(int i, int j, vnl_vector< double > const &ai, vnl_vector< double > const &bj, vnl_vector< double > const &c, vnl_matrix< double > &Aij, double stepsize)
Use this to compute a finite-difference Jacobian A_ij.
virtual void apply_weight_ij(int i, int j, double const &weight, vnl_vector< double > &fij)
If using weighted least squares, apply the weight to fij.
virtual void jac_blocks(vnl_vector< double > const &a, vnl_vector< double > const &b, vnl_vector< double > const &c, std::vector< vnl_matrix< double > > &A, std::vector< vnl_matrix< double > > &B, std::vector< vnl_matrix< double > > &C)
Compute the sparse Jacobian in block form.
vnl_sparse_lst_sqr_function(unsigned int num_a, unsigned int num_params_per_a, unsigned int num_b, unsigned int num_params_per_b, unsigned int num_params_c, unsigned int num_residuals_per_e, UseGradient g=use_gradient, UseWeights w=no_weights)
Construct vnl_sparse_lst_sqr_function.
void fd_jac_Bij(int i, int j, vnl_vector< double > const &ai, vnl_vector< double > const &bj, vnl_vector< double > const &c, vnl_matrix< double > &Bij, double stepsize)
Use this to compute a finite-difference Jacobian B_ij.
virtual void fij(int i, int j, vnl_vector< double > const &ai, vnl_vector< double > const &bj, vnl_vector< double > const &c, vnl_vector< double > &fij)
Compute the residuals from the ith component of a, the jth component of b.
virtual void fd_jac_blocks(vnl_vector< double > const &a, vnl_vector< double > const &b, vnl_vector< double > const &c, std::vector< vnl_matrix< double > > &A, std::vector< vnl_matrix< double > > &B, std::vector< vnl_matrix< double > > &C, double stepsize)
Compute the sparse Jacobian in block form using a finite difference approximation.
std::vector< unsigned int > indices_b_
virtual void apply_weights(vnl_vector< double > const &weights, vnl_vector< double > &f)
If using weighted least squares, apply the weights to residuals f.
virtual void compute_weight_ij(int i, int j, vnl_vector< double > const &ai, vnl_vector< double > const &bj, vnl_vector< double > const &c, vnl_vector< double > const &fij, double &weight)
If using weighted least squares, compute the weight.
int num_cols() const
number of columns in the sparse matrix.
Definition: vnl_crs_index.h:45
unsigned int rows() const
Return the number of rows.
Definition: vnl_matrix.h:179
virtual void f(vnl_vector< double > const &a, vnl_vector< double > const &b, vnl_vector< double > const &c, vnl_vector< double > &f)
Compute all residuals.
std::vector< idx_pair > sparse_vector
Definition: vnl_crs_index.h:30
unsigned int number_of_params_b(int j) const
Return the number of parameters of b_i.
std::vector< unsigned int > indices_a_
virtual void jac_Cij(unsigned int i, unsigned int j, vnl_vector< double > const &ai, vnl_vector< double > const &bj, vnl_vector< double > const &c, vnl_matrix< double > &Cij)
Calculate the Jacobian C_ij, given the parameter vectors a_i, b_j, and c.
int num_non_zero() const
number of non-zero elements.
Definition: vnl_crs_index.h:48
unsigned int columns() const
Return the number of columns.
Definition: vnl_matrix.h:187