Jafar
This namespace contains functions to call some of the classical optimization algorithms for estimating the parameters p by minimizing sum( f_i(p)^2 ).
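Written out as a formula (a standard nonlinear least-squares statement, given here for clarity rather than quoted from the library sources), the solvers look for the parameter vector p that minimizes

\[ S(p) = \sum_i f_i(p)^2 \]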
The class _TFunction_ must at least contain the following member functions:

ublas::vector< _TType_ > values(const ublas::vector< _TType_ >& x) const, which returns the values of the functions f_i at x
ublas::matrix< _TType_ > jacobian(const ublas::vector< _TType_ >& x) const, which returns the jacobian of the functions f_i at x
int count() const, which returns the number of functions f_i

and optionally, when using the gaussNewton algorithm with the Newton method:

ublas::matrix< _TType_ > hessian(const ublas::vector< _TType_ >& x, int n) const, which returns the hessian of the function f_n at x
Example:
// Optimization of 2 functions depending on (a,b):
//   a*a + 2*b - 5
//   -a + b + 1
class Function {
public:
  ublas::vector<double> values(const ublas::vector<double>& x) const {
    ublas::vector<double> v(2);
    v[0] = x[0]*x[0] + 2.0 * x[1] - 5.0;
    v[1] = -x[0] + x[1] + 1.0;
    return v;
  }
  ublas::matrix<double> jacobian(const ublas::vector<double>& x) const {
    ublas::matrix<double> j(2, 2);
    j(0,0) = 2.0 * x[0];
    j(0,1) = 2.0;
    j(1,0) = -1.0;
    j(1,1) = 1.0;
    return j;
  }
  ublas::matrix<double> hessian(const ublas::vector<double>& x, int n) const {
    ublas::matrix<double> h(2, 2);
    if(n == 0) {
      h(0,0) = 2.0; h(0,1) = 0.0;
      h(1,0) = 0.0; h(1,1) = 0.0;
    } else if(n == 1) {
      h(0,0) = 0.0; h(0,1) = 0.0;
      h(1,0) = 0.0; h(1,1) = 0.0;
    }
    return h;
  }
  int count() const {
    return 2;
  }
};

Function f;
ublas::vector<double> v(2);

v[0] = 10.0;
v[1] = 10.0;
std::cout << "Gauss-Newton:" << std::endl;
std::cout << "Remain = " << joptimization::algorithms::gaussNewton< joptimization::methods::GaussNewton< Function, double > >(&f, v, 100, 1e-12) << std::endl;
std::cout << v << f.values(v) << std::endl;

v[0] = 10.0;
v[1] = 10.0;
std::cout << "Newton:" << std::endl;
std::cout << "Remain = " << joptimization::algorithms::gaussNewton< joptimization::methods::Newton< Function, double > >(&f, v, 100, 1e-12) << std::endl;
std::cout << v << f.values(v) << std::endl;

v[0] = 1.0;
v[1] = 1.0;
std::cout << "Gradient descent:" << std::endl;
std::cout << "Remain = " << joptimization::algorithms::gradientDescent< Function, double >(&f, v, 100, 1e-12, 1e-2) << std::endl;
std::cout << v << f.values(v) << std::endl;

v[0] = 10.0;
v[1] = 10.0;
std::cout << "Levenberg-Marquardt:" << std::endl;
std::cout << "Remain = " << joptimization::algorithms::levenbergMarquardt< Function, double >(&f, v, 100, 1e-12, 0.01, 10.0) << std::endl;
std::cout << v << f.values(v) << std::endl;
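To make the role of each required member function concrete, here is a minimal sketch of a fixed-step gradient descent written against the same interface. It is not the library's implementation; the function name gradientDescentSketch and the convergence test on the squared residual norm are assumptions made for illustration.

#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/matrix.hpp>

namespace ublas = boost::numeric::ublas;

// Hypothetical illustration, not joptimization's own code: one possible
// fixed-step gradient descent over the sum of squared residuals.
template<class TFunction, typename TType>
TType gradientDescentSketch(TFunction* f, ublas::vector<TType>& p,
                            int iter, TType epsilon, TType gamma)
{
  TType remain = TType(0);
  for (int k = 0; k < iter; ++k) {
    ublas::vector<TType> r = f->values(p);                     // residuals f_i(p)
    ublas::matrix<TType> J = f->jacobian(p);                   // d f_i / d p_j
    ublas::vector<TType> g = ublas::prod(ublas::trans(J), r);  // gradient of 0.5 * sum f_i(p)^2
    p -= gamma * g;                                            // fixed-step descent
    remain = ublas::inner_prod(r, r);                          // current sum of squares
    if (remain < epsilon) break;                               // stop once small enough
  }
  return remain;
}

values() supplies the residual vector and jacobian() its derivatives; count() and hessian() are only needed by methods that assemble the normal equations or second-order terms.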
Functions

template<class _TFunction_, typename _TType_>
_TType_ gradientDescent (_TFunction_ *f, ublas::vector< _TType_ > &parameters, int iter, _TType_ epsilon, _TType_ gamma)
    Perform an optimization following the gradient descent algorithm.

template<class _TMethod_>
_TMethod_::Type gaussNewton (typename _TMethod_::Function *f, ublas::vector< typename _TMethod_::Type > &parameters, int iter, typename _TMethod_::Type epsilon)
    Perform an optimization following the Gauss-Newton algorithm.

template<class _TFunction_, typename _TType_>
_TType_ levenbergMarquardt (_TFunction_ *f, ublas::vector< _TType_ > &parameters, int iter, _TType_ epsilon, _TType_ lambda0, _TType_ nu)
    Perform an optimization following the Levenberg-Marquardt algorithm.
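For reference, the three update rules can be stated in their standard textbook form (the exact roles of gamma, lambda0 and nu in this implementation are inferred from their conventional use, not from the documentation above):

\[ p_{k+1} = p_k - \gamma \, J^\top f(p_k) \qquad \text{(gradient descent, fixed step } \gamma\text{)} \]

\[ p_{k+1} = p_k - (J^\top J)^{-1} J^\top f(p_k) \qquad \text{(Gauss-Newton)} \]

\[ p_{k+1} = p_k - (J^\top J + \lambda_k I)^{-1} J^\top f(p_k) \qquad \text{(Levenberg-Marquardt)} \]

where J is the jacobian of f at p_k; Levenberg-Marquardt starts from the damping lambda_0 and typically multiplies or divides lambda_k by nu depending on whether the last step increased or decreased the residual.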