version $Id: fitting_toolkit.py,v 1.18 2006/08/08 02:00:19 mendenhall Exp $
To use it, subclass the fit class and, at minimum, define function(self, params, x), as below:
    from fitting_toolkit import fit

    class myfit(fit):
        "a really terrible way to fit a polynomial to data!"
        def function(self, params, x):
            return params[0]+params[1]*x+params[2]*x*x
Then instantiate the class, give it initial parameter estimates, set the auto-derivative step size if numerical derivatives are being used (the default step is 0.001 times each parameter value), add points, and repeatedly call hessian_compute_fit until the fit has converged (which, with plain Hessian stepping, it often won't!):
    from math import sqrt    #needed for the chi-squared printout below

    x=myfit()
    x.set_initial_params([1.,1.,1.])
    x.auto_deriv_steps((1,1,1))

    x.add_point(0, 2)
    x.add_point(1, 5)
    x.add_point(2, 10)
    x.add_point(3, 17)
    x.add_point(4, 26.1)

    print "***Start test fit***"
    for i in range(5):
        x.hessian_compute_fit()
        print x.fitmat, x.fitvector, x.funcparams, x.funcvals[:x.pointcount], sqrt(x.chi2/x.pointcount)
Points can also be added in bulk from arrays:

    x.add_points(<array of independent variables>, <array of dependent variables>, x_transpose=<0 | 1>)
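For example, a minimal sketch of the bulk interface, equivalent to the five add_point calls above (assumption: for a one-dimensional independent variable the x_transpose flag is irrelevant; it should only matter for choosing the layout of multi-dimensional independent data):

    import Numeric

    xvals = Numeric.array((0., 1., 2., 3., 4.))
    yvals = Numeric.array((2., 5., 10., 17., 26.1))
    x.add_points(xvals, yvals)    #bulk equivalent of the add_point calls above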
For a more robust fitter, use Levenberg-Marquardt stepping as follows (this example shows one possible convergence check):
    savechi2=1e30
    acceptcount=0
    maxloops=20    #iteration cap; adjust to taste
    for i in range(maxloops):
        reject=x.lm_compute_fit(trace=1)
        chi2=sqrt(x.chi2/x.pointcount)
        if not reject:
            if chi2/savechi2 > 0.99 and acceptcount > 5:
                break
            else:
                savechi2=chi2
                acceptcount += 1
        elif chi2/savechi2 > 1.01:    #only penalize for really bad steps, not noise!
            acceptcount=0

    print x.funcparams, chi2
If weighted fitting is desired, include a weight_func(self) method in the subclass, e.g.:
    def weight_func(self):    #throw out points with < 5 a/d counts in the bin.
        #This is a bad way to do this if lots of points are being tossed,
        #but doesn't cost much for the expected case that it doesn't happen often
        return Numeric.greater(self.yarray[:self.pointcount], 5.0)
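If statistical weights are wanted instead, one common choice is 1/sigma**2 weighting. The following is only a hypothetical sketch, not part of the toolkit; it assumes Poisson counting statistics (so the variance of a bin equals its count) and that Numeric has been imported as elsewhere in this module:

    def weight_func(self):    #weight each point by 1/sigma**2
        #assumption: yarray holds raw counts with Poisson statistics, so var(y) = y
        y = self.yarray[:self.pointcount]
        #clip below at 1 count to avoid dividing by zero in empty bins
        return 1.0/Numeric.clip(y, 1.0, 1e30)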
If analytic derivatives are desired, also define derivs(self) in the subclass, returning an (npoints, nparams) array of the partial derivatives of the function with respect to each parameter, e.g.:

    class gauss_fit_2d(fit):
        def function(self, p, r):
            z0, a, xmu, xsigma, ymu, ysigma = p
            xsigi=-1.0/(2.0*xsigma**2)
            ysigi=-1.0/(2.0*ysigma**2)
            return z0+a*Numeric.exp(xsigi*(r[0]-xmu)**2+ysigi*(r[1]-ymu)**2)

        def derivs(self):
            #analytic derivatives for a 2-d gaussian:
            #z0+a*exp( -(x-xmu)**2/(2*xsig**2) - (y-ymu)**2/(2*ysig**2))
            z0, a, xmu, xsigma, ymu, ysigma = self.funcparams
            n=self.pointcount
            x=self.xarray[0,:n]
            y=self.xarray[1,:n]
            xsigi=-1.0/(2.0*xsigma**2)
            ysigi=-1.0/(2.0*ysigma**2)
            dx=x-xmu
            dx2=dx*dx
            dy=y-ymu
            dy2=dy*dy
            expfact=Numeric.exp(xsigi*dx2+ysigi*dy2)
            z=a*expfact

            dd=Numeric.zeros((n, 6), self.atype)
            dd[:,0]=1.0
            dd[:,1]=expfact
            dd[:,2]=(-2.0*xsigi)*(dx*z)
            dd[:,3]=(-2.0*xsigi/xsigma)*(dx2*z)
            dd[:,4]=(-2.0*ysigi)*(dy*z)
            dd[:,5]=(-2.0*ysigi/ysigma)*(dy2*z)

            return dd
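A driver for this class might look like the following sketch. The synthetic data are invented for illustration, and one assumption is flagged explicitly: the (npoints, 2) array of (x, y) pairs is passed to add_points with x_transpose=1 on the assumption that this flag converts it to the internal (2, npoints) layout which derivs above indexes as xarray[0,:n] and xarray[1,:n]:

    import math
    import Numeric

    g=gauss_fit_2d()
    #parameter order: z0, a, xmu, xsigma, ymu, ysigma
    g.set_initial_params([0., 1., 1.5, 1., 1.5, 1.])
    #synthetic data: a gaussian of amplitude 2 centered at (2, 2) on a baseline of 0.1
    xy=Numeric.array([(float(i), float(j)) for i in range(5) for j in range(5)])
    z=Numeric.array([0.1+2.0*math.exp(-((p[0]-2.)**2+(p[1]-2.)**2)/2.0) for p in xy])
    g.add_points(xy, z, x_transpose=1)    #x_transpose usage is an assumption, see above
    for i in range(10):
        g.lm_compute_fit()
    print g.funcparams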
Classes:

    fit
    linear_combination_fit
    polynomial_fit
    gauss_fit
    gauss_fit_2d
    cauchy_fit
    generalized_cauchy_fit
    myfit
    my_svd_fit
    my_svd_bad_fit
Functions:

    singular_value_decomposition
    find_peak
Variables:

    _rcsid = "$Id: fitting_toolkit.py,v 1.18 2006/08/08 02:00:19 mendenhall Exp $"
    numeric_float = Numeric.float64
    solve_linear_equations = linalg.solve
    x = myfit()    (module-level instance, apparently left over from the example code above)