#Math Methods Problem Set 3 solutions #Part B: Numerical quadrature from ClimateUtilities import * import math #Problem 3. Accuracy of numerical quadrature def dumbTrap(f,interval,n): a = interval[0] b = interval[1] dx = (b-a)/n sum = dx*(f(a)+f(b))/2. for i in range(1,n): x = a+i*dx sum = sum + f(x)*dx return sum #Function returning the exact analytic value of #the definite integral def exact(interval): return (interval[1]**1.5 - interval[0]**1.5)/1.5 #Carry out trapezoidal rule def integral(f,interval): dxList = [] ansList = [] n = 1 while n<100000: dxList.append((interval[1]-interval[0])/n) ansList.append(dumbTrap(f,interval,n)) n = 2*n return dxList,ansList print '----------Problem 3 answers-----------------' I = [0.,10.] dx,ans = integral(math.sqrt,I) #Put it in a curve for plotting c = Curve() c.addCurve(dx) c.addCurve(ans,'trap','Trapezoidal rule, [0,10]') #Put in the line giving the exact answer, for comparison c.addCurve([exact(I) for i in range(len(dx))],'exact','exact') w1 = plot(c) # #We note that the approximate solution converges to the exact #solution as n is made large. The graph of the trapezoidal #rule approximation looks like a parabola, indicating dx**2 #convergence. This is a little surprising, since our theorem #on convergence assumed that the integrand is differentiable #everywhere, but this integrand isn't at x=0. Let's take a #closer look, by plotting the error in a different way. error = [abs(approx-exact(I)) for approx in ans] #Let's save this for later. IMPORTANT: Note that #what we do below, using error[:], makes a NEW COPY of the #list. If we just wrote TrapErrorSingularCase, that would just #define a new reference to the same data, which wouldn't do #what we want. This reference/copy distinction is an unavoidable #issue in any language that allows use of objects. To have #the "=" sign make a copy by default, rather than a new reference, #would usually be wasteful. 
#When we want to make a copy of an object
#and all its data, we have to do something to specify we really want
#a copy. The way we do this below is one of many ways of making a copy.
#
TrapErrorSingularCase = error[:]
#
c = Curve()
c.addCurve(dx)
c.addCurve(error,'error','Trapezoidal Error, [0,10]')
c.addCurve([d*d for d in dx],'quadratic','Quadratic Error')
c.XlogAxis = c.YlogAxis = True #Do a log-log plot
w2 = plot(c)
#Aha! When we plot it this way, we see that the convergence
#is indeed slower than quadratic -- on a log log plot, the
#slope of the error vs. dx is smaller than the slope of a quadratic
#(which is 2 on a log-log plot). Let's estimate the actual slope,
#using the first and last points of the log-log error curve.
slope = (math.log(error[-1])-math.log(error[0]))/(math.log(dx[-1]) - math.log(dx[0]))
print 'Trapezoidal Rule error converges like dx**%f'%slope,'for interval [0,10]'
#We see that the error is approximately order dx**1.5, rather than dx**2

#Now let's try it for an interval that's not singular at the boundary.
#In this case, we just compute the slope, and don't bother making
#the graphs. If you want to see graphs, you can just edit the
#earlier part of the script and run it again.
I = [1.,10.]
dx,ans = integral(math.sqrt,I)
error = [abs(approx-exact(I)) for approx in ans]
slope = (math.log(error[-1])-math.log(error[0]))/(math.log(dx[-1]) - math.log(dx[0]))
print 'Trapezoidal Rule error converges like dx**%f'%slope,'for interval [1,10]'
#This time the convergence is close to dx**2, as it should be

#Now let's try again with Romberg extrapolation. First, we do
#Romberg on the non-singular case we've just done by the trapezoidal
#rule
from polint import polint
#
#The following does Romberg extrapolation on sublists
#of our trapezoidal rule results containing the first n
#approximations.
#Recall that dx[0:n] is dx[0],...,dx[n-1];
#dx[0:3], for example, is dx[0],dx[1],dx[2]
romberg = []
for n in range(1,len(dx)+1):
    #Extrapolate the first n trapezoidal results to dx = 0
    romberg.append(polint(dx[0:n],ans[0:n],0.))
rombergError = [abs(approx - exact(I)) for approx in romberg]
#
#Now we plot the error on a log-log scale
c = Curve()
c.addCurve(dx)
c.addCurve(rombergError,'error','Romberg Error, [1,10]')
c.addCurve([d*d for d in dx],'quadratic','Quadratic Error')
c.XlogAxis = c.YlogAxis = True #Do a log-log plot
w3 = plot(c)
#
#We see from the plot that the error approaches zero much faster
#than dx**2. The bumpiness of the error curve, and the fact that
#it flattens out at small values, comes because we have run up
#against the roundoff error on the computer (64 bit double precision
#at the time of writing), when the error is around 10**-12
#
#Remark: As discussed in the Numerical Recipes chapter on
#Romberg's method, we can actually do better than this, because
#it can be shown that the Taylor series expansion of the trapezoidal
#rule error only contains even terms in dx. That means we can do
#the extrapolation to zero based on a polynomial in dx**2, rather
#than dx. Let's try this.
dxSquared = [d*d for d in dx]
romberg = []
for n in range(1,len(dxSquared)+1):
    #Same extrapolation as before, but with dx**2 as the abscissa
    romberg.append(polint(dxSquared[0:n],ans[0:n],0.))
rombergError = [abs(approx - exact(I)) for approx in romberg]
#
#Now add the new curve and plot (still on a log-log axis)
c.addCurve(rombergError,'error1','Improved Romberg Error, [1,10]')
w4 = plot(c)
#
#We see that using the improved version, we hit the
#roundoff error limitation at even larger dx. With
#dx = .01, our error is only 10**-15 !
#
#
#The Romberg method, however, only gives improved convergence
#if the integrand has a convergent Taylor series, so that
#the trapezoidal rule error has a convergent Taylor series
#in dx. If the integrand has singularities, or singularities
#in some derivative, convergence will be degraded.
#To illustrate
#this, let's do it again for the interval [0,10], which includes
#the point with a singular derivative
I = [0.,10.]
dx,ans = integral(math.sqrt,I)
#Plain Romberg (extrapolation in dx), as before
romberg = []
for n in range(1,len(dx)+1):
    romberg.append(polint(dx[0:n],ans[0:n],0.))
rombergError = [abs(approx - exact(I)) for approx in romberg]
c = Curve()
c.addCurve(dx)
c.addCurve(rombergError,'error','Romberg Error, [0,10]')
c.addCurve([d*d for d in dx],'quadratic','Quadratic Error')
c.XlogAxis = c.YlogAxis = True #Do a log-log plot
#
#Improved Romberg (extrapolation in dx**2) for the singular case
dxSquared = [d*d for d in dx]
romberg = []
for n in range(1,len(dxSquared)+1):
    romberg.append(polint(dxSquared[0:n],ans[0:n],0.))
rombergError = [abs(approx - exact(I)) for approx in romberg]
#
#Now add the new curve and plot (still on a log-log axis)
c.addCurve(rombergError,'error1','Improved Romberg Error, [0,10]')
#Put in the trapezoidal rule error, for comparison
c.addCurve(TrapErrorSingularCase,'error2','Trapezoidal rule error,[0,10]')
w4 = plot(c)
#Wow, what a difference! When the integrand has a singular derivative
#somewhere, the convergence for Romberg's method is no better than what
#we got for the Trapezoidal rule. On the other hand, it's no worse.
#
#When the integrand has a milder singularity (e.g. an infinite
#sixth derivative), the Romberg method will provide higher order
#convergence than the Trapezoidal rule, but not as high order
#convergence as it would be for an infinitely smooth function.
#The rate of convergence of Romberg's method is determined by
#how many derivatives of the integrand are finite. Experiment
#with integrating functions of the form x**(n+1/2) for positive n,
#to see how this works.
#Finally, we try this out for a function we can't integrate #analytically def f(x): return math.exp(-math.cos(x)) I = [0.,math.pi] dx,ans = integral(f,I) dxSquared = [d*d for d in dx] romberg = [] for n in range(1,len(dx)+1): romberg.append(polint(dxSquared[0:n],ans[0:n],0.)) print 'Convergence of integral of exp(-cos x)' for n in range(len(dx)): print n+1,dx[n],'%.14f'%romberg[n] #This prints out romberg[n] to 14 digits #Problem 4 answer: A quadrature class # # (Note: "quadrature" is just what mathematicians call the # process of evaluating a definite integral.) # #Before developing a general quadrature class, we'll #implement a class which efficiently carries out trapezoidal rule #integration with iterative refinement class BetterTrap: def __init__(self,f,interval,nstart): self.f = f self.n = nstart self.interval = interval self.integral = self.dumbTrap(nstart) def dumbTrap(self,n): a = self.interval[0] b = self.interval[1] dx = (b-a)/n sum = dx*(self.f(a)+self.f(b))/2. for i in range(1,n): x = a+i*dx sum = sum + self.f(x)*dx return sum def refine(self): #Compute the sum of f(x) at the #midpoints between the existing intervals. #To get the refinement of the trapezoidal #rule sum we just add this to half the #previous result sum = 0. a = self.interval[0] b = self.interval[1] dx = (b-a)/self.n #Remember: n is the number of subintervals, #not the number of endpoints. Therefore we #have one midpoint per subinterval. Keeping that #in mind helps us get the range of i right in #the following loop for i in range(self.n): sum = sum + self.f(a+(i+.5)*dx)*(dx/2.) #The old trapezoidal sum was multiplied by #the old dx. To get its correct contribution #to the refined sum, we must multiply it by .5, #because the new dx is half the old dx self.integral = .5*self.integral + sum # #Update the number of intervals self.n = 2*self.n #Now let's test this out for the same function we defined earlier # #First instantiate the class quad = BetterTrap(f,I,1) # #Now successively refine. 
#Check the results by comparing
#with the direct computation using dumbTrap. The results
#should be exactly the same
print 'Check of the BetterTrap class:'
for i in range(10):
    print quad.n,quad.integral,dumbTrap(f,I,quad.n)
    quad.refine()
#
#Looks good! Hope your experience was as good as mine.

#The rest of this is extra. Here I define a class called
#romberg, which assists in carrying out evaluation of
#integrals using romberg extrapolation. It assumes polint has
#been imported
class romberg:
    """Evaluates a definite integral by Romberg extrapolation.

    Wraps a BetterTrap integrator; each refine() doubles the resolution
    and records the new trapezoidal estimate. Calling the object
    extrapolates the recorded estimates to zero grid spacing.
    """
    def __init__(self,f,interval,nstart):
        #Make a trapezoidal rule integrator
        self.trap = BetterTrap(f,interval,nstart)
        #We keep lists of all our results, for doing
        #Romberg extrapolation
        self.nList = [nstart]
        self.integralList = [self.trap.integral]
    def refine(self):
        #Double the resolution and record the new estimate
        self.trap.refine()
        self.integralList.append(self.trap.integral)
        self.nList.append(self.trap.n)
    #Use a __call__ method to return the result. The
    #__call__ method takes an optional argument, which is
    #the number of additional refinement steps to take before computing
    #the result. This may not be the ideal way to set up the behavior
    #of the object. I'm still thinking of other designs. It might
    #be better to specify the "accuracy" and optionally the
    #maximum number of refinements, and then have the method
    #do enough refinements to meet the accuracy criterion. The
    #intermediate results could still be retrieved from self.integralList
    #and self.nList for exploratory purposes
    def __call__(self,nRefine = 0):
        for i in range(nRefine):
            self.refine()
        #Extrapolate in dx**2 (i.e. 1/n**2), since the trapezoidal
        #error series contains only even powers of dx
        dx = [1./(n*n) for n in self.nList]
        return polint(dx,self.integralList,0.)

#Now instantiate the object and test it out on the function
#defined previously. Make sure it gives the same result as the
#romberg method carried out "manually"
quad = romberg(f,I,1)
print 'Test of romberg integration class'
print 'Result for 5 refinements is %.13f'%quad(5)
#ToDo: A good challenge is to implement some kind of romberg
#method for tabulated data at fixed resolution, as opposed
#to a function.
#I think we can still get a form of high
#order accuracy, equivalent to fitting the data with
#a high order polynomial, by doing a number of refinement
#steps based on first taking every eighth datapoint, then
#every fourth, and so on until we use every datapoint. It
#is worth experimenting with this.