# Fit a cubic polynomial of CI on POP, plot the data with the fitted
# curve overlaid, then compare the cubic fit against an intercept-only
# model with a partial F-test.

DF <- data.frame(
  POP = c(0.3, 0.5, 0.6, 0.7, 0.9, 0.9, 1.0, 1.7, 1.8, 1.9),
  CI  = c(1.6, 2.8, 2.8, 2.8, 2.0, 2.3, 2.6, 1.9, 2.9, 2.5)
)

# Cubic polynomial regression: CI ~ POP + POP^2 + POP^3.
m <- lm(CI ~ POP + I(POP^2) + I(POP^3), data = DF)
summary(m)

# postscript("../../WriteUp/Graphics/Chapter2/ex_2_29_plot.eps", onefile=FALSE, horizontal=FALSE)
plot(DF$POP, DF$CI, type = "p", pch = 19, cex = 1.5, xlab = "POP", ylab = "CI")

# Evaluate the fitted polynomial on a fine grid so the curve is smooth.
x_sample <- seq(min(DF$POP), max(DF$POP), length.out = 100)
y_sample <- predict(m, newdata = data.frame(POP = x_sample))
points(x_sample, y_sample, type = "l", col = "red")
grid()
# dev.off()

# The fact that a constant is a better fit than the cubic polynomial can
# be seen by comparing against the intercept-only model with anova().
# NOTE: m0 must actually be defined here (it was commented out in the
# original, so the anova() call failed with "object 'm0' not found").
m0 <- lm(CI ~ 1, data = DF)
print(anova(m0, m))