%
% Written by:
% --
% John L. Weatherwax                2006-08-28
%
% email: wax@alum.mit.edu
%
% Please send comments and especially bug reports to the
% above email address.
%
%-----

clear; close all; clc;

% Plot the function the book provided:
N_sample = 200;
x_sample = linspace(1,5,N_sample);
f_sample = 12 ./ ( x_sample.^2 .* cos(x_sample) + 1./x_sample );
plot( x_sample, f_sample, '-' );
xlabel('x'); ylabel('f(x)');
title('original function f(x)');
%saveas(gcf,'../../WriteUp/Graphics/Chapter6/orig_f_function.eps','epsc');
close all;

% Plot the function we will attempt to fit:
N_sample = 200;
x_sample = linspace(1,5,N_sample);
f_sample = ( -( x_sample.^2 .* cos(x_sample) + 1./x_sample ) + 10 )/22;
fh       = figure();
ph_truth = plot( x_sample, f_sample, '-' );
xlabel('x'); ylabel('f(x)');
title('alternative function f(x)');
%saveas(gcf,'../../WriteUp/Graphics/Chapter6/ann_new_function_w_approx.eps','epsc');

addpath('../Chapter1');
addpath('../Chapter3');

%
% Parameters used in the genetic algorithm
%
N_pop  = 30;   % the population size
X_rate = 0.5;  % selection rate (fraction of population to keep for mating)
mu     = 0.2;  % the mutation rate (how strongly to mutate the population)
max_number_of_iterations = 100;

% method to select mates:
% 1=> pairing from top to bottom
% 2=> random pairing
% 3=> random pairing (rank weighting)
% 4=> weighted random pairing (cost weighting)
% 5=> tournament selection
%
parent_selection_method = 3;

% method used to do crossover:
% 1=> single point crossover
% 2=> double point crossover
% 3=> uniform crossover
% 4=> blending method (3.9)
% 5=> blending method (3.13) or the books suggested method
%
crossover_method = 3;

% a function to minimize (and its bounds)
%
% the number of variables the genetic algorithm will process, i.e. the
% number of unknown weights in the NN: 5 first-layer weights, 6 biases
% (5 hidden + 1 output), and 5 second-layer weights
N_vars = 5 + 5 + 5 + 1;

% our function handle should be able to evaluate an entire population
% input as a matrix of dimension [ N_pop x N_vars ]
wfn = @(x) artificial_neural_net_genetic_fn(x,x_sample,f_sample);

v_bound = +8;
bounds  = repmat( [-v_bound,+v_bound], [N_vars,1] );  % some bounds on all our variables

N_mc       = 1;  % number of monte carlos to run
use_hybrid = 1;  % 0 => the original (non hybrid method), 1 => hybrid method
study_avg  = 1;  % what to plot (the average or the best solution)

fn_values = zeros(N_mc,max_number_of_iterations);
for mc = 1:N_mc
  % reseed the legacy generators so each monte carlo run is reproducible
  randn('seed',mc); rand('seed',mc);
  fprintf('Running Monte Carlo: %5d \n',mc);
  [pop_natural,pop_costs,best_fn_values,avg_fn_values] = ...
      continuous_GA( wfn, bounds, N_pop, X_rate, mu, max_number_of_iterations, ...
                     parent_selection_method, crossover_method, use_hybrid );
  fprintf('best solution= \n');
  pop_natural(1,:)
  if( study_avg==1 )
    fn_values(mc,:) = avg_fn_values;
  else
    fn_values(mc,:) = best_fn_values;
  end
end
m_fn_values_NH = mean( fn_values, 1 );  % NH = not hybrid
s_fn_values_NH = std( fn_values, 1 );

if 0
  figure();
  ph_nh = plot( m_fn_values_NH, '-o' );
  hold on;
  plot( m_fn_values_NH+2*s_fn_values_NH, '-r' );
  plot( m_fn_values_NH-2*s_fn_values_NH, '-r' );
end

% Plot the approximate curve in relationship to the truth:
%
W  = pop_natural(1,:);  % take the best result
W1 = W(1:5);            % weights in the first hidden layer
b1 = W(6:11);           % bias of in the first hidden layer b_1,b_2,...,b_5, and b_6
W2 = W(12:16);          % weights in the second hidden layer

% Compute the output of the NN at each input x value:
logsig = @(x) ( 1 ./ ( 1 + exp(-x) ) );
out    = zeros(1,length(x_sample));
for k = 1:numel(x_sample)
  xin    = ( x_sample(k) - 3 )/2;  % scale inputs so they are [ -1, +1 ]
  a1     = logsig( W1*xin + b1(1:5) );
  a2     = logsig( a1(:)' * W2(:) + b1(6) );
  out(k) = a2;
end

figure(fh); hold on;
ph_approx = plot( x_sample, out, '-.r' );
legend( [ph_truth,ph_approx], {'f(x)','NN approximation'}, 'location', 'best' );
%saveas(gcf,'../../WriteUp/Graphics/Chapter6/ann_new_function_w_approx.eps','epsc');