%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use of Neural Nets Toolbox for
% solving a simple function approximation problem.
%
% By: Kevin Passino
% (with help of Yixin Diao)
% Version: 2/25/00
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
clear
figure(1)
clf
% First, generate the training data, G
% For the M=121 case: a uniform grid of 121 points on [-6,6]
x=-6:0.1:6;
xt=x';                % Column-vector copy (used below for newff's range argument)
M=length(x);
% Preallocate so the loop does not grow the arrays element-by-element
z=zeros(1,M);
G=zeros(1,M);
Gz=zeros(1,M);
for i=1:M
    z(i)=0.15*(rand-0.5)*2; % Auxiliary "noise" variable, uniform on [-0.15,0.15]
    % Underlying function: arctan trend (+2.15 offset) plus several narrow
    % Gaussian bumps of various widths and signs
    G(i)=exp(-50*(x(i)-1)^2)-0.5*exp(-100*(x(i)-1.2)^2)+atan(2*x(i))+2.15+...
        0.2*exp(-10*(x(i)+1)^2)-0.25*exp(-20*(x(i)+1.5)^2)+0.1*exp(-10*(x(i)+2)^2)-0.2*exp(-10*(x(i)+3)^2);
    if x(i) >= 0
        % Add a parabolic piece on the nonnegative half of the domain
        % (note: this term is exactly zero at x=0, so G is continuous there)
        G(i)=G(i)+0.1*(x(i)-2)^2-0.4;
    end
    Gz(i)=G(i)+1*z(i); % Adds in the influence of the auxiliary variable (or can multiply by 0)
end
Gzt=Gz';
% Now, x,Gz are training pairs
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Next, compute the approximator values (i.e., at a test set)
% Define the points at which it will be tested (a 10x finer grid than training)
xtest=-6:0.01:6;
Mtest=length(xtest);
% Generate the "truth" values of the underlying function on the test grid,
% using the same formula (and the same auxiliary noise model) as the training data
% Preallocate so the loop does not grow the arrays element-by-element
ztest=zeros(1,Mtest);
Gtest=zeros(1,Mtest);
Gztest=zeros(1,Mtest);
for i=1:Mtest
    ztest(i)=0.15*(rand-0.5)*2; % Auxiliary "noise" variable, uniform on [-0.15,0.15]
    Gtest(i)=exp(-50*(xtest(i)-1)^2)-0.5*exp(-100*(xtest(i)-1.2)^2)+atan(2*xtest(i))+2.15+...
        0.2*exp(-10*(xtest(i)+1)^2)-0.25*exp(-20*(xtest(i)+1.5)^2)+0.1*exp(-10*(xtest(i)+2)^2)-0.2*exp(-10*(xtest(i)+3)^2);
    if xtest(i) >= 0
        % Same parabolic piece on the nonnegative half as in the training data
        Gtest(i)=Gtest(i)+0.1*(xtest(i)-2)^2-0.4;
    end
    Gztest(i)=Gtest(i)+1*ztest(i); % Adds in the influence of the auxiliary variable (or can multiply by 0)
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Form a feedforward neural network with n1 hidden layer neurons all with
% logistic neurons (logsig - use tansig for hyperbolic tangent), and train with Levenberg-Marquardt method (note that it
% picks all parameters for the algorithm). Output layer is a single linear neuron.
% Using traincgp rather than trainlm would give a type of conjugate gradient method (Polak-Ribiere)
% and traingd gives a basic gradient descent method
% NOTE(review): this uses the old four-argument newff syntax
% (input range matrix, layer sizes, transfer functions, training function),
% which pre-R2010b versions of the NN Toolbox expect - confirm against the
% installed toolbox version before modernizing.
n1=11; % Number of hidden-layer neurons
% Alternatives (uncomment one and comment out the active newff line below):
%net = newff([min(xt)' max(xt)'], [n1 1], {'logsig' 'purelin'},'traingd');
%net = newff([min(xt)' max(xt)'], [n1 1], {'logsig' 'purelin'},'traincgp');
%net = newff([min(xt)' max(xt)'], [n1 1], {'logsig' 'purelin'},'trainlm');
net = newff([min(xt)' max(xt)'], [n1 1], {'tansig' 'purelin'},'trainlm');
net.trainParam.epochs = 500; % Sets the number of training epochs (with even 25 does ok for this example)
net = train(net,x,Gz); % Train the network with the training data
Fmlp = sim(net,xtest); % Test the network at the test data
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Next, plot the network output and actual output to compare (do it two ways)
% Figure 2: training data (circles) overlaid with the network's map (line)
figure(2)
clf
plot(x,Gz,'ko',xtest,Fmlp,'k-')
xlabel('x')
ylabel('Data and neural network mapping')
title('Multilayer perceptron trained with Matlab NN Toolbox')
grid
axis([min(x) max(x) 0 max(G)])
% Figure 3: noisy test-set "truth" (dots) overlaid with the network's map (line)
figure(3)
clf
plot(xtest,Gztest,'k.',xtest,Fmlp,'k-')
xlabel('x')
ylabel('Data and neural network mapping')
title('Multilayer perceptron trained with Matlab NN Toolbox')
grid
axis([min(x) max(x) 0 max(G)])
% And for fun show the error
figure(4)
clf
plot(xtest,Gztest-Fmlp,'k-')
xlabel('x')
ylabel('Error between data and neural network mapping')
title('Multilayer perceptron trained with Matlab NN Toolbox')
grid
% Only constrain the x-axis here: the approximation error is centered near
% zero (and can be negative), so reusing the [0,max(G)] y-limits from the
% function plots would clip the error curve below zero.
xlim([min(x) max(x)])
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% End of program
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%