%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Construction of linear and fuzzy controllers from operator data for a chemical
% plant.
%
% By: Kevin Passino
% Version: 8/20/99
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

clear

load operator.dat  % For information on the data set see this file.

yy=operator(:,6);  % The settings made by the operator (the output to estimate)
y=yy';
xx=operator(:,1:5);  % The data input from the plant that the operator uses
                     % to make decisions
x=xx';  % In particular, x holds the inputs u1(k),u2(k),u3(k),u4(k),u5(k)
        % (one column per time step)

time=0:length(y)-1;  % Set up the time vector for the data
M=length(y);         % Number of training data points

%%%%%%%%%%%%%%%
% Plot the data (this is what is used for training):

figure(1)
clf
subplot(321)
plot(time,x(1,:),'k-')
ylabel('u_1(k)')
axis([min(time) max(time) min(x(1,:)) max(x(1,:))])
zoom
subplot(322)
plot(time,x(2,:),'k-')
ylabel('u_2(k)')
axis([min(time) max(time) min(x(2,:)) max(x(2,:))])
zoom
subplot(323)
plot(time,x(3,:),'k-')
ylabel('u_3(k)')
axis([min(time) max(time) min(x(3,:)) max(x(3,:))])
zoom
subplot(324)
plot(time,x(4,:),'k-')
ylabel('u_4(k)')
axis([min(time) max(time) min(x(4,:)) max(x(4,:))])
zoom
subplot(325)
plot(time,x(5,:),'k-')
ylabel('u_5(k)')
xlabel('Time, k')
axis([min(time) max(time) min(x(5,:)) max(x(5,:))])
zoom
subplot(326)
plot(time,y,'k-')
ylabel('y(k)')
xlabel('Time, k')
axis([min(time) max(time) min(y) max(y)])

% Next, we form the test data. Note that we only have M=70 training data points.
% We do not have an ability to generate more data from the physical system; hence,
% we will generate MGamma=69 points which are "in between" the given data.
% To do this we simply place points at the average of the two values they
% lie between:

for i=1:M-1
   xt(:,i)=0.5*(x(:,i)+x(:,i+1));
   yt(i)=0.5*(y(i)+y(i+1));
end
MGamma=length(yt);       % Number of test data points (M-1)
timet=0:length(yt)-1;    % Time vector for the test data

%%%%%%%%%%%%%%%
% Plot the data (this is what is used for testing):

figure(2)
clf
subplot(321)
plot(timet,xt(1,:),'k-')
ylabel('u_1(k)')
axis([min(timet) max(timet) min(xt(1,:)) max(xt(1,:))])
zoom
subplot(322)
plot(timet,xt(2,:),'k-')
ylabel('u_2(k)')
axis([min(timet) max(timet) min(xt(2,:)) max(xt(2,:))])
zoom
subplot(323)
plot(timet,xt(3,:),'k-')
ylabel('u_3(k)')
axis([min(timet) max(timet) min(xt(3,:)) max(xt(3,:))])
zoom
subplot(324)
plot(timet,xt(4,:),'k-')
ylabel('u_4(k)')
axis([min(timet) max(timet) min(xt(4,:)) max(xt(4,:))])
zoom
subplot(325)
plot(timet,xt(5,:),'k-')
ylabel('u_5(k)')
xlabel('Time, k')
% BUG FIX: the time limits here used min(time)/max(time) (the training time
% vector); for the test-data plot they must use timet like the other subplots.
axis([min(timet) max(timet) min(xt(5,:)) max(xt(5,:))])
zoom
subplot(326)
plot(timet,yt,'k-')
ylabel('y(k)')
xlabel('Time, k')
axis([min(timet) max(timet) min(yt) max(yt)])

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Perform correlation analysis to determine which inputs to use
% to the estimator
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% The i,j element of Rho is the correlation coefficient between the ith and jth
% variable (of course it is symmetric with 1s all down the diagonal since every
% variable is perfectly correlated with itself). Hence, the mth row shows
% how every variable is correlated with the mth variable.

Rho=corrcoef([yy xx])

% To determine the effect of each of the inputs of the estimator (i.e., the
% elements of x(k)) on the output we need to look at the 1,j elements.
% To do this we plot the first row as a histogram:

figure(3)
clf
bar(Rho(1,2:6),'w')
ylabel('Correlation coefficient values')
title('Correlation coefficient between inputs and y(k)')
xlabel('i indices for u_i(k), i=1,2,3,4,5')
grid
axis([0.5 5.5 -1 1])

% To view this use the following 3d plot (this helps to visualize the
% cross-correlations)

figure(4)
clf
surf(Rho);
colormap(jet)
% Use next line for generating plots to put in black and white documents.
%colormap(white);
xlabel('i');
ylabel('j');
zlabel('Correlation coefficient');
title('Matrix of correlation coefficient values');
rotate3d  % For rotating for better viewing

% From this you can see that the operator does not seem to use u4 and u5 too
% strongly in deciding what set point to pick. We may use this as a basis for
% deciding that we should not use these variables as inputs to the controller.
% Also, note that the input u2 does not have that big of an impact - so if we
% want, we could not use it as an input either.

%%%%%%%%%%%%%%%%%%%%%%
% Controller estimator development:
%%%%%%%%%%%%%%%%%%%%%%

%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Batch linear least squares training of an affine function
% using all the inputs
%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Y=y(1,1:M)';  % Column vector of outputs for the least squares solution

% Initialize Phi (each row is a regressor [u1 u2 u3 u4 u5 1])
phi(:,1)=[x(:,1)', 1]';
Phi=phi(:,1)';
% Form the rest of Phi
for i=2:M
   phi(:,i)=[x(:,i)', 1]';
   Phi=[Phi ; phi(:,i)'];
end
% Batch least squares solution via mldivide
theta=Phi\Y

% Next, compute the approximator values and the error vector
% at the training data
for i=1:M,
   phi(:,i)=[x(:,i)', 1]';
   Fltrain(i)=theta'*phi(:,i);
   etrain(i)=y(i)-Fltrain(i);
end
% Print out the training error (mean squared error)
training_error_bls=(1/M)*etrain*etrain'

% Next, compute the approximator values and the error vector
% at the testing data
for i=1:MGamma,
   phit(:,i)=[xt(:,i)', 1]';
   Fltest(i)=theta'*phit(:,i);
   etest(i)=yt(i)-Fltest(i);
end
% Print out the testing error (mean squared error)
testing_error_bls=(1/MGamma)*etest*etest'

% Next, plot the data and the approximator to compare
figure(5)
clf
subplot(211)
plot(time,y,'k-',timet,Fltest,'k--')
ylabel('y and its estimate')
title('Estimator performance, y (solid line), estimate of y (dashed line)')
grid
subplot(212)
plot(timet,etest,'k-')
xlabel('Time, k')
title('Estimation error')
grid

%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Batch linear least squares training of an affine function
% using only inputs u1, u2, u3
%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Y=y(1,1:M)';
x3=x(1:3,:);    % Picks the first three inputs
x3t=xt(1:3,:);

% Initialize Phi
phi3(:,1)=[x3(:,1)', 1]';
Phi3=phi3(:,1)';
% Form the rest of Phi
for i=2:M
   phi3(:,i)=[x3(:,i)', 1]';
   Phi3=[Phi3 ; phi3(:,i)'];
end
theta3=Phi3\Y

% Next, compute the approximator values and the error vector
% at the training data
for i=1:M,
   phi3(:,i)=[x3(:,i)', 1]';
   Fltrain3(i)=theta3'*phi3(:,i);
   etrain3(i)=y(i)-Fltrain3(i);
end
% Print out the training error
training_error_bls_3inputs=(1/M)*etrain3*etrain3'

% Next, compute the approximator values and the error vector
% at the test data
for i=1:MGamma,
   phi3t(:,i)=[x3t(:,i)', 1]';
   Fltest3(i)=theta3'*phi3t(:,i);
   etest3(i)=yt(i)-Fltest3(i);
end
% Print out the testing error
testing_error_bls_3inputs=(1/MGamma)*etest3*etest3'

% Next, plot the data and the approximator to compare
figure(6)
clf
subplot(211)
plot(time,y,'k-',timet,Fltest3,'k--')
ylabel('y and its estimate')
title('Estimator performance, y (solid line), estimate of y (dashed line) (3 inputs)')
grid
subplot(212)
plot(timet,etest3,'k-')
xlabel('Time, k')
ylabel('Estimation errors')
grid

%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Batch linear least squares training of an affine function
% using only inputs u1 and u3
%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Y=y(1,1:M)';
x2=x([1,3],:);    % Picks the u1 and u3 inputs
x2t=xt([1,3],:);

% Initialize Phi
phi2(:,1)=[x2(:,1)', 1]';
Phi2=phi2(:,1)';
% Form the rest of Phi
for i=2:M
   phi2(:,i)=[x2(:,i)', 1]';
   Phi2=[Phi2 ; phi2(:,i)'];
end
theta2=Phi2\Y

% Next, compute the approximator values and the error vector
% at the training data
for i=1:M,
   phi2(:,i)=[x2(:,i)', 1]';
   Fltrain2(i)=theta2'*phi2(:,i);
   etrain2(i)=y(i)-Fltrain2(i);
end
% Print out the training error
training_error_bls_2inputs=(1/M)*etrain2*etrain2'

% Next, compute the approximator values and the error vector
% at the test data
for i=1:MGamma,
   phi2t(:,i)=[x2t(:,i)', 1]';
   Fltest2(i)=theta2'*phi2t(:,i);
   etest2(i)=yt(i)-Fltest2(i);
end
% Print out the testing error (comment fixed: this is the TEST error, not training)
testing_error_bls_2inputs=(1/MGamma)*etest2*etest2'

% Next, plot the data and the approximator to compare
figure(7)
clf
subplot(211)
plot(time,y,'k-',timet,Fltest2,'k--')
ylabel('y and its estimate')
title('Estimator performance, y (solid line), estimate of y (dashed line) (2 inputs)')
grid
subplot(212)
plot(timet,etest2,'k-')
xlabel('Time, k')
ylabel('Estimation errors')
grid

%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Batch linear least squares training of an affine function
% using only u1 as an input
%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Y=y(1,1:M)';
x1=x(1,:);    % Picks the u1 input
x1t=xt(1,:);

% Initialize Phi
phi1(:,1)=[x1(:,1)', 1]';
Phi1=phi1(:,1)';
% Form the rest of Phi
for i=2:M
   phi1(:,i)=[x1(:,i)', 1]';
   Phi1=[Phi1 ; phi1(:,i)'];
end
theta1=Phi1\Y

% Next, compute the approximator values and the error vector
% at the training data
for i=1:M,
   phi1(:,i)=[x1(:,i)', 1]';
   Fltrain1(i)=theta1'*phi1(:,i);
   etrain1(i)=y(i)-Fltrain1(i);
end
% Print out the training error
training_error_bls_1input=(1/M)*etrain1*etrain1'

% Next, compute the approximator values and the error vector
% at the testing data (comment fixed: this loop runs over the TEST set)
for i=1:MGamma,
   phi1t(:,i)=[x1t(:,i)', 1]';
   Fltest1(i)=theta1'*phi1t(:,i);
   etest1(i)=yt(i)-Fltest1(i);
end
% Print out the testing error (comment fixed: this is the TEST error)
testing_error_bls_1input=(1/MGamma)*etest1*etest1'

% Next, plot the data and the approximator to compare
figure(8)
clf
subplot(211)
plot(time,y,'k-',timet,Fltest1,'k--')
ylabel('y and its estimate')
title('Estimator performance, y (solid line), estimate of y (dashed line) (1 input)')
grid
subplot(212)
plot(timet,etest1,'k-')
xlabel('Time, k')
ylabel('Estimation errors')
grid

%%%%%%%%%%%%%%%%%%%%%
% Train a fuzzy system - 1 input
% to premise (1, 3, or all for consequent functions)
%%%%%%%%%%%%%%%%%%%%%

% Next, find Phi, which involves processing x through phi

R=9;  % The number of rules.
n=1;  % One input (to membership functions)

% Check ranges of data for u1 and grid on that region
c1(1,:)=4.5:.25:6.5;         % Membership function centers
sigma1(1,:)=0.25*ones(1,R);  % Membership function spreads

% Initialize Phif
for j=1:R
   mu1(j,1)=exp(-0.5*((x1(1,1)-c1(1,j))/sigma1(1,j))^2);
end
denominator1(1)=sum(mu1(:,1));
for j=1:R
   xi1(j,1)=mu1(j,1)/denominator1(1);  % Normalized membership values
end
% To use only one input to the consequent functions:
%Phif1=[xi1(:,1)', x1(1,1)*xi1(:,1)'];
% To use all the inputs for the consequents, except u4 and u5:
Phif1=[xi1(:,1)', x(1,1)*xi1(:,1)', x(2,1)*xi1(:,1)', x(3,1)*xi1(:,1)'];
% To use all the inputs for the consequents:
%Phif1=[xi1(:,1)', x(1,1)*xi1(:,1)', x(2,1)*xi1(:,1)', x(3,1)*xi1(:,1)', x(4,1)*xi1(:,1)', x(5,1)*xi1(:,1)'];

% Form the rest of Phif
for i=2:M
   for j=1:R
      mu1(j,i)=exp(-0.5*((x1(1,i)-c1(1,j))/sigma1(1,j))^2);
   end
   denominator1(i)=sum(mu1(:,i));
   for j=1:R
      xi1(j,i)=mu1(j,i)/denominator1(i);
   end
   % To use only one input to the consequent functions:
   %Phif1=[Phif1; xi1(:,i)', x1(1,i)*xi1(:,i)'];
   % To use all inputs to the consequents, except u4 and u5:
   Phif1=[Phif1; xi1(:,i)', x(1,i)*xi1(:,i)', x(2,i)*xi1(:,i)', x(3,i)*xi1(:,i)'];
   % To use all inputs to the consequents:
   % Phif1=[Phif1; xi1(:,i)', x(1,i)*xi1(:,i)', x(2,i)*xi1(:,i)', x(3,i)*xi1(:,i)', x(4,i)*xi1(:,i)', x(5,i)*xi1(:,i)'];
end

% Find the least squares estimate
thetaf1=Phif1\Y

% Next, compute the approximator values and the error vector
% at the training data
for i=1:M
   for j=1:R
      mu1tr(j,i)=exp(-0.5*((x1(1,i)-c1(1,j))/sigma1(1,j))^2);
   end
   denominator1tr(i)=sum(mu1tr(:,i));
   for j=1:R
      xi1tr(j,i)=mu1tr(j,i)/denominator1tr(i);
   end
   % To use only one input to the consequent functions:
   %phif1tr(:,i)=[xi1tr(:,i)', x1(1,i)*xi1tr(:,i)']';
   % To use all inputs to the consequents, except u4 and u5:
   phif1tr(:,i)=[xi1tr(:,i)', x(1,i)*xi1tr(:,i)', x(2,i)*xi1tr(:,i)', x(3,i)*xi1tr(:,i)']';
   % To use all inputs to the consequents:
   % phif1tr(:,i)=[xi1tr(:,i)', x(1,i)*xi1tr(:,i)', x(2,i)*xi1tr(:,i)', x(3,i)*xi1tr(:,i)', x(4,i)*xi1tr(:,i)', x(5,i)*xi1tr(:,i)']';
   Fftrain1(i)=thetaf1'*phif1tr(:,i);
   etrainf1(i)=y(i)-Fftrain1(i);
end
% Print out the training error
training_error_TS_1input=(1/M)*etrainf1*etrainf1'

% Next, compute the approximator values and the error vector
% at the testing data
for i=1:MGamma
   for j=1:R
      mu1t(j,i)=exp(-0.5*((x1t(1,i)-c1(1,j))/sigma1(1,j))^2);
   end
   denominator1t(i)=sum(mu1t(:,i));
   for j=1:R
      xi1t(j,i)=mu1t(j,i)/denominator1t(i);
   end
   % To use only one input to the consequent functions:
   %phif1t(:,i)=[xi1t(:,i)', x1t(1,i)*xi1t(:,i)']';
   % To use all inputs to the consequents, except u4 and u5:
   phif1t(:,i)=[xi1t(:,i)', xt(1,i)*xi1t(:,i)', xt(2,i)*xi1t(:,i)', xt(3,i)*xi1t(:,i)']';
   % To use all inputs to the consequents:
   % phif1t(:,i)=[xi1t(:,i)', xt(1,i)*xi1t(:,i)', xt(2,i)*xi1t(:,i)', xt(3,i)*xi1t(:,i)', xt(4,i)*xi1t(:,i)', xt(5,i)*xi1t(:,i)']';
   Fftest1(i)=thetaf1'*phif1t(:,i);
   etestf1(i)=yt(i)-Fftest1(i);
end
% Print out the testing error
testing_error_TS_1input=(1/MGamma)*etestf1*etestf1'

% Next, plot the data and the approximator to compare
figure(9)
clf
subplot(211)
plot(time,y,'k-',timet,Fftest1,'k--')
ylabel('y and its estimate')
title('Estimator performance, y (solid line), estimate of y (dashed line) (1 input)')
grid
subplot(212)
plot(timet,etestf1,'k-')
xlabel('Time, k')
ylabel('Estimation errors')
grid

% Next, test to see if there is correlation between any input and the error
% and if there is then that input should be added as an input to the system.

Rhof1=corrcoef([etrainf1' xx(:,:)]);

% To determine the effect of each of the inputs of the estimator (i.e., the
% elements of x(k)) on the output we need to look at the 1,j elements.
% To do this we plot the first row as a histogram:

figure(10)
clf
bar(Rhof1(1,2:6),'w')
ylabel('Correlation coefficient values')
title('Correlation coefficient between input and error (for training data)')
xlabel('i indices for u_i(k), i=1,2,3,4,5')
grid
axis([0.5 5.5 -1 1])

%%%%%%%%%%%%%%%%%%%%%
% Train a fuzzy system - 2 inputs (2 or all for consequent functions)
%%%%%%%%%%%%%%%%%%%%%

% In this case we use two inputs (u1, u3) to the membership functions, and
% to the consequent functions (or all the inputs to the consequent functions).

% Next, find Phi, which involves processing x through phi

R=9;  % The number of rules.
n=2;  % Two inputs (to membership functions)

% Check ranges of data for u1 and u3 and grid on that region
c(1,:)=[4.5,4.5,4.5,5.5,5.5,5.5,6.5,6.5,6.5];           % u1 centers (3x3 grid)
c(2,:)=[1000,3500,6000,1000,3500,6000,1000,3500,6000];  % u3 centers
sigma(1,:)=1*ones(1,R);
sigma(2,:)=2500*ones(1,R);

% Initialize Phif
for j=1:R
   mu(j,1)=1;  % Initialize (premise via product of Gaussians)
   for ii=1:n
      mu(j,1)=mu(j,1)*exp(-0.5*((x2(ii,1)-c(ii,j))/sigma(ii,j))^2);
   end
end
denominator(1)=sum(mu(:,1));
for j=1:R
   xi(j,1)=mu(j,1)/denominator(1);
end
% To use only two inputs to the consequent functions:
Phif=[xi(:,1)', x2(1,1)*xi(:,1)', x2(2,1)*xi(:,1)'];
% To use all the inputs for the consequents:
%Phif=[xi(:,1)', x(1,1)*xi(:,1)', x(2,1)*xi(:,1)', x(3,1)*xi(:,1)', x(4,1)*xi(:,1)', x(5,1)*xi(:,1)'];

% Form the rest of Phif
for i=2:M
   for j=1:R
      mu(j,i)=1;  % Initialize
      for ii=1:n
         mu(j,i)=mu(j,i)*exp(-0.5*((x2(ii,i)-c(ii,j))/sigma(ii,j))^2);
      end
   end
   denominator(i)=sum(mu(:,i));
   for j=1:R
      xi(j,i)=mu(j,i)/denominator(i);
   end
   % To use only two inputs to the consequent functions:
   Phif=[Phif; xi(:,i)', x2(1,i)*xi(:,i)', x2(2,i)*xi(:,i)'];
   % To use all inputs to the consequents:
   % Phif=[Phif; xi(:,i)', x(1,i)*xi(:,i)', x(2,i)*xi(:,i)', x(3,i)*xi(:,i)', x(4,i)*xi(:,i)', x(5,i)*xi(:,i)'];
end

% Find the least squares estimate
thetaf=Phif\Y

% Next, compute the approximator values and the error vector
% at the training data
for i=1:M
   for j=1:R
      mu(j,i)=1;  % Initialize
      for ii=1:n
         mu(j,i)=mu(j,i)*exp(-0.5*((x2(ii,i)-c(ii,j))/sigma(ii,j))^2);
      end
   end
   denominator(i)=sum(mu(:,i));
   for j=1:R
      xi(j,i)=mu(j,i)/denominator(i);
   end
   % To use only two inputs to the consequent functions:
   phif(:,i)=[xi(:,i)', x2(1,i)*xi(:,i)', x2(2,i)*xi(:,i)']';
   % To use all inputs to the consequents:
   % phif(:,i)=[xi(:,i)', x(1,i)*xi(:,i)', x(2,i)*xi(:,i)', x(3,i)*xi(:,i)', x(4,i)*xi(:,i)', x(5,i)*xi(:,i)']';
   Fftrain(i)=thetaf'*phif(:,i);
   etrainf(i)=y(i)-Fftrain(i);
end
% Print out the training error
training_error_TS_2inputs=(1/M)*etrainf*etrainf'

% Next, compute the approximator values and the error vector
% at the testing data
for i=1:MGamma
   for j=1:R
      mut(j,i)=1;  % Initialize
      for ii=1:n
         mut(j,i)=mut(j,i)*exp(-0.5*((x2t(ii,i)-c(ii,j))/sigma(ii,j))^2);
      end
   end
   denominatort(i)=sum(mut(:,i));
   for j=1:R
      xit(j,i)=mut(j,i)/denominatort(i);
   end
   % To use only two inputs to the consequent functions:
   phift(:,i)=[xit(:,i)', x2t(1,i)*xit(:,i)', x2t(2,i)*xit(:,i)']';
   % To use all inputs to the consequents:
   % phift(:,i)=[xit(:,i)', xt(1,i)*xit(:,i)', xt(2,i)*xit(:,i)', xt(3,i)*xit(:,i)', xt(4,i)*xit(:,i)', xt(5,i)*xit(:,i)']';
   Fftest(i)=thetaf'*phift(:,i);
   etestf(i)=yt(i)-Fftest(i);
end
% Print out the testing error
testing_error_TS_2inputs=(1/MGamma)*etestf*etestf'

% Next, plot the data and the approximator to compare
figure(11)
clf
subplot(211)
plot(time,y,'k-',timet,Fftest,'k--')
ylabel('y and its estimate')
title('Estimator performance, y (solid line), estimate of y (dashed line) (2 inputs)')
grid
subplot(212)
plot(timet,etestf,'k-')
xlabel('Time, k')
ylabel('Estimation errors')
grid

% Next, test to see if there is correlation between any input and the error
% and if there is then that input should be added as an input to the system.

Rhof2=corrcoef([etrainf' xx(:,:)]);

% To determine the effect of each of the inputs of the estimator (i.e., the
% elements of x(k)) on the output we need to look at the 1,j elements.
% To do this we plot the first row as a histogram:

figure(12)
clf
bar(Rhof2(1,2:6),'w')
ylabel('Correlation coefficient values')
title('Correlation coefficient between input and error (for training data)')
xlabel('i indices for u_i(k), i=1,2,3,4,5')
grid
axis([0.5 5.5 -1 1])

%%%%%%%%%%%%%%%%%%%%%
% Train a fuzzy system - 2 inputs - but u1 and u4 (2 or all for consequent functions)
%%%%%%%%%%%%%%%%%%%%%

% Next, find Phi, which involves processing x through phi

R=9;  % The number of rules.
n=2;  % Two inputs (to membership functions)

% Check ranges of data for u1 and u4 and grid on that region
c(1,:)=[4.5,4.5,4.5,5.5,5.5,5.5,6.5,6.5,6.5];  % u1 centers (3x3 grid)
c(2,:)=[-.3,-.1,.1,-.3,-.1,.1,-.3,-.1,.1];     % u4 centers
sigma(1,:)=1*ones(1,R);
sigma(2,:)=0.2*ones(1,R);

x4=x([1,4],:);    % Picks the u1 and u4 inputs
x4t=xt([1,4],:);

% Initialize Phif
for j=1:R
   mu(j,1)=1;  % Initialize (premise via product of Gaussians)
   for ii=1:n
      mu(j,1)=mu(j,1)*exp(-0.5*((x4(ii,1)-c(ii,j))/sigma(ii,j))^2);
   end
end
denominator(1)=sum(mu(:,1));
for j=1:R
   xi(j,1)=mu(j,1)/denominator(1);
end
% To use only two inputs to the consequent functions:
Phif4=[xi(:,1)', x4(1,1)*xi(:,1)', x4(2,1)*xi(:,1)'];
% To use all the inputs for the consequents:
%Phif4=[xi(:,1)', x(1,1)*xi(:,1)', x(2,1)*xi(:,1)', x(3,1)*xi(:,1)', x(4,1)*xi(:,1)', x(5,1)*xi(:,1)'];

% Form the rest of Phif
for i=2:M
   for j=1:R
      mu(j,i)=1;  % Initialize
      for ii=1:n
         mu(j,i)=mu(j,i)*exp(-0.5*((x4(ii,i)-c(ii,j))/sigma(ii,j))^2);
      end
   end
   denominator(i)=sum(mu(:,i));
   for j=1:R
      xi(j,i)=mu(j,i)/denominator(i);
   end
   % To use only two inputs to the consequent functions:
   Phif4=[Phif4; xi(:,i)', x4(1,i)*xi(:,i)', x4(2,i)*xi(:,i)'];
   % To use all inputs to the consequents:
   % Phif4=[Phif4; xi(:,i)', x(1,i)*xi(:,i)', x(2,i)*xi(:,i)', x(3,i)*xi(:,i)', x(4,i)*xi(:,i)', x(5,i)*xi(:,i)'];
end

% Find the least squares estimate
thetaf4=Phif4\Y

% Next, compute the approximator values and the error vector
% at the training data
for i=1:M
   for j=1:R
      mu(j,i)=1;  % Initialize
      for ii=1:n
         mu(j,i)=mu(j,i)*exp(-0.5*((x4(ii,i)-c(ii,j))/sigma(ii,j))^2);
      end
   end
   denominator(i)=sum(mu(:,i));
   for j=1:R
      xi(j,i)=mu(j,i)/denominator(i);
   end
   % To use only two inputs to the consequent functions:
   phif4(:,i)=[xi(:,i)', x4(1,i)*xi(:,i)', x4(2,i)*xi(:,i)']';
   % To use all inputs to the consequents:
   % phif4(:,i)=[xi(:,i)', x(1,i)*xi(:,i)', x(2,i)*xi(:,i)', x(3,i)*xi(:,i)', x(4,i)*xi(:,i)', x(5,i)*xi(:,i)']';
   Fftrain4(i)=thetaf4'*phif4(:,i);
   etrainf4(i)=y(i)-Fftrain4(i);
end
% Print out the training error
training_error_TS_2inputs4=(1/M)*etrainf4*etrainf4'

% Next, compute the approximator values and the error vector
% at the testing data
for i=1:MGamma
   for j=1:R
      mut(j,i)=1;  % Initialize
      for ii=1:n
         mut(j,i)=mut(j,i)*exp(-0.5*((x4t(ii,i)-c(ii,j))/sigma(ii,j))^2);
      end
   end
   denominatort(i)=sum(mut(:,i));
   for j=1:R
      xit(j,i)=mut(j,i)/denominatort(i);
   end
   % To use only two inputs to the consequent functions:
   phif4t(:,i)=[xit(:,i)', x4t(1,i)*xit(:,i)', x4t(2,i)*xit(:,i)']';
   % To use all inputs to the consequents:
   % phif4t(:,i)=[xit(:,i)', xt(1,i)*xit(:,i)', xt(2,i)*xit(:,i)', xt(3,i)*xit(:,i)', xt(4,i)*xit(:,i)', xt(5,i)*xit(:,i)']';
   Fftest4(i)=thetaf4'*phif4t(:,i);
   etestf4(i)=yt(i)-Fftest4(i);
end
% Print out the testing error
testing_error_TS_2inputs4=(1/MGamma)*etestf4*etestf4'

% Next, plot the data and the approximator to compare
figure(13)
clf
subplot(211)
plot(time,y,'k-',timet,Fftest4,'k--')
ylabel('y and its estimate')
title('Estimator performance, y (solid line), estimate of y (dashed line) (2 inputs)')
grid
subplot(212)
plot(timet,etestf4,'k-')
xlabel('Time, k')
ylabel('Estimation errors')
grid

% Next, test to see if there is correlation between any input and the error
% and if there is then that input should be added as an input to the system.

Rhof24=corrcoef([etrainf4' xx(:,:)]);

% To determine the effect of each of the inputs of the estimator (i.e., the
% elements of x(k)) on the output we need to look at the 1,j elements.
% To do this we plot the first row as a histogram:

figure(14)
clf
bar(Rhof24(1,2:6),'w')
ylabel('Correlation coefficient values')
title('Correlation coefficient between input and error (for training data)')
xlabel('i indices for u_i(k), i=1,2,3,4,5')
grid
axis([0.5 5.5 -1 1])

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% End of Program
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%