function [Y,G,E]=ThreeLayers(Inp,Tar,W,NNP)
% THREELAYERS  Forward pass, per-pattern Jacobian and error gradient for a
% feed-forward network with two sigmoid hidden layers and a linear output.
%
%   [Y,G,E] = ThreeLayers(Inp,Tar,W,NNP)
%
%   Inp : np-by-ni matrix of input patterns (one pattern per row).
%   Tar : np-by-1 vector of target outputs.
%   W   : row vector holding all weights and biases, laid out
%         column-major as [W1(:)' W2(:)' W3(:)' T1 T2 T3].
%   NNP : [n1 n2 n3] = neurons per layer.
%         NOTE(review): the bias-offset formula ni*n1+(n1+1)*n2+1 and the
%         gradient section only work when n3 == 1 (single output) -- the
%         original code carried the same implicit assumption; confirm
%         before calling with n3 > 1.
%
%   Y : np-by-1 network output (output layer is linear).
%   G : np-by-numel(W) matrix; G(p,k) is the derivative of output p
%       w.r.t. weight/bias k, columns ordered exactly as W.
%   E : numel(W)-by-1 error gradient, G'*(Y-Tar).

[np,ni]=size(Inp);
n1=NNP(1); n2=NNP(2); n3=NNP(3);

% --- Unpack the flat weight vector (reshape reproduces the original
% column-by-column copy loops, without growing arrays in a loop) ------
W1=reshape(W(1:ni*n1),ni,n1);                 % input   -> hidden 1
W2=reshape(W(ni*n1+(1:n1*n2)),n1,n2);         % hidden1 -> hidden 2
W3=reshape(W(ni*n1+n1*n2+(1:n2*n3)),n2,n3);   % hidden2 -> output
PBias=ni*n1+(n1+1)*n2+1;                      % first bias index (assumes n3==1)
T1=W(PBias:PBias-1+n1);                       % hidden-1 biases (row vector)
T2=W(PBias+n1:PBias+n1+n2-1);                 % hidden-2 biases
T3=W(PBias+n1+n2:PBias+n1+n2+n3-1);           % output bias

% --- Forward pass: Xi = net input, Yi = output of layer i,
% Deri = derivative of Yi w.r.t. Xi (logistic: y.*(1-y)) --------------
X1=Inp*W1+ones(np,1)*T1;
Y1=ones(np,n1)./(1+exp(-X1));
Der1=Y1.*(1-Y1);
X2=Y1*W2+ones(np,1)*T2;
Y2=ones(np,n2)./(1+exp(-X2));
Der2=Y2.*(1-Y2);
Y=Y2*W3+ones(np,1)*T3;                        % linear output layer

% --- Backpropagated sensitivities ------------------------------------
% Scale hidden-2 derivatives by the output weights (n3==1 assumed).
Der2=Der2.*(ones(np,1)*W3(:,1)');
% Hidden-1 deltas for all neurons at once; the original recomputed
% Der2*W2(i,:)' inside the loop for every i.
Delta1=(Der2*W2').*Der1;                      % np-by-n1

% --- Assemble G: one column per entry of W, same ordering ------------
G=zeros(np,ni*n1+n1*n2+n2*n3+n1+n2+n3);       % preallocate (every column is written)
l=1;
PBias=ni*n1+(n1+1)*n2+1;
for i=1:n1
  for j=1:ni
    G(:,l)=Delta1(:,i).*Inp(:,j); l=l+1;      % dY/dW1(j,i)
  end;
  G(:,PBias)=Delta1(:,i); PBias=PBias+1;      % dY/dT1(i)
end;
for i=1:n2
  for j=1:n1
    G(:,l)=Der2(:,i).*Y1(:,j); l=l+1;         % dY/dW2(j,i)
  end;
  G(:,PBias)=Der2(:,i); PBias=PBias+1;        % dY/dT2(i)
end;
for j=1:n2
  G(:,l)=Y2(:,j); l=l+1;                      % dY/dW3(j)
end;
G(:,PBias)=ones(np,1);                        % dY/dT3 (output bias)

E=G'*(Y-Tar);