function [Y,G,E]=TwoLayer(Input,Tar,W,NNP)
% TwoLayer  Forward pass, Jacobian and error gradient of a two-layer net.
%
%   [Y,G,E] = TwoLayer(Input,Tar,W,NNP)
%
%   Input : np-by-ni matrix of input patterns (one pattern per row).
%   Tar   : np-by-NNP(2) matrix of target outputs.
%   W     : flat parameter vector (row or column), laid out as
%           [ input->hidden weights (ni*NNP(1), column by column),
%             hidden->output weights (NNP(1)*NNP(2)),
%             hidden biases (NNP(1)),
%             output biases (NNP(2)) ].
%   NNP   : [number of hidden neurons, number of output neurons].
%
%   Y : np-by-NNP(2) network output (sigmoid hidden layer, linear output).
%   G : np-by-numel(W) Jacobian of the output w.r.t. each parameter.
%   E : gradient of 0.5*||Y-Tar||^2 w.r.t. W, i.e. G'*(Y-Tar).
%
%   NOTE(review): the Jacobian/gradient part assumes a single output
%   neuron (NNP(2)==1) -- W2 is indexed as a vector and only NNP(1)
%   output-weight columns of G are written. Confirm before NNP(2)>1.

[np,ni]=size(Input);

% Sizes of the weight groups inside the flat vector W.
nW1=ni*NNP(1);                       % input->hidden weight count
nW2=NNP(1)*NNP(2);                   % hidden->output weight count

% Unpack W. reshape of contiguous slices replaces the original
% element-wise pointer loops (same column-by-column layout) and also
% works whether W is stored as a row or a column vector.
W1=reshape(W(1:nW1),ni,NNP(1));
W2=reshape(W(nW1+1:nW1+nW2),NNP(1),NNP(2));
T1=reshape(W(nW1+nW2+1:nW1+nW2+NNP(1)),1,NNP(1));
T2=reshape(W(nW1+nW2+NNP(1)+1:nW1+nW2+NNP(1)+NNP(2)),1,NNP(2));

% Forward pass: sigmoid hidden layer, linear output layer.
X1=Input*W1+ones(np,1)*T1;           % hidden net input, np-by-NNP(1)
Y1=1./(1+exp(-X1));                  % hidden activations (logistic)
Y=Y1*W2+ones(np,1)*T2;               % network output, np-by-NNP(2)

% Derivative of each hidden activation w.r.t. its net input, scaled by
% the corresponding output weight (chain rule through the output layer;
% scalar W2(i) indexing assumes NNP(2)==1, see NOTE above).
Der1=Y1.*(1-Y1);
for i=1:NNP(1)
    Der1(:,i)=Der1(:,i)*W2(i);
end

% Jacobian dY/dW, one column per parameter, preallocated.
% Column order matches the layout of W documented above.
nPar=nW1+nW2+NNP(1)+NNP(2);
G=zeros(np,nPar);
for i=1:NNP(1)
    G(:,(i-1)*ni+(1:ni))=repmat(Der1(:,i),1,ni).*Input;  % d/dW1(:,i)
    G(:,nW1+nW2+i)=Der1(:,i);                            % d/dT1(i)
end
G(:,nW1+(1:NNP(1)))=Y1;              % d/dW2 (single-output layout)
G(:,nW1+nW2+NNP(1)+1)=ones(np,1);    % d/dT2

% Gradient of the sum-of-squares error 0.5*||Y-Tar||^2 w.r.t. W.
E=G'*(Y-Tar);