function [Y,G,E,Y2]=TwoLayers2(Inp,Tar,W,NNP)
% TwoLayers2 --- forward pass, Jacobian and gradient for a network with two
% sigmoid hidden layers whose linear output weights are solved by least squares.
%   Inp : np x ni matrix of input patterns
%   Tar : np x 1 vector of targets
%   W   : vector of all nonlinear weights and biases
%   NNP : [n1 n2], number of neurons in the first and second hidden layers
W=W(:)';
[np,ni]=size(Inp);
% PWeights --- pointer to the current weights
% PBias   --- pointer to the current bias
PWeights=1:ni;
PBias=ni*NNP(1)+NNP(1)*NNP(2)+1;
% Obtain the weights from the input to the first layer in W1
% and the biases of the first hidden layer in T1
W1=zeros(ni,NNP(1));
for i=1:NNP(1)
  W1(:,i)=W(PWeights)';
  PWeights=PWeights+ni;
end
T1=W(PBias:PBias-1+NNP(1));
% Obtain the weights from the first to the second layer in W2
% and the biases of the second hidden layer in T2
PWeights=(1:NNP(1))+NNP(1)*ni;
PBias=PBias+NNP(1);
W2=zeros(NNP(1),NNP(2));
for i=1:NNP(2)
  W2(:,i)=W(PWeights)';
  PWeights=PWeights+NNP(1);
end
T2=W(PBias:PBias-1+NNP(2));
% Xi is the net input to the neurons in the i-th layer; Yi is the
% output of the neurons in the i-th layer; Deri is the derivative of
% Yi w.r.t. Xi
X1=Inp*W1+ones(np,1)*T1;
Y1=ones(np,NNP(1))./(1+exp(-X1));
Der1=Y1.*(1-Y1);
X2=Y1*W2+ones(np,1)*T2;
Y2=ones(np,NNP(2))./(1+exp(-X2));
Der2=Y2.*(1-Y2);
% Solve for the linear output parameters teta (theta) by least squares
% and form the network output Y
teta=[ones(np,1) Y2]\Tar;
Y=[ones(np,1) Y2]*teta;
% Scale the second-layer derivatives by the corresponding output weights
for i=1:NNP(2)
  Der2(:,i)=Der2(:,i)*teta(i+1);
end
% Build the Jacobian G: one row per pattern, one column per weight/bias,
% in the same order in which the weights are stored in W
G=zeros(np,ni*NNP(1)+NNP(1)*NNP(2)+NNP(1)+NNP(2));
l=1;
PBias=(ni*NNP(1))+NNP(1)*NNP(2)+1;
for i=1:NNP(1)
  delta=(Der2*W2(i,:)').*Der1(:,i);
  for j=1:ni
    G(:,l)=delta.*Inp(:,j);
    l=l+1;
  end
  G(:,PBias)=delta;
  PBias=PBias+1;
end
for i=1:NNP(2)
  for j=1:NNP(1)
    G(:,l)=Der2(:,i).*Y1(:,j);
    l=l+1;
  end
  G(:,PBias)=Der2(:,i);
  PBias=PBias+1;
end
% E is the gradient of the half sum-of-squared-errors w.r.t. W
E=G'*(Y-Tar);
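
% ---------------------------------------------------------------------
% Example usage --- a minimal sketch, not part of the original routine:
% the network size, random data, toy target, and step size below are all
% illustrative assumptions, and TwoLayers2.m is assumed to be on the path.
ni=2; NNP=[3 2];                            % 2 inputs; 3 and 2 hidden units
nw=ni*NNP(1)+NNP(1)*NNP(2)+NNP(1)+NNP(2);   % nonlinear weights and biases
Inp=rand(50,ni);                            % 50 random training patterns
Tar=sum(Inp,2);                             % toy target: sum of the inputs
W=0.1*randn(1,nw);                          % small random initial weights
[Y,G,E]=TwoLayers2(Inp,Tar,W,NNP);

% Sanity check (also an illustrative assumption): because teta is re-solved
% by least squares at every call, a central finite difference of the reduced
% error 0.5*sum((Y-Tar).^2) should agree with the analytic gradient E.
h=1e-6; Efd=zeros(nw,1);
for k=1:nw
  Wp=W; Wp(k)=Wp(k)+h; Yp=TwoLayers2(Inp,Tar,Wp,NNP);
  Wm=W; Wm(k)=Wm(k)-h; Ym=TwoLayers2(Inp,Tar,Wm,NNP);
  Efd(k)=(0.5*sum((Yp-Tar).^2)-0.5*sum((Ym-Tar).^2))/(2*h);
end
disp(max(abs(E-Efd)))                       % should be small, around h^2

W=W-0.01*E';                                % one plain gradient-descent step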