function [Y,G,E,Y1] = TwoLayers2(Inp,Tar,W,NNP)
% TWOLAYERS2  Forward pass and error gradient for a two-layer network whose
% output layer is solved directly by linear least squares.
%
% Inputs:
%   Inp  - np x ni matrix of input patterns (one pattern per row)
%   Tar  - np x no matrix of target outputs
%   W    - vector of hidden-layer parameters, laid out as the ni weights of
%          hidden unit 1, then unit 2, ..., followed by the NNP(1) biases
%   NNP  - network size spec; only NNP(1) (number of hidden units) is used
%
% Outputs:
%   Y    - np x no network output (least-squares readout of the hidden layer)
%   G    - np x (ni*NNP(1)+NNP(1)) per-pattern derivatives of Y w.r.t. each
%          hidden weight/bias, in the same column order as W
%   E    - gradient G'*(Y-Tar) of the squared error w.r.t. W
%          NOTE(review): this treats the least-squares readout `teta` as
%          constant w.r.t. W (hybrid-training approximation) — confirm that
%          is the intended training scheme.
%   Y1   - np x NNP(1) hidden-layer activations

W = W(:)';                 % force a row vector so the slicing below is uniform
[np,ni] = size(Inp);
nh = NNP(1);               % number of hidden units
nw = ni*nh;                % number of hidden weights (biases follow them in W)

% Unpack hidden-layer weights (ni x nh, one column per unit) and biases.
% reshape reproduces the original column-by-column copy without a loop.
W1 = reshape(W(1:nw), ni, nh);
T1 = W(nw+1 : nw+nh);

% Hidden layer: net input, logistic activation, and its derivative dY1/dX1.
X1 = Inp*W1 + ones(np,1)*T1;
Y1 = 1./(1+exp(-X1));
Der1 = Y1.*(1-Y1);

% Output layer: bias + linear readout of Y1, fitted by least squares.
A = [ones(np,1) Y1];
teta = A\Tar;
Y = A*teta;

% Chain rule: scale each hidden unit's derivative by its output weight
% (teta(1) is the readout bias and does not enter).
Der1 = Der1 .* (ones(np,1)*teta(2:nh+1)');

% Assemble per-pattern derivative columns in W's layout:
% weights of unit 1 (ni cols), unit 2, ..., then the nh bias columns.
G = zeros(np, nw+nh);      % preallocate instead of growing inside the loop
l = 1;
PBias = nw+1;              % first bias column
for i = 1:nh
  for j = 1:ni
    G(:,l) = Der1(:,i).*Inp(:,j);
    l = l+1;
  end
  G(:,PBias) = Der1(:,i);
  PBias = PBias+1;
end

% Gradient of 0.5*||Y-Tar||^2 w.r.t. the hidden-layer parameters.
E = G'*(Y-Tar);