// ============================================================================
// NOTE(review): THIS FILE IS CORRUPTED — do not attempt to compile or patch it.
// Recover a pristine copy from version control / the upstream distribution.
//
// Two distinct kinds of damage are visible:
//   1. The whole translation unit has been flattened onto a single line.
//   2. An HTML/XML tag-stripping pass has deleted every span of text from a
//      '<' character up to and including the next '>'. Evidence:
//        - `#include "Layer.h" #include void Layer::softmax()` — the second
//          #include has lost its <header> argument (presumably <math.h>,
//          given the exp()/tanh() calls; memset/cerr/srand suggest <string.h>,
//          <iostream>, and <stdlib.h> were likely stripped too — TODO confirm
//          against the original).
//        - Every loop header reads like `for (y=0; y85)`, i.e. the original
//          `for (y=0; y<NY; y++) { ... if (A[y]>85)` with the text between
//          '<' and '>' eaten. The same deletion pattern recurs throughout,
//          so loop bounds, comparisons, and some whole statements are gone.
//      Stream-extraction chains (`is >> NY;`) survive only because `>>`
//      immediately follows a deletion point.
//
// What is still recognizable (hedged — bodies are incomplete):
//   - Layer::softmax(): softmax of activations A[] into outputs Y[], with an
//     overflow guard (activation > 85 ⇒ winner-take-all on the argmax via
//     memset + Y[amax]=1.0) — guard threshold and argmax tracking are intact.
//   - A stream-reading constructor/loader and Layer::read(istream&): read
//     NY, NU, NUr, per-input cardinalities NK[u], then weights W[y][u][k]
//     and biases B[y]; both end with `level=0;`.
//   - Layer::write(ostream&): mirror-image serializer.
//   - Layer::forward(...) in three flavors (int* categorical inputs indexed
//     as W[y][u][I[u]] with a >=0 "missing value" guard; Float* inputs; and
//     an apparent mixed int*/Float* variant) — presumably computing
//     Y[y]=A[y]=weighted sum; loop bounds lost.
//   - Layer::gradient(...) in the same three flavors, accumulating dW and dB
//     from delta[y] with the same >=0 guard.
//   - A weight initializer: clamps some range D to 20.0 when >20, has a
//     commented-out srand48(seed) replaced by srand(seed); body truncated.
//   - Layer_soft::f1(Float a): derivative of a softmax output,
//     `parz - parz*parz`, with its own exp-overflow guard (threshold 80)
//     and an "AMBIGUITY!!" diagnostic on cerr when qua == -1. INTACT.
//   - Layer_soft::f_cost → Layer::log_cost (cross-entropy); INTACT.
//   - Layer_tanh::f1(Float a): 1 - tanh(a)^2; Layer_tanh::f_cost →
//     Layer::sq_cost (squared error). INTACT.
//
// The original appears to be "Layer ver 2.01, 9/8/2000" — a feed-forward
// layer for a recurrent/profile network. Nothing below this banner has been
// modified; the corrupted text is preserved byte-for-byte as evidence for
// whoever performs the recovery.
// ============================================================================
// Layer ver 2.01 // 9/8/2000 #include "Layer.h" #include void Layer::softmax() { int y; int overflow=0; Float max=A[0]; int amax=0; Float norm=0; for (y=0; y85) { overflow=1; } else { norm += (Float)exp(A[y]); } if (A[y]>max) { max = A[y]; amax = y; } } if (overflow) { memset(Y, 0, NY*sizeof(Float)); Y[amax]=1.0; return; } else { for (y=0; y> NY; is >> NU; is >> NUr; NK=new int[NU+NUr]; for (u=0; u> NK[u]; alloc(NY,NU+NUr,NK); for (y=0; y> W[y][u][k]; } } is >> B[y]; } level=0; } void Layer::read(istream& is) { int y,u,k; is >> NY; is >> NU; is >> NUr; for (u=0; u> NK[u]; for (y=0; y> W[y][u][k]; } } is >> B[y]; } level=0; } void Layer::write(ostream& os) { int y,u,k; os << NY << "\n"; os << NU << "\n"; os << NUr << "\n"; for (u=0; u=0) a += W[y][u][I[u]]; } Y[y]=A[y]=a; } } void Layer::forward(Float* I) { int y,u,k,i; for (y=0; y=0) a += W[y][u][I1[u]]; } for (u=NU; u=0) { dW[y][u][I[u]] += delta[y]; } } dB[y] += delta[y]; } } void Layer::gradient(Float* I) { int y,u,k; int i; for (y=0; y=0) { dW[y][u][I1[u]] += delta[y]; } } i=0; for (u=NU; u20) D=20.0; //srand48(seed); srand(seed); for (y=0; y80) { overflow=1; } else sum += (Float)exp(A[y]); } if (qua == -1) cerr << " AMBIGUITY!! "; Float parz; if (overflow) { parz=0.0; } else parz = (Float)exp(a)/sum; return (parz - parz*parz); } Float Layer_soft::f_cost(Float* t) { return Layer::log_cost(t); } // Layer_tanh Float Layer_tanh::f1(Float a) { return 1.0-(Float)(tanh(a)*tanh(a)); } Float Layer_tanh::f_cost(Float* t) { return Layer::sq_cost(t); }