Example #1
static PetscErrorCode bfgs_apply(PC pc, Vec xin, Vec xout)
{
  TaoLMVMMat *M;
  TaoVecPetsc Xin(xin);
  TaoVecPetsc Xout(xout);
  TaoTruth info2;
  int info;

  PetscFunctionBegin;

  PetscTruth VerbosePrint = PETSC_FALSE; 
  PetscOptionsGetTruth(PETSC_NULL,"-verboseapp",&VerbosePrint,PETSC_NULL);

  info = PCShellGetContext(pc,(void**)&M); CHKERRQ(info);

  PetscScalar solnNorm,solnDot;
  info = VecNorm(xin,NORM_2,&solnNorm); CHKERRQ(info);
  info = PetscPrintf(PETSC_COMM_WORLD,"bfgs_apply: ||Xin||_2 = %22.15e\n",solnNorm); CHKERRQ(info);
  if(VerbosePrint) VecView(xin,0);

  info = M->Solve(&Xin, &Xout, &info2); CHKERRQ(info);

  info = VecNorm(xout,NORM_2,&solnNorm); CHKERRQ(info);
  info = VecDot(xin,xout,&solnDot); CHKERRQ(info);
  info = PetscPrintf(PETSC_COMM_WORLD,"bfgs_apply: ||Xout||_2 = %22.15e, Xin^T Xout = %22.15e\n",solnNorm,solnDot); CHKERRQ(info);
  if(VerbosePrint) VecView(xout,0);

  PetscFunctionReturn(0);
}
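
For context, a callback like bfgs_apply is typically installed through PETSc's PCSHELL interface. A minimal sketch, assuming an existing KSP solver ksp and an initialized TaoLMVMMat *M (both hypothetical here; the API names match the PETSc generation used above):

PC pc;
int info;
info = KSPGetPC(ksp,&pc); CHKERRQ(info);
info = PCSetType(pc,PCSHELL); CHKERRQ(info);
info = PCShellSetContext(pc,(void*)M); CHKERRQ(info); /* retrieved in bfgs_apply via PCShellGetContext */
info = PCShellSetApply(pc,bfgs_apply); CHKERRQ(info); /* install the apply callback shown above */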
Example #2
static double compute_Xin()
{
    realvar *tmp;
    /* printf("Computing: Xin[]\n"); */
    tmp = &(Xin());
    if (!tmp->computed) {
        printf("Input Xin =" );
        gets(__s_);
        tmp->value = atof(__s_);
        tmp->computed = 1;
    }
    return tmp->value;
}
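
The snippet reads like generated C: `Xin()` must expand to an lvalue (so `&(Xin())` is valid) and `__s_` is a shared line buffer. A minimal sketch of the supporting definitions it assumes; every name here except realvar, Xin, and __s_ is hypothetical:

#include <stdio.h>
#include <stdlib.h>

typedef struct {
    double value;    /* cached numeric value */
    int    computed; /* set once the value has been read */
} realvar;

static realvar var_table[1];  /* hypothetical variable storage */
#define Xin() (var_table[0])  /* expands to an lvalue, so &(Xin()) works */
static char __s_[256];        /* shared input line buffer */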
Example #3
 // Overwrite X in place with N^H * X; the copy Xin exists because
 // Gemm's output must not alias its inputs.
 void apply_adjoint(XType& X) const {
     XType Xin(X);
     base::Gemm(elem::ADJOINT, elem::NORMAL, 1.0, N, Xin, X);
 }
Example #4
 // Overwrite X in place with N * X, using the same copy-then-multiply pattern.
 void apply(XType& X) const {
     XType Xin(X);
     base::Gemm(elem::NORMAL, elem::NORMAL, 1.0, N, Xin, X);
 }
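
Together, apply() and apply_adjoint() realize X := N*X and X := N^H*X. A hypothetical usage, assuming op is an instance of the enclosing operator class and X a populated XType matrix:

op.apply(X);          // X := N * X
op.apply_adjoint(X);  // X := N^H * X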
Example #5
//Algorithm used: backpropagation
void RNA::treina(const char* fn, const char* fout, bool isBatch, bool isNature)
{
	FILE* f=fopen(fn,"r");
	FILE* flog=fopen("treino.log","w+");

	if(!f){
		fprintf(stderr,"Falha ao abrir %s!\n",fn);
		exit(3);
	}
	
	int N;
	fscanf(f,"%d",&N);
	
	vector<vector<double> >  Xin(N);
	vector<vector<double> >  d(N);//Desired output
	
	printf("Carregando banco de treinamento...\n");
	for(int i=0;i<N;i++){
		Xin[i].resize(nIn); d[i].resize(nOut); 
		for(int j=0;j<nIn;j++) fscanf(f,"%lf",&Xin[i][j]);
		for(int j=0;j<nOut;j++) fscanf(f,"%lf",&d[i][j]);
	}
	
	int NI;
	printf("Executando treinamento...\n");
	fscanf(f,"%d",&NI);
	fclose(f);
	
	vector<double> e(nNeu+1);
	vector<double> delta(nNeu+1);
	vector<vector<double> > dW;
 	vector<double> err_ant(nOut);

	vector<vector<double> > wt1, wt;

	if(isBatch||isNature){
		dW.resize(W.size());
		wt1.resize(W.size());
		wt.resize(W.size());

		for(int i=0;i<W.size();i++){
			dW[i].resize(W[i].size());
			wt1[i].resize(W[i].size());
			wt[i].resize(W[i].size());
		}

		//for(int i=0;i<W.size();i++)
			//for(int j=0;j<W[i].size();j++)
				//W[i][j] += rand(-1, 1);
	} else {
		wt1.resize(W.size());
		wt.resize(W.size());

		for(int i=0;i<W.size();i++){
			wt1[i].resize(W[i].size());
			wt[i].resize(W[i].size());
			for (int j=0;j<W[i].size();j++)
				wt[i][j]=wt1[i][j]=0;
		}
	}

	bool isf = true;
	double TA0 = TA;
	for(int kp=0;kp<NI;kp++){
		printf("passo %d\n",kp);

		double err_acc = 0;
		double alpha = rand(0, 1) * 0.01 * TA0;

		TA = rand(0.8, 1.2) * TA0;

		if (isBatch){
			for(int i=0;i<W.size();i++)
				for(int j=0;j<W[i].size();j++)
					dW[i][j]=0;
		}

		if (isNature){
			double e0, e1;

			e0 = 0;

			for(int k=0;k<N;k++){
				g(Xin[k]);
				gd(Xin[k]);
		
				//Update neurons in the output layer
				for(int j=0;j<nOut;j++) {
					e[cam[NC-1][j]]=d[k][j]-G[cam[NC-1][j]];
					e0 += e[cam[NC-1][j]] * e[cam[NC-1][j]];
					err_acc+= e[cam[NC-1][j]] * e[cam[NC-1][j]];
				}
			}

			e0 /= 2;

			for(int i=1;i<W.size();i++)
				for(int j=1;j<W[i].size();j++){
					dW[i][j]=TA*rand(-10,10);
					W[i][j]+=dW[i][j];
				}

			
			e1 = 0;
			for(int k=0;k<N;k++){
				g(Xin[k]);
				gd(Xin[k]);

				//Update neurons in the output layer
				for(int j=0;j<nOut;j++) {
					e[cam[NC-1][j]]=d[k][j]-G[cam[NC-1][j]];
					e1 += e[cam[NC-1][j]] * e[cam[NC-1][j]];
				}
			}

			e1 /= 2;
			if(e1>e0){
				for(int i=1;i<W.size();i++)
					for(int j=1;j<W[i].size();j++){
						W[i][j]-=dW[i][j];
					}
			}

		}else{

			//Present every sample to the network
			for(int k=0;k<N;k++){
				g(Xin[k]);
				gd(Xin[k]);
		
				//Update neurons in the output layer
				for(int j=0;j<nOut;j++) {
					e[cam[NC-1][j]]=d[k][j]-G[cam[NC-1][j]];
					err_acc += e[cam[NC-1][j]] * e[cam[NC-1][j]];
				}

				for(int j=0;j<nOut;j++){
					int yj, i;

					yj=cam[NC-1][j];

					delta[yj]=e[yj]*Gd[yj];

					for(int m=0;m<rede[yj].conn.size();m++){
						i=rede[yj].conn[m];
						double dw = TA*delta[yj]*G[i];
						double dwt = 0;

						if (isf) {
							alpha = 0;
							wt[i][yj] = W[i][yj];
						} else {
							wt1[i][yj] = wt[i][yj];
							wt[i][yj] = W[i][yj];
							dwt = wt[i][yj] - wt1[i][yj];
						}

						if(isBatch)dW[i][yj]+= dw;
						else W[i][yj]+= dw + alpha * dwt;
					}
				}
		
				//Update neurons in the hidden layers
				for(int l=NC-2;l>-1;l--){
		
					for(int j=0;j<cam[l].size();j++){
						int yj, k; 
						double w;
				
						yj=cam[l][j];

						delta[yj]=0;
						for(int m=0;m<rede[yj].dest.size();m++){
							k=rede[yj].dest[m];
							w=W[yj][k];
							delta[yj]+=delta[k]*w;
						}
						delta[yj]*=Gd[yj];

						for(int m=0;m<rede[yj].conn.size();m++){
							int i=rede[yj].conn[m];

							double dw = TA*delta[yj]*G[i];
							double dwt = 0;

							if (isf) {
								alpha = 0;
								wt[i][yj] = W[i][yj];
							} else {
								wt1[i][yj] = wt[i][yj];
								wt[i][yj] = W[i][yj];
								dwt = wt[i][yj] - wt1[i][yj];
							}

							if(isBatch)dW[i][yj]+= dw;
							else W[i][yj]+= dw + alpha * dwt;
						}
					}
				}

				if(isBatch){ //apply accumulated updates (note: done here after every sample, not once per pass)
					for(int i=0;i<W.size();i++)
						for(int j=0;j<W[i].size();j++)
							W[i][j]+=dW[i][j];
				}

			}
		}

		fprintf(flog, "%d %e\n", kp,err_acc*0.5);
		printf("err=%e\n", err_acc*0.5);
		isf = false;
	}
	printf("Treino efetuado!\n");
	fclose(flog);
	//Save the network
	save(fout);
}
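
A hypothetical call site for the trainer (only treina's signature comes from the snippet; the constructor arguments and file names are invented for illustration):

RNA net(/* topology set up as defined elsewhere in the class */);
// Online (non-batch) backpropagation; training data read from train.dat,
// the trained network saved to rna.out:
net.treina("train.dat", "rna.out", false, false);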