Example #1
0
// Pre-train a small RBM (5 visible, 3 hidden, sigmoid units) on the data in
// `filename`. Seeds the C RNG first because the RBM uses RANDOM weight init.
void RBMPretrain(string filename)
{
	srand((unsigned)time(NULL));
	// fix: the original allocated with `new` and never deleted it, leaking the
	// RBM on every call. A stack-allocated instance is released automatically.
	RBM rbm(5, 3, NonLinearFactory::SIGMOID, false, AbstractNetworkLayer::RANDOM);
	rbm.setWeightLearningRate(0.1);
	rbm.setBiasLearningRate(0.2);
	rbm.setRegularizationRate(0.0001);
	rbm.preTraining(filename);
}
Example #2
0
File: rbm.cpp  Project: CoolIceFire/RS
// Train an RBM-based collaborative-filtering model with CD-1 over all users,
// reporting test-set RMSE after each epoch and once more at the end.
void train()
{
    srand(0);

    // Hyper-parameters.
    int train_N = 100;
    int n_visible = num_of_visible;
    int n_hidden = num_of_hidden;
    int rating = num_of_rating;
    int train_iter = 1000;
    double learning_rate = 0.0001;

    // fix: 943*5*1682 ints is ~31 MB — as a plain local this overflows the
    // stack on most platforms; `static` moves it to static storage.
    static int train_data[943][5][1682];
    memset(train_data, 0, sizeof(train_data));
    get_train_data(train_data);

    // Per-user list of (item, rating) pairs held out for evaluation.
    vector<pair<int, int> > test_data[num_of_user];
    get_test_data(test_data);

    RBM rbm = RBM(train_N, n_visible, n_hidden, rating);

    for (int iter = 0; iter < train_iter; ++iter)
    {
        // One CD-1 sweep over every user's rating matrix.
        for (int i = 0; i < num_of_user; ++i)
            rbm.contrastiveDivergence(train_data[i], learning_rate, 1);

        // Evaluate RMSE on the held-out ratings after each epoch.
        int cnt = 0;
        double error = 0;
        for (int i = 0; i < num_of_user; ++i)
        {
            error += make_predict(rbm, train_data[i], i, test_data[i]);
            cnt += test_data[i].size();
        }
        double rmse = sqrt(error / cnt);
        printf("epoch: %d, rmse: %f\n", iter, rmse);

        learning_rate *= 0.9;   // decay the learning rate every epoch
    }

    // Dump the learned hidden biases for inspection.
    for (int i = 0; i < num_of_hidden; ++i)
        printf("%lf ", rbm.hbias[i]);
    printf("-----------------------------\n");   // fix: missing newline ran into next line

    // Final RMSE on the test set.
    int cnt = 0;
    double error = 0;
    for (int i = 0; i < num_of_user; ++i)
    {
        error += make_predict(rbm, train_data[i], i, test_data[i]);
        cnt += test_data[i].size();
    }
    double rmse = sqrt(error / cnt);
    printf("rmse: %f\n", rmse);
}
int main()
{
RBM *a = new RBM;

//a->input(stdin);
// for xor
a->build(2,2);  
int x[2],y[2],s[2],sz[2];
x[0] = -1;
x[1] =  1;
y[0] =  1;
y[1] = -1;
s[0] =  1;
s[1] =  1;
sz[0] = -1;
sz[1] = -1;
for( int i=0; i< 100; i++)
	{
a->train((float)1.001,2,x);
a->train((float)1.001,2,y);
printf( "xor %f %f same %f %f\n",a->energy(2,x),a->energy(2,y),a->energy(2,s),a->energy(2,sz));

a->dump(stdout);
	}

s[1] = 0;
a->fillin(2,s);
printf("%d %d\n",s[0],s[1]);
}
int main()
{
RBM *a = new RBM;

//a->input(stdin);
// for xor
a->build(1,4);  
int x[4],y[4],s[4],sz[4];
for( int i=0; i< 4; i++)
{
	x[i] = 1;
	y[i] = -1;
	s[i] = 1;
	sz[i] = -1;
}
for( int i=0; i<4; i+=3)
{
	x[i] = -1;
	y[i] =  1;
}
for( int i=0; i< 100; i++)
	{
a->train((float)1.001,4,x);
a->train((float)1.001,4,y);
printf( "xor %f %f same %f %f\n",a->energy(4,x),a->energy(4,y),a->energy(4,s),a->energy(4,sz));

a->dump(stdout);
	}

}
int main()
{
RBM *a = new RBM;
const int code_width = 5;
const int window_width = 40;

FILE *ip; 
ip = fopen("one.10.rbm","r");
a->input(ip);
fclose(ip);
ip = fopen("pdbaanr","r");

fasta *d = new fasta(ip);

// static issue WTF man
static int encoded[window_width*code_width];
static int test[window_width*code_width];



while(d->get_next())
  while( d->encode(window_width,window_width*code_width,encoded))
  {
	int same;

	for( int ia=0;ia<window_width; ia++)
	{
	same = 0;
	for( int i=0; i< window_width*code_width; i++)
		test[i] = encoded[i];
	for( int i=ia*code_width; i< ia*code_width+code_width; i++)
		test[i] = 0;
//	for( int i=ia*code_width; i< ia*code_width+code_width; i++)
//		printf("%d %d\n", encoded[i],test[i]);

	a->fillin(window_width*code_width, test);
//	for( int i=ia*code_width; i< ia*code_width+code_width; i++)
//		printf("%d %d\n", encoded[i],test[i]);
	for( int i=ia*code_width; i< ia*code_width+code_width; i++)
		if( test[i] == encoded[i]) same ++;
	printf("%d ", same);
   
	
	}
	printf("\n");
//     a->train((float)1.001,window_width*code_width,encoded);
  }
}
Example #6
0
// One contrastive-divergence parameter update: weights move along the
// difference between positive-phase statistics (pv, ph) and negative-phase
// statistics (nv, nh), scaled by the trainer's learning rate.
void RBM::CDTrainer::learn(RBM& m,float* pv,float* ph,float* nv,float* nh){
	for(unsigned hid=0;hid<m.nhid;++hid){
		const float posH=ph[hid];
		const float negH=nh[hid];
		for(unsigned vis=0;vis<m.nvis;++vis)
			m.at(hid,vis)+=(pv[vis]*posH-nv[vis]*negH)*rate;
		// Hidden bias follows the difference of hidden activations alone.
		m.bias[hid]+=(posH-negH)*rate;
	}
}
Example #7
0
File: DBN.cpp  Project: guker/DBN-2.0
// Greedy layer-wise pre-training: build and train one RBM per level until
// make_rbm_level produces an RBM with no edges (i.e. past the top layer).
// Each level's connections are also registered with the reconstruction MLP.
void DBN::learn(){
   teacher->monitor->teacher = teacher;
   int level = 0;
   // Expose the raw inputs to the reconstruction MLP first.
   for (auto input:inputs) rc_MLP->add(input);
   while (1){
      RBM *rbm = make_rbm_level(level);
      // Register this level's connections with the reconstruction MLP.
      for (auto edge:(rbm->edges)) rc_MLP->add((Connection*)edge);
      
      // An edge-less RBM signals there are no more levels to train.
      if (rbm->edges.size() == 0) break;
      //if (level > 0) rbm->toggle_noise();
      
      // Every level is trained with the shared teacher.
      rbm->teacher = teacher;
      rbm->learn();
      //rbm->turn_off();
      std::cout << "Done training " << level << " layer." << std::endl;
      ++level;
   }
   // NOTE(review): the RBMs returned by make_rbm_level are never deleted here;
   // presumably rc_MLP or the DBN owns them (their edges stay registered) — verify.
}
Example #8
0
File: DBN.cpp  Project: ArArgyridis/SPOR
void DBN::pretrain(float learningRate, float lrCoef, int  k, int epochs) {
    matrix2dPtr layerInput = inputData;
    for (register unsigned int i = 0; i < hiddenLayerSizes.size(); i++) {

        if (i == 0) {
            layerInput = inputData;
        }
        else {            
            layerInput = sigmoidLayers[i-1].getSampleHGivenV(layerInput);
        }

        RBM *rbm;
        CRBM *crbm;
        if ( rbmLayers[i]->type == typeRBM) {
            rbm = new RBM();
            rbm = rbmLayers[i];
            rbm->contrastive_divergence(k, epochs, learningRate, lrCoef, layerInput, true);
        }
        else if (rbmLayers[i]->type == typeCRBM) {
            crbm = dynamic_cast< CRBM* > (rbmLayers[i]);
            crbm->contrastive_divergence(k, epochs, learningRate, lrCoef, layerInput, true);
        }
    }
}
Example #9
0
File: rbm.cpp  Project: CoolIceFire/RS
// Return the sum of squared errors of the RBM's rating predictions for the
// held-out (item, rating) pairs in `v`, given user `u`'s training matrix.
// For each test item the predicted rating is the argmax over rating values of
// the (unnormalized) softmax score computed from the hidden activations.
// NOTE(review): `rbm` is passed by value, copying the whole model on every
// call; a const reference would avoid this — verify sigmoid() is const first.
// NOTE(review): parameter `u` is unused; kept for interface compatibility.
double make_predict(RBM rbm, int train_data[][1682], int u, vector<pair<int, int> >& v)
{
    // Hidden activations from the user's observed ratings.
    double hidden[num_of_hidden];
    for (int i = 0; i < num_of_hidden; ++i)
    {
        double temp = 0.0;
        for (int j = 0; j < num_of_rating; ++j)
            for (int kk = 0; kk < num_of_movies; ++kk)
                temp += train_data[j][kk] * rbm.W[i][kk][j];
        temp += rbm.hbias[i];
        hidden[i] = rbm.sigmoid(temp);
    }

    int size = v.size();
    double ret = 0;
    for (int i = 0; i < size; ++i)
    {
        double vp[num_of_rating];
        int item = v[i].first;
        int real_rating = v[i].second;

        // Unnormalized softmax score for each possible rating of this item.
        for (int j = 0; j < num_of_rating; ++j)
        {
            double temp = 0;
            for (int kk = 0; kk < num_of_hidden; ++kk)
                temp += hidden[kk] * rbm.W[kk][item][j];
            temp += rbm.vbias[j][item];
            vp[j] = exp(temp);
        }

        // fix: the predicted rating index is an integer; the original held it
        // in a double (`mxi`) and did float arithmetic on an array index.
        double mx = 0;
        int mxi = 0;
        for (int j = 0; j < num_of_rating; ++j)
        {
            if (vp[j] > mx) { mx = vp[j]; mxi = j; }
        }
        double diff = mxi - real_rating;
        ret += diff * diff;
    }
    return ret;
}
Example #10
0
 // Free-function shim: forwards the [first, last) sample range to the RBM's
 // own weight initializer. `Iterator` presumably comes from an enclosing
 // template parameter list not visible in this span — verify.
 static void init_weights(RBM& rbm, Iterator first, Iterator last) {
     rbm.init_weights(first, last);
 }
Example #11
0
int main (int argc, const char * argv[])
{
   //--------------RNG INIT STUFF
   
   srand((unsigned)time(0));
   
   long seed;
   r = gsl_rng_alloc (gsl_rng_rand48);     // pick random number generator
   seed = time (NULL) * getpid();
   gsl_rng_set (r, seed);                  // set seed
   //--------------
   //LOAD DATASET and INIT
   
   DataSet data1;
   
   data1.loadfMRI(true,true,true);
   
   //DataSet data2;
   //data2.loadstim();
  
   //---------DONE INIT
   //--------- GSL TESTS GO HERE ----------------
   
   //INIT RBM
   
   GaussianLayer baselayer((int)data1.train->size2);
   
   //SigmoidLayer stimuluslayer((int)data2.train->size2);
   //stimuluslayer.noise = .2;
   ReLULayer hiddenlayer(32);
   //ReLULayer hiddenlayer2(16);
   
   InputEdge ie1(&data1);
   //InputEdge ie2(&data2);
   
   baselayer.input_edge = &ie1;
   //stimuluslayer.input_edge = &ie2;
   
   Connection c1(&baselayer, &hiddenlayer);
   c1.learning_rate = 0.0000001;
   c1.decay = 0.0000002;
   //Connection c2(&stimuluslayer, &hiddenlayer);
   //c2.learning_rate = 0.000000005;
   //c2.decay = 0;
   
   //Connection c3(&hiddenlayer, &hiddenlayer2);
   //c3.learning_rate = 0.00001;
   //c3.decay = 0.000001;
   
   //--------------
   float momentum = 0.65;
   float k = 1;
   float batchsize = 1;
   float epochs = 4000;
   
   //LEARNING!!!!!!!!!
   ContrastiveDivergence cdLearner(momentum, k, batchsize, epochs);
   Connection_Learning_Monitor monitor(&c1);
   cdLearner.monitor = &monitor;
   
   RBM rbm;
   rbm.add_connection(&c1);
   //rbm.add_connection(&c2);
   rbm.teacher = &cdLearner;
   rbm.learn();

   /*
   DBN dbn;
   dbn.add_connection(&c1);
   dbn.add_connection(&c3);
   dbn.teacher = &cdLearner;
   
   dbn.finish_setup();
   
   dbn.pathways[0]->setup_viz();
   
   dbn.viz = (dbn.pathways[0])->viz;
   dbn.viz->initViz();
   dbn.viz->thresh = .02;
   
   dbn.viz->scale = 1;
   dbn.learn();
   
   std::cout << "Done Learning! " << std::endl;
    */
   
   return 0; 
}
Example #12
0
// Free-function accessor: forwards to rbm.input_size() so generic code can
// query the visible-layer size without naming the member function.
size_t input_size(const RBM& rbm) {
    return rbm.input_size();
}
Example #13
0
// Free-function accessor: forwards to rbm.output_size() so generic code can
// query the hidden-layer size without naming the member function.
size_t output_size(const RBM& rbm) {
    return rbm.output_size();
}