// Allocate blocks of allocSize bytes until the allocator throws AllocError.
// Returns true if the allocator was exhausted, false if the iteration cap
// was hit first.
static bool fillUp(Allocator &a, size_t allocSize, vector<Pointer> &out) {
    // Cap the iterations so a misbehaving allocator cannot make this loop forever.
    size_t max_allocs = 2 * sizeof(buf) / allocSize;

    for (size_t i = 0; i < max_allocs; i++) try {
        out.push_back(a.alloc(allocSize));
        writeTo(out.back(), allocSize);
    } catch (AllocError &) {
        return true; // out of memory, as expected when filling up
    }

    return false;
}
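
A minimal sketch of a test driver for fillUp(); the free() call and the block size are assumptions about the surrounding harness, not part of the snippet above.

// Hypothetical driver; assumes the same Allocator, Pointer, and AllocError
// types as fillUp(), and that the allocator exposes a matching free().
static void exercise_allocator(Allocator &a) {
    vector<Pointer> blocks;
    bool exhausted = fillUp(a, 64, blocks); // 64-byte blocks, arbitrary size
    assert(exhausted && "allocator should run out of space eventually");
    for (size_t i = 0; i < blocks.size(); ++i)
        a.free(blocks[i]); // release everything for the next test
}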
template <class T>
inline MemRef BasicArray<T>::create_array(std::size_t size, Allocator& alloc)
{
    std::size_t byte_size_0 = calc_aligned_byte_size(size); // Throws
    // Add zero to Array::initial_capacity so std::max() binds to a
    // temporary instead of taking the address of that member.
    std::size_t byte_size = std::max(byte_size_0, Array::initial_capacity + 0); // Throws

    MemRef mem = alloc.alloc(byte_size); // Throws

    bool is_inner_bptree_node = false;
    bool has_refs = false;
    bool context_flag = false;
    int width = sizeof(T);
    init_header(mem.m_addr, is_inner_bptree_node, has_refs, context_flag, wtype_Multiply,
                width, size, byte_size);

    return mem;
}
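
A possible call site, sketched under the assumption that the surrounding code has an Allocator at hand (get_default_allocator() below is a placeholder, not an API shown above):

// Hypothetical usage of create_array(); the allocator accessor is assumed.
Allocator& alloc = get_default_allocator(); // placeholder accessor
MemRef mem = BasicArray<int>::create_array(8, alloc); // may throw
// mem.m_addr now points at a header-initialized array sized for 8 ints.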
Example #3
#include <iostream>
// (plus whatever header declares the custom Allocator)

int main() {
  Allocator a;

  // Grab 16 bytes from the allocator.
  char *c = (char *)a.alloc(16);

  // At this point the memory should all be set to 0xDEAD, repeating.
  for (size_t i = 0; i < 16; ++i) {
    std::cout << char(*(c + i) + 55);
  }
  std::cout << std::endl;

  // The returned block is writable.
  *c = 'A';
  std::cout << *c << std::endl;

  a.free(c);

  return 0;
}
void NeuralClassifier::TRAIN_DATA_BITS(char* train_file, char* model, int n_hu)
{
	cout << "Training..." << endl;
	Allocator *allocator = new Allocator;
	int num_of_features = 9;
	int my_digit = 5;

	DiskXFile out_model_file(model, "w");

	// Create the MLPs (one slot per digit; only my_digit is used below).
	ConnectedMachine mlp[10];
	Linear *c1[10];
	Tanh *c2[10];
	Linear *c3[10];
	Tanh *c4[10];
	cout << "Adding layers..." << endl;
	for (int count = my_digit; count <= my_digit; count++)
	{
		c1[count] = new(allocator) Linear(num_of_features, n_hu);
		c2[count] = new(allocator) Tanh(n_hu);
		c3[count] = new(allocator) Linear(n_hu, 1);
		c4[count] = new(allocator) Tanh(1);

		mlp[count].addFCL(c1[count]);
		mlp[count].addFCL(c2[count]);
		mlp[count].addFCL(c3[count]);
		mlp[count].addFCL(c4[count]);
		mlp[count].build();
		mlp[count].setPartialBackprop();
	}

	cout << "Reading data..." << endl;
	DataSet *train_set;
	train_set = new(allocator) MatDataSet(train_file, num_of_features, 1, false, -1, false);

	Criterion *criterion[10];
	// Create the trainers.
	Trainer **trainers = (Trainer **)allocator->alloc(sizeof(Trainer *) * 10);

	cout << "Training each digit..." << endl;
	for (int count = my_digit; count <= my_digit; count++)
	{
		long trained_with_one = 0;
		long trained_with_minus_one = 0;
		cout << "Training digit " << count << endl;

		// Relabel targets: +1 for the digit being trained, -1 for the rest.
		for (int t = 0; t < train_set->n_examples; t++)
		{
			train_set->setExample(t);
			if ((t % 500) == 0)
				cout << "Example " << t << " Target is " << train_set->targets->frames[0][0] << endl;

			if (train_set->targets->frames[0][0] == count)
			{
				train_set->targets->frames[0][0] = 1;
				trained_with_one++;
			}
			else
			{
				train_set->targets->frames[0][0] = -1;
				trained_with_minus_one++;
			}
		}

		cout << "examples trained with one: " << trained_with_one << endl;
		cout << "examples trained with minus one: " << trained_with_minus_one << endl;
		cout << "total number of examples: " << train_set->n_examples << endl;

		criterion[count] = new(allocator) MSECriterion(1);
		trainers[count] = new(allocator) StochasticGradient(&mlp[count], criterion[count]);

		trainers[count]->setIOption("max iter", 100);
		cout << "Train digit..." << endl;
		trainers[count]->train(train_set, NULL);

		cout << "Save file..." << endl;
		mlp[count].saveXFile(&out_model_file);
	}
	delete allocator;
}
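
A plausible invocation of this trainer; the file names and the hidden-unit count are placeholders chosen purely for illustration.

// Hypothetical call site; paths and n_hu = 25 are made-up values.
char train_file[] = "digits_train.dat";
char model_file[] = "digit5.model";
NeuralClassifier classifier;
classifier.TRAIN_DATA_BITS(train_file, model_file, 25);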
Example #5
File: gmm.cpp Project: kobeyuan/PCM
int GMM(char *datafile, float means[N_GAUSS][3], float var[N_GAUSS][3], float logw[N_GAUSS])
{
	real accuracy = 0.001; //end accuracy
	real threshold = 0.001; //variance threshold
	int max_iter_kmeans = 50; //max number of iterations of KMeans
	int max_iter_gmm = 50; //max number of iterations of GMM
	int n_gaussians = N_GAUSS; //number of Gaussians
	real prior = 0.001; //prior on the weights

	int max_load = -1; //max number of examples to load for train
	int the_seed = -1; //the random seed

	bool norm = false; //normalize the data
	const char *save_model_file = "model.txt"; //the model file
	int k_fold = -1; //number of folds, if you want to do cross-validation
	bool binary_mode = true; //binary mode for files

	Allocator *allocator = new Allocator;

	//==================================================================== 
	//=================== Create the DataSet ... =========================
	//==================================================================== 
	
	MatDataSet data(datafile, -1, 0, true, max_load, binary_mode);
	MeanVarNorm* mv_norm = NULL;
	if(norm)
		mv_norm = new(allocator) MeanVarNorm(&data);

	//==================================================================== 
	//=================== Training Mode  =================================
	//==================================================================== 

	if(the_seed == -1)
		Random::seed();
	else
		Random::manualSeed((long)the_seed);

	if(norm)
		data.preProcess(mv_norm);

	//=================== Create the GMM... =========================

	// create a KMeans object to initialize the GMM
	KMeans kmeans(data.n_inputs, n_gaussians);
	kmeans.setROption("prior weights",prior);

	// the kmeans trainer
	EMTrainer kmeans_trainer(&kmeans);
	kmeans_trainer.setROption("end accuracy", accuracy);
	kmeans_trainer.setIOption("max iter", max_iter_kmeans);

	// the kmeans measurer
	MeasurerList kmeans_measurers;
	DiskXFile *filektv = new(allocator) DiskXFile("kmeans_train_val", "w");
	NLLMeasurer nll_kmeans_measurer(kmeans.log_probabilities,&data,filektv);
	kmeans_measurers.addNode(&nll_kmeans_measurer);

	// create the GMM
	DiagonalGMM gmm(data.n_inputs,n_gaussians,&kmeans_trainer);
	
	// set the training options
	real* thresh = (real*)allocator->alloc(data.n_inputs*sizeof(real));
	initializeThreshold(&data,thresh,threshold);	
	gmm.setVarThreshold(thresh);
	gmm.setROption("prior weights",prior);
	gmm.setOOption("initial kmeans trainer measurers", &kmeans_measurers);

	//=================== Measurers and Trainer  ===============================

	// Measurers on the training dataset
	MeasurerList measurers;
	DiskXFile *filegtv = new(allocator) DiskXFile("gmm_train_val", "w");
	NLLMeasurer nll_meas(gmm.log_probabilities, &data, filegtv);
	measurers.addNode(&nll_meas);

	// The Gradient Machine Trainer
	EMTrainer trainer(&gmm);
	trainer.setIOption("max iter", max_iter_gmm);
	trainer.setROption("end accuracy", accuracy);
	//trainer.setBOption("viterbi", true);

	//=================== Let's go... ===============================

	if(k_fold <= 0)
	{
		trainer.train(&data, &measurers);

		if(strcmp(save_model_file, ""))
		{
			DiskXFile model_(save_model_file, "w");
			if(norm)
				mv_norm->saveXFile(&model_);
			model_.taggedWrite(&n_gaussians, sizeof(int), 1, "n_gaussians");
			model_.taggedWrite(&data.n_inputs, sizeof(int), 1, "n_inputs");
			gmm.saveXFile(&model_);
			for (int i = 0; i < N_GAUSS; i++) {
				logw[i] = gmm.log_weights[i];
				for (int j = 0; j < 3; j++) {
					means[i][j] = gmm.means[i][j];
					var[i][j] = gmm.var[i][j];
				}
			}
		}
	}
	else
	{
		KFold k(&trainer, k_fold);
		k.crossValidate(&data, NULL, &measurers);
	}

	delete allocator;

	return 0;
}
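
For reference, a sketch of how GMM() might be called; N_GAUSS comes from the project's headers, and the data file name is a placeholder.

// Hypothetical call site; "features.dat" is a made-up path.
char datafile[] = "features.dat";
float means[N_GAUSS][3];
float var[N_GAUSS][3];
float logw[N_GAUSS];
if (GMM(datafile, means, var, logw) == 0) {
    // means/var/logw now hold the trained mixture parameters; the full
    // model has also been written to model.txt.
}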