Example No. 1
void h8_adc_device::timeout(uint64_t current_time)
{
	if(mode & BUFFER) {
		do_buffering((mode & DUAL) && (channel & 1));
		if((mode & DUAL) && !(channel & 1)) {
			channel++;
			conversion_wait(false, false, current_time);
			return;
		}
	} else {
		if(mode & DUAL) {
			if(channel & 1)
				commit_value(channel, 1);
			else {
				commit_value(channel, 0);
				channel++;
				conversion_wait(false, false, current_time);
				return;
			}
		} else
			commit_value(channel);
	}

	if(mode & ROTATE) {
		if(channel != end_channel) {
			channel++;
			sampling();
			conversion_wait(false, false, current_time);
			return;
		}
		channel = start_channel;
	}

	if(mode & COUNTED) {
		count--;
		if(count) {
			sampling();
			conversion_wait(false, false, current_time);
			return;
		}
	}

	adcsr |= F_ADF;
	if(adcsr & F_ADIE)
		intc->internal_interrupt(intc_vector);

	if(mode & REPEAT) {
		if(suspend_on_interrupt && (adcsr & F_ADIE)) {
			mode |= HALTED;
			return;
		}
		channel = start_channel;
		count = start_count;
		sampling();
		conversion_wait(false, false, current_time);
		return;
	}

	done();
}
Example No. 2
	uint64 Sampler::recordAllocationSample(const void* item, uint64 size, bool callback_ok)
	{
		AvmAssertMsg(sampling(), "How did we get here if sampling is disabled?");
		if(!samplingNow)
			return 0;

		if(!samplingAllAllocs)
			return 0;

		if(!sampleSpaceCheck(callback_ok))
			return 0;

		(void)item;

		lastAllocSample = currentSample;
		writeRawSample(NEW_AUX_SAMPLE);
		uint64 uid = allocId++;
		uids.add(item, (void*)uid);
		write(currentSample, uid);
		write(currentSample, item);
		write(currentSample, (uintptr)0);
		write(currentSample, size);

		AvmAssertMsg((uintptr)currentSample % 4 == 0, "Alignment should have occurred at end of raw sample.\n");
		numSamples++;

		return uid; 
	}
Example No. 3
void PLEN2::AccelerationGyroSensor::dump()
{
	#if _DEBUG
		system.outputSerial().println(F("=== running in function : AccelerationGyroSensor::dump()"));
	#endif

	sampling();

	system.outputSerial().println(F("{"));

		system.outputSerial().print(F("\t\"Acc X\": "));
		system.outputSerial().print(getAccX());
		system.outputSerial().println(F(","));

		system.outputSerial().print(F("\t\"Acc Y\": "));
		system.outputSerial().print(getAccY());
		system.outputSerial().println(F(","));

		system.outputSerial().print(F("\t\"Acc Z\": "));
		system.outputSerial().print(getAccZ());
		system.outputSerial().println(F(","));

		system.outputSerial().print(F("\t\"Gyro Roll\": "));
		system.outputSerial().print(getGyroRoll());
		system.outputSerial().println(F(","));

		system.outputSerial().print(F("\t\"Gyro Pitch\": "));
		system.outputSerial().print(getGyroPitch());
		system.outputSerial().println(F(","));

		system.outputSerial().print(F("\t\"Gyro Yaw\": "));
		system.outputSerial().println(getGyroYaw());

	system.outputSerial().println(F("}"));
}
Example No. 4
MyWidget::MyWidget(QGLWidget *parent)
  : QGLWidget(QGLFormat(), parent)
{

  setMouseTracking(true);

  setPaletteBackgroundColor(QColor(255,255,255));

  setstatus();

  menu = new QMenuBar(this, "Menu bar");
  file = new QPopupMenu( this , "file");
  save = new QPopupMenu( this , "save");
  placement = new QPopupMenu( this , "placement");
  options = new QPopupMenu( this , "options");
  view = new QPopupMenu( this , "viewing");
  save->insertItem( "Save &eps file", this, SLOT(savedepsfile()), CTRL+Key_E );
  save->insertItem( "Save &stl file", this, SLOT(savedstlfile()), CTRL+Key_S );
  file->insertItem( "&Load", this, SLOT(openedfile()), CTRL+Key_L );
  generate_id = placement->insertItem( "&Generate", &p, SLOT(generate()), CTRL+Key_G );
  generatefirst_id = placement->insertItem( "Generate &First", &p, SLOT(generateFirst()), CTRL+Key_F );
  generatenext_id = placement->insertItem( "Generate &Next", &p, SLOT(generateNext()), CTRL+Key_N );
  generateten_id = placement->insertItem( "Next &ten streamlines", &p, SLOT(generateTen()), CTRL+Key_T );
  generateresume_id = placement->insertItem( "&Resume the placement", &p, SLOT(generateAll()), CTRL+Key_C );
  clear_id = placement->insertItem( "&Clear", &p, SLOT(purge()), CTRL+Key_M );
  drawstl_id = view->insertItem( "&Draw streamlines", &p, SLOT(draw_stl()), CTRL+Key_D );
  drawpq_id = view->insertItem( "Draw &queue elements", &p, SLOT(draw_pq()), CTRL+Key_Q );
  drawtr_id = view->insertItem( "Draw t&riangulation", &p, SLOT(draw_tr()), CTRL+Key_R );
  drawbc_id = view->insertItem( "Draw &biggest circle", &p, SLOT(draw_bc()), CTRL+Key_B );
  addimage_id = placement->insertItem( "&Image", this, SLOT(openedimage()), CTRL+Key_I );
  options->insertItem( "Density...", &p, SLOT(density()));
  options->insertItem( "Saturation ratio...", &p, SLOT(ratio()));
  options->insertItem( "Sampling step...", &p, SLOT(sampling()));
  options->insertItem( "Integrating step...", &p, SLOT(integrating()));
  placement->insertItem( "&Options ", options );
  save_id = file->insertItem( "&Save", save );
  menu->insertItem( "&File", file );
  menu->insertItem( "&Placement", placement );
  view_id = menu->insertItem( "&View ", view );
  file->insertItem( "&Quit", qApp, SLOT(quit()), ALT+Key_F4 );

  // disable all generator menu items
  placement->setItemEnabled(generate_id, false);
  placement->setItemEnabled(generatefirst_id, false);
  placement->setItemEnabled(generatenext_id, false);
  placement->setItemEnabled(generateten_id, false);
  placement->setItemEnabled(generateresume_id, false);
  placement->setItemEnabled(clear_id, false);

  menu->setItemEnabled(view_id, false);

  placement->setItemEnabled(addimage_id, false);
  file->setItemEnabled(save_id, false);


  connect(this, SIGNAL(fileloaded(const QString &)), &p, SLOT(load(const QString &)));
  connect(this, SIGNAL(imageloaded(const QString &)), &p, SLOT(image(const QString &)));
  connect(&p, SIGNAL(optionschanged()), this, SLOT(updatestatus()));

}
Example No. 5
	void Sampler::sample()
	{		
		AvmAssertMsg(sampling(), "How did we get here if sampling is disabled?");
		if(!samplingNow || !core->callStack || !sampleSpaceCheck())
			return;	
		writeRawSample(RAW_SAMPLE);
		numSamples++;
	}
Example No. 6
void h8_adc_device::start_conversion()
{
	mode = start_mode;
	channel = start_channel;
	count = start_count;
	sampling();
	conversion_wait(true, !analog_powered);
	analog_powered = true;
}
Example No. 7
double Gibbs_ask(int N)
{
    int bn[] = {0, 1, 1, 1};
    int n = 0;
    int j;
    for (j = 0; j < N; ++j)
    {
        int z = rand() % 2;
        bn[z] = sampling(getProb(z, bn));
        if (bn[1])
            ++n;
    }
    return (n + 0.0) / N;    
}
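The Gibbs_ask snippet above calls two helpers that are not shown, getProb and sampling. A minimal sketch of the kind of sampler it could be using, assuming sampling(p) draws a Bernoulli value from the probability returned by getProb (name and signature here are hypothetical):

#include <cstdlib>

// Hypothetical helper: return 1 with probability p, 0 otherwise.
// Only a sketch of the sampler Gibbs_ask above appears to rely on.
static int sampling(double p)
{
    double u = static_cast<double>(rand()) / RAND_MAX;  // uniform draw in [0, 1]
    return (u < p) ? 1 : 0;
}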
Example No. 8
	uint64 Sampler::recordAllocationInfo(AvmPlusScriptableObject *obj, uintptr typeOrVTable)
	{
		AvmAssertMsg(sampling(), "How did we get here if sampling is disabled?");
		if(!samplingNow)
			return 0;

		if( !samplingAllAllocs )
		{
			// Turn on momentarily to record the alloc for this object.
			samplingAllAllocs = true;
			recordAllocationSample(obj, 0);
			samplingAllAllocs = false;
		}

		byte* old_sample = lastAllocSample;
		Sample s;
		readSample(old_sample, s);
		old_sample = lastAllocSample;

		if(typeOrVTable < 7 && core->codeContext() && core->codeContext()->domainEnv()) {
			// and in toplevel
			typeOrVTable |= (uintptr)core->codeContext()->domainEnv()->toplevel();
		}

		AvmAssertMsg(s.sampleType == NEW_AUX_SAMPLE, "Sample stream corrupt - can only add info to an AUX sample.\n");
		AvmAssertMsg(s.ptr == (void*)obj, "Sample stream corrupt - last sample is not for same object.\n");

		byte* pos = currentSample;
		currentSample = old_sample;
		// Rewrite the sample as a NEW_OBJECT_SAMPLE
		writeRawSample(NEW_OBJECT_SAMPLE);

		write(currentSample, s.id);

		AvmAssertMsg( ptrSamples->get(obj)==0, "Missing dealloc sample - same memory alloc'ed twice.\n");
		ptrSamples->add(obj, currentSample);

		write(currentSample, s.ptr);

		write(currentSample, typeOrVTable);
		write(currentSample, s.alloc_size);

		AvmAssertMsg((uintptr)currentSample % 4 == 0, "Alignment should have occurred at end of raw sample.\n");
		currentSample = pos;

		return s.id;
	}
Example No. 9
/// There are two phases in the adaptive process: ordering and convergence (p. 452)
// During ordering, the learning rate is usually higher and decreases over time
void SOM::adaptation(const vector<vector<float> > &samples, float tasa, float var,
                   unsigned int maxit, bool tasa_fija) {

    this->tasa = tasa;
    this->varianza = var;

    this->tasa_fija = tasa_fija;

    // Haykin uses 1000, but I think that is the maximum number of iterations
    this->tao_1 = maxit / (log(varianza));   // Haykin, p. 452
    this->tao_2 = maxit;

    sampling(samples, maxit);

    // Once it finishes, store the learning rate so it can be decreased linearly later (p. 465)
    tasa_old = tasa_n;
}
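For context, tao_1 and tao_2 above are the time constants commonly used to decay the neighbourhood variance and the learning rate exponentially over the iterations (Haykin, ch. 9). A sketch of that decay, assuming the usual formulation; tasa_n and var_n stand for the per-iteration values that updating_som would consume, and the function name is hypothetical:

#include <cmath>

// Assumed exponential decay (not taken from this class):
//   tasa_n = tasa     * exp(-it / tao_2)
//   var_n  = varianza * exp(-it / tao_1)
void decay_sketch(float tasa, float varianza, float tao_1, float tao_2,
                  unsigned int it, float &tasa_n, float &var_n)
{
    tasa_n = tasa * std::exp(-static_cast<float>(it) / tao_2);
    var_n  = varianza * std::exp(-static_cast<float>(it) / tao_1);
}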
Example No. 10
int main(int argc, char *argv[]) {
	if (argc != 4) { 
		std::cout << argv[0] << " input.m patchnum sampling\n";
		exit(-1);
	}
	
	int patchnum = atoi(argv[2]);
	double threshold = atof(argv[3]);
	srand (static_cast <unsigned> (time(0)));

	//load seeds;
	std::vector<Patch*> patches;
	//loadSeeds("seed_sim.m", patches);
	//loadSeeds("seed2.m", patches);

	Mesh *mesh = new Mesh;
	mesh->readMFile(argv[1]);

	generateSeeds(mesh, patches, patchnum);
	
	for (int i = 0; i <= 500; ++i) {
		/*if (i % 100 == 0) {
			sprintf_s(buf, "center_%d.cm", i+1);
			saveCenters(buf, patches);
		}*/
		clustering(mesh, patches);
		checkPatches(patches);
		update(patches);
		/*if (i % 100 == 0) {
			sprintf_s(buf, "out_%d.m", i+1);
			mesh->writeMFile(buf);
		}*/
		
	}
	mesh->writeMFile("end.m");
	std::cout << "end!\n";
	traceBoundary(patches);
	DualGraph *dualGraph = generateGraph(patches);

	sampling(patches, avg_length);

	delete dualGraph;
	delete mesh;
	return 0;
} 
Example No. 11
// Responsible for running several epochs of sampling
void SOM::sampling(const vector<vector<float> > &samples, unsigned int maxit) {
	for (unsigned int it = 0; it < maxit; it++) {

        updating_som(it, (float) maxit);

        // If printCSV is enabled, save the weights every 20 iterations (or every 200 during convergence)
		//if (is_printingCSV && it%( (tasa_fija)? 5 : 1) == 0 && (!tasa_fija || it < 200)) {
        if (is_printingCSV && it%20 == 0) {
		    cout << it << " . "; cout << "[ " << tasa_n << " | " << var_n << " ]" << endl;
		    vector<vector<float> > datos(get_pesos());
            printCSV<float> (datos, "logs/buffer.csv", true);
		}

		sampling(samples, it, maxit);

	}
	cout << "\n";
}
Example No. 12
	void Sampler::recordDeallocationSample(const void* item, uint64 size)
	{
		AvmAssertMsg(sampling(), "How did we get here if sampling is disabled?");
		AvmAssert(item != 0);
		// recordDeallocationSample doesn't honor the samplingNow flag
		// this is to avoid dropping deleted object samples when sampling is paused.
		uint64 uid = (uint64)uids.get(item);
		// If we didn't find a UID then this wasn't memory that the sampler knew was allocated
		if(uid && sampleSpaceCheck(false)) {

			
//			if( !uid )
//				uid = (uint64)-1;

			writeRawSample(DELETED_OBJECT_SAMPLE);
			write(currentSample, uid);
			write(currentSample, size);

			numSamples++;

			AvmAssertMsg((uintptr)currentSample % 4 == 0, "Alignment should have occurred at end of raw sample.\n");
		}

		// Nuke the ptr in the sample stream for the newobject sample
		if( samples )
		{

		byte* oldptr = 0;
		if( (oldptr = (byte*)ptrSamples->get(item)) != 0 )
		{
#ifdef _DEBUG
				void* oldval = 0;
				read(oldptr, oldval);
				AvmAssertMsg(oldval==item, "Sample stream corrupt, dealloc doesn't point to correct address");
				rewind(oldptr, sizeof(void*));
#endif
			write(oldptr, (void*)0);
			ptrSamples->remove(item);
		}
		}
		if(uid)
			uids.remove(item);
	}
Example No. 13
File: Sampler.cpp Project: bsdf/trx
    void Sampler::sample()
    {
        AvmAssertMsg(sampling(), "How did we get here if sampling is disabled?");
        if(!samplingNow)
            return;

        uint64_t nowMicros = this->nowMicros();
        const uint64_t sampleFrequencyMicros = SAMPLE_FREQUENCY_MILLIS * 1000;

        if (takeSample)
        {
            if (core->callStack)
            {
                // We may want to write more than one sample.  E.g. if 5.5 milliseconds have
                // passed, we'll write 5 samples.
                int sampleCount = 0;
                if (lastSampleCheckMicros != 0)
                    sampleCount = (int) ((nowMicros - lastSampleCheckMicros) / sampleFrequencyMicros);
                if (sampleCount <= 0)
                    sampleCount = 1;
                for (int sampleNum = sampleCount-1; sampleNum >= 0; sampleNum--)
                {
                    if (!sampleSpaceCheck())
                        break;

                    // We artificially manufacture a different time for each sample.
                    uint64_t sampleTimeMicros = nowMicros - (sampleNum * sampleFrequencyMicros);
                    writeRawSample(RAW_SAMPLE, sampleTimeMicros);
                    numSamples++;
                }
            }
        }

        // Even if the callstack was empty, don't take another sample until the next timer tick.
        takeSample = 0;

        // Don't just set lastSampleCheckMicros equal to nowMicros -- we want to keep the
        // sampling frequency as close to one per millisecond as we can.
        uint64_t elapsed = nowMicros - lastSampleCheckMicros;
        lastSampleCheckMicros += (elapsed / sampleFrequencyMicros * sampleFrequencyMicros);
    }
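The catch-up arithmetic above (write one sample per elapsed sampling period, then advance lastSampleCheckMicros by a whole number of periods) can be shown in isolation. This standalone sketch mirrors only that integer arithmetic; the function name is hypothetical:

#include <cstdint>

// Return how many samples to write for this tick and advance lastCheck by a
// whole number of periods, keeping the effective frequency near the target.
int samples_to_write(uint64_t nowMicros, uint64_t &lastCheck, uint64_t periodMicros)
{
    int count = 1;
    if (lastCheck != 0)
    {
        count = static_cast<int>((nowMicros - lastCheck) / periodMicros);
        if (count <= 0)
            count = 1;
    }
    lastCheck += ((nowMicros - lastCheck) / periodMicros) * periodMicros;
    return count;
}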
Example No. 14
int model::train()
{
	if (specific_init())
		return 1;

	std::chrono::high_resolution_clock::time_point ts, tn;
	std::cout << "Sampling " << n_iters << " iterations!" << std::endl;

	for (int iter = 1; iter <= n_iters; ++iter)
	{
		std::cout << "Iteration " << iter << " ..." << std::endl;
		ts = std::chrono::high_resolution_clock::now();

		// for each document
		for (int m = 0; m < M; ++m)
			sampling(m);

		tn = std::chrono::high_resolution_clock::now();
		time_ellapsed.push_back(std::chrono::duration_cast<std::chrono::milliseconds>(tn - ts).count());

#if COMP_LLH
		test();
#endif

		if (n_save > 0)
		{
			if (iter % n_save == 0)
			{
				// saving the model
				std::cout << "Saving the model at iteration " << iter << "..." << std::endl;
				save_model(iter);
			}
		}
	}

	std::cout << "Gibbs sampling completed!" << std::endl;
	std::cout << "Saving the final model!" << std::endl;
	save_model(-1);

	return 0;
}
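The per-document sampling(m) call above is not shown. At the core of a collapsed Gibbs sweep is drawing a new topic index from an unnormalized weight vector; a minimal sketch of that single step, with a hypothetical name and no claim to match this model's code:

#include <cstdlib>
#include <vector>

// Draw index k with probability p[k] / sum(p), where p holds unnormalized
// topic weights. Illustrates the kind of step a Gibbs sweep performs.
int draw_from_unnormalized(const std::vector<double> &p)
{
    double sum = 0.0;
    for (double w : p) sum += w;
    double u = sum * (static_cast<double>(rand()) / RAND_MAX);
    double acc = 0.0;
    for (size_t k = 0; k < p.size(); ++k) {
        acc += p[k];
        if (u <= acc)
            return static_cast<int>(k);
    }
    return static_cast<int>(p.size()) - 1;  // guard against rounding
}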
Example No. 15
CravaTrend::CravaTrend(Simbox                       * timeSimbox,
                       Simbox                       * timeCutSimbox,
                       ModelSettings                * modelSettings,
                       bool                         & failed,
                       std::string                  & errTxt,
                       const InputFiles             * inputFiles)
{

  n_samples_ = 1000;

  const std::vector<std::string> trend_cube_parameters = modelSettings->getTrendCubeParameters();
  const std::vector<int>         trend_cube_type       = modelSettings->getTrendCubeType();
  n_trend_cubes_                                       = static_cast<int>(trend_cube_parameters.size());

  std::vector<std::string> trendCubeNames(n_trend_cubes_);

  if(n_trend_cubes_ > 0) {

    std::string errorText  = "";

    const int nx   = timeSimbox->getnx();
    const int ny   = timeSimbox->getny();
    const int nz   = timeSimbox->getnz();
    const int nxp  = nx;
    const int nyp  = ny;
    const int nzp  = nz;
    const int rnxp = 2*(nxp/2 + 1);

    for(int grid_number=0; grid_number<n_trend_cubes_; grid_number++) {

      FFTGrid * trend_cube = NULL;

      const std::string   log_name   = "trend cube '"+trend_cube_parameters[grid_number]+"'";

      if(trend_cube_type[grid_number] == ModelSettings::CUBE_FROM_FILE) {

        trendCubeNames[grid_number] = inputFiles->getTrendCube(grid_number);

        const SegyGeometry      * dummy1     = NULL;
        const TraceHeaderFormat * dummy2     = NULL;
        const float               offset     = modelSettings->getSegyOffset(0); //Facies estimation only allowed for one time lapse

        ModelGeneral::readGridFromFile(trendCubeNames[grid_number],
                                       log_name,
                                       offset,
                                       trend_cube,
                                       dummy1,
                                       dummy2,
                                       FFTGrid::PARAMETER,
                                       timeSimbox,
                                       timeCutSimbox,
                                       modelSettings,
                                       errorText,
                                       true);

        if(errorText != "") {
          errorText += "Reading of file \'"+trendCubeNames[grid_number]+"\' failed\n";
          errTxt    += errorText;
          failed     = true;
        }
      }

      else if(trend_cube_type[grid_number] == ModelSettings::STRATIGRAPHIC_DEPTH) {

        LogKit::LogFormatted(LogKit::Low,"\nGenerating trend grid \'"+trend_cube_parameters[grid_number]+"\'\n");

        trend_cube = ModelGeneral::createFFTGrid(nx, ny, nz, nxp, nyp, nzp, false);
        trend_cube->createRealGrid();
        trend_cube->setAccessMode(FFTGrid::WRITE);

        for(int k=0; k<nzp; k++) {
          for(int j=0; j<nyp; j++) {
            for(int i=0; i<rnxp; i++) {
              if(i < nx)
                trend_cube->setRealValue(i, j, k, static_cast<float>(k));
              else
                trend_cube->setRealValue(i, j, k, 0);
            }
          }
        }

        trend_cube->endAccess();
      }

      else if(trend_cube_type[grid_number] == ModelSettings::TWT) {

        LogKit::LogFormatted(LogKit::Low,"\nGenerating trend grid \'"+trend_cube_parameters[grid_number]+"\'\n");

        trend_cube = ModelGeneral::createFFTGrid(nx, ny, nz, nxp, nyp, nzp, false);
        trend_cube->createRealGrid();
        trend_cube->setAccessMode(FFTGrid::WRITE);

        for(int k=0; k<nzp; k++) {
          for(int j=0; j<nyp; j++) {
            for(int i=0; i<rnxp; i++) {
              if(i < nx) {
                float value = static_cast<float>(timeSimbox->getTop(i,j) + timeSimbox->getdz(i,j)*k);
                trend_cube->setRealValue(i, j, k, value);
              }
              else
                trend_cube->setRealValue(i, j, k, 0);
            }
          }
        }
        trend_cube->endAccess();
      }

      NRLib::Grid<double> grid_cube(nx, ny, nz);

      for(int k=0; k<nzp; k++) {
        for(int j=0; j<nyp; j++) {
          for(int i=0; i<rnxp; i++) {
            if (i < nx && j < ny && k < nz)
              grid_cube(i,j,k) = trend_cube->getRealValue(i,j,k);
          }
        }
      }

      trend_cubes_.push_back(grid_cube);


      // Calculate trend_cube_sampling_
      // Sample all trends from min to max of the trend cube, using increment_ in the sampling

      trend_cube->calculateStatistics();

      const float  max       = trend_cube->getMaxReal();
      const float  min       = trend_cube->getMinReal();
      const double increment = (max-min)/(n_samples_-1);

      std::vector<double> sampling(n_samples_);

      for(int j=0; j<n_samples_-1; j++)
        sampling[j] = min + j*increment;

      sampling[n_samples_-1] = max;

      trend_cube_sampling_.push_back(sampling);


      if((modelSettings->getOutputGridsOther() & IO::TREND_CUBES) > 0) {
        std::string fileName = IO::PrefixTrendCubes() + trend_cube_parameters[grid_number];
        writeToFile(timeSimbox, trend_cube, fileName, "trend cube");
      }

      delete trend_cube;

    }
  }
}
Example No. 16
bool AlembicWriteJob::PreProcess()
{
    // check filenames
    if(mFileName.empty())
    {
        ESS_LOG_WARNING("[alembic] No filename specified.");
        return false;
    }

    //// check objects
    //if(mSelection.Count() == 0)
    //{
    //    ESS_LOG_WARNING("[alembic] No objects specified.");
    //    return false;
    //}

    // check frames
    if(mFrames.size() == 0)
    {
        ESS_LOG_WARNING("[alembic] No frames specified.");
        return false;
    }


	const bool bParticleMesh = GetOption("exportParticlesAsMesh");
   bool bMergePolyMeshSubtree = GetOption("mergePolyMeshSubtree");

   bool bSelectParents = GetOption("includeParentNodes");/*|| !bFlattenHierarchy || bTransformCache*/
   const bool bSelectChildren = false;
   bool bTransformCache = GetOption("transformCache");
   const bool bFlattenHierarchy = GetOption("flattenHierarchy");

   if(bMergePolyMeshSubtree){
      bTransformCache = false;
      //bSelectParents = true;
   }

   bcsgSelection::types buildSelection = bcsgSelection::ALL;

   const bool bExportSelected = GetOption("exportSelected");
   const bool bObjectsParameterExists = GetOption("objectsParameterExists");
   if(bExportSelected){
      //copy max selection
      buildSelection = bcsgSelection::APP;
   }
   else if(bObjectsParameterExists){
      //select nothing when building, fill in later from parameter data
      buildSelection = bcsgSelection::NONE;
   }
   else{
      //select everything
   }

   int nNumNodes = 0;
   exoSceneRoot = buildCommonSceneGraph(nNumNodes, true, buildSelection);
   //WARNING ILM robot right crashes when printing
   //printSceneGraph(exoSceneRoot, false);


   if(bObjectsParameterExists){
      //Might be better to use refineSelection here, but call a function that sets up dccSelected flag first, then delete this function from codebase
      selectNodes(exoSceneRoot, mObjectsMap,  bSelectParents, bSelectChildren, !bTransformCache);

      bool bAllResolved = true;

      if(bObjectsParameterExists){
         for(SceneNode::SelectionT::iterator it = mObjectsMap.begin(); it != mObjectsMap.end(); it++){
            if(it->second == false){
               bAllResolved = false;
               ESS_LOG_ERROR("Could not resolve objects identifier: "<<it->first);
            }
         }
      }

      if(bAllResolved){
         removeUnselectedNodes(exoSceneRoot);
      }
      else{
         return false;
      }
   }
   else if(bExportSelected){
      refineSelection(exoSceneRoot, bSelectParents, bSelectChildren, !bTransformCache);
      removeUnselectedNodes(exoSceneRoot);
   }
   

   if(bMergePolyMeshSubtree){
      replacePolyMeshSubtree<SceneNodeMaxPtr, SceneNodeMax>(exoSceneRoot);
   }

   if(bFlattenHierarchy){
      nNumNodes = 0;
      flattenSceneGraph(exoSceneRoot, nNumNodes);
   }

   
  
   if(GetOption("renameConflictingNodes")){
      renameConflictingNodes(exoSceneRoot, false);
   }
   else{
      int nRenameCount = renameConflictingNodes(exoSceneRoot, true);
      if(nRenameCount){
         ESS_LOG_ERROR("Cannot export due to sibling node naming conflict. Consider exporting with renameConflictingNodes=true");
         return false;
      }
   }

    const bool bUseOgawa = (bool)GetOption("useOgawa");

    // init archive (use a locally scoped archive)
    std::string sceneFileName = "";
    sceneFileName.append( EC_MSTR_to_UTF8( mApplication->GetCurFilePath() ) );
    try
    {
       if(bUseOgawa){
        mArchive = CreateArchiveWithInfo(
            Alembic::AbcCoreOgawa::WriteArchive(),
			mFileName.c_str(),
			getExporterName( "3DS Max " EC_QUOTE( crate_Max_Version ) ).c_str(),
			getExporterFileName( sceneFileName ).c_str(),
			Abc::ErrorHandler::kThrowPolicy);
       }
       else{
        mArchive = CreateArchiveWithInfo(
			Alembic::AbcCoreHDF5::WriteArchive( true ),
			mFileName.c_str(),
			getExporterName( "3DS Max " EC_QUOTE( crate_Max_Version ) ).c_str(),
			getExporterFileName( sceneFileName ).c_str(),
			Abc::ErrorHandler::kThrowPolicy);
       }

    }
    catch(Alembic::Util::Exception& e)
    {
        std::string exc(e.what());
		    ESS_LOG_ERROR("[alembic] Error writing to file: "<<e.what());
        return false;
    }

    // get the frame rate
    mFrameRate = static_cast<float>(GetFrameRate());
    if(mFrameRate == 0.0f)
    {
        mFrameRate = 25.0f;
    }

    std::vector<AbcA::chrono_t> frames;
    for(LONG i=0;i<mFrames.size();i++)
    {
        frames.push_back(mFrames[i] / mFrameRate);
    }

    // create the sampling
    double timePerSample = 1.0 / mFrameRate;
    if(frames.size() > 1)
    {
		 if( ! HasAlembicWriterLicense() )
		 {
       if( HasAlembicInvalidLicense() ) {
          ESS_LOG_ERROR("[alembic] No license available and EXOCORTEX_ALEMBIC_NO_DEMO defined, aborting." );
          return false;
       }
			 if(frames.size() > 75)
			 {
				frames.resize(75);
				ESS_LOG_WARNING("[ExocortexAlembic] Writer license not found: Maximum exportable samplecount is 75!");
			 }
		 }
		 
	    double timePerCycle = frames[frames.size()-1] - frames[0];
        AbcA::TimeSamplingType samplingType((boost::uint32_t)frames.size(),timePerCycle);
        AbcA::TimeSampling sampling(samplingType,frames);
        mTs = mArchive.addTimeSampling(sampling);
    }
    else
    {
        AbcA::TimeSampling sampling(1.0,frames[0]);
        mTs = mArchive.addTimeSampling(sampling);
    }

    m_ArchiveBoxProp = AbcG::CreateOArchiveBounds(mArchive,mTs);



   std::list<PreProcessStackElement> sceneStack;
   
   sceneStack.push_back(PreProcessStackElement(exoSceneRoot, mArchive.getTop()));

   try{

   while( !sceneStack.empty() )
   {

      PreProcessStackElement sElement = sceneStack.back();
      SceneNodePtr eNode = sElement.eNode;
      sceneStack.pop_back();
      
      Abc::OObject oParent = sElement.oParent;
      Abc::OObject oNewParent;

      AlembicObjectPtr pNewObject;

      if(eNode->type == SceneNode::SCENE_ROOT){
         //we do not want to export the Scene_Root (the alembic archive has one already)
      }
      else if(eNode->type == SceneNode::ITRANSFORM || eNode->type == SceneNode::ETRANSFORM){
         pNewObject.reset(new AlembicXForm(eNode, this, oParent));
      }
      else if(eNode->type == SceneNode::CAMERA){
         pNewObject.reset(new AlembicCamera(eNode, this, oParent));
      }
      else if(eNode->type == SceneNode::POLYMESH || eNode->type == SceneNode::POLYMESH_SUBTREE){
         pNewObject.reset(new AlembicPolyMesh(eNode, this, oParent));
      }
      //TODO: as far as I recall we don't support SUBD. Verify...
      //else if(eNode->type == SceneNode::SUBD){
      //   pNewObject.reset(new AlembicSubD(eNode, this, oParent));
      //}
      else if(eNode->type == SceneNode::CURVES){
         pNewObject.reset(new AlembicCurves(eNode, this, oParent));
      }
      else if(eNode->type == SceneNode::PARTICLES || eNode->type == SceneNode::PARTICLES_TP){
         if(bParticleMesh){
            pNewObject.reset(new AlembicPolyMesh(eNode, this, oParent));
         }
         else{
            pNewObject.reset(new AlembicPoints(eNode, this, oParent));
         }
      }
      //else{
      //   ESS_LOG_WARNING("Unknown type: not exporting "<<eNode->name);//Export as transform, and give warning?
      //}

      if(pNewObject){
         //add the AlembicObject to export list if it is not being skipped
         AddObject(pNewObject);
      }

      if(pNewObject){
         oNewParent = oParent.getChild(eNode->name);
      }
      else{ //this case should be unnecessary
         //if we skip node A, we parent node A's children to the parent of A
         oNewParent = oParent;
      }

      if(oNewParent.valid()){
         for( std::list<SceneNodePtr>::iterator it = eNode->children.begin(); it != eNode->children.end(); it++){
            sceneStack.push_back(PreProcessStackElement(*it, oNewParent));
         }
      }
      else{
         ESS_LOG_ERROR("Do not have reference to parent.");
         return false;
      }
   }

   }catch( std::exception& exp ){
      ESS_LOG_ERROR("An std::exception occured: "<<exp.what());
      return false;
   }catch(...){
      ESS_LOG_ERROR("Exception encountered when exporting.");
   }

   if(mObjects.empty()){
      ESS_LOG_ERROR("No objects specified.");
      return false;
   }

   return true;
}
Example No. 17
MStatus AlembicWriteJob::PreProcess()
{
  ESS_PROFILE_SCOPE("AlembicWriteJob::PreProcess");
  // check filenames
  if (mFileName.length() == 0) {
    MGlobal::displayError("[ExocortexAlembic] No filename specified.");
    MPxCommand::setResult(
        "Error caught in AlembicWriteJob::PreProcess: no filename specified");
    return MStatus::kInvalidParameter;
  }

  // check objects
  if (mSelection.length() == 0) {
    MGlobal::displayError("[ExocortexAlembic] No objects specified.");
    MPxCommand::setResult(
        "Error caught in AlembicWriteJob::PreProcess: no objects specified");
    return MStatus::kInvalidParameter;
  }

  // check frames
  if (mFrames.size() == 0) {
    MGlobal::displayError("[ExocortexAlembic] No frames specified.");
    MPxCommand::setResult(
        "Error caught in AlembicWriteJob::PreProcess: no frame specified");
    return MStatus::kInvalidParameter;
  }

  // check if the file is currently in use
  if (getRefArchive(mFileName) > 0) {
    MGlobal::displayError("[ExocortexAlembic] Error writing to file '" +
                          mFileName + "'. File currently in use.");
    MPxCommand::setResult(
        "Error caught in AlembicWriteJob::PreProcess: no filename already in "
        "use");
    return MStatus::kInvalidParameter;
  }

  // init archive (use a locally scoped archive)
  // TODO: determine how to access the current maya scene path
  // MString sceneFileName = "Exported from:
  // "+Application().GetActiveProject().GetActiveScene().GetParameterValue("FileName").GetAsText();
  try {
    createArchive("Exported from Maya.");

    mTop = mArchive.getTop();

    // get the frame rate
    mFrameRate = MTime(1.0, MTime::kSeconds).as(MTime::uiUnit());
    const double timePerSample = 1.0 / mFrameRate;
    std::vector<AbcA::chrono_t> frames;
    for (LONG i = 0; i < mFrames.size(); i++) {
      frames.push_back(mFrames[i] * timePerSample);
    }

    // create the sampling
    if (frames.size() > 1) {
      const double timePerCycle = frames[frames.size() - 1] - frames[0];
      AbcA::TimeSamplingType samplingType((Abc::uint32_t)frames.size(),
                                          timePerCycle);
      AbcA::TimeSampling sampling(samplingType, frames);
      mTs = mArchive.addTimeSampling(sampling);
    }
    else {
      AbcA::TimeSampling sampling(1.0, frames[0]);
      mTs = mArchive.addTimeSampling(sampling);
    }
    Abc::OBox3dProperty boxProp = AbcG::CreateOArchiveBounds(mArchive, mTs);

    MDagPath dagPath;
    {
      MItDag().getPath(dagPath);
    }
    SceneNodePtr exoSceneRoot = buildMayaSceneGraph(dagPath, this->replacer);
    const bool bFlattenHierarchy = GetOption("flattenHierarchy") == "1";
    const bool bTransformCache = GetOption("transformCache") == "1";
    const bool bSelectChildren = false;
    {
      std::map<std::string, bool> selectionMap;
      for (int i = 0; i < (int)mSelection.length(); ++i) {
        MFnDagNode dagNode(mSelection[i]);
        selectionMap[dagNode.fullPathName().asChar()] = true;
      }
      selectNodes(exoSceneRoot, selectionMap,
                  !bFlattenHierarchy || bTransformCache, bSelectChildren,
                  !bTransformCache, true);
    }

    // create object for each
    MProgressWindow::reserve();
    MProgressWindow::setTitle("Alembic Export: Listing objects");
    MProgressWindow::setInterruptable(true);
    MProgressWindow::setProgressRange(0, mSelection.length());
    MProgressWindow::setProgress(0);

    MProgressWindow::startProgress();
    int interrupt = 20;
    bool processStopped = false;
    std::deque<PreProcessStackElement> sceneStack;

    sceneStack.push_back(PreProcessStackElement(exoSceneRoot, mTop));

    while (!sceneStack.empty()) {
      if (--interrupt == 0) {
        interrupt = 20;
        if (MProgressWindow::isCancelled()) {
          processStopped = true;
          break;
        }
      }

      PreProcessStackElement &sElement = sceneStack.back();
      SceneNodePtr eNode = sElement.eNode;
      sceneStack.pop_back();

      Abc::OObject oParent = sElement.oParent;
      Abc::OObject oNewParent;

      AlembicObjectPtr pNewObject;
      if (eNode->selected) {
        switch (eNode->type) {
          case SceneNode::SCENE_ROOT:
            break;
          case SceneNode::ITRANSFORM:
          case SceneNode::ETRANSFORM:
            pNewObject.reset(new AlembicXform(eNode, this, oParent));
            break;
          case SceneNode::CAMERA:
            pNewObject.reset(new AlembicCamera(eNode, this, oParent));
            break;
          case SceneNode::POLYMESH:
            pNewObject.reset(new AlembicPolyMesh(eNode, this, oParent));
            break;
          case SceneNode::SUBD:
            pNewObject.reset(new AlembicSubD(eNode, this, oParent));
            break;
          case SceneNode::CURVES:
            pNewObject.reset(new AlembicCurves(eNode, this, oParent));
            break;
          case SceneNode::PARTICLES:
            pNewObject.reset(new AlembicPoints(eNode, this, oParent));
            break;
          case SceneNode::HAIR:
            pNewObject.reset(new AlembicHair(eNode, this, oParent));
            break;
          default:
            ESS_LOG_WARNING("Unknown type: not exporting " << eNode->name);
        }
      }

      if (pNewObject) {
        AddObject(pNewObject);
        oNewParent = oParent.getChild(eNode->name);
      }
      else {
        oNewParent = oParent;
      }

      if (oNewParent.valid()) {
        for (std::list<SceneNodePtr>::iterator it = eNode->children.begin();
             it != eNode->children.end(); ++it) {
          if (!bFlattenHierarchy ||
              (bFlattenHierarchy && eNode->type == SceneNode::ETRANSFORM &&
               isShapeNode((*it)->type))) {
            // If flattening the hierarchy, we want to attach each external
            // transform to its corresponding geometry node.
            // All internal transforms should be skipped. Geometry nodes will
            // never have children. (If an XSI geonode is parented
            // to another geonode, each will be parented to its extracted
            // transform node, and one node will be parented to the
            // transform of the other.)
            sceneStack.push_back(PreProcessStackElement(*it, oNewParent));
          }
          else {
            // if we skip node A, we parent node A's children to the parent of A
            sceneStack.push_back(PreProcessStackElement(*it, oParent));
          }
        }
      }
      //*
      else {
        ESS_LOG_ERROR("Do not have reference to parent.");
        MPxCommand::setResult(
            "Error caught in AlembicWriteJob::PreProcess: do not have "
            "reference to parent");
        return MS::kFailure;
      }
      //*/
    }

    MProgressWindow::endProgress();
    return processStopped ? MStatus::kEndOfFile : MStatus::kSuccess;
  }
  catch (AbcU::Exception &e) {
    this->forceCloseArchive();
    MString exc(e.what());
    MGlobal::displayError("[ExocortexAlembic] Error writing to file '" +
                          mFileName + "' (" + exc +
                          "). Do you still have it opened?");
    MPxCommand::setResult(
        "Error caught in AlembicWriteJob::PreProcess: error writing file");
  }

  return MS::kFailure;
}
Example No. 18
int main (int argc, char** argv){

	double timing = get_time();
	double start_time = timing;

	Parameter myPara (argc, argv);
	GlobalParam gParam;

	// -------------------------------------------------------------------

	//std::cout << "Get input bam file list...\n\n";
	//xny::getInputFilelist (gParam.bam_filelist, "bam", myPara.iDirNm);

	gParam.bam_filelist.push_back(myPara.iDirNm);

	if (gParam.bam_filelist.size() == 0) abording ("no .bam file found");
	else {
		int sz = gParam.bam_filelist.size();
		std::cout << "\n\t" << sz  << " bam file(s) found: \n";
		print_strvec ("\t\t", gParam.bam_filelist);
	}

	// -------------------------------------------------------------------

	std::cout << "\nParse bam header: get refSeq info & sanity check\n\n";
	parse_bam_header (gParam, myPara.pSample);

	// -------------------------------------------------------------------

	std::cout << "\nGet maxQ, minQ, maxReadLen, avgFragSz, "
			"stdFragSz from bam files ...\n\n";

	int min_qual, max_qual;
	sampling (min_qual, max_qual, gParam.maxRL, gParam.avgFragSz,
			gParam.stdFragSz, gParam.bam_filelist, myPara.pSample);
	if (max_qual - min_qual + 1 < myPara.var_qt) {
		myPara.var_qt = max_qual - min_qual + 1;
		std::cout << "\tqQuantile is reset to " << myPara.var_qt << "\n";
	}

	// -------------------------------------------------------------------
	std::cout << "\nGenerate qual -> quantile map ... \n\n";

	qqMap (gParam.qq, min_qual, max_qual, myPara.var_qt);

	// -------------------------------------------------------------------
	std::cout << "\nSet up paired read map arrays ... \n\n";
	set_rmap_array (gParam, myPara);
	//set_arrays (gParam, qq, myPara);

	// remeasure fragment size based on paired read alignment as the
	// BAM file did not properly record such information
	if (gParam.avgFragSz == 0) {
		ivec_t fragSz;
		std::map<std::string, MapEntry>::iterator it_am = gParam.alnMap.begin();
		for (; it_am != gParam.alnMap.end(); ++ it_am) {
			if (it_am->second.second.start >= it_am->second.first.start){
				fragSz.push_back(it_am->second.second.start -
						it_am->second.first.start);
			}
		}
		/* calculate mean frag sz and std */
		int sample_sz = fragSz.size();
		uint64_t sum = 0;
		for (int i = 0; i < sample_sz; ++ i) sum += fragSz[i];
		if (sum > 0 && sample_sz > 0) gParam.avgFragSz = sum/sample_sz;
		sum = 0;
		for (int i = 0; i < (int) fragSz.size(); ++ i) {
			sum += (gParam.avgFragSz - fragSz[i])*(gParam.avgFragSz - fragSz[i]);
		}
		if (sample_sz > 1) gParam.stdFragSz = std::sqrt(sum/(sample_sz - 1));
		std::cout << "\trecalculated frag size and std" << gParam.avgFragSz
				<< ", " << gParam.stdFragSz << "\n";
	}

	// -------------------------------------------------------------------
	std::cout << "\nPrepare aln columns file...\n\n";
	prep_aln_file (gParam, myPara);

	// -------------------------------------------------------------------
	std::cout << "\nInference ...\n\n";

	EM (gParam, myPara);

	std::cout << "-----------------------------------------------------------\n\n";

    print_time("Whole program takes \t", start_time);
	std::cout << "DONE!\n";
	return (EXIT_SUCCESS);
}
Example No. 19
/*
 * When a container buffer is full, we push it into container_queue.
 */
static void* filter_thread(void *arg) {
    int enable_rewrite = 1;
    struct fileRecipeMeta* r = NULL;

    while (1) {
        struct chunk* c = sync_queue_pop(rewrite_queue);

        if (c == NULL)
            /* backup job finish */
            break;

        /* reconstruct a segment */
        struct segment* s = new_segment();

        /* segment head */
        assert(CHECK_CHUNK(c, CHUNK_SEGMENT_START));
        free_chunk(c);

        c = sync_queue_pop(rewrite_queue);
        while (!(CHECK_CHUNK(c, CHUNK_SEGMENT_END))) {
            g_sequence_append(s->chunks, c);
            if (!CHECK_CHUNK(c, CHUNK_FILE_START)
                    && !CHECK_CHUNK(c, CHUNK_FILE_END))
                s->chunk_num++;

            c = sync_queue_pop(rewrite_queue);
        }
        free_chunk(c);

        /* For self-references in a segment.
         * If we find an early copy of the chunk in this segment has been rewritten,
         * the rewrite request for it will be denied to avoid repeat rewriting. */
        GHashTable *recently_rewritten_chunks = g_hash_table_new_full(g_int64_hash,
        		g_fingerprint_equal, NULL, free_chunk);
        GHashTable *recently_unique_chunks = g_hash_table_new_full(g_int64_hash,
        			g_fingerprint_equal, NULL, free_chunk);

        pthread_mutex_lock(&index_lock.mutex);

        TIMER_DECLARE(1);
        TIMER_BEGIN(1);
        /* This function will check the fragmented chunks
         * that would be rewritten later.
         * If we find an early copy of the chunk in earlier segments,
         * has been rewritten,
         * the rewrite request for it will be denied. */
        index_check_buffer(s);

    	GSequenceIter *iter = g_sequence_get_begin_iter(s->chunks);
    	GSequenceIter *end = g_sequence_get_end_iter(s->chunks);
        for (; iter != end; iter = g_sequence_iter_next(iter)) {
            c = g_sequence_get(iter);

    		if (CHECK_CHUNK(c, CHUNK_FILE_START) || CHECK_CHUNK(c, CHUNK_FILE_END))
    			continue;

            VERBOSE("Filter phase: %dth chunk in %s container %lld", chunk_num,
                    CHECK_CHUNK(c, CHUNK_OUT_OF_ORDER) ? "out-of-order" : "", c->id);

            /* Cache-Aware Filter */
            if (destor.rewrite_enable_cache_aware && restore_aware_contains(c->id)) {
                assert(c->id != TEMPORARY_ID);
                VERBOSE("Filter phase: %dth chunk is cached", chunk_num);
                SET_CHUNK(c, CHUNK_IN_CACHE);
            }

            /* A cfl-switch for rewriting out-of-order chunks. */
            if (destor.rewrite_enable_cfl_switch) {
                double cfl = restore_aware_get_cfl();
                if (enable_rewrite && cfl > destor.rewrite_cfl_require) {
                    VERBOSE("Filter phase: Turn OFF the (out-of-order) rewrite switch of %.3f",
                            cfl);
                    enable_rewrite = 0;
                } else if (!enable_rewrite && cfl < destor.rewrite_cfl_require) {
                    VERBOSE("Filter phase: Turn ON the (out-of-order) rewrite switch of %.3f",
                            cfl);
                    enable_rewrite = 1;
                }
            }

            if(CHECK_CHUNK(c, CHUNK_DUPLICATE) && c->id == TEMPORARY_ID){
            	struct chunk* ruc = g_hash_table_lookup(recently_unique_chunks, &c->fp);
            	assert(ruc);
            	c->id = ruc->id;
            }
            struct chunk* rwc = g_hash_table_lookup(recently_rewritten_chunks, &c->fp);
            if(rwc){
            	c->id = rwc->id;
            	SET_CHUNK(c, CHUNK_REWRITE_DENIED);
            }

            /* A fragmented chunk will be denied if it has been rewritten recently */
            if (!CHECK_CHUNK(c, CHUNK_DUPLICATE) || (!CHECK_CHUNK(c, CHUNK_REWRITE_DENIED)
            		&& (CHECK_CHUNK(c, CHUNK_SPARSE)
                    || (enable_rewrite && CHECK_CHUNK(c, CHUNK_OUT_OF_ORDER)
                        && !CHECK_CHUNK(c, CHUNK_IN_CACHE))))) {
                /*
                 * If the chunk is unique, or be fragmented and not denied,
                 * we write it to a container.
                 * Fragmented indicates: sparse, or out of order and not in cache,
                 */
                if (storage_buffer.container_buffer == NULL){
                	storage_buffer.container_buffer = create_container();
                	if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY)
                		storage_buffer.chunks = g_sequence_new(free_chunk);
                }

                if (container_overflow(storage_buffer.container_buffer, c->size)) {

                    if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY){
                        /*
                         * TO-DO
                         * Update_index for physical locality
                         */
                        GHashTable *features = sampling(storage_buffer.chunks,
                        		g_sequence_get_length(storage_buffer.chunks));
                        index_update(features, get_container_id(storage_buffer.container_buffer));
                        g_hash_table_destroy(features);
                        g_sequence_free(storage_buffer.chunks);
                        storage_buffer.chunks = g_sequence_new(free_chunk);
                    }
                    TIMER_END(1, jcr.filter_time);
                    write_container_async(storage_buffer.container_buffer);
                    TIMER_BEGIN(1);
                    storage_buffer.container_buffer = create_container();
                }

                if(add_chunk_to_container(storage_buffer.container_buffer, c)){

                	struct chunk* wc = new_chunk(0);
                	memcpy(&wc->fp, &c->fp, sizeof(fingerprint));
                	wc->id = c->id;
                	if (!CHECK_CHUNK(c, CHUNK_DUPLICATE)) {
                		jcr.unique_chunk_num++;
                		jcr.unique_data_size += c->size;
                		g_hash_table_insert(recently_unique_chunks, &wc->fp, wc);
                    	VERBOSE("Filter phase: %dth chunk is recently unique, size %d", chunk_num,
                    			g_hash_table_size(recently_unique_chunks));
                	} else {
                		jcr.rewritten_chunk_num++;
                		jcr.rewritten_chunk_size += c->size;
                		g_hash_table_insert(recently_rewritten_chunks, &wc->fp, wc);
                	}

                	if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY){
                		struct chunk* ck = new_chunk(0);
                		memcpy(&ck->fp, &c->fp, sizeof(fingerprint));
                		g_sequence_append(storage_buffer.chunks, ck);
                	}

                	VERBOSE("Filter phase: Write %dth chunk to container %lld",
                			chunk_num, c->id);
                }else{
                	VERBOSE("Filter phase: container %lld already has this chunk", c->id);
            		assert(destor.index_category[0] != INDEX_CATEGORY_EXACT
            				|| destor.rewrite_algorithm[0]!=REWRITE_NO);
                }

            }else{
                if(CHECK_CHUNK(c, CHUNK_REWRITE_DENIED)){
                    VERBOSE("Filter phase: %lldth fragmented chunk is denied", chunk_num);
                }else if (CHECK_CHUNK(c, CHUNK_OUT_OF_ORDER)) {
                    VERBOSE("Filter phase: %lldth chunk in out-of-order container %lld is already cached",
                            chunk_num, c->id);
                }
            }

            assert(c->id != TEMPORARY_ID);

            /* Collect historical information. */
            har_monitor_update(c->id, c->size);

            /* Restore-aware */
            restore_aware_update(c->id, c->size);

            chunk_num++;
        }

        int full = index_update_buffer(s);

        /* Write a SEGMENT_BEGIN */
        segmentid sid = append_segment_flag(jcr.bv, CHUNK_SEGMENT_START, s->chunk_num);

        /* Write recipe */
    	iter = g_sequence_get_begin_iter(s->chunks);
    	end = g_sequence_get_end_iter(s->chunks);
        for (; iter != end; iter = g_sequence_iter_next(iter)) {
            c = g_sequence_get(iter);

        	if(r == NULL){
        		assert(CHECK_CHUNK(c,CHUNK_FILE_START));
        		r = new_file_recipe_meta(c->data);
        	}else if(!CHECK_CHUNK(c,CHUNK_FILE_END)){
        		struct chunkPointer cp;
        		cp.id = c->id;
        		assert(cp.id>=0);
        		memcpy(&cp.fp, &c->fp, sizeof(fingerprint));
        		cp.size = c->size;
        		append_n_chunk_pointers(jcr.bv, &cp ,1);
        		r->chunknum++;
        		r->filesize += c->size;

    	    	jcr.chunk_num++;
	    	    jcr.data_size += c->size;

        	}else{
        		assert(CHECK_CHUNK(c,CHUNK_FILE_END));
        		append_file_recipe_meta(jcr.bv, r);
        		free_file_recipe_meta(r);
        		r = NULL;

	            jcr.file_num++;
        	}
        }

       	/* Write a SEGMENT_END */
       	append_segment_flag(jcr.bv, CHUNK_SEGMENT_END, 0);

        if(destor.index_category[1] == INDEX_CATEGORY_LOGICAL_LOCALITY){
             /*
              * TO-DO
              * Update_index for logical locality
              */
            s->features = sampling(s->chunks, s->chunk_num);
         	if(destor.index_category[0] == INDEX_CATEGORY_EXACT){
         		/*
         		 * For exact deduplication,
         		 * unique fingerprints are inserted.
         		 */
         		VERBOSE("Filter phase: add %d unique fingerprints to %d features",
         				g_hash_table_size(recently_unique_chunks),
         				g_hash_table_size(s->features));
         		GHashTableIter iter;
         		gpointer key, value;
         		g_hash_table_iter_init(&iter, recently_unique_chunks);
         		while(g_hash_table_iter_next(&iter, &key, &value)){
         			struct chunk* uc = value;
         			fingerprint *ft = malloc(sizeof(fingerprint));
         			memcpy(ft, &uc->fp, sizeof(fingerprint));
         			g_hash_table_insert(s->features, ft, NULL);
         		}

         		/*
         		 * OPTION:
         		 * 	It is still an open problem whether we need to update
         		 * 	rewritten fingerprints.
         		 * 	It would increase index update overhead, while the benefit
         		 * 	remains unclear.
         		 * 	More experiments are required.
         		 */
         		VERBOSE("Filter phase: add %d rewritten fingerprints to %d features",
         				g_hash_table_size(recently_rewritten_chunks),
         				g_hash_table_size(s->features));
         		g_hash_table_iter_init(&iter, recently_rewritten_chunks);
         		while(g_hash_table_iter_next(&iter, &key, &value)){
         			struct chunk* uc = value;
         			fingerprint *ft = malloc(sizeof(fingerprint));
         			memcpy(ft, &uc->fp, sizeof(fingerprint));
         			g_hash_table_insert(s->features, ft, NULL);
         		}
         	}
         	index_update(s->features, sid);
         }

        free_segment(s);

        if(index_lock.wait_threshold > 0 && full == 0){
        	pthread_cond_broadcast(&index_lock.cond);
        }
        TIMER_END(1, jcr.filter_time);
        pthread_mutex_unlock(&index_lock.mutex);

        g_hash_table_destroy(recently_rewritten_chunks);
        g_hash_table_destroy(recently_unique_chunks);

    }

    if (storage_buffer.container_buffer
    		&& !container_empty(storage_buffer.container_buffer)){
        if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY){
            /*
             * TO-DO
             * Update_index for physical locality
             */
        	GHashTable *features = sampling(storage_buffer.chunks,
        			g_sequence_get_length(storage_buffer.chunks));
        	index_update(features, get_container_id(storage_buffer.container_buffer));
        	g_hash_table_destroy(features);
        	g_sequence_free(storage_buffer.chunks);
        }
        write_container_async(storage_buffer.container_buffer);
    }

    /* All files done */
    jcr.status = JCR_STATUS_DONE;
    return NULL;
}
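The sampling(chunks, chunk_num) calls above belong to the deduplication system's feature-selection stage and are not shown; conceptually they pick a subset of a segment's (or container's) fingerprints to use as index features. A simplified sketch of one such policy (keep every k-th fingerprint), written with plain C++ containers instead of the GLib structures used in the original:

#include <array>
#include <cstdint>
#include <set>
#include <vector>

using Fingerprint = std::array<uint8_t, 20>;  // assumed SHA-1-sized fingerprint

// Simplified feature sampling: keep every k-th fingerprint. The real code
// supports several policies (uniform, random, minimum); this only shows the
// general shape of such a function.
std::set<Fingerprint> sample_features(const std::vector<Fingerprint> &chunks, size_t k)
{
    std::set<Fingerprint> features;
    if (k == 0)
        k = 1;
    for (size_t i = 0; i < chunks.size(); i += k)
        features.insert(chunks[i]);
    return features;
}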
Example No. 20
int main(int argc, char* argv[]) {

    std::string textfile;
    int vecsize = 0;
    int epochs = 0;
    int context = 0;
    int negsamples = 0;
    std::string sinkfile;

    if (argc < 2) {
        print_usage();
    }

    for (int i = 1; i < argc; ++i) {
        switch (argv[i][1])
        {
        case 's':
            textfile = argv[++i];
            break;
        case 'w':
            vecsize = std::atoi(argv[++i]);
            break;
        case 'o':
            sinkfile = argv[++i];
            break;
        case 'e':
            epochs = std::atoi(argv[++i]);
            break;
        case 'c':
            context = std::atoi(argv[++i]);
            break;
        case 'n':
            negsamples = std::atoi(argv[++i]);
            break;
        default:
            print_usage();
            std::exit(-1);
        }
    }

    std::cout << "source text file : " << textfile << std::endl
              << "vector size      : " << vecsize << std::endl
              << "model file       : " << sinkfile << std::endl
              << "#epochs          : " << epochs << std::endl
              << "context size     : " << context << std::endl
              << "negative samples : " << negsamples << std::endl;

    std::vector<Sentence> corpus;
    std::map<std::string, size_t> word2id;
    std::map<size_t, std::string> id2word;
    Eigen::RowVectorXf unidist;

    LoadData(textfile, corpus);
    BuildLexicon(word2id, id2word, corpus);
    BuildWordDist(unidist, word2id, corpus);

    int wordcounts = word2id.size();
    std::cout << "#words : " << wordcounts << std::endl;

    srand(0);
    Eigen::MatrixXf innerVector, outerVector;
    innerVector.resize(wordcounts, vecsize);
    outerVector.resize(wordcounts, vecsize);

    innerVector.setRandom();
    outerVector.setRandom();

    std::cout << "inner " << std::endl;
    std::cout << innerVector << std::endl;
    std::cout << "outer " << std::endl;
    std::cout << outerVector << std::endl;

    std::uniform_real_distribution<double> wordselector(0.0, 1.0);
    std::default_random_engine engine(0);

    // training with word embeddings.
    for (int i = 0; i < epochs; ++i) {
        for (Sentence& sent : corpus) {
            for (int idx = 0; idx < sent.length(); ++idx) {
                size_t leftIdx = Sentence::GetPos(sent.length(), idx, -context);
                size_t rightIdx = Sentence::GetPos(sent.length(), idx, context);

                std::vector<size_t> posindx;
                std::vector<size_t> negindx;

                for (size_t contextidx = leftIdx; contextidx <= rightIdx; ++contextidx) {
                    if (contextidx == idx)
                        continue;
                    posindx.push_back(word2id[sent.word(contextidx)]);
                }

                for (int negcnt = 0; negcnt < negsamples; ++ negcnt) {

                    size_t selwordidx = 0;
                    do {
                        double randomvalue = wordselector(engine);
                        selwordidx = sampling(unidist, randomvalue);
                        bool realneg = true;
                        for (size_t posidx : posindx) {
                            if (posidx == selwordidx)
                                realneg = false;
                        }

                        for (size_t negidx : negindx) {
                            if (negidx == selwordidx)
                                realneg = false;
                        }

                        if (realneg)
                            break;
                    } while (true);

                    negindx.push_back(selwordidx);
                }

                Eigen::MatrixXf posoutervec(posindx.size(), vecsize);
                Eigen::MatrixXf posgrad(posindx.size(), vecsize);

                Eigen::MatrixXf negoutervec(negindx.size(), vecsize);
                Eigen::MatrixXf neggrad(negindx.size(), vecsize);

                // update the information
                for (int posIdx = 0; posIdx < posindx.size(); ++posIdx) {
                    posoutervec.row(posIdx) = outerVector.row(posindx[posIdx]);
                }

                for (int negIdx = 0; negIdx < negindx.size(); ++negIdx) {
                    negoutervec.row(negIdx) = outerVector.row(negindx[negIdx]);
                }

                // initialize the vector
                Eigen::RowVectorXf centerInnervec = innerVector.row(word2id[sent.word(idx)]);

                Eigen::RowVectorXf posrawscore = posoutervec * centerInnervec.transpose();
                Eigen::RowVectorXf negrawscore = negoutervec * centerInnervec.transpose();

                double expsum = posrawscore.array().exp().sum() + negrawscore.array().exp().sum();

                Eigen::RowVectorXf posprob = posrawscore.array().exp() / expsum;
                Eigen::RowVectorXf negprob = negrawscore.array().exp() / expsum;

                // calculate the gradient of center word inner vector.
                Eigen::RowVectorXf centerInnergrad = posoutervec.colwise().sum() - posprob * posoutervec;
                centerInnergrad *= -1;

                // calculate the gradient of surrounding word outer vector
                for (size_t posindx = 0; posindx < posgrad.rows(); ++posindx) {
                    posgrad.row(posindx) = -1 * (centerInnervec - posprob[posindx] * posgrad.rows() * centerInnervec);
                }

                // calculate the negative samples
                for (size_t negidx = 0; negidx < neggrad.rows(); ++negidx) {
                    neggrad.row(negidx) = negprob[negidx] * centerInnervec;
                }

                // update the gradient
                size_t centerWordId = word2id[sent.word(idx)];
                innerVector.row(centerWordId) -= 0.1 * centerInnergrad;

                for (size_t posIdx = 0; posIdx < posindx.size(); ++posIdx) {
                    size_t wordId = posindx[posIdx];
                    outerVector.row(wordId) -= posgrad.row(posIdx);
                }

                for (size_t negIdx = 0; negIdx < negindx.size(); ++negIdx) {
                    size_t wordId = negindx[negIdx];
                    outerVector.row(wordId) -= neggrad.row(negIdx);
                }

                std::cout << "Update Vector : " << std::endl;
                std::cout << "inner " << std::endl;
                std::cout << innerVector << std::endl;
                std::cout << "outer " << std::endl;
                std::cout << outerVector << std::endl;

            }
        }
    }

    saveModel(sinkfile, word2id, innerVector, outerVector);
    return 0;
}
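The sampling(unidist, randomvalue) helper used for negative sampling above is not shown; it most likely treats the unigram distribution as a cumulative distribution and returns the index whose cumulative mass first exceeds the random value. A self-contained sketch of that idea using std::vector instead of the Eigen row vector (name and signature hypothetical):

#include <cstddef>
#include <vector>

// Inverse-CDF draw over a normalized distribution: return the first index
// whose cumulative probability exceeds randomvalue in [0, 1).
size_t sample_word(const std::vector<float> &unidist, double randomvalue)
{
    if (unidist.empty())
        return 0;
    double acc = 0.0;
    for (size_t i = 0; i < unidist.size(); ++i) {
        acc += unidist[i];
        if (randomvalue <= acc)
            return i;
    }
    return unidist.size() - 1;  // fall back to the last word
}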
Example No. 21
int main() 
{
    try {

    // Camera settings
	Camera camera;
	glm::vec3 vrp(0.0f, 0.0f, 10.0f);
	glm::vec3 vpn(0.0f, 0.0f, 1.0f);
	glm::vec3 vup(0.0f, 1.0f, 0.0f);
	glm::vec3 prp(0.0f, 0.0f, 100.0f);
	float F = 10.0f;
	float B = -80.0f;
	glm::vec2 lower_left(-20.0f, -20.0f);
	glm::vec2 upper_right(20.0f, 20.0f);
	camera = Camera(vrp, vpn, vup, prp, lower_left, upper_right, F, B);
	
	// Examples
	int curves = 4;    // Amount of examples
	BezierRow G[curves];
	G[0] = BezierRow(glm::vec3(-15.0f, -15.0f, 0.0f),
					 glm::vec3(-10.0f, 25.0f, 0.0f),
					 glm::vec3(10.0f, 25.0f, 0.0f),
					 glm::vec3(15.0f, -15.0f, 0.0f));
	G[1] = BezierRow(glm::vec3(-20.0f, 0.0f, 0.0f),
					 glm::vec3(-1.0f, 55.0f, 0.0f),
					 glm::vec3(1.0f, -55.0f, 0.0f),
					 glm::vec3(20.0f, 0.0f, 0.0f));
	G[2] = BezierRow(glm::vec3(-1.0f, -5.0f, 0.0f),
					 glm::vec3(-60.0f, 5.0f, 0.0f),
					 glm::vec3(60.0f,  5.0f, 0.0f),
					 glm::vec3(1.0f,  -5.0f, 0.0f));
	G[3] = BezierRow(glm::vec3(-10.0f, -5.0f, 0.0f),
					 glm::vec3(60.0f,   5.0f, 0.0f),
					 glm::vec3(-60.0f,  5.0f, 0.0f),
					 glm::vec3(10.0f,  -5.0f, 0.0f));

	int currentfigure = 3; // Set figure between 4 different examples

	// Decide whether to use sampling or subdivision
	int decider = 1;

	int Npoint = 0;
	std::vector<glm::vec3> points;

	// Sampling
	if(decider == 0){
	float t = 0.0f;
	float step = 12.0f;
	sampling(G[currentfigure], t, step, points);
	Npoint = step*2;
	}

	// Subdivision
	else if(decider == 1){
	DLB /= 8.0f;	
	DRB /= 8.0f;
	int n = 3; 				// Amount of curves (smoothness)
 	int npow = pow(2, n+1); // Amount of points
	subdivide(G[currentfigure], n, points);
	Npoint = npow;
	}
	else{
		printf("No method chosen\n"); return 1;
	}




	glm::mat4x4 CTM = camera.CurrentTransformationMatrix();
	std::cout << "CTM = " << std::endl << CTM << std::endl;


	// Initialize the graphics
	InitializeGLFW();
	GLFWwindow* Window = CreateWindow(WindowWidth, WindowHeight, WindowTitle.c_str());
	InitializeGLEW();
	InitializeOpenGL();
	glfwSwapBuffers(Window);

	// Read and Compile the vertex program vertextransform.vert
	GLuint vertexprogID = CreateGpuProgram("vertextransform.vert", GL_VERTEX_SHADER);

        // Read and Compile the fragment program linefragment.frag
	GLuint linefragmentprogID = CreateGpuProgram("linefragment.frag", GL_FRAGMENT_SHADER);

	// Create a lineshader program and Link it with the vertex and linefragment programs
	GLuint lineshaderID = CreateShaderProgram(vertexprogID, linefragmentprogID);
	
	// Now comes the OpenGL core part

	// This is where the curve is initialized
        // User data is in the global variable curveVertices, and the number of entries
	// is in Ncurvevertices

    // Make a VertexArrayObject - it is used by the VertexArrayBuffer, and it must be declared!
	GLuint CurveVertexArrayID;
	glGenVertexArrays(1, &CurveVertexArrayID);
	glBindVertexArray(CurveVertexArrayID);

	// Make a curvevertexbufferObject - it uses the previous VertexArrayBuffer!
	GLuint curvevertexbuffer;
	glGenBuffers(1, &curvevertexbuffer);
	glBindBuffer(GL_ARRAY_BUFFER, curvevertexbuffer);




	// Give our vertices to OpenGL.
	glBufferData(GL_ARRAY_BUFFER, Npoint * 3 * sizeof(float), &points[0], GL_STATIC_DRAW);
	

    // Validate the shader program
	ValidateShader(lineshaderID, "Validating the lineshader");

	// Get locations of Uniforms
	GLuint curvevertextransform   = glGetUniformLocation(lineshaderID, "CTM");
	GLuint curvefragmentcolor     = glGetUniformLocation(lineshaderID, "Color");	




	// Initialize Attributes
	GLuint curvevertexattribute = glGetAttribLocation(lineshaderID, "VertexPosition");
	glVertexAttribPointer(curvevertexattribute, 3, GL_FLOAT, GL_FALSE, 0, 0);

	// The main loop
	while (!glfwWindowShouldClose(Window)) {
	    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	    glUseProgram(lineshaderID);
		glm::mat4x4 CTM = camera.CurrentTransformationMatrix();
		glUniformMatrix4fv(curvevertextransform, 1, GL_FALSE, &CTM[0][0]);
		glUniform3f(curvefragmentcolor, 0.2f, 0.2f, 0.2f);

		glEnableVertexAttribArray(curvevertexattribute);
		glBindVertexArray(CurveVertexArrayID);  // This is very important! There are two "binds"!
		glDrawArrays(GL_LINES, 0, Npoint);

		glDisableVertexAttribArray(curvevertexattribute);
	    glUseProgram(0);

	    glfwSwapBuffers(Window);
	    std::stringstream errormessage;
	    errormessage << "End of loop: " << "assignment5.cpp" << ": " << __LINE__ << ": ";
	    ErrorCheck(errormessage.str());

	    glfwPollEvents();
	}
    }
    catch (std::exception const& exception) {
	std::cerr << "Exception: " << exception.what() << std::endl;
    }

    glfwTerminate();

    return 0;
}
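The sampling(G[currentfigure], t, step, points) call above is not shown; given that the result is drawn with GL_LINES and Npoint = step*2, it presumably evaluates the cubic Bezier curve at uniformly spaced parameter values and emits the endpoints of each line segment. A self-contained sketch of that idea, using a plain struct instead of glm and BezierRow (all names hypothetical):

#include <vector>

struct Vec3 { float x, y, z; };

// Evaluate a cubic Bezier curve with control points g[0]..g[3] at parameter t.
static Vec3 bezier_point(const Vec3 g[4], float t)
{
    float u  = 1.0f - t;
    float b0 = u * u * u;
    float b1 = 3.0f * u * u * t;
    float b2 = 3.0f * u * t * t;
    float b3 = t * t * t;
    Vec3 p;
    p.x = b0 * g[0].x + b1 * g[1].x + b2 * g[2].x + b3 * g[3].x;
    p.y = b0 * g[0].y + b1 * g[1].y + b2 * g[2].y + b3 * g[3].y;
    p.z = b0 * g[0].z + b1 * g[1].z + b2 * g[2].z + b3 * g[3].z;
    return p;
}

// Sample the curve into `steps` segments; each segment contributes its two
// endpoints, which matches drawing with GL_LINES as in Example No. 21.
void sample_bezier(const Vec3 g[4], int steps, std::vector<Vec3> &points)
{
    for (int i = 0; i < steps; ++i) {
        float t0 = static_cast<float>(i) / static_cast<float>(steps);
        float t1 = static_cast<float>(i + 1) / static_cast<float>(steps);
        points.push_back(bezier_point(g, t0));
        points.push_back(bezier_point(g, t1));
    }
}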