Example #1
Polygon* newPolygon(unsigned int nbVx, ...)
{
	unsigned int i;
	va_list ap;
	Polygon* newPoly = (Polygon*) malloc(sizeof(Polygon));

	/* Cannot call polyInit here because of the variadic argument list...

	Initialize the dynamic arrays */
	newPoly->Rigids = da();
	newPoly->Vertices = da();
	newPoly->InternalRigids = da();
	daReserve(&newPoly->Rigids, nbVx);
	daReserve(&newPoly->Vertices, nbVx);
	newPoly->Center = NULL;
	newPoly->Fixed = FALSE;
	newPoly->GridPos.Valid = FALSE;
	newPoly->Collided = FALSE;

	va_start(ap, nbVx);
	/* Add the vertices */
	for(i = 0; i < nbVx; i++)
		daAdd(&newPoly->Vertices, va_arg(ap, Vertex*));
	va_end(ap);
	/* Build the boundary, i.e. create a new Rigid from two consecutive
	Vertices of the list and the distance separating them, then add it
	 to the list */
	for(i = 0; i < nbVx; i++)
		daAdd(&newPoly->Rigids, newRigid((Vertex*)daGet(&newPoly->Vertices, i),
			(Vertex*)daGet(&newPoly->Vertices, (i+1)%nbVx),
			vec2Length(vec2Sub(vxGetPosition((Vertex*)daGet(&newPoly->Vertices, i)),
					vxGetPosition((Vertex*)daGet(&newPoly->Vertices, (i+1)%nbVx))))));
	return newPoly;
}
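The constructor above relies on C's variadic-argument mechanism (va_start/va_arg/va_end) to accept an arbitrary number of Vertex pointers. A minimal self-contained sketch of that mechanism follows, with plain ints standing in for the Vertex* arguments and printf in place of daAdd; everything in it is illustrative and not part of the library above.

#include <cstdarg>
#include <cstdio>

/* Hedged sketch: the same va_start/va_arg pattern as newPolygon, but with
   ints standing in for the Vertex* arguments and printf in place of daAdd. */
static void print_values(unsigned int count, ...)
{
    va_list ap;
    va_start(ap, count);                      /* count is the last named parameter */
    for (unsigned int i = 0; i < count; i++)
        std::printf("%d\n", va_arg(ap, int)); /* fetch the next variadic argument */
    va_end(ap);
}

int main()
{
    print_values(3, 10, 20, 30);              /* prints 10, 20, 30 */
    return 0;
}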
Example #2
void polyInit(Polygon* P, unsigned int nbVx, ...)
{
	unsigned int i;
	va_list ap;

	/* Initialize the dynamic arrays */
	P->Rigids = da();
	P->Vertices = da();
	P->InternalRigids = da();
	daReserve(&P->Rigids, nbVx);
	daReserve(&P->Vertices, nbVx);
	P->Center = NULL;
	P->Fixed = FALSE;
	P->GridPos.Valid = FALSE;
	P->Collided = FALSE;

	va_start(ap, nbVx);
	/* Add the vertices */
	for(i = 0; i < nbVx; i++)
		daAdd(&P->Vertices, va_arg(ap, Vertex*));
	va_end(ap);
	/* Build the boundary, i.e. create a new Rigid from two consecutive
	Vertices of the list and the distance separating them, then add it
	 to the list */
	for(i = 0; i < nbVx; i++)
		daAdd(&P->Rigids, newRigid((Vertex*)daGet(&P->Vertices, i),
			(Vertex*)daGet(&P->Vertices, (i+1)%nbVx),
			vec2Length(vec2Sub(vxGetPosition((Vertex*)daGet(&P->Vertices, i)),
					vxGetPosition((Vertex*)daGet(&P->Vertices, (i+1)%nbVx))))));
}
Example #3
Polygon* newPolygonL(List L)
{
	unsigned int i = 0, nbVx = lstCount(&L);
	Polygon* newPoly = (Polygon*) malloc(sizeof(Polygon));
	Node* it = lstFirst(&L);
	/* Initialize the dynamic arrays */
	newPoly->Rigids = da();
	newPoly->Vertices = da();
	newPoly->InternalRigids = da();
	daReserve(&newPoly->Rigids, nbVx);
	daReserve(&newPoly->Vertices, nbVx);
	newPoly->Center = NULL;
	newPoly->Fixed = FALSE;
	newPoly->GridPos.Valid = FALSE;
	newPoly->Collided = FALSE;

	/* Add the vertices */
	while(!nodeEnd(it))
	{
		daAdd(&newPoly->Vertices, (Vertex*) nodeGetData(it));
		it = nodeGetNext(it);
	}

	/* Build the boundary, i.e. create a new Rigid from two consecutive
	Vertices of the list and the distance separating them, then add it
	 to the list */
	for(i = 0; i < nbVx; i++)
		daAdd(&newPoly->Rigids, newRigid((Vertex*)daGet(&newPoly->Vertices, i),
			(Vertex*)daGet(&newPoly->Vertices, (i+1)%nbVx),
			vec2Length(vec2Sub(vxGetPosition((Vertex*)daGet(&newPoly->Vertices, i)),
					vxGetPosition((Vertex*)daGet(&newPoly->Vertices, (i+1)%nbVx))))));
	return newPoly;
}
void WeightedDerivativesToRefined::apply_index(Model *m,
                                       ParticleIndex pi) const {
  // retrieving pis by ref if possible is cumbersome but is required for speed
  ParticleIndexes pis_if_not_byref;
  ParticleIndexes const* pPis;
  if(refiner_->get_is_by_ref_supported()){
    ParticleIndexes const& pis =
      refiner_->get_refined_indexes_by_ref(m, pi);
    pPis = &pis;
  } else{
    pis_if_not_byref = refiner_->get_refined_indexes(m, pi);
    pPis = &pis_if_not_byref;
  }
  ParticleIndexes const& pis = *pPis;
  //  Prepare derivative accumulator to normalize by total weight
  Float total_weight;
  if(w_ != FloatKey()){
    total_weight = m->get_attribute(w_, pi);
  } else {
    total_weight = pis.size();
  }
  DerivativeAccumulator da( 1.0 / total_weight);
  // read K values for each key in keys_
  Floats Ks(keys_.size());
  for (unsigned int j = 0; j < Ks.size(); ++j){
    Ks[j] = m->get_derivative(keys_[j], pi);
  }
  // add K to each refined particle's derivative, reweighted by its weight and normalized with da
  for (unsigned int i = 0; i < pis.size(); ++i) {
    Float w = m->get_attribute(w_, pis[i]);
    for (unsigned int j = 0; j < keys_.size(); ++j) {
      m->add_to_derivative(keys_[j], pis[i], w * Ks[j], da);
    }
  }
}
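The function above distributes the parent particle's accumulated derivatives to its refined particles, each child receiving a share proportional to its own weight divided by the total weight. Below is a standalone sketch of just that reweighting rule, with plain vectors replacing Model/ParticleIndex; all names and values in it are illustrative, not part of the library code.

#include <vector>

// Hedged sketch: child i receives (w_i / total_w) * K_j for every derivative
// component K_j read from the parent, mirroring the nested loops above.
static void distribute_derivatives(const std::vector<double>& parent_derivs,   // K values
                                   const std::vector<double>& child_weights,   // w_i
                                   double total_weight,                        // W, or child count
                                   std::vector<std::vector<double> >& child_derivs)
{
    for (std::size_t i = 0; i < child_weights.size(); ++i)
        for (std::size_t j = 0; j < parent_derivs.size(); ++j)
            child_derivs[i][j] += child_weights[i] * parent_derivs[j] / total_weight;
}

int main()
{
    std::vector<double> K = {2.0, -4.0};                   // parent derivatives
    std::vector<double> w = {1.0, 3.0};                    // child weights, total 4.0
    std::vector<std::vector<double> > child(2, std::vector<double>(2, 0.0));
    distribute_derivatives(K, w, 4.0, child);              // child[1] gets 3/4 of each K_j
    return 0;
}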
int main()
{
    int i,n,t,ans;
    //scanf("%d",&t);
    //while(t-->0)
    {
      while (scanf("%s",s)==1)
      {

        n=strlen(s);
        for(i=0;i<n;i++) r[i]=s[i];
        r[n]=0;
        da(r,sa,n+1,128);
        calheight(r,sa,n);

        for (int i = 0;i < n;i++)
            printf("%s\n",s+sa[i]);
        puts("");

        ans=n*(n+1)/2;
        for(i=1;i<=n;i++) ans-=height[i];
        printf("%d\n",ans);
      }
    }
    return 0;
}
Example #6
int main() {
    int n,k;
    scanf("%d%d",&n,&k);
    for(int i=0;i<n;i++) {scanf("%d",&r[i]);}
    r[n]=0;
    da(r,sa,rank,height,n,MAXM);
    /*
    printa(sa,n);
    printa(rank,n);
    printa(height,n);
    */
    int low=1,high=n,mid;
    while(low<high) {
        mid=(low+high+1)/2;
        //printf("%d %d %d\n",low,mid,high);
        if(valid(mid,n,k)) {
            low=mid;
        } else {
            high=mid-1;
        }
    }
    //if(flag==0) printf("%d\n",)
    printf("%d\n",low);
    return 0;
}
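Example #6 performs a binary search for the largest feasible answer; the upper-mid form mid=(low+high+1)/2 is what keeps the loop from stalling when low==high-1. A self-contained sketch of that search pattern follows, with a placeholder predicate standing in for valid(mid,n,k); the threshold value is made up for illustration.

#include <cstdio>

// Placeholder for valid(mid, n, k); assumed monotone: true up to some
// threshold, false afterwards (here the threshold is 42).
static bool feasible(int x) { return x <= 42; }

int main()
{
    int low = 1, high = 1000;
    while (low < high) {
        int mid = (low + high + 1) / 2;  // round up so low can still advance
        if (feasible(mid)) low = mid;    // mid works: the answer is at least mid
        else high = mid - 1;             // mid fails: the answer is below mid
    }
    std::printf("%d\n", low);            // prints 42, the largest feasible value
    return 0;
}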
Example #7
void ActionWithGrid::createGrid( const std::string& type, const std::string& inputstr ){
  // Start creating the input for the grid
  std::string vstring = inputstr; 
  if( keywords.exists("KERNEL") ){
      std::string vconc; parse("CONCENTRATION",vconc);
      if( vconc.length()>0 ){
          vstring += " TYPE=fibonacci CONCENTRATION=" + vconc;   
      } else {
          std::string kstring; parse("KERNEL",kstring);
          if( kstring=="DISCRETE" ) vstring += " KERNEL=" + kstring;
          else vstring += " KERNEL=" + kstring + " " + getKeyword("BANDWIDTH");
      }
  }
  vesselbase::VesselOptions da("mygrid","",-1,vstring,this);
  Keywords keys; gridtools::AverageOnGrid::registerKeywords( keys );
  vesselbase::VesselOptions dar( da, keys );
  if( type=="histogram" ){
     mygrid = new HistogramOnGrid(dar); 
  } else if( type=="average" ){
     mygrid = new AverageOnGrid(dar); 
  } else if( type=="grid" ){
     mygrid = new GridVessel(dar); 
  } else {
     plumed_merror("no way to create grid of type " + type );
  } 
}
Example #8
int main(int, char*[])
{
    /// leveraging our data abstraction
    tut::data_abstraction da(10);
    std::cout << tut::func(da) << std::endl;
    std::cout << tut::func_ex(da) << std::endl;

    /// we don't need std::bind for stand-alone functions - they're much easier to pass around than
    /// member functions.
    const auto fp(&tut::func);
    std::cout << fp(da) << std::endl;

    /// leveraging structural polymorphism
    std::cout << tut::pow(da) << std::endl;
    std::cout << tut::pow_ex(da) << std::endl;

    /// leveraging static polymorphism
    std::cout << tut::op(3, 5) << std::endl;
    std::cout << tut::op(3.0f, 5.0f) << std::endl;
    std::cout << tut::op('3', '5') << std::endl;

    /// make a widget, and cast it back and forth
    auto widget = std::make_unique<tut::widget>(12345, 101.0f, true);
    auto castable = das::cast_unique<das::castable>(std::move(widget));
    auto widget_again = das::cast_unique<tut::widget>(std::move(castable));

    /// check its replaceability :)
    std::cout << tut::should_replace_with(*widget_again, 100.0f, 12345) << std::endl;
    return 0;
}
Example #9
int main()
{
    int i,j=0,k,n;
    int min,mid,max;
    scanf("%d",&n);
    while(n!=0)
    {
      n--;
      scanf("%d",&j);
      for(i=0;i<n;i++)
      {
        scanf("%d",&k);
        r[i]=k-j+100;
        j=k;
      }
      r[n]=0;
      da(r,sa,n+1,200);
      calheight(r,sa,n);
      min=1;max=n/2;
      while(min<=max)
      {
        mid=(min+max)/2;
        if(check(sa,n,mid)) min=mid+1;
        else max=mid-1;
      }
      if(max>=4) printf("%d\n",max+1);
      else printf("0\n");
      scanf("%d",&n);
    }
    return 0;
}
Example #10
vesselbase::StoreDataVessel* MultiColvarBase::buildDataStashes( const bool& allow_wcutoff, const double& wtol ){
  // Check if vessels have already been setup
  for(unsigned i=0;i<getNumberOfVessels();++i){
     StoreColvarVessel* ssc=dynamic_cast<StoreColvarVessel*>( getPntrToVessel(i) );
     if(ssc){
        if( allow_wcutoff && !ssc->weightCutoffIsOn() ) error("Cannot have more than one data stash with different properties");
        if( !allow_wcutoff && ssc->weightCutoffIsOn() ) error("Cannot have more than one data stash with different properties");
        return ssc;
     }
  }
 
  // Setup central atoms
  vesselbase::VesselOptions da("","",0,"",this);
  mycatoms=new StoreCentralAtomsVessel(da);
  if( allow_wcutoff ) mycatoms->setHardCutoffOnWeight( wtol );
  addVessel(mycatoms); 

  // Setup store values vessel
  vesselbase::VesselOptions ta("","",0,"",this);
  myvalues=new StoreColvarVessel(ta);   
  if( allow_wcutoff ) myvalues->setHardCutoffOnWeight( wtol );
  addVessel(myvalues);

  // Make sure resizing of vessels is done
  resizeFunctions();
  return myvalues;
}
Example #11
dense_table_t create_rand_dense_table(unsigned v0_id, unsigned v1_id, unsigned v2_id) 
{
  variable_t v0(v0_id, 4);
  variable_t v1(v1_id, 3);
  variable_t v2(v2_id, 2);

  std::vector<variable_t> vars;
  vars.push_back(v0);
  vars.push_back(v1);
  vars.push_back(v2);
  domain_t domain(vars); 

  dense_table_t dt(domain);
  assignment_t da(domain);

  for(int i=0; i < domain.var(0).size(); ++i) {
    da.set_asg(v0, i);
    for(int j=0; j < domain.var(1).size(); ++j) {
      da.set_asg(v1, j);
      for(int k=0; k < domain.var(2).size(); ++k) {
        da.set_asg(v2, k);

        if(rand() % 100 <= 20) {
          dt.set_logP(da, -1 * (rand() % 100));
        }
      }
    }
  }

  return dt;
}
Example #12
double bsmodel_4_vr::d(int pno, const double params[]) {
	if (pno == 0) return(vega(params));
	else if (pno == 1) return(rho(params));
	else if (pno == 2) return(da(params));
	else if (pno == 3) return(db(params));
	else return(0.0);
}
Example #13
Sound SoundRecorderArts::sound()
{
	Sound s;
	uint BytePS=BITS/8;
	uint length=m_data.size()/BytePS;
	QMemArray<Q_INT32> da(length);
	s.max=0;
	s._fs=FS;
	for(uint f=0;f<length; f++)
	{
#if BITS==8
		int nb=(unsigned char)(m_data[f])  -128;
#elif BITS==16
		int nb=(m_data[2*f] &0x000000FF )  |  ( (m_data[2*f+1] &0x000000FF ) << 8 )    ;
		if(nb & (1<< 15)) 
			nb = nb-(1<<16);
#else
	#error  BITS is not 16 or 8
#endif
		if(s.max < (uint)ABS(nb))
		{
			s.max= (uint)ABS(nb);
		}
		da[f]=nb;
	}
	s.data=da;
	return s;
}
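The BITS==16 branch above reassembles a little-endian sample from two bytes and then sign-extends it by hand. A minimal standalone sketch of that decoding step; the byte values are made up for illustration.

#include <cstdio>

int main()
{
    // Two bytes of a little-endian signed 16-bit sample; 0xFFFE encodes -2.
    unsigned char lo = 0xFE, hi = 0xFF;
    int nb = (lo & 0xFF) | ((hi & 0xFF) << 8); // 0..65535 at this point
    if (nb & (1 << 15))                        // top bit set: the value is negative
        nb -= (1 << 16);                       // manual sign extension
    std::printf("%d\n", nb);                   // prints -2
    return 0;
}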
Example #14
void GrVertexBatch::onDraw(GrBatchFlushState* state) {
    int uploadCnt = fInlineUploads.count();
    int currUpload = 0;

    // Iterate over all the drawArrays. Before issuing the draws in each array, perform any inline
    // uploads.
    for (SkTLList<DrawArray>::Iter da(fDrawArrays); da.get(); da.next()) {
        state->advanceLastFlushedToken();
        while (currUpload < uploadCnt &&
               fInlineUploads[currUpload]->lastUploadToken() <= state->lastFlushedToken()) {
            fInlineUploads[currUpload++]->upload(state->uploader());
        }
        const GrVertexBatch::DrawArray& drawArray = *da.get();
        GrProgramDesc desc;
        const GrPipeline* pipeline = this->pipeline();
        const GrPrimitiveProcessor* primProc = drawArray.fPrimitiveProcessor.get();
        state->gpu()->buildProgramDesc(&desc, *primProc, *pipeline, fBatchTracker);
        GrGpu::DrawArgs args(primProc, pipeline, &desc, &fBatchTracker);

        int drawCount = drawArray.fDraws.count();
        for (int i = 0; i < drawCount; i++) {
            state->gpu()->draw(args,  drawArray.fDraws[i]);
        }
    }
}
Example #15
/*! \section example33 Example 33: Oscillating Search in very high-dimensional feature selection.

Very high-dimensional feature selection in text categorization, with 
dimensionality on the order of 10000 or 100000.
The standard approach is BIF, yet we show here that a non-trivial
search procedure (OS) can be feasible. Here OS is applied in its
fastest form (delta=1), initialized by means of BIF. We use Multinomial
Bhattacharyya distance as the feature selection criterion (it has been
shown capable of outperforming traditional tools like Information
Gain etc., cf. Novovicova et al., LNCS 4109, 2006). 
A randomly sampled 50% of the data is used for multinomial
model parameter estimation in the actual feature selection
process; another (disjoint) 40% of the data is randomly sampled for testing. 
The selected subset is eventually used for validation: a multinomial Naive Bayes
classifier is trained on the training data restricted to the selected subset
and its classification accuracy is finally estimated on the test data.
*/
int main()
{
	try{
	typedef double RETURNTYPE; 	typedef double DATATYPE;  typedef double REALTYPE;
	typedef unsigned int IDXTYPE;  typedef unsigned int DIMTYPE;  typedef short BINTYPE;
	typedef FST::Subset<BINTYPE, DIMTYPE> SUBSET;
	typedef FST::Data_Intervaller<std::vector<FST::Data_Interval<IDXTYPE> >,IDXTYPE> INTERVALLER;
	typedef boost::shared_ptr<FST::Data_Splitter<INTERVALLER,IDXTYPE> > PSPLITTER;
	typedef FST::Data_Splitter_RandomRandom<INTERVALLER,IDXTYPE,BINTYPE> SPLITTERRR;
	//typedef FST::Data_Accessor_Splitting_MemTRN<DATATYPE,IDXTYPE,INTERVALLER> DATAACCESSOR; // uncomment for TRN data format
	typedef FST::Data_Accessor_Splitting_MemARFF<DATATYPE,IDXTYPE,INTERVALLER> DATAACCESSOR; // uncomment for ARFF data format
	typedef FST::Criterion_Multinomial_Bhattacharyya<RETURNTYPE,DATATYPE,REALTYPE,IDXTYPE,DIMTYPE,SUBSET,DATAACCESSOR> BHATTMULTINOMIALDIST;
	typedef FST::Classifier_Multinomial_NaiveBayes<RETURNTYPE,DATATYPE,REALTYPE,IDXTYPE,DIMTYPE,SUBSET,DATAACCESSOR> CLASSIFIERMULTINOMIAL;
	typedef FST::Sequential_Step_Straight<RETURNTYPE,DIMTYPE,SUBSET,BHATTMULTINOMIALDIST> EVALUATOR;

		std::cout << "Starting Example 33: Oscillating Search in very high-dimensional feature selection..." << std::endl;
	// randomly sample 50% of data for training and randomly sample a (disjoint) 40% for independent testing of final classification performance 
		PSPLITTER dsp_outer(new SPLITTERRR(1, 50, 40)); // (there will be one outer randomized split only)
	// do not scale data
		boost::shared_ptr<FST::Data_Scaler<DATATYPE> > dsc(new FST::Data_Scaler_void<DATATYPE>());
	// set-up data access
		boost::shared_ptr<std::vector<PSPLITTER> > splitters(new std::vector<PSPLITTER>); splitters->push_back(dsp_outer);
		boost::shared_ptr<DATAACCESSOR> da(new DATAACCESSOR("data/reuters_apte.arff",splitters,dsc));
		da->initialize();
	// initiate access to split data parts
		da->setSplittingDepth(0); if(!da->getFirstSplit()) throw FST::fst_error("50/40 random data split failed.");
	// initiate the storage for subset to-be-selected
		boost::shared_ptr<SUBSET> sub(new SUBSET(da->getNoOfFeatures()));
	// set-up multinomial Bhattacharyya distance criterion
		boost::shared_ptr<BHATTMULTINOMIALDIST> dmultinom(new BHATTMULTINOMIALDIST);
		dmultinom->initialize(da); // (initialization = multinomial model parameter estimation on training data)
	// set-up individual feature ranking to serve as OS initialization
		FST::Search_BIF<RETURNTYPE,DIMTYPE,SUBSET,BHATTMULTINOMIALDIST> srch_bif;
	// set-up the standard sequential search step object (option: hybrid, ensemble, etc.)
		boost::shared_ptr<EVALUATOR> eval(new EVALUATOR);
	// set-up the Oscillating Search procedure in its fastest setting
		FST::Search_OS<RETURNTYPE,DIMTYPE,SUBSET,BHATTMULTINOMIALDIST,EVALUATOR> srch(eval);
		srch.set_delta(1);
	// target subset size must be set because a) Bhattacharyya is monotonic with respect to subset size,
	// b) in very high-dimensional problems d-optimizing search is not feasible due to search complexity
		DIMTYPE target_subsize=500;
	// run the search - first find the initial subset by means of BIF, then improve it by means of OS
		std::cout << "Feature selection setup:" << std::endl << *da << std::endl << srch_bif << std::endl << srch << std::endl << *dmultinom << std::endl << std::endl;
		RETURNTYPE critval_train, critval_test;
		if(!srch_bif.search(target_subsize,critval_train,sub,dmultinom,std::cout)) throw FST::fst_error("Search (BIF) not finished.");
		std::cout << std::endl << "Initialization result: " << std::endl << *sub << "Criterion value=" << critval_train << std::endl << std::endl;
		if(!srch.search(target_subsize,critval_train,sub,dmultinom,std::cout)) throw FST::fst_error("Search (OS) not finished.");
		std::cout << std::endl << "Search result: " << std::endl << *sub << "Criterion value=" << critval_train << std::endl;
	// (optionally) validate the result by estimating Multinomial Naive Bayes classifier accuracy on the selected feature sub-space on independent test data
		boost::shared_ptr<CLASSIFIERMULTINOMIAL> cmultinom(new CLASSIFIERMULTINOMIAL);
		cmultinom->initialize(da);
		cmultinom->train(da,sub);
		cmultinom->test(critval_test,da);
		std::cout << "Validated Multinomial NaiveBayes accuracy=" << critval_test << std::endl << std::endl;
	}
	catch(FST::fst_error &e) {std::cerr<<"FST ERROR: "<< e.what() << ", code=" << e.code() << std::endl;}
	catch(std::exception &e) {std::cerr<<"non-FST ERROR: "<< e.what() << std::endl;}
	return 0;
}
Example #16
void
dump_code(void)
{
    fdump();			/* dumps all user functions */
    if (begin_start) {
	fprintf(stdout, "BEGIN\n");
	da(begin_start, stdout);
    }
    if (end_start) {
	fprintf(stdout, "END\n");
	da(end_start, stdout);
    }
    if (main_start) {
	fprintf(stdout, "MAIN\n");
	da(main_start, stdout);
    }
}
Example #17
BridgeVessel* ActionWithVessel::addBridgingVessel( ActionWithVessel* tome ){
  VesselOptions da("","",0,"",this); 
  BridgeVessel* bv=new BridgeVessel(da);
  bv->setOutputAction( tome );
  functions.push_back( dynamic_cast<Vessel*>(bv) );
  resizeFunctions();
  return bv; 
}
void LDAPConnectionTest::ConnectionTest()
{
	StartTrace(LDAPConnectionTest.ConnectionTest);
	ROAnything cConfig;
	AnyExtensions::Iterator<ROAnything> aEntryIterator(GetTestCaseConfig());
	while ( aEntryIterator.Next(cConfig) ) {
		for ( long l = 0; l < cConfig["NumberOfConnects"].AsLong(1); l++ ) {
			Anything params;
			params["Server"] 			= cConfig["LDAPServer"].AsString();
			params["Port"] 				= cConfig["LDAPPort"].AsLong();
			params["Timeout"] 			= cConfig["LDAPTimeout"].AsLong();
			params["ConnectionTimeout"] = cConfig["LDAPConnectionTimeout"].AsLong(0);
			params["BindName"] 			= cConfig["LDAPBindName"].AsString();
			params["BindPW"] 			= cConfig["LDAPBindPW"].AsString();
			params["PooledConnections"]	= cConfig["LDAPPooledConnections"].AsLong(0L);
			params["RebindTimeout"]		= cConfig["LDAPRebindTimeout"].AsLong(3600L);
			params["TryAutoRebind"]		= cConfig["LDAPTryAutoRebind"].AsLong(0L);
			params["MaxConnections"]	= cConfig["LDAPMaxConnections"].AsLong(2L);

			Context ctx;
			ParameterMapper pm("ConnectionTestParameterMapper");
			ResultMapper rm("ConnectionTestResultMapper");
			pm.Initialize("ParameterMapper");
			rm.Initialize("ResultMapper");
			String da("DataAccess_");
			da << aEntryIterator.Index();

			LDAPErrorHandler eh(ctx, &pm, &rm, da);
			eh.PutConnectionParams(params);

			// connect
			LDAPConnection lc(params);
			LDAPConnection::EConnectState eConnectState = lc.DoConnect(params, eh);
			String result(LDAPConnection::ConnectRetToString(eConnectState));
			Trace("Connect result: " << result);
			// check for errors
			Anything error;
			if ( !eh.GetError(error) ) {
				Trace("No error reported.");
			} else {
				TraceAny(error, "Error description:");
			}

			// compare result and expected error
			assertEqual(cConfig["ConnectRet"].AsString(), result);
			bool ret = LDAPConnection::IsConnectOk(eConnectState);
			assertEqual(cConfig["ConnectIsOk"].AsBool(1), ret);
			if (!ret) {
				String where;
				aEntryIterator.SlotName(where);
				assertAnyCompareEqual(cConfig["Error"], error, String(getConfigFileName()) << ":" << where, '.',':');
			}
			// now release sema and lock
			lc.ReleaseHandleInfo();
		}
	}
}
Example #19
void ActionWithVessel::addVessel( const std::string& name, const std::string& input, const int numlab ){
  VesselOptions da(name,"",numlab,input,this);
  Vessel* vv=vesselRegister().create(name,da);
  FunctionVessel* fv=dynamic_cast<FunctionVessel*>(vv);
  if( fv ){
      std::string mylabel=Vessel::transformName( name );
      plumed_massert( keywords.outputComponentExists(mylabel,false), "a description of the value calculated by vessel " + name + " has not been added to the manual"); 
  } 
  addVessel(vv);
}
Example #20
void DTALikelihood::calculateStatistics(QVector<QVector3D> &points, LGraph& graph)
{
    DistanceToAtom da(m_numberOfRandomVectors); // voxes_per_dimension
    if (points.size()==0)
        return;
    da.compute(points, m_cutoff); // cutoff
    QVector<QPointF> hist = da.histogram(m_histogramBins); // bins
    graph.fromQVector(hist);
    graph.normalizeArea();

}
Example #21
void TestExampleDA(DAExposeFunc ef)
{
    InitGtkmm();

    ExampleDA da(ef);

    Gtk::Window win;
    win.set_default_size(400, 400);
    win.add(da);

    RunWindow(win);
}
int main(){
    char str[maxn];
    int i, m = 30, ans, len;
    while(scanf("%s",str)!=EOF){
        len = strlen(str);
        for(i=0;i<=len;i++) num[i]=str[i]-'a'+1;
        num[len]=0;
        da(num, len + 1, m);
        calHeight(num, len);
    }
    return 0;
}
Example #23
StoreDataVessel* ActionWithVessel::buildDataStashes( const bool& allow_wcutoff, const double& wtol ){
  if(mydata) return mydata;
  
  VesselOptions da("","",0,"",this);
  StoreDataVessel* mm=new StoreDataVessel(da);
  if( allow_wcutoff ) mm->setHardCutoffOnWeight( wtol );
  addVessel(mm);

  // Make sure resizing of vessels is done
  resizeFunctions();

  return mydata;
}
Example #24
std::shared_ptr<base::IDataArray> BlockFS::createDataArray(const std::string &name, const std::string &type,
        nix::DataType data_type, const NDSize &shape) {
    if (name.empty()) {
        throw EmptyString("Block::createDataArray empty name provided!");
    }
    if (hasDataArray(name)) {
        throw DuplicateName("Block::createDataArray: an entity with the same name already exists!");
    }
    std::string id = util::createId();
    DataArrayFS da(file(), block(), data_array_dir.location(), id, type, name);
    da.createData(data_type, shape);
    return std::make_shared<DataArrayFS>(da);
}
bool FlowController::ExecDataAccess(Context &ctx, long &execTime) {
	StartTrace(FlowController.ExecDataAccess);
	execTime = 0;
	bool boRet = false;
	if (ctx.GetQuery().IsDefined("DataAccess")) {
		String daName = ctx.GetQuery()["DataAccess"].AsString();
		Trace("using DA: " << daName);
		DataAccess da(daName);
		DiffTimer timer;
		boRet = da.StdExec(ctx);
		execTime = timer.Diff();
	}
	return boRet;
}
Example #26
	decltype(auto) activate_func_rectifier_test() {
		neu::rectifier a;
		neu::differential<neu::rectifier> da;
		const auto cpu_x = neu::cpu_vector{0.f, -0.1f, 0.1f, -100.f, 100.f};
		const auto gpu_x = neu::to_gpu_vector(cpu_x);
		{
			auto y = neu::to_cpu_vector(a(gpu_x));
			assert((y == neu::cpu_vector{0.f, 0.f, 0.1f, 0.f, 100.f}));
		}
		{
			auto dy = neu::to_cpu_vector(da(gpu_x));
			assert((dy == neu::cpu_vector{0.f, 0.f, 1.f, 0.f, 1.f}));
		}
	}
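The assertions above are just ReLU and its (sub)derivative applied element-wise. A CPU-only sketch reproducing the same expected values without the neu GPU vector types; the derivative at zero is taken as 0 here, matching the asserted output.

#include <cassert>
#include <vector>

int main()
{
    std::vector<float> x = {0.f, -0.1f, 0.1f, -100.f, 100.f};
    std::vector<float> y(x.size()), dy(x.size());
    for (std::size_t i = 0; i < x.size(); ++i) {
        y[i]  = x[i] > 0.f ? x[i] : 0.f; // rectifier
        dy[i] = x[i] > 0.f ? 1.f  : 0.f; // its derivative, 0 at and below zero
    }
    assert((y  == std::vector<float>{0.f, 0.f, 0.1f, 0.f, 100.f}));
    assert((dy == std::vector<float>{0.f, 0.f, 1.f, 0.f, 1.f}));
    return 0;
}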
Example #27
bool Module::processImportStatements() {
  // If not already done so, add the list of imported symbols to the
  // module's namespace.
  if (passes_.begin(ResolveImportsPass)) {
    DefnAnalyzer da(this, this, this, NULL);
    for (ASTNodeList::const_iterator it = imports_.begin(); it != imports_.end(); ++it) {
      da.importIntoScope(cast<ASTImport>(*it), this);
    }

    passes_.finish(ResolveImportsPass);
  }

  return true;
}
Example #28
///////////////////////////////////////////////////////////////////////////
//optimization enabled, data_stream, dag, peephole
int main(int argc, char* argv[])
{
	std::cout<<"in compile"<<std::endl;
	std::cout<<"please input file name"<<std::endl;
//	char name[1024];
	std::stringstream ss(std::stringstream::in|std::stringstream::out);
	ss<<"test.txt";
//	scanf("%s",name);
	FILE* in=fopen(ss.str().c_str(),"r");
	if(!in) {std::cout<<"file not found"<<std::endl; return 0;}
	tokenizer tk(in);
	errorlist* el = new errorlist();
	SSTable* table = new SSTable();
	InsCtrl* ins = new InsCtrl(*table);

	Parser ps(tk,*el);
	TNode* node=ps.parse();
//	std::cout<<*node;
 	fclose(in);
	if(el->getErrorNum()==0)
		node->visit(*table,*ins,*el);
	else
	{ el->PrintError();return 0;}
	ins->reorder_all_ins();
	ins->print_all_ins();

	if(el->getErrorNum()!=0)
	{el->PrintError(); return 0;}
	el->PrintWarning();	
	std::ofstream file("block.txt",std::ios::out);
	std::vector<FBlock*> blo=ins->get_all_func_block();
	for(int i=0;i<blo.size();i++)
	{
		file<<*blo[i];
	}
	Dag da(ins);	
	Data_stream* ds=NULL;
	ds=new Data_stream(ins);
	
	asm_translator tl(table,ins,ds);
	std::ofstream ff("test.asm",std::ios::out);
	tl.translate(ff);
	system("ml /c /coff test.asm");
	system("link /subsystem:console test.obj");
	std::cout<<"---------------------running-----------------------"<<std::endl;
	system("test.exe");

	return 0;	
}
Example #29
TEST(TestMassAtomic, Add)
{
  long lNumber = 0;
  long toAdd = 10;
  boost::shared_array<thread> t;
  t.reset(new thread[NUMTHREADS]);
  DoAdd da(&lNumber,toAdd);
  for(size_t i=0; i<NUMTHREADS; i++)
    t[i] = thread(da);

  for(size_t i=0; i<NUMTHREADS; i++)
    t[i].join();

  EXPECT_EQ((NUMTHREADS * TESTNUM) * toAdd, lNumber);
 }
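Judging by the exact expected total, DoAdd presumably performs a race-free (atomic) increment on *lNumber, toAdd at a time, TESTNUM times per thread. A standalone sketch of that invariant using std::atomic and std::thread instead of the project's DoAdd functor and boost threads; the thread and iteration counts below are made up.

#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

int main()
{
    const int kThreads = 8, kIters = 1000, kToAdd = 10;
    std::atomic<long> number(0);
    std::vector<std::thread> threads;
    for (int i = 0; i < kThreads; ++i)
        threads.emplace_back([&] {
            for (int j = 0; j < kIters; ++j)
                number.fetch_add(kToAdd);            // race-free accumulation
        });
    for (std::thread& t : threads) t.join();
    assert(number.load() == long(kThreads) * kIters * kToAdd);
    return 0;
}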
// [-Inf -Inf -3   -Inf]
// [-15  -4   -Inf -23 ]
// [-Inf -Inf -Inf -Inf]
//
// [-Inf -Inf -Inf -Inf]
// [-20  -12  -19  -78 ]
// [-Inf -Inf -Inf -32 ]
void create_data_vector(std::vector<std::pair<size_t, double> > &data, 
    const domain_t &dom) 
{
  {
  size_t sa[] = {2,1,1}; 
  assignment_t da(dom, std::vector<size_t>(sa, sa+3));
  data.push_back(std::make_pair(da.linear_index(), -19));
  } {  
  size_t sa[] = {3,2,1}; 
  assignment_t da(dom, std::vector<size_t>(sa, sa+3));
  data.push_back(std::make_pair(da.linear_index(), -32));
  } {
  size_t sa[] = {2,0,0}; 
  assignment_t da(dom, std::vector<size_t>(sa, sa+3));
  data.push_back(std::make_pair(da.linear_index(), -3));
  } {
  size_t sa[] = {3,1,0}; 
  assignment_t da(dom, std::vector<size_t>(sa, sa+3));
  data.push_back(std::make_pair(da.linear_index(), -23));
  } {
  size_t sa[] = {3,1,1}; 
  assignment_t da(dom, std::vector<size_t>(sa, sa+3));
  data.push_back(std::make_pair(da.linear_index(), -78));
  } {
  size_t sa[] = {1,1,1}; 
  assignment_t da(dom, std::vector<size_t>(sa, sa+3));
  data.push_back(std::make_pair(da.linear_index(), -12));
  } {
  size_t sa[] = {1,1,0}; 
  assignment_t da(dom, std::vector<size_t>(sa, sa+3));
  data.push_back(std::make_pair(da.linear_index(), -4));
  } {
  size_t sa[] = {0,1,0}; 
  assignment_t da(dom, std::vector<size_t>(sa, sa+3));
  data.push_back(std::make_pair(da.linear_index(), -15));
  } {  
  size_t sa[] = {0,1,1}; 
  assignment_t da(dom, std::vector<size_t>(sa, sa+3));
  data.push_back(std::make_pair(da.linear_index(), -20));
  }
}
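Both snippets that use assignment_t build a tuple of per-variable values over the (4, 3, 2) domain from Example #11 and then flatten it with linear_index(). Below is a minimal mixed-radix sketch of such a flattening; the first-variable-fastest ordering used here is an assumption and may differ from the actual library's convention.

#include <cstdio>

int main()
{
    const int card[3] = {4, 3, 2};       // cardinalities of v0, v1, v2 (Example #11)
    int asg[3] = {2, 1, 1};              // one of the assignments from the snippet above
    // Hedged sketch: first-variable-fastest mixed-radix flattening.
    int linear = asg[0] + card[0] * (asg[1] + card[1] * asg[2]);
    std::printf("%d\n", linear);         // 2 + 4*(1 + 3*1) = 18
    return 0;
}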