Example #1
void Car::addNewCost(QDate date, unsigned int distance, QString description, double price)
{
    Cost *cost = new Cost(date,distance,description,price,CREATE_NEW_EVENT,this);
    _costlist.append(cost);
    qSort(_costlist.begin(), _costlist.end(), sortCostByDistance);
    cost->save();
    emit costsChanged();
}
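Since Qt 5.5, qSort is deprecated in favour of std::sort. A minimal sketch of an equivalent call follows; the comparator body is an assumption (the project's real sortCostByDistance is not shown) and presumes Cost exposes a getDistance() accessor:

#include <algorithm>

// Hypothetical comparator; the project's actual sortCostByDistance may differ.
static bool sortCostByDistanceSketch(const Cost *a, const Cost *b)
{
    return a->getDistance() < b->getDistance(); // ascending by odometer reading
}

// Drop-in replacement for the qSort call above:
// std::sort(_costlist.begin(), _costlist.end(), sortCostByDistanceSketch);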
        void PathLengthDirectInfSampler::sampleUniform(State* statePtr, const Cost& maxCost)
        {
            //Check if a solution path has been found
            if (std::isfinite(maxCost.value()) == false)
            {
                //We don't have a solution yet, we sample from our basic sampler instead...
                baseSampler_->sampleUniform(statePtr);
            }
            else //We have a solution
            {
                //Set the new transverse diameter
                phsPtr_->setTransverseDiameter(maxCost.value());

                //Check whether the problem domain (i.e., StateSpace) or PHS has the smaller measure. Sample the smaller directly and reject from the larger.
                if (informedSubSpace_->getMeasure() <= phsPtr_->getPhsMeasure())
                {
                    //The PHS is larger than the subspace, just sample from the subspace directly.
                    //Variables
                    //The informed subset of the sample as a vector
                    std::vector<double> informedVector(informedSubSpace_->getDimension());

                    //Sample from the state space until the sample is in the PHS
                    do
                    {
                        //Generate a random sample
                        baseSampler_->sampleUniform(statePtr);

                        //Is there an extra "uninformed" subspace to trim off before comparing to the PHS?
                        if ( StateSampler::space_->isCompound() == false )
                        {
                            //No, space_ == informedSubSpace_
                            informedSubSpace_->copyToReals(informedVector, statePtr);
                        }
                        else
                        {
                            //Yes, we need to do some work to extract the subspace
                            informedSubSpace_->copyToReals(informedVector, statePtr->as<CompoundState>()->components[informedIdx_]);
                        }
                    }
                    //Check if the informed state is in the PHS
                    while ( phsPtr_->isInPhs(informedSubSpace_->getDimension(), &informedVector[0]) == false );
                }
                else
                {
                    //The PHS has a smaller volume than the subspace.
                    //Sample from within the PHS until the sample is in the state space
                    do
                    {
                        this->sampleUniformIgnoreBounds(statePtr, maxCost);
                    }
                    while ( StateSampler::space_->satisfiesBounds(statePtr) == false );
                }
            }
        }
Example #3
Cost IntersectQP::cost(OperationContext &context, QueryExecutionContext &qec) const
{
	Cost result;
	Vector::const_iterator it = args_.begin();
	if(it != args_.end()) {
		result = (*it)->cost(context, qec);
		for(++it; it != args_.end(); ++it) {
			result.intersectOp((*it)->cost(context, qec));
		}
	}
	return result;
}
ompl::base::Cost ompl::base::MultiOptimizationObjective::stateCost(const State *s) const
{
    Cost c = identityCost();
    for (std::vector<Component>::const_iterator comp = components_.begin();
         comp != components_.end();
         ++comp)
    {
        c = Cost(c.value() + comp->weight * (comp->objective->stateCost(s).value()));
    }

    return c;
}
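A brief usage sketch (assuming a recent OMPL where OptimizationObjectivePtr is a std::shared_ptr and the headers below exist): components_ is filled through MultiOptimizationObjective::addObjective, so the weighted sum above could combine, for example, path-length and clearance objectives:

#include <memory>
#include <ompl/base/OptimizationObjective.h>
#include <ompl/base/objectives/PathLengthOptimizationObjective.h>
#include <ompl/base/objectives/MaximizeMinClearanceObjective.h>

ompl::base::OptimizationObjectivePtr makeWeightedObjective(const ompl::base::SpaceInformationPtr &si)
{
    auto *multi = new ompl::base::MultiOptimizationObjective(si);
    // The resulting stateCost(s) is the weighted sum computed in the loop above.
    multi->addObjective(std::make_shared<ompl::base::PathLengthOptimizationObjective>(si), 1.0);
    multi->addObjective(std::make_shared<ompl::base::MaximizeMinClearanceObjective>(si), 10.0);
    return ompl::base::OptimizationObjectivePtr(multi);
}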
Example #5
static void* Cost_optimizeA(void* object){
    pthread_setname_np(pthread_self(), "Athread");
    Cost* cost = (Cost*)object;
    set_affinity(3);
    cost->running_a = true;
    while(cost->running_a) {
        cost->optimizeA();

        if(allDie) {
            return 0;
        }
    }
    return 0; // a pthread start routine must return a value on every path
}
Example #6
static void* Cost_optimizeQD(void* object){
    pthread_setname_np(pthread_self(), "QDthread");
    Cost* cost = (Cost*)object;
    set_affinity(2);
    cost->running_qd = true;

    while(cost->running_a) { // A is not done
        cost->optimizeQD();

        if(allDie) {
            return 0;
        }
    }
    cost->running_qd = false;
    return 0; // a pthread start routine must return a value on every path
}
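A minimal launcher sketch for the two worker routines above, assuming it lives in the same translation unit (Cost, set_affinity, and the allDie flag all come from the original project):

#include <pthread.h>

// Hypothetical helper: start both optimizer threads on one Cost object and wait for them.
// Cost_optimizeA drives running_a; Cost_optimizeQD keeps spinning only while A is running.
static void runOptimizers(Cost &cost)
{
    pthread_t aThread, qdThread;
    pthread_create(&aThread, NULL, Cost_optimizeA, &cost);
    pthread_create(&qdThread, NULL, Cost_optimizeQD, &cost);
    pthread_join(aThread, NULL);
    pthread_join(qdThread, NULL);
}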
Example #7
inline double ticksToAvgPrice(Lots lots, Cost cost, const Contr& contr) noexcept
{
  double ticks = 0;
  if (lots != 0_lts) {
    ticks = fractToReal(cost.count(), lots.count());
  }
  return ticks * contr.priceInc();
}
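As a worked example, assuming fractToReal(n, d) simply returns n/d as a double (its definition is not shown here): a cost of 500 ticks filled over 100 lots with a price increment of 0.25 gives an average price of (500 / 100) * 0.25 = 1.25.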
Cost NegativeNodePredicateFilterQP::cost(OperationContext &context, QueryExecutionContext &qec) const
{
	Cost cost = arg_->cost(context, qec);
	Cost predCost = pred_->cost(context, qec);

	// The predicate expression is run once for every argument key
// 	cost.pages += ceil(cost.keys * predCost.pages);
	cost.pagesForKeys += cost.keys * predCost.totalPages();

	// Take a token key away from the result, because it is likely to
	// return fewer nodes than the argument - we just don't know how many fewer.
	if(cost.keys > 1) cost.keys -= 1;

	// Add a token page to the result, because having a predicate takes more
	// time than not
	cost.pagesOverhead += 1;

	return cost;
}
Example #9
File: Track.cpp Project: caomw/XTAM
Track::Track(Cost cost){
    rows=cost.rows;
    cols=cost.cols;
    // Seed all image slots from the cost volume's base image
    baseImage=lastFrame=thisFrame=cost.baseImage;
    cameraMatrix=Mat(cost.cameraMatrix);
    depth=cost.depthMap();
    // Start tracking from the cost volume's base pose
    PToLie(Mat(cost.basePose),basePose);
    pose=basePose.clone();
}
Example #10
    void solve_nonlinear () {
      for (size_t i = 0; i < signals.rows(); ++i) {
        const Math::Vector<cost_value_type> signal (signals.row(i));
        Math::Vector<cost_value_type> values (tensors.row(i));

        cost.set_voxel (&signal, &values);

        Math::Vector<cost_value_type> x (cost.size());
        cost.init (x);
        //Math::check_function_gradient (cost, x, 1e-10, true);

        Math::GradientDescent<Cost> optim (cost);
        try { optim.run (10000, 1e-8); }
        catch (Exception& E) {
          E.display();
        }

        //x = optim.state();
        //Math::check_function_gradient (cost, x, 1e-10, true);

        cost.get_values (values, optim.state());
      }
    }
Example #11
ompl::base::Cost ompl::base::StateCostIntegralObjective::motionCost(const State *s1, const State *s2) const
{
    if (interpolateMotionCost_)
    {
        Cost totalCost = this->identityCost();

        int nd = si_->getStateSpace()->validSegmentCount(s1, s2);

        State *test1 = si_->cloneState(s1);
        Cost prevStateCost = this->stateCost(test1);
        if (nd > 1)
        {
            State *test2 = si_->allocState();
            for (int j = 1; j < nd; ++j)
            {
                si_->getStateSpace()->interpolate(s1, s2, (double)j / (double)nd, test2);
                Cost nextStateCost = this->stateCost(test2);
                totalCost = Cost(totalCost.value() +
                                 this->trapezoid(prevStateCost, nextStateCost, si_->distance(test1, test2)).value());
                std::swap(test1, test2);
                prevStateCost = nextStateCost;
            }
            si_->freeState(test2);
        }

        // Lastly, add s2
        totalCost = Cost(totalCost.value() +
                         this->trapezoid(prevStateCost, this->stateCost(s2), si_->distance(test1, s2)).value());

        si_->freeState(test1);

        return totalCost;
    }

    return this->trapezoid(this->stateCost(s1), this->stateCost(s2), si_->distance(s1, s2));
}
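The trapezoid() helper is not shown in this snippet; it implements the trapezoidal rule for the cost line integral, so each call above contributes roughly the following per segment (sketch only):

// Sketch: approximate the integral of state cost over a segment of length dist.
ompl::base::Cost trapezoidSketch(ompl::base::Cost c1, ompl::base::Cost c2, double dist)
{
    return ompl::base::Cost(0.5 * dist * (c1.value() + c2.value()));
}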
        double PathLengthDirectInfSampler::getInformedMeasure(const Cost& currentCost) const
        {
            //Variable
            //The measure of the informed set
            double informedMeasure;

            //The informed measure is then the measure of the PHS for the given cost:
            informedMeasure = phsPtr_->getPhsMeasure(currentCost.value());

            //And if the space is compound, it is further multiplied by the measure of the uninformed subspace
            if ( StateSampler::space_->isCompound() == true )
            {
                informedMeasure = informedMeasure*uninformedSubSpace_->getMeasure();
            }

            //Return the smaller of the two measures
            return std::min(StateSampler::space_->getMeasure(), informedMeasure);
        }
        void PathLengthDirectInfSampler::sampleUniformIgnoreBounds(State* statePtr, const Cost& maxCost)
        {
            //Variable
            //The informed subset of the sample as a vector
            std::vector<double> informedVector(informedSubSpace_->getDimension());

            //Set the new transverse diameter
            phsPtr_->setTransverseDiameter(maxCost.value());

            //Sample the ellipse
            rng_.uniformProlateHyperspheroid(phsPtr_, informedSubSpace_->getDimension(), &informedVector[0]);

            //If there is an extra "uninformed" subspace, we need to add that to the state before converting the raw vector representation into a state....
            if ( StateSampler::space_->isCompound() == false )
            {
                //No, space_ == informedSubSpace_
                //Copy into the state pointer
                informedSubSpace_->copyFromReals(statePtr, informedVector);
            }
            else
            {
                //Yes, we need to also sample the uninformed subspace
                //Variables
                //A state for the uninformed subspace
                State* uninformedState = uninformedSubSpace_->allocState();

                //Copy the informed subspace into the state pointer
                informedSubSpace_->copyFromReals(statePtr->as<CompoundState>()->components[informedIdx_], informedVector);

                //Sample the uninformed subspace
                uninformedSubSampler_->sampleUniform(uninformedState);

                //Copy the uninformed sample into the state pointer
                uninformedSubSpace_->copyState(statePtr->as<CompoundState>()->components[uninformedIdx_], uninformedState);

                //Free the state
                uninformedSubSpace_->freeState(uninformedState);
            }
        }
ompl::base::Cost ompl::base::OptimizationObjective::combineCosts(Cost c1, Cost c2) const
{
    return Cost(c1.value() + c2.value());
}
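Because combineCosts() is virtual, a custom objective can aggregate differently. A minimal sketch (not part of OMPL itself) of a bottleneck-style objective that combines by taking the maximum instead of the sum:

#include <algorithm>
#include <ompl/base/OptimizationObjective.h>

class MaxStateCostObjective : public ompl::base::OptimizationObjective
{
public:
    MaxStateCostObjective(const ompl::base::SpaceInformationPtr &si)
        : ompl::base::OptimizationObjective(si) {}

    // Placeholder per-state cost; a real objective would measure something like clearance.
    ompl::base::Cost stateCost(const ompl::base::State *) const override
    {
        return ompl::base::Cost(1.0);
    }

    // A motion is as expensive as its worse endpoint.
    ompl::base::Cost motionCost(const ompl::base::State *s1, const ompl::base::State *s2) const override
    {
        return combineCosts(stateCost(s1), stateCost(s2));
    }

    // Combine by taking the maximum rather than adding.
    ompl::base::Cost combineCosts(ompl::base::Cost c1, ompl::base::Cost c2) const override
    {
        return ompl::base::Cost(std::max(c1.value(), c2.value()));
    }
};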
Example #15
std::vector<std::pair<std::vector<uint>, std::vector<SentenceInfo> > > aStar::Suchalgorithmus(char* eingabe, PTree<PTree <Cost> >* blacktree, Lexicon* eLex, Lexicon* fLex){
     igzstream in(eingabe);
     aStar::flex=fLex;
     elex=eLex;
     schwarz=blacktree;


     string token,line;

	  unsigned int lineNumber = 0;
	
     while(getline(in,line)){
	  istringstream ist(line); // read in the sentence
	  
	  vector<unsigned int> sentence_id;

	  vector<HypothesisNode> Knoten;
	  Knoten.push_back(HypothesisNode()); // initialize the first node
	  Cost startkosten(0);
	  Knoten[0].setBestcost(startkosten);
	  //cout << "start kosten " << Knoten[0].getBestcost().cost() << endl;
	  
	  int aktPos=0; // keeps track of how many words have been read so far

	  while ( ist >> token){
	       Word word_id_french=flex->getWord_or_add(token); // the Word entry for the token (with 2 language bits)
	       unsigned int id_french= word_id_french.wordId(); // the id without the language bits

	       sentence_id.push_back(id_french);
	       aktPos++;
	       
	       HypothesisNode knoten_next= HypothesisNode(); // initialize the next node with the costs so far
	       
	       for (int laengePhrase=1; laengePhrase<5; laengePhrase++){
		    int posPhraseStart=aktPos-laengePhrase; // position of the node at which the phrase starts
		    if (posPhraseStart < 0)	break; // we are at the start of the sentence, so there are no phrases

		    vector<unsigned int> fphrase;
		    for (int i=posPhraseStart; i<aktPos; i++){
			 fphrase.push_back(sentence_id[i]);
		    }
			PTree<PTree <Cost> >* schwarzRest=schwarz->traverse(fphrase);
			if (!schwarzRest)	continue; // if the French phrase does not exist, check the next one
			PTree <Cost>* blauBaum=&schwarzRest->c;
			

		    
		    if (blauBaum){
			 int counter=0; // debugging aid only, used to weed out errors
			 for (PTree<Cost>::iterator it=blauBaum->begin(); it!=blauBaum->end(); it++){
			      //if (counter++==10)	continue;
			      vector<unsigned int> ephrase=it->phrase();
			      
			      Cost relfreq = it->c;
				
			      if (relfreq.cost() == 1./0. )	continue; // skip phrases with infinite cost (1./0. evaluates to +inf)
			      
			      double cost_till_aktPos=Knoten[posPhraseStart].getBestcost().cost();
			      
			      if (cost_till_aktPos+prune > knoten_next.getBestcost().cost())	continue; // pruned: this is a bad translation
			      
			      if(cost_till_aktPos+relfreq.cost() < knoten_next.getBestcost().cost())	knoten_next.setBestcost(Knoten[posPhraseStart].getBestcost()+relfreq.cost());
			      
			      PartialTranslation* Kante= new PartialTranslation(relfreq,ephrase,&knoten_next,posPhraseStart);
			      Knoten[posPhraseStart].add_PartialTranslation_to_Inbound(Kante);
			      knoten_next.add_PartialTranslation_to_Outbound(Kante);   
			 }
		    }
		
	       }
		if (knoten_next.getOutbound().size() == 0){
			// first insert the word into the English lexicon
			string word_string = flex->getString(sentence_id[aktPos-1]);
			unsigned int id_english=elex->getWord_or_add(word_string);

			// then create the edge; its cost should be low because it has to be used anyway, so it can be explored directly
			PartialTranslation* Kante= new PartialTranslation(Cost(0),vector<unsigned int>{id_english},&Knoten[aktPos],aktPos-1);
			knoten_next.setBestcost(Knoten.back().getBestcost());
			knoten_next.add_PartialTranslation_to_Outbound(Kante);
			Knoten.back().add_PartialTranslation_to_Inbound(Kante);
		}
	       Knoten.push_back(knoten_next); // the last node (knoten_next) has no incoming edges
	       
	  }
	  
	  //dotGraph(Knoten, elex);
	  aStar astar(Knoten);
	  astar.lineNumber = lineNumber;

	  std::vector<SentenceInfo> sentenceInfos = astar.print(astar.search(), sentence_id, schwarz);
	  
	  for(unsigned int i = 0; i < Knoten.size(); i++){
		 HypothesisNode& hnode = Knoten[i];
		 for(unsigned int j = 0; j < hnode.getOutbound().size(); j++)
		 	delete hnode.getOutbound()[j];
	  }
	  
	  std::pair<std::vector<uint>, std::vector<SentenceInfo> > pair_tmp;
	  pair_tmp.first = sentence_id;
	  pair_tmp.second = sentenceInfos;
	  nBestLists.push_back(pair_tmp);
	  lineNumber ++;
     }
     return nBestLists;
}
bool ompl::base::OptimizationObjective::isCostBetterThan(Cost c1, Cost c2) const
{
    return c1.value() < c2.value();
}
Example #17
 void addCost (const Cost& c)
 {
     mCosts [static_cast <int> (c.getLoadType ())] = c;
 }
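A hypothetical counterpart accessor (sketch only; the LoadType name and the indexing scheme are assumptions based on the setter above):

 const Cost& getCost (LoadType type) const
 {
     return mCosts [static_cast <int> (type)];
 }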
int main(int argc, char* argv[]) {
   vector<Core> core;
   core.push_back(Core(0, 1));
   core.push_back(Core(1, 4));
   core.push_back(Core(4, 6));
   core.push_back(Core(3, 0));
   core.push_back(Core(4, 3));

   double** bandwidth;
   double** latency;
   bandwidth = new double*[5];
   latency = new double*[5];
   for (int i = 0; i < 5; i++) {
      bandwidth[i] = new double[5];
      memset(bandwidth[i], 0, sizeof(double) * 5);
      latency[i] = new double[5];
      memset(latency[i], 0, sizeof(double) * 5);
   }

   bandwidth[0][3] = 10;
   bandwidth[0][4] = 20;
   bandwidth[1][0] = 30;
   bandwidth[1][4] = 40;
   bandwidth[2][3] = 25;
   bandwidth[3][1] = 40;
   bandwidth[3][2] = 50;
   bandwidth[4][0] = 10;
   bandwidth[4][1] = 30;

   latency[0][3] = 10;
   latency[0][4] = 10;
   latency[1][0] = 10;
   latency[1][4] = 10;
   latency[2][3] = 10;
   latency[3][1] = 10;
   latency[3][2] = 10;
   latency[4][0] = 10;
   latency[4][1] = 10;

   Network network;
   network.init(7, 5);

   Coordinate from = { 0, 1 };
   Coordinate to = { 3, 0 };
   network.addCore(from, 0); //core1
   network.addCore(to, 3); //core4
   network.changeConnection(from, to, ADD); //1-4
   to.x = 4;
   to.y = 3;
   network.addCore(to, 4); //core5
   network.changeConnection(from, to, ADD); //1-5

   from.x = 1;
   from.y = 4;
   network.addCore(from, 1); //core2
   to.x = 0;
   to.y = 1;
   network.changeConnection(from, to, ADD); //2-1
   to.x = 4;
   to.y = 3;
   network.changeConnection(from, to, ADD); //2-5

   from.x = 4;
   from.y = 6;
   network.addCore(from, 2); //core3
   to.x = 3;
   to.y = 0;
   network.changeConnection(from, to, ADD); //3-4

   from.x = 3;
   from.y = 0;
   to.x = 1;
   to.y = 4;
   network.changeConnection(from, to, ADD); //4-2
   to.x = 4;
   to.y = 6;
   network.changeConnection(from, to, ADD); //4-3

   from.x = 4;
   from.y = 3;
   to.x = 0;
   to.y = 1;
   network.changeConnection(from, to, ADD); //5-1
   to.x = 1;
   to.y = 4;
   network.changeConnection(from, to, ADD); //5-2

   Cost cost;
   cost.init(1, 1, 0.2, 0.04);
   cost.initCost(bandwidth, latency, core, 1, network);

   network.updateUtilization(bandwidth, core);
   network.showDiagram();

   cost.printCost();
   cout << endl;

   int changedCore = 3;
   Coordinate newPos = { 4, 6 };
   int swapCore = network.getCoreIndex(newPos);
   assert(swapCore == 2);
   Coordinate oldPos = core[changedCore].getPosition();

   //remove old cost (compaction, slack, proximity)
   cost.updateCost(bandwidth, latency, 1, core, REMOVE, changedCore,
         swapCore);
   //remove all connections from the changed core
   network.changeAllConnections(bandwidth, core, changedCore, REMOVE);
   //remove all connections from the old position of the swap core
   network.changeAllConnections(bandwidth, core, swapCore, REMOVE);
   //add the overlap
   if (bandwidth[changedCore][swapCore] != 0) {
      network.changeConnection(core[changedCore].getPosition(),
            core[swapCore].getPosition(), ADD);
   }
   if (bandwidth[swapCore][changedCore] != 0) {
      network.changeConnection(core[swapCore].getPosition(),
            core[changedCore].getPosition(), ADD);
   }

   //place core on new pos
   core[swapCore].setPosition(oldPos);
   network.addCore(oldPos, swapCore);
   core[changedCore].setPosition(newPos);
   network.addCore(newPos, changedCore);

   //add all connections of changedCore
   network.changeAllConnections(bandwidth, core, changedCore, ADD);
   //add all connections of swap core
   network.changeAllConnections(bandwidth, core, swapCore, ADD);
   //remove overlap
   if (bandwidth[changedCore][swapCore] != 0) {
      network.changeConnection(core[changedCore].getPosition(),
            core[swapCore].getPosition(), REMOVE);
   }
   if (bandwidth[swapCore][changedCore] != 0) {
      network.changeConnection(core[swapCore].getPosition(),
            core[changedCore].getPosition(), REMOVE);
   }
   //calculate new cost (compaction, slack, proximity)
   cost.updateCost(bandwidth, latency, 1, core, ADD, changedCore,
         swapCore);
   //calculate new cost
   //cost.initCost(bandwidth, latency, core, LINK_LATENCY, network);
   cost.calculateCost(bandwidth, core, network);

   network.updateUtilization(bandwidth, core);

   //network.printNetwork();

   network.showDiagram();

   cost.printCost();
   cout << endl;

   //free the adjacency matrices allocated above
   for (int i = 0; i < 5; i++) {
      delete[] bandwidth[i];
      delete[] latency[i];
   }
   delete[] bandwidth;
   delete[] latency;

   return 0;
}
Example #19
QueryPlan *IntersectQP::optimize(OptimizationContext &opt)
{
	// Optimize the arguments
	vector<QueryPlan*> newArgs;
	Vector::iterator it;
	for(it = args_.begin(); it != args_.end(); ++it) {
		QueryPlan *arg = (*it)->optimize(opt);
		if(arg->getType() == type_) {
			// Merge IntersectQP arguments into this object
			const Vector &args  = ((OperationQP*)arg)->getArgs();
			Vector::const_iterator aend = args.end();
			for(Vector::const_iterator ai = args.begin();
			    ai != aend; ++ai) newArgs.push_back(*ai);
		} else {
			newArgs.push_back(arg);
		}
	}
	args_.clear();
	std::copy(newArgs.begin(), newArgs.end(), back_inserter(args_));

	removeSupersets(opt);

	// Return if we only have one argument
	if(args_.size() == 1) return args_[0];

	// Pull forward filters
	for(it = args_.begin(); it != args_.end(); ++it) {
		switch((*it)->getType()) {
		case VALUE_FILTER:
		case PREDICATE_FILTER:
		case NODE_PREDICATE_FILTER:
		case NEGATIVE_NODE_PREDICATE_FILTER:
		// We can't skip NUMERIC_PREDICATE_FILTER, because changing its input will change
		// the cardinality of its input, and cause it to return the wrong results.
// 		case NUMERIC_PREDICATE_FILTER:
// 		case DOC_EXISTS:
		case LEVEL_FILTER: {
			string before = logBefore(this);

			FilterQP *filter = (FilterQP*)*it;

			*it = filter->getArg();
			filter->setArg(this);

			logTransformation(opt.getLog(), "Filter pulled forward", before, filter);
			return filter->optimize(opt);
		}
		default: break;
		}
	}

	// Identify potential pairs of ValueQP for a RangeQP
	newArgs.clear();
	for(it = args_.begin(); it != args_.end(); ++it) {
		if(isLessThanOrGreaterThan(*it)) {
			for(Vector::iterator it2 = it + 1;
			    it2 != args_.end(); ++it2) {
				if(isLessThanOrGreaterThan(*it2)) {
					QueryPlan *range = createRange((ValueQP*)*it, (ValueQP*)*it2);
					if(range != 0) {
						logTransformation(opt.getLog(), "Merged into range",
							logIntersectBefore(*it, *it2), range);
						newArgs.push_back(range);
					}
				}
			}
		}

		newArgs.push_back((*it)->optimize(opt));
	}
	args_.clear();
	std::copy(newArgs.begin(), newArgs.end(), back_inserter(args_));

	// Return if we only have one argument
	if(args_.size() == 1) return args_[0];

	// Try to pull forward document indexes, so they will combine
	if(opt.getPhase() < OptimizationContext::ALTERNATIVES) {
		string before = logBefore(this);
		QueryPlan *result = PullForwardDocumentJoin().run(this);
		if(result != 0) {
			logTransformation(opt.getLog(), "Pull forward document join", before, result);
			return result->optimize(opt);
		}
	}

	if(opt.getPhase() < OptimizationContext::REMOVE_REDUNDENTS)
		return this;

	// TBD remove the need for QueryExecutionContext here - jpcs
	QueryExecutionContext qec(GET_CONFIGURATION(opt.getContext())->getQueryContext(),
				  /*debugging*/false);
	qec.setContainerBase(opt.getContainerBase());
	qec.setDynamicContext(opt.getContext());

	// Sort the arguments based on how many keys we think they'll return
	sort(args_.begin(), args_.end(), keys_compare_less(opt.getOperationContext(), qec));

	// Remove document index joins if they are unlikely to be useful.
	it = args_.begin();
	QueryPlan *leftMost = *it;
	Cost lCost = leftMost->cost(opt.getOperationContext(), qec);
	newArgs.clear();
	newArgs.push_back(*it);
	for(++it; it != args_.end(); ++it) {
		if(StructuralJoinQP::isDocumentIndex(*it, /*toBeRemoved*/true) &&
			StructuralJoinQP::isSuitableForDocumentIndexComparison(leftMost)) {
			Cost itCost = (*it)->cost(opt.getOperationContext(), qec);

			// TBD Calculate these constants? - jpcs
			static const double KEY_RATIO_THRESHOLD = 2.0;
			static const double PAGES_PER_KEY_FACTOR = 2.0;

			if((itCost.keys / lCost.keys) > KEY_RATIO_THRESHOLD ||
				(itCost.totalPages() / itCost.keys) > (lCost.totalPages() * PAGES_PER_KEY_FACTOR / lCost.keys)) {
				string before = logIntersectBefore(leftMost, *it);
				logTransformation(opt.getLog(), "Remove document join", before, leftMost);
				leftMost->logCost(qec, lCost, 0);
				(*it)->logCost(qec, itCost, 0);
				continue;
			}
		}

		newArgs.push_back(*it);
	}
	args_.clear();
	std::copy(newArgs.begin(), newArgs.end(), back_inserter(args_));

	// Return if we only have one argument
	if(args_.size() == 1) return args_[0];

	return this;
}
Example #20
void AnalyzeInput::evaltrajs(vector<Trajectory*> &trajs){
 //******Calculate Cost, Acceleration, and Distance traveled per time step*******************************
	Cost evalcost;
	this->DR = 0;
	this->C = 0;

	for (int i = 0; i < trajs.size(); i++){
		trajs[i]->R = 0;
		trajs[i]->C = 0;
		double Cmin = 100;
		double Cmax = 0;
		double Rmin = 1E9;
		double Rmax = 0;
		for (int j = 0; j < trajs[i]->particles.size()-1; j++){
			Particle* P = trajs[i]->particles[j];
			Particle* Pn = trajs[i]->particles[j+1];
			P->dr = distance(P,Pn);
			if(P->dr > Rmax)
				Rmax = P->dr;

			if(P->dr < Rmin)
				Rmin = P->dr;

			trajs[i]->R += P->dr;

			Pn->vel = P->dr * FRAMESPERSEC;
			if (j>1){
				Particle* Pm = trajs[i]->particles[j-1];
				P->accel = abs(Pn->vel - Pm->vel) * FRAMESPERSEC/2.0;
			}

			if (j >= COST_TRAJ_SIZE - 1){
				int s = (j) - (COST_TRAJ_SIZE -1);
				Trajectory tr;
				for (int x = s; x <= j; x++){
					tr.particles.push_back(trajs[i]->particles[x]);
				}
				Pn->cost = evalcost.cost(&tr, Pn);
			}else{
				Pn->cost = 0.0;
			}

			if(Pn->cost > Cmax)
				Cmax = Pn->cost;

			if(Pn->cost < Cmin && Pn->cost != 0)
				Cmin = Pn->cost;

			trajs[i]->C += Pn->cost;
		}
		trajs[i]->Cl = Cmin;
		trajs[i]->Ch = Cmax;
		trajs[i]->C = trajs[i]->C / ((double) trajs[i]->particles.size());
		this->C += trajs[i]->C;

		trajs[i]->Rl = Rmin;
		trajs[i]->Rh = Rmax;
		trajs[i]->R = trajs[i]->R / ((double) trajs[i]->particles.size());
		this->DR += trajs[i]->R;
	}
	this->C = this->C / trajs.size();
	this->DR = this->DR / trajs.size();
}
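As a worked example, assuming FRAMESPERSEC is 30 (its value is not shown here): a displacement P->dr of 0.5 units between consecutive frames gives Pn->vel = 0.5 * 30 = 15 units/s, and the acceleration estimate abs(Pn->vel - Pm->vel) * 30 / 2.0 is a central difference taken over the two frame intervals surrounding P.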
Example #21
QueryPlan *DescendantOrSelfJoinQP::optimize(OptimizationContext &opt)
{
	XPath2MemoryManager *mm = opt.getMemoryManager();

	QueryPlan *qp = StructuralJoinQP::optimize(opt);
	if(qp != this) return qp;

	if(findType(left_) == ImpliedSchemaNode::METADATA) {
		switch(right_->getType()) {
		case DESCENDANT_OR_SELF: {
			DescendantOrSelfJoinQP *rsj = (DescendantOrSelfJoinQP*)right_;
			if(findType(rsj->left_) == ImpliedSchemaNode::METADATA) {
				string before = logBefore(this);

				left_ = new (mm) IntersectQP(left_, rsj->left_, 0, mm);
				left_->setLocationInfo(rsj);
				right_ = rsj->right_;
				flags_ |= rsj->flags_;

				logTransformation(opt.getLog(), "Combine document join", before, this);
				return optimize(opt);
			}
			break;
		}
		default: {
			if(findType(right_) == ImpliedSchemaNode::METADATA) {
				string before = logBefore(this);

				QueryPlan *result = new (mm) IntersectQP(left_, right_, flags_, mm);
				result->setLocationInfo(this);

				logTransformation(opt.getLog(), "Combine document join", this, result);
				return result->optimize(opt);
			}
			break;
		}
		}
	}

	if(opt.getPhase() < OptimizationContext::REMOVE_REDUNDENTS)
		return this;

	if(findType(left_) == ImpliedSchemaNode::METADATA) {
		switch(right_->getType()) {
		case STEP: {
			string before = logBefore(this);

			StepQP *step = (StepQP*)right_;
			right_ = step->getArg();
			step->setArg(this);

			logTransformation(opt.getLog(), "Push back document join", before, step);
			return step->optimize(opt);
		}
		case DESCENDANT:
		case DESCENDANT_OR_SELF:
		case ATTRIBUTE:
		case CHILD:
		case ATTRIBUTE_OR_CHILD: {
			string before = logBefore(this);

			StructuralJoinQP *sj = (StructuralJoinQP*)right_;
			right_ = sj->getLeftArg();
			sj->setLeftArg(this);

			logTransformation(opt.getLog(), "Push back document join", before, sj);
			return sj->optimize(opt);
		}
		case ANCESTOR:
		case ANCESTOR_OR_SELF:
		case PARENT:
		case PARENT_OF_ATTRIBUTE:
		case PARENT_OF_CHILD: {
			string before = logBefore(this);

			StructuralJoinQP *sj = (StructuralJoinQP*)right_;
			right_ = sj->getRightArg();
			sj->setRightArg(this);

			logTransformation(opt.getLog(), "Push back document join", before, sj);
			return sj->optimize(opt);
		}
		case EXCEPT: {
			string before = logBefore(this);

			// Add to both arguments of ExceptQP

			ExceptQP *ex = (ExceptQP*)right_;
			right_ = ex->getLeftArg();
			ex->setLeftArg(this);

			QueryPlan *copy = new (mm) DescendantOrSelfJoinQP(left_->copy(mm), ex->getRightArg(), flags_, mm);
			copy->setLocationInfo(this);
			ex->setRightArg(copy);

			logTransformation(opt.getLog(), "Push back document join", before, ex);
			return ex->optimize(opt);
		}
		default: break;
		}
	}

	// TBD remove the need for QueryExecutionContext here - jpcs
	QueryExecutionContext qec(GET_CONFIGURATION(opt.getContext())->getQueryContext(),
		/*debugging*/false);
	qec.setContainerBase(opt.getContainerBase());
	qec.setDynamicContext(opt.getContext());

	// Remove this document index join if it's unlikely to be useful
	if(isDocumentIndex(left_, /*toBeRemoved*/true) && isSuitableForDocumentIndexComparison(right_)) {
		Cost rCost = right_->cost(opt.getOperationContext(), qec);
		Cost lCost = left_->cost(opt.getOperationContext(), qec);

		// TBD Calculate these constants? - jpcs
		static const double KEY_RATIO_THRESHOLD = 2.0;
		static const double KEYS_PER_PAGE_THRESHOLD = 2.0;

		if((lCost.keys / rCost.keys) > KEY_RATIO_THRESHOLD ||
			(lCost.keys / lCost.totalPages()) > KEYS_PER_PAGE_THRESHOLD) {

			logTransformation(opt.getLog(), "Remove document join", this, right_);
			right_->logCost(qec, rCost, 0);
			left_->logCost(qec, lCost, 0);
			return right_;
		}
	}

	return this;
}