// Recursive helper for maxPathSum.
// Returns a pair:
//   .first  - the best path sum found anywhere within this subtree
//   .second - the best downward path sum that starts at (and includes) root
PII mps(TreeNode *root) {
    if (root == NULL) {
        // Empty subtree: no complete path exists; a downward path adds nothing.
        return mp(INT_MIN, 0);
    }
    PII left = mps(root->left);
    PII right = mps(root->right);
    // Best path passing through this node: extend with a child's downward
    // sum only when that sum is positive.
    int through = root->val;
    if (left.second > 0) through += left.second;
    if (right.second > 0) through += right.second;
    PII result;
    result.first = max(through, max(left.first, right.first));
    result.second = root->val + max(0, max(left.second, right.second));
    return result;
}
/**
 * Returns a cursor over 'ns' for the given query/sort, choosing the cheapest
 * available strategy: a full collection scan, a direct _id index lookup, a
 * single optimal index plan, or the multi-plan query optimizer cursor.
 */
shared_ptr<Cursor> NamespaceDetailsTransient::getCursor( const char *ns, const BSONObj &query, const BSONObj &order ) {
    if ( query.isEmpty() && order.isEmpty() ) {
        // No filter and no sort: a plain collection scan suffices.
        // TODO This will not use a covered index currently.
        return theDataFileMgr.findAll( ns );
    }
    if ( isSimpleIdQuery( query ) ) {
        // Fast path: a point lookup on the _id index, when one exists.
        Database *database = cc().database();
        assert( database );
        NamespaceDetails *d = database->namespaceIndex.details(ns);
        if ( d ) {
            int idxNo = d->findIdIndex();
            if ( idxNo >= 0 ) {
                IndexDetails& i = d->idx( idxNo );
                BSONObj key = i.getKeyFromQuery( query );
                // Equal start/end keys => exact-match btree scan.
                return shared_ptr<Cursor>( BtreeCursor::make( d, idxNo, i, key, key, true, 1 ) );
            }
        }
    }
    auto_ptr<MultiPlanScanner> mps( new MultiPlanScanner( ns, query, order ) ); // mayYield == false
    shared_ptr<Cursor> single = mps->singleCursor();
    if ( single ) {
        // Exactly one viable plan: use its cursor directly, attaching a
        // matcher when the cursor does not already filter on the query.
        if ( !query.isEmpty() && !single->matcher() ) {
            shared_ptr<CoveredIndexMatcher> matcher( new CoveredIndexMatcher( query, single->indexKeyPattern() ) );
            single->setMatcher( matcher );
        }
        return single;
    }
    // Multiple candidate plans: let the optimizer cursor race them.
    return newQueryOptimizerCursor( mps );
}
/** This interface is just available for testing. */
shared_ptr<Cursor> newQueryOptimizerCursor
( const char *ns, const BSONObj &query, const BSONObj &order,
  const QueryPlanSelectionPolicy &planPolicy, bool requireOrder,
  const shared_ptr<const ParsedQuery> &parsedQuery ) {
    // Build the plan scanner, then delegate to the scanner-based overload
    // (final 'false' argument as in the production path).
    auto_ptr<MultiPlanScanner> scanner( MultiPlanScanner::make( ns, query, order, parsedQuery ) );
    return newQueryOptimizerCursor( scanner, planPolicy, requireOrder, false );
}
void MasterPly::poissonDiskSampling(int sampleNum){ float rad = 0.f; rad = tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::ComputePoissonDiskRadius(m, sampleNum); tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::SamplingRandomGenerator().initialize((unsigned int)time(0)); //sample point cloud std::vector<Point3f> sampleVec; tri::TrivialSampler<MyMesh> mps(sampleVec); // sampling //cout << "Subsampling a PointCloud" << endl; tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::PoissonDiskParam pp; tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::PoissonDiskParam::Stat pds; pp.preGenMesh = &subM; //pp.pds=&pds; pp.bestSampleChoiceFlag=false; // start poisson disk prunning sampleVec.clear(); tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::PoissonDiskPruning(mps, m, rad, pp); tri::Build(subM,sampleVec); vcg::tri::UpdateBounding<MyMesh>::Box(subM); //m.Clear(); }
// Builds a Hilbert R-Tree of the requested dimensionality backed by an
// in-memory page store of the requested page size.
InternalRStarTreeWrapper::InternalRStarTreeWrapper(unsigned int pageSize, unsigned int dimensions)
  : _dimensions(dimensions)
{
  boost::shared_ptr<MemoryPageStore> pageStore(new MemoryPageStore((int)pageSize));
  _tree = boost::shared_ptr<HilbertRTree>(new HilbertRTree(pageStore, dimensions));
}
// Verifies RTreeNode defaults after clear(), and that the same node state is
// observed when the node is re-read from its backing page without an
// intervening clear(). NOTE: the second half depends on the first node having
// written through the shared page store to page 0 — order matters.
void test1()
{
  shared_ptr<PageStore>mps(new MemoryPageStore(100));
  shared_ptr<RTreeNodeStore>store(new RTreeNodeStore(2, mps));
  {
    Tgs::RTreeNode rtn(2, mps->createPage());
    rtn.clear();
    // check init values.
    CPPUNIT_ASSERT(rtn.calculateEnvelope().isValid() == false);
    CPPUNIT_ASSERT_EQUAL(0, rtn.getChildCount());
    CPPUNIT_ASSERT_EQUAL(2, rtn.getDimensions());
    CPPUNIT_ASSERT_EQUAL(0, rtn.getId());
    CPPUNIT_ASSERT_EQUAL(2, rtn.getMaxChildCount());
    CPPUNIT_ASSERT_EQUAL(true, rtn.isLeafNode());
  }
  // load the node again from the same page and make sure it is still ok w/o a clear()
  RTreeNode rtn(2, mps->getPage(0));
  CPPUNIT_ASSERT(rtn.calculateEnvelope().isValid() == false);
  CPPUNIT_ASSERT_EQUAL(0, rtn.getChildCount());
  CPPUNIT_ASSERT_EQUAL(2, rtn.getDimensions());
  CPPUNIT_ASSERT_EQUAL(0, rtn.getId());
  CPPUNIT_ASSERT_EQUAL(2, rtn.getMaxChildCount());
  CPPUNIT_ASSERT_EQUAL(true, rtn.isLeafNode());
}
void NetworkMatcher::_createVertex2Index() { // No tuning was done, I just copied these settings from OsmMapIndex. // 10 children = 368 bytes shared_ptr<MemoryPageStore> mps(new MemoryPageStore(728)); _vertex2Index.reset(new HilbertRTree(mps, 2)); std::vector<Box> boxes; std::vector<int> fids; const OsmNetwork::VertexMap& vm = _n2->getVertexMap(); for (OsmNetwork::VertexMap::const_iterator it = vm.begin(); it != vm.end(); ++it) { fids.push_back((int)_index2Vertex.size()); _index2Vertex.push_back(it.value()); Box b(2); Meters searchRadius = _details2->getSearchRadius(it.value()); Envelope env(_details2->getEnvelope(it.value())); env.expandBy(searchRadius); b.setBounds(0, env.getMinX(), env.getMaxX()); b.setBounds(1, env.getMinY(), env.getMaxY()); boxes.push_back(b); } _vertex2Index->bulkInsert(boxes, fids); }
// Initializes storage for this node: on-disk storage when the plugin service
// allows persistence for the node id, otherwise service-provided memory
// storage. Fails when no node id is set or storage cannot be created.
nsresult
GMPStorageParent::Init()
{
  LOGD(("GMPStorageParent[%p]::Init()", this));

  // A node id is required to key any storage.
  if (NS_WARN_IF(mNodeId.IsEmpty())) {
    return NS_ERROR_FAILURE;
  }

  RefPtr<GeckoMediaPluginServiceParent> service(
    GeckoMediaPluginServiceParent::GetSingleton());
  if (NS_WARN_IF(!service)) {
    return NS_ERROR_FAILURE;
  }

  // Ask the service whether this node may use persistent (disk) storage.
  bool persistent = false;
  if (NS_WARN_IF(NS_FAILED(service->IsPersistentStorageAllowed(mNodeId, &persistent)))) {
    return NS_ERROR_FAILURE;
  }

  if (persistent) {
    mStorage = CreateGMPDiskStorage(mNodeId, mPlugin->GetPluginBaseName());
  } else {
    mStorage = service->GetMemoryStorageFor(mNodeId);
  }
  if (!mStorage) {
    return NS_ERROR_FAILURE;
  }

  mShutdown = false;
  return NS_OK;
}
void CMainFrame::OnWizard() { CMyPropertySheet mps(NULL, this); mps.AddPage(new CButtonPage(IDD_BUTTONS, _T("Buttons"))); mps.AddPage(new CComboPage(IDD_COMBOBOXES, _T("Combo Boxes"))); mps.SetWizardMode(TRUE); mps.DoModal(); }
// Writes per-patch parcel fate statistics (escape/stick counts and masses) to
// os. Totals are accumulated from values persisted in a previous run (model
// properties) plus the current counters gathered across processors; at output
// time the totals are persisted and the in-memory counters reset.
void Foam::LocalInteraction<CloudType>::info(Ostream& os)
{
    // retrieve any stored data
    labelList npe0(patchData_.size(), 0);
    this->getModelProperty("nEscape", npe0);
    scalarList mpe0(patchData_.size(), 0.0);
    this->getModelProperty("massEscape", mpe0);
    labelList nps0(patchData_.size(), 0);
    this->getModelProperty("nStick", nps0);
    scalarList mps0(patchData_.size(), 0.0);
    this->getModelProperty("massStick", mps0);

    // accumulate current data
    // Each list is gathered (element-wise sum) across processors before the
    // stored totals are added.
    labelList npe(nEscape_, 0);
    Pstream::listCombineGather(npe, plusEqOp<label>());
    npe = npe + npe0;

    scalarList mpe(massEscape_);
    Pstream::listCombineGather(mpe, plusEqOp<scalar>());
    mpe = mpe + mpe0;

    labelList nps(nStick_);
    Pstream::listCombineGather(nps, plusEqOp<label>());
    nps = nps + nps0;

    scalarList mps(massStick_);
    Pstream::listCombineGather(mps, plusEqOp<scalar>());
    mps = mps + mps0;

    forAll(patchData_, i)
    {
        os << " Parcel fate (number, mass) : patch " << patchData_[i].patchName() << nl
           << " - escape = " << npe[i] << ", " << mpe[i] << nl
           << " - stick = " << nps[i] << ", " << mps[i] << nl;
    }

    if (this->outputTime())
    {
        // Persist the accumulated totals and zero the in-memory counters so
        // the next accumulation window starts fresh.
        this->setModelProperty("nEscape", npe);
        nEscape_ = 0;
        this->setModelProperty("massEscape", mpe);
        massEscape_ = 0.0;
        this->setModelProperty("nStick", nps);
        nStick_ = 0;
        this->setModelProperty("massStick", mps);
        massStick_ = 0.0;
    }
}
// (Re)builds the way R-Tree from scratch via a single bulk insert. Declared
// const but mutates mutable cache state (_wayTree, _treeIdToWid, pending
// lists) — lazy-rebuild semantics; the pending queues are cleared because the
// fresh tree already reflects the whole map.
void OsmMapIndex::_buildWayTree() const
{
  QTime t;
  t.start();
  LOG_DEBUG("Building way R-Tree index");
  // 10 children - 368
  shared_ptr<MemoryPageStore> mps(new MemoryPageStore(728));
  _wayTree.reset(new HilbertRTree(mps, 2));

  vector<Box> boxes;
  vector<int> ids;

  const WayMap& ways = _map.getWays();

  _treeIdToWid.resize(0);
  _treeIdToWid.reserve(ways.size());
  boxes.reserve(ways.size());
  ids.reserve(ways.size());

  Box b(2);
  int count = 0;
  for (WayMap::const_iterator it = ways.begin(); it != ways.end(); ++it)
  {
    shared_ptr<const Way> w = it->second;

    shared_ptr<LineString> ls = ElementConverter(_map.shared_from_this()).convertToLineString(w);
    const Envelope* e = ls->getEnvelopeInternal();

    // Pad the way's envelope by its circular error so fuzzy lookups hit it.
    Meters a = w->getCircularError();
    b.setBounds(0, e->getMinX() - a, e->getMaxX() + a);
    b.setBounds(1, e->getMinY() - a, e->getMaxY() + a);
    boxes.push_back(b);
    ids.push_back(_createTreeWid(w->getId()));

    // Progress output (overwrites the same console line via \r).
    if (Log::getInstance().isDebugEnabled() && count % 1000 == 0)
    {
      cout << " building way R-Tree count: " << count << " / " << ways.size() << " \r";
      cout.flush();
    }
    count += 1;
  }
  if (Log::getInstance().isDebugEnabled())
  {
    cout << endl;
  }

  // The new tree reflects the full map; drop the deferred update queues.
  _pendingWayInsert.clear();
  _pendingWayRemoval.clear();

  LOG_DEBUG(" Doing bulk insert.");
  _wayTree->bulkInsert(boxes, ids);
  LOG_DEBUG(" Done. Time elapsed: " << t.elapsed() << "ms");
}
void CMainFrame::OnModal() { if (m_ModelessPS.IsWindow()) m_ModelessPS.Destroy(); CMyPropertySheet mps(_T("Modal Property Sheet"), this); mps.AddPage(new CButtonPage(IDD_BUTTONS, _T("Buttons"))); mps.AddPage(new CComboPage(IDD_COMBOBOXES, _T("Combo Boxes"))); mps.DoModal(); }
// (Re)builds the node R-Tree from scratch via a single bulk insert. Declared
// const but mutates mutable cache state (_nodeTree, _treeIdToNid, pending
// lists) — lazy-rebuild semantics, mirroring _buildWayTree.
void OsmMapIndex::_buildNodeTree() const
{
  QTime t;
  t.start();
  LOG_DEBUG("Building node R-Tree index");
  // 10 children - 368
  shared_ptr<MemoryPageStore> mps(new MemoryPageStore(728));
  _nodeTree.reset(new HilbertRTree(mps, 2));

  vector<Box> boxes;
  vector<int> ids;

  const OsmMap::NodeMap& nodes = _map.getNodeMap();

  _treeIdToNid.resize(0);
  _treeIdToNid.reserve(nodes.size());
  boxes.reserve(nodes.size());
  ids.reserve(nodes.size());

  Box b(2);
  int count = 0;
  for (QHash<long, boost::shared_ptr<Node> >::const_iterator it = nodes.constBegin();
    it != nodes.constEnd(); ++it)
  {
    shared_ptr<const Node> n = it.value();

    // Nodes are points: degenerate (zero-extent) boxes in both dimensions.
    b.setBounds(0, n->getX(), n->getX());
    b.setBounds(1, n->getY(), n->getY());
    boxes.push_back(b);
    ids.push_back(_createTreeNid(n->getId()));

    // Progress output (overwrites the same console line via \r).
    if (Log::getInstance().isDebugEnabled() && count % 1000 == 0)
    {
      cout << " building node R-Tree count: " << count << " / " << nodes.size() << " \r";
      cout.flush();
    }
    count += 1;
  }
  if (Log::getInstance().isDebugEnabled())
  {
    cout << endl;
  }

  // The new tree reflects the full map; drop the deferred update queues.
  _pendingNodeInsert.clear();
  _pendingNodeRemoval.clear();

  LOG_DEBUG(" Doing bulk insert.");
  _nodeTree->bulkInsert(boxes, ids);
  LOG_DEBUG(" Done. Time elapsed: " << t.elapsed() << "ms");
}
// Unit test for the Meterspersecond value type: copy construction and
// construction from integral and floating expressions.
void Meterspersecond_ut::test()
{
    const double EPSILON = 1e-12;

    // Copy construction preserves the value.
    Meterspersecond mps(1.0);
    CPPUNIT_ASSERT_DOUBLES_EQUAL(mps, Meterspersecond(mps), EPSILON);

    // Construction from an integral product.
    mps = Meterspersecond(2 * 3);
    CPPUNIT_ASSERT_DOUBLES_EQUAL(6.0, mps, EPSILON);

    // Construction from a floating quotient.
    mps = Meterspersecond(4.0 / 2);
    CPPUNIT_ASSERT_DOUBLES_EQUAL(2.0, mps, EPSILON);

    // Construction from a plain int. (The original code assigned this value
    // but never asserted on it, leaving the conversion untested.)
    mps = Meterspersecond(30);
    CPPUNIT_ASSERT_DOUBLES_EQUAL(30.0, mps, EPSILON);
}
/** This interface just available for testing. */
shared_ptr<Cursor> newQueryOptimizerCursor( const char *ns, const BSONObj &query, const BSONObj &order ) {
    // Build the plan scanner (mayYield == false) and hand it to the
    // scanner-based overload.
    auto_ptr<MultiPlanScanner> scanner( new MultiPlanScanner( ns, query, order ) );
    return newQueryOptimizerCursor( scanner );
}
void test2() { shared_ptr<PageStore>mps(new MemoryPageStore(368)); shared_ptr<RTreeNodeStore>store(new RTreeNodeStore(2, mps)); RTreeNode rtn(2, mps->createPage()); rtn.clear(); CPPUNIT_ASSERT_EQUAL(10, rtn.getMaxChildCount()); rtn.setParentId(3); CPPUNIT_ASSERT_EQUAL(3, rtn.getParentId()); // check for basic adding and removing of children. Box b1(2); b1.setBounds(0, 0, 1); b1.setBounds(1, 1, 2); rtn.addUserChild(b1, 21); CPPUNIT_ASSERT_EQUAL(1, rtn.getChildCount()); CPPUNIT_ASSERT_EQUAL(21, rtn.getChildUserId(0)); CPPUNIT_ASSERT(rtn.calculateEnvelope() == b1); CPPUNIT_ASSERT(rtn.getChildEnvelope(0).toBox() == b1); CPPUNIT_ASSERT_EQUAL(true, rtn.isLeafNode()); b1.setBounds(0, 3, 4); b1.setBounds(1, 5, 6); rtn.updateChildEnvelope(0, b1); CPPUNIT_ASSERT(rtn.calculateEnvelope() == b1); CPPUNIT_ASSERT(rtn.getChildEnvelope(0).toBox() == b1); b1.setBounds(0, 10, 11); b1.setBounds(1, 1, 1); rtn.addUserChild(b1, 22); Box b2(2); b2.setBounds(0, 3, 11); b2.setBounds(1, 1, 6); CPPUNIT_ASSERT_EQUAL(2, rtn.getChildCount()); CPPUNIT_ASSERT_EQUAL(22, rtn.getChildUserId(1)); CPPUNIT_ASSERT(rtn.calculateEnvelope() == b2); CPPUNIT_ASSERT(rtn.getChildEnvelope(1).toBox() == b1); CPPUNIT_ASSERT_EQUAL(true, rtn.isLeafNode()); rtn.removeChild(0); CPPUNIT_ASSERT_EQUAL(1, rtn.getChildCount()); CPPUNIT_ASSERT_EQUAL(22, rtn.getChildUserId(0)); CPPUNIT_ASSERT(rtn.calculateEnvelope() == b1); CPPUNIT_ASSERT(rtn.getChildEnvelope(0).toBox() == b1); CPPUNIT_ASSERT_EQUAL(true, rtn.isLeafNode()); rtn.removeChild(0); CPPUNIT_ASSERT_EQUAL(0, rtn.getChildCount()); CPPUNIT_ASSERT_EQUAL(false, rtn.calculateEnvelope().isValid()); CPPUNIT_ASSERT_EQUAL(true, rtn.isLeafNode()); for (int i = 20; i < 30; i++) { rtn.addUserChild(b1, i); CPPUNIT_ASSERT_EQUAL(i - 19, rtn.getChildCount()); } for (int i = 0; i < 10; i++) { CPPUNIT_ASSERT_EQUAL(i + 20, rtn.getChildUserId(i)); CPPUNIT_ASSERT(rtn.getChildEnvelope(i).toBox() == b1); } std::vector<int> ids; ids.push_back(2); ids.push_back(5); ids.push_back(7); 
ids.push_back(1); rtn.removeChildren(ids); CPPUNIT_ASSERT_EQUAL(6, rtn.getChildCount()); CPPUNIT_ASSERT_EQUAL(20, rtn.getChildUserId(0)); CPPUNIT_ASSERT_EQUAL(23, rtn.getChildUserId(1)); CPPUNIT_ASSERT_EQUAL(24, rtn.getChildUserId(2)); CPPUNIT_ASSERT_EQUAL(26, rtn.getChildUserId(3)); CPPUNIT_ASSERT_EQUAL(28, rtn.getChildUserId(4)); CPPUNIT_ASSERT_EQUAL(29, rtn.getChildUserId(5)); RTreeNode rtn2(2, mps->getPage(0)); CPPUNIT_ASSERT_EQUAL(3, rtn.getParentId()); CPPUNIT_ASSERT_EQUAL(6, rtn.getChildCount()); CPPUNIT_ASSERT_EQUAL(20, rtn.getChildUserId(0)); CPPUNIT_ASSERT_EQUAL(23, rtn.getChildUserId(1)); CPPUNIT_ASSERT_EQUAL(24, rtn.getChildUserId(2)); CPPUNIT_ASSERT_EQUAL(26, rtn.getChildUserId(3)); CPPUNIT_ASSERT_EQUAL(28, rtn.getChildUserId(4)); CPPUNIT_ASSERT_EQUAL(29, rtn.getChildUserId(5)); }
int maxPathSum(TreeNode *root) {
    // Start typing your C/C++ solution below
    // DO NOT write int main() function
    // The helper returns {best path anywhere, best downward path}; the answer
    // to the problem is the first component.
    PII best = mps(root);
    return best.first;
}
// Demo/test routine: sets up video and PostScript displays sharing a palette
// set, then draws a NBW.x x NBW.y grid of gray-filled, red-outlined
// sub-windows into the PostScript matrix window. The large block below is
// older demo code left commented out.
void lena_for_ever()
{
    Gray_Pal Pgray (30);
    Disc_Pal Pdisc = Disc_Pal::P8COL();

    // Both displays are loaded with the same palette set.
    Elise_Set_Of_Palette SOP(newl(Pgray)+Pdisc);
    Video_Display Ecr((char *) NULL);
    Ecr.load(SOP);
    Video_Win Wv (Ecr,SOP,Pt2di(50,50),Pt2di(SZX,SZY));

    PS_Display disp("TMP/test.ps","Mon beau fichier ps",SOP);

    // Matrix of NBW.x by NBW.y PostScript sub-windows of size 'sz'.
    Pt2di NBW(4,20);
    Pt2di sz(200,100);
    Mat_PS_Window mps(disp,sz,Pt2dr(2.0,2.0),NBW,Pt2dr(0.5,0.1));

    // Fill each cell with a gray level ramped by row, outlined in red.
    for (int x = 0; x < NBW.x ; x++)
        for (int y = 0; y < NBW.y ; y++)
        {
            mps(x,y).fill_rect(Pt2di(0,0),sz,Pgray((y*255)/( NBW.y-1)));
            mps(x,y).draw_rect(Pt2di(0,0),sz,Pdisc(P8COL::red));
        }

    /* PS_Window Wps = disp.w_centered_max(Pt2di(SZX,SZY),Pt2dr(0.0,8.0)); Elise_File_Im FLena("../IM_ELISE/lena",Pt2di(SZX,SZY),GenIm::u_int1); Im2D_U_INT1 I(SZX,SZY); Col_Pal red = Pdisc(P8COL::red); Col_Pal blue = Pdisc(P8COL::blue); Col_Pal green = Pdisc(P8COL::green); Col_Pal black = Pdisc(P8COL::black); Col_Pal cyan = Pdisc(P8COL::cyan); Col_Pal white = Pdisc(P8COL::white); Col_Pal yellow = Pdisc(P8COL::yellow); El_Window W = Wv|Wps; ELISE_COPY ( Wps.all_pts(), 32*(FLena.in()/32), W.ogray()|I.out() ); W.fill_rect(Pt2dr(100,200),Pt2dr(400,300),yellow); Line_St s1 (red,3); Line_St s2 (green,2); Plot_1d Plot1 (W,s1,s2,Interval(-10,10),newl(PlBox(50,50,300,400))); Plot1.show_axes(); Plot1.show_axes ( newl(PlBox(150,150,250,250)) + PlOriY(0.2) + PlAxeSty(s2) ); Plot_1d Plot2(W,s1,s2,Interval(-100,100)); Plot1.show_box ( newl(PlBox(150,150,250,250)) + PlBoxSty(cyan,2) ); Plot2.set ( newl(PlBox(20,200,400,400)) + PlOriY(0.4) + PlAxeSty(Line_St(Pdisc(P8COL::yellow),1)) + PlBoxSty(Line_St(black,2)) + PlClearSty(white) ); Plot2.show_axes(); Plot2.show_box(); Plot2.plot(10*sin(FX/4.0)); Plot2.plot ( 10*sin(FX/4.0), newl(PlIntervBoxX(-50,50)) + PlotLinSty(red,2) ); Plot2.plot ( 10*sin(FX/4.0), newl( PlotLinSty(blue,2) ) + PlIntervPlotX(-30,70) + PlAutoScalY(true) + PlShAxes(true) + PlAxeSty(cyan,3) ); Plot2.plot ( 10*(1.2+sin(FX/4.0)), newl( PlotLinSty(red,2) ) + PlAutoScalOriY(true) + PlShAxes(true) + PlAxeSty(cyan,2) + PlAutoClear(true) ); Plot2.clear(newl(PlClearSty(Pgray(196)))); Plot2.plot ( 50*cos(FX/9.0), newl(PlotLinSty(Pdisc(P8COL::red))) + PlClipY(false) + PlStepX(1.0) ); Plot2.plot ( 70*sin(square(FX) / 500.0), newl(PlotLinSty(Pdisc(P8COL::blue))) + PlClipY(true) + PlStepX(0.15) ); Plot2.set ( newl(PlBox(20,0,400,200)) ); Plot2.set(newl(PlIntervBoxX(-20,20))); Plot2.clear(); Plot2.plot ( 10 * cos(FX/4.0), newl(PlotFilSty(green)) + PlClipY(false) + PlModePl(Plots::fill_box) ); Plot2.plot ( 10 * cos(FX/4.0), newl(PlotFilSty(red)) + PlClipY(true) + PlModePl(Plots::fill_box) ); Plot2.plot ( 10 * cos(FX/4.0), newl(PlotLinSty(black,2)) + PlClipY(true) + PlModePl(Plots::draw_box) + PlShAxes(true) + PlAxeSty(Line_St(Pdisc(P8COL::blue),2)) ); cout << "aaaaaaaaaa\n"; getchar(); */
}
void MasterPly::Sampling (char* input, char* output) { MyMesh m; MyMesh subM; float rad = 0.f; if(tri::io::ImporterPLY<MyMesh>::Open(m,input)!=0) { cout << "Error reading file %s\n" << endl; return; } MyMesh vcgMesh; int verticeCount; int triangleCount; vcgMesh.Clear(); verticeCount=m.VN(); vcg::tri::Allocator<MyMesh>::AddVertices(vcgMesh,verticeCount); for(int i=0;i<verticeCount;i++){ vcgMesh.vert[i].P()=vcg::Point3f(m.vert[i].P()[0],m.vert[i].P()[1],m.vert[i].P()[2]); } tri::UpdateBounding<MyMesh>::Box(vcgMesh); // calculate radius rad = tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::ComputePoissonDiskRadius(vcgMesh, 100000); tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::SamplingRandomGenerator().initialize((unsigned int)time(0)); //sample point cloud std::vector<Point3f> sampleVec; tri::TrivialSampler<MyMesh> mps(sampleVec); // sampling tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::PoissonDiskParam pp; tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::PoissonDiskParam::Stat pds; pp.preGenMesh = &subM; //pp.pds=&pds; pp.bestSampleChoiceFlag=false; // start poisson disk prunning sampleVec.clear(); tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::PoissonDiskPruning(mps, vcgMesh, rad, pp); tri::Build(subM,sampleVec); // sample std::stringstream sample; sample << "sampled_" << output; //Build surface vcg::tri::UpdateBounding<MyMesh>::Box(subM); vcg::tri::UpdateNormal<MyMesh>::PerVertex(subM); //Initialization tri::BallPivoting<MyMesh> pivot(subM); //the main processing pivot.BuildMesh(); tri::io::ExporterPLY<MyMesh>::Save(subM,output,true); }
/**
 * Run a query with a cursor provided by the query optimizer, or FindingStartCursor.
 * @returns true if client cursor was saved, false if the query has completed.
 */
bool queryWithQueryOptimizer( int queryOptions, const string& ns,
                              const BSONObj &jsobj,
                              CurOp& curop,
                              const BSONObj &query, const BSONObj &order,
                              const shared_ptr<ParsedQuery> &pq_shared,
                              const ConfigVersion &shardingVersionAtStart,
                              const bool getCachedExplainPlan,
                              const bool inMultiStatementTxn,
                              Message &result ) {

    const ParsedQuery &pq( *pq_shared );
    shared_ptr<Cursor> cursor;
    QueryPlanSummary queryPlan;

    // numToReturn == 1 is treated as findOne(); such a query is never tailable.
    const bool tailable = pq.hasOption( QueryOption_CursorTailable ) && pq.getNumToReturn() != 1;

    LOG(1) << "query beginning read-only transaction. tailable: " << tailable << endl;

    // Capture the cached plan's explain summary before running, when requested.
    BSONObj oldPlan;
    if (getCachedExplainPlan) {
        scoped_ptr<MultiPlanScanner> mps( MultiPlanScanner::make( ns.c_str(), query, order ) );
        oldPlan = mps->cachedPlanExplainSummary();
    }

    cursor = getOptimizedCursor( ns.c_str(), query, order, QueryPlanSelectionPolicy::any(),
                                 pq_shared, false, &queryPlan );
    verify( cursor );

    // Tailable cursors must be marked as such before any use. This is so that
    // the implementation knows that uncommitted data cannot be returned.
    if ( tailable ) {
        cursor->setTailable();
    }

    scoped_ptr<QueryResponseBuilder> queryResponseBuilder
            ( QueryResponseBuilder::make( pq, cursor, queryPlan, oldPlan ) );
    bool saveClientCursor = false;
    int options = QueryOption_NoCursorTimeout;
    if (pq.hasOption( QueryOption_OplogReplay )) {
        options |= QueryOption_OplogReplay;
    }
    // create a client cursor that does not create a cursorID.
    // The cursor ID will be created if and only if we save
    // the client cursor further below
    ClientCursor::Holder ccPointer( new ClientCursor( options, cursor, ns, BSONObj(), false, false ) );

    // for oplog cursors, we check if we are reading data that is too old and might
    // be stale.
    bool opChecked = false;
    bool slaveLocationUpdated = false;
    BSONObj last;
    bool lastBSONObjSet = false;

    // Main result-gathering loop: advance the cursor until the first batch is
    // full, maxScan is exhausted, or the cursor runs out.
    for ( ; cursor->ok(); cursor->advance() ) {

        if ( pq.getMaxScan() && cursor->nscanned() > pq.getMaxScan() ) {
            break;
        }

        if ( !queryResponseBuilder->addMatch() ) {
            continue;
        }

        // Note slave's position in the oplog.
        if ( pq.hasOption( QueryOption_OplogReplay ) ) {
            BSONObj current = cursor->current();
            last = current.copy();
            lastBSONObjSet = true;

            // the first row returned is equal to the last element that
            // the slave has synced up to, so we might as well update
            // the slave location
            if (!slaveLocationUpdated) {
                ccPointer->updateSlaveLocation(curop);
                slaveLocationUpdated = true;
            }
            // check if data we are about to return may be too stale
            if (!opChecked) {
                ccPointer->storeOpForSlave(current);
                uassert(16785, "oplog cursor reading data that is too old", !ccPointer->lastOpForSlaveTooOld());
                opChecked = true;
            }
        }

        if ( pq.isExplain() ) {
            if ( queryResponseBuilder->enoughTotalResults() ) {
                break;
            }
        }
        else if ( queryResponseBuilder->enoughForFirstBatch() ) {
            // if only 1 requested, no cursor saved for efficiency...we assume it is findOne()
            if ( pq.wantMore() && pq.getNumToReturn() != 1 ) {
                queryResponseBuilder->finishedFirstBatch();
                if ( cursor->advance() ) {
                    saveClientCursor = true;
                }
            }
            break;
        }
    }

    // If the tailing request succeeded
    if ( cursor->tailable() ) {
        saveClientCursor = true;
    }

    if ( ! shardingState.getVersion( ns ).isWriteCompatibleWith( shardingVersionAtStart ) ) {
        // if the version changed during the query
        // we might be missing some data
        // and its safe to send this as mongos can resend
        // at this point
        throw SendStaleConfigException( ns , "version changed during initial query", shardingVersionAtStart, shardingState.getVersion( ns ) );
    }

    int nReturned = queryResponseBuilder->handoff( result );

    ccPointer.reset();
    long long cursorid = 0;
    if ( saveClientCursor ) {
        // Create a new ClientCursor, with a default timeout.
        ccPointer.reset( new ClientCursor( queryOptions, cursor, ns,
                                           jsobj.getOwned(), inMultiStatementTxn ) );
        cursorid = ccPointer->cursorid();
        DEV tlog(2) << "query has more, cursorid: " << cursorid << endl;

        if ( !ccPointer->ok() && ccPointer->c()->tailable() ) {
            DEV tlog() << "query has no more but tailable, cursorid: " << cursorid << endl;
        }

        if( queryOptions & QueryOption_Exhaust ) {
            curop.debug().exhaust = true;
        }

        // Set attributes for getMore.
        ccPointer->setChunkManager( queryResponseBuilder->chunkManager() );
        ccPointer->setPos( nReturned );
        ccPointer->pq = pq_shared;
        ccPointer->fields = pq.getFieldPtr();
        if (pq.hasOption( QueryOption_OplogReplay ) && lastBSONObjSet) {
            ccPointer->storeOpForSlave(last);
        }
        if (!inMultiStatementTxn) {
            // This cursor is not part of a multi-statement transaction, so
            // we pass off the current client's transaction stack to the
            // cursor so that it may be live as long as the cursor.
            cc().swapTransactionStack(ccPointer->transactions);
            verify(!cc().hasTxn());
        }
        ccPointer.release();
    }

    // Fill in the reply header for the client.
    QueryResult *qr = (QueryResult *) result.header();
    qr->cursorId = cursorid;
    curop.debug().cursorid = ( cursorid == 0 ? -1 : qr->cursorId );
    qr->setResultFlagsToOk();
    // qr->len is updated automatically by appendData()
    curop.debug().responseLength = qr->len;
    qr->setOperation(opReply);
    qr->startingFrom = 0;
    qr->nReturned = nReturned;

    curop.debug().nscanned = ( cursor ? cursor->nscanned() : 0LL );
    curop.debug().ntoskip = pq.getSkip();
    curop.debug().nreturned = nReturned;

    return saveClientCursor;
}
// Regression test: forces index-intersection plan selection on a single
// document, verifies MultiPlanStage picks the ixisect plan (with a backup
// plan), runs it to completion, and checks the backup plan is dropped once
// the blocking plan unblocks.
void run() {
    // Data is just a single {_id: 1, a: 1, b: 1} document.
    insert(BSON("_id" << 1 << "a" << 1 << "b" << 1));

    // Indices on 'a' and 'b'.
    addIndex(BSON("a" << 1));
    addIndex(BSON("b" << 1));

    AutoGetCollectionForRead ctx(&_txn, ns());
    Collection* collection = ctx.getCollection();

    // Query for both 'a' and 'b' and sort on 'b'.
    CanonicalQuery* cq;
    verify(CanonicalQuery::canonicalize(ns(),
                                        BSON("a" << 1 << "b" << 1), // query
                                        BSON("b" << 1), // sort
                                        BSONObj(), // proj
                                        &cq).isOK());
    ASSERT(NULL != cq);
    boost::scoped_ptr<CanonicalQuery> killCq(cq);

    // Force index intersection.
    bool forceIxisectOldValue = internalQueryForceIntersectionPlans;
    internalQueryForceIntersectionPlans = true;

    // Get planner params.
    QueryPlannerParams plannerParams;
    fillOutPlannerParams(&_txn, collection, cq, &plannerParams);
    // Turn this off otherwise it pops up in some plans.
    plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS;

    // Plan.
    vector<QuerySolution*> solutions;
    Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
    ASSERT(status.isOK());

    // We expect a plan using index {a: 1} and plan using index {b: 1} and
    // an index intersection plan.
    ASSERT_EQUALS(solutions.size(), 3U);

    // Fill out the MultiPlanStage.
    scoped_ptr<MultiPlanStage> mps(new MultiPlanStage(&_txn, collection, cq));
    scoped_ptr<WorkingSet> ws(new WorkingSet());
    // Put each solution from the planner into the MPR.
    for (size_t i = 0; i < solutions.size(); ++i) {
        PlanStage* root;
        ASSERT(StageBuilder::build(&_txn, collection, *solutions[i], ws.get(), &root));
        // Takes ownership of 'solutions[i]' and 'root'.
        mps->addPlan(solutions[i], root, ws.get());
    }

    // This sets a backup plan. NULL means that 'mps' will not yield.
    mps->pickBestPlan(NULL);
    ASSERT(mps->bestPlanChosen());
    ASSERT(mps->hasBackupPlan());

    // We should have picked the index intersection plan due to forcing ixisect.
    QuerySolution* soln = mps->bestSolution();
    ASSERT(QueryPlannerTestLib::solutionMatches(
        "{sort: {pattern: {b: 1}, limit: 0, node: "
        "{fetch: {filter: null, node: {andSorted: {nodes: ["
        "{ixscan: {filter: null, pattern: {a:1}}},"
        "{ixscan: {filter: null, pattern: {b:1}}}]}}}}}}",
        soln->root.get()));

    // Get the resulting document.
    PlanStage::StageState state = PlanStage::NEED_TIME;
    WorkingSetID wsid;
    while (state != PlanStage::ADVANCED) {
        state = mps->work(&wsid);
    }
    WorkingSetMember* member = ws->get(wsid);

    // Check the document returned by the query.
    ASSERT(member->hasObj());
    BSONObj expectedDoc = BSON("_id" << 1 << "a" << 1 << "b" << 1);
    ASSERT(expectedDoc.woCompare(member->obj) == 0);

    // The blocking plan became unblocked, so we should no longer have a backup plan,
    // and the winning plan should still be the index intersection one.
    ASSERT(!mps->hasBackupPlan());
    soln = mps->bestSolution();
    ASSERT(QueryPlannerTestLib::solutionMatches(
        "{sort: {pattern: {b: 1}, limit: 0, node: "
        "{fetch: {filter: null, node: {andSorted: {nodes: ["
        "{ixscan: {filter: null, pattern: {a:1}}},"
        "{ixscan: {filter: null, pattern: {b:1}}}]}}}}}}",
        soln->root.get()));

    // Restore index intersection force parameter.
    internalQueryForceIntersectionPlans = forceIxisectOldValue;
}
// Reads the PLY point cloud 'input', Poisson-disk subsamples it (~100000
// samples), reconstructs a surface with ball pivoting, and writes a debug PLY.
// NOTE(review): 'subM' is not declared locally, so it is presumably a class
// member — the returned pointer refers to member state; verify against the
// class definition, and note it is not cleared between calls.
// NOTE(review): the return code of Open() is ignored here (unlike the
// two-argument overload), so a failed read silently yields an empty mesh.
MyMesh* MasterPly::Sampling (string input)
{
    MyMesh m;
    float rad = 0.f;
    tri::io::ImporterPLY<MyMesh>::Open(m,input.c_str());

    MyMesh vcgMesh;
    int verticeCount;
    int triangleCount;

    /*MyMesh::VertexIterator vi = vcg::tri::Allocator<MyMesh>::AddVertices(vcgMesh,3); MyMesh::FaceIterator fi = vcg::tri::Allocator<MyMesh>::AddFaces(vcgMesh,1); MyMesh::VertexPointer ivp[4]; ivp[0]=&*vi; vi->P()=MyMesh::CoordType ( 0.0, 0.0, 0.0); ++vi; ivp[1]=&*vi; vi->P()=MyMesh::CoordType ( 1.0, 0.0, 0.0); ++vi; ivp[2]=&*vi; vi->P()=MyMesh::CoordType ( 0.0, 1.0, 0.0); ++vi; fi->V(0)=ivp[0]; fi->V(1)=ivp[1]; fi->V(2)=ivp[2];*/

    // Copy the input vertices (positions only) into a fresh mesh.
    vcgMesh.Clear();
    verticeCount=m.VN();
    vcg::tri::Allocator<MyMesh>::AddVertices(vcgMesh,verticeCount);
    for(int i=0;i<verticeCount;i++){
        vcgMesh.vert[i].P()=vcg::Point3f(m.vert[i].P()[0],m.vert[i].P()[1],m.vert[i].P()[2]);
    }

    /* triangleCount=m.FN(); vcg::tri::Allocator<MyMesh>::AddFaces(vcgMesh, triangleCount); for(int i=0;i<triangleCount;i++){ vcgMesh.face[i].V(0)=m.face[i].V(0); vcgMesh.face[i].V(1)=m.face[i].V(0); vcgMesh.face[i].V(2)=m.face[i].V(0); }*/

    tri::UpdateBounding<MyMesh>::Box(vcgMesh);

    // calculate radius (targets ~100000 Poisson-disk samples)
    rad = tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::ComputePoissonDiskRadius(vcgMesh, 100000);
    tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::SamplingRandomGenerator().initialize((unsigned int)time(0));

    //sample point cloud
    std::vector<Point3f> sampleVec;
    tri::TrivialSampler<MyMesh> mps(sampleVec);

    // sampling
    cout << "Subsampling a PointCloud" << endl;
    tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::PoissonDiskParam pp;
    tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::PoissonDiskParam::Stat pds;
    pp.preGenMesh = &subM;
    //pp.pds=&pds;
    pp.bestSampleChoiceFlag=false;

    // start poisson disk prunning
    sampleVec.clear();
    tri::SurfaceSampling<MyMesh,tri::TrivialSampler<MyMesh> >::PoissonDiskPruning(mps, vcgMesh, rad, pp);
    tri::Build(subM,sampleVec);

    // sample
    std::stringstream sample;

    //Build surface
    vcg::tri::UpdateBounding<MyMesh>::Box(subM);
    vcg::tri::UpdateNormal<MyMesh>::PerVertex(subM);

    //Initialization
    tri::BallPivoting<MyMesh> pivot(subM);
    cout << "Antes " << (unsigned int)time(0) <<endl;

    //the main processing
    pivot.BuildMesh();

    //output the result
    //tri::io::ExporterSTL<MyMesh>::Save(subM,output);
    cout << "Despues " << subM.VN() <<endl;
    tri::io::ExporterPLY<MyMesh>::Save(subM,"pruebaaaaaaa.ply",true);
    cout << "Fin!!!!" << endl;
    return &subM;
}
/**
 * Run a query -- includes checking for and running a Command.
 * @return points to ns if exhaust mode. 0=normal mode
 * @locks the db mutex for reading (and potentially for writing temporarily to create a new db).
 * @yields the db mutex periodically after acquiring it.
 * @asserts on scan and order memory exhaustion and other cases.
 */
const char *runQuery(Message& m, QueryMessage& q, CurOp& curop, Message &result) {
    shared_ptr<ParsedQuery> pq_shared( new ParsedQuery(q) );
    ParsedQuery& pq( *pq_shared );
    BSONObj jsobj = q.query;
    int queryOptions = q.queryOptions;
    const char *ns = q.ns;

    if( logLevel >= 2 )
        log() << "runQuery called " << ns << " " << jsobj << endl;

    curop.debug().ns = ns;
    curop.debug().ntoreturn = pq.getNumToReturn();
    curop.debug().query = jsobj;
    curop.setQuery(jsobj);

    // Run a command.
    if ( pq.couldBeCommand() ) {
        BufBuilder bb;
        bb.skip(sizeof(QueryResult));
        BSONObjBuilder cmdResBuf;
        if ( runCommands(ns, jsobj, curop, bb, cmdResBuf, false, queryOptions) ) {
            curop.debug().iscommand = true;
            curop.debug().query = jsobj;
            curop.markCommand();
            // Hand the command reply buffer off to the result message.
            auto_ptr< QueryResult > qr;
            qr.reset( (QueryResult *) bb.buf() );
            bb.decouple();
            qr->setResultFlagsToOk();
            qr->len = bb.len();
            curop.debug().responseLength = bb.len();
            qr->setOperation(opReply);
            qr->cursorId = 0;
            qr->startingFrom = 0;
            qr->nReturned = 1;
            result.setData( qr.release(), true );
        }
        else {
            uasserted(13530, "bad or malformed command request?");
        }
        return 0;
    }

    bool explain = pq.isExplain();
    BSONObj order = pq.getOrder();
    BSONObj query = pq.getFilter();

    /* The ElemIter will not be happy if this isn't really an object. So throw exception
       here when that is true. (Which may indicate bad data from client.)
    */
    if ( query.objsize() == 0 ) {
        out() << "Bad query object?\n jsobj:";
        out() << jsobj.toString() << "\n query:";
        out() << query.toString() << endl;
        uassert( 10110 , "bad query object", false);
    }

    Client::ReadContext ctx( ns , dbpath ); // read locks
    const ConfigVersion shardingVersionAtStart = shardingState.getVersion( ns );

    replVerifyReadsOk(&pq);

    // Tailable cursors require a capped collection and (implicitly) $natural order.
    if ( pq.hasOption( QueryOption_CursorTailable ) ) {
        NamespaceDetails *d = nsdetails( ns );
        uassert( 13051, "tailable cursor requested on non capped collection", d && d->isCapped() );
        const BSONObj nat1 = BSON( "$natural" << 1 );
        if ( order.isEmpty() ) {
            order = nat1;
        }
        else {
            uassert( 13052, "only {$natural:1} order allowed for tailable cursor", order == nat1 );
        }
    }

    // Run a simple id query.
    if ( ! (explain || pq.showDiskLoc()) && isSimpleIdQuery( query ) && !pq.hasOption( QueryOption_CursorTailable ) ) {
        int n = 0;
        bool nsFound = false;
        bool indexFound = false;

        BSONObj resObject;
        Client& c = cc();
        bool found = Helpers::findById( c, ns , query , resObject , &nsFound , &indexFound );
        if ( nsFound == false || indexFound == true ) {

            if ( shardingState.needShardChunkManager( ns ) ) {
                ShardChunkManagerPtr m = shardingState.getShardChunkManager( ns );
                if ( m && ! m->belongsToMe( resObject ) ) {
                    // I have something this _id
                    // but it doesn't belong to me
                    // so return nothing
                    resObject = BSONObj();
                    found = false;
                }
            }

            BufBuilder bb(sizeof(QueryResult)+resObject.objsize()+32);
            bb.skip(sizeof(QueryResult));

            curop.debug().idhack = true;
            if ( found ) {
                n = 1;
                fillQueryResultFromObj( bb , pq.getFields() , resObject );
            }

            auto_ptr< QueryResult > qr;
            qr.reset( (QueryResult *) bb.buf() );
            bb.decouple();
            qr->setResultFlagsToOk();
            qr->len = bb.len();

            curop.debug().responseLength = bb.len();
            qr->setOperation(opReply);
            qr->cursorId = 0;
            qr->startingFrom = 0;
            qr->nReturned = n;

            result.setData( qr.release(), true );
            return NULL;
        }
    }

    // Run a regular query.

    BSONObj oldPlan;
    if ( explain && ! pq.hasIndexSpecifier() ) {
        // Capture the cached plan's explain output before running the query.
        MultiPlanScanner mps( ns, query, order );
        if ( mps.usingCachedPlan() ) {
            oldPlan =
                mps.oldExplain().firstElement().embeddedObject()
                .firstElement().embeddedObject().getOwned();
        }
    }

    // In some cases the query may be retried if there is an in memory sort size assertion.
    for( int retry = 0; retry < 2; ++retry ) {
        try {
            // NOTE(review): this argument list does not match the
            // queryWithQueryOptimizer signature defined earlier in this file;
            // presumably another overload exists — verify.
            return queryWithQueryOptimizer( m, queryOptions, ns, jsobj, curop, query, order,
                                            pq_shared, oldPlan, shardingVersionAtStart, result );
        } catch ( const QueryRetryException & ) {
            // Only the first attempt may trigger a retry.
            verify( retry == 0 );
        }
    }
    verify( false );
    return 0;
}
/* Emit the dual of the current linear program (read into the ASL globals
 * Cgrad/Ograd/LUrhs/LUv/...) in one of several formats selected by op:
 *   'm'        -> MPS file via mps(), plus auxiliary .spc and .adj files;
 *   'b' / 'g'  -> binary / ASCII .nl file via nl_write().
 * In all cases a .duw file is written recording sizes and options needed to
 * recover primal values from the dual solution.
 *
 * NOTE: the return type and the #ifdef'd K&R-headers variant of this
 * signature appear above this chunk; the dangling #endif below closes it. */
dfmt(int op)
#endif
{
    cgrad **cg0, **cg01, *cgi, *cgi0, *cgi00, *cgj, **cgnew, **cgprev, **cgx, *free_cg;
    ograd *og;
    real *LU, *LUdv, *LUdvi, *LUdrhs, *LUdvxi, *LUrhse, *LUve;
    int Max, aextra, i, m, me, n, n1, nextra;
    FILE *f;
    real t;
    char *dvtype, *dvt;
    static char *AXIN[2] = { "AX", "IN" };  /* spliced into "M%sIMIZE" below */
    dvthead dvth;
    char buf[32];

    /* n = number of variables (max over constraints and objectives);
     * n1 is the index of an extra artificial variable used to carry
     * nonzero bound constants into the dual columns. */
    n = c_vars;
    if (n < o_vars)
        n = o_vars;
    n1 = n + 1;
    m = n_con;
    LUrhse = LUrhs + 2*m;   /* end of the (lo,hi) constraint-bound pairs */
    LUve = LUv + 2*n;       /* end of the (lo,hi) variable-bound pairs */

    /* First pass: count how many extra dual rows (nextra) and extra
     * gradient cells (aextra) will be needed, so everything can be
     * carved out of a single Malloc below.
     * NOTE(review): stray second ';' after the initialization (harmless). */
    aextra = nextra = 0;;
    for (LU = LUrhs, cgx = Cgrad; LU < LUrhse; LU += 2, cgx++)
        if (LU[0] > negInfinity) {
            if (LU[0])
                aextra++;
            if (LU[0] < LU[1] && LU[1] < Infinity) {
                /* range constraint: needs a second (negated) copy of the row */
                nextra++;
                if (LU[1])
                    aextra++;
                for(cgi = *cgx; cgi; cgi = cgi->next)
                    aextra++;
            }
            else if (LU[0])
                aextra++;
        }
        else if (LU[1])
            aextra++;
    for (LU = LUv; LU < LUve; LU += 2) {
        /* each finite variable bound becomes an extra dual row */
        if (LU[0] > negInfinity) {
            aextra++;
            nextra++;
            if (LU[0])
                aextra++;
        }
        if (LU[1] < Infinity) {
            aextra++;
            nextra++;
            if (LU[1])
                aextra++;
        }
    }
    me = m + nextra;    /* total dual columns = original rows + extras */

    /* One arena for everything; carved into, in order:
     *   LUdv   : 2*m   reals  -- dual-variable bounds for original rows
     *   LUdvxi : 2*nextra     -- dual-variable bounds for extra rows
     *   LUdrhs : 2*n   reals  -- dual rhs (from the objective gradient)
     *   free_cg: aextra cgrad -- pool for newly created gradient cells
     *   cg0    : me cgrad*    -- column heads (cgnew = tail for extras)
     *   dvtype : m bytes      -- per-row reconstruction code (0..3)
     * NOTE(review): never freed; presumably fine for a run-once driver. */
    LUdvi = LUdv = (real *)Malloc((me + n)*2*sizeof(real)
                                  + me*sizeof(cgrad *)
                                  + aextra*sizeof(cgrad) + m);
    LUdvxi = LUdv + 2*m;
    LUdrhs = LUdvxi + 2*nextra;
    free_cg = (cgrad *)(LUdrhs + 2*n);
    cg0 = cg01 = (cgrad **)(free_cg + aextra);
    cgnew = cg0 + m;
    dvt = dvtype = (char *)(cgnew + nextra);

    /* Second pass over the constraints: build the dual columns. */
    for (LU = LUrhs, cgx = Cgrad; LU < LUrhse; LU += 2, cgx++) {
        /* Reverse each gradient list in place and shift varno to 1-based
         * (cgi00 remembers the old head == new tail, used to append the
         * bound-constant cell below). */
        cgi0 = 0;
        for(cgi = cgi00 = *cgx; cgi; cgi = cgj) {
            cgj = cgi->next;
            cgi->next = cgi0;
            cgi0 = cgi;
            cgi->varno++;
        }
        *cg01++ = cgi0;
        if (LU[0] > negInfinity) {
            /* equality row => free dual variable; inequality => dual >= 0 */
            *LUdvi++ = LU[0] == LU[1] ? negInfinity : 0;
            *LUdvi++ = Infinity;
            if (LU[0] < LU[1] && LU[1] < Infinity) {
                /* range row: emit an extra negated copy of the column,
                 * with the upper bound folded in via artificial var n1 */
                *LUdvxi++ = 0;
                *LUdvxi++ = Infinity;
                cgprev = cgnew++;
                for(cgi = *cgx; cgi; cgi = cgi->next) {
                    *cgprev = cgj = free_cg++;
                    cgprev = &cgj->next;
                    cgj->varno = cgi->varno;
                    cgj->coef = -cgi->coef;
                }
                if (LU[1]) {
                    cgi = *cgprev = free_cg++;
                    cgi->varno = n1;
                    cgi->coef = -LU[1];
                    cgprev = &cgi->next;
                }
                *cgprev = 0;
                *dvt++ = 2;     /* dvtype 2 = range constraint */
            }
            else
                *dvt++ = LU[0] == LU[1] ? 3 : 0;  /* 3 = equality, 0 = >= */
            if (LU[0]) {
                /* append lower-bound constant as coefficient of var n1 */
                cgi = cgi00->next = free_cg++;
                cgi->varno = n1;
                cgi->coef = LU[0];
                cgi->next = 0;
            }
        }
        else {
            /* only an upper bound: negate the row so the dual var is >= 0 */
            *dvt++ = 1;         /* dvtype 1 = negated (<=) constraint */
            *LUdvi++ = 0;
            *LUdvi++ = Infinity;
            for(cgi = cgi0; cgi; cgi = cgi->next)
                cgi->coef = -cgi->coef;
            if (LU[1]) {
                cgi = cgi00->next = free_cg++;
                cgi->varno = n1;
                cgi->coef = -LU[1];
                cgi->next = 0;
            }
        }
    }

    /* Extra dual columns for finite variable bounds: +1 for a lower bound,
     * -1 for an upper bound, with the bound constant on var n1. */
    for (LU = LUv, i = 1; LU < LUve; LU += 2, i++) {
        if (LU[0] > negInfinity) {
            *LUdvxi++ = 0;
            *LUdvxi++ = Infinity;
            *cgnew++ = cgi = free_cg++;
            cgi->varno = i;
            cgi->coef = 1;
            if (LU[0]) {
                cgi = cgi->next = free_cg++;
                cgi->varno = n1;
                cgi->coef = LU[0];
            }
            cgi->next = 0;
        }
        if (LU[1] < Infinity) {
            *LUdvxi++ = 0;
            *LUdvxi++ = Infinity;
            *cgnew++ = cgi = free_cg++;
            cgi->varno = i;
            cgi->coef = -1;
            if (LU[1]) {
                cgi = cgi->next = free_cg++;
                cgi->varno = n1;
                cgi->coef = -LU[1];
            }
            cgi->next = 0;
        }
    }

    /* Dual rhs = primal objective gradient (lo == hi == coef). */
    memset(LUdrhs, 0, n*2*sizeof(real));
    if (objno >= 0)
        for(og = Ograd[objno]; og; og = og->next) {
            LU = LUdrhs + 2*og->varno;
            LU[0] = LU[1] = og->coef;
        }
    /* Maximization: flip all dual-variable bounds.
     * NOTE(review): intentional assignment-in-condition (old C idiom), but
     * objtype[objno] is read without the objno >= 0 guard used just above --
     * confirm objno is always valid (or Max is garbage when objno < 0). */
    if (Max = objtype[objno])
        for(LU = LUdv; LU < LUdrhs; LU += 2) {
            t = LU[0];
            LU[0] = -LU[1];
            LU[1] = -t;
        }

    /* Negate columns with lower bound -Infinity, finite upper bound. */
    /* This shouldn't be necessary, but shortens the MPS file */
    /* and may avoid bugs in some solvers. */
    for(cg01 = cg0, LU = LUdv; LU < LUdrhs; LU += 2, cg01++)
        if (LU[0] <= negInfinity && LU[1] < Infinity) {
            t = LU[0];
            LU[0] = -LU[1];
            LU[1] = -t;
            for(cgi = *cg01; cgi; cgi = cgi->next)
                cgi->coef = -cgi->coef;
        }

    if (op != 'm') {
        /* 'b' = binary .nl, 'g' = ASCII .nl (note: 'g' case falls out of the
         * switch without a break -- harmless, it is the last case). */
        switch(op) {
            case 'b': binary_nl = 1; break;
            case 'g': binary_nl = 0;
        }
        nl_write(n, me, cg0, LUdv, LUdrhs, Max);
    }
    else {
        /* MPS output plus .spc (problem sizes/sense) and .adj (objective
         * constant adjustment) side files. */
        mps(n, me, cg0, LUdv, LUdrhs);
        f = openfo(".spc", "w");
        fprintf(f, "BEGIN %s\nROWS %d\nCOLUMNS %d\n", Basename, n + 1, me+1);
        fprintf(f, "*true value: COLUMNS %d\n", me);
        fprintf(f, "ELEMENTS %d\nM%sIMIZE\nOBJECTIVE DUMMY\n",
                nzc + aextra, AXIN[Max]);
        fprintf(f, "END %s\n", Basename);
        fclose(f);
        f = openfo(".adj", "w");
        g_fmt(buf, objconst(objno));
        fprintf(f, "'objective' %s\n", buf);
        fclose(f);
    }

    /* .duw: binary header (sizes, options, sense) + per-row dvtype codes,
     * consumed later to translate the dual solution back to primal form. */
    f = openfo(".duw", "wb");
    for(i = 0; i < 10; i++)
        dvth.Options[i] = ampl_options[i];
    dvth.vbtol = ampl_vbtol;
    dvth.m = m;
    dvth.n = n;
    dvth.nextra = nextra;
    dvth.maxobj = Max;
    dvth.binary = binary_nl;
    fwrite(&dvth, sizeof(dvthead), 1, f);
    fwrite(dvtype, m, 1, f);
    fclose(f);
}