// Regression test: a FetchStage whose filter rejects the fetched document
// should return NEED_TIME (not ADVANCED) for that document, then EOF.
void run() {
    // Take an IX transaction plus an exclusive lock on the test DB, and
    // create the collection inside a WriteUnitOfWork if it doesn't exist.
    ScopedTransaction transaction(&_txn, MODE_IX);
    Lock::DBLock lk(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X);
    Client::Context ctx(&_txn, ns());
    Database* db = ctx.db();
    Collection* coll = db->getCollection(&_txn, ns());
    if (!coll) {
        WriteUnitOfWork wuow(&_txn);
        coll = db->createCollection(&_txn, ns());
        wuow.commit();
    }
    WorkingSet ws;

    // Add an object to the DB.
    insert(BSON("foo" << 5));
    set<DiskLoc> locs;
    getLocs(&locs, coll);
    ASSERT_EQUALS(size_t(1), locs.size());

    // Create a mock stage that returns the WSM.
    auto_ptr<MockStage> mockStage(new MockStage(&ws));

    // Mock data: a member in LOC_AND_IDX state, so the document itself has
    // not been fetched yet and FetchStage must do the fetch.
    {
        WorkingSetMember mockMember;
        mockMember.state = WorkingSetMember::LOC_AND_IDX;
        mockMember.loc = *locs.begin();

        // State is loc and index, shouldn't be able to get the foo data inside.
        BSONElement elt;
        ASSERT_FALSE(mockMember.getFieldDotted("foo", &elt));
        mockStage->pushBack(mockMember);
    }

    // Make the filter.
    BSONObj filterObj = BSON("foo" << 6);
    StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
    verify(swme.isOK());
    auto_ptr<MatchExpression> filterExpr(swme.getValue());

    // Matcher requires that foo==6 but we only have data with foo==5.
    auto_ptr<FetchStage> fetchStage(
        new FetchStage(&_txn, &ws, mockStage.release(), filterExpr.get(), coll));

    // First call should return a fetch request as it's not in memory.
    WorkingSetID id = WorkingSet::INVALID_ID;
    PlanStage::StageState state;

    // Normally we'd return the object but we have a filter that prevents it.
    state = fetchStage->work(&id);
    ASSERT_EQUALS(PlanStage::NEED_TIME, state);

    // No more data to fetch, so, EOF.
    state = fetchStage->work(&id);
    ASSERT_EQUALS(PlanStage::IS_EOF, state);
}
// Tests that a forward CollectionScan survives the invalidation (deletion)
// of its next-to-be-returned document across a saveState/restoreState cycle:
// the deleted doc is skipped and all remaining docs are still returned.
void run() {
    Client::WriteContext ctx(&_txn, ns());
    Collection* coll = ctx.ctx().db()->getCollection( &_txn, ns() );

    // Get the DiskLocs that would be returned by an in-order scan.
    vector<DiskLoc> locs;
    getLocs(coll, CollectionScanParams::FORWARD, &locs);

    // Configure the scan.
    CollectionScanParams params;
    params.collection = coll;
    params.direction = CollectionScanParams::FORWARD;
    params.tailable = false;

    WorkingSet ws;
    scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));

    // Pull the first 10 results; each ADVANCED result must match the
    // corresponding in-order DiskLoc's document.
    int count = 0;
    while (count < 10) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState state = scan->work(&id);
        if (PlanStage::ADVANCED == state) {
            WorkingSetMember* member = ws.get(id);
            ASSERT_EQUALS(coll->docFor(&_txn, locs[count])["foo"].numberInt(),
                          member->obj["foo"].numberInt());
            ++count;
        }
    }

    // Remove locs[count]: save the scan's state, notify it of the deletion,
    // actually delete the doc, then restore.
    scan->saveState();
    scan->invalidate(locs[count], INVALIDATION_DELETION);
    remove(coll->docFor(&_txn, locs[count]));
    scan->restoreState(&_txn);

    // Skip over locs[count].
    ++count;

    // Expect the rest.
    while (!scan->isEOF()) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState state = scan->work(&id);
        if (PlanStage::ADVANCED == state) {
            WorkingSetMember* member = ws.get(id);
            ASSERT_EQUALS(coll->docFor(&_txn, locs[count])["foo"].numberInt(),
                          member->obj["foo"].numberInt());
            ++count;
        }
    }
    ctx.commit();

    // Every document, including the deleted one we skipped, is accounted for.
    ASSERT_EQUALS(numObj(), count);
}
// Tests that FetchStage passes through members that already carry an object
// (LOC_AND_UNOWNED_OBJ and OWNED_OBJ states) without doing any fetching.
void run() {
    Client::WriteContext ctx(&_txn, ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(&_txn, ns());
    if (!coll) {
        WriteUnitOfWork wuow(&_txn);
        coll = db->createCollection(&_txn, ns());
        wuow.commit();
    }
    WorkingSet ws;

    // Add an object to the DB.
    insert(BSON("foo" << 5));
    set<DiskLoc> locs;
    getLocs(&locs, coll);
    ASSERT_EQUALS(size_t(1), locs.size());

    // Create a mock stage that returns the WSM.
    auto_ptr<MockStage> mockStage(new MockStage(&ws));

    // Mock data: first a loc+unowned-obj member backed by the collection,
    // then an owned-obj member with no loc at all.
    {
        WorkingSetMember mockMember;
        mockMember.state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
        mockMember.loc = *locs.begin();
        mockMember.obj = coll->docFor(&_txn, mockMember.loc);
        // Points into our DB.
        mockStage->pushBack(mockMember);

        mockMember.state = WorkingSetMember::OWNED_OBJ;
        mockMember.loc = DiskLoc();
        mockMember.obj = BSON("foo" << 6);
        ASSERT_TRUE(mockMember.obj.isOwned());
        mockStage->pushBack(mockMember);
    }

    // No filter (NULL): everything should be passed through.
    auto_ptr<FetchStage> fetchStage(new FetchStage(&_txn, &ws, mockStage.release(), NULL, coll));

    WorkingSetID id = WorkingSet::INVALID_ID;
    PlanStage::StageState state;

    // Don't bother doing any fetching if an obj exists already.
    state = fetchStage->work(&id);
    ASSERT_EQUALS(PlanStage::ADVANCED, state);
    state = fetchStage->work(&id);
    ASSERT_EQUALS(PlanStage::ADVANCED, state);

    // No more data to fetch, so, EOF.
    state = fetchStage->work(&id);
    ASSERT_EQUALS(PlanStage::IS_EOF, state);
}
/**
 * Feeds the sort stage's mock child one OWNED_OBJ copy of every document
 * currently in the collection (one WorkingSetMember per DiskLoc).
 *
 * NOTE(review): despite the original comment claiming a mix of
 * (key, unowned, owned) data, this overload only pushes owned objects.
 */
void insertVarietyOfObjects(MockStage* ms) {
    set<DiskLoc> locs;
    getLocs(&locs);

    set<DiskLoc>::iterator it = locs.begin();

    for (int i = 0; i < numObj(); ++i, ++it) {
        // Guard against numObj() exceeding the number of locs; keeps this
        // overload consistent with the (MockStage*, Collection*) overload.
        ASSERT_FALSE(it == locs.end());

        // Insert some owned obj data.
        WorkingSetMember member;
        member.state = WorkingSetMember::OWNED_OBJ;
        member.obj = it->obj().getOwned();
        ASSERT(member.obj.isOwned());
        ms->pushBack(member);
    }
}
/**
 * Feeds the sort stage's mock child one LOC_AND_UNOWNED_OBJ member per
 * document in 'coll' (the obj points into the collection's storage).
 */
void insertVarietyOfObjects(MockStage* ms, Collection* coll) {
    set<DiskLoc> locs;
    getLocs(&locs, coll);

    set<DiskLoc>::iterator it = locs.begin();

    for (int i = 0; i < numObj(); ++i, ++it) {
        // numObj() documents must all exist in the collection.
        ASSERT_FALSE(it == locs.end());

        // Insert some owned obj data.
        WorkingSetMember member;
        member.loc = *it;
        member.state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
        member.obj = coll->docFor(&_txn, *it);
        ms->pushBack(member);
    }
}
// testcount is a wrapper around runCount that // - sets up a countStage // - runs it // - asserts count is not trivial // - asserts nCounted is equal to expected_n // - asserts nSkipped is correct void testCount(const CountRequest& request, int expected_n=kDocuments, bool indexed=false) { setup(); getLocs(); auto_ptr<WorkingSet> ws(new WorkingSet); StatusWithMatchExpression swme = MatchExpressionParser::parse(request.query); auto_ptr<MatchExpression> expression(swme.getValue()); PlanStage* scan; if (indexed) { scan = createIndexScan(expression.get(), ws.get()); } else { scan = createCollScan(expression.get(), ws.get()); } CountStage countStage(&_txn, _coll, request, ws.get(), scan); const CountStats* stats = runCount(countStage); ASSERT_FALSE(stats->trivialCount); ASSERT_EQUALS(stats->nCounted, expected_n); ASSERT_EQUALS(stats->nSkipped, request.skip); }
// Tests MergeSortStage over 20 single-doc index scans: results arrive in
// insertion (DiskLoc) order, survive an invalidation mid-stream during a
// yield, and the invalidated doc's slot is skipped.
void run() {
    Client::WriteContext ctx(&_txn, ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(&_txn, ns());
    if (!coll) {
        coll = db->createCollection(&_txn, ns());
    }

    WorkingSet ws;
    // Sort by foo:1
    MergeSortStageParams msparams;
    msparams.pattern = BSON("foo" << 1);
    auto_ptr<MergeSortStage> ms(new MergeSortStage(msparams, &ws, coll));

    // Shared scan bounds: full index range, ascending.
    IndexScanParams params;
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = objWithMinKey(1);
    params.bounds.endKey = objWithMaxKey(1);
    params.bounds.endKeyInclusive = true;
    params.direction = 1;

    // Index 'a'+i has foo equal to 'i'.
    int numIndices = 20;
    for (int i = 0; i < numIndices; ++i) {
        // 'a', 'b', ...
        string index(1, 'a' + i);
        insert(BSON(index << 1 << "foo" << i));

        BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
        addIndex(indexSpec);

        params.descriptor = getIndex(indexSpec, coll);
        ms->addChild(new IndexScan(&_txn, params, &ws, NULL));
    }

    set<DiskLoc> locs;
    getLocs(&locs, coll);
    set<DiskLoc>::iterator it = locs.begin();
    ctx.commit();

    // Get 10 results.  Should be getting results in order of 'locs'.
    int count = 0;
    while (!ms->isEOF() && count < 10) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState status = ms->work(&id);
        if (PlanStage::ADVANCED != status) { continue; }

        WorkingSetMember* member = ws.get(id);
        ASSERT_EQUALS(member->loc, *it);
        BSONElement elt;
        string index(1, 'a' + count);
        ASSERT(member->getFieldDotted(index, &elt));
        ASSERT_EQUALS(1, elt.numberInt());
        ASSERT(member->getFieldDotted("foo", &elt));
        ASSERT_EQUALS(count, elt.numberInt());
        ++count;
        ++it;
    }

    // Invalidate locs[11].  Should force a fetch.  We don't get it back.
    ms->prepareToYield();
    ms->invalidate(*it, INVALIDATION_DELETION);
    ms->recoverFromYield(&_txn);

    // Make sure locs[11] was fetched for us.
    {
        // TODO: If we have "return upon invalidation" ever triggerable, do the following test.
        /*
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState status;
        do {
            status = ms->work(&id);
        } while (PlanStage::ADVANCED != status);

        WorkingSetMember* member = ws.get(id);
        ASSERT(!member->hasLoc());
        ASSERT(member->hasObj());
        string index(1, 'a' + count);
        BSONElement elt;
        ASSERT_TRUE(member->getFieldDotted(index, &elt));
        ASSERT_EQUALS(1, elt.numberInt());
        ASSERT(member->getFieldDotted("foo", &elt));
        ASSERT_EQUALS(count, elt.numberInt());
        */

        // Skip the invalidated slot in both the loc iterator and the count.
        ++it;
        ++count;
    }

    // And get the rest.
    while (!ms->isEOF()) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState status = ms->work(&id);
        if (PlanStage::ADVANCED != status) { continue; }

        WorkingSetMember* member = ws.get(id);
        ASSERT_EQUALS(member->loc, *it);
        BSONElement elt;
        string index(1, 'a' + count);
        ASSERT_TRUE(member->getFieldDotted(index, &elt));
        ASSERT_EQUALS(1, elt.numberInt());
        ASSERT(member->getFieldDotted("foo", &elt));
        ASSERT_EQUALS(count, elt.numberInt());
        ++count;
        ++it;
    }
}
// Read/compile/link shaders SHADER_MODE pie_LoadShader(const char *programName, const char *vertexPath, const char *fragmentPath, const std::vector<std::string> &uniformNames) { pie_internal::SHADER_PROGRAM program; GLint status; bool success = true; // Assume overall success char *buffer[2]; program.program = glCreateProgram(); glBindAttribLocation(program.program, 0, "vertex"); glBindAttribLocation(program.program, 1, "vertexTexCoord"); glBindAttribLocation(program.program, 2, "vertexColor"); ASSERT_OR_RETURN(SHADER_NONE, program.program, "Could not create shader program!"); *buffer = (char *)""; if (vertexPath) { success = false; // Assume failure before reading shader file if ((*(buffer + 1) = readShaderBuf(vertexPath))) { GLuint shader = glCreateShader(GL_VERTEX_SHADER); glShaderSource(shader, 2, (const char **)buffer, nullptr); glCompileShader(shader); // Check for compilation errors glGetShaderiv(shader, GL_COMPILE_STATUS, &status); if (!status) { debug(LOG_ERROR, "Vertex shader compilation has failed [%s]", vertexPath); printShaderInfoLog(LOG_ERROR, shader); } else { printShaderInfoLog(LOG_3D, shader); glAttachShader(program.program, shader); success = true; } if (GLEW_VERSION_4_3 || GLEW_KHR_debug) { glObjectLabel(GL_SHADER, shader, -1, vertexPath); } free(*(buffer + 1)); } } if (success && fragmentPath) { success = false; // Assume failure before reading shader file if ((*(buffer + 1) = readShaderBuf(fragmentPath))) { GLuint shader = glCreateShader(GL_FRAGMENT_SHADER); glShaderSource(shader, 2, (const char **)buffer, nullptr); glCompileShader(shader); // Check for compilation errors glGetShaderiv(shader, GL_COMPILE_STATUS, &status); if (!status) { debug(LOG_ERROR, "Fragment shader compilation has failed [%s]", fragmentPath); printShaderInfoLog(LOG_ERROR, shader); } else { printShaderInfoLog(LOG_3D, shader); glAttachShader(program.program, shader); success = true; } if (GLEW_VERSION_4_3 || GLEW_KHR_debug) { glObjectLabel(GL_SHADER, shader, -1, 
fragmentPath); } free(*(buffer + 1)); } } if (success) { glLinkProgram(program.program); // Check for linkage errors glGetProgramiv(program.program, GL_LINK_STATUS, &status); if (!status) { debug(LOG_ERROR, "Shader program linkage has failed [%s, %s]", vertexPath, fragmentPath); printProgramInfoLog(LOG_ERROR, program.program); success = false; } else { printProgramInfoLog(LOG_3D, program.program); } if (GLEW_VERSION_4_3 || GLEW_KHR_debug) { glObjectLabel(GL_PROGRAM, program.program, -1, programName); } } GLuint p = program.program; std::transform(uniformNames.begin(), uniformNames.end(), std::back_inserter(program.locations), [p](const std::string name) { return glGetUniformLocation(p, name.data()); }); getLocs(&program); glUseProgram(0); pie_internal::shaderProgram.push_back(program); return SHADER_MODE(pie_internal::shaderProgram.size() - 1); }
// Tests that SortStage handles invalidation of every buffered loc across
// saveState/restoreState: invalidated members are fetched into owned objs
// and still flow through, so the full (possibly limited) result set appears.
void run() {
    Client::WriteContext ctx(&_txn, ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(&_txn, ns());
    if (!coll) {
        coll = db->createCollection(&_txn, ns());
    }

    fillData();

    // The data we're going to later invalidate.
    set<DiskLoc> locs;
    getLocs(&locs, coll);

    // Build the mock scan stage which feeds the data.
    WorkingSet ws;
    auto_ptr<MockStage> ms(new MockStage(&ws));
    insertVarietyOfObjects(ms.get(), coll);

    SortStageParams params;
    params.collection = coll;
    params.pattern = BSON("foo" << 1);
    params.limit = limit();
    auto_ptr<SortStage> ss(new SortStage(&_txn, params, &ws, ms.get()));

    const int firstRead = 10;

    // Have sort read in data from the mock stage.  A blocking sort never
    // ADVANCEs while it is still consuming input.
    for (int i = 0; i < firstRead; ++i) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState status = ss->work(&id);
        ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status);
    }

    // We should have read in the first 'firstRead' locs. Invalidate the first.
    ss->saveState();
    set<DiskLoc>::iterator it = locs.begin();
    ss->invalidate(*it++, INVALIDATION_DELETION);
    ss->restoreState(&_txn);

    // Read the rest of the data from the mock stage.
    while (!ms->isEOF()) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        ss->work(&id);
    }

    // Release to prevent double-deletion.
    ms.release();

    // Let's just invalidate everything now.
    ss->saveState();
    while (it != locs.end()) {
        ss->invalidate(*it++, INVALIDATION_DELETION);
    }
    ss->restoreState(&_txn);

    // Invalidation of data in the sort stage fetches it but passes it through.
    int count = 0;
    while (!ss->isEOF()) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState status = ss->work(&id);
        if (PlanStage::ADVANCED != status) { continue; }
        WorkingSetMember* member = ws.get(id);
        // Fetched-on-invalidation members own their obj and have no loc.
        ASSERT(member->hasObj());
        ASSERT(!member->hasLoc());
        ++count;
    }
    ctx.commit();

    // Returns all docs.
    ASSERT_EQUALS(limit() ? limit() : numObj(), count);
}
// Tests AndSortedStage under invalidation: deleting the stage's current
// target doc flags it (it still matches both children), while deleting a
// not-yet-reached doc silently drops it from the result set.
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    // Insert a bunch of data
    for (int i = 0; i < 50; ++i) {
        insert(BSON("foo" << 1 << "bar" << 1));
    }
    addIndex(BSON("foo" << 1));
    addIndex(BSON("bar" << 1));

    WorkingSet ws;
    scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL));

    // Scan over foo == 1
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 1);
    params.bounds.endKey = BSON("" << 1);
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Scan over bar == 1
    params.descriptor = getIndex(BSON("bar" << 1), coll);
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Get the set of disklocs in our collection to use later.
    set<DiskLoc> data;
    getLocs(&data, coll);

    // We're making an assumption here that happens to be true because we clear out the
    // collection before running this: increasing inserts have increasing DiskLocs.
    // This isn't true in general if the collection is not dropped beforehand.
    WorkingSetID id;

    // Sorted AND looks at the first child, which is an index scan over foo==1.
    ah->work(&id);

    // The first thing that the index scan returns (due to increasing DiskLoc trick) is the
    // very first insert, which should be the very first thing in data.  Let's invalidate it
    // and make sure it shows up in the flagged results.
    ah->prepareToYield();
    ah->invalidate(*data.begin());
    remove(data.begin()->obj());
    ah->recoverFromYield();

    // Make sure the nuked obj is actually in the flagged data.
    ASSERT_EQUALS(ws.getFlagged().size(), size_t(1));
    WorkingSetMember* member = ws.get(*ws.getFlagged().begin());
    ASSERT_EQUALS(WorkingSetMember::OWNED_OBJ, member->state);
    BSONElement elt;
    ASSERT_TRUE(member->getFieldDotted("foo", &elt));
    ASSERT_EQUALS(1, elt.numberInt());
    ASSERT_TRUE(member->getFieldDotted("bar", &elt));
    ASSERT_EQUALS(1, elt.numberInt());

    set<DiskLoc>::iterator it = data.begin();

    // Proceed along, AND-ing results.
    int count = 0;
    while (!ah->isEOF() && count < 10) {
        WorkingSetID id;
        PlanStage::StageState status = ah->work(&id);
        if (PlanStage::ADVANCED != status) { continue; }

        ++count;
        ++it;

        member = ws.get(id);
        ASSERT_TRUE(member->getFieldDotted("foo", &elt));
        ASSERT_EQUALS(1, elt.numberInt());
        ASSERT_TRUE(member->getFieldDotted("bar", &elt));
        ASSERT_EQUALS(1, elt.numberInt());
        ASSERT_EQUALS(member->loc, *it);
    }

    // Move 'it' to a result that's yet to show up.
    for (int i = 0; i < count + 10; ++i) {
        ++it;
    }
    // Remove a result that's coming up.  It's not the 'target' result of the AND so it's
    // not flagged.
    ah->prepareToYield();
    ah->invalidate(*it);
    remove(it->obj());
    ah->recoverFromYield();

    // Get all results aside from the two we killed.
    while (!ah->isEOF()) {
        WorkingSetID id;
        PlanStage::StageState status = ah->work(&id);
        if (PlanStage::ADVANCED != status) { continue; }

        ++count;
        member = ws.get(id);
        ASSERT_TRUE(member->getFieldDotted("foo", &elt));
        ASSERT_EQUALS(1, elt.numberInt());
        ASSERT_TRUE(member->getFieldDotted("bar", &elt));
        ASSERT_EQUALS(1, elt.numberInt());
    }

    // 50 docs, minus the two deleted mid-run.
    ASSERT_EQUALS(count, 48);
    // Only the first deletion (the current target) was flagged.
    ASSERT_EQUALS(size_t(1), ws.getFlagged().size());
}
// Tests AndHashStage invalidation mid-hash: a doc already read into the hash
// table that is deleted during a yield must be flagged for review and
// excluded from the AND's results.
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    // foo == bar for every doc, values 0..49.
    for (int i = 0; i < 50; ++i) {
        insert(BSON("foo" << i << "bar" << i));
    }

    addIndex(BSON("foo" << 1));
    addIndex(BSON("bar" << 1));

    WorkingSet ws;
    scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));

    // Foo <= 20
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 20);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = -1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Bar >= 10
    params.descriptor = getIndex(BSON("bar" << 1), coll);
    params.bounds.startKey = BSON("" << 10);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // ah reads the first child into its hash table.
    // ah should read foo=20, foo=19, ..., foo=0 in that order.
    // Read half of them...
    for (int i = 0; i < 10; ++i) {
        WorkingSetID out;
        PlanStage::StageState status = ah->work(&out);
        ASSERT_EQUALS(PlanStage::NEED_TIME, status);
    }

    // ...yield
    ah->prepareToYield();
    // ...invalidate one of the read objects
    set<DiskLoc> data;
    getLocs(&data, coll);
    for (set<DiskLoc>::const_iterator it = data.begin(); it != data.end(); ++it) {
        if (it->obj()["foo"].numberInt() == 15) {
            ah->invalidate(*it);
            remove(it->obj());
            break;
        }
    }
    ah->recoverFromYield();

    // And expect to find foo==15 it flagged for review.
    const unordered_set<WorkingSetID>& flagged = ws.getFlagged();
    ASSERT_EQUALS(size_t(1), flagged.size());

    // Expect to find the right value of foo in the flagged item.
    WorkingSetMember* member = ws.get(*flagged.begin());
    ASSERT_TRUE(NULL != member);
    // Invalidation copies the doc out of the collection into an owned obj.
    ASSERT_EQUALS(WorkingSetMember::OWNED_OBJ, member->state);
    BSONElement elt;
    ASSERT_TRUE(member->getFieldDotted("foo", &elt));
    ASSERT_EQUALS(15, elt.numberInt());

    // Now, finish up the AND.  Since foo == bar, we would have 11 results, but we subtract
    // one because of a mid-plan invalidation, so 10.
    int count = 0;
    while (!ah->isEOF()) {
        WorkingSetID id;
        PlanStage::StageState status = ah->work(&id);
        if (PlanStage::ADVANCED != status) { continue; }

        ++count;
        member = ws.get(id);

        ASSERT_TRUE(member->getFieldDotted("foo", &elt));
        ASSERT_LESS_THAN_OR_EQUALS(elt.numberInt(), 20);
        ASSERT_NOT_EQUALS(15, elt.numberInt());
        ASSERT_TRUE(member->getFieldDotted("bar", &elt));
        ASSERT_GREATER_THAN_OR_EQUALS(elt.numberInt(), 10);
    }

    ASSERT_EQUALS(10, count);
}
// Read/compile/link shaders.
//
// Older variant of pie_LoadShader: builds a GL program from optional
// vertex/fragment shader files, logs compile/link diagnostics, appends the
// program to the global 'shaderProgram' list and returns its index.
//
// NOTE(review): the failure path returns 'false' (i.e. 0) although the
// declared return type is GLuint — index 0 and "failed" are
// indistinguishable to callers; confirm whether slot 0 is reserved.
GLuint pie_LoadShader(const char *programName, const char *vertexPath, const char *fragmentPath)
{
    SHADER_PROGRAM program;
    GLint status;
    bool success = true; // Assume overall success
    // buffer[0]: shared source prefix (empty); buffer[1]: file contents.
    char *buffer[2];

    memset(&program, 0, sizeof(program));

    program.program = glCreateProgram();
    ASSERT_OR_RETURN(false, program.program, "Could not create shader program!");

    *buffer = (char *)"";

    if (vertexPath)
    {
        success = false; // Assume failure before reading shader file
        if ((*(buffer + 1) = readShaderBuf(vertexPath)))
        {
            GLuint shader = glCreateShader(GL_VERTEX_SHADER);
            glShaderSource(shader, 2, (const char **)buffer, NULL);
            glCompileShader(shader);

            // Check for compilation errors
            glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
            if (!status)
            {
                debug(LOG_ERROR, "Vertex shader compilation has failed [%s]", vertexPath);
                printShaderInfoLog(LOG_ERROR, shader);
            }
            else
            {
                printShaderInfoLog(LOG_3D, shader);
                glAttachShader(program.program, shader);
                success = true;
            }
            if (GLEW_VERSION_4_3 || GLEW_KHR_debug)
            {
                glObjectLabel(GL_SHADER, shader, -1, vertexPath);
            }
            free(*(buffer + 1));
        }
    }
    if (success && fragmentPath)
    {
        success = false; // Assume failure before reading shader file
        if ((*(buffer + 1) = readShaderBuf(fragmentPath)))
        {
            GLuint shader = glCreateShader(GL_FRAGMENT_SHADER);
            glShaderSource(shader, 2, (const char **)buffer, NULL);
            glCompileShader(shader);

            // Check for compilation errors
            glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
            if (!status)
            {
                debug(LOG_ERROR, "Fragment shader compilation has failed [%s]", fragmentPath);
                printShaderInfoLog(LOG_ERROR, shader);
            }
            else
            {
                printShaderInfoLog(LOG_3D, shader);
                glAttachShader(program.program, shader);
                success = true;
            }
            if (GLEW_VERSION_4_3 || GLEW_KHR_debug)
            {
                glObjectLabel(GL_SHADER, shader, -1, fragmentPath);
            }
            free(*(buffer + 1));
        }
    }
    if (success)
    {
        glLinkProgram(program.program);

        // Check for linkage errors
        glGetProgramiv(program.program, GL_LINK_STATUS, &status);
        if (!status)
        {
            debug(LOG_ERROR, "Shader program linkage has failed [%s, %s]", vertexPath, fragmentPath);
            printProgramInfoLog(LOG_ERROR, program.program);
            success = false;
        }
        else
        {
            printProgramInfoLog(LOG_3D, program.program);
        }
        if (GLEW_VERSION_4_3 || GLEW_KHR_debug)
        {
            glObjectLabel(GL_PROGRAM, program.program, -1, programName);
        }
    }

    getLocs(&program);
    glUseProgram(0);

    shaderProgram.append(program);
    return shaderProgram.size() - 1;
}
// Tests a multi-update ({foo:{$lt:5}} -> {$set:{bar:3}}) that has one of its
// matching documents deleted mid-run via invalidate: the update skips the
// deleted doc (4 of 5 modified) and the collection ends with 9 docs.
void run() {
    // Run the update.
    {
        Client::WriteContext ctx(&_txn, ns());

        // Populate the collection.
        for (int i = 0; i < 10; ++i) {
            insert(BSON("_id" << i << "foo" << i));
        }
        ASSERT_EQUALS(10U, count(BSONObj()));

        Client& c = cc();
        CurOp& curOp = *c.curop();
        OpDebug* opDebug = &curOp.debug();
        UpdateDriver driver( (UpdateDriver::Options()) );
        Database* db = ctx.ctx().db();
        Collection* coll = db->getCollection(&_txn, ns());

        // Get the DiskLocs that would be returned by an in-order scan.
        vector<DiskLoc> locs;
        getLocs(coll, CollectionScanParams::FORWARD, &locs);

        UpdateRequest request(&_txn, nsString());
        UpdateLifecycleImpl updateLifecycle(false, nsString());
        request.setLifecycle(&updateLifecycle);

        // Update is a multi-update that sets 'bar' to 3 in every document
        // where foo is less than 5.
        BSONObj query = fromjson("{foo: {$lt: 5}}");
        BSONObj updates = fromjson("{$set: {bar: 3}}");

        request.setMulti();
        request.setQuery(query);
        request.setUpdates(updates);

        ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti()));

        // Configure the scan.
        CollectionScanParams collScanParams;
        collScanParams.collection = coll;
        collScanParams.direction = CollectionScanParams::FORWARD;
        collScanParams.tailable = false;

        // Configure the update.
        UpdateStageParams updateParams(&request, &driver, opDebug);
        scoped_ptr<CanonicalQuery> cq(canonicalize(query));
        updateParams.canonicalQuery = cq.get();

        scoped_ptr<WorkingSet> ws(new WorkingSet());
        auto_ptr<CollectionScan> cs(
            new CollectionScan(&_txn, collScanParams, ws.get(), cq->root()));

        scoped_ptr<UpdateStage> updateStage(
            new UpdateStage(updateParams, ws.get(), db, cs.release()));

        const UpdateStats* stats =
            static_cast<const UpdateStats*>(updateStage->getSpecificStats());

        // Advance until 'targetDocIndex' docs have been modified.
        const size_t targetDocIndex = 3;
        while (stats->nModified < targetDocIndex) {
            WorkingSetID id = WorkingSet::INVALID_ID;
            PlanStage::StageState state = updateStage->work(&id);
            ASSERT_EQUALS(PlanStage::NEED_TIME, state);
        }

        // Remove locs[targetDocIndex];
        updateStage->saveState();
        updateStage->invalidate(locs[targetDocIndex], INVALIDATION_DELETION);
        BSONObj targetDoc = coll->docFor(&_txn, locs[targetDocIndex]);
        ASSERT(!targetDoc.isEmpty());
        remove(targetDoc);
        updateStage->restoreState(&_txn);

        // Do the remaining updates.
        while (!updateStage->isEOF()) {
            WorkingSetID id = WorkingSet::INVALID_ID;
            PlanStage::StageState state = updateStage->work(&id);
            ASSERT(PlanStage::NEED_TIME == state || PlanStage::IS_EOF == state);
        }
        ctx.commit();

        // 4 of the 5 matching documents should have been modified (one was deleted).
        ASSERT_EQUALS(4U, stats->nModified);
        ASSERT_EQUALS(4U, stats->nMatched);
    }

    // Check the contents of the collection.
    {
        Client::ReadContext ctx(&_txn, ns());
        Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns());

        vector<BSONObj> objs;
        getCollContents(collection, &objs);

        // Verify that the collection now has 9 docs (one was deleted).
        ASSERT_EQUALS(9U, objs.size());

        // Make sure that the collection has certain documents.
        assertHasDoc(objs, fromjson("{_id: 0, foo: 0, bar: 3}"));
        assertHasDoc(objs, fromjson("{_id: 1, foo: 1, bar: 3}"));
        assertHasDoc(objs, fromjson("{_id: 2, foo: 2, bar: 3}"));
        assertHasDoc(objs, fromjson("{_id: 4, foo: 4, bar: 3}"));
        assertHasDoc(objs, fromjson("{_id: 5, foo: 5}"));
        assertHasDoc(objs, fromjson("{_id: 6, foo: 6}"));
    }
}
// Older SortStage invalidation test (no Collection/txn in the sort params):
// invalidating every loc-backed member still yields all numObj() results,
// since only loc-backed data needed fetching.
void run() {
    Client::WriteContext ctx(ns());

    fillData();

    // The data we're going to later invalidate.
    set<DiskLoc> locs;
    getLocs(&locs);

    // Build the mock stage which feeds the data.
    WorkingSet ws;
    auto_ptr<MockStage> ms(new MockStage(&ws));
    insertVarietyOfObjects(ms.get());

    SortStageParams params;
    params.pattern = BSON("foo" << 1);
    auto_ptr<SortStage> ss(new SortStage(params, &ws, ms.get()));

    const int firstRead = 10;

    // Have sort read in data from the mock stage.  A blocking sort never
    // ADVANCEs while still consuming input.
    for (int i = 0; i < firstRead; ++i) {
        WorkingSetID id;
        PlanStage::StageState status = ss->work(&id);
        ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status);
    }

    // We should have read in the first 'firstRead' locs. Invalidate the first.
    ss->prepareToYield();
    set<DiskLoc>::iterator it = locs.begin();
    ss->invalidate(*it++);
    ss->recoverFromYield();

    // Read the rest of the data from the mock stage.
    while (!ms->isEOF()) {
        WorkingSetID id;
        ss->work(&id);
    }

    // Release to prevent double-deletion.
    ms.release();

    // Let's just invalidate everything now.
    ss->prepareToYield();
    while (it != locs.end()) {
        ss->invalidate(*it++);
    }
    ss->recoverFromYield();

    // The sort should still work.
    int count = 0;
    while (!ss->isEOF()) {
        WorkingSetID id;
        PlanStage::StageState status = ss->work(&id);
        if (PlanStage::ADVANCED != status) { continue; }
        WorkingSetMember* member = ws.get(id);
        ASSERT(member->hasObj());
        ASSERT(!member->hasLoc());
        ++count;
    }

    // We've invalidated everything, but only 2/3 of our data had a DiskLoc to be
    // invalidated.  We get the rest as-is.
    ASSERT_EQUALS(count, numObj());
}
// SortStage (with limit) invalidation test in the older no-txn API: after
// invalidating all loc-backed input, running the sort to completion yields
// an empty result set.
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    fillData();

    // The data we're going to later invalidate.
    set<DiskLoc> locs;
    getLocs(&locs, coll);

    // Build the mock scan stage which feeds the data.
    WorkingSet ws;
    auto_ptr<MockStage> ms(new MockStage(&ws));
    insertVarietyOfObjects(ms.get(), coll);

    SortStageParams params;
    params.pattern = BSON("foo" << 1);
    params.limit = limit();
    auto_ptr<SortStage> ss(new SortStage(params, &ws, ms.get()));

    const int firstRead = 10;

    // Have sort read in data from the mock stage.
    for (int i = 0; i < firstRead; ++i) {
        WorkingSetID id;
        PlanStage::StageState status = ss->work(&id);
        ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status);
    }

    // We should have read in the first 'firstRead' locs. Invalidate the first.
    ss->prepareToYield();
    set<DiskLoc>::iterator it = locs.begin();
    ss->invalidate(*it++);
    ss->recoverFromYield();

    // Read the rest of the data from the mock stage.
    while (!ms->isEOF()) {
        WorkingSetID id;
        ss->work(&id);
    }

    // Release to prevent double-deletion.
    ms.release();

    // Let's just invalidate everything now.
    ss->prepareToYield();
    while (it != locs.end()) {
        ss->invalidate(*it++);
    }
    ss->recoverFromYield();

    // After invalidating all our data, we have nothing left to sort.
    int count = 0;
    while (!ss->isEOF()) {
        WorkingSetID id;
        PlanStage::StageState status = ss->work(&id);
        if (PlanStage::ADVANCED != status) { continue; }
        WorkingSetMember* member = ws.get(id);
        ASSERT(member->hasObj());
        ASSERT(!member->hasLoc());
        ++count;
    }

    // Therefore, we expect an empty result set from running the sort stage to completion.
    ASSERT_EQUALS(0, count);
}
// Tests AndHashStage when the very first buffered doc is "deleted" during a
// yield (invalidated, without an actual remove): it is flagged for review
// and excluded from the AND's output.
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    // Docs 0..49 with foo == bar == baz == _id.
    for (int i = 0; i < 50; ++i) {
        insert(BSON("_id" << i << "foo" << i << "bar" << i << "baz" << i));
    }

    addIndex(BSON("foo" << 1));
    addIndex(BSON("bar" << 1));
    addIndex(BSON("baz" << 1));

    WorkingSet ws;
    scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));

    // Foo <= 20 (descending)
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 20);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = -1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Bar <= 19 (descending)
    params.descriptor = getIndex(BSON("bar" << 1), coll);
    params.bounds.startKey = BSON("" << 19);
    ah->addChild(new IndexScan(params, &ws, NULL));

    // First call to work reads the first result from the children.
    // The first result is for the first scan over foo is {foo: 20, bar: 20, baz: 20}.
    // The first result is for the second scan over bar is {foo: 19, bar: 19, baz: 19}.
    WorkingSetID id;
    PlanStage::StageState status = ah->work(&id);
    ASSERT_EQUALS(PlanStage::NEED_TIME, status);

    const unordered_set<WorkingSetID>& flagged = ws.getFlagged();
    ASSERT_EQUALS(size_t(0), flagged.size());

    // "delete" deletedObj (by invalidating the DiskLoc of the obj that matches it).
    BSONObj deletedObj = BSON("_id" << 20 << "foo" << 20 << "bar" << 20 << "baz" << 20);
    ah->prepareToYield();
    set<DiskLoc> data;
    getLocs(&data, coll);
    for (set<DiskLoc>::const_iterator it = data.begin(); it != data.end(); ++it) {
        if (0 == deletedObj.woCompare(it->obj())) {
            ah->invalidate(*it, INVALIDATION_DELETION);
            break;
        }
    }
    ah->recoverFromYield();

    // The deleted obj should show up in flagged.
    ASSERT_EQUALS(size_t(1), flagged.size());

    // And not in our results.
    int count = 0;
    while (!ah->isEOF()) {
        WorkingSetID id;
        PlanStage::StageState status = ah->work(&id);
        if (PlanStage::ADVANCED != status) { continue; }
        WorkingSetMember* wsm = ws.get(id);
        // Each returned member must not be the invalidated doc.
        ASSERT_NOT_EQUALS(0, deletedObj.woCompare(wsm->loc.obj()));
        ++count;
    }

    // 21 docs match (foo 0..20) minus the invalidated one.
    ASSERT_EQUALS(count, 20);
}