// Entry point for a search. virtual shared_ptr<Cursor> newCursor(const BSONObj& query, const BSONObj& order, int numWanted) const { vector<QueryGeometry> regions; double maxDistance = DBL_MAX; bool isNear = false; bool isIntersect = false; // Go through the fields that we index, and for each geo one, make a QueryGeometry // object for the S2Cursor class to do intersection testing/cover generating with. for (size_t i = 0; i < _fields.size(); ++i) { const IndexedField &field = _fields[i]; if (IndexedField::GEO != field.type) { continue; } BSONElement e = query.getFieldDotted(field.name); if (e.eoo()) { continue; } if (!e.isABSONObj()) { continue; } BSONObj obj = e.Obj(); QueryGeometry geoQueryField(field.name); if (parseLegacy(obj, &geoQueryField, &isNear, &isIntersect, &maxDistance)) { regions.push_back(geoQueryField); } else if (parseQuery(obj, &geoQueryField, &isNear, &isIntersect, &maxDistance)) { regions.push_back(geoQueryField); } else { uasserted(16535, "can't parse query for *2d geo search: " + obj.toString()); } } if (isNear && isIntersect ) { uasserted(16474, "Can't do both near and intersect, query: " + query.toString()); } // I copied this from 2d.cpp. Guard against perversion. if (numWanted < 0) numWanted *= -1; if (0 == numWanted) numWanted = INT_MAX; BSONObjBuilder geoFieldsToNuke; for (size_t i = 0; i < _fields.size(); ++i) { const IndexedField &field = _fields[i]; if (IndexedField::GEO != field.type) { continue; } geoFieldsToNuke.append(field.name, ""); } // false means we want to filter OUT geoFieldsToNuke, not filter to include only that. BSONObj filteredQuery = query.filterFieldsUndotted(geoFieldsToNuke.obj(), false); if (isNear) { S2NearCursor *cursor = new S2NearCursor(keyPattern(), getDetails(), filteredQuery, regions, _params, numWanted, maxDistance); return shared_ptr<Cursor>(cursor); } else { // Default to intersect. 
S2Cursor *cursor = new S2Cursor(keyPattern(), getDetails(), filteredQuery, regions, _params, numWanted); return shared_ptr<Cursor>(cursor); } }
// Open the dictionary. Creates it if necessary. bool IndexDetails::open(const bool may_create) { const string dname = indexNamespace(); if (may_create) { addNewNamespaceToCatalog(dname); } TOKULOG(1) << "Opening IndexDetails " << dname << endl; try { _db.reset(new storage::Dictionary(dname, _info, *_descriptor, may_create, _info["background"].trueValue())); return true; } catch (storage::Dictionary::NeedsCreate) { if (cc().upgradingSystemUsers() && isSystemUsersCollection(parentNS()) && keyPattern() == oldSystemUsersKeyPattern) { // We're upgrading the system.users collection, and we are missing the old index. // That's ok, we'll signal the caller about this by returning a NULL pointer from // IndexDetails::make. See #673 return false; } // Unlike for NamespaceIndex, this dictionary must exist on disk if we think it should // exist. This error only gets thrown if may_create is false, which happens when we're // trying to open a collection for which we have serialized info. Therefore, this is a // fatal non-user error. msgasserted(16988, mongoutils::str::stream() << "dictionary " << dname << " should exist, but we got ENOENT"); } }
// Open the dictionary. Creates it if necessary. bool IndexDetailsBase::open(const bool may_create, const bool use_memcmp_magic) { const string dname = indexNamespace(); TOKULOG(1) << "Opening IndexDetails " << dname << endl; try { // We use the memcmp magic API only for single-key, ascending _id indexes, // because the _id field is always unique (and therefore we can simply // compare the OID fields if they exist and that will be sufficient) if (use_memcmp_magic) { verify(_unique); } _db.reset(new storage::Dictionary(dname, _info, *_descriptor, may_create, _info["background"].trueValue(), use_memcmp_magic)); return true; } catch (storage::Dictionary::NeedsCreate) { if (cc().upgradingSystemUsers() && isSystemUsersCollection(parentNS()) && keyPattern() == oldSystemUsersKeyPattern) { // We're upgrading the system.users collection, and we are missing the old index. // That's ok, we'll signal the caller about this by returning a NULL pointer from // IndexDetailsBase::make. See #673 return false; } // This dictionary must exist on disk if we think it should exist. // This error only gets thrown if may_create is false, which happens when we're // trying to open a collection for which we have serialized info. // Therefore, this is a fatal non-user error. msgasserted(16988, mongoutils::str::stream() << "dictionary " << dname << " should exist, but we got ENOENT"); } }
int IndexDetails::keyPatternOffset( const StringData& key ) const { BSONObjIterator i( keyPattern() ); int n = 0; while ( i.more() ) { BSONElement e = i.next(); if ( key == e.fieldName() ) return n; n++; } return -1; }
// Returns the zero-based position of 'key' within this index's key pattern,
// or -1 if the field is not part of the pattern.  The control block must have
// been initialized before calling.
INT32 _ixmIndexCB::keyPatternOffset( const CHAR *key ) const {
    SDB_ASSERT ( _isInitialized, "index details must be initialized first" ) ;
    INT32 pos = 0 ;
    BSONObjIterator it ( keyPattern() ) ;
    while ( it.more() ) {
        if ( 0 == ossStrcmp ( key, it.next().fieldName() ) ) {
            return pos ;
        }
        ++pos ;
    }
    return -1 ;
}
// Services an OP_GET_MORE request: resumes the pinned ClientCursor identified by
// 'cursorid', fills a reply buffer with up to 'ntoreturn' more matching documents
// (or until the byte cap is reached), and returns the heap-allocated QueryResult
// the caller will send on the wire.  Cursors already backed by a Runner are handed
// off to newGetMore (the newer execution framework).  Sets 'exhaust' when the
// cursor was opened with QueryOption_Exhaust, and '*isCursorAuthorized' once the
// namespace check passes.  A dead/unknown cursorid yields a reply flagged
// ResultFlag_CursorNotFound.  NOTE(review): statement order here (pin release,
// ctx.reset, yield, erase) is lock-sensitive — do not reorder.
QueryResult* processGetMore(const char* ns, int ntoreturn, long long cursorid, CurOp& curop, int pass, bool& exhaust, bool* isCursorAuthorized ) {
    bool hasRunner = false;
    // Scoped to kill the pin after seeing if the runner's there.
    {
        // See if there's a runner. We do this until agg. is behind a Runner instead of a CC.
        ClientCursorPin p(cursorid);
        ClientCursor *cc = p.c();
        if (NULL != cc && NULL != cc->getRunner()) {
            hasRunner = true;
        }
    }
    if (hasRunner) {
        // Runner-backed cursors are serviced entirely by the new framework.
        return newGetMore(ns, ntoreturn, cursorid, curop, pass, exhaust, isCursorAuthorized);
    }

    exhaust = false;
    // Reply buffer: header + slack + the per-batch byte cap.
    int bufSize = 512 + sizeof( QueryResult ) + MaxBytesToReturnToClientAtOnce;
    BufBuilder b( bufSize );
    b.skip(sizeof(QueryResult)); // reserve space for the QueryResult header, filled in at the end
    int resultFlags = ResultFlag_AwaitCapable;
    int start = 0;  // cursor position at the start of this batch
    int n = 0;      // documents appended to this batch

    // Heap-held read lock so it can be dropped early for lock-free cursors (see below).
    scoped_ptr<Client::ReadContext> ctx(new Client::ReadContext(ns));
    // call this readlocked so state can't change
    replVerifyReadsOk();

    ClientCursorPin p(cursorid);
    ClientCursor *cc = p.c();

    if ( unlikely(!cc) ) {
        // Cursor timed out or never existed; tell the client it's gone.
        LOGSOME << "getMore: cursorid not found " << ns << " " << cursorid << endl;
        cursorid = 0;
        resultFlags = ResultFlag_CursorNotFound;
    }
    else {
        // Some internal users create a ClientCursor with a Runner. Don't crash if this
        // happens. Instead, hand them off to the new framework.
        if (NULL != cc->getRunner()) {
            p.release();
            return newGetMore(ns, ntoreturn, cursorid, curop, pass, exhaust, isCursorAuthorized);
        }

        // check for spoofing of the ns such that it does not match the one originally
        // there for the cursor
        uassert(14833, "auth error", str::equals(ns, cc->ns().c_str()));
        *isCursorAuthorized = true;

        // This must be done after auth check to ensure proper cleanup.
        uassert(16951, "failing getmore due to set failpoint",
                !MONGO_FAIL_POINT(getMoreError));

        // If the operation that spawned this cursor had a time limit set, apply leftover
        // time to this getmore.
        curop.setMaxTimeMicros( cc->getLeftoverMaxTimeMicros() );
        killCurrentOp.checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.

        if ( pass == 0 ) cc->updateSlaveLocation( curop );

        int queryOptions = cc->queryOptions();

        curop.debug().query = cc->query();
        curop.setQuery( cc->query() );

        start = cc->pos();
        Cursor *c = cc->c();

        if (!c->requiresLock()) {
            // make sure it won't be destroyed under us
            fassert(16952, !c->shouldDestroyOnNSDeletion());
            fassert(16953, !c->supportYields());
            ctx.reset(); // unlocks
        }

        c->recoverFromYield();
        DiskLoc last;

        // This metadata may be stale, but it's the state of chunking when the cursor was
        // created.
        CollectionMetadataPtr metadata = cc->getCollMetadata();
        KeyPattern keyPattern( metadata ? metadata->getKeyPattern() : BSONObj() );

        while ( 1 ) {
            if ( !c->ok() ) {
                if ( c->tailable() ) {
                    // when a tailable cursor hits "EOF", ok() goes false, and current() is
                    // null. however advance() can still be retries as a reactivation attempt.
                    // when there is new data, it will return true. that's what we are doing
                    // here.
                    if ( c->advance() ) continue;

                    // Empty batch + awaitData: return 0 so the caller can block/retry
                    // (the 'pass' counter bounds how long we keep doing this).
                    if( n == 0 && (queryOptions & QueryOption_AwaitData) && pass < 1000 ) {
                        return 0;
                    }

                    break;
                }
                // Non-tailable cursor is exhausted: release the pin and destroy it.
                p.release();
                bool ok = ClientCursor::erase(cursorid);
                verify(ok);
                cursorid = 0;
                cc = 0;
                break;
            }

            MatchDetails details;
            if ( cc->fields && cc->fields->getArrayOpType() == Projection::ARRAY_OP_POSITIONAL ) {
                // field projection specified, and contains an array operator
                details.requestElemMatchKey();
            }

            // in some cases (clone collection) there won't be a matcher
            if ( !c->currentMatches( &details ) ) {
                // no match: fall through to advance
            }
            else if ( metadata && !metadata->keyBelongsToMe( extractKey(c, keyPattern ) ) ) {
                // Skip documents this shard no longer owns (orphans from a migration).
                LOG(2) << "cursor skipping document in un-owned chunk: " << c->current()
                       << endl;
            }
            else {
                if( c->getsetdup(c->currLoc()) ) {
                    //out() << "  but it's a dup \n";
                }
                else {
                    last = c->currLoc();
                    n++;

                    // Fill out the fields requested by the query.
                    const Projection::KeyOnly *keyFieldsOnly = c->keyFieldsOnly();
                    if ( keyFieldsOnly ) {
                        // Covered projection: build the result from the index key alone.
                        fillQueryResultFromObj( b, 0, keyFieldsOnly->hydrate( c->currKey() ), &details );
                    }
                    else {
                        DiskLoc loc = c->currLoc();
                        fillQueryResultFromObj( b, cc->fields.get(), c->current(), &details,
                                                ( ( cc->pq.get() && cc->pq->showDiskLoc() ) ? &loc : 0 ) );
                    }

                    // Stop when the count limit or the per-batch byte cap is hit;
                    // advance first so the cursor resumes past this document.
                    if ( ( ntoreturn && n >= ntoreturn ) || b.len() > MaxBytesToReturnToClientAtOnce ) {
                        c->advance();
                        cc->incPos( n );
                        break;
                    }
                }
            }
            c->advance();

            // Periodically yield the lock; a false return means the cursor was
            // invalidated while yielded, so drop it.
            if ( ! cc->yieldSometimes( ( c->ok() && c->keyFieldsOnly() ) ?
                                       ClientCursor::DontNeed : ClientCursor::WillNeed ) ) {
                ClientCursor::erase(cursorid);
                cursorid = 0;
                cc = 0;
                break;
            }
        }

        if ( cc ) {
            // Cursor survives this batch: save enough state to resume later.
            if ( c->supportYields() ) {
                ClientCursor::YieldData data;
                verify( cc->prepareToYield( data ) );
            }
            else {
                cc->c()->noteLocation();
            }
            cc->storeOpForSlave( last );
            exhaust = cc->queryOptions() & QueryOption_Exhaust;

            // If the getmore had a time limit, remaining time is "rolled over" back to the
            // cursor (for use by future getmore ops).
            cc->setLeftoverMaxTimeMicros( curop.getRemainingMaxTimeMicros() );
        }
    }

    // Fill in the QueryResult header reserved at the top of the buffer and
    // detach the buffer so it survives this function.
    QueryResult *qr = (QueryResult *) b.buf();
    qr->len = b.len();
    qr->setOperation(opReply);
    qr->_resultFlags() = resultFlags;
    qr->cursorId = cursorid;
    qr->startingFrom = start;
    qr->nReturned = n;
    b.decouple();
    return qr;
}