virtual Status insert(OperationContext* opCtx,
                      const BSONObj& key,
                      const RecordId& loc,
                      bool dupsAllowed) {
    // Preconditions: callers must supply a normal (user-data) RecordId and a
    // key that already had its field names stripped.
    invariant(loc.isNormal());
    invariant(!hasFieldNames(key));

    // Refuse to index keys at or above the maximum indexable size.
    if (key.objsize() >= TempKeyMaxSize) {
        string msg = mongoutils::str::stream()
            << "EphemeralForTestBtree::insert: key too large to index, failing " << ' '
            << key.objsize() << ' ' << key;
        return Status(ErrorCodes::KeyTooLong, msg);
    }

    // TODO optimization: save the iterator from the dup-check to speed up insert
    if (!dupsAllowed && isDup(*_data, key, loc))
        return dupKeyError(key);

    IndexKeyEntry newEntry(key.getOwned(), loc);
    const bool wasInserted = _data->insert(newEntry).second;
    if (wasInserted) {
        // Account for the key's size and register a change so the insert is
        // rolled back if the unit of work aborts.
        _currentKeySize += key.objsize();
        opCtx->recoveryUnit()->registerChange(new IndexChange(_data, newEntry, true));
    }
    return Status::OK();
}
Status addKey(const BSONObj& key, const RecordId& loc) {
    // Bulk-builder insert: entries must arrive in ascending (key, RecordId) order.
    if (key.objsize() >= TempKeyMaxSize) {
        return Status(ErrorCodes::KeyTooLong, "key too big");
    }

    invariant(loc.isNormal());
    invariant(!hasFieldNames(key));

    if (!_data->empty()) {
        // Compare with the previously inserted entry; the probe carries an
        // empty RecordId so only the key portions are compared.
        const int keyOrder = _comparator.compare(IndexKeyEntry(key, RecordId()), *_last);
        const bool sameKey = (keyOrder == 0);
        if (keyOrder < 0 || (_dupsAllowed && sameKey && loc < _last->loc)) {
            return Status(ErrorCodes::InternalError,
                          "expected ascending (key, RecordId) order in bulk builder");
        }
        if (!_dupsAllowed && sameKey && loc != _last->loc) {
            return dupKeyError(key);
        }
    }

    // Own the key's buffer before storing it, then remember the new last entry.
    BSONObj ownedKey = key.getOwned();
    _last = _data->insert(_data->end(), IndexKeyEntry(ownedKey, loc));
    *_currentKeySize += key.objsize();
    return Status::OK();
}
virtual Status insert(OperationContext* txn,
                      const BSONObj& key,
                      const DiskLoc& loc,
                      bool dupsAllowed) {
    // Preconditions: a non-null, valid DiskLoc and a key with field names
    // already stripped.
    invariant(!loc.isNull());
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    // Refuse to index keys at or above the maximum indexable size.
    if (key.objsize() >= TempKeyMaxSize) {
        string msg = mongoutils::str::stream()
            << "Heap1Btree::insert: key too large to index, failing " << ' '
            << key.objsize() << ' ' << key;
        return Status(ErrorCodes::KeyTooLong, msg);
    }

    // TODO optimization: save the iterator from the dup-check to speed up insert
    if (!dupsAllowed && isDup(*_data, key, loc))
        return dupKeyError(key);

    BSONObj ownedKey = key.getOwned();
    const bool wasInserted = _data->insert(IndexKeyEntry(ownedKey, loc)).second;
    if (wasInserted) {
        // Account for the key's size and notify the recovery unit so the
        // insert can be undone on rollback.
        _currentKeySize += key.objsize();
        Heap1RecoveryUnit::notifyIndexInsert(txn, this, ownedKey, loc);
    }
    return Status::OK();
}
Status addKey(const BSONObj& key, const DiskLoc& loc) {
    // Bulk-builder insert: entries must arrive in ascending (key, DiskLoc) order.
    if (key.objsize() >= TempKeyMaxSize) {
        return Status(ErrorCodes::KeyTooLong, "key too big");
    }

    invariant(!loc.isNull());
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    if (!_data->empty()) {
        // Validate ordering against the previously inserted entry.
        const bool sameKey = (key == _last->key);
        if (key < _last->key || (_dupsAllowed && sameKey && loc < _last->loc)) {
            return Status(ErrorCodes::InternalError,
                          "expected ascending (key, DiskLoc) order in bulk builder");
        }
        if (!_dupsAllowed && sameKey && loc != _last->loc) {
            return dupKeyError(key);
        }
    }

    // Own the key's buffer before storing it, then remember the new last entry.
    BSONObj ownedKey = key.getOwned();
    _last = _data->insert(_data->end(), IndexKeyEntry(ownedKey, loc));
    *_currentKeySize += key.objsize();
    return Status::OK();
}
Status RocksSortedDataImpl::dupKeyCheck(OperationContext* txn,
                                        const BSONObj& key,
                                        const DiskLoc& loc) {
    // Checks whether 'key' is indexed under any DiskLoc other than 'loc'.
    // Seek to the smallest entry with this key (DiskLoc(0, 0) sorts first).
    boost::scoped_ptr<SortedDataInterface::Cursor> cursor(newCursor(txn, 1));
    cursor->locate(key, DiskLoc(0, 0));

    if (cursor->isEOF() || cursor->getKey() != key) {
        // No entry with this key exists at all.
        return Status::OK();
    }
    if (cursor->getDiskLoc() != loc) {
        // First matching entry belongs to a different record -> duplicate.
        return Status(ErrorCodes::DuplicateKey, dupKeyError(key));
    }

    // The first matching entry is (key, loc) itself. The previous code
    // returned OK here, silently missing a duplicate (key, otherLoc) entry
    // sorted immediately after it; advance once and re-check the key.
    cursor->advance();
    if (!cursor->isEOF() && cursor->getKey() == key) {
        return Status(ErrorCodes::DuplicateKey, dupKeyError(key));
    }
    return Status::OK();
}
Status addKey(const BSONObj& key, const DiskLoc& loc) {
    // Bulk-builder insert; callers feed entries in ascending order.
    if (key.objsize() >= TempKeyMaxSize) {
        return Status(ErrorCodes::KeyTooLong, "key too big");
    }

    invariant(!loc.isNull());
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    // TODO optimization: dup check can assume dup is only possible with last
    // inserted key and avoid the log(n) lookup.
    if (!_dupsAllowed && isDup(*_data, key, loc)) {
        return dupKeyError(key);
    }

    // Own the key's buffer, append at the end (input is sorted), and account
    // for the key's size.
    BSONObj ownedKey = key.getOwned();
    _data->insert(_data->end(), IndexEntry(ownedKey, loc));
    *_currentKeySize += key.objsize();
    return Status::OK();
}
virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key, const RecordId& loc) {
    // Precondition: the key must already have its field names stripped.
    invariant(!hasFieldNames(key));
    // Report a duplicate-key error if 'key' is indexed under a RecordId
    // other than 'loc'.
    return isDup(*_data, key, loc) ? dupKeyError(key) : Status::OK();
}