Example #1
 /* unindex all keys in all indexes for this record. */
 void unindexRecord(NamespaceDetails *d, 
                    Record *todelete, 
                    const DiskLoc& dl, 
                    bool noWarn /* = false */) {
     BSONObj obj = BSONObj::make(todelete);
     int n = d->nIndexes;
     for ( int i = 0; i < n; i++ )
         _unindexRecord(d->idx(i), obj, dl, !noWarn);
     if( d->indexBuildInProgress ) { // background index
         // never warn here: the key may legitimately be missing because this index is still being built concurrently
         _unindexRecord(d->idx(n), obj, dl, false);
     }
 }
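The detail worth noticing above is the last argument to _unindexRecord: warnings follow noWarn for the completed indexes, but are always suppressed for the index still being built in the background, since a concurrent build may legitimately not contain this record yet. Below is a minimal, self-contained sketch of that pattern; KeyIndex, CollectionIndexes, and unindexAll are hypothetical stand-ins, not MongoDB's NamespaceDetails/IndexDetails.

    // Hypothetical stand-in types for illustration only.
    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    struct KeyIndex {
        std::string name;
        std::vector<std::string> keys;

        // Returns false when the key was not present in this index.
        bool removeKey(const std::string& key) {
            std::vector<std::string>::iterator it = std::find(keys.begin(), keys.end(), key);
            if (it == keys.end())
                return false;
            keys.erase(it);
            return true;
        }
    };

    struct CollectionIndexes {
        std::vector<KeyIndex> completed;   // fully built indexes
        KeyIndex* inProgress;              // background build in progress, or null
    };

    void unindexAll(CollectionIndexes& coll, const std::string& key, bool noWarn) {
        for (KeyIndex& idx : coll.completed) {
            // Completed indexes should contain the key; warn unless asked not to.
            if (!idx.removeKey(key) && !noWarn)
                std::cerr << "warning: key not found in index " << idx.name << "\n";
        }
        if (coll.inProgress) {
            // Never warn here: the concurrent build may simply not have
            // indexed this record yet, so a missing key is expected.
            coll.inProgress->removeKey(key);
        }
    }

    int main() {
        KeyIndex a = { "a_1", std::vector<std::string>(1, "k1") };
        KeyIndex bg = { "b_1", std::vector<std::string>() };
        CollectionIndexes coll = { std::vector<KeyIndex>(1, a), &bg };
        unindexAll(coll, "k1", false);        // removed quietly from a_1
        unindexAll(coll, "missing", false);   // warns for a_1, never for b_1
        return 0;
    }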
Example #2
    /** add index keys for a newly inserted record 
        done in two steps/phases to allow potential deferral of the write-lock portion in the future
    */
    void indexRecordUsingTwoSteps(const char *ns, NamespaceDetails *d, BSONObj obj,
                                         DiskLoc loc, bool shouldBeUnlocked) {
        vector<int> multi;
        vector<BSONObjSet> multiKeys;

        IndexInterface::IndexInserter inserter;

        // Step 1, read phase.
        int n = d->nIndexesBeingBuilt();
        {
            BSONObjSet keys;
            for ( int i = 0; i < n; i++ ) {
                // this call throws on unique constraint violation.  we haven't done any writes yet so that is fine.
                fetchIndexInserters(/*out*/keys, inserter, d, i, obj, loc);
                if( keys.size() > 1 ) {
                    multi.push_back(i);
                    multiKeys.push_back(BSONObjSet());
                    multiKeys[multiKeys.size()-1].swap(keys);
                }
                keys.clear();
            }
        }

        inserter.finishAllInsertions();  // Step 2, write phase.

        // now finish adding multikeys
        for( unsigned j = 0; j < multi.size(); j++ ) {
            unsigned i = multi[j];
            BSONObjSet& keys = multiKeys[j];
            IndexDetails& idx = d->idx(i);
            IndexInterface& ii = idx.idxInterface();
            Ordering ordering = Ordering::make(idx.keyPattern());
            d->setIndexIsMultikey(ns, i);
            for( BSONObjSet::iterator k = ++keys.begin()/*skip 1*/; k != keys.end(); k++ ) {
                try {
                    ii.bt_insert(idx.head, loc, *k, ordering, !idx.unique(), idx);
                } catch (AssertionException& e) {
                    if( e.getCode() == 10287 && (int) i == d->nIndexes ) {
                        DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
                    }
                    else {
                        /* roll back previously added index entries;
                           note we must unindex this index as well, since it is multikey and may need cleanup itself
                        */
                        for( int idxNo = 0; idxNo < n; idxNo++ ) {
                            try {
                                _unindexRecord(d->idx(idxNo), obj, loc, false);
                            }
                            catch(...) {
                                log(3) << "unindex fails on rollback after unique key constraint prevented insert\n";
                            }
                        }
                        throw;
                    }
                }
            }
        }
    }
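The split into a read phase and a write phase is what makes the unique-constraint check safe: fetchIndexInserters can throw before any btree has been modified, and only finishAllInsertions performs the writes. Below is a minimal sketch of that read-then-write idea; FlatIndex and indexRecordTwoSteps are hypothetical flat-multimap stand-ins for illustration, not MongoDB's btree layer.

    // Hypothetical stand-in index for illustration only.
    #include <map>
    #include <stdexcept>
    #include <string>
    #include <utility>
    #include <vector>

    struct FlatIndex {
        bool unique;
        std::multimap<std::string, long long> entries;   // key -> record location
    };

    void indexRecordTwoSteps(std::vector<FlatIndex>& indexes,
                             const std::vector<std::string>& keys,
                             long long loc) {
        // Step 1, read phase: plan every insertion and check unique constraints.
        // No index has been modified yet, so throwing here needs no cleanup.
        std::vector<std::pair<FlatIndex*, std::string> > planned;
        for (FlatIndex& idx : indexes) {
            for (const std::string& k : keys) {
                if (idx.unique && idx.entries.count(k))
                    throw std::runtime_error("duplicate key: " + k);
                planned.push_back(std::make_pair(&idx, k));
            }
        }

        // Step 2, write phase: apply every planned insertion.
        for (std::pair<FlatIndex*, std::string>& p : planned)
            p.first->entries.insert(std::make_pair(p.second, loc));
    }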
Example #3
    /**
     * Remove the provided (obj, dl) pair from all indices.
     */
    void unindexRecord(NamespaceDetails* nsd, Record* todelete, const DiskLoc& dl,
                       bool noWarn /* = false */) {

        BSONObj obj = BSONObj::make(todelete);
        int numIndices = nsd->getTotalIndexCount();

        for (int i = 0; i < numIndices; i++) {
            // If i >= nsd->getCompletedIndexCount(), it's a background index, and we DO NOT want to log anything.
            bool logIfError = (i < nsd->getCompletedIndexCount()) ? !noWarn : false;
            _unindexRecord(nsd, i, obj, dl, logIfError);
        }
    }
Example #4
    /**
     * Add the provided (obj, loc) pair to all indices.
     */
    void indexRecord(const char *ns, NamespaceDetails *d, const BSONObj &obj, const DiskLoc &loc) {
        int numIndices = d->getTotalIndexCount();

        for (int i = 0; i < numIndices; ++i) {
            IndexDetails &id = d->idx(i);

            try {
                addKeysToIndex(ns, d, i, obj, loc, !id.unique() || ignoreUniqueIndex(id));
            }
            catch (AssertionException&) {
                // TODO: the new index layer indexes either all or no keys, so j <= i can be j < i.
                for (int j = 0; j <= i; j++) {
                    try {
                        _unindexRecord(d, j, obj, loc, false);
                    }
                    catch(...) {
                        LOG(3) << "unindex fails on rollback after unique "
                                  "key constraint prevented insert" << std::endl;
                    }
                }
                throw;
            }
        }
    }
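On an exception from addKeysToIndex (typically a duplicate key on a unique index), the catch block removes the record's keys from indexes 0 through i, including the one that just failed in case it was partially updated, and then rethrows. Below is a minimal sketch of that rollback-and-rethrow shape; ToyIndex and indexRecordWithRollback are hypothetical stand-ins, not the real MongoDB functions.

    // Hypothetical stand-in index for illustration only.
    #include <cstddef>
    #include <map>
    #include <stdexcept>
    #include <string>
    #include <utility>
    #include <vector>

    struct ToyIndex {
        bool unique;
        std::multimap<std::string, long long> entries;   // key -> record location

        void insertKey(const std::string& key, long long loc) {
            if (unique && entries.count(key))
                throw std::runtime_error("duplicate key: " + key);
            entries.insert(std::make_pair(key, loc));
        }

        // Remove every entry that points at this record location.
        void removeLoc(long long loc) {
            std::multimap<std::string, long long>::iterator it = entries.begin();
            while (it != entries.end()) {
                if (it->second == loc)
                    it = entries.erase(it);
                else
                    ++it;
            }
        }
    };

    void indexRecordWithRollback(std::vector<ToyIndex>& indexes,
                                 const std::string& key, long long loc) {
        for (std::size_t i = 0; i < indexes.size(); ++i) {
            try {
                indexes[i].insertKey(key, loc);
            }
            catch (...) {
                // Undo indexes 0..i (including the one that just failed, in case
                // it was partially updated), then propagate the original error.
                for (std::size_t j = 0; j <= i; ++j)
                    indexes[j].removeLoc(loc);
                throw;
            }
        }
    }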