Example #1
    bool CoveredIndexMatcher::matchesCurrent( Cursor * cursor , MatchDetails * details ) const {
        const bool keyUsable = !cursor->indexKeyPattern().isEmpty() && !cursor->isMultiKey();
        const BSONObj key = cursor->currKey();
        dassert( key.isValid() );

        LOG(5) << "CoveredIndexMatcher::matches() " << key.toString() << ", keyUsable " << keyUsable << endl;

        if ( details )
            details->resetOutput();

        if ( keyUsable ) {
            if ( !_keyMatcher.matches(key, details ) ) {
                return false;
            }
            bool needRecordForDetails = details && details->needRecord();
            if ( !_needRecord && !needRecordForDetails ) {
                return true;
            }
        }

        if ( details )
            details->setLoadedRecord( true );

        // Couldn't match off key, need to read full document.
        const BSONObj obj = cursor->current();
        bool res = _docMatcher->matches( obj, details ) && !isOrClauseDup( obj );
        LOG(5) << "CoveredIndexMatcher _docMatcher->matches() returns " << res << endl;
        return res;
    }
Example #2
    void ScanAndOrder::add(const BSONObj& o, const DiskLoc* loc) {
        verify( o.isValid() );
        BSONObj k;
        try {
            k = _order.getKeyFromObject(o);
        }
        catch (UserException &e) {
            if ( e.getCode() == ParallelArraysCode ) { // cannot get keys for parallel arrays
                // fix lasterror text to be more accurate.
                uasserted( 15925, "cannot sort with keys that are parallel arrays" );
            }
            else
                throw;
        }

        if ( k.isEmpty() ) {
            return;   
        }
        if ( (int) _best.size() < _limit ) {
            _add(k, o, loc);
            return;
        }
        BestMap::iterator i;
        verify( _best.end() != _best.begin() );
        i = _best.end();
        i--;
        _addIfBetter(k, o, i, loc);
    }
Example #3
    bool CoveredIndexMatcher::matches( const BSONObj& key, const DiskLoc& recLoc,
                                       MatchDetails* details, bool keyUsable ) const {

        LOG(5) << "CoveredIndexMatcher::matches() " << key.toString() << ' ' << recLoc.toString() << ' ' << keyUsable << endl;

        dassert( key.isValid() );

        if ( details )
            details->resetOutput();

        if ( keyUsable ) {
            if ( !_keyMatcher.matches(key, details ) ) {
                return false;
            }
            bool needRecordForDetails = details && details->needRecord();
            if ( !_needRecord && !needRecordForDetails ) {
                return true;
            }
        }

        if ( details )
            details->setLoadedRecord( true );

        BSONObj obj = recLoc.obj();
        bool res =
            _docMatcher->matches( obj, details ) &&
            !isOrClauseDup( obj );
        LOG(5) << "CoveredIndexMatcher _docMatcher->matches() returns " << res << endl;
        return res;
    }
Example #4
File: query.cpp Project: JakubOboza/mongo
 BSONObj ResponseBuildStrategy::current( bool allowCovered ) const {
     if ( _parsedQuery.returnKey() ) {
         BSONObjBuilder bob;
         bob.appendKeys( _cursor->indexKeyPattern(), _cursor->currKey() );
         return bob.obj();
     }
     if ( allowCovered ) {
         const Projection::KeyOnly *fields = keyFieldsOnly();
         if ( fields ) {
             return fields->hydrate( _cursor->currKey() );
         }
     }
     BSONObj ret = _cursor->current();
     verify( ret.isValid() );
     return ret;
 }
Example #5
File: query.cpp Project: enlyn/mongo
 BSONObj ResponseBuildStrategy::current( bool allowCovered,
                                         ResultDetails* resultDetails ) const {
     if ( _parsedQuery.returnKey() ) {
         BSONObjBuilder bob;
         bob.appendKeys( _cursor->indexKeyPattern(), _cursor->currKey() );
         return bob.obj();
     }
     if ( allowCovered ) {
         const Projection::KeyOnly *keyFieldsOnly = _cursor->keyFieldsOnly();
         if ( keyFieldsOnly ) {
             return keyFieldsOnly->hydrate( _cursor->currKey() );
         }
     }
     resultDetails->loadedRecord = true;
     BSONObj ret = _cursor->current();
     verify( ret.isValid() );
     return ret;
 }
Example #6
	std::list<BSONObj>  mongoWrapper::getAll(std::string dbpath, std::string searchcri)
	{
		if(!c.isStillConnected ())
		{
			std::cout << "mongoWrapper::getAll():db not connected, connecting.." << endl;
			connect();
		}

		std::cout << "mongoWrapper::getAll():db connected :" <<dbpath << endl;
		int nc =c.count(dbpath);
		std::cout << "mongoWrapper::getAll():#" << nc << endl;
		std::list<BSONObj> retList;


		if(nc >0)
		{
			auto_ptr<mongo::DBClientCursor> cursor ;

			if(searchcri != "")
			{
				BSONObj bquery = mongo::fromjson(searchcri);
				mongo::Query q = mongo::Query(bquery);
				cursor = c.query(dbpath, q);
			}
			else
			{
				cursor = c.query(dbpath, BSONObj());

			}
			while (cursor->more())
			{
				BSONObj p = cursor->next().getOwned();
				if(p.isEmpty() || !p.isValid())
					break;
				std::string pval = p.toString();
				std::cout << ' ' << pval << std::endl;
				retList.push_back(p);
			}

		}
		return retList;

	}
Example #7
File: query.cpp Project: AshishSanju/mongo
void assembleQueryRequest(const string& ns,
                          BSONObj query,
                          int nToReturn,
                          int nToSkip,
                          const BSONObj* fieldsToReturn,
                          int queryOptions,
                          Message& toSend) {
    if (kDebugBuild) {
        massert(10337, (string) "object not valid assembleRequest query", query.isValid());
    }

    // see query.h for the protocol we are using here.
    BufBuilder b;
    int opts = queryOptions;
    b.appendNum(opts);
    b.appendStr(ns);
    b.appendNum(nToSkip);
    b.appendNum(nToReturn);
    query.appendSelfToBufBuilder(b);
    if (fieldsToReturn)
        fieldsToReturn->appendSelfToBufBuilder(b);
    toSend.setData(dbQuery, b.buf(), b.len());
}
Example #8
void NetworkInterfaceASIO::_messageFromRequest(const RemoteCommandRequest& request,
                                               Message* toSend,
                                               bool useOpCommand) {
    BSONObj query = request.cmdObj;
    invariant(query.isValid());

    // TODO: Once OP_COMMAND work is complete,
    // look at client to see if it supports OP_COMMAND.

    // TODO: Investigate whether we can use CommandRequestBuilder here.

    BufBuilder b;
    b.appendNum(0);  // opts
    b.appendStr(request.dbname + ".$cmd");
    b.appendNum(0);  // toSkip
    b.appendNum(1);  // toReturn, don't care about responses
    query.appendSelfToBufBuilder(b);

    // TODO: If AsyncOp can own this buffer, we can avoid copying it in setData().
    toSend->setData(dbQuery, b.buf(), b.len());
    toSend->header().setId(nextMessageId());
    toSend->header().setResponseTo(0);
}
Example #9
File: validate.cpp Project: ChrisBg/mongo
        void validateNS(const string& ns,
                        Collection* collection,
                        const BSONObj& cmdObj,
                        BSONObjBuilder& result) {

            const bool full = cmdObj["full"].trueValue();
            const bool scanData = full || cmdObj["scandata"].trueValue();

            NamespaceDetails* nsd = collection->details();

            bool valid = true;
            BSONArrayBuilder errors; // explanation(s) for why valid = false
            if ( collection->isCapped() ){
                result.append("capped", nsd->isCapped());
                result.appendNumber("max", nsd->maxCappedDocs());
            }

            if ( nsd->firstExtent().isNull() )
                result.append( "firstExtent", "null" );
            else
                result.append( "firstExtent", str::stream() << nsd->firstExtent().toString()
                               << " ns:" << nsd->firstExtent().ext()->nsDiagnostic.toString());
            if ( nsd->lastExtent().isNull() )
                result.append( "lastExtent", "null" );
            else
                result.append( "lastExtent", str::stream() <<  nsd->lastExtent().toString()
                               << " ns:" <<  nsd->lastExtent().ext()->nsDiagnostic.toString());

            BSONArrayBuilder extentData;
            int extentCount = 0;
            try {

                if ( !nsd->firstExtent().isNull() ) {
                    nsd->firstExtent().ext()->assertOk();
                    nsd->lastExtent().ext()->assertOk();
                }

                DiskLoc extentDiskLoc = nsd->firstExtent();
                while (!extentDiskLoc.isNull()) {
                    Extent* thisExtent = extentDiskLoc.ext();
                    if (full) {
                        extentData << thisExtent->dump();
                    }
                    if (!thisExtent->validates(extentDiskLoc, &errors)) {
                        valid = false;
                    }
                    DiskLoc nextDiskLoc = thisExtent->xnext;
                    if (extentCount > 0 && !nextDiskLoc.isNull()
                                        &&  nextDiskLoc.ext()->xprev != extentDiskLoc) {
                        StringBuilder sb;
                        sb << "'xprev' pointer " << nextDiskLoc.ext()->xprev.toString()
                           << " in extent " << nextDiskLoc.toString()
                           << " does not point to extent " << extentDiskLoc.toString();
                        errors << sb.str();
                        valid = false;
                    }
                    if (nextDiskLoc.isNull() && extentDiskLoc != nsd->lastExtent()) {
                        StringBuilder sb;
                        sb << "'lastExtent' pointer " << nsd->lastExtent().toString()
                           << " does not point to last extent in list " << extentDiskLoc.toString();
                        errors << sb.str();
                        valid = false;
                    }
                    extentDiskLoc = nextDiskLoc;
                    extentCount++;
                    killCurrentOp.checkForInterrupt();
                }
            }
            catch (const DBException& e) {
                StringBuilder sb;
                sb << "exception validating extent " << extentCount
                   << ": " << e.what();
                errors << sb.str();
                valid = false;
            }
            result.append("extentCount", extentCount);

            if ( full )
                result.appendArray( "extents" , extentData.arr() );

            result.appendNumber("datasize", nsd->dataSize());
            result.appendNumber("nrecords", nsd->numRecords());
            result.appendNumber("lastExtentSize", nsd->lastExtentSize());
            result.appendNumber("padding", nsd->paddingFactor());

            try {

                bool testingLastExtent = false;
                try {
                    if (nsd->firstExtent().isNull()) {
                        // this is ok
                    }
                    else {
                        result.append("firstExtentDetails", nsd->firstExtent().ext()->dump());
                        if (!nsd->firstExtent().ext()->xprev.isNull()) {
                            StringBuilder sb;
                            sb << "'xprev' pointer in 'firstExtent' " << nsd->firstExtent().toString()
                               << " is " << nsd->firstExtent().ext()->xprev.toString()
                               << ", should be null";
                            errors << sb.str();
                            valid=false;
                        }
                    }
                    testingLastExtent = true;
                    if (nsd->lastExtent().isNull()) {
                        // this is ok
                    }
                    else {
                        if (nsd->firstExtent() != nsd->lastExtent()) {
                            result.append("lastExtentDetails", nsd->lastExtent().ext()->dump());
                            if (!nsd->lastExtent().ext()->xnext.isNull()) {
                                StringBuilder sb;
                                sb << "'xnext' pointer in 'lastExtent' " << nsd->lastExtent().toString()
                                   << " is " << nsd->lastExtent().ext()->xnext.toString()
                                   << ", should be null";
                                errors << sb.str();
                                valid = false;
                            }
                        }
                    }
                }
                catch (const DBException& e) {
                    StringBuilder sb;
                    sb << "exception processing '"
                       << (testingLastExtent ? "lastExtent" : "firstExtent")
                       << "': " << e.what();
                    errors << sb.str();
                    valid = false;
                }

                set<DiskLoc> recs;
                if( scanData ) {
                    int n = 0;
                    int nInvalid = 0;
                    long long nQuantizedSize = 0;
                    long long nPowerOf2QuantizedSize = 0;
                    long long len = 0;
                    long long nlen = 0;
                    long long bsonLen = 0;
                    int outOfOrder = 0;
                    DiskLoc cl_last;

                    DiskLoc cl;
                    Runner::RunnerState state;
                    auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns));
                    while (Runner::RUNNER_ADVANCED == (state = runner->getNext(NULL, &cl))) {
                        n++;

                        if ( n < 1000000 )
                            recs.insert(cl);
                        if ( nsd->isCapped() ) {
                            if ( cl < cl_last )
                                outOfOrder++;
                            cl_last = cl;
                        }

                        Record *r = cl.rec();
                        len += r->lengthWithHeaders();
                        nlen += r->netLength();
                        
                        if ( r->lengthWithHeaders() ==
                                NamespaceDetails::quantizeAllocationSpace
                                    ( r->lengthWithHeaders() ) ) {
                            // Count the number of records having a size consistent with
                            // the quantizeAllocationSpace quantization implementation.
                            ++nQuantizedSize;
                        }

                        if ( r->lengthWithHeaders() ==
                                NamespaceDetails::quantizePowerOf2AllocationSpace
                                    ( r->lengthWithHeaders() - 1 ) ) {
                            // Count the number of records having a size consistent with the
                            // quantizePowerOf2AllocationSpace quantization implementation.
                            // Because of SERVER-8311, power of 2 quantization is not idempotent and
                            // r->lengthWithHeaders() - 1 must be checked instead of the record
                            // length itself.
                            ++nPowerOf2QuantizedSize;
                        }

                        if (full){
                            BSONObj obj = BSONObj::make(r);
                            if (!obj.isValid() || !obj.valid()){ // both fast and deep checks
                                valid = false;
                                if (nInvalid == 0) // only log once;
                                    errors << "invalid bson object detected (see logs for more info)";

                                nInvalid++;
                                if (strcmp("_id", obj.firstElementFieldName()) == 0){
                                    try {
                                        obj.firstElement().validate(); // throws on error
                                        log() << "Invalid bson detected in " << ns << " with _id: " << obj.firstElement().toString(false) << endl;
                                    }
                                    catch(...){
                                        log() << "Invalid bson detected in " << ns << " with corrupt _id" << endl;
                                    }
                                }
                                else {
                                    log() << "Invalid bson detected in " << ns << " and couldn't find _id" << endl;
                                }
                            }
                            else {
                                bsonLen += obj.objsize();
                            }
                        }
                    }
                    if (Runner::RUNNER_EOF != state) {
                        // TODO: more descriptive logging.
                        warning() << "Internal error while reading collection " << ns << endl;
                    }
                    if ( nsd->isCapped() && !nsd->capLooped() ) {
                        result.append("cappedOutOfOrder", outOfOrder);
                        if ( outOfOrder > 1 ) {
                            valid = false;
                            errors << "too many out of order records";
                        }
                    }
                    result.append("objectsFound", n);

                    if (full) {
                        result.append("invalidObjects", nInvalid);
                    }

                    result.appendNumber("nQuantizedSize", nQuantizedSize);
                    result.appendNumber("nPowerOf2QuantizedSize", nPowerOf2QuantizedSize);
                    result.appendNumber("bytesWithHeaders", len);
                    result.appendNumber("bytesWithoutHeaders", nlen);

                    if (full) {
                        result.appendNumber("bytesBson", bsonLen);
                    }
                }

                BSONArrayBuilder deletedListArray;
                for ( int i = 0; i < Buckets; i++ ) {
                    deletedListArray << nsd->deletedListEntry(i).isNull();
                }

                int ndel = 0;
                long long delSize = 0;
                BSONArrayBuilder delBucketSizes;
                int incorrect = 0;
                for ( int i = 0; i < Buckets; i++ ) {
                    DiskLoc loc = nsd->deletedListEntry(i);
                    try {
                        int k = 0;
                        while ( !loc.isNull() ) {
                            if ( recs.count(loc) )
                                incorrect++;
                            ndel++;

                            if ( loc.questionable() ) {
                                if( nsd->isCapped() && !loc.isValid() && i == 1 ) {
                                    /* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
                                       see comments in namespace.h
                                    */
                                    break;
                                }

                                string err( str::stream() << "bad pointer in deleted record list: "
                                                          << loc.toString()
                                                          << " bucket: " << i
                                                          << " k: " << k );
                                errors << err;
                                valid = false;
                                break;
                            }

                            DeletedRecord *d = loc.drec();
                            delSize += d->lengthWithHeaders();
                            loc = d->nextDeleted();
                            k++;
                            killCurrentOp.checkForInterrupt();
                        }
                        delBucketSizes << k;
                    }
                    catch (...) {
                        errors << ("exception in deleted chain for bucket " + BSONObjBuilder::numStr(i));
                        valid = false;
                    }
                }
                result.appendNumber("deletedCount", ndel);
                result.appendNumber("deletedSize", delSize);
                if ( full ) {
                    result << "delBucketSizes" << delBucketSizes.arr();
                }

                if ( incorrect ) {
                    errors << (BSONObjBuilder::numStr(incorrect) + " records from datafile are in deleted list");
                    valid = false;
                }

                int idxn = 0;
                try  {
                    IndexCatalog* indexCatalog = collection->getIndexCatalog();

                    result.append("nIndexes", nsd->getCompletedIndexCount());
                    BSONObjBuilder indexes; // not using subObjStart to be exception safe
                    NamespaceDetails::IndexIterator i = nsd->ii();
                    while( i.more() ) {
                        IndexDetails& id = i.next();
                        log() << "validating index " << idxn << ": " << id.indexNamespace() << endl;

                        IndexDescriptor* descriptor = indexCatalog->getDescriptor( idxn );
                        verify( descriptor );
                        IndexAccessMethod* iam = indexCatalog->getIndex( descriptor );
                        verify( iam );

                        int64_t keys;
                        iam->validate(&keys);
                        indexes.appendNumber(id.indexNamespace(), static_cast<long long>(keys));
                        idxn++;
                    }
                    result.append("keysPerIndex", indexes.done());
                }
                catch (...) {
                    errors << ("exception during index validate idxn " + BSONObjBuilder::numStr(idxn));
                    valid=false;
                }

            }
            catch (AssertionException) {
                errors << "exception during validate";
                valid = false;
            }

            result.appendBool("valid", valid);
            result.append("errors", errors.arr());

            if ( !full ){
                result.append("warning", "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
            }
            
            if ( !valid ) {
                result.append("advice", "ns corrupt, requires repair");
            }

        }
Example #10
int LocalMemoryGridFile::flush() {
	trace() << " -> LocalMemoryGridFile::flush {file: " << _filename << "}" << endl;
	if (!_dirty) {
		// Since there are no dirty chunks, this does not need a flush
		info() << "buffers are not dirty.. need not flush {filename: " << _filename << "}" << endl;
		return 0;
	}

	size_t bufferLen = 0;
	boost::shared_array<char> buffer = createFlushBuffer(bufferLen);
	if (!buffer.get() && bufferLen > 0) {
		// Failed to create flush buffer
		return -ENOMEM;
	}

	// Get the existing gridfile from GridFS to get metadata and delete the
	// file from the system
	try {
		ScopedDbConnection dbc(globalFSOptions._connectString);
		GridFS gridFS(dbc.conn(), globalFSOptions._db, globalFSOptions._collPrefix);
		GridFile origGridFile = gridFS.findFile(BSON("filename" << _filename));

		if (!origGridFile.exists()) {
			dbc.done();
			warn() << "Requested file not found for flushing back data {file: " << _filename << "}" << endl;
			return -EBADF;
		}

		//TODO: Make checks for appropriate object correctness
		//i.e. do not update anything that is not a Regular File
		//Check what happens in case of a link

		gridFS.removeFile(_filename);
		trace() << "Removing the current file from GridFS {file: " << _filename << "}" << endl;
		//TODO: Check for remove status if that was successful or not
		//TODO: Rather have an update along with active / passive flag for the
		//file

		try {
			GridFS gridFS(dbc.conn(), globalFSOptions._db, globalFSOptions._collPrefix);

			// Create an empty file to signify the file creation and open a local file for the same
			trace() << "Adding new file to GridFS {file: " << _filename << "}" << endl;
			BSONObj fileObj = gridFS.storeFile(buffer.get(), bufferLen, _filename);
			if (!fileObj.isValid()) {
				warn() << "Failed to save file object in data flush {file: " << _filename << "}" << std::endl;
				dbc.done();
				return -EBADF;
			}

			// Update the last updated date for the document
			BSONObj metadata = origGridFile.getMetadata();
			BSONElement fileObjId = fileObj.getField("_id");
			dbc->update(globalFSOptions._filesNS, BSON("_id" << fileObjId.OID()), 
					BSON("$set" << BSON(
								"uploadDate" << origGridFile.getUploadDate() 
								<< "metadata.type" << "file"
								<< "metadata.filename" << mgridfs::getPathBasename(_filename)
								<< "metadata.directory" << mgridfs::getPathDirname(_filename)
								<< "metadata.lastUpdated" << jsTime()
								<< "metadata.uid" << metadata["uid"]
								<< "metadata.gid" << metadata["gid"]
								<< "metadata.mode" << metadata["mode"]
							)
						)
					);
		} catch (DBException& e) {
			error() << "Caught exception in saving remote file in flush {code: " << e.getCode() << ", what: " << e.what()
				<< ", exception: " << e.toString() << "}" << endl;
			return -EIO;
		}

		dbc.done();
	} catch (DBException& e) {
		// Something failed in getting the file from GridFS
		error() << "Caught exception in getting remote file for flush {code: " << e.getCode() << ", what: " << e.what()
			<< ", exception: " << e.toString() << "}" << endl;
		return -EIO;
	}

	_dirty = false;
	debug() << "Completed flushing the file content to GridFS {file: " << _filename << "}" << endl;
	return 0;
}
Example #11
        void validateNS(const char *ns, NamespaceDetails *d, const BSONObj& cmdObj, BSONObjBuilder& result) {
            const bool full = cmdObj["full"].trueValue();
            const bool scanData = full || cmdObj["scandata"].trueValue();

            bool valid = true;
            BSONArrayBuilder errors; // explanation(s) for why valid = false
            if ( d->isCapped() ){
                result.append("capped", d->isCapped());
                result.appendNumber("max", d->maxCappedDocs());
            }

            result.append("firstExtent", str::stream() << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString());
            result.append( "lastExtent", str::stream() <<  d->lastExtent.toString() << " ns:" <<  d->lastExtent.ext()->nsDiagnostic.toString());
            
            BSONArrayBuilder extentData;

            try {
                d->firstExtent.ext()->assertOk();
                d->lastExtent.ext()->assertOk();

                DiskLoc el = d->firstExtent;
                int ne = 0;
                while( !el.isNull() ) {
                    Extent *e = el.ext();
                    e->assertOk();
                    el = e->xnext;
                    ne++;
                    if ( full )
                        extentData << e->dump();
                    
                    killCurrentOp.checkForInterrupt();
                }
                result.append("extentCount", ne);
            }
            catch (...) {
                valid=false;
                errors << "extent asserted";
            }

            if ( full )
                result.appendArray( "extents" , extentData.arr() );

            
            result.appendNumber("datasize", d->stats.datasize);
            result.appendNumber("nrecords", d->stats.nrecords);
            result.appendNumber("lastExtentSize", d->lastExtentSize);
            result.appendNumber("padding", d->paddingFactor());
            

            try {

                try {
                    result.append("firstExtentDetails", d->firstExtent.ext()->dump());

                    valid = valid && d->firstExtent.ext()->validates() && 
                        d->firstExtent.ext()->xprev.isNull();
                }
                catch (...) {
                    errors << "exception firstextent";
                    valid = false;
                }

                set<DiskLoc> recs;
                if( scanData ) {
                    shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
                    int n = 0;
                    int nInvalid = 0;
                    long long len = 0;
                    long long nlen = 0;
                    int outOfOrder = 0;
                    DiskLoc cl_last;
                    while ( c->ok() ) {
                        n++;

                        DiskLoc cl = c->currLoc();
                        if ( n < 1000000 )
                            recs.insert(cl);
                        if ( d->isCapped() ) {
                            if ( cl < cl_last )
                                outOfOrder++;
                            cl_last = cl;
                        }

                        Record *r = c->_current();
                        len += r->lengthWithHeaders();
                        nlen += r->netLength();

                        if (full){
                            BSONObj obj = BSONObj::make(r);
                            if (!obj.isValid() || !obj.valid()){ // both fast and deep checks
                                valid = false;
                                if (nInvalid == 0) // only log once;
                                    errors << "invalid bson object detected (see logs for more info)";

                                nInvalid++;
                                if (strcmp("_id", obj.firstElementFieldName()) == 0){
                                    try {
                                        obj.firstElement().validate(); // throws on error
                                        log() << "Invalid bson detected in " << ns << " with _id: " << obj.firstElement().toString(false) << endl;
                                    }
                                    catch(...){
                                        log() << "Invalid bson detected in " << ns << " with corrupt _id" << endl;
                                    }
                                }
                                else {
                                    log() << "Invalid bson detected in " << ns << " and couldn't find _id" << endl;
                                }
                            }
                        }

                        c->advance();
                    }
                    if ( d->isCapped() && !d->capLooped() ) {
                        result.append("cappedOutOfOrder", outOfOrder);
                        if ( outOfOrder > 1 ) {
                            valid = false;
                            errors << "too many out of order records";
                        }
                    }
                    result.append("objectsFound", n);

                    if (full) {
                        result.append("invalidObjects", nInvalid);
                    }

                    result.appendNumber("bytesWithHeaders", len);
                    result.appendNumber("bytesWithoutHeaders", nlen);
                }

                BSONArrayBuilder deletedListArray;
                for ( int i = 0; i < Buckets; i++ ) {
                    deletedListArray << d->deletedList[i].isNull();
                }

                int ndel = 0;
                long long delSize = 0;
                int incorrect = 0;
                for ( int i = 0; i < Buckets; i++ ) {
                    DiskLoc loc = d->deletedList[i];
                    try {
                        int k = 0;
                        while ( !loc.isNull() ) {
                            if ( recs.count(loc) )
                                incorrect++;
                            ndel++;

                            if ( loc.questionable() ) {
                                if( d->isCapped() && !loc.isValid() && i == 1 ) {
                                    /* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
                                       see comments in namespace.h
                                    */
                                    break;
                                }

                                if ( loc.a() <= 0 || strstr(ns, "hudsonSmall") == 0 ) {
                                    string err (str::stream() << "bad deleted loc: " << loc.toString() << " bucket:" << i << " k:" << k);
                                    errors << err;

                                    valid = false;
                                    break;
                                }
                            }

                            DeletedRecord *d = loc.drec();
                            delSize += d->lengthWithHeaders();
                            loc = d->nextDeleted();
                            k++;
                            killCurrentOp.checkForInterrupt();
                        }
                    }
                    catch (...) {
                        errors << ("exception in deleted chain for bucket " + BSONObjBuilder::numStr(i));
                        valid = false;
                    }
                }
                result.appendNumber("deletedCount", ndel);
                result.appendNumber("deletedSize", delSize);

                if ( incorrect ) {
                    errors << (BSONObjBuilder::numStr(incorrect) + " records from datafile are in deleted list");
                    valid = false;
                }

                int idxn = 0;
                try  {
                    result.append("nIndexes", d->nIndexes);
                    BSONObjBuilder indexes; // not using subObjStart to be exception safe
                    NamespaceDetails::IndexIterator i = d->ii();
                    while( i.more() ) {
                        IndexDetails& id = i.next();
                        log() << "validating index " << idxn << ": " << id.indexNamespace() << endl;
                        long long keys = id.idxInterface().fullValidate(id.head, id.keyPattern());
                        indexes.appendNumber(id.indexNamespace(), keys);
                        idxn++;
                    }
                    result.append("keysPerIndex", indexes.done());
                }
                catch (...) {
                    errors << ("exception during index validate idxn " + BSONObjBuilder::numStr(idxn));
                    valid=false;
                }

            }
            catch (AssertionException) {
                errors << "exception during validate";
                valid = false;
            }

            result.appendBool("valid", valid);
            result.append("errors", errors.arr());

            if ( !full ){
                result.append("warning", "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
            }
            
            if ( !valid ) {
                result.append("advice", "ns corrupt, requires repair");
            }

        }