void IndexPersister::saveIndexToFile(shared_ptr<Dic> index, string termFile, string postingsFile) {
	cout << "Saving index to disk.." << endl;
	ofstream termStream(termFile, std::ios::binary);
	ofstream postingsStream(postingsFile, std::ios::binary);
	for (Dic::list_iterator list = index->begin(); list != index->end(); list++) {
		string term = list->getTerm();
		if (term.length() > 255) {
			cout << "We don't index terms larger than 255 characters!" << endl;
			continue;
		}
		// one-byte term length, then the raw term bytes
		// (unsigned so lengths above 127 don't overflow a signed char)
		unsigned char charCount = static_cast<unsigned char>(term.length());
		termStream.write(reinterpret_cast<const char *>(&charCount), sizeof(charCount));
		termStream << term;
		// number of documents in this term's posting list
		int docNum = list->getLength();
		termStream.write(reinterpret_cast<const char *>(&docNum), sizeof(docNum));
		// append this term's docids to the postings file, in list order
		auto posting = list->getPostings();
		while (posting != NULL) {
			int docId = posting->getDocId();
			postingsStream.write(reinterpret_cast<const char *>(&docId), sizeof(docId));
			posting = posting->next;
		}
	}
}
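// A minimal standalone sketch of a complementary loader, assuming only the
// on-disk layout produced by saveIndexToFile above: the term file holds, per
// term, a 1-byte length, the raw term bytes, and a 4-byte doc count; the
// postings file holds the matching 4-byte docids in the same term order.
// The name loadIndexFromFile and the map<string, vector<int>> return shape
// are illustrative assumptions, not part of the original Dic API.
#include <fstream>
#include <map>
#include <string>
#include <utility>
#include <vector>

std::map<std::string, std::vector<int>> loadIndexFromFile(const std::string &termFile,
                                                          const std::string &postingsFile) {
	std::ifstream termStream(termFile, std::ios::binary);
	std::ifstream postingsStream(postingsFile, std::ios::binary);
	std::map<std::string, std::vector<int>> index;
	unsigned char charCount = 0;
	// read term records until the term file is exhausted
	while (termStream.read(reinterpret_cast<char *>(&charCount), sizeof(charCount))) {
		std::string term(charCount, '\0');
		termStream.read(&term[0], charCount);
		int docNum = 0;
		termStream.read(reinterpret_cast<char *>(&docNum), sizeof(docNum));
		// pull this term's docids from the postings file, in write order
		std::vector<int> docIds(docNum);
		for (int i = 0; i < docNum; i++)
			postingsStream.read(reinterpret_cast<char *>(&docIds[i]), sizeof(int));
		index[term] = std::move(docIds);
	}
	return index;
}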
// init our rdb
bool Titledb::init ( ) {
	// key sanity tests
	int64_t uh48 = 0x1234567887654321LL & 0x0000ffffffffffffLL;
	int64_t docId = 123456789;
	key96_t k = makeKey(docId,uh48,false);
	if ( getDocId(&k) != docId ) { g_process.shutdownAbort(true); }
	if ( getUrlHash48(&k) != uh48 ) { g_process.shutdownAbort(true); }

	const char *url = "http://.ezinemark.com/int32_t-island-child-custody-attorneys-new-york-visitation-lawyers-melville-legal-custody-law-firm-45f00bbed18.html";
	Url uu;
	uu.set(url);
	const char *d1 = uu.getDomain();
	int32_t dlen1 = uu.getDomainLen();
	int32_t dlen2 = 0;
	const char *d2 = getDomFast ( url , &dlen2 );
	if ( !d1 || !d2 ) { g_process.shutdownAbort(true); }
	if ( dlen1 != dlen2 ) { g_process.shutdownAbort(true); }

	// another one
	url = "http://ok/";
	uu.set(url);
	const char *d1a = uu.getDomain();
	dlen1 = uu.getDomainLen();
	dlen2 = 0;
	const char *d2a = getDomFast ( url , &dlen2 );
	if ( d1a || d2a ) { g_process.shutdownAbort(true); }
	if ( dlen1 != dlen2 ) { g_process.shutdownAbort(true); }

	// . what's max # of tree nodes?
	// . assume avg TitleRec size (compressed html doc) is about 1k we get:
	// . NOTE: overhead is about 32 bytes per node
	int32_t maxTreeNodes = g_conf.m_titledbMaxTreeMem / (1*1024);

	// initialize our own internal rdb
	return m_rdb.init ( "titledb" ,
	                    -1 ,    // fixed record size
	                    //g_conf.m_titledbMinFilesToMerge ,
	                    // this should not really be changed...
	                    -1,
	                    g_conf.m_titledbMaxTreeMem ,
	                    maxTreeNodes ,
	                    false,  // half keys?
	                    12,     // key size
	                    false,  //isCollectionLess
	                    false); //useIndexFile

	// validate
	//return verify ( );
}
/*
bool Titledb::addColl ( char *coll, bool doVerify ) {
	if ( ! m_rdb.addColl ( coll ) ) return false;
	if ( ! doVerify ) return true;
	// verify
	if ( verify(coll) ) return true;
	// if not allowing scale, return false
	if ( ! g_conf.m_allowScale ) return false;
	// otherwise let it go
	log ( "db: Verify failed, but scaling is allowed, passing." );
	return true;
}
*/

bool Titledb::verify(const char *coll) {
	log ( LOG_DEBUG, "db: Verifying Titledb for coll %s...", coll );

	Msg5 msg5;
	RdbList list;
	key96_t startKey;
	key96_t endKey;
	startKey.setMin();
	endKey.setMax();
	//int32_t minRecSizes = 64000;
	const CollectionRec *cr = g_collectiondb.getRec(coll);

	if ( ! msg5.getList ( RDB_TITLEDB   ,
	                      cr->m_collnum ,
	                      &list         ,
	                      startKey      ,
	                      endKey        ,
	                      1024*1024     , // minRecSizes ,
	                      true          , // includeTree ,
	                      0             , // max cache age
	                      0             , // startFileNum ,
	                      -1            , // numFiles ,
	                      NULL          , // state
	                      NULL          , // callback
	                      0             , // niceness
	                      false         , // err correction?
	                      NULL          , // cache key ptr
	                      0             , // retry num
	                      -1            , // maxRetries
	                      -1LL          , // sync point
	                      false         , // isRealMerge
	                      true)) {        // allowPageCache
		log(LOG_DEBUG, "db: HEY! it did not block");
		return false;
	}

	int32_t count = 0;
	int32_t got   = 0;
	for ( list.resetListPtr() ; ! list.isExhausted() ;
	      list.skipCurrentRecord() ) {
		key96_t k = list.getCurrentKey();
		// skip negative keys
		if ( (k.n0 & 0x01) == 0x00 ) continue;
		count++;
		//uint32_t groupId = getGroupId ( RDB_TITLEDB , &k );
		//if ( groupId == g_hostdb.m_groupId ) got++;
		uint32_t shardNum = getShardNum ( RDB_TITLEDB, &k );
		if ( shardNum == getMyShardNum() ) got++;
	}
	if ( got != count ) {
		// tally it up
		g_rebalance.m_numForeignRecs += count - got;
		log ("db: Out of first %" PRId32" records in titledb, "
		     "only %" PRId32" belong to our shard. c=%s",count,got,coll);
		// exit if NONE, we probably got the wrong data
		if ( count > 10 && got == 0 )
			log("db: Are you sure you have the right "
			    "data in the right directory? "
			    "coll=%s "
			    "Exiting.",
			    coll);
		// repeat with log
		for ( list.resetListPtr() ; ! list.isExhausted() ;
		      list.skipCurrentRecord() ) {
			key96_t k = list.getCurrentKey();
			//uint32_t groupId = getGroupId ( RDB_TITLEDB,&k);
			//int32_t groupNum = g_hostdb.getGroupNum(groupId);
			int32_t shardNum = getShardNum ( RDB_TITLEDB, &k );
			log("db: docid=%" PRId64" shard=%" PRId32,
			    getDocId(&k),shardNum);
		}
		//if ( g_conf.m_bypassValidation ) return true;
		//if ( g_conf.m_allowScale ) return true;
		// don't exit any more, allow it, but do not delete
		// recs that belong to different shards when we merge now!
		log ( "db: db shards unbalanced. "
		      "Click autoscale in master controls.");
		//return false;
		return true;
	}

	log ( LOG_DEBUG, "db: Titledb passed verification successfully "
	      "for %" PRId32" recs.", count );

	// DONE
	return true;
}
// init our rdb
bool Titledb::init ( ) {
	// key sanity tests
	int64_t uh48 = 0x1234567887654321LL & 0x0000ffffffffffffLL;
	int64_t docId = 123456789;
	key_t k = makeKey(docId,uh48,false);
	if ( getDocId(&k) != docId ) { char *xx=NULL;*xx=0; }
	if ( getUrlHash48(&k) != uh48 ) { char *xx=NULL;*xx=0; }

	char *url = "http://.ezinemark.com/int32_t-island-child-custody-attorneys-new-york-visitation-lawyers-melville-legal-custody-law-firm-45f00bbed18.html";
	Url uu;
	uu.set(url);
	char *d1 = uu.getDomain();
	int32_t dlen1 = uu.getDomainLen();
	int32_t dlen2 = 0;
	char *d2 = getDomFast ( url , &dlen2 );
	if ( dlen1 != dlen2 ) { char *xx=NULL;*xx=0; }

	// another one
	url = "http://ok/";
	uu.set(url);
	d1 = uu.getDomain();
	dlen1 = uu.getDomainLen();
	dlen2 = 0;
	d2 = getDomFast ( url , &dlen2 );
	if ( dlen1 != dlen2 ) { char *xx=NULL;*xx=0; }

	int64_t maxMem = 200000000; // 200MB

	// . what's max # of tree nodes?
	// . assume avg TitleRec size (compressed html doc) is about 1k we get:
	// . NOTE: overhead is about 32 bytes per node
	int32_t maxTreeNodes = maxMem / (1*1024);

	// . we now use a disk page cache for titledb as opposed to the
	//   old rec cache. i am trying to do away with the Rdb::m_cache rec
	//   cache in favor of cleverly used disk page caches, because
	//   the rec caches are not real-time and get stale.
	// . just hard-code 30MB for now
	int32_t pcmem = 30000000; // = g_conf.m_titledbMaxDiskPageCacheMem;
	// f**k that we need all the mem!
	//pcmem = 0;
	// do not use any page cache if doing tmp cluster in order to
	// prevent swapping
	if ( g_hostdb.m_useTmpCluster ) pcmem = 0;
	int32_t pageSize = GB_INDEXDB_PAGE_SIZE;

	// init the page cache
	// . MDW: "minimize disk seeks" not working otherwise i'd enable it!
	if ( ! m_pc.init ( "titledb", RDB_TITLEDB, pcmem , pageSize ) )
		return log("db: Titledb init failed.");

	// each entry in the cache is usually just a single record, no lists
	//int32_t maxCacheNodes = g_conf.m_titledbMaxCacheMem / (10*1024);

	// initialize our own internal rdb
	if ( ! m_rdb.init ( g_hostdb.m_dir ,
	                    "titledb"      ,
	                    true           , // dedup same keys?
	                    -1             , // fixed record size
	                    //g_hostdb.m_groupMask ,
	                    //g_hostdb.m_groupId ,
	                    //g_conf.m_titledbMinFilesToMerge ,
	                    // this should not really be changed...
	                    -1,//3,//230 minfilestomerge mintomerge
	                    maxMem,  // g_conf.m_titledbMaxTreeMem ,
	                    maxTreeNodes ,
	                    // now we balance so Sync.cpp can ordered huge list
	                    true           , // balance tree?
	                    // turn off cache for now because the page cache
	                    // is just as fast and does not get out of date
	                    // so bad??
	                    //0 ,
	                    0,//g_conf.m_titledbMaxCacheMem ,
	                    0,//maxCacheNodes ,
	                    false          ,// half keys?
	                    false          ,// g_conf.m_titledbSav
	                    &m_pc          , // page cache ptr
	                    true ) )         // is titledb?
		return false;
	return true;

	// validate
	//return verify ( );
}