// Command-line tool: loads an Rdb bucket dump (currently only posdb is
// supported) from the file given as argv[1] and prints its records via
// printRecord.
// Returns 0 on success, 1 on usage error, over-long path, or load failure.
int main(int argc, char **argv) {
	if (argc < 2) {
		print_usage(argv[0]);
		return 1;
	}

	if (strcmp(argv[1], "--h") == 0 || strcmp(argv[1], "--help") == 0) {
		print_usage(argv[0]);
		return 1;
	}

	// argv[1] is untrusted input; reject over-long paths up front instead
	// of overflowing the fixed-size PATH_MAX buffers below with strcpy.
	if (strlen(argv[1]) >= PATH_MAX) {
		fprintf(stderr, "File path too long\n");
		return 1;
	}

	char filepath[PATH_MAX];

	// dirname()/basename() may modify their argument (POSIX), so each call
	// gets a fresh private copy of the path.
	char dir[PATH_MAX];
	strcpy(filepath, argv[1]);
	strcpy(dir, dirname(filepath));

	char filename[PATH_MAX];
	strcpy(filepath, argv[1]);
	strcpy(filename, basename(filepath));

	// initialize library
	g_mem.init();
	hashinit();

	g_conf.init(NULL);

	BigFile bigFile;
	bigFile.set(dir, filename);

	RdbBuckets buckets;
	if (starts_with(filename, "posdb")) {
		buckets.set(Posdb::getFixedDataSize(), g_conf.m_posdbMaxTreeMem, "buckets-posdb", RDB_POSDB, "posdb", Posdb::getKeySize());
		if (!buckets.fastLoad(&bigFile, "posdb")) {
			// report failures on stderr so they do not get mixed
			// into the record dump written to stdout
			fprintf(stderr, "Unable to load bucket\n");
			return 1;
		}

		buckets.printBuckets(printRecord);
	} else {
		fprintf(stderr, "Unsupported rdb type\n");
		return 1;
	}

	return 0;
}
// . one-time startup initialization for the sync subsystem
// . loads quicktree.dat and insync.dat from g_hostdb.m_dir, registers a
//   1000ms sleep callback, and initializes the collectionless syncdb Rdb
// . returns false on fatal setup failure; the "return log(...)" paths
//   presumably propagate false from log() -- TODO confirm log()'s return value
bool Syncdb::init ( ) {
	// reset
	loopReset();
	m_doRcp      = false;
	m_rcpStarted = false;
	// setup quick tree
	if ( ! m_qt.set ( 0           , // fixedDataSize
			  300000      , // 300k nodes
			  true        , // balance?
			  -1          , // maxmem, no max
			  false       , // ownData?
			  "tresyncdb" ,
			  false       , // dataInPtrs?
			  "quicktree" , // dbname
			  16          , // keySize
			  false       ))// useProtection?
		return false;
	BigFile f;
	f.set ( g_hostdb.m_dir , "quicktree.dat" );
	// only load if it exists
	bool exists = f.doesExist();
	// load it
	if ( exists && ! m_qt.fastLoad( &f , &m_stack ) ) 
		return log("sync: quicktree.dat load failed: %s",
			   mstrerror(g_errno));
	// done
	f.close();
	// assume permanently out of sync
	long val = 2;
	// load the insync.dat file
	f.set ( g_hostdb.m_dir , "insync.dat" );
	// fail on open failure
	// NOTE(review): open() runs before the doesExist() check below; if
	// insync.dat is missing, open() may already fail and return false
	// here, which would make the doesExist() branch unreachable --
	// confirm BigFile::open() semantics for a missing file.
	if ( ! f.open ( O_RDONLY ) ) return false;
	// if not there, permanently out of sync
	if ( ! f.doesExist() )
		log("sync: insync.dat does not exist. Assuming host is "
		    "unrecoverable.");
	else {
		// get the value
		char buf[20];
		// NOTE(review): reads up to 10 bytes but never NUL-terminates
		// buf before the atol() below; if all 10 bytes are digits,
		// atol() can run into uninitialized stack bytes -- verify the
		// file always holds a short, terminator-followed value.
		long n = f.read ( &buf , 10 , 0 ) ;
		if ( n <= 0 )
			return log("sync: read insync.dat: %s",
				   mstrerror(g_errno));
		// must be digit
		if ( ! is_digit ( buf[0] ) )
			return log("sync: insync.dat has no number in it.");
		// unlink it (one-shot flag: consumed on every startup)
		if ( ! f.unlink() )
			return log("sync: failed to unlink insync.dat: %s",
				   mstrerror(g_errno));
		// get the value
		val = atol ( buf );
	}
	// bad val? only 0 (out of sync), 1 (in sync), 2 (permanent) are legal
	if ( val < 0 || val > 2 ) 
		return log("sync: insync.dat had bad value of %li",val);
	// report if in sync or not
	if ( val == 0 ) log("sync: insync.dat says out of sync");
	if ( val == 1 ) log("sync: insync.dat says in sync");
	if ( val == 2 ) log("sync: insync.dat says PERMANENTLY out of sync");
	// set it
	Host *h = g_hostdb.m_myHost;
	if ( val == 1 ) h->m_inSync               = 1;
	if ( val == 2 ) h->m_isPermanentOutOfSync = 1;
	// call this once per second
	if ( ! g_loop.registerSleepCallback ( 1000 , NULL , sleepWrapper ) )
		return false;
	// 10 MB
	long maxTreeMem = 10000000;
	// . what's max # of tree nodes?
	// . key+4+left+right+parents+dataPtr = 12+4 +4+4+4+4 = 32
	// . 28 bytes per record when in the tree
	// NOTE(review): the divisor (16 + 1000) does not match the 32/28-byte
	// figures in the comment above -- presumably a deliberate over-estimate
	// of per-record cost; confirm before "fixing".
	long maxTreeNodes  = maxTreeMem / ( 16 + 1000 );
	// . initialize our own internal rdb
	// . records are actual msg4 requests received from Msg4
	// . the key is formed calling Syncdb::makeKey() which is based on
	//   the tid, sid and zid of the msg4 request, where tid is the
	//   twin hostid we are chatting with in our group, sid is the 
	//   ORIGINAL sending hostid of the msg4 request, and zid is the
	//   kinda transaction #, and is unique.
	if ( ! m_rdb.init ( g_hostdb.m_dir ,
			    "syncdb"       ,
			    true           , // dedup
			    -1             , // dataSize is variable
			    50             , // min files to merge
			    maxTreeMem     ,
			    maxTreeNodes   , // maxTreeNodes  ,
			    true           , // balance tree?
			    50000          , // maxCacheMem   , 
			    100            , // maxCacheNodes ,
			    false          , // half keys?
			    false          ,  // save cache?
			    NULL           ,  // page cache
			    false          ,  // is titledb
			    false          ,  // preload disk page cache
			    16             ,  // key size
			    false          , // bias disk page cache?
			    true           ))// is collectionless?
		return false;
	// add the coll
	//if ( ! g_syncdb.m_rdb.addColl ( "dummy" ) ) return true;
	
	// reset quick tree?
	if ( ! h->m_isPermanentOutOfSync ) return true;
	// clear it all!
	m_qt.clear();
	// add the base since it is a collectionless rdb
	return m_rdb.addRdbBase1 ( NULL );
}
// . sets m_fileOffset and m_bf
// . returns false and sets g_errno on error
// . returns false if nothing to read too... but does not set g_errno
// . scans <importDir>/inject (or the collection's configured import dir)
//   for titledbNNNN*.dat files and opens the lowest-numbered one not yet
//   processed (m_bfFileId tracks the last file consumed)
bool ImportState::setCurrentTitleFileAndOffset ( ) {

	// leave m_bf and m_fileOffset alone if there is more to read
	if ( m_fileOffset < m_bfFileSize )
		return true;

	CollectionRec *cr = g_collectiondb.getRec ( m_collnum );
	if ( ! cr ) return false;

	log("import: import finding next file");
	
	// if ( m_offIsValid ) {
	// 	//*off = m_fileOffset;
	// 	return &m_bf; 
	// }
	//m_offIsValid = true;

	// look for titledb0001.dat etc. files in the 
	// workingDir/inject/ subdir
	// NOTE(review): this %s of m_importDir happens BEFORE the emptiness
	// check below; if getBufStart() can return NULL this is undefined --
	// confirm SafeBuf::getBufStart() semantics on an empty buffer.
	SafeBuf ddd;
	ddd.safePrintf("%sinject",cr->m_importDir.getBufStart());
	// now use the one provided. we should also provide the # of threads
	if ( cr->m_importDir.getBufStart() && 
	     cr->m_importDir.getBufStart()[0] ) {
		ddd.reset();
		ddd.safeStrcpy ( cr->m_importDir.getBufStart() );
	}

	//
	// assume we are the first filename
	// set s_fileId to the minimum
	//
	Dir dir;
	dir.set(ddd.getBufStart());

	if ( ! dir.open() ) return false;

	// assume none
	long minFileId = -1;

	// getNextFilename() writes into this
	char pattern[64]; strcpy ( pattern , "titledb*" );
	char *filename;
	while ( ( filename = dir.getNextFilename ( pattern ) ) ) {
		// filename must be a certain length
		long filenameLen = gbstrlen(filename);
		// we need at least "titledb0001.dat"
		if ( filenameLen < 15 ) continue;
		// ensure filename starts w/ our m_dbname
		if ( strncmp ( filename , "titledb", 7 ) != 0 )
			continue;
		// skip if not .dat file
		if ( ! strstr ( filename , ".dat" ) )
			continue;
		// then a 4 digit number should follow
		char *s = filename + 7;
		if ( ! isdigit(*(s+0)) ) continue;
		if ( ! isdigit(*(s+1)) ) continue;
		if ( ! isdigit(*(s+2)) ) continue;
		if ( ! isdigit(*(s+3)) ) continue;
		// convert digit to id (atol stops at the first non-digit,
		// e.g. the '-' in "titledb0001-000.dat")
		long id = atol(s);
		// . do not accept files we've already processed
		// . -1 means we haven't processed any yet
		if ( m_bfFileId >= 0 && id <= m_bfFileId ) continue;
		// the min of those we haven't yet processed/injected
		if ( id < minFileId || minFileId < 0 ) minFileId = id;
	}

	// get where we left off
	if ( ! m_loadedPlaceHolder ) {
		// read where we left off from file if possible
		char fname[256];
		sprintf(fname,"%slasttitledbinjectinfo.dat",g_hostdb.m_dir);
		SafeBuf ff;
		ff.fillFromFile(fname);
		if ( ff.length() > 1 ) {
			m_loadedPlaceHolder = true;
			// get the placeholder
			// assumes m_fileOffset is a 64-bit unsigned (%llu)
			// and minFileId matches %lu -- TODO confirm the
			// member/local types match these format specifiers
			sscanf ( ff.getBufStart() 
				 , "%llu,%lu"
				 , &m_fileOffset
				 , &minFileId
				 );
		}
	}

	// if no files! return false to indicate we are done
	if ( minFileId == -1 ) return false;

	// set up s_bf then
	//if ( m_bfFileId != minFileId ) {
	SafeBuf tmp;
	tmp.safePrintf("titledb%04li-000.dat"
		       //,dir.getDirname()
		       ,minFileId);
	m_bf.set ( dir.getDirname() ,tmp.getBufStart() );
	if ( ! m_bf.open( O_RDONLY ) ) {
		log("inject: import: could not open %s%s for reading",
		    dir.getDirname(),tmp.getBufStart());
		return false;
	}
	m_bfFileId = minFileId;
	// reset ptr into file
	//*off = 0;
	// and set this
	m_bfFileSize = m_bf.getFileSize();

	// NOTE(review): this unconditionally resets the offset, clobbering
	// any m_fileOffset just parsed from lasttitledbinjectinfo.dat above
	// -- a resumed import appears to restart its file from byte 0;
	// confirm whether that is intended before relying on the placeholder.
	m_fileOffset = 0;
	//}

	log("import: importing from file %s",m_bf.getFilename());

	return true;//&m_bf;
}