Example #1
    Status IndexBuilder::_build(OperationContext* txn,
                                Database* db,
                                bool allowBackgroundBuilding,
                                Lock::DBLock* dbLock) const {
        const NamespaceString ns(_index["ns"].String());

        Collection* c = db->getCollection( ns.ns() );
        if ( !c ) {
            while (true) {
                try {
                    WriteUnitOfWork wunit(txn);
                    c = db->getOrCreateCollection( txn, ns.ns() );
                    verify(c);
                    wunit.commit();
                    break;
                }
                catch (const WriteConflictException& wce) {
                    LOG(2) << "WriteConflictException while creating collection in IndexBuilder"
                           << ", retrying.";
                    txn->recoveryUnit()->abandonSnapshot();
                    continue;
                }
            }
        }

        // Show which index we're building in the curop display.
        CurOp::get(txn)->setQuery(_index);

        bool haveSetBgIndexStarting = false;
        while (true) {
            Status status = Status::OK();
            try {
                MultiIndexBlock indexer(txn, c);
                indexer.allowInterruption();

                if (allowBackgroundBuilding)
                    indexer.allowBackgroundBuilding();


                IndexDescriptor* descriptor(NULL);
                try {
                    status = indexer.init(_index);
                    if ( status.code() == ErrorCodes::IndexAlreadyExists ) {
                        if (allowBackgroundBuilding) {
                            // Must set this in case anyone is waiting for this build.
                            _setBgIndexStarting();
                        }
                        return Status::OK();
                    }

                    if (status.isOK()) {
                        if (allowBackgroundBuilding) {
                            descriptor = indexer.registerIndexBuild();
                            if (!haveSetBgIndexStarting) {
                                _setBgIndexStarting();
                                haveSetBgIndexStarting = true;
                            }
                            invariant(dbLock);
                            dbLock->relockWithMode(MODE_IX);
                        }

                        Lock::CollectionLock colLock(txn->lockState(), ns.ns(), MODE_IX);
                        status = indexer.insertAllDocumentsInCollection();
                    }

                    if (status.isOK()) {
                        if (allowBackgroundBuilding) {
                            dbLock->relockWithMode(MODE_X);
                        }
                        WriteUnitOfWork wunit(txn);
                        indexer.commit();
                        wunit.commit();
                    }
                }
                catch (const DBException& e) {
                    status = e.toStatus();
                }

                if (allowBackgroundBuilding) {
                    dbLock->relockWithMode(MODE_X);
                    Database* reloadDb = dbHolder().get(txn, ns.db());
                    fassert(28553, reloadDb);
                    fassert(28554, reloadDb->getCollection(ns.ns()));
                    indexer.unregisterIndexBuild(descriptor);
                }

                if (status.code() == ErrorCodes::InterruptedAtShutdown) {
                    // leave it as if kill -9 happened; this will be handled on restart.
                    indexer.abortWithoutCleanup();
                }
            }
            catch (const WriteConflictException& wce) {
                status = wce.toStatus();
            }

            if (status.code() != ErrorCodes::WriteConflict)
                return status;


            LOG(2) << "WriteConflictException while creating index in IndexBuilder, retrying.";
            txn->recoveryUnit()->abandonSnapshot();
        }
    }
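
Note on the pattern above: each write is attempted inside a WriteUnitOfWork, and on a WriteConflictException the snapshot is abandoned and the work retried. A minimal, self-contained sketch of that retry idiom (the exception type and the conflict handler below are stand-ins for illustration, not MongoDB's actual classes):

#include <iostream>
#include <stdexcept>

// Stand-in for MongoDB's WriteConflictException (assumed for illustration).
struct WriteConflictException : std::runtime_error {
    WriteConflictException() : std::runtime_error("write conflict") {}
};

// Run `work`; on a write conflict, run `onConflict` (e.g. abandon the
// snapshot) and try again, mirroring the loops in _build().
template <typename Work, typename OnConflict>
void retryOnWriteConflict(Work work, OnConflict onConflict) {
    while (true) {
        try {
            work();        // e.g. create the collection inside a WriteUnitOfWork
            return;        // success: leave the loop
        } catch (const WriteConflictException&) {
            onConflict();  // e.g. txn->recoveryUnit()->abandonSnapshot()
        }
    }
}

int main() {
    int attempts = 0;
    retryOnWriteConflict(
        [&] { if (++attempts < 3) throw WriteConflictException(); },
        [] { std::cout << "conflict, retrying\n"; });
    std::cout << "succeeded after " << attempts << " attempts\n";
    return 0;
}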
Example #2
int main() {

	Database *db = Database::getInstance();
	/*
	   Get an instance of the session first
	*/
	Session *session = Session::getInstance();
	
	while (FCGI_Accept() >= 0) {
		
		/*
		    Call this method on every request to initialize the session
		*/
		session->sessionInit();


		//FCGI_printf("Content-type: text/html\r\n"
		//            "\r\n");
		
		
		string result("fail");
		string detail("");

		unordered_map<string,string> ans;

		char * method = getenv("REQUEST_METHOD");

		if (method != NULL && strcmp(method, "POST") == 0) {
			char *contentLength = getenv("CONTENT_LENGTH");
			int len;
			if (contentLength != NULL) {
				len = strtol(contentLength, NULL, 10);
			} else {
				len = 0;
			}
			int i, ch;
			string post_val="";
			for (i = 0; i < len; i++) {
				if ((ch = getchar()) < 0) {
					break;
				}
				post_val = post_val + (char) ch ;
			}
			ParseParam(post_val,ans);
		
		} else if (method != NULL && strcmp(method, "GET") == 0) {

			//char* str = getenv("QUERY_STRING");
	        	//string Param(str);
	        	//ParseParam(Param,ans);
			ctemplate::TemplateDictionary dict("login");
			std::string output;
			ctemplate::ExpandTemplate("./dist/template/login.tpl", ctemplate::DO_NOT_STRIP, &dict, &output);

			FCGI_printf("Content-type: text/html\r\n"
			            "\r\n"
			            "%s", output.c_str());
			continue;
		}

		int argu_count = 0;
		if (ans.find("username") != ans.end())
			argu_count++;

		if (ans.find("password") != ans.end())
			argu_count++;

		if (argu_count < 2) {
			detail = "Invalid parameters!";
		}
		else
		{
			char query_buf[1024] = {0};
			char update_buf[1024] = {0};

			string username,password;

			unordered_map<string,string>::iterator it;

			it = ans.find("username");
			username = it->second;

			it = ans.find("password");
			password = it->second;

			snprintf(query_buf, sizeof(query_buf), "select * from users where username = '%s' and password = '%s'", username.c_str(), password.c_str());
			snprintf(update_buf, sizeof(update_buf), "update users set state = '1' where username = '%s' and password = '%s'", username.c_str(), password.c_str());
			string query(query_buf);
			string update(update_buf);
			int rows = db->dbQuery(query);
			if(rows > 0){
				result = "success";
				db->dbQuery(update);
				/*
					Need to set the session and reply with a cookie to the client
				*/
				
				string cookie = session->getCookie();
				FCGI_printf("Set-Cookie:%s;PATH=/\n",cookie.c_str());
	      
			/*
				set session
			*/
			vector<unordered_map<string,string> > info;
			db->dbQuery(query, info);
			session->setOnline(atoi(info[0]["user_id"].c_str()));
			session->setValue("user_id", info[0]["user_id"]);
			session->setValue("username", info[0]["username"]);
			session->setValue("nickname", info[0]["nickname"]);
			session->setValue("sex", info[0]["sex"]);


			} else {
				detail = detail + " Incorrect username or password!";
			}
		} 

		/*
			The blank "\r\n" line below marks the boundary between headers and body
		*/
		FCGI_printf("Content-type: application/json\r\n"
		"\r\n");
		Json::Value root;
		root["result"] = Json::Value(result);
		if (result == "fail") {
			root["detail"] = Json::Value(detail);
		}
		Json::FastWriter fw;
		string str = fw.write(root);
		FCGI_printf("%s",str.c_str());
	
	}
	return 0;
}
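
The queries above are assembled with snprintf from raw user input, which leaves the login open to SQL injection. The safer shape binds the input as parameters. A minimal sketch using SQLite's C API as a stand-in backend (this project's Database wrapper is custom and its real interface is not shown, so the backend choice here is an assumption):

#include <sqlite3.h>
#include <string>

// Check credentials with a parameterized query: user input is bound as
// data and never concatenated into the SQL text.
bool checkLogin(sqlite3* db, const std::string& user, const std::string& pass) {
    const char* sql =
        "select count(*) from users where username = ?1 and password = ?2";
    sqlite3_stmt* stmt = NULL;
    if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK)
        return false;
    sqlite3_bind_text(stmt, 1, user.c_str(), -1, SQLITE_TRANSIENT);
    sqlite3_bind_text(stmt, 2, pass.c_str(), -1, SQLITE_TRANSIENT);
    bool ok = (sqlite3_step(stmt) == SQLITE_ROW && sqlite3_column_int(stmt, 0) > 0);
    sqlite3_finalize(stmt);
    return ok;
}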
Example #3
    void ClientCursor::invalidate(const StringData& ns) {
        Lock::assertWriteLocked(ns);

        size_t dot = ns.find( '.' );
        verify( dot != string::npos );

        // first (and only) dot is the last char
        bool isDB = dot == ns.size() - 1;

        Database *db = cc().database();
        verify(db);
        verify(ns.startsWith(db->name()));

        recursive_scoped_lock cclock(ccmutex);
        // Look at all active non-cached Runners.  These are the runners that are in auto-yield mode
        // that are not attached to the client cursor. For example, all internal runners don't
        // need to be cached -- there will be no getMore.
        for (set<Runner*>::iterator it = nonCachedRunners.begin(); it != nonCachedRunners.end();
             ++it) {

            Runner* runner = *it;
            const string& runnerNS = runner->ns();
            if ( ( isDB && StringData(runnerNS).startsWith(ns) ) || ns == runnerNS ) {
                runner->kill();
            }
        }

        // Look at all cached ClientCursor(s).  The CC may have a Runner, a Cursor, or nothing (see
        // sharding_block.h).
        CCById::const_iterator it = clientCursorsById.begin();
        while (it != clientCursorsById.end()) {
            ClientCursor* cc = it->second;

            // We're only interested in cursors over one db.
            if (cc->_db != db) {
                ++it;
                continue;
            }

            // Note that a valid ClientCursor state is "no cursor no runner."  This is because
            // the set of active cursor IDs in ClientCursor is used as representation of query
            // state.  See sharding_block.h.  TODO(greg,hk): Move this out.
            if (NULL == cc->c() && NULL == cc->_runner.get()) {
                ++it;
                continue;
            }

            bool shouldDelete = false;

            // We will only delete CCs with runners that are not actively in use.  The runners that
            // are actively in use are instead kill()-ed.
            if (NULL != cc->_runner.get()) {
                verify(NULL == cc->c());

                if (isDB || cc->_runner->ns() == ns) {
                    // If there is a pinValue >= 100, somebody is actively using the CC and we do
                    // not delete it.  Instead we notify the holder that we killed it.  The holder
                    // will then delete the CC.
                    if (cc->_pinValue >= 100) {
                        cc->_runner->kill();
                    }
                    else {
                        // pinvalue is <100, so there is nobody actively holding the CC.  We can
                        // safely delete it as nobody is holding the CC.
                        shouldDelete = true;
                    }
                }
            }
            // Begin cursor-only DEPRECATED
            else if (cc->c()->shouldDestroyOnNSDeletion()) {
                verify(NULL == cc->_runner.get());

                if (isDB) {
                    // already checked that db matched above
                    dassert( StringData(cc->_ns).startsWith( ns ) );
                    shouldDelete = true;
                }
                else {
                    if ( ns == cc->_ns ) {
                        shouldDelete = true;
                    }
                }
            }
            // End cursor-only DEPRECATED

            if (shouldDelete) {
                ClientCursor* toDelete = it->second;
                CursorId id = toDelete->cursorid();
                delete toDelete;
                // We're not following the usual paradigm of saving it, ++it, and deleting the saved
                // 'it' because deleting 'it' might invalidate the next thing in clientCursorsById.
                // TODO: Why?
                it = clientCursorsById.upper_bound(id);
            }
            else {
                ++it;
            }
        }
    }
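
The deletion loop above shows the safe way to erase from an ordered map while iterating: erase, then re-seek with upper_bound on the erased key. A minimal, self-contained sketch of the same idiom with std::map:

#include <iostream>
#include <map>
#include <string>

// Erase-while-iterating over an ordered map, re-seeking with upper_bound()
// after each deletion, mirroring the clientCursorsById loop above.
int main() {
    std::map<int, std::string> byId = {{1, "a"}, {2, "b"}, {3, "c"}, {4, "d"}};
    std::map<int, std::string>::iterator it = byId.begin();
    while (it != byId.end()) {
        bool shouldDelete = (it->first % 2 == 0);  // stand-in predicate
        if (shouldDelete) {
            int id = it->first;
            byId.erase(it);             // invalidates only 'it' itself
            it = byId.upper_bound(id);  // first key greater than the erased one
        } else {
            ++it;
        }
    }
    for (const auto& kv : byId) std::cout << kv.first << " => " << kv.second << "\n";
    return 0;
}

With C++11, `it = byId.erase(it)` would also work for a plain std::map; the upper_bound reseek matters when, as the comment in the example notes, deleting one entry may invalidate other entries in the map.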
Example #4
int main(int argc, const char* argv[])
{
    int j;
    int k;
    const char* dv50Filename = NULL;
    const char* mjpeg21Filename = NULL;
    const char* filenamePrefix = NULL;
    const char* recorderName = g_defaultRecorderName;
    bool oldSession = false;
    string tapeNumberPrefix = g_defaultTapeNumberPrefix;
    int64_t startPosition = 0;
    int cmdlnIndex = 1;
    int startPosHour = 10;
    int startPosMin = 0;
    int startPosSec = 0;
    int startPosFrame = 0;
    bool isPALProject = true;

    
    if (argc < 2)
    {
        usage(argv[0]);
        return 1;
    }
    
    if (argc == 2)
    {
        if (strcmp(argv[cmdlnIndex], "--help") == 0 ||
            strcmp(argv[cmdlnIndex], "-h") == 0)
        {
            usage(argv[0]);
            return 0;
        }
    }        
    
    // set logging
    Logging::initialise(LOG_LEVEL_DEBUG);
    
    // connect the MXF logging facility
    connectLibMXFLogging();
    
    while (cmdlnIndex + 1 < argc)
    {
        if (strcmp(argv[cmdlnIndex], "--help") == 0 ||
            strcmp(argv[cmdlnIndex], "-h") == 0)
        {
            usage(argv[0]);
            return 0;
        }
        else if (strcmp(argv[cmdlnIndex], "--dv50") == 0)
        {
            if (cmdlnIndex + 1 >= argc)
            {
                usage(argv[0]);
                fprintf(stderr, "Missing value for argument '%s'\n", argv[cmdlnIndex]);
                return 1;
            }
            dv50Filename = argv[cmdlnIndex + 1];
            cmdlnIndex += 2;
        }
        else if (strcmp(argv[cmdlnIndex], "--mjpeg21") == 0)
        {
            if (cmdlnIndex + 1 >= argc)
            {
                usage(argv[0]);
                fprintf(stderr, "Missing value for argument '%s'\n", argv[cmdlnIndex]);
                return 1;
            }
            mjpeg21Filename = argv[cmdlnIndex + 1];
            cmdlnIndex += 2;
        }
        else if (strcmp(argv[cmdlnIndex], "--old-session") == 0)
        {
            oldSession = true;
            cmdlnIndex++;
        }
        else if (strcmp(argv[cmdlnIndex], "--ntsc") == 0)
        {
            isPALProject = false;
            cmdlnIndex++;
        }
        else if (strcmp(argv[cmdlnIndex], "-r") == 0)
        {
            if (cmdlnIndex + 1 >= argc)
            {
                usage(argv[0]);
                fprintf(stderr, "Missing value for argument '%s'\n", argv[cmdlnIndex]);
                return 1;
            }
            recorderName = argv[cmdlnIndex + 1];
            cmdlnIndex += 2;
        }
        else if (strcmp(argv[cmdlnIndex], "-t") == 0)
        {
            if (cmdlnIndex + 1 >= argc)
            {
                usage(argv[0]);
                fprintf(stderr, "Missing value for argument '%s'\n", argv[cmdlnIndex]);
                return 1;
            }
            tapeNumberPrefix = argv[cmdlnIndex + 1];
            cmdlnIndex += 2;
        }
        else if (strcmp(argv[cmdlnIndex], "-s") == 0)
        {
            if (cmdlnIndex + 1 >= argc)
            {
                usage(argv[0]);
                fprintf(stderr, "Missing value for argument '%s'\n", argv[cmdlnIndex]);
                return 1;
            }
            if (sscanf(argv[cmdlnIndex + 1], "%d:%d:%d:%d", &startPosHour, &startPosMin, &startPosSec, &startPosFrame) != 4)
            {
                usage(argv[0]);
                fprintf(stderr, "Invalid value '%s' for argument '%s'\n", argv[cmdlnIndex + 1], argv[cmdlnIndex]);
                return 1;
            }
            cmdlnIndex += 2;
        }
        else
        {
            usage(argv[0]);
            fprintf(stderr, "Unknown argument '%s'\n", argv[cmdlnIndex]);
            return 1;
        }
    }

    if (cmdlnIndex >= argc)
    {
        usage(argv[0]);
        fprintf(stderr, "Missing filename prefix\n");
        return 1;
    }

    filenamePrefix = argv[cmdlnIndex];    

    // calculate the start position now we know the frame rate
    if (isPALProject)
    {
        startPosition = startPosHour * 60 * 60 * 25 + startPosMin * 60 * 25 + startPosSec * 25 + startPosFrame;
    }
    else
    {
        startPosition = startPosHour * 60 * 60 * 30 + startPosMin * 60 * 30 + startPosSec * 30 + startPosFrame;
    }


    // initialise the database
    try
    {
        Database::initialise("localhost", "prodautodb", "bamzooki", "bamzooki", 1, 2 * NUM_RECORD_THREADS);
    }
    catch (const DBException& ex)
    {
        fprintf(stderr, "Failed to connect to database:\n  %s\n", ex.getMessage().c_str());
        return 1;
    }

    // load the recorder configuration
    auto_ptr<Recorder> recorder;
    try
    {
        Database* database = Database::getInstance();
        recorder = auto_ptr<Recorder>(database->loadRecorder(recorderName));
        if (!recorder->hasConfig())
        {
            fprintf(stderr, "Recorder '%s' has null config\n", recorderName);
            Database::close();
            return 1;
        }
    }
    catch (const DBException& ex)
    {
        fprintf(stderr, "Failed to load recorder '%s':\n  %s\n", recorderName, ex.getMessage().c_str());
        Database::close();
        return 1;
    }
    
    
    // save the project name
    ProjectName projectName;
    try
    {
        string name = "testproject";
        projectName = Database::getInstance()->loadOrCreateProjectName(name);
    }
    catch (const DBException& ex)
    {
        fprintf(stderr, "Failed to load recorder '%s':\n  %s\n", recorderName, ex.getMessage().c_str());
        Database::close();
        return 1;
    }


    // load the source session
    try
    {
        if (oldSession)
        {
            // use the source config name + ' - ' + date string as the source package name
            Date now = generateDateNow();
            
            vector<SourceConfig*>::const_iterator iter;
            for (iter = recorder->getConfig()->sourceConfigs.begin(); 
                iter != recorder->getConfig()->sourceConfigs.end();
                iter++)
            {
                SourceConfig* sourceConfig = *iter;
                
                string sourcePackageName = sourceConfig->name;
                sourcePackageName += " - " + getDateString(now);
                
                sourceConfig->setSourcePackage(sourcePackageName);
            }
        }
        else
        {
            // use a spool number for the source package name
            int i;
            char buf[16];
            vector<SourceConfig*>::const_iterator iter;
            for (iter = recorder->getConfig()->sourceConfigs.begin(), i = 0; 
                iter != recorder->getConfig()->sourceConfigs.end();
                iter++, i++)
            {
                SourceConfig* sourceConfig = *iter;
                
                snprintf(buf, sizeof(buf), "%s%06x", tapeNumberPrefix.c_str(), i);
                
                sourceConfig->setSourcePackage(buf);
            }
        }
    }
    catch (const DBException& ex)
    {
        fprintf(stderr, "Failed to load source session:\n  %s\n", ex.getMessage().c_str());
        Database::close();
        return 1;
    }

    
    RecorderInputConfig* inputConfig[NUM_RECORDER_INPUTS];
    for (k = 0; k < NUM_RECORDER_INPUTS; k++)
    {
        if ((inputConfig[k] = recorder->getConfig()->getInputConfig(k + 1)) == 0)
        {
            fprintf(stderr, "Unknown recorder input config %d\n", k + 1);
            Database::close();
            return 1;
        }
    }
    
    for (j = 0; j < NUM_RECORDS; j++)
    {
        printf("** j == %d\n", j);
        
        try
        {
            pthread_t thread[NUM_RECORD_THREADS];
            RecordData record_data[NUM_RECORD_THREADS];
            pthread_attr_t attr;
            int i, rc;
            void* status;
            
            pthread_attr_init(&attr);
            pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
    
            
            for (i = 0; i < NUM_RECORD_THREADS; i++)
            {
                stringstream prefix;
                prefix << filenamePrefix << "_" << i; 
                
                record_data[i].startPosition = startPosition;
                record_data[i].creatingFilePath = CREATING_FILE_PATH;
                record_data[i].destinationFilePath = DESTINATION_FILE_PATH;
                record_data[i].failuresFilePath = FAILURES_FILE_PATH;
                record_data[i].filenamePrefix = prefix.str();
                record_data[i].recorder = recorder.get();
                record_data[i].inputIndex = 1 + (i % NUM_RECORDER_INPUTS);
                record_data[i].userComments.push_back(UserComment(AVID_UC_COMMENTS_NAME, 
                    "a test file", STATIC_COMMENT_POSITION, 0));
                record_data[i].userComments.push_back(UserComment(AVID_UC_DESCRIPTION_NAME, 
                    "an mxfwriter test file produced by test_mxfwriter", STATIC_COMMENT_POSITION, 0));
                record_data[i].userComments.push_back(UserComment(POSITIONED_COMMENT_NAME, 
                    "event at position 0", 0, 1));
                record_data[i].userComments.push_back(UserComment(POSITIONED_COMMENT_NAME, 
                    "event at position 1", 1, (i % 8) + 1));
                record_data[i].userComments.push_back(UserComment(POSITIONED_COMMENT_NAME, 
                    "event at position 10000", 10000, 3));
                record_data[i].projectName = projectName;
                record_data[i].isPALProject = isPALProject;
                record_data[i].umidGenOffset = Database::getInstance()->getUMIDGenOffset();
                
                if (dv50Filename != NULL)
                {
                    record_data[i].dv50Filename = dv50Filename;
                }
                else if (mjpeg21Filename != NULL)
                {
                    record_data[i].mjpeg21Filename = mjpeg21Filename;
                }
                
                if (pthread_create(&thread[i], &attr, start_record_routine, (void *)(&record_data[i])))
                {
                    fprintf(stderr, "Failed to create record thread\n");
                    Database::close();
                    pthread_exit(NULL);
                    return 1;
                }
            }
            
            for (i = 0; i < NUM_RECORD_THREADS; i++)
            {
                rc = pthread_join(thread[i], (void **)&status);
                if (rc)
                {
                    fprintf(stderr, "Return code from pthread_join() is %d\n", rc);
                }
            }
            
            startPosition += NUM_FRAMES;
        }
        catch (...)
        {
            fprintf(stderr, "Unknown exception thrown\n");
            Database::close();
            pthread_exit(NULL);
            return 1;
        }
    
    }
        
    Database::close();
    
    pthread_exit(NULL);
    return 0;
}
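
One detail worth calling out in the example above: pthread_join writes a void*, so the join target must be a void* variable, not an int reinterpreted through a cast (that cast is undefined behavior on LP64 platforms, where int and void* differ in size). A minimal, self-contained sketch of the portable pattern:

#include <pthread.h>
#include <cstdint>
#include <cstdio>

// The worker's exit status travels as a void*.
static void* worker(void* arg) {
    long input = (long)(intptr_t)arg;
    return (void*)(intptr_t)(input * 2);
}

int main() {
    pthread_t tid;
    if (pthread_create(&tid, NULL, worker, (void*)(intptr_t)21) != 0)
        return 1;
    void* status = NULL;                  // must be a void*, not an int
    int rc = pthread_join(tid, &status);  // pass its address directly
    if (rc)
        fprintf(stderr, "Return code from pthread_join() is %d\n", rc);
    else
        printf("worker returned %ld\n", (long)(intptr_t)status);
    return 0;
}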
Example #5
void SelectUser::on_commandLinkButton_delete_clicked()
{
    QMessageBox::StandardButton reply;
    reply = QMessageBox::question(this, "AvisionR - Delete",
                                  "Are you sure you want to DELETE this Employee from your Database?", QMessageBox::Yes|QMessageBox::No);
    if(reply == QMessageBox::Yes)
    {
        {
            Database conn;
            if(!conn.connOpen("Employee"))
            {
                qDebug()<<"Failed to open Data";
                return;
            }

            QSqlQuery qry(conn.mydb);  // stack-allocated; the original heap QSqlQuery was never deleted

            QString queryString;
            QTextStream queryStream(&queryString);

            queryStream << "DELETE FROM Employees WHERE ID = '" << ui->label_id->text() << "'";

            qry.prepare(queryString);

            if(!qry.exec())
            {
                QMessageBox::critical(this, tr("Error"), qry.lastError().text());
            }

            conn.connClose();
        }

        {
            Database conn;
            if(!conn.connOpen("Clock"))
            {
                qDebug()<<"Failed to open Data";
                return;
            }

            QSqlQuery qry(conn.mydb);  // stack-allocated, as above

            QString queryString;
            QTextStream queryStream(&queryString);

            queryStream << "DROP TABLE '" << ui->label_id->text() << "'";

            qry.prepare(queryString);

            if(qry.exec())
            {
                thisUser->setup();
                QMessageBox::information(this, tr("AvisionR - Delete"), "Employee Deleted");
            }
            else
            {
                QMessageBox::critical(this, tr("Error"), qry.lastError().text());
            }

            conn.connClose();
        }


        this->hide();
    }
}
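
Both blocks above build SQL by splicing label text into the statement. For the DELETE, Qt can bind the value instead of concatenating it. A minimal sketch using QSqlQuery's prepare/bindValue (the function name and signature here are illustrative, not part of this project):

#include <QSqlDatabase>
#include <QSqlError>
#include <QSqlQuery>
#include <QString>
#include <QtDebug>

// Delete an employee row with a bound parameter instead of string
// concatenation; the QSqlQuery lives on the stack, so nothing leaks.
bool deleteEmployee(QSqlDatabase& db, const QString& id)
{
    QSqlQuery qry(db);
    qry.prepare("DELETE FROM Employees WHERE ID = :id");
    qry.bindValue(":id", id);  // bound as data, not SQL text
    if (!qry.exec()) {
        qDebug() << "delete failed:" << qry.lastError().text();
        return false;
    }
    return true;
}

The DROP TABLE has no such option: identifiers cannot be bound as parameters, so the table name must be validated separately before it is interpolated.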
Example #6
File: db.cpp  Project: carlyrobison/mongo
static void repairDatabasesAndCheckVersion(OperationContext* txn) {
    LOG(1) << "enter repairDatabases (to check pdfile version #)" << endl;

    ScopedTransaction transaction(txn, MODE_X);
    Lock::GlobalWrite lk(txn->lockState());

    vector<string> dbNames;

    StorageEngine* storageEngine = txn->getServiceContext()->getGlobalStorageEngine();
    storageEngine->listDatabases(&dbNames);

    // Repair all databases first, so that we do not try to open them if they are in bad shape
    if (storageGlobalParams.repair) {
        invariant(!storageGlobalParams.readOnly);
        for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
            const string dbName = *i;
            LOG(1) << "    Repairing database: " << dbName << endl;

            fassert(18506, repairDatabase(txn, storageEngine, dbName));
        }
    }

    const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();

    // On replica set members we only clear temp collections on DBs other than "local" during
    // promotion to primary. On pure slaves, they are only cleared when the oplog tells them
    // to. The local DB is special because it is not replicated.  See SERVER-10927 for more
    // details.
    const bool shouldClearNonLocalTmpCollections =
        !(checkIfReplMissingFromCommandLine(txn) || replSettings.usingReplSets() ||
          replSettings.isSlave());

    const bool shouldDoCleanupForSERVER23299 = isSubjectToSERVER23299(txn);

    for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
        const string dbName = *i;
        LOG(1) << "    Recovering database: " << dbName << endl;

        Database* db = dbHolder().openDb(txn, dbName);
        invariant(db);

        // First thing after opening the database is to check for file compatibility,
        // otherwise we might crash if this is a deprecated format.
        auto status = db->getDatabaseCatalogEntry()->currentFilesCompatible(txn);
        if (!status.isOK()) {
            if (status.code() == ErrorCodes::CanRepairToDowngrade) {
                // Convert CanRepairToDowngrade statuses to MustUpgrade statuses to avoid logging a
                // potentially confusing and inaccurate message.
                //
                // TODO SERVER-24097: Log a message informing the user that they can start the
                // current version of mongod with --repair and then proceed with normal startup.
                status = {ErrorCodes::MustUpgrade, status.reason()};
            }
            severe() << "Unable to start mongod due to an incompatibility with the data files and"
                        " this version of mongod: "
                     << status;
            severe() << "Please consult our documentation when trying to downgrade to a previous"
                        " major release";
            quickExit(EXIT_NEED_UPGRADE);
            return;
        }

        // Major versions match, check indexes
        const string systemIndexes = db->name() + ".system.indexes";

        Collection* coll = db->getCollection(systemIndexes);
        unique_ptr<PlanExecutor> exec(
            InternalPlanner::collectionScan(txn, systemIndexes, coll, PlanExecutor::YIELD_MANUAL));

        BSONObj index;
        PlanExecutor::ExecState state;
        while (PlanExecutor::ADVANCED == (state = exec->getNext(&index, NULL))) {
            const BSONObj key = index.getObjectField("key");
            const string plugin = IndexNames::findPluginName(key);

            if (db->getDatabaseCatalogEntry()->isOlderThan24(txn)) {
                if (IndexNames::existedBefore24(plugin)) {
                    continue;
                }

                log() << "Index " << index << " claims to be of type '" << plugin << "', "
                      << "which is either invalid or did not exist before v2.4. "
                      << "See the upgrade section: "
                      << "http://dochub.mongodb.org/core/upgrade-2.4" << startupWarningsLog;
            }

            const Status keyStatus = validateKeyPattern(key);
            if (!keyStatus.isOK()) {
                log() << "Problem with index " << index << ": " << keyStatus.reason()
                      << " This index can still be used however it cannot be rebuilt."
                      << " For more info see"
                      << " http://dochub.mongodb.org/core/index-validation" << startupWarningsLog;
            }

            if (index["v"].isNumber() && index["v"].numberInt() == 0) {
                log() << "WARNING: The index: " << index << " was created with the deprecated"
                      << " v:0 format.  This format will not be supported in a future release."
                      << startupWarningsLog;
                log() << "\t To fix this, you need to rebuild this index."
                      << " For instructions, see http://dochub.mongodb.org/core/rebuild-v0-indexes"
                      << startupWarningsLog;
            }
        }

        // Non-yielding collection scans from InternalPlanner will never error.
        invariant(PlanExecutor::IS_EOF == state);

        if (replSettings.usingReplSets()) {
            // We only care about the _id index if we are in a replset
            checkForIdIndexes(txn, db);
            // Ensure oplog is capped (mmap does not guarantee order of inserts on noncapped
            // collections)
            repl::checkForCappedOplog(txn);
        }

        if (shouldDoCleanupForSERVER23299) {
            handleSERVER23299ForDb(txn, db);
        }

        if (!storageGlobalParams.readOnly &&
            (shouldClearNonLocalTmpCollections || dbName == "local")) {
            db->clearTmpCollections(txn);
        }
    }

    LOG(1) << "done repairDatabases" << endl;
}
Example #7
void DatabaseCanvas::OnDropTable(wxCommandEvent &event)
{
    ShapeList list;
    bool isTable;
    int answer;
    MyErdTable *erdTable = NULL;
    DatabaseTable *table = NULL;
    wxString name;
    ConstraintSign *sign = NULL;
    Constraint *constraint = NULL;
    DrawingDocument *doc = (DrawingDocument *) m_view->GetDocument();
    Database *db = doc->GetDatabase();
    std::vector<std::wstring> errors, localColumns, refColumn;
    std::vector<FKField *> newFK;
    std::wstring command;
    int match = 0;
    GetSelectedShapes( list );
    if( list.size() == 1 )
        isTable = true;
    else
        isTable = false;
    for( ShapeList::iterator it = list.begin(); it != list.end(); it++ )
    {
        MyErdTable *tbl = wxDynamicCast( (*it), MyErdTable );
        if( tbl )
            erdTable = tbl;
        ConstraintSign *s = wxDynamicCast( (*it), ConstraintSign );
        if( s )
            sign = s;
    }
    if( isTable )
    {
        table = &( const_cast<DatabaseTable &>( erdTable->GetTable() ) );
        name = const_cast<DatabaseTable &>( erdTable->GetTable() ).GetTableName();
    }
    else
    {
        constraint = sign->GetConstraint();
        constraint->GetLocalColumns( localColumns );
        constraint->GetRefColumns( refColumn );
        match = constraint->GetPGMatch();
    }
    int eventId = event.GetId();
    if( eventId == wxID_DROPOBJECT )
    {
        wxString message = _( "You are about to delete " );
        if( isTable )
            message += _( "table " ) + name + _( ". Are you sure?" );
        else
        {
            message += _( "foreign key " );
            wxString fkName = constraint->GetName();
            if( !fkName.empty() )
                message += fkName;
            else
                message += _( " on " ) + const_cast<DatabaseTable *>( constraint->GetFKTable() )->GetTableName() + _( " references " ) + constraint->GetRefTable() + _( ". Are you sure?" );
        }
        answer = wxMessageBox( message, _( "Database" ), wxYES_NO | wxNO_DEFAULT );
    }
    else
        answer = wxYES;
    if( answer == wxYES )
    {
        if( isTable && ( ( eventId == wxID_DROPOBJECT && !db->DeleteTable( name.ToStdWstring(), errors ) ) || eventId != wxID_DROPOBJECT ) )
        {
            if( m_realSelectedShape == m_selectedShape )
            {
                m_realSelectedShape = NULL;
                ShapeList listShapes;
                m_pManager.GetShapes( CLASSINFO( MyErdTable ), listShapes );
                int size = listShapes.size();
                if( listShapes.size() == 1 )
                    m_realSelectedShape = NULL;
                else
                {
                    MyErdTable *tableToRemove = (MyErdTable *) ( listShapes.Item( size - 1 )->GetData() );
                    if( tableToRemove == erdTable )
                        m_realSelectedShape = (MyErdTable *) ( listShapes.Item( size - 2 )->GetData() );
                    else
                    {
                        bool found = false;
                        int i;
                        for( i = 0; i < size - 1 || !found; i++ )
                            if( listShapes.Item( i )->GetData() == erdTable )
                                found = true;
                        m_realSelectedShape = listShapes.Item( i + 1 )->GetData();
                    }
                }
            }
            m_pManager.RemoveShape( erdTable );
/*            for( ShapeList::iterator it = listShapes.begin(); it != listShapes.end() || !nextShapeFound; ++it )
            {
                CommentFieldShape *shape = wxDynamicCast( (*it), CommentFieldShape );
                if( m_showComments )
                {
                    shape->SetText( const_cast<Field *>( shape->GetFieldForComment() )->GetComment() );
                }
                else
                {
                    shape->SetText( wxEmptyString );
                }
            }*/
            std::map<std::wstring, std::vector<DatabaseTable *> > tables = db->GetTableVector().m_tables;
            std::vector<DatabaseTable *> tableVec = tables.at( db->GetTableVector().m_dbName );
            std::vector<std::wstring> &names = doc->GetTableNameVector();
            if( event.GetId() == wxID_DROPOBJECT )
            {
                tableVec.erase( std::remove( tableVec.begin(), tableVec.end(), table ), tableVec.end() );
            }
            else
                names.erase( std::remove( names.begin(), names.end(), table->GetTableName() ), names.end() );
/*            if( m_realSelectedShape == m_selectedShape )
            {
                
            }
            else
            {*/
                if( m_realSelectedShape )
                    m_realSelectedShape->Select( true );
//            }
        }
        else if( !isTable && !db->ApplyForeignKey( command, constraint->GetName().ToStdWstring(), *( const_cast<DatabaseTable *>( constraint->GetFKTable() ) ), localColumns, constraint->GetRefTable().ToStdWstring(), refColumn, constraint->GetOnDelete(), constraint->GetOnUpdate(), false, newFK, false, match, errors ) )
        {
            sign->DeleteConstraint();
            m_pManager.RemoveShape( sign->GetParentShape() );
            Refresh();
        }
        else
        {
            for( std::vector<std::wstring>::iterator it = errors.begin(); it < errors.end(); it++ )
            {
                wxMessageBox( (*it) );
            }
        }
    }
    Refresh();
}
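
The fallback above (after removing a table shape, select the next shape if one exists, otherwise a previous one) reduces to a small idiom. A minimal, self-contained sketch in the same spirit with std::find:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Pick the element to select after removing 'current': the next one if it
// exists, otherwise the previous one.
int main() {
    std::vector<std::string> shapes = {"orders", "users", "invoices"};
    std::string current = "users";
    auto it = std::find(shapes.begin(), shapes.end(), current);
    std::string next;
    if (it != shapes.end()) {
        if (it + 1 != shapes.end())
            next = *(it + 1);       // a later shape exists: take it
        else if (it != shapes.begin())
            next = *(it - 1);       // removed the last one: take the previous
    }
    std::cout << "select: " << (next.empty() ? "<none>" : next) << "\n";
    return 0;
}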
Example #8
    void run() {
        OldClientWriteContext ctx(&_txn, ns());
        Database* db = ctx.db();
        Collection* coll = db->getCollection(ns());
        if (!coll) {
            WriteUnitOfWork wuow(&_txn);
            coll = db->createCollection(&_txn, ns());
            wuow.commit();
        }

        {
            WriteUnitOfWork wuow(&_txn);
            fillData();
            wuow.commit();
        }

        // The data we're going to later invalidate.
        set<RecordId> locs;
        getLocs(&locs, coll);

        std::unique_ptr<PlanExecutor> exec(makePlanExecutorWithSortStage(coll));
        SortStage* ss = static_cast<SortStage*>(exec->getRootStage());
        QueuedDataStage* ms = static_cast<QueuedDataStage*>(ss->getChildren()[0]);

        // Have sort read in data from the queued data stage.
        const int firstRead = 5;
        for (int i = 0; i < firstRead; ++i) {
            WorkingSetID id = WorkingSet::INVALID_ID;
            PlanStage::StageState status = ss->work(&id);
            ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status);
        }

        // We should have read in the first 'firstRead' locs.  Invalidate the first one.
        // Since it's in the WorkingSet, the updates should not be reflected in the output.
        exec->saveState();
        set<RecordId>::iterator it = locs.begin();
        Snapshotted<BSONObj> oldDoc = coll->docFor(&_txn, *it);

        OID updatedId = oldDoc.value().getField("_id").OID();
        SnapshotId idBeforeUpdate = oldDoc.snapshotId();
        // We purposefully update the document to have a 'foo' value greater than limit().
        // This allows us to check that we don't return the new copy of a doc by asserting
        // foo < limit().
        BSONObj newDoc = BSON("_id" << updatedId << "foo" << limit() + 10);
        oplogUpdateEntryArgs args;
        {
            WriteUnitOfWork wuow(&_txn);
            coll->updateDocument(&_txn, *it, oldDoc, newDoc, false, false, NULL, args);
            wuow.commit();
        }
        exec->restoreState(&_txn);

        // Read the rest of the data from the queued data stage.
        while (!ms->isEOF()) {
            WorkingSetID id = WorkingSet::INVALID_ID;
            ss->work(&id);
        }

        // Let's just invalidate everything now. Already read into ss, so original values
        // should be fetched.
        exec->saveState();
        while (it != locs.end()) {
            oldDoc = coll->docFor(&_txn, *it);
            {
                WriteUnitOfWork wuow(&_txn);
                coll->updateDocument(&_txn, *it++, oldDoc, newDoc, false, false, NULL, args);
                wuow.commit();
            }
        }
        exec->restoreState(&_txn);

        // Verify that it's sorted, the right number of documents are returned, and they're all
        // in the expected range.
        int count = 0;
        int lastVal = 0;
        int thisVal;
        while (!ss->isEOF()) {
            WorkingSetID id = WorkingSet::INVALID_ID;
            PlanStage::StageState status = ss->work(&id);
            if (PlanStage::ADVANCED != status) {
                ASSERT_NE(status, PlanStage::FAILURE);
                ASSERT_NE(status, PlanStage::DEAD);
                continue;
            }
            WorkingSetMember* member = exec->getWorkingSet()->get(id);
            ASSERT(member->hasObj());
            if (member->obj.value().getField("_id").OID() == updatedId) {
                ASSERT(idBeforeUpdate == member->obj.snapshotId());
            }
            thisVal = member->obj.value().getField("foo").Int();
            ASSERT_LTE(lastVal, thisVal);
            // Expect docs in range [0, limit)
            ASSERT_LTE(0, thisVal);
            ASSERT_LT(thisVal, limit());
            lastVal = thisVal;
            ++count;
        }
        // Returns all docs.
        ASSERT_EQUALS(limit(), count);
    }
Example #9
    void run() {
        OldClientWriteContext ctx(&_txn, ns());
        Database* db = ctx.db();
        Collection* coll = db->getCollection(ns());
        if (!coll) {
            WriteUnitOfWork wuow(&_txn);
            coll = db->createCollection(&_txn, ns());
            wuow.commit();
        }

        {
            WriteUnitOfWork wuow(&_txn);
            fillData();
            wuow.commit();
        }

        // The data we're going to later invalidate.
        set<RecordId> locs;
        getLocs(&locs, coll);

        std::unique_ptr<PlanExecutor> exec(makePlanExecutorWithSortStage(coll));
        SortStage* ss = static_cast<SortStage*>(exec->getRootStage());
        QueuedDataStage* ms = static_cast<QueuedDataStage*>(ss->getChildren()[0]);

        const int firstRead = 10;
        // Have sort read in data from the queued data stage.
        for (int i = 0; i < firstRead; ++i) {
            WorkingSetID id = WorkingSet::INVALID_ID;
            PlanStage::StageState status = ss->work(&id);
            ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status);
        }

        // We should have read in the first 'firstRead' locs.  Invalidate the first.
        exec->saveState();
        set<RecordId>::iterator it = locs.begin();
        {
            WriteUnitOfWork wuow(&_txn);
            coll->deleteDocument(&_txn, *it++, false, false, NULL);
            wuow.commit();
        }
        exec->restoreState(&_txn);

        // Read the rest of the data from the queued data stage.
        while (!ms->isEOF()) {
            WorkingSetID id = WorkingSet::INVALID_ID;
            ss->work(&id);
        }

        // Let's just invalidate everything now.
        exec->saveState();
        while (it != locs.end()) {
            {
                WriteUnitOfWork wuow(&_txn);
                coll->deleteDocument(&_txn, *it++, false, false, NULL);
                wuow.commit();
            }
        }
        exec->restoreState(&_txn);

        // Regardless of storage engine, all the documents should come back with their objects
        int count = 0;
        while (!ss->isEOF()) {
            WorkingSetID id = WorkingSet::INVALID_ID;
            PlanStage::StageState status = ss->work(&id);
            if (PlanStage::ADVANCED != status) {
                ASSERT_NE(status, PlanStage::FAILURE);
                ASSERT_NE(status, PlanStage::DEAD);
                continue;
            }
            WorkingSetMember* member = exec->getWorkingSet()->get(id);
            ASSERT(member->hasObj());
            ++count;
        }

        // Returns all docs.
        ASSERT_EQUALS(limit() ? limit() : numObj(), count);
    }
Example #10
    Status MMAPV1Engine::repairDatabase( OperationContext* txn,
                                         const std::string& dbName,
                                         bool preserveClonedFilesOnFailure,
                                         bool backupOriginalFiles ) {
        // We must hold some form of lock here
        invariant(txn->lockState()->threadState());
        invariant( dbName.find( '.' ) == string::npos );

        scoped_ptr<RepairFileDeleter> repairFileDeleter;

        log() << "repairDatabase " << dbName << endl;

        BackgroundOperation::assertNoBgOpInProgForDb(dbName);

        txn->recoveryUnit()->syncDataAndTruncateJournal(); // Must be done before and after repair

        intmax_t totalSize = dbSize( dbName );
        intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath);

        if ( freeSize > -1 && freeSize < totalSize ) {
            return Status( ErrorCodes::OutOfDiskSpace,
                           str::stream() << "Cannot repair database " << dbName
                           << " having size: " << totalSize
                           << " (bytes) because free disk space is: " << freeSize << " (bytes)" );
        }

        txn->checkForInterrupt();

        Path reservedPath =
            uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles ) ?
                                "backup" : "_tmp" );
        MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::create_directory( reservedPath ) );
        string reservedPathString = reservedPath.string();

        if ( !preserveClonedFilesOnFailure )
            repairFileDeleter.reset( new RepairFileDeleter( txn,
                                                            dbName,
                                                            reservedPathString,
                                                            reservedPath ) );

        {
            Database* originalDatabase =
                            dbHolder().get(txn, dbName);
            if (originalDatabase == NULL) {
                return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair");
            }

            scoped_ptr<MMAPV1DatabaseCatalogEntry> dbEntry;
            scoped_ptr<Database> tempDatabase;
            {
                dbEntry.reset( new MMAPV1DatabaseCatalogEntry( txn,
                                                               dbName,
                                                               reservedPathString,
                                                               storageGlobalParams.directoryperdb,
                                                               true ) );
                invariant( !dbEntry->exists() );
                tempDatabase.reset( new Database( txn,
                                                  dbName,
                                                  dbEntry.get() ) );

            }

            map<string,CollectionOptions> namespacesToCopy;
            {
                string ns = dbName + ".system.namespaces";
                Client::Context ctx(txn,  ns );
                Collection* coll = originalDatabase->getCollection( txn, ns );
                if ( coll ) {
                    scoped_ptr<RecordIterator> it( coll->getIterator( txn,
                                                                      DiskLoc(),
                                                                      false,
                                                                      CollectionScanParams::FORWARD ) );
                    while ( !it->isEOF() ) {
                        DiskLoc loc = it->getNext();
                        BSONObj obj = coll->docFor( loc );

                        string ns = obj["name"].String();

                        NamespaceString nss( ns );
                        if ( nss.isSystem() ) {
                            if ( nss.isSystemDotIndexes() )
                                continue;
                            if ( nss.coll() == "system.namespaces" )
                                continue;
                        }

                        if ( !nss.isNormal() )
                            continue;

                        CollectionOptions options;
                        if ( obj["options"].isABSONObj() ) {
                            Status status = options.parse( obj["options"].Obj() );
                            if ( !status.isOK() )
                                return status;
                        }
                        namespacesToCopy[ns] = options;
                    }
                }
            }

            for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
                  i != namespacesToCopy.end();
                  ++i ) {
                string ns = i->first;
                CollectionOptions options = i->second;

                Collection* tempCollection = NULL;
                {
                    Client::Context tempContext(txn, ns, tempDatabase );
                    WriteUnitOfWork wunit(txn);
                    tempCollection = tempDatabase->createCollection(txn, ns, options, true, false);
                    wunit.commit();
                }

                Client::Context readContext(txn, ns, originalDatabase);
                Collection* originalCollection = originalDatabase->getCollection( txn, ns );
                invariant( originalCollection );

                // data

                // TODO SERVER-14812 add a mode that drops duplicates rather than failing
                MultiIndexBlock indexer(txn, tempCollection );
                {
                    vector<BSONObj> indexes;
                    IndexCatalog::IndexIterator ii =
                        originalCollection->getIndexCatalog()->getIndexIterator( false );
                    while ( ii.more() ) {
                        IndexDescriptor* desc = ii.next();
                        indexes.push_back( desc->infoObj() );
                    }

                    Client::Context tempContext(txn, ns, tempDatabase);
                    Status status = indexer.init( indexes );
                    if ( !status.isOK() )
                        return status;
                }

                scoped_ptr<RecordIterator> iterator(
                    originalCollection->getIterator( txn, DiskLoc(), false,
                                                     CollectionScanParams::FORWARD ));
                while ( !iterator->isEOF() ) {
                    DiskLoc loc = iterator->getNext();
                    invariant( !loc.isNull() );

                    BSONObj doc = originalCollection->docFor( loc );

                    Client::Context tempContext(txn, ns, tempDatabase);
                    
                    WriteUnitOfWork wunit(txn);
                    StatusWith<DiskLoc> result = tempCollection->insertDocument(txn,
                                                                                doc,
                                                                                &indexer,
                                                                                false);
                    if ( !result.isOK() )
                        return result.getStatus();

                    wunit.commit();
                    txn->checkForInterrupt(false);
                }
                
                Status status = indexer.doneInserting();
                if (!status.isOK())
                    return status;

                {
                    Client::Context tempContext(txn, ns, tempDatabase);
                    WriteUnitOfWork wunit(txn);
                    indexer.commit();
                    wunit.commit();
                }

            }

            txn->recoveryUnit()->syncDataAndTruncateJournal();
            globalStorageEngine->flushAllFiles(true); // need both in case journaling is disabled

            txn->checkForInterrupt(false);
        }

        // at this point if we abort, we don't want to delete new files
        // as they might be the only copies

        if ( repairFileDeleter.get() )
            repairFileDeleter->success();

        dbHolder().close( txn, dbName );

        if ( backupOriginalFiles ) {
            _renameForBackup( dbName, reservedPath );
        }
        else {
            // first make new directory before deleting data
            Path newDir = Path(storageGlobalParams.dbpath) / dbName;
            MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));

            // this deletes old files
            _deleteDataFiles( dbName );

            if ( !boost::filesystem::exists(newDir) ) {
                // we deleted because of directoryperdb
                // re-create
                MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));
            }
        }

        _replaceWithRecovered( dbName, reservedPathString.c_str() );

        if ( !backupOriginalFiles )
            MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );

        return Status::OK();
    }
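
repairDatabase leans on the same WriteUnitOfWork shape throughout: perform writes, and only a reached commit() makes them stick, while any early return or exception discards them. A toy RAII sketch of that commit-or-rollback contract (purely illustrative; MongoDB's real class works at the storage-engine level, not by queuing callbacks):

#include <functional>
#include <iostream>
#include <vector>

// Illustrative unit of work: queued actions run only if commit() is reached;
// on early exit the destructor discards them, like a failed insert above.
class UnitOfWork {
public:
    void queue(std::function<void()> action) { actions_.push_back(action); }
    void commit() {
        for (auto& a : actions_) a();
        actions_.clear();
        committed_ = true;
    }
    ~UnitOfWork() {
        if (!committed_)
            std::cout << "rolled back " << actions_.size() << " action(s)\n";
    }
private:
    std::vector<std::function<void()>> actions_;
    bool committed_ = false;
};

int main() {
    {
        UnitOfWork wunit;
        wunit.queue([] { std::cout << "write applied\n"; });
        wunit.commit();  // success path: the queued write runs
    }
    {
        UnitOfWork wunit;
        wunit.queue([] { std::cout << "never runs\n"; });
        // no commit(): the destructor rolls the work back
    }
    return 0;
}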
Example #11
    Status IndexBuilder::_build(OperationContext* txn,
                                Database* db,
                                bool allowBackgroundBuilding,
                                Lock::DBLock* dbLock) const {
        const NamespaceString ns(_index["ns"].String());

        Collection* c = db->getCollection( txn, ns.ns() );
        if ( !c ) {
            WriteUnitOfWork wunit(txn);
            c = db->getOrCreateCollection( txn, ns.ns() );
            verify(c);
            wunit.commit();
        }

        // Show which index we're building in the curop display.
        txn->getCurOp()->setQuery(_index);

        MultiIndexBlock indexer(txn, c);
        indexer.allowInterruption();
        if (allowBackgroundBuilding)
            indexer.allowBackgroundBuilding();

        Status status = Status::OK();
        IndexDescriptor* descriptor(NULL);
        try {
            status = indexer.init(_index);
            if ( status.code() == ErrorCodes::IndexAlreadyExists )
                return Status::OK();

            if (status.isOK()) {
                if (allowBackgroundBuilding) {
                    descriptor = indexer.registerIndexBuild();
                    invariant(dbLock);
                    dbLock->relockWithMode(MODE_IX);
                }

                Lock::CollectionLock colLock(txn->lockState(), ns.ns(), MODE_IX);
                status = indexer.insertAllDocumentsInCollection();
            }

            if (status.isOK()) {
                if (allowBackgroundBuilding) {
                    dbLock->relockWithMode(MODE_X);
                }
                WriteUnitOfWork wunit(txn);
                indexer.commit();
                wunit.commit();
            }
        }
        catch (const DBException& e) {
            status = e.toStatus();
        }

        if (allowBackgroundBuilding) {
            dbLock->relockWithMode(MODE_X);
            Database* db = dbHolder().get(txn, ns.db());
            fassert(28553, db);
            fassert(28554, db->getCollection(txn, ns.ns()));
            indexer.unregisterIndexBuild(descriptor);
        }

        if (status.code() == ErrorCodes::InterruptedAtShutdown) {
            // leave it as if kill -9 happened; this will be handled on restart.
            indexer.abortWithoutCleanup();
        }

        return status;
    }
Example #12
        bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
            const string ns = dbname + "." + cmdObj.firstElement().valuestr();

            if (!cmdObj["start"].eoo()) {
                errmsg = "using deprecated 'start' argument to geoNear";
                return false;
            }

            Client::ReadContext ctx(txn, ns);

            Database* db = ctx.ctx().db();
            if ( !db ) {
                errmsg = "can't find ns";
                return false;
            }

            Collection* collection = db->getCollection( txn, ns );
            if ( !collection ) {
                errmsg = "can't find ns";
                return false;
            }

            IndexCatalog* indexCatalog = collection->getIndexCatalog();

            // cout << "raw cmd " << cmdObj.toString() << endl;

            // We seek to populate this.
            string nearFieldName;
            bool using2DIndex = false;
            if (!getFieldName(collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
                return false;
            }

            uassert(17304, "'near' field must be point",
                    !cmdObj["near"].eoo() && cmdObj["near"].isABSONObj()
                    && GeoParser::isPoint(cmdObj["near"].Obj()));

            bool isSpherical = cmdObj["spherical"].trueValue();
            if (!using2DIndex) {
                uassert(17301, "2dsphere index must have spherical: true", isSpherical);
            }

            // Build the $near expression for the query.
            BSONObjBuilder nearBob;
            if (isSpherical) {
                nearBob.append("$nearSphere", cmdObj["near"].Obj());
            }
            else {
                nearBob.append("$near", cmdObj["near"].Obj());
            }

            if (!cmdObj["maxDistance"].eoo()) {
                uassert(17299, "maxDistance must be a number",cmdObj["maxDistance"].isNumber());
                nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
            }

            if (!cmdObj["minDistance"].eoo()) {
                uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
                uassert(17300, "minDistance must be a number",cmdObj["minDistance"].isNumber());
                nearBob.append("$minDistance", cmdObj["minDistance"].number());
            }

            if (!cmdObj["uniqueDocs"].eoo()) {
                warning() << ns << ": ignoring deprecated uniqueDocs option in geoNear command";
            }

            // And, build the full query expression.
            BSONObjBuilder queryBob;
            queryBob.append(nearFieldName, nearBob.obj());
            if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
                queryBob.appendElements(cmdObj["query"].Obj());
            }
            BSONObj rewritten = queryBob.obj();

            // cout << "rewritten query: " << rewritten.toString() << endl;

            int numWanted = 100;
            const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
            BSONElement eNumWanted = cmdObj[limitName];
            if (!eNumWanted.eoo()) {
                uassert(17303, "limit must be number", eNumWanted.isNumber());
                numWanted = eNumWanted.numberInt();
                uassert(17302, "limit must be >=0", numWanted >= 0);
            }

            bool includeLocs = false;
            if (!cmdObj["includeLocs"].eoo()) {
                includeLocs = cmdObj["includeLocs"].trueValue();
            }

            double distanceMultiplier = 1.0;
            BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
            if (!eDistanceMultiplier.eoo()) {
                uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
                distanceMultiplier = eDistanceMultiplier.number();
                uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
            }

            BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) <<
                                   "$dis" << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));

            CanonicalQuery* cq;

            const NamespaceString nss(dbname);
            const WhereCallbackReal whereCallback(txn, nss.db());

            if (!CanonicalQuery::canonicalize(ns,
                                              rewritten,
                                              BSONObj(),
                                              projObj,
                                              0,
                                              numWanted,
                                              BSONObj(),
                                              &cq,
                                              whereCallback).isOK()) {
                errmsg = "Can't parse filter / create query";
                return false;
            }

            PlanExecutor* rawExec;
            if (!getExecutor(txn, collection, cq, &rawExec, 0).isOK()) {
                errmsg = "can't get query runner";
                return false;
            }

            auto_ptr<PlanExecutor> exec(rawExec);
            const ScopedExecutorRegistration safety(exec.get());

            double totalDistance = 0;
            BSONObjBuilder resultBuilder(result.subarrayStart("results"));
            double farthestDist = 0;

            BSONObj currObj;
            int results = 0;
            while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {

                // Come up with the correct distance.
                double dist = currObj["$dis"].number() * distanceMultiplier;
                totalDistance += dist;
                if (dist > farthestDist) { farthestDist = dist; }

                // Strip out '$dis' and '$pt' from the result obj.  The rest gets added as 'obj'
                // in the command result.
                BSONObjIterator resIt(currObj);
                BSONObjBuilder resBob;
                while (resIt.more()) {
                    BSONElement elt = resIt.next();
                    if (!mongoutils::str::equals("$pt", elt.fieldName())
                        && !mongoutils::str::equals("$dis", elt.fieldName())) {
                        resBob.append(elt);
                    }
                }
                BSONObj resObj = resBob.obj();

                // Don't make a too-big result object.
                if (resultBuilder.len() + resObj.objsize() > BSONObjMaxUserSize) {
                    warning() << "Too many geoNear results for query " << rewritten.toString()
                              << ", truncating output.";
                    break;
                }

                // Add the next result to the result builder.
                BSONObjBuilder oneResultBuilder(
                    resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
                oneResultBuilder.append("dis", dist);
                if (includeLocs) {
                    oneResultBuilder.appendAs(currObj["$pt"], "loc");
                }
                oneResultBuilder.append("obj", resObj);
                oneResultBuilder.done();
                ++results;
            }

            resultBuilder.done();

            // Fill out the stats subobj.
            BSONObjBuilder stats(result.subobjStart("stats"));

            // Fill in nscanned from the explain.
            PlanSummaryStats summary;
            Explain::getSummaryStats(exec.get(), &summary);
            stats.appendNumber("nscanned", summary.totalKeysExamined);
            stats.appendNumber("objectsLoaded", summary.totalDocsExamined);

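            // Note: if the loop above produced no results, 'results' is 0 and
            // the division below is 0.0 / 0, which yields NaN under IEEE-754.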
            stats.append("avgDistance", totalDistance / results);
            stats.append("maxDistance", farthestDist);
            stats.append("time", txn->getCurOp()->elapsedMillis());
            stats.done();

            return true;
        }
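
A note on the truncation guard above: checking resultBuilder.len() + resObj.objsize() against BSONObjMaxUserSize before each append is a general byte-budget pattern — stop adding items once the next one would overflow a hard response limit, and warn instead of failing. A minimal self-contained sketch of the same idea (all names hypothetical, not MongoDB API):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Append items until the next one would push the output past maxBytes.
// Returns false if the output had to be truncated.
bool appendWithBudget(const std::vector<std::string>& items,
                      std::size_t maxBytes,
                      std::vector<std::string>& out) {
    std::size_t used = 0;
    for (const std::string& item : items) {
        if (used + item.size() > maxBytes) {
            std::cerr << "too many results, truncating output" << std::endl;
            return false;
        }
        out.push_back(item);
        used += item.size();
    }
    return true;
}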
Example #13
void mainLoader(int argc, char* argv[], ServiceManager* services)
{
	//dispatcher thread
	g_game.setGameState(GAME_STATE_STARTUP);

	srand((unsigned int)OTSYS_TIME());
#ifdef _WIN32
	SetConsoleTitle(STATUS_SERVER_NAME);
#endif
	std::cout << STATUS_SERVER_NAME << " - Version " << STATUS_SERVER_VERSION << std::endl;
	std::cout << "Compilied on " << __DATE__ << ' ' << __TIME__ << " for arch ";

#if defined(__amd64__) || defined(_M_X64)
	std::cout << "x64" << std::endl;
#elif defined(__i386__) || defined(_M_IX86) || defined(_X86_)
	std::cout << "x86" << std::endl;
#elif defined(__arm__)
	std::cout << "ARM" << std::endl;
#elif defined(__mips__)
	std::cout << "MIPS" << std::endl;
#else
	std::cout << "unk" << std::endl;
#endif
	std::cout << std::endl;

	std::cout << "A server developed by " << STATUS_SERVER_DEVELOPERS << std::endl;
	std::cout << "Visit our forum for updates, support, and resources: http://otland.net/." << std::endl;
	std::cout << std::endl;

	// read global config
	std::cout << ">> Loading config" << std::endl;
	if (!g_config.load()) {
		startupErrorMessage("Unable to load config.lua!");
		return;
	}

#ifdef _WIN32
	std::string defaultPriority = asLowerCaseString(g_config.getString(ConfigManager::DEFAULT_PRIORITY));
	if (defaultPriority == "realtime") {
		SetPriorityClass(GetCurrentProcess(), REALTIME_PRIORITY_CLASS);
	} else if (defaultPriority == "high") {
		SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS);
	} else if (defaultPriority == "higher") {
		SetPriorityClass(GetCurrentProcess(), ABOVE_NORMAL_PRIORITY_CLASS);
	}

	std::ostringstream mutexName;
	mutexName << "forgottenserver_" << g_config.getNumber(ConfigManager::LOGIN_PORT);
	CreateMutex(nullptr, FALSE, mutexName.str().c_str());
	if (GetLastError() == ERROR_ALREADY_EXISTS) {
		startupErrorMessage("Another instance of The Forgotten Server is already running with the same login port, please shut it down first or change ports for this one.");
		return;
	}
#endif

	//set RSA key
	const char* p("14299623962416399520070177382898895550795403345466153217470516082934737582776038882967213386204600674145392845853859217990626450972452084065728686565928113");
	const char* q("7630979195970404721891201847792002125535401292779123937207447574596692788513647179235335529307251350570728407373705564708871762033017096809910315212884101");
	g_RSA.setKey(p, q);

	std::cout << ">> Establishing database connection..." << std::flush;

	Database* db = Database::getInstance();
	if (!db->connect()) {
		startupErrorMessage("Failed to connect to database.");
		return;
	}

	std::cout << " MySQL " << Database::getClientVersion() << std::endl;

	// run database manager
	std::cout << ">> Running database manager" << std::endl;

	if (!DatabaseManager::isDatabaseSetup()) {
		startupErrorMessage("The database you have specified in config.lua is empty, please import the schema.sql to your database.");
		return;
	}

	DatabaseManager::updateDatabase();
	DatabaseManager::checkEncryption();

	if (g_config.getBoolean(ConfigManager::OPTIMIZE_DATABASE) && !DatabaseManager::optimizeTables()) {
		std::cout << "> No tables were optimized." << std::endl;
	}

	//load vocations
	std::cout << ">> Loading vocations" << std::endl;
	if (!g_vocations.loadFromXml()) {
		startupErrorMessage("Unable to load vocations!");
		return;
	}

	//load commands
	std::cout << ">> Loading commands" << std::endl;
	if (!g_commands.loadFromXml()) {
		startupErrorMessage("Unable to load commands!");
		return;
	}

	// load item data
	std::cout << ">> Loading items" << std::endl;
	if (Item::items.loadFromOtb("data/items/items.otb")) {
		startupErrorMessage("Unable to load items (OTB)!");
		return;
	}

	if (!Item::items.loadFromXml()) {
		startupErrorMessage("Unable to load items (XML)!");
		return;
	}

	std::cout << ">> Loading script systems" << std::endl;
	if (!ScriptingManager::getInstance()->loadScriptSystems()) {
		startupErrorMessage("Failed to load script systems");
		return;
	}

	std::cout << ">> Loading monsters" << std::endl;
	if (!g_monsters.loadFromXml()) {
		startupErrorMessage("Unable to load monsters!");
		return;
	}

	std::cout << ">> Loading outfits" << std::endl;
	Outfits* outfits = Outfits::getInstance();
	if (!outfits->loadFromXml()) {
		startupErrorMessage("Unable to load outfits!");
		return;
	}

	g_adminConfig = new AdminProtocolConfig();
	std::cout << ">> Loading admin protocol config" << std::endl;
	if (!g_adminConfig->loadXMLConfig()) {
		startupErrorMessage("Unable to load admin protocol config!");
		return;
	}

	std::cout << ">> Loading experience stages" << std::endl;
	if (!g_game.loadExperienceStages()) {
		startupErrorMessage("Unable to load experience stages!");
		return;
	}

	std::string passwordType = asLowerCaseString(g_config.getString(ConfigManager::PASSWORDTYPE));
	if (passwordType == "sha1") {
		g_config.setNumber(ConfigManager::PASSWORD_TYPE, PASSWORD_TYPE_SHA1);
		std::cout << ">> Using SHA1 passwords" << std::endl;
	} else {
		g_config.setNumber(ConfigManager::PASSWORD_TYPE, PASSWORD_TYPE_PLAIN);
		std::cout << ">> Using plaintext passwords" << std::endl;
	}

	std::cout << ">> Checking world type... " << std::flush;
	std::string worldType = asLowerCaseString(g_config.getString(ConfigManager::WORLD_TYPE));
	if (worldType == "pvp") {
		g_game.setWorldType(WORLD_TYPE_PVP);
	} else if (worldType == "no-pvp") {
		g_game.setWorldType(WORLD_TYPE_NO_PVP);
	} else if (worldType == "pvp-enforced") {
		g_game.setWorldType(WORLD_TYPE_PVP_ENFORCED);
	} else {
		std::cout << std::endl;

		std::ostringstream ss;
		ss << "> ERROR: Unknown world type: " << g_config.getString(ConfigManager::WORLD_TYPE) << ", valid world types are: pvp, no-pvp and pvp-enforced.";
		startupErrorMessage(ss.str());
		return;
	}
	std::cout << asUpperCaseString(worldType) << std::endl;

	std::cout << ">> Loading map" << std::endl;

	if (!g_game.loadMainMap(g_config.getString(ConfigManager::MAP_NAME))) {
		startupErrorMessage("Failed to load map");
		return;
	}

	std::cout << ">> Initializing gamestate" << std::endl;
	g_game.setGameState(GAME_STATE_INIT);

	// Tibia protocols
	services->add<ProtocolGame>(g_config.getNumber(ConfigManager::GAME_PORT));
	services->add<ProtocolLogin>(g_config.getNumber(ConfigManager::LOGIN_PORT));

	// OT protocols
	services->add<ProtocolAdmin>(g_config.getNumber(ConfigManager::ADMIN_PORT));
	services->add<ProtocolStatus>(g_config.getNumber(ConfigManager::STATUS_PORT));

	// Legacy protocols
	services->add<ProtocolOldLogin>(g_config.getNumber(ConfigManager::LOGIN_PORT));
	services->add<ProtocolOldGame>(g_config.getNumber(ConfigManager::LOGIN_PORT));

	int32_t autoSaveEachMinutes = g_config.getNumber(ConfigManager::AUTO_SAVE_EACH_MINUTES);
	if (autoSaveEachMinutes > 0) {
		g_scheduler->addEvent(createSchedulerTask(autoSaveEachMinutes * 1000 * 60, std::bind(&Game::autoSave, &g_game)));
	}

	if (g_config.getBoolean(ConfigManager::SERVERSAVE_ENABLED)) {
		int32_t serverSaveHour = g_config.getNumber(ConfigManager::SERVERSAVE_H);
		if (serverSaveHour >= 0 && serverSaveHour <= 24) {
			time_t timeNow = time(nullptr);
			tm* timeinfo = localtime(&timeNow);

			if (serverSaveHour == 0) {
				serverSaveHour = 23;
			} else {
				serverSaveHour--;
			}

			timeinfo->tm_hour = serverSaveHour;
			timeinfo->tm_min = 55;
			timeinfo->tm_sec = 0;

			double difference = difftime(mktime(timeinfo), timeNow);
			if (difference < 0) {
				difference += 86400;
			}
			g_scheduler->addEvent(createSchedulerTask(difference * 1000, std::bind(&Game::prepareServerSave, &g_game)));
		}
	}

	Houses::getInstance().payHouses();
	IOLoginData::updateHouseOwners();
	g_game.checkExpiredMarketOffers();
	IOMarket::getInstance()->updateStatistics();

	std::cout << ">> Loaded all modules, server starting up..." << std::endl;

#if !defined(WIN32) && !defined(__ROOT_PERMISSION__)
	if (getuid() == 0 || geteuid() == 0) {
		std::cout << "> WARNING: " << STATUS_SERVER_NAME << " has been executed as root user, it is recommended to execute is as a normal user." << std::endl;
	}
#endif

	g_game.start(services);
	g_game.setGameState(GAME_STATE_NORMAL);
	g_loaderSignal.notify_all();
}
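
The server-save block near the end schedules Game::prepareServerSave for five minutes before the configured hour, wrapping to the next day if that moment has already passed today (e.g. with serverSaveHour == 3 the event fires at 02:55). A self-contained sketch of just that delay computation (function name hypothetical):

#include <ctime>

// Milliseconds until the next occurrence of (serverSaveHour - 1):55:00
// local time, mirroring the wrap-around logic above.
long msUntilServerSave(int serverSaveHour) {
    time_t timeNow = time(nullptr);
    tm timeinfo = *localtime(&timeNow);   // copy: localtime's buffer is shared

    timeinfo.tm_hour = (serverSaveHour == 0) ? 23 : serverSaveHour - 1;
    timeinfo.tm_min = 55;
    timeinfo.tm_sec = 0;

    double difference = difftime(mktime(&timeinfo), timeNow);
    if (difference < 0) {
        difference += 86400;   // that moment already passed today
    }
    return static_cast<long>(difference * 1000);
}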
Example #14
        virtual bool run(OperationContext* txn,
                         const string& dbname,
                         BSONObj& cmdObj,
                         int options,
                         string& errmsg,
                         BSONObjBuilder& result,
                         bool fromRepl) {

            const std::string ns = parseNsCollectionRequired(dbname, cmdObj);

            const BSONObj query = cmdObj.getObjectField("query");
            const BSONObj fields = cmdObj.getObjectField("fields");
            const BSONObj update = cmdObj.getObjectField("update");
            const BSONObj sort = cmdObj.getObjectField("sort");
            
            bool upsert = cmdObj["upsert"].trueValue();
            bool returnNew = cmdObj["new"].trueValue();
            bool remove = cmdObj["remove"].trueValue();

            if ( remove ) {
                if ( upsert ) {
                    errmsg = "remove and upsert can't co-exist";
                    return false;
                }
                if ( !update.isEmpty() ) {
                    errmsg = "remove and update can't co-exist";
                    return false;
                }
                if ( returnNew ) {
                    errmsg = "remove and returnNew can't co-exist";
                    return false;
                }
            }
            else if ( !cmdObj.hasField("update") ) {
                errmsg = "need remove or update";
                return false;
            }

            bool ok = false;
            MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
                errmsg = "";

                // We can always retry because we only ever modify one document
                ok = runImpl(txn,
                             dbname,
                             ns,
                             query,
                             fields,
                             update,
                             sort,
                             upsert,
                             returnNew,
                             remove,
                             result,
                             errmsg);
            } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "findAndModify", ns);

            if ( !ok && errmsg == "no-collection" ) {
                // Take X lock so we can create collection, then re-run operation.
                ScopedTransaction transaction(txn, MODE_IX);
                Lock::DBLock lk(txn->lockState(), dbname, MODE_X);
                Client::Context ctx(txn, ns, false /* don't check version */);
                if (!fromRepl &&
                    !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
                    return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
                        << "Not primary while creating collection " << ns
                        << " during findAndModify"));
                }
                Database* db = ctx.db();
                if ( db->getCollection( ns ) ) {
                    // someone else beat us to it, that's ok
                    // we might race while we unlock if someone drops
                    // but that's ok, we'll just do nothing and error out
                }
                else {
                    WriteUnitOfWork wuow(txn);
                    uassertStatusOK( userCreateNS( txn, db,
                                                   ns, BSONObj(),
                                                   !fromRepl ) );
                    wuow.commit();
                }

                errmsg = "";
                ok = runImpl(txn,
                             dbname,
                             ns,
                             query,
                             fields,
                             update,
                             sort,
                             upsert,
                             returnNew,
                             remove,
                             result,
                             errmsg);
            }

            return ok;
        }
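
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN / _END wrap the body in a loop that re-runs it whenever a write conflict is thrown; as the comment notes, this is safe here because each attempt modifies a single document. A generic sketch of that retry shape, with a hypothetical stand-in exception rather than MongoDB's:

struct WriteConflict {};  // hypothetical stand-in for a conflict exception

// Retry 'op' until it finishes without a write conflict being thrown. Only
// safe when each attempt is self-contained (here: one document is modified).
template <typename Op>
auto retryOnConflict(Op op) -> decltype(op()) {
    while (true) {
        try {
            return op();
        }
        catch (const WriteConflict&) {
            // abandon the snapshot / back off here, then try again
        }
    }
}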
Example #15
    Runner::RunnerState MultiPlanRunner::getNext(BSONObj* objOut, DiskLoc* dlOut) {
        if (_killed) { return Runner::RUNNER_DEAD; }
        if (_failure) { return Runner::RUNNER_ERROR; }

        // If we haven't picked the best plan yet...
        if (NULL == _bestPlan) {
            if (!pickBestPlan(NULL)) {
                verify(_failure || _killed);
                if (_killed) { return Runner::RUNNER_DEAD; }
                if (_failure) { return Runner::RUNNER_ERROR; }
            }
        }

        // Look for an already produced result that provides the data the caller wants.
        while (!_alreadyProduced.empty()) {
            WorkingSetID id = _alreadyProduced.front();
            _alreadyProduced.pop_front();

            WorkingSetMember* member = _bestPlan->getWorkingSet()->get(id);

            // Note that this copies code from PlanExecutor.
            if (NULL != objOut) {
                if (WorkingSetMember::LOC_AND_IDX == member->state) {
                    if (1 != member->keyData.size()) {
                        _bestPlan->getWorkingSet()->free(id);
                        // If the caller needs the key data and the WSM doesn't have it, drop the
                        // result and carry on.
                        continue;
                    }
                    *objOut = member->keyData[0].keyData;
                }
                else if (member->hasObj()) {
                    *objOut = member->obj;
                }
                else {
                    // If the caller needs an object and the WSM doesn't have it, drop and
                    // try the next result.
                    _bestPlan->getWorkingSet()->free(id);
                    continue;
                }
            }

            if (NULL != dlOut) {
                if (member->hasLoc()) {
                    *dlOut = member->loc;
                }
                else {
                    // If the caller needs a DiskLoc and the WSM doesn't have it, drop and carry on.
                    _bestPlan->getWorkingSet()->free(id);
                    continue;
                }
            }

            // If we're here, the caller has all the data needed and we've set the out
            // parameters.  Remove the result from the WorkingSet.
            _bestPlan->getWorkingSet()->free(id);
            return Runner::RUNNER_ADVANCED;
        }

        RunnerState state = _bestPlan->getNext(objOut, dlOut);

        if (Runner::RUNNER_ERROR == state && (NULL != _backupSolution)) {
            QLOG() << "Best plan errored out switching to backup\n";
            // Uncache the bad solution if we fall back
            // on the backup solution.
            //
            // XXX: Instead of uncaching we should find a way for the
            // cached plan runner to fall back on a different solution
            // if the best solution fails. Alternatively we could try to
            // defer cache insertion to be after the first produced result.
            Database* db = cc().database();
            verify(NULL != db);
            Collection* collection = db->getCollection(_query->ns());
            verify(NULL != collection);
            PlanCache* cache = collection->infoCache()->getPlanCache();
            cache->remove(*_query);

            _bestPlan.reset(_backupPlan);
            _backupPlan = NULL;
            _bestSolution.reset(_backupSolution);
            _backupSolution = NULL;
            _alreadyProduced = _backupAlreadyProduced;
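            // Retry once against the backup plan. The recursion is bounded:
            // _backupSolution is now NULL, so this branch cannot be re-entered.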
            return getNext(objOut, dlOut);
        }

        if (NULL != _backupSolution && Runner::RUNNER_ADVANCED == state) {
            QLOG() << "Best plan had a blocking sort, became unblocked, deleting backup plan\n";
            delete _backupSolution;
            delete _backupPlan;
            _backupSolution = NULL;
            _backupPlan = NULL;
            // TODO: free from WS?
            _backupAlreadyProduced.clear();
        }

        return state;
    }
Example #16
int Path::recurse(
    Database&   db,
    const char* prefix,
    const char* cur_path,
    Directory*  dir,
    Parser*     parser) {
  if (terminating()) {
    errno = EINTR;
    return -1;
  }

  // Get relative path
  const char* rel_path;
  if (cur_path[_backup_path_length] == '\0') {
    rel_path = &cur_path[_backup_path_length];
  } else {
    rel_path = &cur_path[_backup_path_length + 1];
  }

  // Check whether directory is under SCM control
  if (! _parsers.empty()) {
    // We have a parser, check this directory with it
    if (parser != NULL) {
      parser = parser->isControlled(cur_path);
    }
    // We don't have a parser [anymore], check this directory
    if (parser == NULL) {
      parser = _parsers.isControlled(cur_path);
    }
  }
  if (dir->isValid() && ! dir->createList(cur_path)) {
    list<Node*> db_list;
    // Get database info for this directory
    db.getList(prefix, _path.c_str(), rel_path, db_list);

    list<Node*>::iterator i = dir->nodesList().begin();
    list<Node*>::iterator j = db_list.begin();
    while (i != dir->nodesList().end()) {
      if (! terminating()) {
        // Ignore inaccessible files
        if ((*i)->type() == '?') {
          i = dir->nodesList().erase(i);
          continue;
        }

        // Let the parser analyse the file data to know whether to back it up
        if ((parser != NULL) && (parser->ignore(*(*i)))) {
          i = dir->nodesList().erase(i);
          continue;
        }

        // Now pass it through the filters
        if (! _filters.empty() && _filters.match(rel_path, *(*i))) {
          i = dir->nodesList().erase(i);
          continue;
        }

        // Count the nodes considered, for info
        _nodes++;

        // For link, find out linked path
        if ((*i)->type() == 'l') {
          Link *l = new Link(*(*i), cur_path);
          delete *i;
          *i = l;
        }

        // Also deal with directory, as some fields should not be considered
        if ((*i)->type() == 'd') {
          Directory *d = new Directory(*(*i));
          delete *i;
          *i = d;
        }

        // Synchronize with DB records
        int cmp = -1;
        while ((j != db_list.end())
            && ((cmp = Node::pathCompare((*j)->name(), (*i)->name())) < 0)) {
          if (! terminating()) {
            if (verbosity() > 2) {
              cout << " --> R ";
              if (rel_path[0] != '\0') {
                cout << rel_path << "/";
              }
              cout << (*j)->name() << endl;
            }
            recurse_remove(db, prefix, _path, rel_path, *j);
          }
          delete *j;
          j = db_list.erase(j);
        }

        // Deal with data
        if ((j == db_list.end()) || (cmp > 0)) {
          // Not found in DB => new
          if (verbosity() > 2) {
            cout << " --> A ";
            if (rel_path[0] != '\0') {
              cout << rel_path << "/";
            }
            cout << (*i)->name() << endl;
          }
          db.add(prefix, _path.c_str(), rel_path, cur_path, *i);
        } else {
          // Same file name found in DB
          if (**i != **j) {
            const char* checksum = NULL;
            // Metadata differ
            if (((*i)->type() == 'f')
            && ((*j)->type() == 'f')
            && ((*i)->size() == (*j)->size())
            && ((*i)->mtime() == (*j)->mtime())) {
              // If the file data is already there, just add the new metadata;
              // if the checksum is missing, this will retry it too
              checksum = ((File*)(*j))->checksum();
              if (verbosity() > 2) {
                cout << " --> ~ ";
              }
            } else {
              // Do it all
              if (verbosity() > 2) {
                cout << " --> M ";
              }
            }
            if (verbosity() > 2) {
              if (rel_path[0] != '\0') {
                cout << rel_path << "/";
              }
              cout << (*i)->name() << endl;
            }
            db.add(prefix, _path.c_str(), rel_path, cur_path, *i, checksum);
          } else {
            // i and j have same metadata, hence same type...
            // Compare linked data
            if (((*i)->type() == 'l')
            && (strcmp(((Link*)(*i))->link(), ((Link*)(*j))->link()) != 0)) {
              if (verbosity() > 2) {
                cout << " --> L ";
                if (rel_path[0] != '\0') {
                  cout << rel_path << "/";
                }
                cout << (*i)->name() << endl;
              }
              db.add(prefix, _path.c_str(), rel_path, cur_path, *i);
            } else
            // Check that file data is present
            if (((*i)->type() == 'f')
             && (((File*)(*j))->checksum()[0] == '\0')) {
              // Checksum missing: retry
              if (verbosity() > 2) {
                cout << " --> ! ";
                if (rel_path[0] != '\0') {
                  cout << rel_path << "/";
                }
                cout << (*i)->name() << endl;
              }
              const char* checksum = ((File*)(*j))->checksum();
              db.add(prefix, _path.c_str(), rel_path, cur_path, *i, checksum);
            } else if ((*i)->type() == 'd') {
              if (verbosity() > 3) {
                cout << " --> D ";
                if (rel_path[0] != '\0') {
                  cout << rel_path << "/";
                }
                cout << (*i)->name() << endl;
              }
            }
          }
          delete *j;
          j = db_list.erase(j);
        }

        // For directory, recurse into it
        if ((*i)->type() == 'd') {
          char* dir_path = Node::path(cur_path, (*i)->name());
          recurse(db, prefix, dir_path, (Directory*) *i, parser);
          free(dir_path);
        }
      }
      delete *i;
      i = dir->nodesList().erase(i);
    }

    // Deal with removed records
    while (j != db_list.end()) {
      if (! terminating()) {
        if (verbosity() > 2) {
          cout << " --> R ";
          if (rel_path[0] != '\0') {
            cout << rel_path << "/";
          }
          cout << (*j)->name() << endl;
        }
        recurse_remove(db, prefix, _path, rel_path, *j);
      }
      delete *j;
      j = db_list.erase(j);
    }
  } else {
    cerr << strerror(errno) << ": " << rel_path << endl;
  }
  return 0;
}
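
Path::recurse interleaves three concerns per directory: filter each entry (parser, then pattern filters), synchronize the survivors against the database list, and recurse into subdirectories. A minimal sketch of that walk/filter/recurse skeleton using std::filesystem (C++17), with the filter and visit steps left as hypothetical callbacks:

#include <filesystem>
#include <functional>

namespace fs = std::filesystem;

// Walk 'dir': skip entries the filter rejects, visit the rest, and recurse
// into subdirectories -- the same shape as Path::recurse above.
void walk(const fs::path& dir,
          const std::function<bool(const fs::directory_entry&)>& ignore,
          const std::function<void(const fs::directory_entry&)>& visit) {
    for (const fs::directory_entry& entry : fs::directory_iterator(dir)) {
        if (ignore(entry)) {
            continue;                 // parser / filter said to skip it
        }
        visit(entry);                 // e.g. synchronize with the DB list
        if (entry.is_directory()) {
            walk(entry.path(), ignore, visit);
        }
    }
}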
Example #17
    bool MultiPlanRunner::pickBestPlan(size_t* out) {
        static const int timesEachPlanIsWorked = 100;

        // Run each plan some number of times.
        for (int i = 0; i < timesEachPlanIsWorked; ++i) {
            bool moreToDo = workAllPlans();
            if (!moreToDo) { break; }
        }

        if (_failure || _killed) { return false; }

        // After picking best plan, ranking will own plan stats from
        // candidate solutions (winner and losers).
        std::auto_ptr<PlanRankingDecision> ranking(new PlanRankingDecision);
        size_t bestChild = PlanRanker::pickBestPlan(_candidates, ranking.get());

        // Copy candidate order. We will need this to sort candidate stats for explain
        // after transferring ownership of 'ranking' to plan cache.
        std::vector<size_t> candidateOrder = ranking->candidateOrder;

        // Run the best plan.  Store it.
        _bestPlan.reset(new PlanExecutor(_candidates[bestChild].ws,
                                         _candidates[bestChild].root));
        _bestPlan->setYieldPolicy(_policy);
        _alreadyProduced = _candidates[bestChild].results;
        _bestSolution.reset(_candidates[bestChild].solution);

        QLOG() << "Winning solution:\n" << _bestSolution->toString() << endl;

        size_t backupChild = bestChild;
        if (_bestSolution->hasSortStage && (0 == _alreadyProduced.size())) {
            QLOG() << "Winner has blocked sort, looking for backup plan...\n";
            for (size_t i = 0; i < _candidates.size(); ++i) {
                if (!_candidates[i].solution->hasSortStage) {
                    QLOG() << "Candidate " << i << " is backup child\n";
                    backupChild = i;
                    _backupSolution = _candidates[i].solution;
                    _backupAlreadyProduced = _candidates[i].results;
                    _backupPlan = new PlanExecutor(_candidates[i].ws, _candidates[i].root);
                    _backupPlan->setYieldPolicy(_policy);
                    break;
                }
            }
        }

        // Store the choice we just made in the cache.
        if (PlanCache::shouldCacheQuery(*_query)) {
            Database* db = cc().database();
            verify(NULL != db);
            Collection* collection = db->getCollection(_query->ns());
            verify(NULL != collection);
            PlanCache* cache = collection->infoCache()->getPlanCache();
            // Create list of candidate solutions for the cache with
            // the best solution at the front.
            std::vector<QuerySolution*> solutions;

            // Generate solutions and ranking decisions sorted by score.
            for (size_t orderingIndex = 0;
                 orderingIndex < candidateOrder.size(); ++orderingIndex) {
                // index into candidates/ranking
                size_t i = candidateOrder[orderingIndex];
                solutions.push_back(_candidates[i].solution);
            }

            // Check solution cache data. Do not add to cache if
            // we have any invalid SolutionCacheData data.
            // XXX: One known example is 2D queries
            bool validSolutions = true;
            for (size_t i = 0; i < solutions.size(); ++i) {
                if (NULL == solutions[i]->cacheData.get()) {
                    QLOG() << "Not caching query because this solution has no cache data: "
                           << solutions[i]->toString();
                    validSolutions = false;
                    break;
                }
            }

            if (validSolutions) {
                cache->add(*_query, solutions, ranking.release());
            }
        }

        // Clear out the candidate plans, leaving only stats as we're all done w/them.
        // Traverse candidate plans in order of score
        for (size_t orderingIndex = 0;
             orderingIndex < candidateOrder.size(); ++orderingIndex) {
            // index into candidates/ranking
            size_t i = candidateOrder[orderingIndex];

            if (i == bestChild) { continue; }
            if (i == backupChild) { continue; }

            delete _candidates[i].solution;

            // Remember the stats for the candidate plan because we always show it on an
            // explain. (The {verbose:false} in explain() is a client-side trick; we always
            // generate a "verbose" explain.)
            PlanStageStats* stats = _candidates[i].root->getStats();
            if (stats) {
                _candidateStats.push_back(stats);
            }
            delete _candidates[i].root;

            // ws must die after the root.
            delete _candidates[i].ws;
        }

        _candidates.clear();
        if (NULL != out) { *out = bestChild; }
        return true;
    }
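
In outline, pickBestPlan works every candidate for a fixed number of rounds, ranks by accumulated score, promotes the winner, and keeps the first non-blocking candidate as a backup in case the winner's blocking sort never unblocks. Stripped of the MongoDB machinery, the selection core looks roughly like this sketch (types and scoring hypothetical):

#include <cstddef>
#include <vector>

struct Candidate {
    double score = 0.0;     // accumulated while the plan is worked
    bool blocking = false;  // e.g. the plan has a blocking sort stage
    bool workOnce();        // run one unit of work; false when exhausted
};

// Work every candidate the same number of rounds, then return the index of
// the top scorer; '*backup' gets the first non-blocking candidate, if any.
// Assumes 'candidates' is non-empty.
std::size_t pickBest(std::vector<Candidate>& candidates, std::size_t* backup) {
    static const int rounds = 100;
    for (int r = 0; r < rounds; ++r) {
        bool moreToDo = false;
        for (Candidate& c : candidates) {
            moreToDo = c.workOnce() || moreToDo;
        }
        if (!moreToDo) {
            break;   // every plan is exhausted; no point in more rounds
        }
    }

    std::size_t best = 0;
    for (std::size_t i = 1; i < candidates.size(); ++i) {
        if (candidates[i].score > candidates[best].score) {
            best = i;
        }
    }
    if (backup != nullptr) {
        *backup = best;
        for (std::size_t i = 0; i < candidates.size(); ++i) {
            if (!candidates[i].blocking) {
                *backup = i;
                break;
            }
        }
    }
    return best;
}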
Example #18
AccountFrame::pointer
AccountFrame::loadAccount(AccountID const& accountID, Database& db)
{
    LedgerKey key;
    key.type(ACCOUNT);
    key.account().accountID = accountID;
    if (cachedEntryExists(key, db))
    {
        auto p = getCachedEntry(key, db);
        return p ? std::make_shared<AccountFrame>(*p) : nullptr;
    }

    std::string actIDStrKey = PubKeyUtils::toStrKey(accountID);

    std::string publicKey, inflationDest, creditAuthKey;
    std::string homeDomain, thresholds;
    soci::indicator inflationDestInd;

    AccountFrame::pointer res = make_shared<AccountFrame>(accountID);
    AccountEntry& account = res->getAccount();

    auto prep =
        db.getPreparedStatement("SELECT balance, seqnum, numsubentries, "
                                "inflationdest, homedomain, thresholds, "
                                "flags, lastmodified "
                                "FROM accounts WHERE accountid=:v1");
    auto& st = prep.statement();
    st.exchange(into(account.balance));
    st.exchange(into(account.seqNum));
    st.exchange(into(account.numSubEntries));
    st.exchange(into(inflationDest, inflationDestInd));
    st.exchange(into(homeDomain));
    st.exchange(into(thresholds));
    st.exchange(into(account.flags));
    st.exchange(into(res->getLastModified()));
    st.exchange(use(actIDStrKey));
    st.define_and_bind();
    {
        auto timer = db.getSelectTimer("account");
        st.execute(true);
    }

    if (!st.got_data())
    {
        putCachedEntry(key, nullptr, db);
        return nullptr;
    }

    account.homeDomain = homeDomain;

    bn::decode_b64(thresholds.begin(), thresholds.end(),
                   res->mAccountEntry.thresholds.begin());

    if (inflationDestInd == soci::i_ok)
    {
        account.inflationDest.activate() =
            PubKeyUtils::fromStrKey(inflationDest);
    }

    account.signers.clear();

    if (account.numSubEntries != 0)
    {
        auto signers = loadSigners(db, actIDStrKey);
        account.signers.insert(account.signers.begin(), signers.begin(),
                               signers.end());
    }

    res->normalize();
    res->mUpdateSigners = false;
    assert(res->isValid());
    res->mKeyCalculated = false;
    res->putCachedEntry(db);
    return res;
}
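
The exchange(into(...)) / exchange(use(...)) calls above are SOCI's binding API: into() maps result columns to C++ variables in SELECT-list order, and use() binds input parameters. A minimal sketch using SOCI's one-shot syntax; the SQLite backend, schema, and header paths are assumptions (header paths vary across SOCI versions):

#include <soci/soci.h>
#include <soci/sqlite3/soci-sqlite3.h>
#include <iostream>
#include <string>

int main() {
    soci::session sql(soci::sqlite3, "accounts.db");

    std::string accountid = "GABC...";   // input parameter (sample value)
    long long balance = 0;               // output column
    soci::indicator ind;                 // flags NULL results

    sql << "SELECT balance FROM accounts WHERE accountid = :id",
        soci::use(accountid), soci::into(balance, ind);

    if (sql.got_data() && ind == soci::i_ok) {
        std::cout << "balance = " << balance << "\n";
    }
}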
Example #19
Session::~Session()
{
   Database* instance = Database::get_instance();
   instance->log_attempt(this->authenticated, this->ip, this->hostname, this->lb);
   close(this->fd);
}
Example #20
void
AccountFrame::storeUpdate(LedgerDelta& delta, Database& db, bool insert)
{
    assert(isValid());

    touch(delta);

    flushCachedEntry(db);

    std::string actIDStrKey = PubKeyUtils::toStrKey(mAccountEntry.accountID);
    std::string sql;

    if (insert)
    {
        sql = std::string(
            "INSERT INTO accounts ( accountid, balance, seqnum, "
            "numsubentries, inflationdest, homedomain, thresholds, flags, "
            "lastmodified ) "
            "VALUES ( :id, :v1, :v2, :v3, :v4, :v5, :v6, :v7, :v8 )");
    }
    else
    {
        sql = std::string(
            "UPDATE accounts SET balance = :v1, seqnum = :v2, "
            "numsubentries = :v3, "
            "inflationdest = :v4, homedomain = :v5, thresholds = :v6, "
            "flags = :v7, lastmodified = :v8 WHERE accountid = :id");
    }

    auto prep = db.getPreparedStatement(sql);

    soci::indicator inflation_ind = soci::i_null;
    string inflationDestStrKey;

    if (mAccountEntry.inflationDest)
    {
        inflationDestStrKey =
            PubKeyUtils::toStrKey(*mAccountEntry.inflationDest);
        inflation_ind = soci::i_ok;
    }

    string thresholds(bn::encode_b64(mAccountEntry.thresholds));

    {
        soci::statement& st = prep.statement();
        st.exchange(use(actIDStrKey, "id"));
        st.exchange(use(mAccountEntry.balance, "v1"));
        st.exchange(use(mAccountEntry.seqNum, "v2"));
        st.exchange(use(mAccountEntry.numSubEntries, "v3"));
        st.exchange(use(inflationDestStrKey, inflation_ind, "v4"));
        string homeDomain(mAccountEntry.homeDomain);
        st.exchange(use(homeDomain, "v5"));
        st.exchange(use(thresholds, "v6"));
        st.exchange(use(mAccountEntry.flags, "v7"));
        st.exchange(use(getLastModified(), "v8"));
        st.define_and_bind();
        {
            auto timer = insert ? db.getInsertTimer("account")
                                : db.getUpdateTimer("account");
            st.execute(true);
        }

        if (st.get_affected_rows() != 1)
        {
            throw std::runtime_error("Could not update data in SQL");
        }
        if (insert)
        {
            delta.addEntry(*this);
        }
        else
        {
            delta.modEntry(*this);
        }
    }

    if (mUpdateSigners)
    {
        applySigners(db);
    }
}
Example #21
    Status MMAPV1Engine::repairDatabase( OperationContext* txn,
                                         const std::string& dbName,
                                         bool preserveClonedFilesOnFailure,
                                         bool backupOriginalFiles ) {
        unique_ptr<RepairFileDeleter> repairFileDeleter;

        // Must be done before and after repair
        getDur().syncDataAndTruncateJournal(txn);

        intmax_t totalSize = dbSize( dbName );
        intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath);

        if ( freeSize > -1 && freeSize < totalSize ) {
            return Status( ErrorCodes::OutOfDiskSpace,
                           str::stream() << "Cannot repair database " << dbName
                           << " having size: " << totalSize
                           << " (bytes) because free disk space is: " << freeSize << " (bytes)" );
        }

        txn->checkForInterrupt();

        Path reservedPath =
            uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles ) ?
                                "backup" : "_tmp" );
        bool created = false;
        MONGO_ASSERT_ON_EXCEPTION( created = boost::filesystem::create_directory( reservedPath ) );
        invariant( created );
        string reservedPathString = reservedPath.string();

        if ( !preserveClonedFilesOnFailure )
            repairFileDeleter.reset( new RepairFileDeleter( txn,
                                                            dbName,
                                                            reservedPathString,
                                                            reservedPath ) );

        {
            Database* originalDatabase = dbHolder().openDb(txn, dbName);
            if (originalDatabase == NULL) {
                return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair");
            }

            unique_ptr<MMAPV1DatabaseCatalogEntry> dbEntry;
            unique_ptr<Database> tempDatabase;

            // Must call this before MMAPV1DatabaseCatalogEntry's destructor closes the DB files
            ON_BLOCK_EXIT(&dur::DurableInterface::syncDataAndTruncateJournal, &getDur(), txn);

            {
                dbEntry.reset(new MMAPV1DatabaseCatalogEntry(txn,
                                                             dbName,
                                                             reservedPathString,
                                                             storageGlobalParams.directoryperdb,
                                                             true));
                tempDatabase.reset( new Database(txn, dbName, dbEntry.get()));
            }

            map<string,CollectionOptions> namespacesToCopy;
            {
                string ns = dbName + ".system.namespaces";
                OldClientContext ctx(txn,  ns );
                Collection* coll = originalDatabase->getCollection( ns );
                if ( coll ) {
                    auto cursor = coll->getCursor(txn);
                    while (auto record = cursor->next()) {
                        BSONObj obj = record->data.releaseToBson();

                        string ns = obj["name"].String();

                        NamespaceString nss( ns );
                        if ( nss.isSystem() ) {
                            if ( nss.isSystemDotIndexes() )
                                continue;
                            if ( nss.coll() == "system.namespaces" )
                                continue;
                        }

                        if ( !nss.isNormal() )
                            continue;

                        CollectionOptions options;
                        if ( obj["options"].isABSONObj() ) {
                            Status status = options.parse( obj["options"].Obj() );
                            if ( !status.isOK() )
                                return status;
                        }
                        namespacesToCopy[ns] = options;
                    }
                }
            }

            for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
                  i != namespacesToCopy.end();
                  ++i ) {
                string ns = i->first;
                CollectionOptions options = i->second;

                Collection* tempCollection = NULL;
                {
                    WriteUnitOfWork wunit(txn);
                    tempCollection = tempDatabase->createCollection(txn, ns, options, false);
                    wunit.commit();
                }

                OldClientContext readContext(txn, ns, originalDatabase);
                Collection* originalCollection = originalDatabase->getCollection( ns );
                invariant( originalCollection );

                // data

                // TODO SERVER-14812 add a mode that drops duplicates rather than failing
                MultiIndexBlock indexer(txn, tempCollection );
                {
                    vector<BSONObj> indexes;
                    IndexCatalog::IndexIterator ii =
                        originalCollection->getIndexCatalog()->getIndexIterator( txn, false );
                    while ( ii.more() ) {
                        IndexDescriptor* desc = ii.next();
                        indexes.push_back( desc->infoObj() );
                    }

                    Status status = indexer.init( indexes );
                    if (!status.isOK()) {
                        return status;
                    }
                }

                auto cursor = originalCollection->getCursor(txn);
                while (auto record = cursor->next()) {
                    BSONObj doc = record->data.releaseToBson();

                    WriteUnitOfWork wunit(txn);
                    StatusWith<RecordId> result = tempCollection->insertDocument(txn,
                                                                                 doc,
                                                                                 &indexer,
                                                                                 false);
                    if ( !result.isOK() )
                        return result.getStatus();

                    wunit.commit();
                    txn->checkForInterrupt();
                }
                
                Status status = indexer.doneInserting();
                if (!status.isOK())
                    return status;

                {
                    WriteUnitOfWork wunit(txn);
                    indexer.commit();
                    wunit.commit();
                }

            }

            getDur().syncDataAndTruncateJournal(txn);

            // need both in case journaling is disabled
            MongoFile::flushAll(true);

            txn->checkForInterrupt();
        }

        // at this point if we abort, we don't want to delete new files
        // as they might be the only copies

        if ( repairFileDeleter.get() )
            repairFileDeleter->success();

        // Close the database so we can rename/delete the original data files
        dbHolder().close(txn, dbName);

        if ( backupOriginalFiles ) {
            _renameForBackup( dbName, reservedPath );
        }
        else {
            // first make new directory before deleting data
            Path newDir = Path(storageGlobalParams.dbpath) / dbName;
            MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));

            // this deletes old files
            _deleteDataFiles( dbName );

            if ( !boost::filesystem::exists(newDir) ) {
                // we deleted because of directoryperdb
                // re-create
                MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));
            }
        }

        _replaceWithRecovered( dbName, reservedPathString.c_str() );

        if (!backupOriginalFiles) {
            MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::remove_all(reservedPath));
        }

        // Reopen the database so it's discoverable
        dbHolder().openDb(txn, dbName);

        return Status::OK();
    }
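
The repair flow above is a rebuild-then-swap: write a fresh copy of the database into a reserved scratch directory, and only after the rebuild fully succeeds move the originals aside and promote the rebuilt files, so a failure partway leaves the originals untouched. A minimal sketch of that sequencing with std::filesystem (C++17); paths and the rebuild callback are hypothetical:

#include <filesystem>

namespace fs = std::filesystem;

// Rebuild 'live' into a scratch directory, then swap the result in. The
// 'rebuild' callback writes a complete copy of the data into its argument;
// if it throws, the original directory is never touched.
void repairInto(const fs::path& live, const fs::path& scratch,
                void (*rebuild)(const fs::path&), bool keepBackup) {
    fs::create_directory(scratch);
    rebuild(scratch);

    fs::path backup = live;
    backup += ".bak";
    fs::rename(live, backup);     // move the originals aside
    fs::rename(scratch, live);    // promote the rebuilt copy

    if (!keepBackup) {
        fs::remove_all(backup);
    }
}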
Example #22
void
AccountFrame::applySigners(Database& db)
{
    std::string actIDStrKey = PubKeyUtils::toStrKey(mAccountEntry.accountID);

    // generates a diff with the signers stored in the database

    // first, load the signers stored in the database for this account
    auto signers = loadSigners(db, actIDStrKey);

    auto it_new = mAccountEntry.signers.begin();
    auto it_old = signers.begin();
    bool changed = false;
    // iterate over both sets from smallest to biggest key
    while (it_new != mAccountEntry.signers.end() || it_old != signers.end())
    {
        bool updated = false, added = false;

        if (it_old == signers.end())
        {
            added = true;
        }
        else if (it_new != mAccountEntry.signers.end())
        {
            updated = (it_new->pubKey == it_old->pubKey);
            if (!updated)
            {
                added = (it_new->pubKey < it_old->pubKey);
            }
        }
        else
        {
            // deleted
        }

        if (updated)
        {
            if (it_new->weight != it_old->weight)
            {
                std::string signerStrKey =
                    PubKeyUtils::toStrKey(it_new->pubKey);
                auto timer = db.getUpdateTimer("signer");
                auto prep2 = db.getPreparedStatement(
                    "UPDATE signers set weight=:v1 WHERE "
                    "accountid=:v2 AND publickey=:v3");
                auto& st = prep2.statement();
                st.exchange(use(it_new->weight));
                st.exchange(use(actIDStrKey));
                st.exchange(use(signerStrKey));
                st.define_and_bind();
                st.execute(true);
                if (st.get_affected_rows() != 1)
                {
                    throw std::runtime_error("Could not update data in SQL");
                }
                changed = true;
            }
            it_new++;
            it_old++;
        }
        else if (added)
        {
            // signer was added
            std::string signerStrKey = PubKeyUtils::toStrKey(it_new->pubKey);

            auto prep2 = db.getPreparedStatement("INSERT INTO signers "
                                                 "(accountid,publickey,weight) "
                                                 "VALUES (:v1,:v2,:v3)");
            auto& st = prep2.statement();
            st.exchange(use(actIDStrKey));
            st.exchange(use(signerStrKey));
            st.exchange(use(it_new->weight));
            st.define_and_bind();
            st.execute(true);

            if (st.get_affected_rows() != 1)
            {
                throw std::runtime_error("Could not update data in SQL");
            }
            changed = true;
            it_new++;
        }
        else
        {
            // signer was deleted
            std::string signerStrKey = PubKeyUtils::toStrKey(it_old->pubKey);

            auto prep2 = db.getPreparedStatement("DELETE from signers WHERE "
                                                 "accountid=:v2 AND "
                                                 "publickey=:v3");
            auto& st = prep2.statement();
            st.exchange(use(actIDStrKey));
            st.exchange(use(signerStrKey));
            st.define_and_bind();
            {
                auto timer = db.getDeleteTimer("signer");
                st.execute(true);
            }

            if (st.get_affected_rows() != 1)
            {
                throw std::runtime_error("Could not update data in SQL");
            }

            changed = true;
            it_old++;
        }
    }

    if (changed)
    {
        // Flush again to ensure changed signers are reloaded.
        flushCachedEntry(db);
    }
}
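
applySigners is a two-pointer diff over two sequences sorted by key: advance both iterators together, classifying each element as updated (keys equal), added (new key comes first), or deleted (old key comes first). The same skeleton, self-contained, with hypothetical types and callbacks:

#include <string>
#include <vector>

struct Signer { std::string key; int weight; };

// Diff two vectors sorted by 'key', reporting adds, updates and removals.
template <typename OnAdd, typename OnUpdate, typename OnRemove>
void diffSigners(const std::vector<Signer>& newSet,
                 const std::vector<Signer>& oldSet,
                 OnAdd onAdd, OnUpdate onUpdate, OnRemove onRemove) {
    auto itNew = newSet.begin();
    auto itOld = oldSet.begin();
    while (itNew != newSet.end() || itOld != oldSet.end()) {
        if (itOld == oldSet.end() ||
            (itNew != newSet.end() && itNew->key < itOld->key)) {
            onAdd(*itNew++);                  // only in the new set
        } else if (itNew == newSet.end() || itOld->key < itNew->key) {
            onRemove(*itOld++);               // only in the old set
        } else {
            if (itNew->weight != itOld->weight) {
                onUpdate(*itNew);             // same key, changed weight
            }
            ++itNew;
            ++itOld;
        }
    }
}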
Example #23
//! ---------------- addToDatabase ---------------------------------------------
void HistoManager::addToDatabase()
{
    m_timer->stop();

    if(Engine::instance()->state() != ENGINE::PLAYING) {
       m_timer->stop();
       return;
    }

    MEDIA::TrackPtr media   =  m_player->playingTrack();
    int       now_date      =  QDateTime::currentDateTime().toTime_t();

    QString   engine_url    =  media->url;
    if(engine_url.isEmpty())
      return;

    QString   media_name;
    if(media->type() == TYPE_TRACK)
      media_name = media->artist + " - " + media->album + " - " + media->title;
    else
      media_name = media->name;

    Database db;
    if (!db.connect()) return;

    QSqlQuery("BEGIN TRANSACTION;",*db.sqlDb());

    //---------------------------------------
    //    add or update entry in history
    //---------------------------------------
    QSqlQuery q("", *db.sqlDb());
    q.prepare("SELECT `id`,`url` FROM `histo` WHERE `url`=:val;");
    q.bindValue(":val", engine_url );
    q.exec();

    if ( !q.next() ) {
      Debug::debug() << "[Histo] add a new entry" << engine_url;

      q.prepare("INSERT INTO `histo`(`url`,`name`,`date`) VALUES (:u,:n,:d);");
      q.bindValue(":u", engine_url);
      q.bindValue(":n", media_name);
      q.bindValue(":d", now_date);
      q.exec();

      if(q.numRowsAffected() < 1)
        Debug::warning() << "[Histo] error adding entry !! ";

      QSqlQuery query("DELETE FROM `histo` WHERE `id` <= (SELECT MAX(`id`) FROM `histo`) - 2000;", *db.sqlDb());
    }
    else
    {
      Debug::debug() << "[Histo] update an existing entry" << engine_url;
      int histo_id = q.value(0).toString().toInt();

      q.prepare("UPDATE `histo` SET `date`=:d WHERE `id`=:id;");
      q.bindValue(":d", now_date);
      q.bindValue(":id", histo_id);
      q.exec();
    }

    //---------------------------------------
    //    update playcount
    //---------------------------------------
    q.prepare("SELECT `id`,`artist_id`,`album_id` FROM `view_tracks` WHERE `filename`=:val LIMIT 1;");
    q.bindValue(":val", engine_url );
    q.exec();

    if (q.next())
    {
      //Debug::debug() << "update playcount!";

      const int trackId  = q.value(0).toInt();
      const int artistId = q.value(1).toInt();
      const int albumId  = q.value(2).toInt();

      QSqlQuery query1("UPDATE `tracks` " \
                       "SET `playcount`=`playcount`+1 " \
                       "WHERE `id`="+QString::number(trackId)+";", *db.sqlDb());

      QSqlQuery query2("UPDATE `albums` " \
                       "SET `playcount`=`playcount`+1 " \
                       "WHERE `id`="+QString::number(albumId)+";", *db.sqlDb());

      QSqlQuery query3("UPDATE `artists` " \
                       "SET `playcount`=`playcount`+1 " \
                       "WHERE `id`="+QString::number(artistId)+";", *db.sqlDb());

      /* update collection model item */
      MEDIA::TrackPtr track = MEDIA::TrackPtr(
               LocalTrackModel::instance()->trackItemHash.value(trackId)
               );

      if(!track.isNull()) {
        track->playcount++;
        track->lastPlayed = now_date;

        MEDIA::AlbumPtr album = MEDIA::AlbumPtr::staticCast(track->parent());
        album->playcount++;

        MEDIA::ArtistPtr artist = MEDIA::ArtistPtr::staticCast(album->parent());
        artist->playcount++;
      }
    }


    QSqlQuery("COMMIT TRANSACTION;",*db.sqlDb());
}
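
The history update above does a SELECT and then branches into INSERT or UPDATE. On an SQLite backend (which the dialect above suggests) the two statements can collapse into one UPSERT; a hypothetical sketch, assuming SQLite 3.24+ and a unique index on histo.url, neither of which is guaranteed by the code above:

#include <QSqlDatabase>
#include <QSqlQuery>
#include <QString>
#include <QVariant>

// Insert a history row, or just bump its date if the URL is already there.
bool touchHistoEntry(QSqlDatabase& db, const QString& url,
                     const QString& name, int date) {
    QSqlQuery q(db);
    q.prepare("INSERT INTO histo(url, name, date) VALUES (:u, :n, :d) "
              "ON CONFLICT(url) DO UPDATE SET date = excluded.date;");
    q.bindValue(":u", url);
    q.bindValue(":n", name);
    q.bindValue(":d", date);
    return q.exec();   // false on SQL error
}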
Example #24
void ReportStudent::go()
{
    ofstream HTML("Students.html");
    db.Write2HTML_Student(HTML);
    cout << "Output successful" << endl << endl;
}
Example #25
File: ohillres.cpp  Project: 112212/7k2
//------- Begin of function HillRes::load_hill_block_info -------//
//
void HillRes::load_hill_block_info()
{
	HillBlockRec		*hillBlockRec;
	HillBlockInfo		*hillBlockInfo;
	int			i;
	uint32_t		bitmapOffset;
	
	//---- read in hill count and initialize hill block info array ----//

	String hillDbName;
	hillDbName = DIR_RES;
	hillDbName += "HILL";
	hillDbName += config.terrain_set;
	hillDbName += ".RES";
	Database hillDbObj(hillDbName, 1);
	// Database *dbHill = game_set.open_db(HILL_DB);	// only one database can be opened at a time
	Database *dbHill = &hillDbObj;

	hill_block_count      = (short) dbHill->rec_count();
	hill_block_info_array = (HillBlockInfo*) mem_add( sizeof(HillBlockInfo)*hill_block_count );

	memset( hill_block_info_array, 0, sizeof(HillBlockInfo) * hill_block_count );
	max_pattern_id = 0;

	//---------- read in HILL.DBF ---------//

	for( i=0 ; i<hill_block_count ; i++ )
	{
		hillBlockRec  = (HillBlockRec*) dbHill->read(i+1);
		hillBlockInfo = hill_block_info_array+i;
		hillBlockInfo->block_id = i + 1;
		hillBlockInfo->pattern_id = (char) misc.atoi( hillBlockRec->pattern_id, hillBlockRec->PATTERN_ID_LEN);
		if( hillBlockInfo->pattern_id > max_pattern_id)
			max_pattern_id = hillBlockInfo->pattern_id;
		hillBlockInfo->sub_pattern_id = (char) misc.atoi(hillBlockRec->sub_pattern_id, hillBlockRec->SUB_PATTERN_ID_LEN);
		hillBlockInfo->special_flag = hillBlockRec->special_flag;
		if( hillBlockRec->special_flag == ' ')
			hillBlockInfo->special_flag = 0;
		hillBlockInfo->layer = hillBlockRec->layer - '0';

		hillBlockInfo->priority = (char) misc.atoi( hillBlockRec->priority, hillBlockRec->PRIORITY_LEN);
		hillBlockInfo->bitmap_type = hillBlockRec->bitmap_type;
		hillBlockInfo->offset_x = misc.atoi(hillBlockRec->offset_x, hillBlockRec->OFFSET_LEN);
		hillBlockInfo->offset_y = misc.atoi(hillBlockRec->offset_y, hillBlockRec->OFFSET_LEN);

		memcpy( &bitmapOffset, hillBlockRec->bitmap_ptr, sizeof(uint32_t) );
		hillBlockInfo->bitmap_ptr = res_bitmap.read_imported(bitmapOffset);
	}

	//------ build index for the first block of each pattern -------//
	// e.g first block id of pattern 1 is 1
	//     first block id of pattern 3 is 4
	//     first block id of pattern 4 is 7
	//     last block id (which is pattern 4) is 10
	// first_block_index is { 1, 4, 4, 7 };
	// such that, blocks which are pattern 1 are between [1,4)
	//                                     2 are between [4,4) i.e. not found
	//                                     3 are between [4,7)
	//                                     4 are between [7,11)
	// see also first_block()
	//
	first_block_index = (short *) mem_add(sizeof(short) * max_pattern_id);
	memset( first_block_index, 0, sizeof(short) * max_pattern_id);
	int patternMarked = 0;
	for(i = 0, hillBlockInfo = hill_block_info_array; i < hill_block_count;
		++i, ++hillBlockInfo)
	{
		err_when( hillBlockInfo->pattern_id < patternMarked);
		while(patternMarked < hillBlockInfo->pattern_id)
		{
			first_block_index[patternMarked] = i+1;
			patternMarked++;
		}
	}

}
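
The first_block_index comment above defines half-open ranges: blocks of pattern p occupy [first_block_index[p-1], start of the next pattern), with hill_block_count + 1 closing the last range. A hypothetical standalone version of the lookup this enables (the real first_block() is a HillRes method):

// Return the first block id of 'pattern_id', or 0 if the pattern has no
// blocks. Block ids are 1-based, as in the example above ({1, 4, 4, 7}).
short first_block_of(const short* first_block_index, short max_pattern_id,
                     short hill_block_count, short pattern_id) {
    short begin = first_block_index[pattern_id - 1];
    short end = (pattern_id < max_pattern_id) ? first_block_index[pattern_id]
                                              : (short)(hill_block_count + 1);
    return (begin < end) ? begin : 0;   // empty range => pattern not found
}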
Example #26
void ReportCourse::go()
{
    ofstream HTML("Courses.html");
    db.Write2HTML_Course(HTML);
    cout << "Output successful" << endl << endl;
}
Example #27
void ChemotaxisMovementSimulator::initialise()
{

	if(agent==NULL)
		throw HiveException("Cannot initialize ChemotaxisMovementSimulator if no agent was added.",
				"ChemotaxisMovementSimulator::initialise()");

	// Check the database for the presence of the basic objects, and extract that info
	Database *db = agent->getDatabase();
	if(db->existsDataItem("CellSpeed")) {
		DoubleData *dd=(DoubleData *)db->getDataItem("CellSpeed");
		speed = dd->getDouble();
	} else {
		throw HiveException("ChemotaxisMovementSimulator needs CellSpeed data.",
						"ChemotaxisMovementSimulator::initialise()");
	}
	if(db->existsDataItem("RotDiffConst")) {
		DoubleData *dd=(DoubleData *)db->getDataItem("RotDiffConst");
		rotDiffConst = dd->getDouble();
	} else {
		throw HiveException("ChemotaxisMovementSimulator needs RotationalDiffusion data.",
							"ChemotaxisMovementSimulator::initialise()");
	}
	if(db->existsDataItem("Position")) {
		DoubleVectorData *dvd=(DoubleVectorData *)db->getDataItem("Position");
		pos[X]=dvd->getDouble(X+1);
		pos[Y]=dvd->getDouble(Y+1);
		pos[Z]=dvd->getDouble(Z+1);
	} else {
		throw HiveException("ChemotaxisMovementSimulator needs RotationalDiffusion data.",
							"ChemotaxisMovementSimulator::initialise()");
	}
	if(db->existsDataItem("MovementDt")) {
		DoubleData *dd=(DoubleData *)db->getDataItem("MovementDt");
		movementDt = dd->getDouble();
	} else {
		throw HiveException("ChemotaxisMovementSimulator needs MovementDt data.",
						"ChemotaxisMovementSimulator::initialise()");
	}
	if(db->existsDataItem("CurrentCellState")) {
		IntegerData *id=(IntegerData *)db->getDataItem("CurrentCellState");
		currentMovementState = id->getInteger();
	} else {
		throw HiveException("ChemotaxisMovementSimulator needs CurrentCellState data.",
						"ChemotaxisMovementSimulator::initialise()");
	}
	if(db->existsDataItem("LastCellState")) {
		IntegerData *id=(IntegerData *)db->getDataItem("LastCellState");
		lastMovementState = id->getInteger();
	} else {
		throw HiveException("ChemotaxisMovementSimulator needs LastCellState data.",
						"ChemotaxisMovementSimulator::initialise()");
	}



	//Now that we have the basic data, we can set up the simulator's internal
	//information here
	gs = new Util::GammaSampler(DefaultGammaAlpha,DefaultGammaBeta,DefaultGammaOffset);

	//starting direction
	dir[X]=1; dir[Y]=0; dir[Z]=0;
	up[X]=0; up[Y]=1; up[Z]=0;

	//Init the rotation matrix
	rotMat[0][0]=1;   rotMat[0][1]=0;   rotMat[0][2]=0;
	rotMat[1][0]=0;   rotMat[1][1]=1;   rotMat[1][2]=0;
	rotMat[2][0]=0;   rotMat[2][1]=0;   rotMat[2][2]=1;


	//orient ourselves randomly
	changeDirRandom();
}
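
Every required item above goes through the same "exists, cast, extract, else throw" sequence. A helper in the same style would collapse each branch to one line; this is a sketch only (getRequiredDouble is not part of the Hive API, and it assumes Database::existsDataItem/getDataItem accept a std::string and that HiveException is constructed as above):

	// Hypothetical helper: fetch a required DoubleData item or throw with
	// the same message shape as the branches in initialise().
	static double getRequiredDouble(Database *db, const std::string &name)
	{
		if(!db->existsDataItem(name))
			throw HiveException("ChemotaxisMovementSimulator needs "+name+" data.",
					"ChemotaxisMovementSimulator::initialise()");
		return ((DoubleData *)db->getDataItem(name))->getDouble();
	}

With it, the CellSpeed branch above reduces to: speed = getRequiredDouble(db, "CellSpeed");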
Example #28
File: osnowres.cpp  Project: 112212/7k2
// ------- Begin of function SnowRes::load_info ------//
void SnowRes::load_info()
{
	SnowRec		*snowRec;
	SnowInfo		*snowInfo;
	int			i;
	uint32_t		bitmapOffset;
	
	//---- read in snow map count and initialize snow info array ----//

	Database *dbSnow = game_set.open_db(SNOW_DB);	// only one database can be opened at a time

	snow_info_count = (int) dbSnow->rec_count();
	snow_info_array = (SnowInfo *) mem_add(sizeof(SnowInfo) * snow_info_count );

	memset( snow_info_array, 0, sizeof(SnowInfo) * snow_info_count );
	root_count = 0;

	//---------- read in SNOWG.DBF ---------//

	for( i=0 ; i<snow_info_count; i++ )
	{
		snowRec = (SnowRec *) dbSnow->read(i+1);
		snowInfo = snow_info_array+i;

		snowInfo->snow_map_id = i+1;
		memcpy( &bitmapOffset, snowRec->bitmap_ptr, sizeof(uint32_t) );
		snowInfo->bitmap_ptr	= res_bitmap.read_imported(bitmapOffset);
		if( snowRec->offset_x[0] != '\0' && snowRec->offset_x[0] != ' ')
			snowInfo->offset_x = (short) misc.atoi(snowRec->offset_x, snowRec->OFFSET_LEN);
		else
			snowInfo->offset_x = -(snowInfo->bitmap_width() / 2);
		if( snowRec->offset_y[0] != '\0' && snowRec->offset_y[0] != ' ')
			snowInfo->offset_y = (short) misc.atoi(snowRec->offset_y, snowRec->OFFSET_LEN);
		else
			snowInfo->offset_y = -(snowInfo->bitmap_height() / 2);

		snowInfo->next_count = 0;
		snowInfo->prev_count = 0;

		if( snowRec->next_file1[0] != '\0' && snowRec->next_file1[0] != ' ')
		{
			snowInfo->next_file[snowInfo->next_count++] = snow_info_array + misc.atoi( snowRec->next_ptr1, snowRec->RECNO_LEN) -1;
		}
		if( snowRec->next_file2[0] != '\0' && snowRec->next_file2[0] != ' ')
		{
			snowInfo->next_file[snowInfo->next_count++] = snow_info_array + misc.atoi( snowRec->next_ptr2, snowRec->RECNO_LEN) -1;
		}
		if( snowRec->next_file3[0] != '\0' && snowRec->next_file3[0] != ' ')
		{
			snowInfo->next_file[snowInfo->next_count++] = snow_info_array + misc.atoi( snowRec->next_ptr3, snowRec->RECNO_LEN) -1;
		}
		if( snowRec->next_file4[0] != '\0' && snowRec->next_file4[0] != ' ')
		{
			snowInfo->next_file[snowInfo->next_count++] = snow_info_array + misc.atoi( snowRec->next_ptr4, snowRec->RECNO_LEN) -1;
		}
		if( snowRec->prev_file1[0] != '\0' && snowRec->prev_file1[0] != ' ')
		{
			snowInfo->prev_file[snowInfo->prev_count++] = snow_info_array + misc.atoi( snowRec->prev_ptr1, snowRec->RECNO_LEN) -1;
		}
		if( snowRec->prev_file2[0] != '\0' && snowRec->prev_file2[0] != ' ')
		{
			snowInfo->prev_file[snowInfo->prev_count++] = snow_info_array + misc.atoi( snowRec->prev_ptr2, snowRec->RECNO_LEN) -1;
		}

		if(snowInfo->is_root())
		{
			root_count++;
		}
	}

	root_info_array = (SnowInfo **)mem_add(sizeof(SnowInfo *) * root_count);
	unsigned j = 0;
	for( i=0 ; i<snow_info_count; i++ )
	{
		snowInfo = snow_info_array+i;
		if(snowInfo->is_root())
		{
			root_info_array[j++] = snowInfo;
			if( j >= root_count)
				break;
		}
	}
}
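
load_info() counts roots with is_root() before allocating root_info_array. A plausible definition, consistent with how prev_count is filled above, is simply "no predecessor stage"; this is a guess at the accessor, not the verified 7k2 source:

	// Plausible sketch of SnowInfo::is_root(): a snow map that no other map
	// lists as its successor, i.e. prev_count stayed 0 during load_info().
	int SnowInfo::is_root()
	{
		return prev_count == 0;
	}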
Example #29
    /* must call this on a delete so we clean up the cursors. */
    void ClientCursor::aboutToDelete(const StringData& ns,
                                     const NamespaceDetails* nsd,
                                     const DiskLoc& dl) {
        // Begin cursor-only
        NoPageFaultsAllowed npfa;
        // End cursor-only

        recursive_scoped_lock lock(ccmutex);

        Database *db = cc().database();
        verify(db);

        aboutToDeleteForSharding( ns, db, nsd, dl );

        // Check our non-cached active runner list.
        for (set<Runner*>::iterator it = nonCachedRunners.begin(); it != nonCachedRunners.end();
             ++it) {

            Runner* runner = *it;
            if (0 == ns.compare(runner->ns())) {
                runner->invalidate(dl);
            }
        }

        // TODO: This requires optimization.  We walk through *all* CCs and send the delete to every
        // CC open on the db we're deleting from.  We could:
        // 1. Map from ns to open runners,
        // 2. Map from ns -> (a map of DiskLoc -> runners who care about that DL)
        //
        // We could also queue invalidations somehow and have them processed later in the runner's
        // read locks.
        for (CCById::const_iterator it = clientCursorsById.begin(); it != clientCursorsById.end();
             ++it) {

            ClientCursor* cc = it->second;
            // We're only interested in cursors over one db.
            if (cc->_db != db) { continue; }
            if (NULL == cc->_runner.get()) { continue; }
            cc->_runner->invalidate(dl);
        }

        // Begin cursor-only.  Only cursors that are in ccByLoc are processed here.
        CCByLoc& bl = db->ccByLoc();
        CCByLoc::iterator j = bl.lower_bound(ByLocKey::min(dl));
        CCByLoc::iterator stop = bl.upper_bound(ByLocKey::max(dl));
        if ( j == stop )
            return;

        vector<ClientCursor*> toAdvance;

        while ( 1 ) {
            toAdvance.push_back(j->second);
            DEV verify( j->first.loc == dl );
            ++j;
            if ( j == stop )
                break;
        }

        if( toAdvance.size() >= 3000 ) {
            log() << "perf warning MPW101: " << toAdvance.size() << " cursors for one diskloc "
                  << dl.toString()
                  << ' ' << toAdvance[1000]->_ns
                  << ' ' << toAdvance[2000]->_ns
                  << ' ' << toAdvance[1000]->_pinValue
                  << ' ' << toAdvance[2000]->_pinValue
                  << ' ' << toAdvance[1000]->_pos
                  << ' ' << toAdvance[2000]->_pos
                  << ' ' << toAdvance[1000]->_idleAgeMillis
                  << ' ' << toAdvance[2000]->_idleAgeMillis
                  << ' ' << toAdvance[1000]->_doingDeletes
                  << ' ' << toAdvance[2000]->_doingDeletes
                  << endl;
            //wassert( toAdvance.size() < 5000 );
        }

        for ( vector<ClientCursor*>::iterator i = toAdvance.begin(); i != toAdvance.end(); ++i ) {
            ClientCursor* cc = *i;
            wassert(cc->_db == db);

            if ( cc->_doingDeletes ) continue;

            Cursor *c = cc->_c.get();
            if ( c->capped() ) {
                /* note we cannot advance here. if this condition occurs, writes to the oplog
                   have "caught" the reader.  skipping ahead, the reader would miss postentially
                   important data.
                   */
                delete cc;
                continue;
            }

            c->recoverFromYield();
            DiskLoc tmp1 = c->refLoc();
            if ( tmp1 != dl ) {
                // This might indicate a failure to call ClientCursor::prepareToYield() but it can
                // also happen during correct operation, see SERVER-2009.
                problem() << "warning: cursor loc " << tmp1 << " does not match byLoc position " << dl << " !" << endl;
            }
            else {
                c->advance();
            }
            while (!c->eof() && c->refLoc() == dl) {
                /* We don't delete at EOF because we want to return "no more results" rather than "no such cursor".
                 * The loop is to handle MultiKey indexes where the deleted record is pointed to by multiple adjacent keys.
                 * In that case we need to advance until we get to the next distinct record or EOF.
                 * SERVER-4154
                 * SERVER-5198
                 * But see SERVER-5725.
                 */
                c->advance();
            }
            cc->updateLocation();
        }
        // End cursor-only
    }
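
The TODO inside aboutToDelete() proposes replacing the walk over every open ClientCursor with a targeted index. Below is a sketch of its second suggestion, an ns-to-(DiskLoc-to-runners) map; it is hypothetical (not MongoDB code) and assumes the usual <map>/<set> headers, DiskLoc's ordering operators, and Runner::invalidate() as used above:

    // Hypothetical invalidation index from the TODO: only runners registered
    // for (ns, dl) get notified, instead of every cursor on the database.
    typedef std::map<DiskLoc, std::set<Runner*> > RunnersByLoc;
    typedef std::map<std::string, RunnersByLoc> RunnersByNs;

    static void invalidateTargeted(RunnersByNs& index,
                                   const std::string& ns, const DiskLoc& dl) {
        RunnersByNs::iterator nsIt = index.find(ns);
        if (nsIt == index.end())
            return;
        RunnersByLoc::iterator locIt = nsIt->second.find(dl);
        if (locIt == nsIt->second.end())
            return;
        for (std::set<Runner*>::iterator it = locIt->second.begin();
             it != locIt->second.end(); ++it) {
            (*it)->invalidate(dl);
        }
    }

Registration and cleanup (inserting runners when they open, erasing them when they close) would need the same ccmutex discipline as the existing lists.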
Example #30
File: compact.cpp  Project: rzfish/mongo
/** @return number of skipped (invalid) documents */
unsigned compactExtent(const char *ns, NamespaceDetails *d, const DiskLoc diskloc, int n,
                       int nidx, bool validate, double pf, int pb, bool useDefaultPadding) {

    log() << "compact begin extent #" << n << " for namespace " << ns << endl;
    unsigned oldObjSize = 0; // we'll report what the old padding was
    unsigned oldObjSizeWithPadding = 0;

    Extent *e = diskloc.ext();
    e->assertOk();
    verify( e->validates(diskloc) );
    unsigned skipped = 0;

    Database* db = cc().database();

    {
        // the next/prev pointers within the extent might not be in order so we first
        // page the whole thing in sequentially
        log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
        Timer t;
        DataFile* mdf = db->getFile( diskloc.a() );
        HANDLE fd = mdf->getFd();
        int offset = diskloc.getOfs();
        Extent* ext = diskloc.ext();
        size_t length = ext->length;

        touch_pages(fd, offset, length, ext);
        int ms = t.millis();
        if( ms > 1000 )
            log() << "compact end paging in " << ms << "ms "
                  << e->length/1000000.0/ms << "MB/sec" << endl;
    }

    {
        log() << "compact copying records" << endl;
        long long datasize = 0;
        long long nrecords = 0;
        DiskLoc L = e->firstRecord;
        if( !L.isNull() ) {
            while( 1 ) {
                Record *recOld = L.rec();
                L = db->getExtentManager().getNextRecordInExtent(L);
                BSONObj objOld = BSONObj::make(recOld);

                if( !validate || objOld.valid() ) {
                    nrecords++;
                    unsigned sz = objOld.objsize();

                    oldObjSize += sz;
                    oldObjSizeWithPadding += recOld->netLength();

                    unsigned lenWHdr = sz + Record::HeaderSize;
                    unsigned lenWPadding = lenWHdr;
                    // maintain UsePowerOf2Sizes if no padding values were passed in
                    if (d->isUserFlagSet(NamespaceDetails::Flag_UsePowerOf2Sizes)
                            && useDefaultPadding) {
                        lenWPadding = d->quantizePowerOf2AllocationSpace(lenWPadding);
                    }
                    // otherwise use the padding values (pf and pb) that were passed in
                    else {
                        lenWPadding = static_cast<unsigned>(pf*lenWPadding);
                        lenWPadding += pb;
                        lenWPadding = lenWPadding & quantizeMask(lenWPadding);
                    }
                    if (lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
                        lenWPadding = lenWHdr;
                    }
                    DiskLoc loc = allocateSpaceForANewRecord(ns, d, lenWPadding, false);
                    uassert(14024, "compact error out of space during compaction", !loc.isNull());
                    Record *recNew = loc.rec();
                    datasize += recNew->netLength();
                    recNew = (Record *) getDur().writingPtr(recNew, lenWHdr);
                    addRecordToRecListInExtent(recNew, loc);
                    memcpy(recNew->data(), objOld.objdata(), sz);
                }
                else {
                    if( ++skipped <= 10 )
                        log() << "compact skipping invalid object" << endl;
                }

                if( L.isNull() ) {
                    // we just did the very last record from the old extent.  it's still pointed to
                    // by the old extent ext, but that will be fixed below after this loop
                    break;
                }

                // remove the old records (orphan them) periodically so our commit block doesn't get too large
                bool stopping = false;
                RARELY stopping = *killCurrentOp.checkForInterruptNoAssert() != 0;
                if( stopping || getDur().aCommitIsNeeded() ) {
                    e->firstRecord.writing() = L;
                    Record *r = L.rec();
                    getDur().writingInt(r->prevOfs()) = DiskLoc::NullOfs;
                    getDur().commitIfNeeded();
                    killCurrentOp.checkForInterrupt(false);
                }
            }
        } // if !L.isNull()

        verify( d->firstExtent() == diskloc );
        verify( d->lastExtent() != diskloc );
        DiskLoc newFirst = e->xnext;
        d->firstExtent().writing() = newFirst;
        newFirst.ext()->xprev.writing().Null();
        getDur().writing(e)->markEmpty();
        cc().database()->getExtentManager().freeExtents( diskloc, diskloc );

        // update datasize/record count for this namespace's extent
        d->incrementStats( datasize, nrecords );

        getDur().commitIfNeeded();

        {
            double op = 1.0;
            if( oldObjSize )
                op = static_cast<double>(oldObjSizeWithPadding)/oldObjSize;
            log() << "compact finished extent #" << n << " containing " << nrecords << " documents (" << datasize/1000000.0 << "MB)"
                  << " oldPadding: " << op << ' ' << static_cast<unsigned>(op*100.0)/100
                  << endl;
        }
    }

    return skipped;
}
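
The sizing branch in the copy loop decides how much padding each copied record receives. Here is the pf/pb arm restated as a standalone function; a sketch only, reusing quantizeMask and BSONObjMaxUserSize exactly as the loop above does, with the same out-of-range fallback:

// Sketch of the pf/pb padding computation from compactExtent's copy loop.
// pf scales the record, pb adds fixed bytes, quantizeMask aligns the size;
// an out-of-range result falls back to the unpadded length.
static unsigned paddedRecordLen(unsigned lenWHdr, double pf, int pb) {
    unsigned lenWPadding = static_cast<unsigned>(pf * lenWHdr) + pb;
    lenWPadding = lenWPadding & quantizeMask(lenWPadding);
    if (lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2)
        lenWPadding = lenWHdr;
    return lenWPadding;
}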