MojErr MojDbPerfCreateTest::run()
{
    MojErr err = file.open(CreateTestFileName, MOJ_O_RDWR | MOJ_O_CREAT | MOJ_O_TRUNC, MOJ_S_IRUSR | MOJ_S_IWUSR);
    MojTestErrCheck(err);

    MojString buf;
    err = buf.format("MojoDb Create Performance Test,,,,,\n\nOperation,Kind,Total Time,Time Per Iteration,Time Per Object\n");
    MojTestErrCheck(err);
    err = fileWrite(file, buf);
    MojTestErrCheck(err);

    err = testCreate();
    MojTestErrCheck(err);

    allTestsTime += totalTestTime;

    err = MojPrintF("\n\n TOTAL TEST TIME: %llu nanoseconds. | %10.3f seconds.\n\n", totalTestTime, totalTestTime / 1000000000.0f);
    MojTestErrCheck(err);
    err = MojPrintF("\n-------\n");
    MojTestErrCheck(err);

    err = buf.format("\n\nTOTAL TEST TIME,,%llu,,,", totalTestTime);
    MojTestErrCheck(err);
    err = fileWrite(file, buf);
    MojTestErrCheck(err);

    err = file.close();
    MojTestErrCheck(err);

    return MojErrNone;
}
MojErr MojDbPerfUpdateTest::run()
{
    MojErr err = file.open(UpdateTestFileName, MOJ_O_RDWR | MOJ_O_CREAT | MOJ_O_TRUNC, MOJ_S_IRUSR | MOJ_S_IWUSR);
    MojTestErrCheck(err);

    MojString buf;
    err = buf.format("MojoDb Update Performance Test,,,,,\n\nOperation,Kind,Total Time,Time Per Iteration,Time Per Object\n");
    MojTestErrCheck(err);
    err = fileWrite(file, buf);
    MojTestErrCheck(err);

    MojDb db;
    err = db.open(MojDbTestDir);
    MojTestErrCheck(err);

    err = testPut(db);
    MojTestErrCheck(err);
    err = testMerge(db);
    MojTestErrCheck(err);
    err = testUpdateKind(db);
    MojTestErrCheck(err);

    err = MojPrintF("\n\n TOTAL TEST TIME: %llu microseconds\n\n", totalTestTime.microsecs());
    MojTestErrCheck(err);
    err = MojPrintF("\n-------\n");
    MojTestErrCheck(err);

    err = buf.format("\n\nTOTAL TEST TIME,,%llu,,,", totalTestTime.microsecs());
    MojTestErrCheck(err);
    err = fileWrite(file, buf);
    MojTestErrCheck(err);

    err = db.close();
    MojTestErrCheck(err);
    err = file.close();
    MojTestErrCheck(err);

    return MojErrNone;
}
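// ---------------------------------------------------------------------------
// Hedged sketch, not part of the original test sources: a helper that appends
// one CSV data row matching the "Operation,Kind,Total Time,Time Per Iteration,
// Time Per Object" header written by the run() methods above. The name
// writeCsvRow and its parameters are illustrative only, and it assumes a
// fileWrite() helper with the (MojFile&, const MojString&) shape used by run()
// is in scope.
static MojErr writeCsvRow(MojFile& file, const MojChar* op, const MojChar* kind,
        MojUInt64 totalTime, MojUInt64 numIterations, MojUInt64 numObjects)
{
    MojString row;
    // columns: Operation, Kind, Total Time, Time Per Iteration, Time Per Object
    MojErr err = row.format("%s,%s,%llu,%llu,%llu\n", op, kind, totalTime,
        totalTime / numIterations, totalTime / (numIterations * numObjects));
    MojErrCheck(err);
    err = fileWrite(file, row);
    MojErrCheck(err);
    return MojErrNone;
}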
MojErr MojDb::load(const MojChar* path, MojUInt32& countOut, MojUInt32 flags, MojDbReqRef req)
{
    MojAssert(path);
    MojLogTrace(s_log);

    MojErr err = beginReq(req, true);
    MojErrCheck(err);

    MojFile file;
    err = file.open(path, MOJ_O_RDONLY);
    MojErrCheck(err);

    MojJsonParser parser;
    parser.begin();
    MojSize bytesRead = 0;
    MojObjectBuilder visitor;

    int total_mutexes, mutexes_free, mutexes_used, mutexes_used_highwater, mutex_regionsize;
    m_objDb->mutexStats(&total_mutexes, &mutexes_free, &mutexes_used, &mutexes_used_highwater, &mutex_regionsize);
    MojLogDebug(s_log, _T("Starting load of %s, total_mutexes: %d, mutexes_free: %d, mutexes_used: %d, mutexes_used_highwater: %d, mutex_regionsize: %d\n"),
        path, total_mutexes, mutexes_free, mutexes_used, mutexes_used_highwater, mutex_regionsize);
    int orig_mutexes_used = mutexes_used;

    struct timeval startTime = {0,0}, stopTime = {0,0};
    gettimeofday(&startTime, NULL);
    int total_transaction_time = 0;
    int total = 0;
    int transactions = 0;

    do {
        MojChar buf[MojFile::MojFileBufSize];
        err = file.read(buf, sizeof(buf), bytesRead);
        MojErrCheck(err);

        const MojChar* parseEnd = buf;
        while (parseEnd < (buf + bytesRead)) {
            err = parser.parseChunk(visitor, parseEnd, bytesRead - (parseEnd - buf), parseEnd);
            MojErrCheck(err);
            if (parser.finished()) {
                // store the object
                err = loadImpl(visitor.object(), flags, req);
                MojErrCheck(err);
                countOut++;
                parser.begin();
                visitor.reset();
                total++;

                if ((total % 10) == 0) {
                    // For debugging mutex consumption during load operations, periodically retrieve the mutex stats.
                    m_objDb->mutexStats(&total_mutexes, &mutexes_free, &mutexes_used, &mutexes_used_highwater, &mutex_regionsize);
                    MojLogDebug(s_log, _T("Loading %s record %d, total_mutexes: %d, mutexes_free: %d, mutexes_used: %d, mutexes_used_highwater: %d, mutex_regionsize: %d\n"),
                        path, total, total_mutexes, mutexes_free, mutexes_used, mutexes_used_highwater, mutex_regionsize);
                }

                // If a loadStepSize is configured, break the load up into separate transactions.
                // This is intended to prevent runaway mutex consumption in some scenarios.
                // The transactions do not reverse or prevent mutex consumption, but they appear
                // to slow the growth and eventually cause it to level off.
                if ((m_loadStepSize > 0) && ((total % m_loadStepSize) == 0)) {
                    // Close and reopen the transaction, to prevent a very large transaction from building up.
                    MojLogDebug(s_log, _T("Loading %s record %d, closing and reopening transaction.\n"), path, total);

                    struct timeval transactionStartTime = {0,0}, transactionStopTime = {0,0};
                    gettimeofday(&transactionStartTime, NULL);

                    err = req->end();
                    MojErrCheck(err);
                    err = req->endBatch();
                    MojErrCheck(err);
                    req->beginBatch(); // the beginBatch() for the first transaction happened in MojDbServiceHandlerBase::invokeImpl
                    err = beginReq(req, true);
                    MojErrCheck(err);

                    gettimeofday(&transactionStopTime, NULL);
                    long int elapsedTransactionTimeMS = (transactionStopTime.tv_sec - transactionStartTime.tv_sec) * 1000 +
                        (transactionStopTime.tv_usec - transactionStartTime.tv_usec) / 1000;
                    total_transaction_time += (int)elapsedTransactionTimeMS;
                    transactions++;
                }
            }
        }
    } while (bytesRead > 0);

    err = parser.end(visitor);
    MojErrCheck(err);
    if (parser.finished()) {
        err = loadImpl(visitor.object(), flags, req);
        MojErrCheck(err);
        countOut++;
    } else if (bytesRead > 0) {
        MojErrThrow(MojErrJsonParseEof);
    }

    err = req->end();
    MojErrCheck(err);

    gettimeofday(&stopTime, NULL);
    long int elapsedTimeMS = (stopTime.tv_sec - startTime.tv_sec) * 1000 +
        (stopTime.tv_usec - startTime.tv_usec) / 1000;
    m_objDb->mutexStats(&total_mutexes, &mutexes_free, &mutexes_used, &mutexes_used_highwater, &mutex_regionsize);
    MojLogDebug(s_log, _T("Finished load of %s, total_mutexes: %d, mutexes_free: %d, mutexes_used: %d, mutexes_used_highwater: %d, mutex_regionsize: %d\n"),
        path, total_mutexes, mutexes_free, mutexes_used, mutexes_used_highwater, mutex_regionsize);
    MojLogDebug(s_log, _T("Loaded %s with %d records in %ldms (%dms of that for %d extra transactions), consuming %d mutexes, afterwards %d are available out of %d\n"),
        path, total, elapsedTimeMS, total_transaction_time, transactions, mutexes_used - orig_mutexes_used, mutexes_free, total_mutexes);

    return MojErrNone;
}
MojErr MojDb::dump(const MojChar* path, MojUInt32& countOut, bool incDel, MojDbReqRef req, bool backup,
    MojUInt32 maxBytes, const MojObject* incrementalKey, MojObject* backupResponse)
{
    MojAssert(path);
    MojLogTrace(s_log);

    MojErr err = beginReq(req);
    MojErrCheck(err);

    if (!req->admin()) {
        MojLogError(s_log, _T("access denied: '%s' cannot dump db to path: '%s'"), req->domain().data(), path);
        MojErrThrow(MojErrDbAccessDenied);
    }

    MojFile file;
    err = file.open(path, MOJ_O_WRONLY | MOJ_O_CREAT | MOJ_O_TRUNC, MOJ_S_IRUSR | MOJ_S_IWUSR);
    MojErrCheck(err);

    // write out kinds first, then existing objects, then deleted objects
    MojSize bytesWritten = 0;
    MojSize totalwarns = 0;
    MojSize newwarns = 0;
    MojDbQuery objQuery;
    MojVector<MojObject> kindVec;
    MojObject revParam = -1;
    MojObject delRevParam = -1;

    // if we were given an incremental key, pull out the revs now
    if (incrementalKey) {
        incrementalKey->get(MojDbServiceDefs::RevKey, revParam);
        incrementalKey->get(MojDbServiceDefs::DeletedRevKey, delRevParam);
    }

    err = m_kindEngine.getKinds(kindVec);
    MojErrCheck(err);

    // write kinds - if incremental, only write the kinds that have changed since the respective revs
    MojString countStr;
    for (MojVector<MojObject>::ConstIterator i = kindVec.begin(); i != kindVec.end(); ++i) {
        if (backup) {
            bool backupKind = false;
            i->get(MojDbKind::SyncKey, backupKind);
            if (!backupKind)
                continue;

            MojString id;
            err = i->getRequired(MojDbServiceDefs::IdKey, id);
            MojErrCheck(err);

            MojDbQuery countQuery;
            err = countQuery.from(id);
            MojErrCheck(err);
            MojDbCursor cursor;
            err = find(countQuery, cursor, req);
            MojErrCheck(err);
            MojUInt32 count = 0;
            err = cursor.count(count);
            MojErrCheck(err);
            if (count > 0) {
                if (i != kindVec.begin()) {
                    err = countStr.appendFormat(_T(", "));
                    MojErrCheck(err);
                }
                err = countStr.appendFormat("%s=%u", id.data(), count);
                MojErrCheck(err);
            }
        }
        bool deleted = false;
        i->get(DelKey, deleted);
        MojObject kindRev;
        err = i->getRequired(RevKey, kindRev);
        MojErrCheck(err);
        if ((deleted && kindRev > delRevParam) || (!deleted && kindRev > revParam)) {
            err = dumpObj(file, (*i), bytesWritten, maxBytes);
            MojErrCheck(err);
            countOut++;
        }
    }

    // dump all the non-deleted objects
    err = dumpImpl(file, backup, false, revParam, delRevParam, true, countOut, req, backupResponse, MojDbServiceDefs::RevKey, bytesWritten, newwarns, maxBytes);
    MojErrCheck(err);
    totalwarns += newwarns;

    // If we're supposed to include deleted objects, dump the deleted objects now.
    // There's a chance that we may have run out of space in our backup. If that's the case,
    // we don't want to try to dump deleted objects - we can detect this by looking for the HasMoreKey.
    if (incDel && backupResponse && !backupResponse->contains(MojDbServiceDefs::HasMoreKey)) {
        err = dumpImpl(file, backup, true, revParam, delRevParam, false, countOut, req, backupResponse, MojDbServiceDefs::DeletedRevKey, bytesWritten, newwarns, maxBytes);
        MojErrCheck(err);
    }
    totalwarns += newwarns;

    // Add the Full and Version keys
    if (backup && backupResponse) {
        bool incremental = (incrementalKey != NULL);
        err = backupResponse->putBool(MojDbServiceDefs::FullKey, !incremental);
        MojErrCheck(err);
        err = backupResponse->put(MojDbServiceDefs::VersionKey, DatabaseVersion);
        MojErrCheck(err);
        err = backupResponse->put(MojDbServiceDefs::WarningsKey, (MojInt32)totalwarns);
        MojErrCheck(err);
        MojString description;
        err = description.format(_T("incremental=%u"), countOut);
        MojErrCheck(err);
        if (!countStr.empty()) {
            err = description.appendFormat(_T(", %s"), countStr.data());
            MojErrCheck(err);
        }
        err = backupResponse->put(_T("description"), description);
        MojErrCheck(err);
    }

    err = req->end();
    MojErrCheck(err);

    return MojErrNone;
}
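// ---------------------------------------------------------------------------
// Hedged usage sketch, not part of the original source: dumping the database
// to a file as a full (non-incremental) backup and counting the written
// objects. The function name dumpFullBackup and the path "backup.json" are
// illustrative only; the argument values simply follow the parameter order of
// the dump() signature above, and maxBytes = 0 is assumed here to mean "no
// size limit".
static MojErr dumpFullBackup(MojDb& db)
{
    MojUInt32 count = 0;
    MojObject response;
    // incDel = true to include deleted objects; backup = true so only kinds
    // marked for sync are counted and the Full/Version keys are added to response.
    MojErr err = db.dump(_T("backup.json"), count, true, MojDbReq(), true, 0, NULL, &response);
    MojErrCheck(err);
    err = MojPrintF("dumped %u objects\n", count);
    MojErrCheck(err);
    return MojErrNone;
}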