// Callback for the database id-reservation request made for a batch of new
// emails. Assigns one reserved id to each pending email in order, then kicks
// off SaveEmails(). Any failure is routed through Failure() so the command
// chain terminates cleanly.
MojErr InsertEmailsCommand::ReserverEmailIdsResponse(MojObject& response, MojErr err)
{
    try {
        // Propagate any error carried by the reservation call itself.
        ErrorToException(err);
        MojLogDebug(m_log, "Got reserver ids response: %s", AsJsonString(response).c_str());

        // The response carries an array of database ids reserved for us.
        MojObject idArray;
        err = response.getRequired("ids", idArray);
        ErrorToException(err);

        for (int ndx = 0; ndx < (int)m_emails->size(); ndx++) {
            MojObject id;
            // FIX: the boolean result of at() was previously ignored. If the
            // service reserved fewer ids than we have emails, the extra emails
            // would silently be saved with an unset id. Fail loudly instead.
            if (!idArray.at(ndx, id)) {
                throw MailException("fewer ids reserved than emails to save", __FILE__, __LINE__);
            }

            PopEmail::PopEmailPtr emailPtr = m_emails->at(ndx);
            MojLogDebug(m_log, "Assigning id '%s' to email '%s'",
                    AsJsonString(id).c_str(), emailPtr->GetServerUID().c_str());
            emailPtr->SetId(id);
        }

        // All ids assigned; persist the batch.
        SaveEmails();
    } catch (const std::exception& e) {
        MojLogError(m_log, "Exception in reserving email ID: '%s'", e.what());
        Failure(e);
    } catch (...) {
        MojLogError(m_log, "Unknown exception in reserving email ID");
        Failure(MailException("Unknown exception in reserving email ID", __FILE__, __LINE__));
    }

    return MojErrNone;
}
/*
 * Persist a newly received IM message to the DB.
 *
 * The message object is stamped with our DB kind and handed to the DB client;
 * the put result arrives asynchronously on m_IMSaveMessageSlot.
 *
 * @return MojErr if error - caller will log it
 */
MojErr IncomingIMHandler::saveNewIMMessage(MojRefCountedPtr<IMMessage> incomingMessage)
{
    MojErr err;

    // Hold on to the message we are handling.
    m_IMMessage = incomingMessage;

    // Build the DB representation of the message.
    MojObject messageObj;
    err = m_IMMessage->createDBObject(messageObj);
    MojErrCheck(err);

    // Tag the object with our kind, e.g.
    // luna://com.palm.db/put '{"objects":[{"_kind":"com.palm.test:1","foo":1,"bar":1000}]}'
    err = messageObj.putString(_T("_kind"), PALM_DB_IMMESSAGE_KIND);
    MojErrCheck(err);

    // Log it - OK to show body in debug log.
    MojString jsonStr;
    err = messageObj.toJson(jsonStr);
    MojErrCheck(err);
    MojLogDebug(IMServiceApp::s_log, _T("saving message to db: %s"), jsonStr.data());
    MojLogInfo(IMServiceApp::s_log, _T("saving message to db"));

    // Save it - the put generates a call to the save result handler.
    err = m_dbClient.put(this->m_IMSaveMessageSlot, messageObj);
    MojErrCheck(err);

    return MojErrNone;
}
// Compact the underlying storage. Requires the database to be open;
// the actual work is delegated to the storage engine.
MojErr MojDb::compact()
{
    MojLogTrace(s_log);

    // Refuse to run against a closed database.
    MojErr err = requireOpen();
    MojErrCheck(err);

    MojLogDebug(s_log, _T("compacting..."));

    err = m_storageEngine->compact();
    MojErrCheck(err);

    MojLogDebug(s_log, _T("compaction complete"));

    return MojErrNone;
}
// Break the session out of IMAP-style IDLE by sending DONE.
// Idempotent: m_endingIdle guards against sending DONE more than once.
void IdleCommand::EndIdle() {
    CommandTraceFunction();

    if(!m_endingIdle) {
        m_endingIdle = true;   // only ever send DONE once

        // Tell the server to break out of IDLE
        // It should respond by completing the original IDLE command request
        MojLogDebug(m_log, "sending DONE to break out of idle");

        try {
            if(m_session.GetOutputStream().get() != NULL) {
                m_session.GetOutputStream()->Write("DONE\r\n");
                m_session.GetOutputStream()->Flush();

                if(m_session.GetLineReader().get()) {
                    // Give it 30 seconds to respond to the DONE
                    m_session.GetLineReader()->SetTimeout(30);
                }
            } else {
                // No stream to write to - surface as a failure via the macro below.
                throw MailException("no connection output stream", __FILE__, __LINE__);
            }
        } CATCH_AS_FAILURE
    } else {
        // NOTE(review): remainder of this function lies outside this chunk.
// Activate the watcher with the upper bound key returned by its cursor.
// If a fire arrived before activation and falls within range, deliver it now;
// otherwise remember the limit so out-of-range fires can be rejected later.
MojErr MojDbWatcher::activate(const MojDbKey& limitKey)
{
    MojAssert(m_index);
    MojThreadGuard guard(m_mutex);

    m_state = StateActive;

    // Did a fire arrive before we were activated?
    bool fired = !m_fireKey.empty();

    // Decide whether that earlier fire falls within the new limit.
    bool inRange = false;
    if (fired) {
        if (limitKey.empty()) {
            inRange = true;
        } else if (m_desc) {
            inRange = (m_fireKey >= limitKey);
        } else {
            inRange = (m_fireKey <= limitKey);
        }
    }

    if (inRange) {
        // we were fired before activation, so if the maxKey our cursor returned
        // is >= the minKey with which we were fired, go ahead and do the fire
        MojErr err = fireImpl();
        MojErrCheck(err);
    } else {
        // keep limit so we can reject fires for larger keys
        m_limitKey = limitKey;
    }

    MojLogDebug(MojDb::s_log, _T("Watcher_activate: state= %d; fired = %d; inrange = %d; index name = %s; domain = %s\n"),
            (int)m_state, (int)fired, (int)inRange,
            ((m_index) ? m_index->name().data(): NULL),
            ((m_domain) ? m_domain.data(): NULL));

    return MojErrNone;
}
// Deliver (or defer) a change notification keyed by 'key'.
// Active watchers fire immediately; pending ones remember the smallest
// (or, for descending indexes, largest) key seen so far.
MojErr MojDbWatcher::fire(const MojDbKey& key)
{
    MojThreadGuard guard(m_mutex);

    // ignore all fires greater than our key
    bool limited = !m_limitKey.empty();
    bool inRange;
    if (!limited) {
        inRange = true;
    } else if (m_desc) {
        inRange = (key >= m_limitKey);
    } else {
        inRange = (key <= m_limitKey);
    }

    MojLogDebug(MojDb::s_log, _T("Watcher_fire: state= %d; inrange = %d; limited = %d; index name = %s; domain = %s\n"),
            (int)m_state, (int)inRange, (int)limited,
            ((m_index) ? m_index->name().data() : NULL),
            ((m_domain) ? m_domain.data() : NULL));

    if (!inRange)
        return MojErrNone;

    if (m_state == StateActive) {
        MojErr err = fireImpl();
        MojErrCheck(err);
    } else if (m_state == StatePending) {
        // keep min fire key (or max for desc)
        bool save;
        if (m_desc) {
            save = (key > m_fireKey);
        } else {
            save = (m_fireKey.empty() || key < m_fireKey);
        }
        if (save) {
            m_fireKey = key;
        }
    }

    return MojErrNone;
}
/*
 * Result of query for buddy with given username and serviceName.
 *
 * If the buddy is found, we can send no matter what. If buddy is not found, we need
 * to check whether this service allows sending IM messages to non buddies
 * (gtalk does not allow this, aol does).
 */
MojErr SendOneMessageHandler::findBuddyResult(MojObject& result, MojErr findErr)
{
    if (findErr) {
        MojString errTxt;
        MojErrToString(findErr, errTxt);
        MojLogError(IMServiceApp::s_log, _T("findBuddyResult: error %d - %s"), findErr, errTxt.data());
        // not much we can do here...
        failMessage(ERROR_SEND_GENERIC_ERROR);
        return MojErrNone;
    }

    // log the results
    MojString resultJson;
    result.toJson(resultJson);
    MojLogDebug(IMServiceApp::s_log, _T("findBuddyResult result: %s"), resultJson.data());

    // "results" in result
    MojObject results;
    result.get(_T("results"), results);

    // check to see if array is empty - there should really always be 0 or 1 item here
    if (!results.empty()) {
        // buddy is on the list - send to the transport
        MojLogInfo(IMServiceApp::s_log, _T("findBuddyResult - user is on buddy list. No need to check account template."));
        sendToTransport();
        return MojErrNone;
    }

    MojLogInfo(IMServiceApp::s_log, _T("findBuddyResult - no buddy found. Need to check account template if this is allowed"));

    // Ask accounts for the templates that support messaging:
    //   palm://com.palm.service.accounts/listAccountTemplates {"capability":"MESSAGING"}
    // If the account template includes "chatWithNonBuddies":false, the send should fail.
    MojLogInfo(IMServiceApp::s_log, "Issuing listAccountTemplates request to com.palm.service.accounts");

    // parameters: {"capability":"MESSAGING"}
    MojObject params;
    params.putString("capability", "MESSAGING");

    // Get a request object from the service and issue the call.
    MojRefCountedPtr<MojServiceRequest> req;
    MojErr err = m_service->createRequest(req);
    if (!err) {
        err = req->send(m_listAccountSlot, "com.palm.service.accounts", "listAccountTemplates", params);
    }

    if (err) {
        MojString errTxt;
        MojErrToString(err, errTxt);
        MojLogError(IMServiceApp::s_log, _T("findBuddyResult failed to send request to accounts: error %d - %s"), err, errTxt.data());
        // not much we can do here...don't leave message still pending
        failMessage(ERROR_SEND_GENERIC_ERROR);
    }

    return MojErrNone;
}
void PopAccountUpdateCommand::RunImpl() { MojString json; MojErr err = m_payload.toJson(json); ErrorToException(err); MojLogDebug(m_log, "PopAccountUpdateCommand with payload %s", json.data()); m_accountId = m_client.GetAccountId(); m_activityBuilderFactory->SetAccountId(m_accountId); m_client.GetDatabaseInterface().GetAccount(m_getAccountSlot, m_accountId); }
void PopSession::AutoDownloadEmails(const MojObject& folderId) { if (m_state == State_SyncingEmails || m_state == State_OkToSync) { m_state = State_OkToSync; MojLogDebug(m_log, "Creating command to auto download email bodies for folder '%s'", AsJsonString(folderId).c_str()); MojRefCountedPtr<AutoDownloadCommand> command(new AutoDownloadCommand(*this, folderId)); m_commandManager->QueueCommand(command); CheckQueue(); } }
// Sync the requested folder. POP only supports inbox sync: any other folder
// completes immediately. How the sync starts depends on the sync session's
// state and whether this is a forced (manual) sync.
void SyncFolderCommand::SyncFolder() {
    PopClient::AccountPtr account = m_client.GetAccount();
    MojLogInfo(m_log, "inbox=%s, syncing folder=%s, force=%d", AsJsonString(account->GetInboxFolderId()).c_str(), AsJsonString(m_folderId).c_str(), m_force);

    if (m_folderId != account->GetInboxFolderId()) {
        // POP transport only supports inbox sync.
        MojLogInfo(m_log, "POP transport will skip non-inbox syncing");
        Complete();
        return;
    }

    SyncSessionPtr sSession = m_client.GetOrCreateSyncSession(m_folderId);
    m_activity = ActivityParser::GetActivityFromPayload(m_payload);

    if (!sSession->IsActive() && !sSession->IsEnding()) {
        // since sync session is not running, we can start the folder sync
        MojLogDebug(m_log, "Sync session is not active");
        StartFolderSync(m_activity);
        Complete();
    } else {
        if (m_force) {
            // Manual Sync and session is either active or ending
            // TODO: sync session is in progress, need to terminate the current
            // sync session first. Then re-start sync session.
            StartFolderSync(m_activity);
            Complete();
        } else {
            // This is either auto sync, schedule sync, retry sync.
            if (sSession->IsActive()) {
                if (m_activity.get()) {
                    MojLogInfo(m_log, "Attach sync activity to actively syncing session");
                    // Adopt activity and then add it to sync session
                    m_activity->Adopt(m_client);
                    sSession->AttachActivity(m_activity);
                }
                Complete();
            } else if (sSession->IsEnding()) {
                // wait for sync session to end and then start folder sync
                MojLogInfo(m_log, "Waiting for ending sync session to complete");
                sSession->WaitForSessionComplete(m_syncSessionCompletedSlot);
            }
        }
    }
}
// Fire the watcher's signal and invalidate it. Caller must hold m_mutex.
MojErr MojDbWatcher::fireImpl()
{
    MojAssertMutexLocked(m_mutex);

    // Invalidate first: detach from the index and move to StateInvalid
    // before notifying subscribers.
    MojErr err = invalidate();
    MojErrCheck(err);

    // Deliver the signal. Errors from slots are run through MojErrCatchAll -
    // presumably swallowed so a misbehaving subscriber cannot fail the db
    // operation (the raw err is still logged below). TODO confirm macro semantics.
    err = m_signal.fire();
    MojErrCatchAll(err);

    // Note: m_index is NULL after invalidate(), so the name logs as NULL here.
    MojLogDebug(MojDb::s_log, _T("Watcher_fired!!: err = %d; state= %d; index name = %s; domain = %s\n"), (int)err, (int)m_state, ((m_index) ? m_index->name().data() : NULL), ((m_domain) ? m_domain.data() : NULL));

    return MojErrNone;
}
// Handle cancellation of the watcher's subscription: detach from the index
// unless the watcher was already invalidated.
MojErr MojDbWatcher::handleCancel()
{
    MojThreadGuard guard(m_mutex);

    MojLogDebug(MojDb::s_log, _T("Watcher_handleCancel: state= %d; index name = %s; domain = %s\n"),
            (int)m_state,
            ((m_index) ? m_index->name().data() : NULL),
            ((m_domain) ? m_domain.data() : NULL));

    // Cancelling a watcher that was never registered is an error.
    if (m_index == NULL)
        MojErrThrow(MojErrDbWatcherNotRegistered);

    // Detach from the index unless already invalid.
    if (m_state != StateInvalid) {
        MojErr err = invalidate();
        MojErrCheck(err);
    }

    return MojErrNone;
}
void SyncSession::SyncSessionComplete() { MojLogInfo(m_log, "Sync session completed"); if (m_completedSignal.connected()) { m_completedSignal.fire(); } PopClient::AccountPtr account = m_client.GetAccount(); if (account->IsInitialSync() && !PopErrors::IsNetworkError(m_accountError)) { MojLogDebug(m_log, "Account '%s' is initially synced.", AsJsonString(account->GetAccountId()).c_str()); account->SetInitialSync(false); m_client.GetDatabaseInterface().UpdateAccountInitialSync(m_updateAccountSlot, account->GetAccountId(), account->IsInitialSync()); } else { BaseSyncSession::SyncSessionComplete(); } }
// Start (or queue) a sync of the given folder for this session. POP can only
// sync the inbox. 'force' marks a manual sync, which may force a reconnection
// when the session sits in an error state.
void PopSession::SyncFolder(const MojObject& folderId, bool force) {
    MojLogInfo(m_log, "PopSession %p: inbox=%s, syncing folder=%s, force=%d", this, AsJsonString(m_account->GetInboxFolderId()).c_str(), AsJsonString(folderId).c_str(), force);

    if (folderId != m_account->GetInboxFolderId()) {
        // POP transport only supports inbox sync.
        MojLogInfo(m_log, "PopSession %p will skip non-inbox syncing", this);
        return;
    }

    if (m_state == State_AccountDisabled || m_state == State_AccountDeleted) {
        MojLogInfo(m_log, "Cannot sync an inbox of disabled/deleted account");
        return;
    }

    if (force) {
        // TODO: kills existing inbox sync
        // Manual sync: if we are stuck in an error state, request a fresh connection.
        if (HasLoginError() || HasRetryAccountError() || HasNetworkError()) {
            // In State_Connecting means the socket was just reconnected.
            // We don't want to force reconnection again.
            if(m_state != State_Connecting) {
                m_state = State_NeedsConnection;
            }
        }
    } else if (HasLoginError()) {
        // Non-forced sync with bad credentials: give up and stop any session.
        MojLogInfo(m_log, "Cannot sync inbox of an account with invalid credentials");
        if (m_syncSession.get()) {
            m_syncSession->RequestStop();
        }
        return;
    } else if (m_state == State_SyncingEmails) {
        // A sync is already in flight; don't queue a duplicate.
        MojLogDebug(m_log, "Already syncing inbox emails.");
        return;
    }

    if (m_state == State_NeedsConnection) {
        // Hold queued commands until the connection is re-established.
        m_commandManager->Pause();
    }

    MojLogInfo(m_log, "Creating SyncEmailsCommand");
    MojRefCountedPtr<SyncEmailsCommand> command(new SyncEmailsCommand(*this, folderId, m_uidMap));
    m_syncSession->RequestStart();
    m_requestManager->RegisterCommand(command);
    m_commandManager->QueueCommand(command);
    CheckQueue();
}
// Merge an updated parts list into the email record identified by emailId.
// The email is additionally flagged as downloaded when parts are present,
// or when the update is not part of an auto-download pass.
void MojoDatabase::UpdateEmailParts(Signal::SlotRef slot, const MojObject& emailId, const MojObject& parts, bool autoDownload)
{
    // Build the merge object: id plus the new parts list.
    MojObject email;
    MojErr err = email.put(PopEmailAdapter::ID, emailId);
    ErrorToException(err);
    err = email.put(EmailSchema::PARTS, parts);
    ErrorToException(err);

    bool hasParts = !parts.undefined() && !parts.null() && parts.size() > 0;
    if (hasParts || !autoDownload) {
        err = email.put(PopEmailAdapter::DOWNLOADED, true);
        ErrorToException(err);
    }

    MojLogDebug(PopClient::s_log, "Updating email '%s' with parts: '%s'",
            AsJsonString(emailId).c_str(), AsJsonString(parts).c_str());

    err = m_dbClient.merge(slot, email);
    ErrorToException(err);
}
// Detach this watcher from its index and mark it invalid.
// Caller must hold m_mutex, and the watcher must currently be active.
MojErr MojDbWatcher::invalidate()
{
    MojAssertMutexLocked(m_mutex);
    MojAssert(m_index);
    MojAssert(m_state == StateActive);

    MojLogDebug(MojDb::s_log, _T("Watcher_invalidate: state= %d; index name = %s; domain = %s\n"),
            (int)m_state,
            ((m_index) ? m_index->name().data() : NULL),
            ((m_domain) ? m_domain.data() : NULL));

    // index always drops its mutex before firing, so
    // it's ok to hold our mutex while calling into index
    MojDbIndex* detachedIndex = m_index;
    m_index = NULL;
    m_state = StateInvalid;

    MojErr err = detachedIndex->cancelWatch(this);
    MojErrCheck(err);

    return MojErrNone;
}
/*
 * Parse the account id from the results object for the accountId query.
 * The value is saved in the m_accountId member variable.
 */
MojErr SendOneMessageHandler::readAccountIdFromResults(MojObject& result)
{
    // log the results
    MojString resultJson;
    result.toJson(resultJson);
    MojLogDebug(IMServiceApp::s_log, _T("readAccountIdFromResults result: %s"), resultJson.data());

    // "results" in result
    MojObject results;
    result.get(_T("results"), results);

    // check to see if array is empty - there should really always be 1 item here
    if (results.empty()) {
        MojLogError(IMServiceApp::s_log, _T("readAccountIdFromResults: no matching loginState record found for %s"), m_serviceName.data());
        return MojErrNone;
    }

    // use the first entry; complain if the query matched more than one
    MojObject::ConstArrayIterator itr = results.arrayBegin();
    MojObject loginState = *itr;
    if (++itr != results.arrayEnd()) {
        MojLogError(IMServiceApp::s_log, _T("readAccountIdFromResults: found more than one ImLoginState with same username/serviceName - using the first one"));
    }

    // pull the account id out of the loginState record
    MojErr err = loginState.getRequired("accountId", m_accountId);
    if (err) {
        MojLogError(IMServiceApp::s_log, _T("readAccountIdFromResults: missing accountId in loginState entry"));
    }
    MojLogInfo(IMServiceApp::s_log, _T("readAccountIdFromResults - accountId: %s. "), m_accountId.data());

    return MojErrNone;
}
void SaveEmailCommand::CreateNextCacheObject() { MojErr err; for(; m_partsIt != m_partsArray.arrayEnd(); ++m_partsIt) { MojObject& part = *m_partsIt; bool hasContent; MojString content; err = part.get("content", content, hasContent); ErrorToException(err); if(hasContent) { MojString cacheType; cacheType.assign("email"); bool hasDisplayName; MojString displayName; err = part.get("displayName", displayName, hasDisplayName); ErrorToException(err); MojString cacheFileName; if(hasDisplayName && displayName.length() > 0) { cacheFileName.assign(displayName); } else { // FIXME only for HTML parts cacheFileName.assign("body.html"); } MojLogDebug(m_log, "creating a file cache entry"); m_client.GetFileCacheClient().InsertCacheObject(m_fileCacheInsertedSlot, cacheType, cacheFileName, content.length(), EMAIL_FILECACHE_COST, EMAIL_NO_LIFE_TIME); // Exit method and wait for response return; } } // If this is the last part with content, persist to database now PersistToDatabase(); }
// Callback after the file cache created an entry for the current part.
// Records the returned cache path on the part, advances the part iterator,
// and starts writing the part's content into the cache file.
MojErr SaveEmailCommand::FileCacheObjectInserted(MojObject &response, MojErr err)
{
    try {
        ResponseToException(response, err);

        MojLogDebug(m_log, "successfully created file cache entry");

        // Path of the newly created cache file.
        MojString pathName;
        err = response.getRequired("pathName", pathName);
        ErrorToException(err);

        // Record the path on the part and advance to the next part.
        MojObject& part = *m_partsIt++;
        // FIX: the MojErr returned by putString() was previously ignored;
        // a failed put would leave the part without its cache path.
        err = part.putString(EmailSchema::Part::PATH, pathName);
        ErrorToException(err);

        WriteToFileCache(part, pathName);
    } catch(const std::exception& e) {
        Error(e);
    } catch(...) {
        Error( MailException("unknown", __FILE__, __LINE__) );
    }

    return MojErrNone;
}
// Callback with the account record for the email being saved. Resolves the
// destination folder (drafts vs outbox) from the account and moves on to
// preparing the email parts.
MojErr SaveEmailCommand::GetAccountResponse(MojObject &response, MojErr err)
{
    try {
        ResponseToException(response, err);

        // Pull the single account record out of the response.
        MojObject accountObj;
        DatabaseAdapter::GetOneResult(response, accountObj);

        // Drafts go to the drafts folder; everything else goes to the outbox.
        const char* folderProp = m_isDraft ? EmailAccountAdapter::DRAFTS_FOLDERID
                                           : EmailAccountAdapter::OUTBOX_FOLDERID;
        err = accountObj.getRequired(folderProp, m_folderId);
        ErrorToException(err);

        MojLogDebug(m_log, "preparing to save email to folderId %s", AsJsonString(m_folderId).c_str());

        PrepareParts();
    } catch(const std::exception& e) {
        Error(e);
    } catch(...) {
        Error( MailException("unknown", __FILE__, __LINE__) );
    }

    return MojErrNone;
}
// Completion callback for the email-size calculation pass. Stores the total
// size, and if the server advertised a SIZE limit that the message exceeds,
// switches the write state machine to the error/RSET path before writing.
MojErr SmtpSendMailCommand::FinishCalculateEmailSize(const std::exception* exc)
{
    if (exc) {
        HandleException(*exc, __func__, __FILE__, __LINE__);
        return MojErrNone;
    }

    try {
        m_bytesLeft = m_counter->GetBytesWritten();
        MojLogInfo(m_log, "done calculating email size");
        MojLogDebug(m_log, "email size: %d bytes", m_bytesLeft);

        m_write_state = State_SendMailFrom;

        // If SIZE extension is present, and server specified a fixed maximum size,
        // check calculated mail size against server limit.
        bool haveServerLimit = m_session.GetSizeMax() && m_session.GetSizeMaxValue() > 0;
        if (haveServerLimit && m_bytesLeft > m_session.GetSizeMaxValue()) {
            MojLogInfo(m_log, "Outgoing mail is larger than server stated SIZE, so rejecting immediately");
            m_error.internalError = "Message larger than server stated limit, not trying to send";
            m_error.errorCode = MailError::EMAIL_SIZE_EXCEEDED;
            m_error.errorText = ""; // No server text, cannot present message
            m_error.errorOnEmail = true;
            m_write_state = State_SendErrorRset;
        }

        WriteEmail();
    } catch(const std::exception& e) {
        HandleException(e, __func__, __FILE__, __LINE__);
    } catch(...) {
        HandleUnknownException();
    }

    return MojErrNone;
}
// Register or update a kind definition. An already-known id is reconfigured
// in place; an unknown id results in a new kind being created.
MojErr MojDbKindEngine::putKind(const MojObject& obj, MojDbReq& req, bool builtin)
{
    MojAssert(isOpen());
    MojAssertWriteLocked(m_db->m_schemaLock);
    MojLogTrace(s_log);

    // parse out id
    MojString id;
    MojErr err = obj.getRequired(MojDbServiceDefs::IdKey, id);
    MojErrCheck(err);

    KindMap::ConstIterator i = m_kinds.find(id);
    if (i != m_kinds.end()) {
        // existing kind - reconfigure it in place
        MojLogDebug(s_log, _T("UpdatingKind: %s \n"), id.data());
        err = (*i)->configure(obj, m_kinds, m_locale, req);
        MojErrCheck(err);
    } else {
        // new kind
        err = createKind(id, obj, req, builtin);
        MojErrCheck(err);
    }

    return MojErrNone;
}
// POP session state-machine driver: inspect the current state and run or
// queue whichever command moves the session forward. States without a case
// body are "waiting" states - a command already in flight will advance them.
void PopSession::CheckQueue() {
    MojLogDebug(m_log, "PopSession: Checking the queue. Current state: %d", m_state);

    switch(m_state) {
    case State_None: {
        // No account configured yet - nothing can run.
        MojLogCritical(m_log, "unable to run commands, no account available");
        break;
    }
    case State_NeedsConnection: {
        MojLogInfo(m_log, "State: NeedsConnection");
        MojRefCountedPtr<ConnectCommand> command(new ConnectCommand(*this));
        m_commandManager->RunCommand(command);
        m_state = State_Connecting;
        m_canShutdown = false;  // a connection is now in flight
        break;
    }
    case State_Connecting: {
        // Waiting for the connect command to finish.
        MojLogInfo(m_log, "State: Connecting");
        break;
    }
    case State_CheckTlsSupport: {
        // Probe the server for TLS support before negotiating.
        MojLogInfo(m_log, "State: CheckTlsSupport");
        MojRefCountedPtr<CheckTlsCommand> command(new CheckTlsCommand(*this));
        m_commandManager->RunCommand(command);
        m_state = State_PendingTlsSupport;
        break;
    }
    case State_NegotiateTlsConnection: {
        MojLogInfo(m_log, "State: NegotiateTlsConnect");
        MojRefCountedPtr<NegotiateTlsCommand> command(new NegotiateTlsCommand(*this));
        m_commandManager->RunCommand(command);
        m_state = State_PendingTlsNegotiation;
        break;
    }
    case State_UsernameRequired: {
        // Send USER, then wait in State_PendingLogin.
        MojLogInfo(m_log, "State: UsernameRequired");
        MojRefCountedPtr<UserCommand> command(new UserCommand(*this, m_account->GetUsername()));
        m_commandManager->RunCommand(command);
        m_state = State_PendingLogin;
        break;
    }
    case State_PasswordRequired: {
        // Send PASS, then wait in State_PendingLogin.
        MojLogInfo(m_log, "State: PasswordRequired");
        MojRefCountedPtr<PasswordCommand> command(new PasswordCommand(*this, m_account->GetPassword()));
        m_commandManager->RunCommand(command);
        m_state = State_PendingLogin;
        break;
    }
    case State_PendingLogin: {
        // Waiting for the login exchange to finish.
        break;
    }
    case State_NeedUidMap: {
        // Build the UID-to-message-number map before syncing.
        MojLogInfo(m_log, "State: NeedUidMap");
        MojRefCountedPtr<CreateUidMapCommand> command(new CreateUidMapCommand(*this, m_uidMap));
        m_commandManager->RunCommand(command);
        m_state = State_GettingUidMap;
        break;
    }
    case State_GettingUidMap: {
        MojLogInfo(m_log, "State: GettingUidMap");
        break;
    }
    case State_OkToSync: {
        // Session is fully set up - release any queued commands.
        MojLogInfo(m_log, "State: OkToSync");
        MojLogInfo(m_log, "%i command(s) to run in queue", m_commandManager->GetPendingCommandCount());
        m_commandManager->Resume();
        RunCommandsInQueue();
        break;
    }
    case State_CancelPendingCommands: {
        MojLogInfo(m_log, "State: CancelPendingCommands");
        CancelCommands();
        break;
    }
    case State_InvalidCredentials: {
        // Nothing can proceed until credentials are fixed.
        break;
    }
    case State_AccountDisabled: {
        MojLogInfo(m_log, "State: AccountDisabled");
        CancelCommands();
        break;
    }
    case State_AccountDeleted: {
        break;
    }
    case State_LogOut: {
        MojLogInfo(m_log, "State: LogOut");
        LogOut();
        break;
    }
    case State_LoggingOut: {
        // Waiting for the logout to complete.
        break;
    }
    default: {
    }
    }
}
// Callback with a page of locally-deleted emails. Records each deleted email's
// id/uid, removes matching entries from the reconcile maps (they need no
// further reconciliation), then either fetches the next page or moves on to
// the old-emails cache.
MojErr ReconcileEmailsCommand::GetDeletedEmailsResponse(MojObject& response, MojErr err)
{
    try {
        MojLogDebug(m_log, "Got local deleted emails response: %s", AsJsonString(response).c_str());

        // Check database response
        ErrorToException(err);

        MojObject results;
        err = response.getRequired("results", results);
        ErrorToException(err);

        if (results.size() > 0) {
            MojLogInfo(m_log, "Got %d local deleted emails", results.size());
        }

        MojObject::ArrayIterator it;
        err = results.arrayBegin(it);
        ErrorToException(err);

        // Read email properties: _id, UID, flags
        // If you need additional properties, modify the database query
        for (; it != results.arrayEnd(); ++it) {
            MojObject& emailObj = *it;

            MojObject id;
            err = emailObj.getRequired(PopEmailAdapter::ID, id);
            ErrorToException(err);

            MojString uid;
            err = emailObj.getRequired(PopEmailAdapter::SERVER_UID, uid);
            ErrorToException(err);

            std::string uidStr(uid);
            m_localDeletedEmailUids.push_back(LocalDeletedEmailInfo(id, uidStr));

            // FIX: previously an unconditional operator[] lookup was used, which
            // default-inserted an empty (null) entry for every uid NOT under
            // reconciliation, polluting m_reconcileUidMap. Probe first instead.
            if (m_reconcileUidMap.count(uidStr) > 0) {
                ReconcileInfoPtr infoPtr = m_reconcileUidMap[uidStr];
                if (infoPtr.get()) {
                    m_reconcileUidMap.erase(uidStr);
                    UidMap::MessageInfo info = infoPtr->GetMessageInfo();
                    int msgNum = info.GetMessageNumber();
                    m_reconcileMsgNumMap.erase(msgNum);
                }
            }
        }

        bool hasMore = DatabaseAdapter::GetNextPage(response, m_localEmailsPage);
        if (hasMore) {
            // Get next batch of emails
            MojLogDebug(m_log, "getting another batch of local deleted emails");
            GetLocalDeletedEmails();
        } else {
            MojLogDebug(m_log, "Got old emails cache");
            GetUidCache();
        }
    } catch(const std::exception& e) {
        MojLogError(m_log, "Failed to sync folder: %s", e.what());
        Failure(e);
    } catch(...) {
        MojLogError(m_log, "Uncaught exception %s", __PRETTY_FUNCTION__);
        MailException exc("Unable to get deleted emails", __FILE__, __LINE__);
        Failure(exc);
    }

    return MojErrNone;
}
// Callback with the database record of the email to send. Parses it into
// m_email, flattens To/Cc/Bcc into the m_toAddress recipient list, and
// proceeds to the size-calculation step.
MojErr SmtpSendMailCommand::GetEmailResponse(MojObject& response, MojErr err)
{
    try {
        ResponseToException(response, err);

        MojString json;
        response.toJson(json);
        MojLogInfo(m_log, "got Email response");
        MojLogDebug(m_log, "Response: %s", json.data());

        // check the database result first
        ErrorToException(err);

        MojObject results;
        err = response.getRequired(_T("results"), results);
        ErrorToException(err);

        if(results.size() == 0) {
            throw MailException("email not found", __FILE__, __LINE__);
        }

        // There should only be one email for a given id
        assert(results.size() == 1);

        MojObject emailObj;
        bool hasObject = results.at(0, emailObj);
        if(!hasObject) {
            throw MailException("error getting email from results", __FILE__, __LINE__);
        }

        EmailAdapter::ParseDatabaseObject(emailObj, m_email);

        // Build the SMTP recipient list: every To/Cc/Bcc address, in order.
        m_toAddress.clear();
        if (m_email.GetTo()) {
            for (EmailAddressList::iterator i = m_email.GetTo()->begin(); i != m_email.GetTo()->end(); i++)
                m_toAddress.push_back((*i)->GetAddress());
        }
        if (m_email.GetCc()) {
            for (EmailAddressList::iterator i = m_email.GetCc()->begin(); i != m_email.GetCc()->end(); i++)
                m_toAddress.push_back((*i)->GetAddress());
        }
        if (m_email.GetBcc()) {
            for (EmailAddressList::iterator i = m_email.GetBcc()->begin(); i != m_email.GetBcc()->end(); i++)
                m_toAddress.push_back((*i)->GetAddress());
        }
        m_toIdx = 0;   // next recipient to send RCPT TO for

        // FIXME: Subscribe to file cache to pin cache contents
        CalculateEmailSize();
    } catch (const std::exception& e) {
        HandleException(e, __func__, __FILE__, __LINE__);
    } catch (...) {
        HandleUnknownException();
    }

    return MojErrNone;
}
// Callback with a page of local (non-deleted) emails. Reconciles each against
// the server uid map: emails gone from the server are queued for deletion (or
// kept, depending on account policy); emails older than the sync window are
// aged out to the old-emails cache; everything else is marked downloaded.
MojErr ReconcileEmailsCommand::GetLocalEmailsResponse(MojObject& response, MojErr err)
{
    try {
        MojLogDebug(m_log, "Got local emails response: %s", AsJsonString(response).c_str());

        // Check database response
        ErrorToException(err);

        MojObject results;
        err = response.getRequired("results", results);
        ErrorToException(err);

        MojObject::ArrayIterator it;
        err = results.arrayBegin(it);
        ErrorToException(err);

        for (; it != results.arrayEnd(); ++it) {
            MojObject& emailObj = *it;
            MojErr err;

            MojObject id;
            err = emailObj.getRequired(PopEmailAdapter::ID, id);
            ErrorToException(err);

            MojString uid;
            err = emailObj.getRequired(PopEmailAdapter::SERVER_UID, uid);
            ErrorToException(err);
            std::string uidStr(uid);

            MojInt64 timestamp;
            err = emailObj.getRequired(EmailSchema::TIMESTAMP, timestamp);
            // FIX: this error was previously ignored, so a record missing its
            // timestamp would be reconciled against an uninitialized value.
            ErrorToException(err);

            if (!m_uidMap->HasUid(uidStr)) {
                // email is not found in server
                if (!m_account->IsDeleteOnDevice()) {
                    // if the user chooses not to delete emails on device that
                    // have been deleted from the server, that means these emails
                    // won't been deleted. So we need to include these emails as wells.
                    m_messageCount++;
                }

                // TODO: need to handle the case when an email is moved from other
                // folders into inbox. May add a virtual flag in POP email to indicate
                // that the email has been locally move into inbox.
                m_serverDeletedEmailIds.push(id);
            } else {
                // NOTE(review): operator[] default-inserts when uidStr is absent,
                // which would leave 'info' null and crash below; HasUid() returning
                // true presumably guarantees the entry exists - verify.
                boost::shared_ptr<ReconcileInfo> info = m_reconcileUidMap[uidStr];

                if (timestamp < m_cutOffTime) {
                    // email is beyond sync window
                    // add id to the array of IDs that will be deleted soon
                    m_oldEmailIds.push(id);

                    // add this old email into old emails' cache
                    m_uidCache.GetOldEmailsCache().AddToCache(std::string(uid), timestamp);
                    m_uidCache.GetOldEmailsCache().SetChanged(true);

                    // mark reconcile info as old email
                    info->SetStatus(Status_Old_Email);
                    info->SetTimestamp(timestamp);
                } else {
                    // email is still within sync window
                    // mark reconcile info as downloaded
                    info->SetStatus(Status_Downloaded);
                    info->SetTimestamp(timestamp);

                    if (timestamp > m_latestEmailTimestamp) {
                        m_latestEmailTimestamp = timestamp;
                    }

                    // keep track of how many emails will be remained in the inbox
                    m_messageCount++;
                }
            }
        }

        bool hasMore = DatabaseAdapter::GetNextPage(response, m_localEmailsPage);
        if (hasMore) {
            // Get next batch of emails
            MojLogDebug(m_log, "getting another batch of local emails");
            GetLocalEmails();
        } else {
            MojLogDebug(m_log, "Got local deleted emails cache");
            GetLocalDeletedEmails();
        }
    } catch(const std::exception& e) {
        MojLogError(m_log, "Failed to get local emails: %s", e.what());
        Failure(e);
    } catch(...) {
        MojLogError(m_log, "Uncaught exception %s", __PRETTY_FUNCTION__);
        MailException exc("Unable to get local emails", __FILE__, __LINE__);
        Failure(exc);
    }

    return MojErrNone;
}
// Purge objects that were deleted more than numDays ago (negative numDays
// selects the configured purge window). Writes a rev/timestamp marker object,
// finds the newest RevTimestamp record at or before the cutoff, and purges in
// AutoBatchSize batches, committing between batches. countOut receives the
// total number of purged objects.
MojErr MojDb::purge(MojUInt32& countOut, MojInt64 numDays, MojDbReqRef req)
{
    MojLogTrace(s_log);

    countOut = 0;
    if (numDays <= -1) {
        numDays = m_purgeWindow;
    }

    MojErr err = beginReq(req);
    MojErrCheck(err);

    MojLogDebug(s_log, _T("purging objects deleted more than %lld days ago..."), numDays);

    MojTime time;
    err = MojGetCurrentTime(time);
    MojErrCheck(err);

    // store the revision number to current timestamp mapping
    MojObject revTimeMapping;
    MojInt64 rev;
    err = nextId(rev);
    MojErrCheck(err);
    err = revTimeMapping.put(RevNumKey, rev);
    MojErrCheck(err);
    err = revTimeMapping.put(TimestampKey, time.microsecs());
    MojErrCheck(err);
    err = revTimeMapping.putString(KindKey, MojDbKindEngine::RevTimestampId);
    MojErrCheck(err);
    err = putImpl(revTimeMapping, MojDb::FlagNone, req);
    MojErrCheck(err);

    // find the revision number for numDays prior to now
    MojInt64 purgeTime = time.microsecs() - (MojTime::UnitsPerDay * numDays);
    MojDbQuery query;
    err = query.from(MojDbKindEngine::RevTimestampId);
    MojErrCheck(err);
    query.limit(1);
    err = query.where(TimestampKey, MojDbQuery::OpLessThanEq, purgeTime);
    MojErrCheck(err);
    err = query.order(TimestampKey);
    MojErrCheck(err);
    // Newest record at or before the cutoff (descending, limit 1).
    query.desc(true);

    MojDbCursor cursor;
    err = findImpl(query, cursor, NULL, req, OpDelete);
    MojErrCheck(err);
    bool found = false;
    MojObject obj;
    err = cursor.get(obj, found);
    MojErrCheck(err);
    err = cursor.close();
    MojErrCheck(err);

    MojUInt32 batchCount = 0;
    MojUInt32 totalCount = 0;

    // Repeatedly purge against the same cutoff marker until a batch comes
    // back short, which signals there is nothing left to purge.
    while ((found)) {
        // Do it in AutoBatchSize batches
        batchCount = 0;
        req->fixmode(true); // purge even if index mis-matches
        err = purgeImpl(obj, batchCount, req);
        MojLogDebug(s_log, _T("purge batch processed: batch: %d; total: %d; err = %d\n"), batchCount, (totalCount + batchCount), err);
        MojErrCheck(err);
        totalCount += batchCount;
        countOut = totalCount;
        if (batchCount < AutoBatchSize) // last batch
            break;
        // Commit between batches so the transaction stays bounded.
        err = commitBatch(req);
        MojErrCheck(err);
        continue;
    }

    // end request
    err = req->end();
    MojErrCheck(err);

    MojLogDebug(s_log, _T("purged %d objects"), countOut);

    return MojErrNone;
}
// Dump objects to 'file' for backup/restore. In backup mode only synced
// objects with a rev newer than revParam (or delRevParam for deleted objects)
// are written. Ghost/corrupt index hits are skipped and counted in 'warns'.
// When maxBytes fills up mid-dump, an incremental key for the last completed
// rev is inserted into 'response' so the next backup can resume.
MojErr MojDb::dumpImpl(MojFile& file, bool backup, bool incDel, const MojObject& revParam, const MojObject& delRevParam, bool skipKinds, MojUInt32& countOut, MojDbReq& req, MojObject* response, const MojChar* keyName, MojSize& bytesWritten, MojSize& warns, MojUInt32 maxBytes)
{
    // query for objects, adding the backup key and rev key if necessary
    MojDbQuery query;
    MojErr err = query.from(MojDbKindEngine::RootKindId);
    MojErrCheck(err);
    err = query.where(MojDb::DelKey, MojDbQuery::OpEq, incDel);
    MojErrCheck(err);
    if (backup) {
        err = query.where(MojDb::SyncKey, MojDbQuery::OpEq, true);
        MojErrCheck(err);
        if (incDel) {
            err = query.where(MojDb::RevKey, MojDbQuery::OpGreaterThan, delRevParam);
            MojErrCheck(err);
        } else {
            err = query.where(MojDb::RevKey, MojDbQuery::OpGreaterThan, revParam);
            MojErrCheck(err);
        }
    }

    MojDbCursor cursor;
    err = findImpl(query, cursor, NULL, req, OpRead);
    MojErrCheck(err);

    warns = 0;
    MojObject curRev;   // rev of the last object successfully written
    for (;;) {
        bool found = false;
        MojObject obj;
        err = cursor.get(obj, found);
        // So that we can get as much data as possible from a corrupt database
        // We simply skip ghost keys and continue
        if (err == MojErrInternalIndexOnFind) {
            warns++;
            continue;
        }
        MojErrCheck(err);
        if (!found)
            break;
        if (skipKinds) {
            // Optionally leave kind-definition objects out of the dump.
            MojString kind;
            err = obj.getRequired(KindKey, kind);
            MojErrCheck(err);
            if (kind == MojDbKindEngine::KindKindId) {
                continue;
            }
        }

        // write out each object, if the backup is full, insert the appropriate incremental key
        err = dumpObj(file, obj, bytesWritten, maxBytes);
        MojErrCatch(err, MojErrDbBackupFull) {
            if (response) {
                MojErr errBackup = MojErrNone;
                if (!curRev.undefined()) {
                    // Resume point is the last rev we managed to write.
                    errBackup = insertIncrementalKey(*response, keyName, curRev);
                    MojErrCheck(errBackup);
                } else {
                    // Nothing written yet - resume from the incoming rev param.
                    errBackup = insertIncrementalKey(*response, keyName, incDel ? delRevParam: revParam);
                    MojErrCheck(errBackup);
                }
                errBackup = handleBackupFull(revParam, delRevParam, *response, keyName);
                MojErrCheck(errBackup);
            }
            return MojErrNone;
        }
        MojErrCheck(err);
        err = obj.getRequired(MojDb::RevKey, curRev);
        MojErrCheck(err);
        countOut++;
    }

    err = cursor.close();
    MojErrCheck(err);

    if (warns > 0) {
        MojLogWarning(s_log, _T("Finished Backup with %d warnings \n"), (int)warns);
    } else {
        MojLogDebug(s_log, _T("Finished Backup with no warnings \n"));
    }

    // construct the next incremental key
    if (response && !curRev.undefined()) {
        err = insertIncrementalKey(*response, keyName, curRev);
        MojErrCheck(err);
    }

    return MojErrNone;
}
MojErr MojDbBerkeleyEngine::compact() { const char * DatabaseRoot = "/var/db"; // FIXME: Should not be hard-coded, but so is the disk space monitor! struct statvfs statAtBeginning, statAfterCompact, statAtEnd; MojLogTrace(MojDbBerkeleyEngine::s_log); struct timeval totalStartTime = {0,0}, totalStopTime = {0,0}; gettimeofday(&totalStartTime, NULL); memset(&statAtBeginning, '\0', sizeof(statAtBeginning)); ::statvfs(DatabaseRoot, &statAtBeginning); const int blockSize = (int)statAtBeginning.f_bsize; // checkpoint before compact MojErr err = m_env->checkpoint(0); MojErrCheck(err); memset(&statAfterCompact, '\0', sizeof(statAfterCompact)); ::statvfs(DatabaseRoot, &statAfterCompact); int pre_compact_reclaimed_blocks = (int)(statAfterCompact.f_bfree - statAtBeginning.f_bfree); MojLogDebug(s_log, _T("Starting compact: Checkpoint freed %d bytes. Volume %s has %lu bytes free out of %lu bytes (%.1f full)\n"), pre_compact_reclaimed_blocks * blockSize, DatabaseRoot, statAfterCompact.f_bfree * blockSize, statAfterCompact.f_blocks * blockSize, (float)(statAfterCompact.f_blocks - statAfterCompact.f_bfree) * 100.0 / (float)statAfterCompact.f_blocks); // Retrieve setting for record count used to break up compact operations const int stepSize = m_env->compactStepSize(); memset(&statAtBeginning, '\0', sizeof(statAtBeginning)); ::statvfs(DatabaseRoot, &statAtBeginning); int total_pages_examined = 0, total_pages_freed = 0, total_pages_truncated = 0; int max_pages_examined = 0, max_pages_freed = 0, max_pages_truncated = 0; int total_log_generation_blocks = 0, total_reclaimed_blocks = 0; int max_log_generation_blocks = 0, max_reclaimed_blocks = 0; int total_compact_time = 0, total_step_time = 0; int max_compact_time = 0, max_step_time = 0; int total_key_total = 0, total_value_total = 0; int max_key_total = 0, max_value_total = 0; MojThreadGuard guard(m_dbMutex); // call compact on each database for (DatabaseVec::ConstIterator i = m_dbs.begin(); i != m_dbs.end(); ++i) { DB* db = 
(*i)->impl(); DB_COMPACT c_data; MojZero(&c_data, sizeof(c_data)); DBC * dbc = NULL; int dbErr; DBT key1, key2; DBT value; memset(&key1, '\0', sizeof(key1)); memset(&key2, '\0', sizeof(key2)); memset(&value, '\0', sizeof(value)); key1.flags = DB_DBT_REALLOC; key2.flags = DB_DBT_REALLOC; value.flags = DB_DBT_REALLOC; int key1_count = 0, key2_count = 0; dbErr = 0; // Continue compacting the database by chunks until we run into an error. If a stepSize // isn't configured, don't chunk it at all. while ((stepSize >= 1) && (dbErr == 0)) { // Construct key to step forward by a set number of records, to select the compact window. // We close the cursor after we've found the next key, so it won't keep a lock open that // could disrupt the compaction. Without locking, we might miss an insertion or deletion // happening between compactions, but that int key_total = 0, value_total = 0; // Tracked only for debugging purposes. dbErr = db->cursor(db, NULL, &dbc, 0); if (dbErr == 0) { if (key1.data == NULL) { // Move the cursor to the beginning of the database dbErr = dbc->get(dbc, &key1, &value, DB_FIRST); key_total += key1.size; value_total += value.size; // discard key1, we don't want the key for the beginning if (key1.data) free(key1.data); key1.data = NULL; key1.size = 0; } else { // move the cursor to the location of the prior key. // If that exact key is missing, this should choose the // next one. dbErr = dbc->get(dbc, &key1, &value, DB_SET_RANGE); } int elapsedStepTimeMS = 0; if (dbErr == DB_NOTFOUND) { // If we didn't find a first key, the DB is presumably empty, // and we shouldn't search for the end key. dbErr = 0; if (key1.data) free(key1.data); key1.data = NULL; key1.size = 0; if (key2.data) free(key2.data); key2.data = NULL; key2.size = 0; } else if (dbErr == 0) { int count; // Move the cursor forward by the chosen stepSize. // May exit early with error DB_NOTFOUND, indicating end of database. 
struct timeval startTime = {0,0}, stopTime = {0,0}; gettimeofday(&startTime, NULL); for (count = 0; (dbErr == 0) && (count < stepSize); count++) { dbErr = dbc->get(dbc, &key2, &value, DB_NEXT); key_total += key2.size; value_total += value.size; } key2_count = key1_count + count; if (dbErr == DB_NOTFOUND) { dbErr = 0; if (key2.data) free(key2.data); key2.data = NULL; key2.size = 0; } gettimeofday(&stopTime, NULL); elapsedStepTimeMS = (int)(stopTime.tv_sec - startTime.tv_sec) * 1000 + (int)(stopTime.tv_usec - startTime.tv_usec) / 1000; } dbc->close(dbc); if (dbErr != 0) break; // Compact from key1 to key2. (The documentation says it starts at 'the // smallest key greater than or equal to the specified key', and ends at // 'the page with the smallest key greater than the specified key'. I don't // know exactly what that means regarding inclusivity, so this procedure may // not be fully compacting the pages which contain the keys.) MojLogDebug(s_log, _T("Compacting %s (partial from ~record %d to %d). Stepped over %d/%d bytes of keys/values in %dms.\n"), (*i)->m_name.data(), key1_count, key2_count, key_total, value_total, elapsedStepTimeMS); struct statvfs statBeforeCompact, statAfterCompact, statAfterCheckpoint; memset(&statBeforeCompact, '\0', sizeof(statBeforeCompact)); ::statvfs(DatabaseRoot, &statBeforeCompact); struct timeval startTime = {0,0}, stopTime = {0,0}; gettimeofday(&startTime, NULL); MojZero(&c_data, sizeof(c_data)); dbErr = db->compact(db, NULL, key1.data ? &key1 : NULL, key2.data ? 
&key2 : NULL, &c_data, DB_FREE_SPACE, NULL); gettimeofday(&stopTime, NULL); int elapsedCompactTimeMS = (int)(stopTime.tv_sec - startTime.tv_sec) * 1000 + (int)(stopTime.tv_usec - startTime.tv_usec) / 1000; MojLogDebug(s_log, _T("Compact stats of %s (partial from ~record %d to %d): time %dms, compact_deadlock=%d, compact_pages_examine=%d, compact_pages_free=%d, compact_levels=%d, compact_pages_truncated=%d\n"), (*i)->m_name.data(), key1_count, key2_count, elapsedCompactTimeMS, c_data.compact_deadlock, c_data.compact_pages_examine, c_data.compact_pages_free, c_data.compact_levels, c_data.compact_pages_truncated); total_compact_time += elapsedCompactTimeMS; if (elapsedCompactTimeMS > max_compact_time) max_compact_time = elapsedCompactTimeMS; total_step_time += elapsedStepTimeMS; if (elapsedStepTimeMS > max_step_time) max_step_time = elapsedStepTimeMS; total_key_total += key_total; if (key_total > max_key_total) max_key_total = key_total; total_value_total += value_total; if (value_total > max_value_total) max_value_total = value_total; total_pages_examined += c_data.compact_pages_examine; if ((int)c_data.compact_pages_examine > max_pages_examined) max_pages_examined = c_data.compact_pages_examine; total_pages_freed += c_data.compact_pages_free; if ((int)c_data.compact_pages_free > max_pages_freed) max_pages_freed = c_data.compact_pages_free; total_pages_truncated += c_data.compact_pages_truncated; if ((int)c_data.compact_pages_truncated > max_pages_truncated) max_pages_truncated = c_data.compact_pages_truncated; memset(&statAfterCompact, '\0', sizeof(statAfterCompact)); ::statvfs(DatabaseRoot, &statAfterCompact); int log_generation_blocks = (int)(statBeforeCompact.f_bfree - statAfterCompact.f_bfree); total_log_generation_blocks += log_generation_blocks; if (log_generation_blocks > max_log_generation_blocks) max_log_generation_blocks = log_generation_blocks; err = m_env->checkpoint(0); MojErrCheck(err); memset(&statAfterCompact, '\0', sizeof(statAfterCheckpoint)); 
::statvfs(DatabaseRoot, &statAfterCheckpoint); int reclaimed_blocks = (int)(statAfterCheckpoint.f_bfree - statBeforeCompact.f_bfree); total_reclaimed_blocks += reclaimed_blocks; if (reclaimed_blocks > max_reclaimed_blocks) max_reclaimed_blocks = reclaimed_blocks; MojLogDebug(s_log, _T("Compact of %s (partial from ~record %d to %d) generated %d bytes of log data, ultimately reclaiming %d bytes after checkpoint.\n"), (*i)->m_name.data(), key1_count, key2_count, log_generation_blocks * blockSize, reclaimed_blocks * blockSize); // copy key2 over key1 if (key1.data) free(key1.data); key1.data = key2.data; key1.size = key2.size; key2.data = NULL; key2.size = 0; key1_count = key2_count; // if key2 was empty, then we are done. if (key1.data == NULL) break; } } if (key1.data) free(key1.data); if (key2.data) free(key2.data); if (value.data) free(value.data); // If no step size was configured, fall back and do a complete compact. Do the same // if there was an error performing the chunked compaction. The complete compact risks // running out of disk space, but that's preferable to not compacting at all, which will // also likely eventually lead to running out of space. if (dbErr == DB_LOCK_DEADLOCK) { // But for deadlock, we should just give up, as this might // happen in normal use. 
MojBdbErrCheck(dbErr, _T("cursor and compact deadlocked")); } if ((stepSize <= 1) || (dbErr != 0)) { MojLogDebug(s_log, "Compacting %s\n", (*i)->m_name.data()); struct statvfs statBeforeCompact, statAfterCompact, statAfterCheckpoint; memset(&statBeforeCompact, '\0', sizeof(statBeforeCompact)); ::statvfs(DatabaseRoot, &statBeforeCompact); struct timeval startTime = {0,0}, stopTime = {0,0}; gettimeofday(&startTime, NULL); MojZero(&c_data, sizeof(c_data)); dbErr = db->compact(db, NULL, NULL, NULL, &c_data, DB_FREE_SPACE, NULL); gettimeofday(&stopTime, NULL); int elapsedCompactTimeMS = (int)(stopTime.tv_sec - startTime.tv_sec) * 1000 + (int)(stopTime.tv_usec - startTime.tv_usec) / 1000; total_compact_time += elapsedCompactTimeMS; if (elapsedCompactTimeMS > max_compact_time) max_compact_time = elapsedCompactTimeMS; MojLogDebug(s_log, "Compact stats of %s: time %dms, compact_deadlock=%d, compact_pages_examine=%d, compact_pages_free=%d, compact_levels=%d, compact_pages_truncated=%d\n", (*i)->m_name.data(), elapsedCompactTimeMS, c_data.compact_deadlock, c_data.compact_pages_examine, c_data.compact_pages_free, c_data.compact_levels, c_data.compact_pages_truncated); total_pages_examined += c_data.compact_pages_examine; if ((int)c_data.compact_pages_examine > max_pages_examined) max_pages_examined = c_data.compact_pages_examine; total_pages_freed += c_data.compact_pages_free; if ((int)c_data.compact_pages_free > max_pages_freed) max_pages_freed = c_data.compact_pages_free; total_pages_truncated += c_data.compact_pages_truncated; if ((int)c_data.compact_pages_truncated > max_pages_truncated) max_pages_truncated = c_data.compact_pages_truncated; memset(&statAfterCompact, '\0', sizeof(statAfterCompact)); ::statvfs(DatabaseRoot, &statAfterCompact); int log_generation_blocks = (int)(statBeforeCompact.f_bfree - statAfterCompact.f_bfree); total_log_generation_blocks += log_generation_blocks; if (log_generation_blocks > max_log_generation_blocks) max_log_generation_blocks = 
log_generation_blocks; err = m_env->checkpoint(0); MojErrCheck(err); memset(&statAfterCompact, '\0', sizeof(statAfterCheckpoint)); ::statvfs(DatabaseRoot, &statAfterCheckpoint); int reclaimed_blocks = (int)(statAfterCheckpoint.f_bfree - statBeforeCompact.f_bfree); total_reclaimed_blocks += reclaimed_blocks; if (reclaimed_blocks > max_reclaimed_blocks) max_reclaimed_blocks = reclaimed_blocks; MojLogDebug(s_log, "Compact of %s generated %d bytes of log data, ultimately reclaiming %d bytes after checkpoint.\n", (*i)->m_name.data(), log_generation_blocks * blockSize, reclaimed_blocks * blockSize); } MojBdbErrCheck(dbErr, _T("db->compact")); } guard.unlock(); gettimeofday(&totalStopTime, NULL); int elapsedTotalMS = (int)(totalStopTime.tv_sec - totalStartTime.tv_sec) * 1000 + (int)(totalStopTime.tv_usec - totalStartTime.tv_usec) / 1000; memset(&statAtEnd, '\0', sizeof(statAtEnd)); ::statvfs(DatabaseRoot, &statAtEnd); int compact_freed_blocks = (int)(statAtEnd.f_bfree - statAtBeginning.f_bfree); MojLogDebug(s_log, _T("During compact: %d db pages examined (max burst %d), %d db pages freed (max burst %d), " "%d db pages truncated (max burst %d), " "%d log bytes created by compacts (max burst %d), " "%d bytes reclaimed by checkpoints (max burst %d), " "%d bytes of keys stepped over (max burst %d), " "%d bytes of values stepped over (max burst %d), " "%dms spent in stepping (max burst %dms), " "%dms spent in compact (max burst %dms)\n"), total_pages_examined, max_pages_examined, total_pages_freed, max_pages_freed, total_pages_truncated, max_pages_truncated, total_log_generation_blocks * blockSize, max_log_generation_blocks * blockSize, total_reclaimed_blocks * blockSize, max_reclaimed_blocks * blockSize, total_key_total, max_key_total, total_value_total, max_value_total, total_step_time, max_step_time, total_compact_time, max_step_time ); MojLogDebug(s_log, _T("Compact complete: took %dms, freed %d bytes (including pre-checkpoint of %d bytes). 
Volume %s has %lu bytes free out of %lu bytes (%.1f full)\n"), elapsedTotalMS, compact_freed_blocks * blockSize, pre_compact_reclaimed_blocks * blockSize, DatabaseRoot, statAfterCompact.f_bfree * blockSize, statAfterCompact.f_blocks * blockSize, (float)(statAfterCompact.f_blocks - statAfterCompact.f_bfree) * 100.0 / (float)statAfterCompact.f_blocks); return MojErrNone; }
/**
 * Load a file of JSON objects into the database.
 *
 * The file at 'path' is read in chunks and parsed incrementally; each
 * completed top-level JSON object is stored via loadImpl(). If a load step
 * size is configured (m_loadStepSize > 0), the enclosing transaction is
 * closed and reopened every m_loadStepSize records to bound transaction size
 * (observed to limit run-away mutex consumption — see inline comments).
 * Mutex statistics are logged periodically for debugging.
 *
 * @param path      file to load (must be non-NULL)
 * @param countOut  incremented once per object stored (not reset here)
 * @param flags     passed through to loadImpl()
 * @param req       batch/transaction context for the whole load
 * @return MojErrNone on success, or the first error from file I/O,
 *         JSON parsing, or storage.
 */
MojErr MojDb::load(const MojChar* path, MojUInt32& countOut, MojUInt32 flags, MojDbReqRef req)
{
	MojAssert(path);
	MojLogTrace(s_log);

	MojErr err = beginReq(req, true);
	MojErrCheck(err);

	MojFile file;
	err = file.open(path, MOJ_O_RDONLY);
	MojErrCheck(err);

	MojJsonParser parser;
	parser.begin();
	MojSize bytesRead = 0;
	MojObjectBuilder visitor;

	// For debugging mutex consumption during load operations, record the
	// engine's mutex statistics before, during, and after the load.
	int total_mutexes, mutexes_free, mutexes_used, mutexes_used_highwater, mutex_regionsize;
	m_objDb->mutexStats(&total_mutexes, &mutexes_free, &mutexes_used, &mutexes_used_highwater, &mutex_regionsize);

	// BUGFIX: log text read "&mutex_regionsize: %d" — stray '&' typo in the label.
	MojLogDebug(s_log, _T("Starting load of %s, total_mutexes: %d, mutexes_free: %d, mutexes_used: %d, mutexes_used_highwater: %d, mutex_regionsize: %d\n"),
		path, total_mutexes, mutexes_free, mutexes_used, mutexes_used_highwater, mutex_regionsize);

	int orig_mutexes_used = mutexes_used;

	struct timeval startTime = {0,0}, stopTime = {0,0};

	gettimeofday(&startTime, NULL);

	int total_transaction_time = 0;

	int total = 0;
	int transactions = 0;

	do {
		MojChar buf[MojFile::MojFileBufSize];
		err = file.read(buf, sizeof(buf), bytesRead);
		MojErrCheck(err);

		const MojChar* parseEnd = buf;
		while (parseEnd < (buf + bytesRead)) {
			err = parser.parseChunk(visitor, parseEnd, bytesRead - (parseEnd - buf), parseEnd);
			MojErrCheck(err);
			if (parser.finished()) {
				//store the object
				err = loadImpl(visitor.object(), flags, req);
				MojErrCheck(err);
				countOut++;
				parser.begin();
				visitor.reset();

				total++;

				if ((total % 10) == 0) {
					// For debugging mutex consumption during load operations, we periodically retrieve the mutex stats.
					m_objDb->mutexStats(&total_mutexes, &mutexes_free, &mutexes_used, &mutexes_used_highwater, &mutex_regionsize);

					// BUGFIX: same stray '&' typo in this label.
					MojLogDebug(s_log, _T("Loading %s record %d, total_mutexes: %d, mutexes_free: %d, mutexes_used: %d, mutexes_used_highwater: %d, mutex_regionsize: %d\n"),
						path, total, total_mutexes, mutexes_free, mutexes_used, mutexes_used_highwater, mutex_regionsize);
				}

				// If a loadStepSize is configured, then break up the load into separate transactions.
				// This is intended to prevent run-away mutex consumption in some particular scenarios.
				// The transactions do not reverse or prevent mutex consumption, but seem to reduce the
				// growth and eventually cause it to level off.
				if ((m_loadStepSize > 0) && ((total % m_loadStepSize) == 0)) {
					// Close and reopen transaction, to prevent a very large transaction from building up.
					MojLogDebug(s_log, _T("Loading %s record %d, closing and reopening transaction.\n"), path, total);

					struct timeval transactionStartTime = {0,0}, transactionStopTime = {0,0};

					gettimeofday(&transactionStartTime, NULL);

					err = req->end();
					MojErrCheck(err);

					err = req->endBatch();
					MojErrCheck(err);

					req->beginBatch(); // beginBatch() invocation for first transaction happened in MojDbServiceHandlerBase::invokeImpl

					err = beginReq(req, true);
					MojErrCheck(err);

					gettimeofday(&transactionStopTime, NULL);

					long int elapsedTransactionTimeMS = (transactionStopTime.tv_sec - transactionStartTime.tv_sec) * 1000 +
						(transactionStopTime.tv_usec - transactionStartTime.tv_usec) / 1000;

					total_transaction_time += (int)elapsedTransactionTimeMS;

					transactions++;
				}
			}
		}
	} while (bytesRead > 0);

	err = parser.end(visitor);
	MojErrCheck(err);
	if (parser.finished()) {
		err = loadImpl(visitor.object(), flags, req);
		MojErrCheck(err);
		countOut++;
	} else if (bytesRead > 0) {
		// NOTE(review): this branch looks unreachable — the do/while above only exits
		// when bytesRead == 0, so a truncated JSON document never reaches this throw.
		// Incomplete input is presumably caught by parser.end() instead — verify.
		MojErrThrow(MojErrJsonParseEof);
	}

	err = req->end();
	MojErrCheck(err);

	gettimeofday(&stopTime, NULL);

	long int elapsedTimeMS = (stopTime.tv_sec - startTime.tv_sec) * 1000 +
		(stopTime.tv_usec - startTime.tv_usec) / 1000;

	m_objDb->mutexStats(&total_mutexes, &mutexes_free, &mutexes_used, &mutexes_used_highwater, &mutex_regionsize);

	// BUGFIX: same stray '&' typo in this label.
	MojLogDebug(s_log, _T("Finished load of %s, total_mutexes: %d, mutexes_free: %d, mutexes_used: %d, mutexes_used_highwater: %d, mutex_regionsize: %d\n"),
		path, total_mutexes, mutexes_free, mutexes_used, mutexes_used_highwater, mutex_regionsize);

	MojLogDebug(s_log, _T("Loaded %s with %d records in %ldms (%dms of that for %d extra transactions), consuming %d mutexes, afterwards %d are available out of %d\n"),
		path, total, elapsedTimeMS, total_transaction_time, transactions,
		mutexes_used - orig_mutexes_used, mutexes_free, total_mutexes);

	return MojErrNone;
}