/** locking in read lock when called */
static void _go(AlignedBuilder& bb) {
    if( wi._writes.empty() )
        return;

    PREPLOGBUFFER(bb);

    // todo: add double buffering so we can be (not even read locked) during WRITETOJOURNAL
    WRITETOJOURNAL(bb);

    // write the noted write intent entries to the data files
    WRITETODATAFILES();

    wi.clear();
    REMAPPRIVATEVIEW();
}
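// A minimal, hypothetical sketch of what the write-intent list `wi._writes` used above
// could look like: a list of (pointer, length) ranges declared by the write path and later
// copied into the journal buffer by PREPLOGBUFFER. The names WriteIntent, Writes and note()
// are illustrative assumptions, not the actual MongoDB declarations.
#include <vector>

struct WriteIntent {
    void* p;       // start of the declared write within the private view
    unsigned len;  // number of bytes that may have been modified
};

struct Writes {
    std::vector<WriteIntent> _writes;

    // the write path declares "I am about to modify [p, p+len)" before touching memory
    void note(void* p, unsigned len) { _writes.push_back(WriteIntent{p, len}); }

    bool empty() const { return _writes.empty(); }
    void clear() { _writes.clear(); }
};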
/** locking in read lock when called
    @see MongoMMF::close()
*/
static void groupCommit() {
    dbMutex.assertAtLeastReadLocked();

    if( !commitJob.hasWritten() )
        return;

    PREPLOGBUFFER();

    WRITETOJOURNAL(commitJob._ab);

    // data is now in the journal, which is sufficient for acknowledging getlasterror.
    // (ok to crash after that)
    log() << "TEMP NOTIFYING COMMITTED" << endl;
    commitJob.notifyCommitted();

    // write the noted write intent entries to the data files.
    // this has to come after writing to the journal, obviously...
    MongoFile::markAllWritable(); // for _DEBUG. normally we don't write in a read lock
    WRITETODATAFILES();
    if (!dbMutex.isWriteLocked())
        MongoFile::unmarkAllWritable();

    commitJob.reset();

    // REMAPPRIVATEVIEW
    //
    // remapping private views must occur after WRITETODATAFILES otherwise
    // we wouldn't see newly written data on reads.
    //
    DEV assert( !commitJob.hasWritten() );
    if( !dbMutex.isWriteLocked() ) {
        // this needs to be done in a write lock, so we do it on the next acquisition of that
        // lock instead of here (there is no rush if you aren't writing anyway -- but when it
        // does happen, it must happen before any uncommitted writes occur).
        //
        dbMutex._remapPrivateViewRequested = true;
    }
    else {
        // however, if we are already write locked, we must do it now -- up the call tree someone
        // may do a write without a new lock acquisition. this can happen when MongoMMF::close()
        // calls this method when a file (and its views) is about to go away.
        //
        REMAPPRIVATEVIEW();
    }
}
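// A hypothetical sketch of the consumer side of the `_remapPrivateViewRequested` flag set
// above: on the next write-lock acquisition the flag is checked and the remap performed while
// nothing else can write. The class and method names below are illustrative stand-ins, not
// the real dbMutex implementation.
struct DbMutexRemapSketch {
    bool _remapPrivateViewRequested = false;

    // imagined hook run immediately after the write lock has been acquired
    void onWriteLockAcquired() {
        if (_remapPrivateViewRequested) {
            _remapPrivateViewRequested = false;
            remapPrivateView();  // stand-in for REMAPPRIVATEVIEW()
        }
    }

    static void remapPrivateView() {
        // in the real code this re-establishes the copy-on-write private views over the
        // shared views, which is only safe after WRITETODATAFILES has completed
    }
};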
static void _groupCommit() {
    LOG(4) << "_groupCommit " << endl;

    {
        AlignedBuilder &ab = __theBuilder;

        // we need to make sure two group commits aren't running at the same time
        // (and we are only read locked in the dbMutex, so it could happen -- while
        // there is only one dur thread, "early commits" can be done by other threads)
        SimpleMutex::scoped_lock lk(commitJob.groupCommitMutex);

        commitJob.committingBegin();

        if( !commitJob.hasWritten() ) {
            // getlasterror request could have come after the data was already committed
            commitJob.committingNotifyCommitted();
        }
        else {
            JSectHeader h;
            PREPLOGBUFFER(h, ab);

            // todo : write to the journal outside locks, as this write can be slow.
            //        however, be careful then about remapprivateview as that cannot be done
            //        if new writes are then pending in the private maps.
            WRITETOJOURNAL(h, ab);

            // data is now in the journal, which is sufficient for acknowledging getLastError.
            // (ok to crash after that)
            commitJob.committingNotifyCommitted();

            WRITETODATAFILES(h, ab);
            debugValidateAllMapsMatch();

            commitJob.committingReset();
            ab.reset();
        }
    }
}
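// A hedged sketch of the notification primitive implied by committingNotifyCommitted():
// a getLastError waiter takes a ticket before blocking, and the group-commit thread releases
// all tickets once WRITETOJOURNAL has returned (the point at which it is "ok to crash").
// NotifyAllSketch is written for illustration and is not the class used in the codebase.
#include <condition_variable>
#include <mutex>

class NotifyAllSketch {
public:
    typedef unsigned long long When;

    When now() {                       // waiter: take a ticket before blocking
        std::lock_guard<std::mutex> lk(_m);
        return ++_lastIssued;
    }

    void waitFor(When e) {             // waiter: block until ticket `e` has been released
        std::unique_lock<std::mutex> lk(_m);
        _cv.wait(lk, [&] { return _lastDone >= e; });
    }

    void notifyAll(When e) {           // committer: the journal write covering ticket `e` is durable
        std::lock_guard<std::mutex> lk(_m);
        _lastDone = e;
        _cv.notify_all();
    }

private:
    std::mutex _m;
    std::condition_variable _cv;
    When _lastIssued = 0;
    When _lastDone = 0;
};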
void JournalWriter::_journalWriterThread() {
    Client::initThread("journal writer");

    log() << "Journal writer thread started";

    try {
        while (true) {
            Buffer* const buffer = [&] {
                MONGO_IDLE_THREAD_BLOCK;
                return _journalQueue.blockingPop();
            }();
            BufferGuard bufferGuard(buffer, &_readyQueue);

            if (buffer->_isShutdown) {
                invariant(buffer->_builder.len() == 0);

                // The journal writer thread is terminating. Nothing to notify or write.
                break;
            }

            if (buffer->_isNoop) {
                invariant(buffer->_builder.len() == 0);

                // There's nothing to be written, but we still need to notify this commit number
                _commitNotify->notifyAll(buffer->_commitNumber);
                _applyToDataFilesNotify->notifyAll(buffer->_commitNumber);
                continue;
            }

            LOG(4) << "Journaling commit number " << buffer->_commitNumber << " (journal file "
                   << buffer->_header.fileId << ", sequence " << buffer->_header.seqNumber
                   << ", size " << buffer->_builder.len() << " bytes)";

            // This performs synchronous I/O to the journal file and will block.
            WRITETOJOURNAL(buffer->_header, buffer->_builder);

            // Data is now persisted in the journal, which is sufficient for acknowledging
            // durability.
            dur::getJournalListener()->onDurable(buffer->journalListenerToken);
            _commitNotify->notifyAll(buffer->_commitNumber);

            // Apply the journal entries on top of the shared view so that when flush is
            // requested it would write the latest.
            WRITETODATAFILES(cc().makeOperationContext().get(), buffer->_header, buffer->_builder);

            // Data is now persisted on the shared view, so notify any potential journal file
            // cleanup waiters.
            _applyToDataFilesNotify->notifyAll(buffer->_commitNumber);
        }
    } catch (const DBException& e) {
        severe() << "dbexception in journalWriterThread causing immediate shutdown: " << redact(e);
        MONGO_UNREACHABLE;
    } catch (const std::ios_base::failure& e) {
        severe() << "ios_base exception in journalWriterThread causing immediate shutdown: "
                 << e.what();
        MONGO_UNREACHABLE;
    } catch (const std::bad_alloc& e) {
        severe() << "bad_alloc exception in journalWriterThread causing immediate shutdown: "
                 << e.what();
        MONGO_UNREACHABLE;
    } catch (const std::exception& e) {
        severe() << "exception in journalWriterThread causing immediate shutdown: "
                 << redact(e.what());
        MONGO_UNREACHABLE;
    } catch (...) {
        severe() << "unhandled exception in journalWriterThread causing immediate shutdown";
        MONGO_UNREACHABLE;
    }

    log() << "Journal writer thread stopped";
}
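// A hedged sketch of the producer/consumer handoff behind _journalQueue.blockingPop():
// the commit thread pushes a filled buffer and the journal writer thread sleeps until one
// arrives. BlockingQueueSketch is a minimal stand-in, not MongoDB's actual queue type.
#include <condition_variable>
#include <deque>
#include <mutex>

template <typename T>
class BlockingQueueSketch {
public:
    void push(T v) {                    // commit thread: hand a buffer to the journal writer
        {
            std::lock_guard<std::mutex> lk(_m);
            _q.push_back(std::move(v));
        }
        _cv.notify_one();
    }

    T blockingPop() {                   // journal writer thread: block until a buffer arrives
        std::unique_lock<std::mutex> lk(_m);
        _cv.wait(lk, [&] { return !_q.empty(); });
        T v = std::move(_q.front());
        _q.pop_front();
        return v;
    }

private:
    std::mutex _m;
    std::condition_variable _cv;
    std::deque<T> _q;
};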