static void remapPrivateView() { try { // REMAPPRIVATEVIEW // // remapping private views must occur after WRITETODATAFILES otherwise // we wouldn't see newly written data on reads. // invariant(!commitJob.hasWritten()); stats.curr->_commitsInWriteLock++; REMAPPRIVATEVIEW(); } catch (DBException& e) { log() << "dbexception in remapPrivateView causing immediate shutdown: " << e.toString() << endl; mongoAbort("gc1"); } catch (std::ios_base::failure& e) { log() << "ios_base exception in remapPrivateView causing immediate shutdown: " << e.what() << endl; mongoAbort("gc2"); } catch (std::bad_alloc& e) { log() << "bad_alloc exception in remapPrivateView causing immediate shutdown: " << e.what() << endl; mongoAbort("gc3"); } catch (std::exception& e) { log() << "exception in remapPrivateView causing immediate shutdown: " << e.what() << endl; mongoAbort("gc4"); } LOG(4) << "remapPrivateView end" << endl; }
/** locking: in at least 'R' when called or, for early commits (commitIfNeeded), in W or X @param lwg set if the durcommitthread *only* -- then we will upgrade the lock to W so we can remapprivateview. only durcommitthread calls with lgw != 0 as more than one thread upgrading would deadlock @see DurableMappedFile::close() */ static void groupCommit() { try { _groupCommit(); } catch(DBException& e ) { log() << "dbexception in groupCommit causing immediate shutdown: " << e.toString() << endl; mongoAbort("gc1"); } catch(std::ios_base::failure& e) { log() << "ios_base exception in groupCommit causing immediate shutdown: " << e.what() << endl; mongoAbort("gc2"); } catch(std::bad_alloc& e) { log() << "bad_alloc exception in groupCommit causing immediate shutdown: " << e.what() << endl; mongoAbort("gc3"); } catch(std::exception& e) { log() << "exception in groupCommit causing immediate shutdown: " << e.what() << endl; mongoAbort("gc4"); } LOG(4) << "groupCommit end" << endl; }
/**
 * Orderly server shutdown. The steps run in a deliberate, order-critical
 * sequence: stop accepting new connections, flush the diaglog, close client
 * sockets, wait for file preallocation, do a final journaled commit (when
 * durability is on), close all memory-mapped files, clean up the journal,
 * and finally release the filesystem lock file.
 */
static void shutdownServer() {
    log() << "shutdown: going to close listening sockets..." << endl;
    ListeningSockets::get()->closeAll();

    log() << "shutdown: going to flush diaglog..." << endl;
    _diaglog.flush();

    /* must do this before unmapping mem or you may get a seg fault */
    log() << "shutdown: going to close sockets..." << endl;
    // socket close runs on its own thread so shutdown can proceed concurrently
    boost::thread close_socket_thread( boost::bind(MessagingPort::closeAllSockets, 0) );

    // wait until file preallocation finishes
    // we would only hang here if the file_allocator code generates a
    // synchronous signal, which we don't expect
    log() << "shutdown: waiting for fs preallocator..." << endl;
    FileAllocator::get()->waitUntilFinished();

    if( cmdLine.dur ) {
        log() << "shutdown: lock for final commit..." << endl;
        {
            // up to 10 attempts at a 20-second lock wait each before giving up
            int n = 10;
            while( 1 ) {
                // we may already be in a read lock from earlier in the call stack, so do read lock here
                // to be consistent with that.
                readlocktry w(20000);
                if( w.got() ) {
                    log() << "shutdown: final commit..." << endl;
                    getDur().commitNow();
                    break;
                }
                if( --n <= 0 ) {
                    log() << "shutdown: couldn't acquire write lock, aborting" << endl;
                    mongoAbort("couldn't acquire write lock");
                }
                log() << "shutdown: waiting for write lock..." << endl;
            }
        }
        // synchronous flush of all mapped files now that the final commit is down
        MemoryMappedFile::flushAll(true);
    }

    log() << "shutdown: closing all files..." << endl;
    stringstream ss3;
    MemoryMappedFile::closeAllFiles( ss3 );
    log() << ss3.str() << endl;

    if( cmdLine.dur ) {
        // journal files can only be discarded after everything above succeeded
        dur::journalCleanup(true);
    }

#if !defined(__sunos__)
    if ( lockFile ) {
        log() << "shutdown: removing fs lock..." << endl;
        /* This ought to be an unlink(), but Eliot says the last time that was
           attempted, there was a race condition with acquirePathLock(). */
        // so we truncate the lock file to zero length instead of removing it
#ifdef _WIN32
        if( _chsize( lockFile , 0 ) )
            log() << "couldn't remove fs lock " << errnoWithDescription(_doserrno) << endl;
        CloseHandle(lockFileHandle);
#else
        if( ftruncate( lockFile , 0 ) )
            log() << "couldn't remove fs lock " << errnoWithDescription() << endl;
        flock( lockFile, LOCK_UN );
#endif
    }
#endif
}
/**
 * Entry point for the durability ("journal") background thread.
 *
 * Loops until shutdown is requested: waits up to the configured commit
 * interval (split into thirds so pending j:true waiters and large
 * uncommitted batches can trigger an earlier commit), then acquires the
 * MMAPv1 flush lock and performs a group commit followed by a private-view
 * remap. Any escaping exception aborts the process — a half-done commit
 * cannot be recovered from here.
 */
static void durThread() {
    Client::initThread("journal");

    bool samePartition = true;
    try {
        const std::string dbpathDir =
            boost::filesystem::path(storageGlobalParams.dbpath).string();
        samePartition = onSamePartition(getJournalDir().string(), dbpathDir);
    }
    catch(...) {
        // best-effort probe; on failure keep the conservative default (true)
    }

    while (shutdownRequested.loadRelaxed() == 0) {
        unsigned ms = storageGlobalParams.journalCommitInterval;
        if( ms == 0 ) {
            // default interval: 100ms when journal and data share a
            // partition, 30ms when they are on separate partitions
            ms = samePartition ? 100 : 30;
        }

        unsigned oneThird = (ms / 3) + 1; // +1 so never zero

        try {
            stats.rotate();

            boost::mutex::scoped_lock lock(flushMutex);

            // commit sooner if one or more getLastError j:true is pending
            // (wait in up-to-three slices of oneThird ms, re-checking each time)
            for (unsigned i = 0; i <= 2; i++) {
                if (flushRequested.timed_wait(lock, Milliseconds(oneThird))) {
                    // Someone forced a flush
                    break;
                }

                // a j:true waiter is blocked on this commit
                if (commitJob._notify.nWaiting())
                    break;

                // uncommitted data is piling up; commit before hitting the limit
                if (commitJob.bytes() > UncommittedBytesLimit / 2)
                    break;
            }

            OperationContextImpl txn;

            // Waits for all active operations to drain and won't let new ones start. This
            // should be optimized to allow readers in (see SERVER-15262).
            AutoAcquireFlushLockForMMAPV1Commit flushLock(txn.lockState());

            groupCommit();
            remapPrivateView();
        }
        catch(std::exception& e) {
            log() << "exception in durThread causing immediate shutdown: " << e.what() << endl;
            mongoAbort("exception in durThread");
        }
        catch (...) {
            log() << "unhandled exception in durThread causing immediate shutdown" << endl;
            mongoAbort("unhandled exception in durThread");
        }
    }

    cc().shutdown();
}