/**
 * Make one ChunkSize-aligned chunk of the private (copy-on-write) views
 * writable by remapping every view region intersecting the chunk as
 * PAGE_WRITECOPY.  Called lazily on first write into a chunk; terminates
 * the process via fassertFailed if VirtualProtect fails.
 *
 * @param chunkno  index of the chunk (chunk address = chunkno * ChunkSize)
 */
__declspec(noinline) void makeChunkWritable(size_t chunkno) {
    scoped_lock lk(mapViewMutex);

    if( writable.get(chunkno) ) // double check lock
        return;

    // remap all maps in this chunk.
    // common case is a single map, but could have more than one with smallfiles or .ns files
    size_t chunkStart = chunkno * MemoryMappedFile::ChunkSize;
    size_t chunkNext = chunkStart + MemoryMappedFile::ChunkSize;

    scoped_lock lk2(privateViews._mutex());
    map<void*,DurableMappedFile*>::iterator i =
        privateViews.finditer_inlock((void*) (chunkNext-1));
    while( 1 ) {
        // walk views downward from the top of the chunk until one ends
        // entirely below chunkStart (or we hit the null sentinel entry)
        const pair<void*,DurableMappedFile*> x = *(--i);
        DurableMappedFile *mmf = x.second;
        if( mmf == 0 )
            break;

        size_t viewStart = (size_t) x.first;
        size_t viewEnd = (size_t) (viewStart + mmf->length());
        if( viewEnd <= chunkStart )
            break;

        // protect only the intersection of [viewStart, viewEnd) with the chunk
        size_t protectStart = max(viewStart, chunkStart);
        dassert(protectStart < chunkNext);

        size_t protectEnd = min(viewEnd, chunkNext);
        size_t protectSize = protectEnd - protectStart;
        dassert(protectSize > 0 && protectSize <= MemoryMappedFile::ChunkSize);

        DWORD oldProtection;
        bool ok = VirtualProtect( reinterpret_cast<void*>( protectStart ),
                                  protectSize,
                                  PAGE_WRITECOPY,
                                  &oldProtection );
        if ( !ok ) {
            DWORD dosError = GetLastError();

            if ( dosError == ERROR_COMMITMENT_LIMIT ) {
                // System has run out of memory between physical RAM & page file,
                // tell the user before terminating
                BSONObjBuilder bb;
                ProcessInfo p;
                p.getExtraInfo(bb);
                log() << "MongoDB has exhausted the system memory capacity.";
                log() << "Current Memory Status: " << bb.obj().toString();
            }

            // any VirtualProtect failure here is unrecoverable: log and abort
            log() << "VirtualProtect for " << mmf->filename()
                  << " chunk " << chunkno
                  << " failed with " << errnoWithDescription( dosError )
                  << " (chunk size is " << protectSize
                  << ", address is " << hex << protectStart << dec << ")"
                  << " in mongo::makeChunkWritable, terminating" << endl;

            fassertFailed( 16362 );
        }
    }

    // mark the chunk writable so later faults on it short-circuit above
    writable.set(chunkno);
}
/** notification on unmapping so we can clear writable bits */ void MemoryMappedFile::clearWritableBits(void *p) { for( unsigned i = ((size_t)p)/ChunkSize; i <= (((size_t)p)+len)/ChunkSize; i++ ) { writable.clear(i); verify( !writable.get(i) ); } }
// Make one ChunkSize-aligned chunk of the private (copy-on-write) views
// writable by remapping every view region that intersects the chunk as
// PAGE_WRITECOPY.  On VirtualProtect failure the process is terminated
// (fassertFailed), with an extra memory-status report when the failure is
// ERROR_COMMITMENT_LIMIT (RAM + pagefile exhausted).
// NOTE(review): the --i walk assumes privateViews contains a sentinel entry
// with a null DurableMappedFile* below the lowest view — confirm that
// invariant is established where privateViews is populated.
__declspec(noinline) void makeChunkWritable(size_t chunkno) {
    scoped_lock lk(mapViewMutex);

    if( writable.get(chunkno) ) // double check lock
        return;

    // remap all maps in this chunk. common case is a single map, but could have more than one with smallfiles or .ns files
    size_t chunkStart = chunkno * MemoryMappedFile::ChunkSize;
    size_t chunkNext = chunkStart + MemoryMappedFile::ChunkSize;

    scoped_lock lk2(privateViews._mutex());
    // start at the highest view at or below the last byte of the chunk,
    // then walk downward through lower-addressed views
    map<void*,DurableMappedFile*>::iterator i = privateViews.finditer_inlock((void*) (chunkNext-1));
    while( 1 ) {
        const pair<void*,DurableMappedFile*> x = *(--i);
        DurableMappedFile *mmf = x.second;
        if( mmf == 0 )
            break; // hit the sentinel: no more views below
        size_t viewStart = (size_t) x.first;
        size_t viewEnd = (size_t) (viewStart + mmf->length());
        if( viewEnd <= chunkStart )
            break; // this view (and every lower one) ends before the chunk
        // protect only the intersection of [viewStart, viewEnd) with the chunk
        size_t protectStart = max(viewStart, chunkStart);
        dassert(protectStart<chunkNext);
        size_t protectEnd = min(viewEnd, chunkNext);
        size_t protectSize = protectEnd - protectStart;
        dassert(protectSize>0&&protectSize<=MemoryMappedFile::ChunkSize);
        DWORD oldProtection;
        bool ok = VirtualProtect( reinterpret_cast<void*>( protectStart ),
                                  protectSize,
                                  PAGE_WRITECOPY,
                                  &oldProtection );
        if ( !ok ) {
            DWORD dosError = GetLastError();
            if (dosError == ERROR_COMMITMENT_LIMIT) {
                // System has run out of memory between physical RAM & page file, tell the user
                BSONObjBuilder bb;
                ProcessInfo p;
                p.getExtraInfo(bb);
                log() << "MongoDB has exhausted the system memory capacity.";
                log() << "Current Memory Status: " << bb.obj().toString();
            }
            // any VirtualProtect failure here is unrecoverable: log details and abort
            log() << "VirtualProtect for " << mmf->filename()
                  << " chunk " << chunkno
                  << " failed with " << errnoWithDescription( dosError )
                  << " (chunk size is " << protectSize
                  << ", address is " << hex << protectStart << dec << ")"
                  << " in mongo::makeChunkWritable, terminating" << endl;
            fassertFailed( 16362 );
        }
    }

    // mark the chunk writable so later faults on it short-circuit above
    writable.set(chunkno);
}