void* OpMmapSegment::AllocateUnused(UINT16 idx, UINT16 pages, OpMemoryClass type)
{
	OP_ASSERT(page_handle[idx].flag == OP_MMAP_FLAG_UNUSED);
	OP_ASSERT(page_handle[idx].size >= pages);

#ifdef DEBUG_ENABLE_OPASSERT
	unsigned int endmarker = idx + page_handle[idx].size - 1;
	OP_ASSERT(page_handle[endmarker].flag == OP_MMAP_FLAG_UNUSED);
	OP_ASSERT(page_handle[endmarker].size == page_handle[idx].size);
#endif

#ifdef ENABLE_MEMORY_MANAGER
	// Move the accounting from "available" to allocated by correct type,
	// and fail the allocation if this is not possible
	if ( ! mm->Transfer((OpMemoryClass)unusedtype, type, pages * pagesize) )
		return 0;
#endif

	// Record the number of remaining pages in the block.
	// This number will always be 0 or positive.
	unsigned int pages2 = page_handle[idx].size - pages;

	// Unlink from unused chain, and set new details
	Unlink(idx);
	SetDetails(idx, pages, OP_MMAP_FLAG_ALLOCATED);
	page_handle[idx].type = type;

	// If there are any remaining pages, insert them on the unused chain
	if ( pages2 )
	{
		// Create a new unused allocation for the remainder
		unsigned int idx2 = idx + pages;
		unsigned int cls2 = ComputeSizeClass(pages2);
		SetDetails(idx2, pages2, OP_MMAP_FLAG_UNUSED);
		Link(OP_MMAP_UNUSED_SIZECLASS + cls2, idx2);
	}

	allocated_pages += pages;
	unused_pages -= pages;

	return reinterpret_cast<void*>(base + idx * pagesize);
}
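// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original source): the split-on-allocate
// step that AllocateUnused() performs above, reduced to a toy boundary-tag
// array. 'DemoHandle' and 'DemoSplit' are hypothetical stand-ins for the
// real page_handle[] entries; the Link()/Unlink() free-chain updates and
// the memory-manager accounting are deliberately omitted.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

struct DemoHandle
{
	uint16_t size;   // block size in pages, mirrored in the block's last handle
	bool     unused; // true while the block sits on an unused chain
};

// Carve 'pages' off the front of the unused block at 'idx'; the carved part
// becomes the allocation and any remainder becomes a new, smaller unused
// block. Returns the index of the remainder, or 0 if none was left.
static uint16_t DemoSplit(DemoHandle* h, uint16_t idx, uint16_t pages)
{
	assert(h[idx].unused && h[idx].size >= pages);

	uint16_t remainder = h[idx].size - pages;

	h[idx].size = pages;               // front part becomes the allocation
	h[idx].unused = false;
	h[idx + pages - 1] = h[idx];       // mirror the details in the end handle

	if ( remainder )
	{
		uint16_t idx2 = idx + pages;   // remainder starts right after it
		h[idx2].size = remainder;
		h[idx2].unused = true;
		h[idx2 + remainder - 1] = h[idx2];
		return idx2;
	}

	return 0;
}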
void OpMmapSegment::Merge(UINT8 flag, UINT8 type, UINT16 idx)
{
	OP_ASSERT(flag == OP_MMAP_FLAG_UNUSED || flag == OP_MMAP_FLAG_RESERVED);

	UINT16 pages = page_handle[idx].size;

	if ( page_handle[idx - 1].flag == flag )
	{
		UINT16 size2 = page_handle[idx - 1].size;
		idx -= size2;
		OP_ASSERT(page_handle[idx].size == size2);
		OP_ASSERT(page_handle[idx].flag == flag);
		pages += size2;
		Unlink(idx);
	}

	UINT16 idx2 = idx + pages;

	if ( page_handle[idx2].flag == flag )
	{
		UINT16 size2 = page_handle[idx2].size;
		OP_ASSERT(page_handle[idx2 + size2 - 1].size == size2);
		OP_ASSERT(page_handle[idx2 + size2 - 1].flag == flag);
		pages += size2;
		Unlink(idx2);
	}

	unsigned int cls;
	unsigned int size_class = ComputeSizeClass(pages);

	SetDetails(idx, pages, flag);
	page_handle[idx].type = type;

	switch ( flag )
	{
	case OP_MMAP_FLAG_UNUSED:
		cls = OP_MMAP_UNUSED_SIZECLASS + size_class;
		break;

	case OP_MMAP_FLAG_RESERVED:
		cls = OP_MMAP_RESERVED_SIZECLASS + size_class;
		break;

	default:
		OP_ASSERT(!"Critical - Illegal merge operation");
		return;
	}

	Link(cls, idx);
}
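// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original source): the two-sided
// coalescing that Merge() performs above. Because every block mirrors its
// size in its first and last handle, the lower neighbour's end tag sits at
// h[idx - 1] and the upper neighbour's header at h[idx + size].
// 'TagHandle' and 'TagMerge' are hypothetical; like the real code, the
// sketch assumes sentinel handles at both ends of the array so the
// neighbour probes never run off it.
// ---------------------------------------------------------------------------
#include <cstdint>

struct TagHandle
{
	uint16_t size;
	bool     free;
};

// Merge the block at 'idx' with free neighbours on either side; returns the
// index of the merged block (which may have moved downwards).
static uint16_t TagMerge(TagHandle* h, uint16_t idx)
{
	uint16_t pages = h[idx].size;

	if ( h[idx - 1].free )            // h[idx - 1] is the lower block's end tag
	{
		uint16_t below = h[idx - 1].size;
		idx -= below;                 // jump to the lower block's header
		pages += below;
	}

	if ( h[idx + pages].free )        // header of the upper neighbour
		pages += h[idx + pages].size;

	h[idx].size = pages;              // rewrite both boundary tags
	h[idx].free = true;
	h[idx + pages - 1] = h[idx];

	return idx;
}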
void LootState::UpdateGames(std::list<GameSettings>& games) {
    // Acquire the lock for the scope of this method.
    base::AutoLock lock_scope(_lock);

    unordered_set<string> newGameFolders;

    // Update existing games, add new games.
    BOOST_LOG_TRIVIAL(trace) << "Updating existing games and adding new games.";
    for (auto &game : games) {
        auto pos = find(_games.begin(), _games.end(), game);

        if (pos != _games.end()) {
            pos->SetDetails(game.Name(), game.Master(), game.RepoURL(), game.RepoBranch(), game.GamePath().string(), game.RegistryKey());
        }
        else {
            BOOST_LOG_TRIVIAL(trace) << "Adding new game entry for: " << game.FolderName();
            _games.push_back(game);
        }

        newGameFolders.insert(game.FolderName());
    }

    // Remove deleted games. As the current game is stored using its index,
    // removing an earlier game may invalidate it.
    BOOST_LOG_TRIVIAL(trace) << "Removing deleted games.";
    for (auto it = _games.begin(); it != _games.end();) {
        if (newGameFolders.find(it->FolderName()) == newGameFolders.end()) {
            BOOST_LOG_TRIVIAL(trace) << "Removing game: " << it->FolderName();
            it = _games.erase(it);
        }
        else
            ++it;
    }

    // Re-initialise the current game in case the game path setting was changed.
    _currentGame->Init(true);

    // Update the game path in the settings object.
    _settings["games"] = ToGameSettings(_games);
}
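// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original source): the erase-while-
// iterating idiom used in the removal loop above, in isolation.
// std::list::erase() returns the iterator following the erased element, so
// the loop only advances manually on the keep path. 'PruneMissing' is a
// hypothetical helper, not part of LootState.
// ---------------------------------------------------------------------------
#include <list>
#include <string>
#include <unordered_set>

// Erase every element of 'items' whose value is absent from 'wanted'.
static void PruneMissing(std::list<std::string>& items,
                         const std::unordered_set<std::string>& wanted) {
    for (auto it = items.begin(); it != items.end();) {
        if (wanted.count(*it) == 0)
            it = items.erase(it);  // erase() yields the next valid iterator
        else
            ++it;
    }
}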
void OpMmapSegment::ForceReleaseAll(void)
{
#ifdef MEMORY_USE_LOCKING
	OpMemory::MallocLock();
#endif

	unsigned int idx = OP_MMAP_ANCHOR_COUNT + 1;

	while ( page_handle[idx].flag != OP_MMAP_FLAG_SENTINEL )
	{
		UINT16 size = page_handle[idx].size;
		UINT16 flag = page_handle[idx].flag;

		OP_ASSERT(page_handle[idx + size - 1].size == size);
		OP_ASSERT(page_handle[idx + size - 1].flag == flag);

		if ( flag == OP_MMAP_FLAG_ALLOCATED )
		{
			unsigned int idx2 = idx;

			while ( page_handle[idx2].flag == OP_MMAP_FLAG_ALLOCATED )
			{
				UINT16 size2 = page_handle[idx2].size;

#ifdef DEBUG_ENABLE_OPASSERT
				UINT16 flag2 = page_handle[idx2].flag;
				OP_ASSERT(page_handle[idx2 + size2 - 1].size == size2);
				OP_ASSERT(page_handle[idx2 + size2 - 1].flag == flag2);
#endif

#ifdef ENABLE_MEMORY_MANAGER
				OpMemoryClass type = (OpMemoryClass)page_handle[idx2].type;
				mm->Free(type, size2 * pagesize);
#endif

				idx2 += size2;
			}

			unsigned int pages = idx2 - idx;
			unsigned int cls = ComputeSizeClass(pages);
			SetDetails(idx, pages, OP_MMAP_FLAG_UNUSED);

#ifdef ENABLE_MEMORY_MANAGER
			mm->ForceAlloc((OpMemoryClass)unusedtype, pages * pagesize);
#endif

			Link(OP_MMAP_UNUSED_SIZECLASS + cls, idx);

			allocated_pages -= pages;
			unused_pages += pages;
			size = pages;
		}

		idx += size;
	}

	// We should have no allocated pages any more
	OP_ASSERT(allocated_pages == 0);

	// The previously allocated ones should now be unused; release them all
#ifdef MEMORY_USE_LOCKING
	ReleaseAllUnused(FALSE);
#else
	ReleaseAllUnused();
#endif

	OP_ASSERT(unused_pages == 0);

	//
	// The last action is to release the memory used for the header and
	// page handles. After this operation, 'this' is no longer accessible.
	//
	OpMemoryClass type = (OpMemoryClass)hdrtype;

	if ( address_upper_handles != 0 )
	{
		// Remove the upper handles, allocated on their own
		OpMemory_VirtualFree(mseg, address_upper_handles, size_upper_handles, type);
	}

	OpMemory_VirtualFree(mseg, mseg->address, pagesize * header_pages, type);

#ifdef MEMORY_USE_LOCKING
	OpMemory::MallocUnlock();
#endif
}
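// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original source): the inner loop of
// ForceReleaseAll() above, which folds each run of consecutive allocated
// blocks into a single unused block before everything is released. The
// 'WalkHandle' array and DEMO_* flags are hypothetical stand-ins; the
// sentinel plays the same role as OP_MMAP_FLAG_SENTINEL.
// ---------------------------------------------------------------------------
#include <cstdint>

enum DemoFlag : uint8_t { DEMO_UNUSED, DEMO_ALLOCATED, DEMO_SENTINEL };

struct WalkHandle
{
	uint16_t size;
	uint8_t  flag;
};

// Walk the handle array from 'first' to the sentinel, merging every run of
// allocated blocks; returns the number of pages reclaimed.
static unsigned DemoReclaimAll(WalkHandle* h, uint16_t first)
{
	unsigned reclaimed = 0;
	uint16_t idx = first;

	while ( h[idx].flag != DEMO_SENTINEL )
	{
		uint16_t size = h[idx].size;

		if ( h[idx].flag == DEMO_ALLOCATED )
		{
			uint16_t idx2 = idx;
			while ( h[idx2].flag == DEMO_ALLOCATED )  // swallow the whole run
				idx2 += h[idx2].size;

			uint16_t run = idx2 - idx;
			h[idx].size = run;                        // one merged unused block
			h[idx].flag = DEMO_UNUSED;
			h[idx + run - 1] = h[idx];

			reclaimed += run;
			size = run;
		}

		idx += size;
	}

	return reclaimed;
}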
#ifdef MEMORY_USE_LOCKING
void* OpMmapSegment::mmap(size_t size, OpMemoryClass type, BOOL memlock)
#else
void* OpMmapSegment::mmap(size_t size, OpMemoryClass type)
#endif
{
	unsigned int pages = (size + pagesize - 1) / pagesize;

	OP_ASSERT(pages > 0); // Zero-size allocations with mmap are not supported

	// We can't handle "impossibly large" single allocations. This means
	// pages that can't be indexed by 16-bit variables.
	if ( pages > 0xffff )
		return 0;

#ifdef MEMORY_USE_LOCKING
	if ( memlock )
		OpMemory::MallocLock();
#endif

	unsigned int size_class = ComputeSizeClass(pages);
	char* ptr = 0;
	unsigned int cls;

	if ( max_size[OP_MMAP_UNUSED_SIZECLASS + size_class] >= pages )
	{
		// Apparently, there should be an unused allocation large
		// enough to hold this request, so search for it.
		unsigned int idx = page_handle[OP_MMAP_UNUSED_SIZECLASS + size_class].next;
		UINT16 found_max_size = 0; // For recomputing max_size

		while ( idx >= OP_MMAP_ANCHOR_COUNT )
		{
			unsigned int found_pages = page_handle[idx].size;

			if ( found_pages >= pages )
			{
				// We have an unused allocation that is large enough
				ptr = (char*)AllocateUnused(idx, pages, type);
				goto done;
			}

			if ( found_pages > found_max_size )
				found_max_size = found_pages;

			idx = page_handle[idx].next;
		}

		// No unused allocation was large enough for our needs, even
		// though there was supposed to be one somewhere... The
		// 'max_size' must have been wrong, so update it to the correct
		// value, fall through, and try bigger size-classes.
		max_size[OP_MMAP_UNUSED_SIZECLASS + size_class] = found_max_size;
	}
#ifdef DEBUG_ENABLE_OPASSERT
	else
	{
		//
		// Make sure there really aren't any blocks larger than the
		// recorded maximum; that would cause corruption further down
		// when reusing an unused block and extending it with fresh
		// memory.
		//
		unsigned int idx = page_handle[OP_MMAP_UNUSED_SIZECLASS + size_class].next;
		unsigned int biggest = max_size[OP_MMAP_UNUSED_SIZECLASS + size_class];

		while ( idx >= OP_MMAP_ANCHOR_COUNT )
		{
			OP_ASSERT(page_handle[idx].size <= biggest);
			idx = page_handle[idx].next;
		}
	}
#endif // DEBUG_ENABLE_OPASSERT

	// Search higher size-classes
	for ( cls = size_class + 1; cls <= max_size_class; cls++ )
	{
		unsigned int idx = page_handle[OP_MMAP_UNUSED_SIZECLASS + cls].next;

		if ( idx > OP_MMAP_ANCHOR_COUNT )
		{
			// This is an unused allocation, and since it is of a
			// higher size-class, it will always be large enough
			ptr = (char*)AllocateUnused(idx, pages, type);
			goto done;
		}
	}

	//
	// No unused allocations of sufficient size are available, so we
	// need to request some from the operating system.
	//

	// Search for a sufficiently large reserved memory space
	for ( cls = size_class; cls <= max_size_class; cls++ )
	{
		unsigned int idx = page_handle[OP_MMAP_RESERVED_SIZECLASS + cls].next;

		while ( idx > OP_MMAP_ANCHOR_COUNT )
		{
			if ( page_handle[idx].size >= pages )
			{
				//
				// Found a reserved block that is large enough
				//
				if ( page_handle[idx - 1].flag == OP_MMAP_FLAG_UNUSED )
				{
					//
					// The block below is unused: Include it into our own
					// allocation, so we don't have to request as much
					// memory as we otherwise would have.
					//
					// How this impacts fragmentation is unknown.
					//
					unsigned int size2 = page_handle[idx - 1].size;
					OP_ASSERT(size2 < pages); // We would have found it previously
					unsigned int idx2 = idx - size2;
					OP_ASSERT(page_handle[idx2].size == size2);
					OP_ASSERT(page_handle[idx2].flag == OP_MMAP_FLAG_UNUSED);

#ifdef ENABLE_MEMORY_MANAGER
					if ( mm->Alloc(type, size2 * pagesize) )
#endif
					{
						ptr = (char*)AllocateReserved(idx, pages - size2, type);

						if ( ptr != 0 )
						{
							ptr -= (size2 * pagesize);
							Unlink(idx2);
							SetDetails(idx2, pages, OP_MMAP_FLAG_ALLOCATED);
							page_handle[idx2].type = type;
							unused_pages -= size2;
							allocated_pages += size2;
#ifdef ENABLE_MEMORY_MANAGER
							// The 'size2' unused block is no longer unused
							mm->Free((OpMemoryClass)unusedtype, size2 * pagesize);
						}
						else
						{
							// Allocation failed; reverse the reservation
							// on the 'size2' unused block below
							mm->Free(type, size2 * pagesize);
#endif
						}
					}

					goto done;
				}

				ptr = (char*)AllocateReserved(idx, pages, type);
				goto done;
			}

			idx = page_handle[idx].next;
		}
	}

 done:
#ifdef MEMORY_USE_LOCKING
	if ( memlock )
		OpMemory::MallocUnlock();
#endif

	return (void*)ptr;
}
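// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original source): ComputeSizeClass() is
// never shown in this listing, but mmap() above depends on two of its
// properties: classes are ordered by size, and any block in a class
// *higher* than ComputeSizeClass(pages) is guaranteed to hold 'pages'. A
// floor(log2) bucketing like this hypothetical version has both properties,
// since a block in class c + 1 holds at least 2^(c+1) pages while any
// request in class c is smaller than 2^(c+1).
// ---------------------------------------------------------------------------
static unsigned DemoSizeClass(unsigned pages)
{
	unsigned cls = 0;

	while ( pages > 1 )  // cls becomes floor(log2(pages))
	{
		pages >>= 1;
		cls++;
	}

	return cls;
}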
void* OpMmapSegment::AllocateReserved(UINT16 idx, UINT16 pages, OpMemoryClass type)
{
	OP_ASSERT(page_handle[idx].flag == OP_MMAP_FLAG_RESERVED);
	OP_ASSERT(page_handle[idx].size >= pages);

#ifdef DEBUG_ENABLE_OPASSERT
	unsigned int endmarker = idx + page_handle[idx].size - 1;
	OP_ASSERT(page_handle[endmarker].flag == OP_MMAP_FLAG_RESERVED);
	OP_ASSERT(page_handle[endmarker].size == page_handle[idx].size);
#endif

	//
	// Identify and allocate the memory area that needs to be
	// requested with VirtualAlloc()
	//
	void* ptr = (void*)(base + idx * pagesize);
	unsigned int bytes = pages * pagesize;

	if ( OpMemory_VirtualAlloc(mseg, ptr, bytes, type) )
	{
		//
		// Succeeded, but we need to make sure the two handles of the
		// new block are available, as well as the first handle of the
		// following block.
		//
		if ( AllocateHandles(idx + pages) )
		{
			//
			// Main block and handles allocated OK.
			//
			// Record the number of remaining pages in the block. This
			// number will always be 0 or positive.
			//
			unsigned int pages2 = page_handle[idx].size - pages;

			// Unlink from reserved chain, and set new details
			Unlink(idx);
			SetDetails(idx, pages, OP_MMAP_FLAG_ALLOCATED);
			page_handle[idx].type = type;

			// If there are any remaining pages, add them to the reserved chain
			if ( pages2 )
			{
				// Create a new reserved block for the remainder
				unsigned int idx2 = idx + pages;
				unsigned int cls2 = ComputeSizeClass(pages2);
				SetDetails(idx2, pages2, OP_MMAP_FLAG_RESERVED);
				Link(OP_MMAP_RESERVED_SIZECLASS + cls2, idx2);
			}

			allocated_pages += pages;
			goto success;
		}

		//
		// Could not allocate handles; free the previously
		// allocated memory and fall through into failure.
		//
		OpMemory_VirtualFree(mseg, ptr, bytes, type);
	}

	// Failure
	ptr = 0;

 success:
	return ptr;
}
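// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original source): one common POSIX way
// to implement the reserve/commit split that OpMemory_VirtualAlloc() and
// OpMemory_VirtualFree() abstract over, and that AllocateReserved() relies
// on when it commits pages inside a reserved range and rolls the commit
// back if AllocateHandles() fails. This is an assumption about the porting
// layer, not code from it; the Demo* helpers are hypothetical.
// ---------------------------------------------------------------------------
#include <stddef.h>
#include <sys/mman.h>

// Reserve address space without committing memory to it.
static void* DemoReserve(size_t bytes)
{
	void* p = mmap(NULL, bytes, PROT_NONE,
	               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

// Commit an already-reserved range; returns 0 on success.
static int DemoCommit(void* addr, size_t bytes)
{
	return mprotect(addr, bytes, PROT_READ | PROT_WRITE);
}

// Roll a commit back without giving up the reservation. A production
// implementation would also madvise(MADV_DONTNEED) the range so the
// physical pages are actually returned to the kernel.
static int DemoDecommit(void* addr, size_t bytes)
{
	return mprotect(addr, bytes, PROT_NONE);
}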