TEST(MemoryRangeAnalyzer, GetMaxContiguousFreeBlockSize_twoblocks)
{
    auto analyzer = MemoryRangeAnalyzer();
    auto ranges = std::make_shared<std::vector<const MemoryRange>>();
    ranges->push_back(MemoryRange(0, 1, State::Commit, Usage::EnvironmentBlock));
    ranges->push_back(MemoryRange(101, 1001, State::Commit, Usage::EnvironmentBlock));

    auto size = analyzer.GetMaxContiguousFreeBlockSize(RangeList(ranges));

    EXPECT_EQ(size, 100);
}
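// For context on the expectation above: treating MemoryRange's second
// argument as a size, the blocks cover [0, 1) and [101, 1102), leaving a
// 100-byte gap. A minimal standalone sketch of that gap computation
// (Range and MaxContiguousFreeBlock are illustrative names, not the real
// MemoryRangeAnalyzer internals; input assumed sorted by start address):
#include <algorithm>
#include <cstdint>
#include <vector>

struct Range { uint64_t start; uint64_t size; };

uint64_t MaxContiguousFreeBlock(const std::vector<Range> &ranges)
{
    uint64_t maxGap = 0;
    for (size_t i = 1; i < ranges.size(); ++i) {
        // Gap between the end of the previous block and the next start.
        uint64_t prevEnd = ranges[i - 1].start + ranges[i - 1].size;
        if (ranges[i].start > prevEnd)
            maxGap = std::max(maxGap, ranges[i].start - prevEnd);
    }
    return maxGap; // {0,1} and {101,1001} -> 101 - 1 = 100
}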
TEST(MemoryRangeAnalyzer, GetMinContiguousSOHHeapSize_threeblocks)
{
    auto analyzer = MemoryRangeAnalyzer();
    auto ranges = std::make_shared<std::vector<const MemoryRange>>();
    ranges->push_back(MemoryRange(0, 10000, State::Commit, Usage::GCHeap));
    ranges->push_back(MemoryRange(20000, 20000, State::Commit, Usage::GCHeap));
    ranges->push_back(MemoryRange(50000, 0x2f000, State::Commit, Usage::GCHeap));

    auto size = analyzer.GetMinContiguousSOHHeapSize(RangeList(ranges));

    EXPECT_EQ(size, 10000);
}
MemoryRange
MappableSeekableZStream::mmap(const void *addr, size_t length, int prot,
                              int flags, off_t offset)
{
  /* Map with PROT_NONE so that accessing the mapping would segfault, and
   * bring us to ensure() */
  void *res = buffer->mmap(addr, length, PROT_NONE, flags, offset);
  if (res == MAP_FAILED)
    return MemoryRange(MAP_FAILED, 0);

  /* Store the mapping, ordered by offset and length */
  std::vector<LazyMap>::reverse_iterator it;
  for (it = lazyMaps.rbegin(); it < lazyMaps.rend(); ++it) {
    if ((it->offset < offset) ||
        ((it->offset == offset) && (it->length < length)))
      break;
  }
  LazyMap map = { res, length, prot, offset };
  lazyMaps.insert(it.base(), map);
  return MemoryRange(res, length);
}
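// Note on the insertion above: for a reverse_iterator it, it.base() points
// one element past the one it designates, so inserting at it.base() places
// the new LazyMap right after the first existing entry with a smaller
// (offset, length) key, keeping the vector ordered. A self-contained
// illustration of the idiom (the ints stand in for LazyMap entries):
#include <algorithm>
#include <cassert>
#include <vector>

void reverse_insert_demo()
{
  std::vector<int> v = { 10, 20, 40 };
  auto rit = std::find(v.rbegin(), v.rend(), 20); // designates 20
  v.insert(rit.base(), 30);                       // inserts after 20
  assert((v == std::vector<int>{ 10, 20, 30, 40 }));
}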
TEST(MemoryRangeAnalyzer, GetMinContiguousSOHHeapSize_oneblock_nonsoh2)
{
    auto analyzer = MemoryRangeAnalyzer();
    auto ranges = std::make_shared<std::vector<const MemoryRange>>();
    ranges->push_back(MemoryRange(0, 101, State::Commit, Usage::GCLOHeap));

    auto size = analyzer.GetMinContiguousSOHHeapSize(RangeList(ranges));

    EXPECT_EQ(size, MemoryRangeAnalyzer::UNDETERMINED_SIZE);
}
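// The two GetMinContiguousSOHHeapSize tests above pin down two behaviors:
// the minimum size over SOH (Usage::GCHeap) blocks, and UNDETERMINED_SIZE
// when no SOH block exists (GCLOHeap does not count). A hypothetical sketch
// of just that logic (names and the sentinel value are illustrative; the
// real analyzer may also coalesce adjacent ranges, which these tests do
// not exercise):
#include <cstdint>
#include <limits>
#include <vector>

struct HeapRange { uint64_t start; uint64_t size; bool isSoh; };

constexpr uint64_t kUndeterminedSize = std::numeric_limits<uint64_t>::max();

uint64_t MinContiguousSohHeapSize(const std::vector<HeapRange> &ranges)
{
    uint64_t minSize = kUndeterminedSize;
    for (const auto &r : ranges)
        if (r.isSoh && r.size < minSize)
            minSize = r.size;
    return minSize; // kUndeterminedSize when no SOH block was seen
}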
MemoryRange
MappableDeflate::mmap(const void *addr, size_t length, int prot, int flags,
                      off_t offset)
{
  MOZ_ASSERT(buffer);
  MOZ_ASSERT(!(flags & MAP_SHARED));
  flags |= MAP_PRIVATE;

  /* The deflate stream is uncompressed up to the required offset + length, if
   * it hasn't previously been uncompressed */
  ssize_t missing = offset + length + zStream.avail_out - buffer->GetLength();
  if (missing > 0) {
    uInt avail_out = zStream.avail_out;
    zStream.avail_out = missing;
    if ((*buffer == zStream.next_out) &&
        (inflateInit2(&zStream, -MAX_WBITS) != Z_OK)) {
      LOG("inflateInit failed: %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    int ret = inflate(&zStream, Z_SYNC_FLUSH);
    if (ret < 0) {
      LOG("inflate failed: %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    if (ret == Z_NEED_DICT) {
      LOG("zstream requires a dictionary. %s", zStream.msg);
      return MemoryRange(MAP_FAILED, 0);
    }
    zStream.avail_out = avail_out - missing + zStream.avail_out;
    if (ret == Z_STREAM_END) {
      if (inflateEnd(&zStream) != Z_OK) {
        LOG("inflateEnd failed: %s", zStream.msg);
        return MemoryRange(MAP_FAILED, 0);
      }
      if (zStream.total_out != buffer->GetLength()) {
        LOG("File not fully uncompressed! %ld / %d", zStream.total_out,
            static_cast<unsigned int>(buffer->GetLength()));
        return MemoryRange(MAP_FAILED, 0);
      }
    }
  }
#if defined(ANDROID) && defined(__arm__)
  if (prot & PROT_EXEC) {
    /* We just extracted data that may be executed in the future.
     * We thus need to ensure Instruction and Data cache coherency. */
    DEBUG_LOG("cacheflush(%p, %p)", *buffer + offset,
              *buffer + (offset + length));
    cacheflush(reinterpret_cast<uintptr_t>(*buffer + offset),
               reinterpret_cast<uintptr_t>(*buffer + (offset + length)), 0);
  }
#endif

  return MemoryRange(buffer->mmap(addr, length, prot, flags, offset), length);
}
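/* Hedged caller-side sketch for the function above; only the mmap()
 * signature is taken from the source, and MapWindow is a hypothetical
 * helper. On any inflate failure the function returns
 * MemoryRange(MAP_FAILED, 0), mirroring the error paths above. */
#include <sys/mman.h>

static bool MapWindow(MappableDeflate *deflate, size_t length, off_t offset)
{
  MemoryRange r = deflate->mmap(nullptr, length, PROT_READ, MAP_PRIVATE,
                                offset);
  // Comparing against MAP_FAILED mirrors the error value returned above;
  // the exact comparison operator on MemoryRange is an assumption here.
  return !(r == MAP_FAILED);
}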
break_id_t Watcher::addMemoryExecBreak(uint16_t addr_begin, uint16_t addr_end)
{
    break_id_t const id = createBreakUniq();
    this->execBreaks_.push_back(MemoryRange(id, addr_begin, addr_end));
    return id;
}
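// Usage sketch for the function above; `watcher` is an assumed Watcher
// instance and the 16-bit address window is arbitrary. Only
// addMemoryExecBreak's signature comes from the source.
break_id_t watchRoutine(Watcher &watcher)
{
    // Break on any instruction fetch in [0x8000, 0x80ff].
    break_id_t const id = watcher.addMemoryExecBreak(0x8000, 0x80ff);
    return id; // keep the id around to identify the break later
}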