/*
 * Map |size| bytes of pages whose base address is a multiple of |alignment|.
 * Tries progressively more expensive strategies: a plain mapping, relocating
 * an unaligned mapping via GetNewChunk, an over-allocate-and-trim slow path,
 * and finally the last-ditch fallback. Returns nullptr on failure.
 */
void* MapAlignedPages(size_t size, size_t alignment) {
    MOZ_ASSERT(size >= alignment);
    MOZ_ASSERT(size % alignment == 0);
    MOZ_ASSERT(size % pageSize == 0);
    MOZ_ASSERT(alignment % allocGranularity == 0);

    void* region = MapMemory(size);

    /* Granularity-sized alignment is what MapMemory already guarantees. */
    if (alignment == allocGranularity)
        return region;

    /* Cheap path: the allocator happened to hand back an aligned region.
     * (A nullptr region also takes this exit, since its offset is 0.) */
    if (OffsetFromAligned(region, alignment) == 0)
        return region;

    /* Ask for a replacement chunk that is (or can be made) aligned; the
     * piece we no longer want comes back in |retained|. */
    void* retained;
    GetNewChunk(&region, &retained, size, alignment);
    if (retained)
        UnmapPages(retained, size);
    if (region) {
        if (OffsetFromAligned(region, alignment) == 0)
            return region;
        UnmapPages(region, size);
    }

    /* Still unaligned: over-allocate and trim to alignment. */
    region = MapAlignedPagesSlow(size, alignment);
    if (!region)
        return MapAlignedPagesLastDitch(size, alignment);

    MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
    return region;
}
/*
 * In a low memory or high fragmentation situation, alignable chunks of the
 * desired size may still be available, even if there are no more contiguous
 * free chunks that meet the |size + alignment - pageSize| requirement of
 * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
 * by temporarily holding onto the unaligned parts of each chunk until the
 * allocator gives us a chunk that either is, or can be aligned.
 *
 * Returns an aligned mapping of |size| bytes, or nullptr if every attempt
 * produced an unalignable chunk. All temporarily-retained chunks are unmapped
 * before returning.
 */
static void* MapAlignedPagesLastDitch(size_t size, size_t alignment) {
    /* Unaligned leftovers we hold on to so the allocator can't hand the same
     * region straight back to us; released in the unwind loop below. */
    void* tempMaps[MaxLastDitchAttempts];
    int attempt = 0;
    void* p = MapMemory(size);
    if (OffsetFromAligned(p, alignment) == 0)
        return p; /* Lucky: the very first mapping is already aligned. */
    for (; attempt < MaxLastDitchAttempts; ++attempt) {
        /* Trade |p| for a fresh chunk; the rejected piece is parked in
         * tempMaps[attempt] so it stays mapped (and thus unavailable). */
        GetNewChunk(&p, tempMaps + attempt, size, alignment);
        if (OffsetFromAligned(p, alignment) == 0) {
            /* Success: this slot's leftover (if any) is no longer needed;
             * free it now so the unwind loop only sees slots [0, attempt). */
            if (tempMaps[attempt])
                UnmapPages(tempMaps[attempt], size);
            break;
        }
        if (!tempMaps[attempt])
            break; /* Bail if GetNewChunk failed. */
    }
    /* If we fell out of the loop still unaligned, give up on |p|. */
    if (OffsetFromAligned(p, alignment)) {
        UnmapPages(p, size);
        p = nullptr;
    }
    /* Release every chunk we were holding hostage. */
    while (--attempt >= 0)
        UnmapPages(tempMaps[attempt], size);
    return p;
}
/**
 * Create a new chunk for this process according to aInfo.
 *
 * On success, returns KErrNone with aChunk set to the new chunk and
 * aRunAddr set to its run address. On failure, returns an error code,
 * closes any partially-created chunk, and leaves aChunk NULL.
 *
 * NOTE(review): behavior of GetNewChunk/Create/Reserve/AddChunk is not
 * visible from this file — comments on those steps are inferred from the
 * call pattern; confirm against the memory-model implementation.
 */
TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
    {
    aChunk=NULL;
    DMemModelChunk* pC=NULL;
    /* Allocate the chunk object itself (not yet committed memory). */
    TInt r=GetNewChunk(pC,aInfo);
    if (r!=KErrNone)
        {
        if (pC)
            pC->Close(NULL);
        return r;
        }
    /* Propagate fixed-address requirement from the request or the process. */
    if (aInfo.iForceFixed || iAttributes & DMemModelProcess::EFixedAddress)
        pC->iAttributes |= DMemModelChunk::EFixedAddress;
    /* Local chunks of a private process are marked private too. */
    if (!aInfo.iGlobal && (iAttributes & DMemModelProcess::EPrivate)!=0)
        pC->iAttributes |= DMemModelChunk::EPrivate;
    /* Code-bearing chunk types are always fixed-address code chunks. */
    if (pC->iChunkType==EDll || pC->iChunkType==EUserCode || pC->iChunkType==EUserSelfModCode || pC->iChunkType==EKernelCode)
        pC->iAttributes |= (DMemModelChunk::EFixedAddress|DMemModelChunk::ECode);
    /* Global chunks have no owning process. */
    pC->iOwningProcess=(aInfo.iGlobal)?NULL:this;
    r=pC->Create(aInfo);
    if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
        {
        if (aInfo.iRunAddress!=0)
            pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
        if (aInfo.iPreallocated==0)
            {
            /* Perform the initial size adjustment appropriate to the
             * chunk's layout: disconnected, double-ended, or normal. */
            if (pC->iAttributes & DChunk::EDisconnected)
                {
                r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
                }
            else if (pC->iAttributes & DChunk::EDoubleEnded)
                {
                r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
                }
            else
                {
                r=pC->Adjust(aInfo.iInitialTop);
                }
            }
        /* Fixed-address chunks with no home region yet: reserve the
         * address range and publish the run address. */
        if (r==KErrNone && pC->iHomeRegionBase==0 && (pC->iAttributes&DMemModelChunk::EFixedAddress)!=0)
            {
            r=pC->Reserve(0);
            aRunAddr=(TLinAddr)pC->Base();
            }
        }
    if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
        {
        if (pC->iAttributes & DMemModelChunk::ECode)
            Mmu::Get().SyncCodeMappings();
        if (pC->iChunkType!=EUserCode)
            {
            /* Adding the chunk to the process address space requires the
             * process lock. */
            r=WaitProcessLock();
            if (r==KErrNone)
                {
                r=AddChunk(pC,aRunAddr,EFalse);
                SignalProcessLock();
                }
            }
        else
            aRunAddr=(TLinAddr)pC->Base();   // code chunks always fixed address
        }
    if (r==KErrNone)
        {
        pC->iDestroyedDfc = aInfo.iDestroyedDfc;
        aChunk=(DChunk*)pC;
        }
    else
        pC->Close(NULL);    // NULL since chunk can't have been added to process
    return r;
    }