// Allocate executable memory for the assembled code, wrap it in a JitCode
// object, and copy/link the MacroAssembler output into it. Returns nullptr
// (via fail()) on OOM. Runs entirely under a no-GC assertion because raw
// pointers into the executable allocation are held across the JitCode
// allocation.
JitCode* Linker::newCode(JSContext* cx, CodeKind kind) {
  JS::AutoAssertNoGC nogc(cx);
  // Bail out early if assembly already hit OOM.
  if (masm.oom()) return fail(cx);
  masm.performPendingReadBarriers();
  static const size_t ExecutableAllocatorAlignment = sizeof(void*);
  static_assert(CodeAlignment >= ExecutableAllocatorAlignment,
                "Unexpected alignment requirements");
  // We require enough bytes for the code, header, and worst-case alignment
  // padding. (The allocator only guarantees ExecutableAllocatorAlignment,
  // so the extra slack lets us round up to CodeAlignment ourselves.)
  size_t bytesNeeded = masm.bytesNeeded() + sizeof(JitCodeHeader) +
                       (CodeAlignment - ExecutableAllocatorAlignment);
  if (bytesNeeded >= MAX_BUFFER_SIZE) return fail(cx);
  // ExecutableAllocator requires bytesNeeded to be aligned.
  bytesNeeded = AlignBytes(bytesNeeded, ExecutableAllocatorAlignment);
  ExecutablePool* pool;
  uint8_t* result = (uint8_t*)cx->runtime()->jitRuntime()->execAlloc().alloc(
      cx, bytesNeeded, &pool, kind);
  if (!result) return fail(cx);
  // The JitCodeHeader will be stored right before the code buffer.
  uint8_t* codeStart = result + sizeof(JitCodeHeader);
  // Bump the code up to a nice alignment.
  codeStart = (uint8_t*)AlignBytes((uintptr_t)codeStart, CodeAlignment);
  // The padding reserved above must absorb the alignment bump.
  MOZ_ASSERT(codeStart + masm.bytesNeeded() <= result + bytesNeeded);
  uint32_t headerSize = codeStart - result;
  JitCode* code = JitCode::New<NoGC>(cx, codeStart, bytesNeeded - headerSize,
                                     headerSize, pool, kind);
  if (!code) return fail(cx);
  // Re-check OOM: JitCode::New may have flagged the masm context.
  // NOTE(review): presumably this guards state changed since the first
  // check — confirm against MacroAssembler's oom contract.
  if (masm.oom()) return fail(cx);
  // awjc: makes the JIT pages writable for the copy below — assumed to be
  // an AutoWritableJitCode member; TODO confirm its declaration.
  awjc.emplace(result, bytesNeeded);
  code->copyFrom(masm);
  masm.link(code);
  // If the code embeds pointers into the nursery, register it with the
  // store buffer so minor GC can trace/update them.
  if (masm.embedsNurseryPointers())
    cx->runtime()->gc.storeBuffer().putWholeCell(code);
  return code;
}
// Fallback path for obtaining a |size|-byte mapping aligned to |alignment|.
//
// Windows insists on a strict 1:1 pairing of VM allocation and deallocation
// operations, so the block we hand back must come from a single mapping
// call. We therefore probe with an over-sized reservation, release it, and
// re-map just the aligned span — retrying if another thread steals the
// address in between.
static void* MapAlignedPagesSlow(size_t size, size_t alignment) {
  void* aligned = nullptr;
  while (true) {
    // Reserve (no commit needed — it is unmapped immediately) a region
    // guaranteed to contain an aligned run of |size| bytes.
    size_t reserveSize = size + alignment - pageSize;
    void* trial = MapMemory(reserveSize, MEM_RESERVE);
    if (!trial) return nullptr;
    // Locate the aligned start inside the trial reservation, then release
    // the whole reservation.
    void* chunkStart = (void*)AlignBytes(uintptr_t(trial), alignment);
    UnmapPages(trial, reserveSize);
    // Map exactly the aligned span. Failure here indicates a race with
    // another thread, so try again.
    aligned = MapMemoryAt(chunkStart, size, MEM_COMMIT | MEM_RESERVE);
    if (aligned) break;
  }
  return aligned;
}
// Older variant of Linker::newCode: allocates executable memory (optionally
// from the patchable-backedge pool), wraps it in a JitCode, and copies/links
// the assembled code into it. Returns nullptr on failure.
//
// NOTE(review): the body uses |allowGC|, which must be a template parameter
// declared on the enclosing (not visible here) template header — confirm
// before editing this function.
JitCode* Linker::newCode(JSContext* cx, CodeKind kind,
                         bool hasPatchableBackedges /* = false */) {
  MOZ_ASSERT(masm.numAsmJSAbsoluteAddresses() == 0);
  // Only Ion code uses the patchable-backedge allocator.
  MOZ_ASSERT_IF(hasPatchableBackedges, kind == ION_CODE);
  // Suppress GC: raw pointers into the executable allocation are live
  // across the JitCode allocation below.
  gc::AutoSuppressGC suppressGC(cx);
  if (masm.oom()) return fail(cx);
  ExecutablePool* pool;
  // Room for the code, the leading JitCode* header slot, and worst-case
  // padding to reach CodeAlignment.
  size_t bytesNeeded = masm.bytesNeeded() + sizeof(JitCode*) + CodeAlignment;
  if (bytesNeeded >= MAX_BUFFER_SIZE) return fail(cx);
  // ExecutableAllocator requires bytesNeeded to be word-size aligned.
  bytesNeeded = AlignBytes(bytesNeeded, sizeof(void*));
  // Patchable backedges live in their own pool so they can be rewritten
  // independently of ordinary JIT code.
  ExecutableAllocator& execAlloc =
      hasPatchableBackedges ? cx->runtime()->jitRuntime()->backedgeExecAlloc()
                            : cx->runtime()->jitRuntime()->execAlloc();
  uint8_t* result = (uint8_t*)execAlloc.alloc(bytesNeeded, &pool, kind);
  if (!result) return fail(cx);
  // The JitCode pointer will be stored right before the code buffer.
  uint8_t* codeStart = result + sizeof(JitCode*);
  // Bump the code up to a nice alignment.
  codeStart = (uint8_t*)AlignBytes((uintptr_t)codeStart, CodeAlignment);
  uint32_t headerSize = codeStart - result;
  JitCode* code = JitCode::New<allowGC>(cx, codeStart, bytesNeeded - headerSize,
                                        headerSize, pool, kind);
  // Plain nullptr (not fail()) here — presumably JitCode::New<allowGC>
  // reports its own failure; verify before normalizing this path.
  if (!code) return nullptr;
  if (masm.oom()) return fail(cx);
  // awjc: assumed to make the JIT pages writable for the copy below —
  // TODO confirm its declaration.
  awjc.emplace(result, bytesNeeded);
  code->copyFrom(masm);
  masm.link(code);
  // Register code embedding nursery pointers with the store buffer so the
  // minor GC can trace/update them.
  if (masm.embedsNurseryPointers())
    cx->runtime()->gc.storeBuffer.putWholeCell(code);
  return code;
}