Example #1
    bool DynamicTypeHandler::CheckHeuristicsForFixedDataProps(DynamicObject* instance, const PropertyRecord * propertyRecord, Var value)
    {
        if (PHASE_FORCE1(Js::FixDataPropsPhase))
        {
            return true;
        }

        if (Js::TaggedInt::Is(value) &&
            ((instance->GetTypeId() == TypeIds_GlobalObject && instance->GetScriptContext()->IsIntConstPropertyOnGlobalObject(propertyRecord->GetPropertyId())) ||
            (instance->GetTypeId() == TypeIds_Object && instance->GetScriptContext()->IsIntConstPropertyOnGlobalUserObject(propertyRecord->GetPropertyId()))))
        {
            return true;
        }

        // Disabled by default
        if (PHASE_ON1(Js::FixDataVarPropsPhase))
        {
            if (instance->GetTypeHandler()->GetFlags() & IsPrototypeFlag)
            {
                return true;
            }
            if (instance->GetType()->GetTypeId() == TypeIds_GlobalObject)
            {
                // If we have statically seen multiple stores, we should not do this optimization
                RootObjectInlineCache* cache = (static_cast<Js::RootObjectBase*>(instance))->GetRootInlineCache(propertyRecord, /*isLoadMethod*/ false, /*isStore*/ true);
                uint refCount = cache->Release();
                return refCount <= 1;
            }
        }
        return false;
    }
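The heuristic above layers three gates: a force flag that unconditionally enables fixed data properties, a cheap always-profitable case (tagged integer constants on the global object or a global user object), and an experimental prototype/global path that is off by default. Below is a minimal standalone sketch of that gating pattern; the names (PhaseFlags, ShouldFixProperty) are hypothetical stand-ins for ChakraCore's PHASE_FORCE1/PHASE_ON1 machinery, not its real API.

#include <cstdio>

// Hypothetical flag set standing in for ChakraCore's phase flags.
struct PhaseFlags
{
    bool forceFixDataProps;   // analogous to PHASE_FORCE1(Js::FixDataPropsPhase)
    bool enableFixDataVars;   // analogous to PHASE_ON1(Js::FixDataVarPropsPhase), off by default
};

static bool ShouldFixProperty(const PhaseFlags& flags, bool isIntConstOnGlobal, bool isPrototype)
{
    if (flags.forceFixDataProps)
    {
        return true;          // forced: skip all heuristics
    }
    if (isIntConstOnGlobal)
    {
        return true;          // cheap, always-profitable case
    }
    if (flags.enableFixDataVars && isPrototype)
    {
        return true;          // experimental path, disabled by default
    }
    return false;             // conservative default: don't fix the property
}

int main()
{
    PhaseFlags flags = { false, true };
    std::printf("%d\n", ShouldFixProperty(flags, false, true)); // prints 1
    return 0;
}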
Example #2
LPVOID PreReservedVirtualAllocWrapper::EnsurePreReservedRegionInternal()
{
    LPVOID startAddress = preReservedStartAddress;
    if (startAddress != nullptr)
    {
        return startAddress;
    }

    // Pre-reserve a (bigger) segment
    size_t bytes = PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageSize();
    if (PHASE_FORCE1(Js::PreReservedHeapAllocPhase))
    {
        // This path is used where CFG is not available, so that the pre-reserve optimization for CFG can still be tested
        startAddress = VirtualAlloc(NULL, bytes, MEM_RESERVE, PAGE_READWRITE);
        PreReservedHeapTrace(_u("Reserving PreReservedSegment for the first time (CFG not enabled). Address: 0x%p\n"), startAddress);
        preReservedStartAddress = startAddress;
        return startAddress;
    }

#if defined(_CONTROL_FLOW_GUARD)
    bool supportPreReservedRegion = true;
#if !TARGET_64
#if _M_IX86
    // We want to restrict the number of pre-reserved segments for 32-bit processes so that we don't use up the address space.

    // Note: numPreReservedSegment is for the whole process, and access and update to it are not protected by a global lock.
    // So we may allocate more than the maximum some of the time, if multiple threads check it simultaneously and allocate past the limit.
    // It doesn't affect correctness, and it should be OK if we occasionally exceed the limit.

    if (PreReservedVirtualAllocWrapper::numPreReservedSegment > PreReservedVirtualAllocWrapper::MaxPreReserveSegment)
    {
        supportPreReservedRegion = false;
    }
#else
    // TODO: The fast check for the pre-reserved segment is not implemented on ARM yet, so it is only enabled for x86
    supportPreReservedRegion = false;
#endif // _M_IX86
#endif

    if (AutoSystemInfo::Data.IsCFGEnabled() && supportPreReservedRegion)
    {
        startAddress = VirtualAlloc(NULL, bytes, MEM_RESERVE, PAGE_READWRITE);
        PreReservedHeapTrace(_u("Reserving PreReservedSegment for the first time (CFG enabled). Address: 0x%p\n"), startAddress);
        preReservedStartAddress = startAddress;

#if !TARGET_64
        if (startAddress)
        {
            InterlockedIncrement(&PreReservedVirtualAllocWrapper::numPreReservedSegment);
        }
#endif
    }
#endif


    return startAddress;
}
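The 32-bit path above tolerates a deliberate race: the segment count is read without a lock, so the cap can occasionally be exceeded, and only a successful reservation increments the counter (via InterlockedIncrement). A portable sketch of the same capped-reservation pattern follows, using std::atomic in place of InterlockedIncrement and malloc as a stand-in for VirtualAlloc(MEM_RESERVE); all names are hypothetical.

#include <atomic>
#include <cstdlib>

static std::atomic<long> numPreReservedSegments{0};
static const long MaxPreReservedSegments = 6;

static void* TryReserveSegment(size_t bytes)
{
    // Unsynchronized check: several threads may pass it at once, so the cap can
    // be exceeded slightly. That only costs address space, not correctness, so
    // no lock is taken (mirroring the original comment above).
    if (numPreReservedSegments.load(std::memory_order_relaxed) > MaxPreReservedSegments)
    {
        return nullptr;
    }
    void* region = std::malloc(bytes);  // stand-in for VirtualAlloc(NULL, bytes, MEM_RESERVE, ...)
    if (region != nullptr)
    {
        // Count only successful reservations, as the original does.
        numPreReservedSegments.fetch_add(1, std::memory_order_relaxed);
    }
    return region;
}

int main()
{
    void* segment = TryReserveSegment(64 * 1024);
    if (segment != nullptr)
    {
        std::free(segment);
    }
    return 0;
}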
Example #3
LPVOID PreReservedVirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
{
    Assert(this);
    AssertMsg(isCustomHeapAllocation, "PreReservation used for allocations other than CustomHeap?");
    AssertMsg(AutoSystemInfo::Data.IsCFGEnabled() || PHASE_FORCE1(Js::PreReservedHeapAllocPhase), "PreReservation without CFG?");
    Assert(dwSize != 0);

    {
        AutoCriticalSection autocs(&this->cs);
        // Return nullptr if there is no space to reserve
        if (EnsurePreReservedRegionInternal() == nullptr)
        {
            PreReservedHeapTrace(_u("No space to pre-reserve memory with %d pages. Returning NULL\n"), PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageCount());
            return nullptr;
        }

        char * addressToReserve = nullptr;

        uint freeSegmentsBVIndex = BVInvalidIndex;
        size_t requestedNumOfSegments = dwSize / (AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        Assert(requestedNumOfSegments <= MAXUINT32);

        if (lpAddress == nullptr)
        {
            Assert(requestedNumOfSegments != 0);
            AssertMsg(dwSize % AutoSystemInfo::Data.GetAllocationGranularityPageSize() == 0, "dwSize should be aligned with Allocation Granularity");

            do
            {
                freeSegmentsBVIndex = freeSegments.GetNextBit(freeSegmentsBVIndex + 1);
                // Return nullptr if we don't have free/decommitted pages to allocate
                if ((freeSegments.Length() - freeSegmentsBVIndex < requestedNumOfSegments) ||
                        freeSegmentsBVIndex == BVInvalidIndex)
                {
                    PreReservedHeapTrace(_u("No more space to commit in PreReserved Memory region.\n"));
                    return nullptr;
                }
            } while (!freeSegments.TestRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments)));

            uint offset = freeSegmentsBVIndex * AutoSystemInfo::Data.GetAllocationGranularityPageSize();
            addressToReserve = (char*) preReservedStartAddress + offset;

            // Check that the region is not already in the MEM_COMMIT state.
            MEMORY_BASIC_INFORMATION memBasicInfo;
            size_t bytes = VirtualQuery(addressToReserve, &memBasicInfo, sizeof(memBasicInfo));
            if (bytes == 0
                    || memBasicInfo.RegionSize < requestedNumOfSegments * AutoSystemInfo::Data.GetAllocationGranularityPageSize()
                    || memBasicInfo.State == MEM_COMMIT
               )
            {
                CustomHeap_BadPageState_fatal_error((ULONG_PTR)this);
                return nullptr;
            }
        }
        else
        {
            // Check that lpAddress is within the range of the pre-reserved memory region
            Assert(((char*) lpAddress) >= (char*) preReservedStartAddress && ((char*) lpAddress + dwSize) < GetPreReservedEndAddress());

            addressToReserve = (char*) lpAddress;
            freeSegmentsBVIndex = (uint) ((addressToReserve - (char*) preReservedStartAddress) / AutoSystemInfo::Data.GetAllocationGranularityPageSize());
#if DBG
            uint numOfSegments = (uint)ceil((double)dwSize / (double)AutoSystemInfo::Data.GetAllocationGranularityPageSize());
            Assert(numOfSegments != 0);
            Assert(freeSegmentsBVIndex + numOfSegments - 1 < freeSegments.Length());
            Assert(!freeSegments.TestRange(freeSegmentsBVIndex, numOfSegments));
#endif
        }

        AssertMsg(freeSegmentsBVIndex < PreReservedAllocationSegmentCount, "Invalid BitVector index calculation?");
        AssertMsg(dwSize % AutoSystemInfo::PageSize == 0, "COMMIT is managed at AutoSystemInfo::PageSize granularity");

        char * allocatedAddress = nullptr;

        if ((allocationType & MEM_COMMIT) != 0)
        {
#if defined(ENABLE_JIT_CLAMP)
            AutoEnableDynamicCodeGen enableCodeGen;
#endif

#if defined(_CONTROL_FLOW_GUARD)
            if (AutoSystemInfo::Data.IsCFGEnabled())
            {
                DWORD oldProtect = 0;
                // The enclosing check already guarantees CFG is enabled, so commit
                // the pages with all call targets marked invalid in the CFG bitmap.
                DWORD allocProtectFlags = PAGE_EXECUTE_RW_TARGETS_INVALID;

                allocatedAddress = (char *)VirtualAlloc(addressToReserve, dwSize, MEM_COMMIT, allocProtectFlags);

                AssertMsg(allocatedAddress != nullptr, "If no space to allocate, then how did we fetch this address from the tracking bit vector?");
                VirtualProtect(allocatedAddress, dwSize, protectFlags, &oldProtect);
                AssertMsg(oldProtect == (PAGE_EXECUTE_READWRITE), "CFG Bitmap gets allocated and bits will be set to invalid only upon passing these flags.");
            }
            else
#endif
            {
                allocatedAddress = (char *)VirtualAlloc(addressToReserve, dwSize, MEM_COMMIT, protectFlags);
            }
        }
        else
        {
            // Just return the uncommitted address if we didn't ask to commit it.
            allocatedAddress = addressToReserve;
        }

        // Keep track of the committed pages within the pre-reserved memory region
        if (lpAddress == nullptr && allocatedAddress != nullptr)
        {
            Assert(allocatedAddress == addressToReserve);
            Assert(requestedNumOfSegments != 0);
            freeSegments.ClearRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments));
        }

        PreReservedHeapTrace(_u("MEM_COMMIT: StartAddress: 0x%p of size: 0x%x * 0x%x bytes \n"), allocatedAddress, requestedNumOfSegments, AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        return allocatedAddress;
    }
}
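The uncommitted-segment bookkeeping above is a first-fit bitmap scan: GetNextBit finds a candidate free segment, TestRange checks that the whole requested run is free, and ClearRange marks the run as in use once the commit succeeds. A simplified, self-contained sketch of that search follows, with std::vector<bool> standing in for ChakraCore's bit vector; the names are hypothetical.

#include <cstddef>
#include <cstdio>
#include <vector>

static const size_t kInvalidIndex = static_cast<size_t>(-1);

// Find the first run of `count` set bits (free segments), clear the run to
// mark it as in use, and return its starting index; kInvalidIndex if none.
static size_t FindAndClaimRun(std::vector<bool>& freeBits, size_t count)
{
    for (size_t start = 0; start + count <= freeBits.size(); ++start)
    {
        size_t run = 0;
        while (run < count && freeBits[start + run])
        {
            ++run;
        }
        if (run == count)
        {
            for (size_t i = 0; i < count; ++i)
            {
                freeBits[start + i] = false;   // segment now committed
            }
            return start;
        }
        start += run;   // jump past the bit that broke the run
    }
    return kInvalidIndex;
}

int main()
{
    std::vector<bool> freeBits(8, true);                  // 8 free segments
    std::printf("%zu\n", FindAndClaimRun(freeBits, 3));   // prints 0
    std::printf("%zu\n", FindAndClaimRun(freeBits, 3));   // prints 3
    return 0;
}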