// Allocate `size` bytes from this region through KFD on the owning agent's
// node. On success writes the pointer to *address and returns
// HSA_STATUS_SUCCESS; on failure *address is left/reset to NULL and an error
// status is returned.
hsa_status_t MemoryRegion::Allocate(size_t size, void** address) const {
    if (address == NULL) {
        return HSA_STATUS_ERROR_INVALID_ARGUMENT;
    }

    // Resolve the KFD node id from the owning agent (GPU or CPU).
    const HSAuint32 node_id =
        (owner_->device_type() == core::Agent::kAmdGpuDevice)
        ? static_cast<const amd::GpuAgent*>(owner_)->node_id()
        : static_cast<const amd::CpuAgent*>(owner_)->node_id();

    void* const ptr = amd::AllocateKfdMemory(mem_flag_, node_id, size);
    *address = ptr;

    if (ptr == NULL) {
        return HSA_STATUS_ERROR_OUT_OF_RESOURCES;
    }

    if (IsSystem()) {
        // NOTE(review): the result is ignored here, unlike the local-memory
        // path below — presumably residency of system memory is best-effort;
        // confirm this asymmetry is intentional.
        amd::MakeKfdMemoryResident(ptr, size);
        return HSA_STATUS_SUCCESS;
    }

    if (IsLocalMemory()) {
        // TODO: remove immediate pinning on local memory when HSA API to
        // explicitly unpin memory is available.
        if (!amd::MakeKfdMemoryResident(ptr, size)) {
            // Pinning failed: release the allocation and report exhaustion.
            amd::FreeKfdMemory(ptr, size);
            *address = NULL;
            return HSA_STATUS_ERROR_OUT_OF_RESOURCES;
        }
    }

    return HSA_STATUS_SUCCESS;
}
// Construct a memory region owned by `owner`, deriving allocation flags,
// the per-allocation size cap, and the virtual address-space size from the
// region kind (device-local vs. system memory) reported in `mem_props`.
MemoryRegion::MemoryRegion(bool fine_grain, const core::Agent& owner,
                           const HsaMemoryProperties& mem_props)
    : core::MemoryRegion(fine_grain),
      owner_(&owner),
      mem_props_(mem_props),
      max_single_alloc_size_(0),
      virtual_size_(0) {
    virtual_size_ = GetPhysicalSize();

    mem_flag_.Value = 0;
    if (IsLocalMemory()) {
        // Device-local (VRAM): pinned, not host accessible.
        mem_flag_.ui32.PageSize = HSA_PAGE_SIZE_4KB;
        mem_flag_.ui32.NoSubstitute = 1;
        mem_flag_.ui32.HostAccess = 0;
        mem_flag_.ui32.NonPaged = 1;

        // Optional user override of the single-allocation cap; the parsed
        // value (0 if the variable is unset/non-numeric) is clamped into
        // [physical/4, physical].
        char* end_ptr = NULL;
        HSAuint64 requested = static_cast<HSAuint64>(strtoull(
            os::GetEnvVar("HSA_LOCAL_MEMORY_MAX_ALLOC").c_str(), &end_ptr, 10));
        requested = std::max(requested, GetPhysicalSize() / 4);
        requested = std::min(requested, GetPhysicalSize());

        max_single_alloc_size_ =
            AlignDown(static_cast<size_t>(requested), kPageSize_);

        // Fixed 1 TiB GPU virtual address space.
        static const HSAuint64 kGpuVmSize = (1ULL << 40);
        virtual_size_ = kGpuVmSize;
    } else if (IsSystem()) {
        // Host system memory: pageable, host accessible, cached.
        mem_flag_.ui32.PageSize = HSA_PAGE_SIZE_4KB;
        mem_flag_.ui32.NoSubstitute = 1;
        mem_flag_.ui32.HostAccess = 1;
        mem_flag_.ui32.CachePolicy = HSA_CACHING_CACHED;

        // System allocations may span the whole physical size.
        max_single_alloc_size_ =
            AlignDown(static_cast<size_t>(GetPhysicalSize()), kPageSize_);

        virtual_size_ = os::GetUserModeVirtualMemorySize();
    }

    // Sanity-check the derived sizes.
    assert(GetVirtualSize() != 0);
    assert(GetPhysicalSize() <= GetVirtualSize());
    assert(IsMultipleOf(max_single_alloc_size_, kPageSize_));
}
/* readonly attribute bool isSystem; */
NS_IMETHODIMP
nsPerformanceGroupDetails::GetIsSystem(bool *_retval) {
  // Validate the out-param before dereferencing, matching the argument
  // validation style used elsewhere in this file (MemoryRegion::Allocate
  // rejects a NULL address) and XPCOM's convention for getters.
  if (!_retval) {
    return NS_ERROR_INVALID_POINTER;
  }
  *_retval = IsSystem();
  return NS_OK;
}