/*!
******************************************************************************

 @Function                SYSDEVU_GetCpuKmAddr

******************************************************************************/
IMG_RESULT SYSDEVU_GetCpuKmAddr(
    SYSDEVU_sInfo *      hSysDevHandle,
    SYSDEVU_eRegionId    eRegionId,
    IMG_VOID **          ppvCpuKmAddr,
    IMG_UINT32 *         pui32Size
)
{
    SYSDEVU_sInfo *     psDevice = hSysDevHandle;
    IMG_PHYSADDR        paCpuPhysAddr;
    IMG_UINT32          ui32Result;
    IMG_HANDLE          hRegHandle;

    IMG_ASSERT(hSysDevHandle != IMG_NULL);
    if (hSysDevHandle == IMG_NULL)
    {
        return IMG_ERROR_GENERIC_FAILURE;
    }

    ui32Result = SYSDEVU_GetCpuAddrs(psDevice, eRegionId, ppvCpuKmAddr, &paCpuPhysAddr, pui32Size);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        return ui32Result;
    }

    /* Add this to the list of mappable regions...*/
    if (!SYSBRGU_GetMappableRegion(paCpuPhysAddr))
    {
        ui32Result = SYSBRGU_CreateMappableRegion(paCpuPhysAddr, *pui32Size,
                                                  SYS_MEMATTRIB_UNCACHED, NULL, &hRegHandle);
        IMG_ASSERT(ui32Result == IMG_SUCCESS);
        if (ui32Result != IMG_SUCCESS)
        {
            return ui32Result;
        }
    }

    return IMG_SUCCESS;
}
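/*!
******************************************************************************

 @Function                AllocPages

******************************************************************************/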
static IMG_RESULT AllocPages(
    SYSMEM_Heap *     heap,
    IMG_UINT32        ui32Size,
    SYSMEMU_sPages *  psPages,
    SYS_eMemAttrib    eMemAttrib
)
{
    IMG_UINT32    ui32NoPages;
    IMG_UINT32    ui32ExamPages;
    IMG_UINT32    i;
    IMG_PHYSADDR  paCpuPhysAddr;
    IMG_UINT32    ui32Result;
    size_t        physAddrArrSize;

    struct priv_params *  prv = (struct priv_params *)heap->priv;

    /* Lock out interrupts while we search and update the allocation pool...*/
    SYSOSKM_DisableInt();

    /* Calculate required no. of pages...*/
    ui32NoPages = (ui32Size + (HOST_MMU_PAGE_SIZE-1)) / HOST_MMU_PAGE_SIZE;

    /* Loop over allocated pages until we find an unallocated slot big enough for this allocation...*/
    ui32ExamPages = 0;
    while (ui32ExamPages < prv->npages)
    {
        /* If the current page is not allocated and we might have enough remaining to make this allocation...*/
        if (
                (!prv->alloc_pool[prv->cur_index]) &&
                ((prv->cur_index + ui32NoPages) <= prv->npages)
            )
        {
            /* Can we make this allocation...*/
            for (i=0; i<ui32NoPages; i++)
            {
                if (prv->alloc_pool[prv->cur_index+i])
                {
                    break;
                }
            }
            if (i == ui32NoPages)
            {
                /* Yes, mark pages as allocated...*/
                for (i=0; i<ui32NoPages; i++)
                {
                    prv->alloc_pool[prv->cur_index+i] = IMG_TRUE;
                }

                /* Calculate the memory address of the start of the allocation...*/
                psPages->pvImplData = (IMG_VOID *)(prv->vstart + (prv->cur_index * HOST_MMU_PAGE_SIZE));

                /* Update the current page index....*/
                prv->cur_index += ui32NoPages;
                if (prv->cur_index >= prv->npages)
                {
                    prv->cur_index = 0;
                }
                break;
            }
        }

        /* Update examined pages and page index...*/
        ui32ExamPages++;
        prv->cur_index++;
        if (prv->cur_index >= prv->npages)
        {
            prv->cur_index = 0;
        }
    }
    SYSOSKM_EnableInt();

    /* Check if allocation failed....*/
    IMG_ASSERT(ui32ExamPages < prv->npages);
    if (ui32ExamPages >= prv->npages)
    {
        /* Failed...*/
        /* dump some fragmentation information */
        int i = 0;
        int nAllocated = 0;
        int n64kBlocks  = 0;    // free blocks of <16 consecutive pages (<64k)
        int n128kBlocks = 0;    // free blocks of 16-31 consecutive pages (<128k)
        int n256kBlocks = 0;    // free blocks of 32-63 consecutive pages (<256k)
        int nBigBlocks  = 0;    // free blocks of >=64 consecutive pages (>=256k)
        int nMaxBlocks  = 0;    // size in pages of the largest free block
        int nPages = 0;
        for(i = 0; i < (int)prv->npages; i++)
        {
            IMG_UINT8 isallocated = prv->alloc_pool[i];
            nPages++;
            if(i == prv->npages-1 || isallocated != prv->alloc_pool[i+1])
            {
                if(isallocated)
                {
                    nAllocated += nPages;
                }
                else
                {
                    if(nPages < 16)
                        n64kBlocks++;
                    else if(nPages < 32)
                        n128kBlocks++;
                    else if(nPages < 64)
                        n256kBlocks++;
                    else
                        nBigBlocks++;
                    /* Track the largest free run seen so far */
                    if(nMaxBlocks < nPages)
                        nMaxBlocks = nPages;
                }
                nPages = 0;
            }
        }
#ifdef printk
        /* hopefully, this will give some idea of the fragmentation of the memory */
        printk("AllocPages unable to allocate memory\n");
        printk("  number of available memory areas under 64k: %d\n", n64kBlocks);
        printk("  number of available memory areas under 128k: %d\n", n128kBlocks);
        printk("  number of available memory areas under 256k: %d\n", n256kBlocks);
        printk("  number of available memory areas over 256k: %d\n", nBigBlocks);
        printk("  largest available memory area: %dk\n", nMaxBlocks*4);
        printk("  total allocated memory: %dk/%dk\n", nAllocated*4, prv->npages*4);
#endif

        return IMG_ERROR_OUT_OF_MEMORY;
    }

    paCpuPhysAddr = CpuKmAddrToCpuPAddr(heap, psPages->pvImplData);
    IMG_ASSERT(paCpuPhysAddr != 0);
    if (paCpuPhysAddr == 0)
    {
        return IMG_ERROR_GENERIC_FAILURE;
    }

#ifdef CONFIG_ARM
    /* This flushes the outer cache in ARM, so we avoid memory corruption by late
       flushes of memory previously marked as cached. */
    if ((eMemAttrib & SYS_MEMATTRIB_CACHED) == 0) {
        mb();
        /* the following two calls are somewhat expensive, but are there for defensive reasons */
        flush_cache_all();
        outer_flush_all();
    }
#endif
    {
        IMG_PHYSADDR *  ppaCpuPhysAddrs;
        size_t          pg_i, offset;

        // Memory for the per-page physical addresses
        physAddrArrSize = sizeof(*ppaCpuPhysAddrs) * ui32NoPages;
        ppaCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
        if (!ppaCpuPhysAddrs)
        {
            return IMG_ERROR_OUT_OF_MEMORY;
        }
        // The allocation is physically contiguous, so page addresses follow on from the base
        for (pg_i = 0, offset = 0; pg_i < ui32NoPages; offset += HOST_MMU_PAGE_SIZE, ++pg_i)
        {
            ppaCpuPhysAddrs[pg_i] = paCpuPhysAddr + offset;
        }
        // Set pointer to physical address in structure
        psPages->ppaPhysAddr = ppaCpuPhysAddrs;
    }
    /* Add this to the list of mappable regions...*/
    ui32Result = SYSBRGU_CreateMappableRegion(paCpuPhysAddr, ui32Size, eMemAttrib, psPages, &psPages->hRegHandle);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS) 
    {
        goto error_mappable_region;
    }

#if defined (CLEAR_PAGES)
    if (psPages->pvImplData)
    {
        IMG_MEMSET(psPages->pvImplData, 0, ui32Size);
    }
#endif

    return IMG_SUCCESS;

    /* Error handling. */
error_mappable_region:
    IMG_BIGORSMALL_FREE(physAddrArrSize, psPages->ppaPhysAddr);
    psPages->ppaPhysAddr = IMG_NULL;

    return ui32Result;
}
/*!
******************************************************************************

 @Function                AllocPages

******************************************************************************/
static IMG_RESULT AllocPages(
    SYSMEM_Heap *     heap,
    IMG_UINT32        ui32Size,
    SYSMEMU_sPages *  psPages,
    SYS_eMemAttrib    eMemAttrib
)
{
    IMG_UINT32           Res;
    struct ion_handle *  ion_handle;
    unsigned             allocFlags;
    struct ion_client *  ion_client;
    IMG_UINT64 *         pCpuPhysAddrs;
    size_t               numPages;
    size_t               physAddrArrSize;

    ion_client = (struct ion_client *)heap->priv;

    if (   (eMemAttrib & SYS_MEMATTRIB_WRITECOMBINE)
        || (eMemAttrib & SYS_MEMATTRIB_UNCACHED))
    {
        allocFlags = 0;
    } else {
        allocFlags = ION_FLAG_CACHED;
    }

    if (eMemAttrib == SYS_MEMATTRIB_UNCACHED)
        REPORT(REPORT_MODULE_SYSMEM, REPORT_WARNING,
               "Purely uncached memory is not supported by ION");

    // PAGE_SIZE alignment; the backing heap depends on the platform
    ion_handle = ion_alloc(ion_client, ui32Size, PAGE_SIZE,
                           ION_HEAP_SYSTEM_MASK, allocFlags);
    // ion_alloc reports failure with an ERR_PTR-encoded value, not NULL
    if (IS_ERR_OR_NULL(ion_handle)) {
        REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR,
               "Error allocating %u bytes from ion", ui32Size);
        Res = IMG_ERROR_OUT_OF_MEMORY;
        goto errAlloc;
    }

    /* Find out physical addresses in the mappable region */
    numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1)/HOST_MMU_PAGE_SIZE;

    physAddrArrSize = sizeof(*pCpuPhysAddrs) * numPages;
    pCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
    if (!pCpuPhysAddrs) {
        Res = IMG_ERROR_OUT_OF_MEMORY;
        goto errPhysArrAlloc;
    }

    {
        struct scatterlist *psScattLs, *psScattLsAux;
        struct sg_table *psSgTable;
        size_t pg_i = 0;

        psSgTable = ion_sg_table(ion_client, ion_handle);
        // ion_sg_table can also return an ERR_PTR-encoded value
        if (IS_ERR_OR_NULL(psSgTable))
        {
            REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining sg table");
            Res = IMG_ERROR_FATAL;
            goto errGetPhys;
        }
        psScattLs = psSgTable->sgl;

        if (psScattLs == NULL)
        {
            REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining scatter list");
            Res = IMG_ERROR_FATAL;
            goto errGetPhys;
        }

        // Get physical addresses from scatter list
        for (psScattLsAux = psScattLs; psScattLsAux; psScattLsAux = sg_next(psScattLsAux))
        {
            unsigned int offset;
            dma_addr_t chunkBase = sg_phys(psScattLsAux);

            for (offset = 0; offset < psScattLsAux->length; offset += PAGE_SIZE, ++pg_i)
            {
                if (pg_i >= numPages)
                    break;

                pCpuPhysAddrs[pg_i] = chunkBase + offset;
            }
            if (pg_i >= numPages)
                break;
        }
    }

    // Set pointer to physical address in structure
    psPages->ppaPhysAddr = pCpuPhysAddrs;

    DEBUG_REPORT(REPORT_MODULE_SYSMEM, "%s region of size %u phys 0x%llx",
                 __FUNCTION__, ui32Size, psPages->ppaPhysAddr[0]);

    Res = SYSBRGU_CreateMappableRegion(psPages->ppaPhysAddr[0], ui32Size, eMemAttrib,
                                       psPages, &psPages->hRegHandle);
    if (Res != IMG_SUCCESS) {
        REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR,
               "Error %u in SYSBRGU_CreateMappableRegion", Res);
        goto errCreateMapRegion;
    }

    psPages->pvImplData = ion_handle;

    return IMG_SUCCESS;

errCreateMapRegion:
errGetPhys:
    IMG_BIGORSMALL_FREE(physAddrArrSize, pCpuPhysAddrs);
errPhysArrAlloc:
    /* No kernel mapping was created above, so only the handle needs freeing */
    ion_free(ion_client, ion_handle);
errAlloc:
    return Res;
}
/*!
******************************************************************************

 @Function                SYSMEMKM_AllocPages

******************************************************************************/
static IMG_RESULT AllocPages(
    SYSMEM_Heap *     heap,
    IMG_UINT32        ui32Size,
    SYSMEMU_sPages *  psPages,
    SYS_eMemAttrib    eMemAttrib
)
{
    struct priv_params *  prv = (struct priv_params *)heap->priv;
    IMG_PHYSADDR          paCpuPhysAddr, paOffset;
    IMG_RESULT            ui32Result;
    IMG_PHYSADDR *        ppaCpuPhysAddrs;
    size_t                numPages, pg_i;
    size_t                physAddrArrSize;

    /* Allocate the required space from the gen_pool; gen_pool_alloc returns 0 on failure...*/
    psPages->pvImplData = (IMG_VOID *)gen_pool_alloc(prv->pool, ui32Size);
    if (psPages->pvImplData == IMG_NULL)
    {
        return IMG_ERROR_OUT_OF_MEMORY;
    }

    paCpuPhysAddr = CpuKmAddrToCpuPAddr(heap, psPages->pvImplData);
    IMG_ASSERT(paCpuPhysAddr != 0);
    if (paCpuPhysAddr == 0)
    {
        ui32Result = IMG_ERROR_GENERIC_FAILURE;
        goto error_map;
    }

    numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1)/HOST_MMU_PAGE_SIZE;
    physAddrArrSize = sizeof(*ppaCpuPhysAddrs) * numPages;
    ppaCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
    if (!ppaCpuPhysAddrs)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_array_alloc;
    }
    for (pg_i = 0, paOffset = 0; pg_i < numPages; paOffset += HOST_MMU_PAGE_SIZE, ++pg_i)
    {
        ppaCpuPhysAddrs[pg_i] = paCpuPhysAddr + paOffset;
    }
    psPages->ppaPhysAddr = ppaCpuPhysAddrs;

    /* Add this to the list of mappable regions...*/
    ui32Result = SYSBRGU_CreateMappableRegion(psPages->ppaPhysAddr[0], ui32Size, eMemAttrib,
                                              IMG_FALSE, psPages, &psPages->hRegHandle);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        goto error_region_create;
    }

#if defined (CLEAR_PAGES)
    IMG_MEMSET( psPages->pvImplData, 0, ui32Size);
#endif

    return IMG_SUCCESS;

    /* Error handling. */
error_region_create:
    IMG_BIGORSMALL_FREE(physAddrArrSize, psPages->ppaPhysAddr);
error_array_alloc:
error_map:
    gen_pool_free(prv->pool, (unsigned long)psPages->pvImplData, ui32Size);

    psPages->ppaPhysAddr = IMG_NULL;
    psPages->hRegHandle = IMG_NULL;

    return ui32Result;
}
/*!
******************************************************************************

 @Function                SYSMEMKM_AllocPages

******************************************************************************/
static IMG_RESULT AllocPages(
    SYSMEM_Heap *     heap,
    IMG_UINT32        ui32Size,
    SYSMEMU_sPages *  psPages,
    SYS_eMemAttrib    eMemAttrib
)
{
    IMG_UINT32 Res;
    dma_addr_t dma;
    unsigned numPages, pg_i;
    IMG_UINT64 *pCpuPhysAddrs;
    IMG_VOID **pCpuKernAddrs = IMG_NULL;
    size_t physAddrArrSize;

    // This heap only supports uncached | write-combined memory allocations
    IMG_ASSERT(eMemAttrib == (SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE));
    eMemAttrib = SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE;

    numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1)/HOST_MMU_PAGE_SIZE;

    // Memory for physical addresses
    physAddrArrSize = sizeof(*pCpuPhysAddrs) * numPages;
    pCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
    if (!pCpuPhysAddrs) {
        Res = IMG_ERROR_OUT_OF_MEMORY;
        goto errPhysAddrsAlloc;
    }

    psPages->pvCpuKmAddr = dma_alloc_coherent(NULL, ui32Size, &dma, GFP_KERNEL | __GFP_HIGHMEM);
    if (!psPages->pvCpuKmAddr) {
        pCpuKernAddrs = IMG_BIGORSMALL_ALLOC(numPages * sizeof(*pCpuKernAddrs));
        if (!pCpuKernAddrs) {
            Res = IMG_ERROR_OUT_OF_MEMORY;
            goto errKernAddrsAlloc;
        }

        for (pg_i = 0; pg_i < numPages; ++pg_i) {
            pCpuKernAddrs[pg_i] = dma_alloc_coherent(NULL, PAGE_SIZE, &dma, GFP_KERNEL | __GFP_HIGHMEM);
            if (!pCpuKernAddrs[pg_i]) {
                Res = IMG_ERROR_OUT_OF_MEMORY;
                goto errPageAlloc;
            }

            pCpuPhysAddrs[pg_i] = VAL64(dma);
        }
        /* Tag the low bit so the free path can tell this page-by-page
           allocation apart from a single contiguous one */
        psPages->pvImplData = (IMG_VOID *)((long)pCpuKernAddrs | 1);
    } else {
        dma_addr_t paddr = dma;

        psPages->pvImplData = (IMG_VOID *)paddr;

        /* Contiguous allocation: page addresses follow on from the base */
        for (pg_i = 0; pg_i < numPages; ++pg_i) {
            pCpuPhysAddrs[pg_i] = VAL64(paddr + (PAGE_SIZE * pg_i));
        }
    }

    // Set pointer to physical address in structure
    psPages->ppaPhysAddr = pCpuPhysAddrs;

    Res = SYSBRGU_CreateMappableRegion(psPages->ppaPhysAddr[0], ui32Size, eMemAttrib,
                                       IMG_TRUE, psPages, &psPages->hRegHandle);
    DEBUG_REPORT(REPORT_MODULE_SYSMEM, "%s (unified) region of size %u phys 0x%llx",
                 __FUNCTION__, ui32Size, psPages->ppaPhysAddr[0]);
    IMG_ASSERT(Res == IMG_SUCCESS);
    if (Res != IMG_SUCCESS)
    {
        goto errCreateMapRegion;
    }

    return IMG_SUCCESS;

errCreateMapRegion:
errPageAlloc:
    if (pCpuKernAddrs) {
        /* Page-by-page path: free the pages allocated so far (pg_i indexes the
           first slot that was not allocated, or numPages if all succeeded) */
        while (pg_i > 0) {
            --pg_i;
            dma_free_coherent(NULL, PAGE_SIZE, pCpuKernAddrs[pg_i], (dma_addr_t)pCpuPhysAddrs[pg_i]);
        }
        IMG_BIGORSMALL_FREE(numPages * sizeof(*pCpuKernAddrs), pCpuKernAddrs);
    } else {
        /* Contiguous path: release the single coherent allocation */
        dma_free_coherent(NULL, ui32Size, psPages->pvCpuKmAddr, dma);
    }
errKernAddrsAlloc:
    IMG_BIGORSMALL_FREE(physAddrArrSize, pCpuPhysAddrs);
errPhysAddrsAlloc:
    return Res;
}