/*****************************************************************************
 @Function              FreePages

 @Description           Releases a page allocation obtained from this heap.
                        Handles three cases: buffers imported from a dma-buf
                        fd (only the ION reference / kernel mapping is
                        dropped), duplicated handles (nothing to free), and
                        locally allocated buffers (mappable region, phys-addr
                        array and the ION allocation are all released).

 @Input     heap          : Heap the pages belong to (priv holds ion_client).
 @Input     hPagesHandle  : SYSMEMU_sPages handle to release.
******************************************************************************/
static IMG_VOID FreePages(SYSMEM_Heap *heap, IMG_HANDLE hPagesHandle)
{
    size_t numPages;
    SYSMEMU_sPages *psPages = hPagesHandle;
    struct ion_client *ion_client;
    struct ion_handle *ion_handle;

    ion_client = heap->priv;

    if (psPages->bImported) {
        /* Imported via ion_import_dma_buf(): drop the kernel mapping (if
         * any) and the ION reference, nothing else belongs to us. */
        struct ion_client *pIONcl;
        struct ion_handle *ionHandle;

        DEBUG_REPORT(REPORT_MODULE_SYSMEM, "Releasing ion_handle 0x%p",
                     psPages->pvImplData);
        IMG_ASSERT(psPages->pvImplData);
        ionHandle = (struct ion_handle *)psPages->pvImplData;

        pIONcl = get_ion_client();
        if (pIONcl) {
#if defined(ION_SYSTEM_HEAP)
            if (psPages->pvCpuKmAddr)
                ion_unmap_kernel(pIONcl, ionHandle);
#endif
            ion_free(pIONcl, ionHandle);
            /* NOTE: a dead `IMG_RESULT result` local was removed here; it
             * was assigned on both branches but never read. */
        } else {
            REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR,
                   "Releasing cannot find ION client");
        }
        return;
    }

    if (psPages->bDuplicated)
        return;

    /* Remove from the list of mappable regions */
    SYSBRGU_DestroyMappableRegion(psPages->hRegHandle);

    /* Free array with physical addresses */
    numPages = (psPages->ui32Size + HOST_MMU_PAGE_SIZE - 1) / HOST_MMU_PAGE_SIZE;
    IMG_BIGORSMALL_FREE(numPages * sizeof(*psPages->ppaPhysAddr),
                        psPages->ppaPhysAddr);

    /* Free memory */
    ion_handle = psPages->pvImplData;
    if (psPages->pvCpuKmAddr)
        ion_unmap_kernel(ion_client, ion_handle);
    ion_free(ion_client, ion_handle);
}
/*!
******************************************************************************
 @Function              RESOURCE_ListRemove

 @Description           Removes the element wrapping pvItem from the list and
                        frees the list element (the item itself is untouched).
                        Warns if the element is still referenced.

 @Input     psList : List to search.
 @Input     pvItem : Item whose element should be removed.

 @Return    IMG_RESULT : IMG_SUCCESS, IMG_ERROR_INVALID_PARAMETERS, or
                         IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE when not found.
******************************************************************************/
IMG_RESULT RESOURCE_ListRemove(
    LST_T    *psList,
    IMG_VOID *pvItem
)
{
    RESOURCE_sListElem *psElem;

    /* Validate parameters. */
    IMG_ASSERT(IMG_NULL != psList);
    IMG_ASSERT(IMG_NULL != pvItem);
    if (psList == IMG_NULL || pvItem == IMG_NULL)
        return IMG_ERROR_INVALID_PARAMETERS;

    /* Walk the list looking for the element that wraps pvItem. */
    for (psElem = LST_first(psList); psElem != IMG_NULL;
         psElem = LST_next(psElem)) {
        if (psElem->pvItem != pvItem)
            continue;

        if (*psElem->pui32RefCount != 0) {
            REPORT(REPORT_MODULE_RESOURCE, REPORT_WARNING,
                   "Element removed from list whilst still in use");
        }

        /* Unlink the element and release it. */
        LST_remove(psList, psElem);
        IMG_FREE(psElem);
        return IMG_SUCCESS;
    }

    DEBUG_REPORT(REPORT_MODULE_RESOURCE,
                 "Item could not be located to remove from RESOURCE list");
    return IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
}
/*! ****************************************************************************** @Function ImportPages ******************************************************************************/ static IMG_RESULT ImportPages( SYSMEM_Heap *heap, SYSDEVU_sInfo *sysdev, IMG_UINT32 ui32Size, SYSMEMU_sPages *psPages, SYS_eMemAttrib eMemAttrib, IMG_INT32 buff_fd, IMG_UINT64 *pPhyAddrs, IMG_VOID *priv, IMG_BOOL kernelMapped ) { size_t numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1)/HOST_MMU_PAGE_SIZE; struct ion_handle *ionHandle; IMG_RESULT result = IMG_ERROR_FATAL; unsigned pg_i = 0; struct ion_client *pIONcl; DEBUG_REPORT(REPORT_MODULE_SYSMEM, "Importing buff_fd %d of size %u", buff_fd, ui32Size); pIONcl = get_ion_client(); if (!pIONcl) goto exitFailGetClient; ionHandle = ion_import_dma_buf(pIONcl, buff_fd); if (IS_ERR(ionHandle)) { REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining handle from fd %d", buff_fd); result = IMG_ERROR_FATAL; goto exitFailImportFD; } psPages->pvImplData = ionHandle; #if defined(ION_SYSTEM_HEAP) { struct scatterlist *psScattLs, *psScattLsAux; struct sg_table *psSgTable; psSgTable = ion_sg_table(pIONcl, ionHandle); if (psSgTable == NULL) { REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining sg table"); result = IMG_ERROR_FATAL; goto exitFailMap; } psScattLs = psSgTable->sgl; if (psScattLs == NULL) { REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining scatter list"); result = IMG_ERROR_FATAL; goto exitFailMap; } // Get physical addresses from scatter list for (psScattLsAux = psScattLs; psScattLsAux; psScattLsAux = sg_next(psScattLsAux)) { int offset; dma_addr_t chunkBase = sg_phys(psScattLsAux); for (offset = 0; offset < psScattLsAux->length; offset += PAGE_SIZE, ++pg_i) { if (pg_i >= numPages) break; pPhyAddrs[pg_i] = chunkBase + offset; } if (pg_i >= numPages) break; } if (kernelMapped) psPages->pvCpuKmAddr = ion_map_kernel(pIONcl, ionHandle); } #else { int offset; ion_phys_addr_t physaddr; size_t len = 0; result = ion_phys(pIONcl, 
ionHandle, &physaddr, &len); if(result) { IMG_ASSERT(!"ion_phys failed"); result = IMG_ERROR_FATAL; goto exitFailMap; } for (offset = 0; pg_i < numPages; offset += PAGE_SIZE, ++pg_i) { if (pg_i >= numPages) break; pPhyAddrs[pg_i] = physaddr + offset; } if (kernelMapped) psPages->pvCpuKmAddr = SYSMEMU_CpuPAddrToCpuKmAddr(heap->memId, physaddr); } #endif { size_t physAddrArrSize = numPages * sizeof(psPages->ppaPhysAddr[0]); size_t phy_i; psPages->ppaPhysAddr = IMG_BIGORSMALL_ALLOC(physAddrArrSize); IMG_ASSERT(psPages->ppaPhysAddr != IMG_NULL); if (psPages->ppaPhysAddr == IMG_NULL) { return IMG_ERROR_OUT_OF_MEMORY; } for (phy_i = 0; phy_i < numPages; ++phy_i) psPages->ppaPhysAddr[phy_i] = pPhyAddrs[phy_i]; } if (kernelMapped && psPages->pvCpuKmAddr == NULL) { REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error mapping to kernel address"); result = IMG_ERROR_FATAL; goto exitFailMapKernel; } result = IMG_SUCCESS; exitFailMapKernel: exitFailMap: exitFailImportFD: exitFailGetClient: return result; }
/***************************************************************************** @Function AllocPages ******************************************************************************/ static IMG_RESULT AllocPages( SYSMEM_Heap * heap, IMG_UINT32 ui32Size, SYSMEMU_sPages * psPages, SYS_eMemAttrib eMemAttrib ) { IMG_UINT32 Res; struct ion_handle * ion_handle; unsigned allocFlags; struct ion_client * ion_client; IMG_UINT64 * pCpuPhysAddrs; size_t numPages; size_t physAddrArrSize; ion_client = (struct ion_client *)heap->priv; if ( (eMemAttrib & SYS_MEMATTRIB_WRITECOMBINE) || (eMemAttrib & SYS_MEMATTRIB_UNCACHED)) { allocFlags = 0; } else { allocFlags = ION_FLAG_CACHED; } if (eMemAttrib == SYS_MEMATTRIB_UNCACHED) REPORT(REPORT_MODULE_SYSMEM, REPORT_WARNING, "Purely uncached memory is not supported by ION"); // PAGE_SIZE aligment, heap depends on platform ion_handle = ion_alloc(ion_client, ui32Size, PAGE_SIZE, ION_HEAP_SYSTEM_MASK, allocFlags); if (!ion_handle) { REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error allocating %u bytes from ion", ui32Size); Res = IMG_ERROR_OUT_OF_MEMORY; goto errAlloc; } /* Find out physical addresses in the mappable region */ numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1)/HOST_MMU_PAGE_SIZE; physAddrArrSize = sizeof *pCpuPhysAddrs * numPages; pCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize); if (!pCpuPhysAddrs) { Res = IMG_ERROR_OUT_OF_MEMORY; goto errPhysArrAlloc; } { struct scatterlist *psScattLs, *psScattLsAux; struct sg_table *psSgTable; size_t pg_i = 0; psSgTable = ion_sg_table(ion_client, ion_handle); if (psSgTable == NULL) { REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining sg table"); Res = IMG_ERROR_FATAL; goto errGetPhys; } psScattLs = psSgTable->sgl; if (psScattLs == NULL) { REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining scatter list"); Res = IMG_ERROR_FATAL; goto errGetPhys; } // Get physical addresses from scatter list for (psScattLsAux = psScattLs; psScattLsAux; psScattLsAux = sg_next(psScattLsAux)) { int 
offset; dma_addr_t chunkBase = sg_phys(psScattLsAux); for (offset = 0; offset < psScattLsAux->length; offset += PAGE_SIZE, ++pg_i) { if (pg_i >= numPages) break; //pCpuPhysAddrs[pg_i] = dma_map_page(NULL, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); pCpuPhysAddrs[pg_i] = chunkBase + offset; } if (pg_i >= numPages) break; } } // Set pointer to physical address in structure psPages->ppaPhysAddr = pCpuPhysAddrs; DEBUG_REPORT(REPORT_MODULE_SYSMEM, "%s region of size %u phys 0x%llx", __FUNCTION__, ui32Size, psPages->ppaPhysAddr[0]); Res = SYSBRGU_CreateMappableRegion(psPages->ppaPhysAddr[0], ui32Size, eMemAttrib, psPages, &psPages->hRegHandle); if (Res != IMG_SUCCESS) { REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error %u in SYSBRGU_CreateMappableRegion", Res); goto errCreateMapRegion; } psPages->pvImplData = ion_handle; return IMG_SUCCESS; errCreateMapRegion: errGetPhys: IMG_BIGORSMALL_FREE(numPages*sizeof(*pCpuPhysAddrs), pCpuPhysAddrs); errPhysArrAlloc: ion_unmap_kernel(ion_client, ion_handle); ion_free(ion_client, ion_handle); errAlloc: return Res; }
/*!
******************************************************************************
 @Function              RESOURCE_ListRemoveNextAvail

 @Description           Removes the first available (unreferenced) element
                        from the list. The wrapped item is freed through the
                        supplied callback, or with IMG_FREE when no callback
                        is given; the list element itself is always freed.

 @Input     psList        : List to search.
 @Input     pfnFreeItem   : Optional callback used to free the item.
 @Input     pvFreeCbParam : Opaque parameter passed to the callback.

 @Return    IMG_RESULT : IMG_SUCCESS, IMG_ERROR_INVALID_PARAMETERS, or
                         IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE if none found.
******************************************************************************/
IMG_RESULT RESOURCE_ListRemoveNextAvail(
    LST_T                *psList,
    RESOURCE_pfnFreeItem  pfnFreeItem,
    IMG_VOID             *pvFreeCbParam
)
{
    RESOURCE_sListElem *psElem;
    IMG_UINT32 ui32Result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;

    /* Validate parameters. */
    IMG_ASSERT(IMG_NULL != psList);
    if (psList == IMG_NULL)
        return IMG_ERROR_INVALID_PARAMETERS;

    /* Walk the list looking for the first element not currently in use. */
    for (psElem = LST_first(psList); psElem != IMG_NULL;
         psElem = LST_next(psElem)) {
        if (!RESOURCE_ItemIsAvailable(psElem->pui32RefCount))
            continue;

        /* Give back our reference; free the item once unreferenced. */
        RESOURCE_ItemReturn(psElem->pui32RefCount);
        if (psElem->pui32RefCount == IMG_NULL ||
            *psElem->pui32RefCount == 0) {
            if (pfnFreeItem)
                pfnFreeItem(psElem->pvItem, pvFreeCbParam);
            else
                IMG_FREE(psElem->pvItem);
            psElem->pvItem = IMG_NULL;
        }

        /* Unlink and release the list element itself. */
        LST_remove(psList, psElem);
        IMG_FREE(psElem);
        ui32Result = IMG_SUCCESS;
        break;
    }

    if (ui32Result == IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE) {
        DEBUG_REPORT(REPORT_MODULE_RESOURCE,
                     "Failed to locate an available RESOURCE element to remove");
    }

    return ui32Result;
}
/*!
******************************************************************************
 @Function              PALLOC_Import1

 @Description           Imports an externally allocated buffer into a device
                        attachment: an ION/dma-buf fd when buff_fd >= 0,
                        otherwise a user-space buffer whose pages are locked.
                        The pages are imported into the memory subsystem,
                        registered with the resource manager, and the per-page
                        device addresses are copied back to user space.

 @Input     ui32AttachId : Attachment id of the device attachment.
 @Input     eMemAttrib   : Memory attributes for the import.
 @Input     buff_fd      : dma-buf fd, or negative for a user-space buffer.
 @Input     psUmAlloc    : User-space allocation descriptor (in/out).

 @Return    IMG_RESULT  : IMG_SUCCESS or an error code.
******************************************************************************/
IMG_RESULT PALLOC_Import1(
    IMG_UINT32              ui32AttachId,
    SYS_eMemAttrib          eMemAttrib,
    int                     buff_fd,
    PALLOC_sUmAlloc __user *psUmAlloc
)
{
    IMG_HANDLE hDevHandle;
    IMG_UINT32 ui32Result;
    PALLOC_sKmAlloc *psKmAlloc;
    IMG_HANDLE hAttachHandle;
    PALLOC_sAttachContext *psAttachContext;
    IMG_UINT32 ui32PageNo;
    IMG_UINT32 ui32PageIdx;
    IMG_UINT64 ui64CpuPAddr;
    PALLOC_sUmAlloc sUmAllocCp;
    IMG_UINT64 *paui64DevAddrs;
    SYSDEVU_sInfo *psSysDev;
    SYS_eMemPool eMemPool;
    IMG_PVOID pvCpuKmAddr;

    LOG_EVENT(PALLOC, PALLOC_IMPORT,
              LOG_FLAG_START | LOG_FLAG_QUAL_ARG1 | LOG_FLAG_QUAL_ARG2,
              ui32AttachId, buff_fd);
    DEBUG_REPORT(REPORT_MODULE_PALLOC, "PALLOC_Import1 fd %d", buff_fd);

    if (SYSOSKM_CopyFromUser(&sUmAllocCp, psUmAlloc, sizeof sUmAllocCp) != IMG_SUCCESS)
        return IMG_ERROR_FATAL;

    /* Import always maps an existing buffer; it never allocates one. */
    IMG_ASSERT(sUmAllocCp.bMappingOnly);

    /* Get the attachment handle from its ID... */
    ui32Result = DMANKM_GetAttachHandleFromId(ui32AttachId, &hAttachHandle);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
        return ui32Result;

    /* Get access to the attachment specific data...*/
    psAttachContext = DMANKM_GetCompAttachmentData(hAttachHandle);

    /* Get access to the device handle...*/
    hDevHandle = DMANKM_GetDevHandleFromAttach(hAttachHandle);

    /* Lock the device...*/
    DMANKM_LockDeviceContext(hDevHandle);

    psSysDev = SYSDEVU_GetDeviceById(SYSDEVKM_GetDeviceID(psAttachContext->hSysDevHandle));
    IMG_ASSERT(psSysDev != IMG_NULL);
    if (psSysDev == IMG_NULL) {
        ui32Result = IMG_ERROR_DEVICE_NOT_FOUND;
        goto error_get_dev_by_id;
    }

    /* Secure attributes select the secure pool. */
    eMemPool = (eMemAttrib & SYS_MEMATTRIB_SECURE) ? psSysDev->secureMemPool
                                                   : psSysDev->sMemPool;

    /* Allocate allocation info...*/
    psKmAlloc = IMG_MALLOC(sizeof *psKmAlloc);
    IMG_ASSERT(psKmAlloc != IMG_NULL);
    if (psKmAlloc == IMG_NULL) {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_alloc_info;
    }
    IMG_MEMSET(psKmAlloc, 0, sizeof *psKmAlloc);

    /* Save device handle etc... */
    psKmAlloc->hDevHandle = hDevHandle;
    psKmAlloc->sAllocInfo.ui32Size = sUmAllocCp.ui32Size;
    psKmAlloc->sAllocInfo.bIsContiguous = IMG_FALSE;

    /* Get the device id...*/
    ui32Result = DMANKM_GetDeviceId(hDevHandle, &sUmAllocCp.ui32DeviceId);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
        goto error_get_dev_id;

    psKmAlloc->sAllocInfo.bMappingOnly = IMG_TRUE;

    /* Calculate the size of the allocation in pages */
    ui32PageNo = (sUmAllocCp.ui32Size + SYS_MMU_PAGE_SIZE - 1) / SYS_MMU_PAGE_SIZE;

    psKmAlloc->sAllocInfo.psSysPAddr =
        IMG_BIGORSMALL_ALLOC(sizeof(IMG_SYS_PHYADDR) * ui32PageNo);
    IMG_ASSERT(psKmAlloc->sAllocInfo.psSysPAddr);
    if (IMG_NULL == psKmAlloc->sAllocInfo.psSysPAddr) {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_page_array;
    }

    paui64DevAddrs = IMG_BIGORSMALL_ALLOC((sizeof *paui64DevAddrs) * ui32PageNo);
    IMG_ASSERT(paui64DevAddrs);
    if (IMG_NULL == paui64DevAddrs) {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_addr_array;
    }

    if (buff_fd >= 0) {
        pvCpuKmAddr = NULL;
        /* ION buffer */
#if defined ANDROID_ION_BUFFERS
        psKmAlloc->eBufType = PALLOC_BUFTYPE_ANDROIDNATIVE;
#if defined CONFIG_X86
        ui32Result = palloc_GetIONPages(eMemPool, buff_fd, sUmAllocCp.ui32Size,
                                        psKmAlloc->sAllocInfo.psSysPAddr,
                                        &pvCpuKmAddr, &psKmAlloc->hBufHandle);
#else  // if CONFIG_X86
        ui32Result = palloc_GetIONPages(eMemPool, buff_fd, sUmAllocCp.ui32Size,
                                        psKmAlloc->sAllocInfo.psSysPAddr,
                                        NULL, &psKmAlloc->hBufHandle);
#endif // if CONFIG_X86
        if (ui32Result != IMG_SUCCESS) {
            IMG_ASSERT(!"palloc_GetIONPages");
            goto error_get_pages;
        }
#else  // if ANDROID_ION_BUFFERS
        IMG_ASSERT(!"NOT ANDROID: ION not supported");
        /* BUGFIX: previously jumped to cleanup with ui32Result still holding
         * IMG_SUCCESS, so the failure was reported as success. */
        ui32Result = IMG_ERROR_FATAL;
        goto error_get_pages;
#endif // if ANDROID_ION_BUFFERS
    } else {
        /* User space allocated buffer */
        IMG_VOID __user *pvUmBuff = (IMG_VOID __user *)sUmAllocCp.pvCpuUmAddr;

        IMG_ASSERT(pvUmBuff);
        psKmAlloc->hBufHandle = (IMG_HANDLE)(sUmAllocCp.pvCpuUmAddr);
        psKmAlloc->eBufType = PALLOC_BUFTYPE_USERALLOC;

        /* Assign and lock physical addresses to the user space buffer.
           The mapping of the first page in the kernel is also returned */
        ui32Result = SYSOSKM_CpuUmAddrToCpuPAddrArray(pvUmBuff,
                                                      psKmAlloc->sAllocInfo.psSysPAddr,
                                                      ui32PageNo, &pvCpuKmAddr);
        IMG_ASSERT(ui32Result == IMG_SUCCESS);
        if (ui32Result != IMG_SUCCESS)
            goto error_get_pages;
    }

    /* Import pages */
    ui32Result = SYSMEMU_ImportExternalPages(eMemPool, sUmAllocCp.ui32Size,
                                             eMemAttrib,
                                             &psKmAlloc->hPagesHandle,
                                             pvCpuKmAddr,
                                             psKmAlloc->sAllocInfo.psSysPAddr);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
        goto error_import_pages;

    // Access from user space is not needed for the moment. Can be changed.
    sUmAllocCp.lOffset = 0;
#if PALLOC_EXPOSE_KM_HANDLE
    sUmAllocCp.hKmAllocHandle = psKmAlloc->hPagesHandle;
#endif /* PALLOC_EXPOSE_KM_HANDLE */

    /* Translate every CPU physical page address into a device address. */
    for (ui32PageIdx = 0; ui32PageIdx < ui32PageNo; ++ui32PageIdx) {
        ui64CpuPAddr = psKmAlloc->sAllocInfo.psSysPAddr[ui32PageIdx];
        paui64DevAddrs[ui32PageIdx] =
            SYSDEVKM_CpuPAddrToDevPAddr(psAttachContext->hSysDevHandle,
                                        ui64CpuPAddr);
    }

    /* Register this with the resource manager */
    ui32Result = RMAN_RegisterResource(psAttachContext->hResBHandle,
                                       PALLOC_RES_TYPE_1, palloc_fnFree,
                                       psKmAlloc, IMG_NULL,
                                       &sUmAllocCp.ui32AllocId);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
        goto error_resource_register;

    LOG_EVENT(PALLOC, PALLOC_IMPORTID,
              LOG_FLAG_END | LOG_FLAG_QUAL_ARG1 | LOG_FLAG_QUAL_ARG2,
              ui32AttachId, sUmAllocCp.ui32AllocId);

    /* Unlock the device...*/
    DMANKM_UnlockDeviceContext(hDevHandle);

    /* Copy to user changed PALLOC_sUmAlloc, including physical device addresses */
    if (SYSOSKM_CopyToUser(psUmAlloc, &sUmAllocCp, sizeof sUmAllocCp)) {
        ui32Result = IMG_ERROR_FATAL;
        goto error_copy_to_user;
    }
    if (SYSOSKM_CopyToUser(psUmAlloc->aui64DevPAddr, paui64DevAddrs,
                           (sizeof *paui64DevAddrs) * ui32PageNo)) {
        ui32Result = IMG_ERROR_FATAL;
        goto error_copy_to_user;
    }

    /* Free the address array */
    IMG_BIGORSMALL_FREE((sizeof *paui64DevAddrs) * ui32PageNo, paui64DevAddrs);

    LOG_EVENT(PALLOC, PALLOC_IMPORT,
              LOG_FLAG_END | LOG_FLAG_QUAL_ARG1 | LOG_FLAG_QUAL_ARG2,
              ui32AttachId, buff_fd);

    /* Return. */
    return IMG_SUCCESS;

    /* Error handling. */
error_copy_to_user:
    /* The allocation is registered by now: freeing it by id tears down the
       pages, the buffer and psKmAlloc itself. */
    PALLOC_Free1(sUmAllocCp.ui32AllocId);
    /* BUGFIX: the device-address array was previously leaked on this path
       (it is only freed after BOTH copies succeed above). */
    IMG_BIGORSMALL_FREE((sizeof *paui64DevAddrs) * ui32PageNo, paui64DevAddrs);
    goto error_return;

error_resource_register:
    SYSMEMU_FreePages(psKmAlloc->hPagesHandle);
error_import_pages:
    if (buff_fd >= 0) {
#ifdef ANDROID_ION_BUFFERS
        palloc_ReleaseIONBuf(psKmAlloc->hBufHandle, NULL);
#endif /* ANDROID_ION_BUFFERS */
    } else {
        SYSOSKM_ReleaseCpuPAddrArray(pvCpuKmAddr, psKmAlloc->hBufHandle,
                                     psKmAlloc->sAllocInfo.psSysPAddr,
                                     ui32PageNo);
    }
error_get_pages:
    IMG_BIGORSMALL_FREE((sizeof *paui64DevAddrs) * ui32PageNo, paui64DevAddrs);
error_addr_array:
    IMG_BIGORSMALL_FREE(sizeof(IMG_SYS_PHYADDR) * ui32PageNo,
                        psKmAlloc->sAllocInfo.psSysPAddr);
error_page_array:
error_get_dev_id:
    IMG_FREE(psKmAlloc);
error_alloc_info:
error_get_dev_by_id:
    /* Unlock the device. */
    DMANKM_UnlockDeviceContext(hDevHandle);
error_return:
    return ui32Result;
}
/*! ****************************************************************************** @Function SYSMEMKM_AllocPages ******************************************************************************/ static IMG_RESULT AllocPages( SYSMEM_Heap * heap, IMG_UINT32 ui32Size, SYSMEMU_sPages * psPages, SYS_eMemAttrib eMemAttrib ) { IMG_UINT32 Res; dma_addr_t dma; unsigned numPages, pg_i; IMG_UINT64 *pCpuPhysAddrs; IMG_VOID **pCpuKernAddrs = IMG_NULL; size_t physAddrArrSize; // This heap only supports uncached | write-combined memory allocations IMG_ASSERT(eMemAttrib == (SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE)); eMemAttrib = SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE; numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1)/HOST_MMU_PAGE_SIZE; // Memory for physical addresses physAddrArrSize = sizeof(*pCpuPhysAddrs) * numPages; pCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize); if (!pCpuPhysAddrs) { Res = IMG_ERROR_OUT_OF_MEMORY; goto errPhysAddrsAlloc; } psPages->pvCpuKmAddr = dma_alloc_coherent(NULL, ui32Size, &dma, GFP_KERNEL | __GFP_HIGHMEM); if (!psPages->pvCpuKmAddr) { pCpuKernAddrs = IMG_BIGORSMALL_ALLOC(numPages*(sizeof(IMG_VOID **))); if (!pCpuKernAddrs) { Res = IMG_ERROR_OUT_OF_MEMORY; goto errKernAddrsAlloc; } for (pg_i = 0; pg_i < numPages; ++pg_i) { pCpuKernAddrs[pg_i] = dma_alloc_coherent(NULL, PAGE_SIZE, &dma, GFP_KERNEL | __GFP_HIGHMEM); if (!pCpuKernAddrs[pg_i]) { Res = IMG_ERROR_OUT_OF_MEMORY; goto errPageAlloc; } pCpuPhysAddrs[pg_i] = VAL64(dma); } psPages->pvImplData = (IMG_VOID *)((long)pCpuKernAddrs | 1); } else { int paddr; psPages->pvImplData = (IMG_VOID *)dma; paddr = dma; for (pg_i = 0; pg_i < numPages; ++pg_i) { pCpuPhysAddrs[pg_i] = VAL64(paddr + (PAGE_SIZE * pg_i)); } } // Set pointer to physical address in structure psPages->ppaPhysAddr = pCpuPhysAddrs; Res = SYSBRGU_CreateMappableRegion(psPages->ppaPhysAddr[0], ui32Size, eMemAttrib, IMG_TRUE, psPages, &psPages->hRegHandle); DEBUG_REPORT(REPORT_MODULE_SYSMEM, "%s (unified) region of 
size %u phys 0x%llx", __FUNCTION__, ui32Size, psPages->ppaPhysAddr[0]); IMG_ASSERT(Res == IMG_SUCCESS); if (Res != IMG_SUCCESS) { goto errCreateMapRegion; } return IMG_SUCCESS; errCreateMapRegion: errPageAlloc: for (--pg_i; pg_i >= 0; pg_i--) { dma_free_coherent(NULL, PAGE_SIZE, pCpuKernAddrs[pg_i], psPages->ppaPhysAddr[pg_i]); } IMG_BIGORSMALL_FREE(numPages * sizeof(*pCpuKernAddrs), pCpuKernAddrs); errKernAddrsAlloc: IMG_BIGORSMALL_FREE(numPages * sizeof(*pCpuPhysAddrs), pCpuPhysAddrs); errPhysAddrsAlloc: return Res; }