/*!
******************************************************************************
 @Function              SYSMEMKM_ImportExternalPages
******************************************************************************/
IMG_RESULT ImportExternalPages(
    SYSMEM_Heap *     heap,
    IMG_UINT32        ui32Size,
    SYSMEMU_sPages *  psPages,
    SYS_eMemAttrib    eMemAttrib,
    IMG_VOID *        pvCpuKmAddr,
    IMG_UINT64 *      pPhyAddrs
)
{
    size_t numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1) / HOST_MMU_PAGE_SIZE;
    size_t physAddrArrSize = sizeof *(psPages->ppaPhysAddr) * numPages;
    size_t phy_i;

    psPages->bMappingOnly = IMG_TRUE;
    psPages->bImported = IMG_TRUE;
    psPages->pvCpuKmAddr = pvCpuKmAddr;
    //psPages->pvImplData = pvCpuKmAddr;

    psPages->ppaPhysAddr = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
    IMG_ASSERT(IMG_NULL != psPages->ppaPhysAddr);
    if (IMG_NULL == psPages->ppaPhysAddr)
    {
        return IMG_ERROR_OUT_OF_MEMORY;
    }

    for (phy_i = 0; phy_i < numPages; ++phy_i)
    {
        psPages->ppaPhysAddr[phy_i] = pPhyAddrs[phy_i];
    }

    /* No SYSBRGU_sMappableReg - SYSBRGU_CreateMappableRegion not called. */
    psPages->hRegHandle = NULL;

    return IMG_SUCCESS;
}
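/* Every allocation and import path in this file rounds the byte size up to
   whole host MMU pages and then builds a per-page physical address array of
   that length. A minimal illustrative helper capturing that arithmetic; the
   helper name is hypothetical and is not used by the original code: */
static inline size_t sysmem_bytes_to_pages(IMG_UINT32 ui32Size)
{
    /* e.g. with 4 KiB pages: 1 byte -> 1 page, 4096 -> 1 page, 4097 -> 2 pages */
    return ((size_t)ui32Size + HOST_MMU_PAGE_SIZE - 1) / HOST_MMU_PAGE_SIZE;
}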
/*!
******************************************************************************
 @Function              SYSMEMKM_GetCpuKmAddr
******************************************************************************/
static IMG_RESULT GetCpuKmAddr(
    SYSMEM_Heap *  heap,
    IMG_VOID **    ppvCpuKmAddr,
    IMG_HANDLE     hPagesHandle
)
{
    SYSMEMU_sPages * psPages = hPagesHandle;

    if (psPages->pvCpuKmAddr == IMG_NULL)
    {
        IMG_UINT32     numPages;
        pgprot_t       pageProt;
        unsigned       pg_i;
        struct page ** pages;

        pageProt = PAGE_KERNEL;
        numPages = (psPages->ui32Size + HOST_MMU_PAGE_SIZE - 1) / HOST_MMU_PAGE_SIZE;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
        /* Write combined implies non-cached in Linux x86. If we additionally
           call pgprot_noncached, we will not have write combining, just
           non-cached. */
        if ((psPages->eMemAttrib & SYS_MEMATTRIB_WRITECOMBINE) != 0)
        {
            pageProt = pgprot_writecombine(pageProt);
        }
#if defined(CONFIG_X86)
        else
#endif
#endif
        /* If uncached...*/
        if ((psPages->eMemAttrib & SYS_MEMATTRIB_UNCACHED) != 0)
        {
            pageProt = pgprot_noncached(pageProt);
        }

        pages = IMG_BIGORSMALL_ALLOC(numPages * (sizeof *pages));
        IMG_ASSERT(IMG_NULL != pages);
        if (IMG_NULL == pages)
        {
            return IMG_ERROR_OUT_OF_MEMORY;
        }

        for (pg_i = 0; pg_i < numPages; ++pg_i)
        {
            pages[pg_i] = pfn_to_page(VAL32((psPages->ppaPhysAddr[pg_i]) >> PAGE_SHIFT));
        }

        psPages->pvCpuKmAddr = vmap(pages, numPages, VM_MAP, pageProt);
        IMG_BIGORSMALL_FREE(numPages * sizeof(*pages), pages);
    }

    /* The original excerpt stops above; the tail of the function is
       reconstructed on the assumption that it simply reports the (possibly
       freshly vmap'ed) kernel address back to the caller. */
    IMG_ASSERT(psPages->pvCpuKmAddr != IMG_NULL);
    if (psPages->pvCpuKmAddr == IMG_NULL)
    {
        return IMG_ERROR_FATAL;
    }
    *ppvCpuKmAddr = psPages->pvCpuKmAddr;

    return IMG_SUCCESS;
}
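/* For reference, a kernel mapping created with vmap() above is normally torn
   down again with vunmap() when the pages are released. A minimal sketch
   (hypothetical helper, not part of the original source), assuming the heap
   only unmaps addresses it created itself rather than ones handed in by an
   import: */
static IMG_VOID UnmapCpuKmAddr(SYSMEMU_sPages *psPages)
{
    if (psPages->pvCpuKmAddr != IMG_NULL && !psPages->bImported)
    {
        /* Release the kernel virtual mapping created by vmap(). */
        vunmap(psPages->pvCpuKmAddr);
        psPages->pvCpuKmAddr = IMG_NULL;
    }
}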
/*!
******************************************************************************
 @Function              SYSMEMKM_ImportExternalPages
******************************************************************************/
static IMG_RESULT ImportExternalPages(
    SYSMEM_Heap *     heap,
    IMG_UINT32        ui32Size,
    SYSMEMU_sPages *  psPages,
    SYS_eMemAttrib    eMemAttrib,
    IMG_VOID *        pvCpuKmAddr,
    IMG_PHYSADDR *    ppaPhyAddrs
)
{
    // IMG_RESULT ui32Result;
    size_t numPages;
    size_t physAddrArrSize;
    size_t phy_i;

    IMG_ASSERT(pvCpuKmAddr != IMG_NULL);

    /* Allocate page structure */
    psPages->bMappingOnly = IMG_TRUE;
    psPages->bImported = IMG_TRUE;
    psPages->pvCpuKmAddr = pvCpuKmAddr;
    //psPages->pvImplData = psPages->pvCpuKmAddr;

    numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1) / HOST_MMU_PAGE_SIZE;
    physAddrArrSize = sizeof *(psPages->ppaPhysAddr) * numPages;

    psPages->ppaPhysAddr = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
    IMG_ASSERT(IMG_NULL != psPages->ppaPhysAddr);
    if (IMG_NULL == psPages->ppaPhysAddr)
    {
        return IMG_ERROR_OUT_OF_MEMORY;
    }

    for (phy_i = 0; phy_i < numPages; ++phy_i)
    {
        psPages->ppaPhysAddr[phy_i] = ppaPhyAddrs[phy_i];
    }

    //ui32Result = SYSBRGU_CreateMappableRegion(pvCpuKmAddr, ppaPhyAddrs[0], ui32Size, eMemAttrib,
    //                                          IMG_TRUE, &psPages->hRegHandle);
    psPages->hRegHandle = IMG_NULL;
    //IMG_ASSERT(ui32Result == IMG_SUCCESS);
    //if (ui32Result != IMG_SUCCESS)
    //{
    //    return ui32Result;
    //}

    return IMG_SUCCESS;
}
static IMG_RESULT AllocPages(
    SYSMEM_Heap *     heap,
    IMG_UINT32        ui32Size,
    SYSMEMU_sPages *  psPages,
    SYS_eMemAttrib    eMemAttrib
)
{
    IMG_UINT32            ui32NoPages;
    IMG_UINT32            ui32ExamPages;
    IMG_UINT32            i;
    IMG_UINT64            ui64DeviceMemoryBase;
    IMG_PHYSADDR          paCpuPhysAddr;
    IMG_UINT32            ui32Result;
    size_t                physAddrArrSize;
    struct priv_params *  prv = (struct priv_params *)heap->priv;

    /* If we don't know where the memory is...*/
    SYSOSKM_DisableInt();

    /* Calculate required no. of pages...*/
    ui32NoPages = (ui32Size + (HOST_MMU_PAGE_SIZE-1)) / HOST_MMU_PAGE_SIZE;

    /* Loop over allocated pages until we find an unallocated slot big enough
       for this allocation...*/
    ui32ExamPages = 0;
    while (ui32ExamPages < prv->npages)
    {
        /* If the current page is not allocated and we might have enough
           remaining to make this allocation...*/
        if (
                (!prv->alloc_pool[prv->cur_index]) &&
                ((prv->cur_index + ui32NoPages) <= prv->npages)
            )
        {
            /* Can we make this allocation...*/
            for (i = 0; i < ui32NoPages; i++)
            {
                if (prv->alloc_pool[prv->cur_index+i])
                {
                    break;
                }
            }
            if (i == ui32NoPages)
            {
                /* Yes, mark pages as allocated...*/
                for (i = 0; i < ui32NoPages; i++)
                {
                    prv->alloc_pool[prv->cur_index+i] = IMG_TRUE;
                }

                /* Calculate the memory address of the start of the allocation...*/
                //psPages->pvCpuKmAddr = (IMG_VOID *)((IMG_UINTPTR)prv->vstart + (prv->cur_index * HOST_MMU_PAGE_SIZE));
                psPages->pvImplData = (IMG_VOID *)(prv->vstart + (prv->cur_index * HOST_MMU_PAGE_SIZE));

                /* Update the current page index....*/
                prv->cur_index += ui32NoPages;
                if (prv->cur_index >= prv->npages)
                {
                    prv->cur_index = 0;
                }
                break;
            }
        }

        /* Update examined pages and page index...*/
        ui32ExamPages++;
        prv->cur_index++;
        if (prv->cur_index >= prv->npages)
        {
            prv->cur_index = 0;
        }
    }
    SYSOSKM_EnableInt();

    /* Check if allocation failed....*/
    IMG_ASSERT(ui32ExamPages < prv->npages);
    if (ui32ExamPages >= prv->npages)
    {
        /* Failed: dump some fragmentation information */
        int i = 0;
        int nAllocated = 0;
        int n64kBlocks = 0;     // number of free blocks of <16 consecutive pages
        int n128kBlocks = 0;
        int n256kBlocks = 0;
        int nBigBlocks = 0;     // number of free blocks of >=64 consecutive pages
        int nMaxBlocks = 0;     // longest run of consecutive pages seen
        int nPages = 0;

        for (i = 0; i < (int)prv->npages; i++)
        {
            IMG_UINT8 isallocated = prv->alloc_pool[i];
            nPages++;
            if (i == prv->npages-1 || isallocated != prv->alloc_pool[i+1])
            {
                if (isallocated)
                    nAllocated += nPages;
                else if (nPages < 16)
                    n64kBlocks++;
                else if (nPages < 32)
                    n128kBlocks++;
                else if (nPages < 64)
                    n256kBlocks++;
                else
                    nBigBlocks++;

                if (nMaxBlocks < nPages)
                    nMaxBlocks = nPages;

                nPages = 0;
            }
        }

#ifdef printk
        /* hopefully, this will give some idea of the fragmentation of the memory */
        printk("AllocPages not able to allocate memory \n");
        printk(" number available memory areas under 64k:%d\n", n64kBlocks);
        printk(" number available memory areas under 128k:%d\n", n128kBlocks);
        printk(" number available memory areas under 256k:%d\n", n256kBlocks);
        printk(" number available memory areas over 256k:%d\n", nBigBlocks);
        printk(" total allocated memory:%dk/%dk\n", nAllocated*4, prv->npages*4);
#endif
        return IMG_ERROR_OUT_OF_MEMORY;
    }

    paCpuPhysAddr = CpuKmAddrToCpuPAddr(heap, psPages->pvImplData);
    IMG_ASSERT(paCpuPhysAddr != 0);
    if (paCpuPhysAddr == 0)
    {
        return IMG_ERROR_GENERIC_FAILURE;
    }

#ifdef CONFIG_ARM
    /* This flushes the outer cache on ARM, so we avoid memory corruption by
       late flushes of memory previously marked as cached. */
    if ((eMemAttrib & SYS_MEMATTRIB_CACHED) == 0)
    {
        mb();
        /* the following two calls are somewhat expensive, but are there for
           defensive reasons */
        flush_cache_all();
        outer_flush_all();
    }
#endif

    {
        IMG_PHYSADDR * ppaCpuPhysAddrs;
        size_t numPages, pg_i, offset;

        // Memory for physical addresses
        numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1) / HOST_MMU_PAGE_SIZE;
        physAddrArrSize = sizeof(*ppaCpuPhysAddrs) * numPages;
        ppaCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
        if (!ppaCpuPhysAddrs)
        {
            return IMG_ERROR_OUT_OF_MEMORY;
        }
        for (pg_i = 0, offset = 0; pg_i < numPages; offset += HOST_MMU_PAGE_SIZE, ++pg_i)
        {
            ppaCpuPhysAddrs[pg_i] = paCpuPhysAddr + offset;
        }
        // Set pointer to physical address in structure
        psPages->ppaPhysAddr = ppaCpuPhysAddrs;
    }

    /* Add this to the list of mappable regions...*/
    ui32Result = SYSBRGU_CreateMappableRegion(paCpuPhysAddr, ui32Size, eMemAttrib,
                                              psPages, &psPages->hRegHandle);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        goto error_mappable_region;
    }

#if defined (CLEAR_PAGES)
    if (psPages->pvImplData)
        IMG_MEMSET(psPages->pvImplData, 0, ui32Size);
#endif

    return IMG_SUCCESS;

    /* Error handling. */
error_mappable_region:
    IMG_BIGORSMALL_FREE(physAddrArrSize, psPages->ppaPhysAddr);
    psPages->ppaPhysAddr = IMG_NULL;

    return ui32Result;
}
/*!
******************************************************************************
 @Function              ImportPages
******************************************************************************/
static IMG_RESULT ImportPages(
    SYSMEM_Heap *     heap,
    SYSDEVU_sInfo *   sysdev,
    IMG_UINT32        ui32Size,
    SYSMEMU_sPages *  psPages,
    SYS_eMemAttrib    eMemAttrib,
    IMG_INT32         buff_fd,
    IMG_UINT64 *      pPhyAddrs,
    IMG_VOID *        priv,
    IMG_BOOL          kernelMapped
)
{
    size_t numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1) / HOST_MMU_PAGE_SIZE;
    struct ion_handle * ionHandle;
    IMG_RESULT result = IMG_ERROR_FATAL;
    unsigned pg_i = 0;
    struct ion_client * pIONcl;

    DEBUG_REPORT(REPORT_MODULE_SYSMEM, "Importing buff_fd %d of size %u", buff_fd, ui32Size);

    pIONcl = get_ion_client();
    if (!pIONcl)
        goto exitFailGetClient;

    ionHandle = ion_import_dma_buf(pIONcl, buff_fd);
    if (IS_ERR(ionHandle))
    {
        REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining handle from fd %d", buff_fd);
        result = IMG_ERROR_FATAL;
        goto exitFailImportFD;
    }

    psPages->pvImplData = ionHandle;

#if defined(ION_SYSTEM_HEAP)
    {
        struct scatterlist *psScattLs, *psScattLsAux;
        struct sg_table *psSgTable;

        psSgTable = ion_sg_table(pIONcl, ionHandle);
        if (psSgTable == NULL)
        {
            REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining sg table");
            result = IMG_ERROR_FATAL;
            goto exitFailMap;
        }
        psScattLs = psSgTable->sgl;
        if (psScattLs == NULL)
        {
            REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining scatter list");
            result = IMG_ERROR_FATAL;
            goto exitFailMap;
        }

        // Get physical addresses from scatter list
        for (psScattLsAux = psScattLs; psScattLsAux; psScattLsAux = sg_next(psScattLsAux))
        {
            int offset;
            dma_addr_t chunkBase = sg_phys(psScattLsAux);

            for (offset = 0; offset < psScattLsAux->length; offset += PAGE_SIZE, ++pg_i)
            {
                if (pg_i >= numPages)
                    break;
                pPhyAddrs[pg_i] = chunkBase + offset;
            }
            if (pg_i >= numPages)
                break;
        }

        if (kernelMapped)
            psPages->pvCpuKmAddr = ion_map_kernel(pIONcl, ionHandle);
    }
#else
    {
        int offset;
        ion_phys_addr_t physaddr;
        size_t len = 0;

        result = ion_phys(pIONcl, ionHandle, &physaddr, &len);
        if (result)
        {
            IMG_ASSERT(!"ion_phys failed");
            result = IMG_ERROR_FATAL;
            goto exitFailMap;
        }

        for (offset = 0; pg_i < numPages; offset += PAGE_SIZE, ++pg_i)
        {
            if (pg_i >= numPages)
                break;
            pPhyAddrs[pg_i] = physaddr + offset;
        }

        if (kernelMapped)
            psPages->pvCpuKmAddr = SYSMEMU_CpuPAddrToCpuKmAddr(heap->memId, physaddr);
    }
#endif

    {
        size_t physAddrArrSize = numPages * sizeof(psPages->ppaPhysAddr[0]);
        size_t phy_i;

        psPages->ppaPhysAddr = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
        IMG_ASSERT(psPages->ppaPhysAddr != IMG_NULL);
        if (psPages->ppaPhysAddr == IMG_NULL)
        {
            return IMG_ERROR_OUT_OF_MEMORY;
        }

        for (phy_i = 0; phy_i < numPages; ++phy_i)
        {
            psPages->ppaPhysAddr[phy_i] = pPhyAddrs[phy_i];
        }
    }

    if (kernelMapped && psPages->pvCpuKmAddr == NULL)
    {
        REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error mapping to kernel address");
        result = IMG_ERROR_FATAL;
        goto exitFailMapKernel;
    }

    result = IMG_SUCCESS;

exitFailMapKernel:
exitFailMap:
exitFailImportFD:
exitFailGetClient:
    return result;
}
/*****************************************************************************
 @Function              AllocPages
******************************************************************************/
static IMG_RESULT AllocPages(
    SYSMEM_Heap *     heap,
    IMG_UINT32        ui32Size,
    SYSMEMU_sPages *  psPages,
    SYS_eMemAttrib    eMemAttrib
)
{
    IMG_UINT32            Res;
    struct ion_handle *   ion_handle;
    unsigned              allocFlags;
    struct ion_client *   ion_client;
    IMG_UINT64 *          pCpuPhysAddrs;
    size_t                numPages;
    size_t                physAddrArrSize;

    ion_client = (struct ion_client *)heap->priv;

    if ((eMemAttrib & SYS_MEMATTRIB_WRITECOMBINE) ||
        (eMemAttrib & SYS_MEMATTRIB_UNCACHED))
    {
        allocFlags = 0;
    }
    else
    {
        allocFlags = ION_FLAG_CACHED;
    }

    if (eMemAttrib == SYS_MEMATTRIB_UNCACHED)
        REPORT(REPORT_MODULE_SYSMEM, REPORT_WARNING,
               "Purely uncached memory is not supported by ION");

    // PAGE_SIZE alignment, heap depends on platform
    ion_handle = ion_alloc(ion_client, ui32Size, PAGE_SIZE, ION_HEAP_SYSTEM_MASK, allocFlags);
    if (!ion_handle)
    {
        REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error allocating %u bytes from ion", ui32Size);
        Res = IMG_ERROR_OUT_OF_MEMORY;
        goto errAlloc;
    }

    /* Find out physical addresses in the mappable region */
    numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1) / HOST_MMU_PAGE_SIZE;
    physAddrArrSize = sizeof *pCpuPhysAddrs * numPages;
    pCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
    if (!pCpuPhysAddrs)
    {
        Res = IMG_ERROR_OUT_OF_MEMORY;
        goto errPhysArrAlloc;
    }

    {
        struct scatterlist *psScattLs, *psScattLsAux;
        struct sg_table *psSgTable;
        size_t pg_i = 0;

        psSgTable = ion_sg_table(ion_client, ion_handle);
        if (psSgTable == NULL)
        {
            REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining sg table");
            Res = IMG_ERROR_FATAL;
            goto errGetPhys;
        }
        psScattLs = psSgTable->sgl;
        if (psScattLs == NULL)
        {
            REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining scatter list");
            Res = IMG_ERROR_FATAL;
            goto errGetPhys;
        }

        // Get physical addresses from scatter list
        for (psScattLsAux = psScattLs; psScattLsAux; psScattLsAux = sg_next(psScattLsAux))
        {
            int offset;
            dma_addr_t chunkBase = sg_phys(psScattLsAux);

            for (offset = 0; offset < psScattLsAux->length; offset += PAGE_SIZE, ++pg_i)
            {
                if (pg_i >= numPages)
                    break;
                //pCpuPhysAddrs[pg_i] = dma_map_page(NULL, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
                pCpuPhysAddrs[pg_i] = chunkBase + offset;
            }
            if (pg_i >= numPages)
                break;
        }
    }

    // Set pointer to physical address in structure
    psPages->ppaPhysAddr = pCpuPhysAddrs;

    DEBUG_REPORT(REPORT_MODULE_SYSMEM, "%s region of size %u phys 0x%llx",
                 __FUNCTION__, ui32Size, psPages->ppaPhysAddr[0]);

    Res = SYSBRGU_CreateMappableRegion(psPages->ppaPhysAddr[0], ui32Size, eMemAttrib,
                                       psPages, &psPages->hRegHandle);
    if (Res != IMG_SUCCESS)
    {
        REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error %u in SYSBRGU_CreateMappableRegion", Res);
        goto errCreateMapRegion;
    }

    psPages->pvImplData = ion_handle;

    return IMG_SUCCESS;

errCreateMapRegion:
errGetPhys:
    IMG_BIGORSMALL_FREE(numPages * sizeof(*pCpuPhysAddrs), pCpuPhysAddrs);
errPhysArrAlloc:
    ion_unmap_kernel(ion_client, ion_handle);
    ion_free(ion_client, ion_handle);
errAlloc:
    return Res;
}
/*!
******************************************************************************
 @Function              PALLOC_Import1
******************************************************************************/
IMG_RESULT PALLOC_Import1(
    IMG_UINT32                 ui32AttachId,
    SYS_eMemAttrib             eMemAttrib,
    int                        buff_fd,
    PALLOC_sUmAlloc __user *   psUmAlloc
)
{
    IMG_HANDLE                hDevHandle;
    IMG_UINT32                ui32Result;
    PALLOC_sKmAlloc *         psKmAlloc;
    IMG_HANDLE                hAttachHandle;
    PALLOC_sAttachContext *   psAttachContext;
    IMG_UINT32                ui32PageNo;
    IMG_UINT32                ui32PageIdx;
    IMG_UINT64                ui64CpuPAddr;
    PALLOC_sUmAlloc           sUmAllocCp;
    IMG_UINT64 *              paui64DevAddrs;
    SYSDEVU_sInfo *           psSysDev;
    SYS_eMemPool              eMemPool;
    IMG_PVOID                 pvCpuKmAddr;

    LOG_EVENT(PALLOC, PALLOC_IMPORT, LOG_FLAG_START | LOG_FLAG_QUAL_ARG1 | LOG_FLAG_QUAL_ARG2,
              ui32AttachId, buff_fd);
    DEBUG_REPORT(REPORT_MODULE_PALLOC, "PALLOC_Import1 fd %d", buff_fd);

    if (SYSOSKM_CopyFromUser(&sUmAllocCp, psUmAlloc, sizeof sUmAllocCp) != IMG_SUCCESS)
    {
        return IMG_ERROR_FATAL;
    }
    IMG_ASSERT(sUmAllocCp.bMappingOnly);

    /* Get the attachment handle from its ID... */
    ui32Result = DMANKM_GetAttachHandleFromId(ui32AttachId, &hAttachHandle);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        return ui32Result;
    }

    /* Get access to the attachment specific data...*/
    psAttachContext = DMANKM_GetCompAttachmentData(hAttachHandle);

    /* Get access to the device handle...*/
    hDevHandle = DMANKM_GetDevHandleFromAttach(hAttachHandle);

    /* Lock the device...*/
    DMANKM_LockDeviceContext(hDevHandle);

    psSysDev = SYSDEVU_GetDeviceById(SYSDEVKM_GetDeviceID(psAttachContext->hSysDevHandle));
    IMG_ASSERT(psSysDev != IMG_NULL);
    if (psSysDev == IMG_NULL)
    {
        ui32Result = IMG_ERROR_DEVICE_NOT_FOUND;
        goto error_get_dev_by_id;
    }
    eMemPool = (eMemAttrib & SYS_MEMATTRIB_SECURE) ? psSysDev->secureMemPool : psSysDev->sMemPool;

    /* Allocate allocation info...*/
    psKmAlloc = IMG_MALLOC(sizeof *psKmAlloc);
    IMG_ASSERT(psKmAlloc != IMG_NULL);
    if (psKmAlloc == IMG_NULL)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_alloc_info;
    }
    IMG_MEMSET(psKmAlloc, 0, sizeof *psKmAlloc);

    /* Save device handle etc... */
    psKmAlloc->hDevHandle = hDevHandle;
    psKmAlloc->sAllocInfo.ui32Size = sUmAllocCp.ui32Size;
    psKmAlloc->sAllocInfo.bIsContiguous = IMG_FALSE;

    /* Get the device id...*/
    ui32Result = DMANKM_GetDeviceId(hDevHandle, &sUmAllocCp.ui32DeviceId);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        goto error_get_dev_id;
    }

    psKmAlloc->sAllocInfo.bMappingOnly = IMG_TRUE;

    /* Calculate the size of the allocation in pages */
    ui32PageNo = (sUmAllocCp.ui32Size + SYS_MMU_PAGE_SIZE - 1) / SYS_MMU_PAGE_SIZE;

    psKmAlloc->sAllocInfo.psSysPAddr = IMG_BIGORSMALL_ALLOC(sizeof(IMG_SYS_PHYADDR) * ui32PageNo);
    IMG_ASSERT(psKmAlloc->sAllocInfo.psSysPAddr);
    if (IMG_NULL == psKmAlloc->sAllocInfo.psSysPAddr)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_page_array;
    }

    paui64DevAddrs = IMG_BIGORSMALL_ALLOC((sizeof *paui64DevAddrs) * ui32PageNo);
    IMG_ASSERT(paui64DevAddrs);
    if (IMG_NULL == paui64DevAddrs)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_addr_array;
    }

    if (buff_fd >= 0)
    {
        pvCpuKmAddr = NULL;
        /* ION buffer */
#if defined ANDROID_ION_BUFFERS
        psKmAlloc->eBufType = PALLOC_BUFTYPE_ANDROIDNATIVE;
#if defined CONFIG_X86
        ui32Result = palloc_GetIONPages(eMemPool, buff_fd, sUmAllocCp.ui32Size,
                                        psKmAlloc->sAllocInfo.psSysPAddr,
                                        &pvCpuKmAddr, &psKmAlloc->hBufHandle);
#else  // if CONFIG_X86
        ui32Result = palloc_GetIONPages(eMemPool, buff_fd, sUmAllocCp.ui32Size,
                                        psKmAlloc->sAllocInfo.psSysPAddr,
                                        NULL, &psKmAlloc->hBufHandle);
#endif // if CONFIG_X86
        if (ui32Result != IMG_SUCCESS)
        {
            IMG_ASSERT(!"palloc_GetIONPages");
            goto error_get_pages;
        }
#else  // if ANDROID_ION_BUFFERS
        IMG_ASSERT(!"NOT ANDROID: ION not supported");
        goto error_get_pages;
#endif // if ANDROID_ION_BUFFERS
    }
    else
    {
        /* User space allocated buffer */
        IMG_VOID __user * pvUmBuff = (IMG_VOID __user *)sUmAllocCp.pvCpuUmAddr;
        IMG_ASSERT(pvUmBuff);

        psKmAlloc->hBufHandle = (IMG_HANDLE)(sUmAllocCp.pvCpuUmAddr);
        psKmAlloc->eBufType = PALLOC_BUFTYPE_USERALLOC;

        /* Assign and lock physical addresses to the user space buffer.
           The mapping of the first page in the kernel is also returned */
        ui32Result = SYSOSKM_CpuUmAddrToCpuPAddrArray(pvUmBuff, psKmAlloc->sAllocInfo.psSysPAddr,
                                                      ui32PageNo, &pvCpuKmAddr);
        IMG_ASSERT(ui32Result == IMG_SUCCESS);
        if (ui32Result != IMG_SUCCESS)
        {
            goto error_get_pages;
        }
    }

    /* Import pages */
    ui32Result = SYSMEMU_ImportExternalPages(eMemPool, sUmAllocCp.ui32Size, eMemAttrib,
                                             &psKmAlloc->hPagesHandle, pvCpuKmAddr,
                                             psKmAlloc->sAllocInfo.psSysPAddr);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        goto error_import_pages;
    }

    // Access from user space is not needed for the moment. Can be changed.
    sUmAllocCp.lOffset = 0;
#if PALLOC_EXPOSE_KM_HANDLE
    sUmAllocCp.hKmAllocHandle = psKmAlloc->hPagesHandle;
#endif /* PALLOC_EXPOSE_KM_HANDLE */

    for (ui32PageIdx = 0; ui32PageIdx < ui32PageNo; ++ui32PageIdx)
    {
        ui64CpuPAddr = psKmAlloc->sAllocInfo.psSysPAddr[ui32PageIdx];
        paui64DevAddrs[ui32PageIdx] = SYSDEVKM_CpuPAddrToDevPAddr(psAttachContext->hSysDevHandle,
                                                                  ui64CpuPAddr);
    }

    /* Register this with the resource manager */
    ui32Result = RMAN_RegisterResource(psAttachContext->hResBHandle, PALLOC_RES_TYPE_1,
                                       palloc_fnFree, psKmAlloc, IMG_NULL, &sUmAllocCp.ui32AllocId);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        goto error_resource_register;
    }

    LOG_EVENT(PALLOC, PALLOC_IMPORTID, LOG_FLAG_END | LOG_FLAG_QUAL_ARG1 | LOG_FLAG_QUAL_ARG2,
              ui32AttachId, sUmAllocCp.ui32AllocId);

    /* Unlock the device...*/
    DMANKM_UnlockDeviceContext(hDevHandle);

    /* Copy to user changed PALLOC_sUmAlloc, including physical device addresses */
    if (SYSOSKM_CopyToUser(psUmAlloc, &sUmAllocCp, sizeof sUmAllocCp))
    {
        ui32Result = IMG_ERROR_FATAL;
        goto error_copy_to_user;
    }
    if (SYSOSKM_CopyToUser(psUmAlloc->aui64DevPAddr, paui64DevAddrs,
                           (sizeof *paui64DevAddrs) * ui32PageNo))
    {
        ui32Result = IMG_ERROR_FATAL;
        goto error_copy_to_user;
    }

    /* Free the address array */
    IMG_BIGORSMALL_FREE((sizeof *paui64DevAddrs) * ui32PageNo, paui64DevAddrs);

    LOG_EVENT(PALLOC, PALLOC_IMPORT, LOG_FLAG_END | LOG_FLAG_QUAL_ARG1 | LOG_FLAG_QUAL_ARG2,
              ui32AttachId, buff_fd);

    /* Return. */
    return IMG_SUCCESS;

    /* Error handling. */
error_copy_to_user:
    /* Free everything. */
    PALLOC_Free1(sUmAllocCp.ui32AllocId);
    goto error_return;

error_resource_register:
    SYSMEMU_FreePages(psKmAlloc->hPagesHandle);
error_import_pages:
    if (buff_fd >= 0)
    {
#ifdef ANDROID_ION_BUFFERS
        palloc_ReleaseIONBuf(psKmAlloc->hBufHandle, NULL);
#endif /* ANDROID_ION_BUFFERS */
    }
    else
    {
        SYSOSKM_ReleaseCpuPAddrArray(pvCpuKmAddr, psKmAlloc->hBufHandle,
                                     psKmAlloc->sAllocInfo.psSysPAddr, ui32PageNo);
    }
error_get_pages:
    IMG_BIGORSMALL_FREE((sizeof *paui64DevAddrs) * ui32PageNo, paui64DevAddrs);
error_addr_array:
    IMG_BIGORSMALL_FREE(sizeof(IMG_SYS_PHYADDR) * ui32PageNo, psKmAlloc->sAllocInfo.psSysPAddr);
error_page_array:
error_get_dev_id:
    IMG_FREE(psKmAlloc);
error_alloc_info:
error_get_dev_by_id:
    /* Unlock the device. */
    DMANKM_UnlockDeviceContext(hDevHandle);
error_return:
    return ui32Result;
}
/*!
******************************************************************************
 @Function              PALLOC_Alloc1
******************************************************************************/
IMG_RESULT PALLOC_Alloc1(
    IMG_UINT32                 ui32AttachId,
    SYS_eMemAttrib             eMemAttrib,
    PALLOC_sUmAlloc __user *   psUmAlloc
)
{
    IMG_HANDLE                hDevHandle;
    IMG_UINT32                ui32Result;
    PALLOC_sKmAlloc *         psKmAlloc;
    IMG_HANDLE                hAttachHandle;
    PALLOC_sAttachContext *   psAttachContext;
    IMG_UINT32                ui32PageNo;
    PALLOC_sUmAlloc           sUmAllocCp;
    IMG_UINT32                ui32PageIdx;
    IMG_UINT64 *              pui64Phys;
    SYSMEMU_sPages *          psSysMem;
    SYS_eMemPool              eMemPool;
    SYSDEVU_sInfo *           psSysDev;
    /* the following code assumes that IMG_SYS_PHYADDR and IMG_UINT64 are the same size */
#ifndef SYSBRG_BRIDGING
    IMG_VOID *                pvKmAddr;
#endif

    if (SYSOSKM_CopyFromUser(&sUmAllocCp, psUmAlloc, sizeof(sUmAllocCp)) != IMG_SUCCESS)
    {
        return IMG_ERROR_FATAL;
    }

    LOG_EVENT(PALLOC, PALLOC_ALLOC, (LOG_FLAG_START | LOG_FLAG_QUAL_ARG1 | LOG_FLAG_QUAL_ARG2),
              ui32AttachId, sUmAllocCp.ui32Size);
    IMG_ASSERT(!sUmAllocCp.bMappingOnly);

    /* Get the attachment handle from its ID...*/
    ui32Result = DMANKM_GetAttachHandleFromId(ui32AttachId, &hAttachHandle);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        return ui32Result;
    }

    /* Get access to the attachment specific data...*/
    psAttachContext = DMANKM_GetCompAttachmentData(hAttachHandle);

    /* Get access to the device handle...*/
    hDevHandle = DMANKM_GetDevHandleFromAttach(hAttachHandle);

    /* Lock the device...*/
    DMANKM_LockDeviceContext(hDevHandle);

    psSysDev = SYSDEVU_GetDeviceById(SYSDEVKM_GetDeviceID(psAttachContext->hSysDevHandle));
    IMG_ASSERT(psSysDev != IMG_NULL);
    if (psSysDev == IMG_NULL)
    {
        /* Unlock the device before returning. */
        DMANKM_UnlockDeviceContext(hDevHandle);
        return IMG_ERROR_DEVICE_NOT_FOUND;
    }
    eMemPool = (eMemAttrib & SYS_MEMATTRIB_SECURE) ? psSysDev->secureMemPool : psSysDev->sMemPool;

    /* Allocate allocation info...*/
    psKmAlloc = IMG_MALLOC(sizeof(*psKmAlloc));
    IMG_ASSERT(psKmAlloc != IMG_NULL);
    if (psKmAlloc == IMG_NULL)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_alloc_info;
    }
    IMG_MEMSET(psKmAlloc, 0, sizeof(*psKmAlloc));

    /* Save device handle etc...*/
    psKmAlloc->hDevHandle = hDevHandle;
    psKmAlloc->sAllocInfo.ui32Size = sUmAllocCp.ui32Size;
    psKmAlloc->hBufHandle = NULL;
    psKmAlloc->eBufType = PALLOC_BUFTYPE_PALLOCATED;

    /* Allocate pages...*/
    ui32Result = SYSMEMU_AllocatePages(sUmAllocCp.ui32Size, eMemAttrib, eMemPool,
                                       &psKmAlloc->hPagesHandle, &pui64Phys);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        goto error_alloc_pages;
    }

#ifndef SYSBRG_BRIDGING
    SYSMEMU_GetCpuKmAddr(&pvKmAddr, psKmAlloc->hPagesHandle);
    IMG_ASSERT(pvKmAddr != IMG_NULL);
    if (pvKmAddr == IMG_NULL)
    {
        ui32Result = IMG_ERROR_FATAL;
        goto error_cpu_km_addr;
    }
#endif

    /* Return addresses...*/
    psSysMem = psKmAlloc->hPagesHandle;
#ifdef PALLOC_EXPOSE_KM_HANDLE
    sUmAllocCp.hKmAllocHandle = psKmAlloc->hPagesHandle;
#endif

    /* Check if contiguous...*/
    psKmAlloc->sAllocInfo.bIsContiguous = SYSMEMKM_IsContiguous(psKmAlloc->hPagesHandle);

    /* Get the device id...*/
    ui32Result = DMANKM_GetDeviceId(hDevHandle, &sUmAllocCp.ui32DeviceId);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        goto error_get_dev_id;
    }

    sUmAllocCp.lOffset = 0;
    if (psSysMem->hRegHandle)
    {
        // Determine the offset to memory if it has been made mappable in UM.
        sUmAllocCp.lOffset = (long)pui64Phys[0];
    }

    /* Calculate the size of the allocation in pages...*/
    ui32PageNo = (sUmAllocCp.ui32Size + SYS_MMU_PAGE_SIZE - 1) / SYS_MMU_PAGE_SIZE;

    psKmAlloc->sAllocInfo.psSysPAddr = IMG_BIGORSMALL_ALLOC(sizeof(IMG_SYS_PHYADDR) * ui32PageNo);
    IMG_ASSERT(psKmAlloc->sAllocInfo.psSysPAddr);
    if (IMG_NULL == psKmAlloc->sAllocInfo.psSysPAddr)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_page_array;
    }
    IMG_MEMSET(psKmAlloc->sAllocInfo.psSysPAddr, 0, sizeof(IMG_SYS_PHYADDR) * ui32PageNo);

    for (ui32PageIdx = 0; ui32PageIdx < ui32PageNo; ++ui32PageIdx)
    {
        psKmAlloc->sAllocInfo.psSysPAddr[ui32PageIdx] = pui64Phys[ui32PageIdx];
    }

    /* Register this with the resource manager...*/
    ui32Result = RMAN_RegisterResource(psAttachContext->hResBHandle, PALLOC_RES_TYPE_1,
                                       palloc_fnFree, psKmAlloc, IMG_NULL, &sUmAllocCp.ui32AllocId);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        goto error_resource_register;
    }

    LOG_EVENT(PALLOC, PALLOC_ALLOCID, (LOG_FLAG_END | LOG_FLAG_QUAL_ARG1 | LOG_FLAG_QUAL_ARG2),
              ui32AttachId, sUmAllocCp.ui32AllocId);

    /* Unlock the device...*/
    DMANKM_UnlockDeviceContext(hDevHandle);

    /* Copy to user changed PALLOC_sUmAlloc, including physical device addresses */
    if (SYSOSKM_CopyToUser(psUmAlloc, &sUmAllocCp, sizeof(sUmAllocCp)))
    {
        ui32Result = IMG_ERROR_FATAL;
        goto error_copy_to_user;
    }
    if (SYSOSKM_CopyToUser(psUmAlloc->aui64DevPAddr, psKmAlloc->sAllocInfo.psSysPAddr,
                           sizeof(psKmAlloc->sAllocInfo.psSysPAddr[0]) * ui32PageNo))
    {
        ui32Result = IMG_ERROR_FATAL;
        goto error_copy_to_user;
    }

    LOG_EVENT(PALLOC, PALLOC_ALLOC, (LOG_FLAG_END | LOG_FLAG_QUAL_ARG1 | LOG_FLAG_QUAL_ARG2),
              ui32AttachId, sUmAllocCp.ui32Size);

    /* Return. */
    return IMG_SUCCESS;

    /* Error handling. */
error_copy_to_user:
    /* Free everything. */
    PALLOC_Free1(sUmAllocCp.ui32AllocId);
    goto error_return;

error_resource_register:
    IMG_BIGORSMALL_FREE(sizeof(IMG_SYS_PHYADDR) * ui32PageNo, psKmAlloc->sAllocInfo.psSysPAddr);
error_page_array:
error_get_dev_id:
#ifndef SYSBRG_BRIDGING
error_cpu_km_addr:
#endif /* SYSBRG_BRIDGING */
    SYSMEMU_FreePages(psKmAlloc->hPagesHandle);
error_alloc_pages:
    IMG_FREE(psKmAlloc);
error_alloc_info:
    /* Unlock the device. */
    DMANKM_UnlockDeviceContext(hDevHandle);
error_return:
    return ui32Result;
}
/*!
******************************************************************************
 @Function              SYSMEMKM_AllocPages
******************************************************************************/
static IMG_RESULT AllocPages(
    SYSMEM_Heap *     heap,
    IMG_UINT32        ui32Size,
    SYSMEMU_sPages *  psPages,
    SYS_eMemAttrib    eMemAttrib
)
{
    struct priv_params *  prv = (struct priv_params *)heap->priv;
    IMG_PHYSADDR          paCpuPhysAddr, paOffset;
    IMG_RESULT            ui32Result;
    IMG_PHYSADDR *        ppaCpuPhysAddrs;
    size_t                numPages, pg_i;
    size_t                physAddrArrSize;

    /* Allocate the required memory from the pool...*/
    psPages->pvImplData = (IMG_VOID *)gen_pool_alloc(prv->pool, ui32Size);
    if (psPages->pvImplData == IMG_NULL)
    {
        return IMG_ERROR_OUT_OF_MEMORY;
    }

    paCpuPhysAddr = CpuKmAddrToCpuPAddr(heap, psPages->pvImplData);
    IMG_ASSERT(paCpuPhysAddr != 0);
    if (paCpuPhysAddr == 0)
    {
        ui32Result = IMG_ERROR_GENERIC_FAILURE;
        goto error_map;
    }

    numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1) / HOST_MMU_PAGE_SIZE;
    physAddrArrSize = sizeof(*ppaCpuPhysAddrs) * numPages;
    ppaCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
    if (!ppaCpuPhysAddrs)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_array_alloc;
    }
    for (pg_i = 0, paOffset = 0; pg_i < numPages; paOffset += HOST_MMU_PAGE_SIZE, ++pg_i)
    {
        ppaCpuPhysAddrs[pg_i] = paCpuPhysAddr + paOffset;
    }
    psPages->ppaPhysAddr = ppaCpuPhysAddrs;

    /* Add this to the list of mappable regions...*/
    ui32Result = SYSBRGU_CreateMappableRegion(psPages->ppaPhysAddr[0], ui32Size, eMemAttrib,
                                              IMG_FALSE, psPages, &psPages->hRegHandle);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        goto error_region_create;
    }

#if defined (CLEAR_PAGES)
    IMG_MEMSET(psPages->pvImplData, 0, ui32Size);
#endif

    return IMG_SUCCESS;

    /* Error handling. */
error_region_create:
    IMG_BIGORSMALL_FREE(physAddrArrSize, psPages->ppaPhysAddr);
error_array_alloc:
error_map:
    gen_pool_free(prv->pool, (unsigned long)psPages->pvImplData, ui32Size);
    psPages->ppaPhysAddr = IMG_NULL;
    psPages->hRegHandle = IMG_NULL;

    return ui32Result;
}
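/* The gen_pool based AllocPages above hands back psPages->pvImplData as the
   pool allocation cookie. Its free path would be expected to mirror the error
   handling: release the per-page address array and return the block with
   gen_pool_free(). A minimal sketch only (hypothetical function name, not part
   of the original source); tearing down the region registered via
   SYSBRGU_CreateMappableRegion is omitted here: */
static IMG_VOID FreePagesSketch(SYSMEM_Heap *heap, IMG_UINT32 ui32Size, SYSMEMU_sPages *psPages)
{
    struct priv_params *prv = (struct priv_params *)heap->priv;
    size_t numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1) / HOST_MMU_PAGE_SIZE;

    /* Release the physical address array allocated in AllocPages. */
    IMG_BIGORSMALL_FREE(sizeof(*psPages->ppaPhysAddr) * numPages, psPages->ppaPhysAddr);
    psPages->ppaPhysAddr = IMG_NULL;

    /* Return the block to the genalloc pool. */
    gen_pool_free(prv->pool, (unsigned long)psPages->pvImplData, ui32Size);
}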
/*!
******************************************************************************
 @Function              SYSMEMKM_AllocPages
******************************************************************************/
static IMG_RESULT AllocPages(
    SYSMEM_Heap *     heap,
    IMG_UINT32        ui32Size,
    SYSMEMU_sPages *  psPages,
    SYS_eMemAttrib    eMemAttrib
)
{
    IMG_UINT32     Res;
    dma_addr_t     dma;
    unsigned       numPages, pg_i;
    IMG_UINT64 *   pCpuPhysAddrs;
    IMG_VOID **    pCpuKernAddrs = IMG_NULL;
    size_t         physAddrArrSize;

    // This heap only supports uncached | write-combined memory allocations
    IMG_ASSERT(eMemAttrib == (SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE));
    eMemAttrib = SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE;

    numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1) / HOST_MMU_PAGE_SIZE;

    // Memory for physical addresses
    physAddrArrSize = sizeof(*pCpuPhysAddrs) * numPages;
    pCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
    if (!pCpuPhysAddrs)
    {
        Res = IMG_ERROR_OUT_OF_MEMORY;
        goto errPhysAddrsAlloc;
    }

    /* Try a single contiguous coherent allocation first. */
    psPages->pvCpuKmAddr = dma_alloc_coherent(NULL, ui32Size, &dma, GFP_KERNEL | __GFP_HIGHMEM);
    if (!psPages->pvCpuKmAddr)
    {
        /* Fall back to allocating the buffer page by page. */
        pCpuKernAddrs = IMG_BIGORSMALL_ALLOC(numPages * (sizeof(IMG_VOID **)));
        if (!pCpuKernAddrs)
        {
            Res = IMG_ERROR_OUT_OF_MEMORY;
            goto errKernAddrsAlloc;
        }
        for (pg_i = 0; pg_i < numPages; ++pg_i)
        {
            pCpuKernAddrs[pg_i] = dma_alloc_coherent(NULL, PAGE_SIZE, &dma,
                                                     GFP_KERNEL | __GFP_HIGHMEM);
            if (!pCpuKernAddrs[pg_i])
            {
                Res = IMG_ERROR_OUT_OF_MEMORY;
                goto errPageAlloc;
            }
            pCpuPhysAddrs[pg_i] = VAL64(dma);
        }
        psPages->pvImplData = (IMG_VOID *)((long)pCpuKernAddrs | 1);
    }
    else
    {
        dma_addr_t paddr;

        psPages->pvImplData = (IMG_VOID *)dma;
        paddr = dma;
        for (pg_i = 0; pg_i < numPages; ++pg_i)
        {
            pCpuPhysAddrs[pg_i] = VAL64(paddr + (PAGE_SIZE * pg_i));
        }
    }

    // Set pointer to physical address in structure
    psPages->ppaPhysAddr = pCpuPhysAddrs;

    Res = SYSBRGU_CreateMappableRegion(psPages->ppaPhysAddr[0], ui32Size, eMemAttrib,
                                       IMG_TRUE, psPages, &psPages->hRegHandle);
    DEBUG_REPORT(REPORT_MODULE_SYSMEM, "%s (unified) region of size %u phys 0x%llx",
                 __FUNCTION__, ui32Size, psPages->ppaPhysAddr[0]);
    IMG_ASSERT(Res == IMG_SUCCESS);
    if (Res != IMG_SUCCESS)
    {
        goto errCreateMapRegion;
    }

    return IMG_SUCCESS;

errCreateMapRegion:
errPageAlloc:
    if (pCpuKernAddrs != IMG_NULL)
    {
        /* Page-by-page path: free the coherent pages allocated so far.
           (pg_i is unsigned, so count down without testing for >= 0.) */
        while (pg_i > 0)
        {
            --pg_i;
            dma_free_coherent(NULL, PAGE_SIZE, pCpuKernAddrs[pg_i], pCpuPhysAddrs[pg_i]);
        }
        IMG_BIGORSMALL_FREE(numPages * sizeof(*pCpuKernAddrs), pCpuKernAddrs);
    }
    else
    {
        /* Contiguous path: release the single coherent allocation. */
        dma_free_coherent(NULL, ui32Size, psPages->pvCpuKmAddr, dma);
    }
errKernAddrsAlloc:
    IMG_BIGORSMALL_FREE(numPages * sizeof(*pCpuPhysAddrs), pCpuPhysAddrs);
errPhysAddrsAlloc:
    return Res;
}