/*
 * res_trk_pmem_alloc - reserve backing memory for a DDL buffer descriptor.
 *
 * Chooses one of three strategies:
 *  - ION allocation when ION is enabled and this is not a secure (CP)
 *    session;
 *  - for CP/secure sessions, no allocation at all: the buffer is pointed
 *    at the fixed PIL firmware base address;
 *  - otherwise a raw contiguous ("acm") allocation with no kernel mapping.
 *
 * @addr:      buffer descriptor to fill in; its mem_type selects the pool.
 * @sz:        requested payload size in bytes.
 * @alignment: extra headroom added so the caller can align inside the
 *             allocation.
 *
 * Returns 0 on success, -EINVAL for a NULL @addr, -ENOMEM on any
 * allocation failure.
 *
 * NOTE(review): on the successful ION path neither addr->buffer_size nor
 * addr->alloced_phys_addr is filled in here — presumably the caller maps
 * the handle and completes the descriptor later; confirm against callers.
 */
static int res_trk_pmem_alloc
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size;
	struct ddl_context *ddl_context;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		rc = -EINVAL;
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	/* Publish the requested pool so res_trk_get_mem_type() below
	 * reflects this buffer's mem_type. */
	res_trk_set_mem_type(addr->mem_type);
	/* Over-allocate by the alignment so the caller can align within. */
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!res_trk_is_cp_enabled() ||
			!res_trk_check_for_sec_session()) {
			/* Lazily acquire the shared video ION client. */
			if (!ddl_context->video_ion_client)
				ddl_context->video_ion_client =
					res_trk_get_ion_client();
			if (!ddl_context->video_ion_client) {
				DDL_MSG_ERROR(
					"%s() :DDL ION Client Invalid handle\n",
					__func__);
				rc = -ENOMEM;
				goto bail_out;
			}
			/* ION works on page granularity: round up to 4K. */
			alloc_size = (alloc_size+4095) & ~4095;
			addr->alloc_handle = ion_alloc(
					ddl_context->video_ion_client,
					alloc_size, SZ_4K,
					res_trk_get_mem_type());
			if (IS_ERR_OR_NULL(addr->alloc_handle)) {
				DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
					__func__);
				rc = -ENOMEM;
				goto bail_out;
			}
		} else {
			/* Secure session: firmware region is at a fixed PIL
			 * base address, so only record it — no allocation. */
			addr->alloc_handle = NULL;
			addr->alloced_phys_addr = PIL_FW_BASE_ADDR;
			addr->buffer_size = sz;
		}
	} else {
		/* Non-ION fallback: contiguous physical memory, unmapped. */
		addr->alloced_phys_addr = (phys_addr_t)
			allocate_contiguous_memory_nomap(alloc_size,
				res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
				__func__, alloc_size);
			rc = -ENOMEM;
			goto bail_out;
		}
		addr->buffer_size = sz;
		return rc;
	}
bail_out:
	return rc;
}
/*
 * ddl_pmem_alloc - allocate and map a contiguous DDL buffer.
 *
 * Allocates (sz + alignment) bytes of unmapped contiguous memory, then
 * maps it through the msm subsystem mapper to obtain both a kernel
 * virtual address and an IOVA, and records the aligned physical/virtual
 * addresses in @addr.
 *
 * @addr:      descriptor to fill (physical/virtual base and aligned
 *             addresses, mapped_buffer handle, buffer_size).
 * @sz:        requested payload size in bytes.
 * @alignment: alignment for the usable region; also selects the MMU
 *             subsystem table entry (index 1 for 128KB alignment) and
 *             the 8K-IOVA-alignment flag for alignments above 4K.
 *
 * Returns the kernel virtual base address on success, NULL on failure.
 * On failure all partially acquired resources are released in reverse
 * order via the goto chain.
 */
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0, flags = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	/* Over-allocate so the aligned region still covers sz bytes. */
	alloc_size = (sz + alignment);
	addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
	if (!addr->alloced_phys_addr) {
		DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
			__func__, alloc_size);
		goto bail_out;
	}
	/* Request both an IOVA (device view) and a kernel mapping. */
	flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
	if (alignment == DDL_KILO_BYTE(128))
		index = 1;
	else if (alignment > SZ_4K)
		flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
	addr->mapped_buffer =
		msm_subsystem_map_buffer(
			(unsigned long)addr->alloced_phys_addr,
			alloc_size, flags, &vidc_mmu_subsystem[index],
			sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
	if (IS_ERR(addr->mapped_buffer)) {
		pr_err(" %s() buffer map failed", __func__);
		goto free_acm_alloc;
	}
	mapped_buffer = addr->mapped_buffer;
	/* Both views must exist; a mapper "success" with either missing
	 * is still unusable. */
	if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
		pr_err("%s() map buffers failed\n", __func__);
		goto free_map_buffers;
	}
	/* physical_base_addr holds the device-visible IOVA, not the raw
	 * physical address. */
	addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
	addr->virtual_base_addr = mapped_buffer->vaddr;
	addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
	/* Apply the same alignment offset to the virtual view. */
	offset = (u32)(addr->align_physical_addr -
			addr->physical_base_addr);
	addr->align_virtual_addr = addr->virtual_base_addr + offset;
	addr->buffer_size = sz;
	return addr->virtual_base_addr;
free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_alloc:
	free_contiguous_memory_by_paddr(
		(unsigned long)addr->alloced_phys_addr);
	addr->alloced_phys_addr = (phys_addr_t)NULL;
bail_out:
	return NULL;
}
/*
 * res_trk_pmem_alloc - allocate backing memory for a DDL buffer.
 *
 * When ION is enabled, allocates a 4K-rounded ION buffer from the pool
 * selected by @addr->mem_type and returns the ION handle as an opaque
 * cookie; otherwise returns the physical address of an unmapped
 * contiguous ("acm") allocation.
 *
 * @addr:      descriptor whose mem_type selects the pool; receives the
 *             handle or physical address.
 * @sz:        requested payload size in bytes.
 * @alignment: extra headroom so the caller can align inside the buffer.
 *
 * Returns a non-NULL cookie on success, NULL on failure.
 *
 * FIX: ion_alloc() reports failure with an ERR_PTR (or NULL) — the old
 * error path jumped to a cleanup label that handed that ERR_PTR to
 * ion_free(), which expects a valid handle. Nothing was allocated on
 * that path, so the correct action is to clear the handle and bail out;
 * the now-unreachable cleanup label has been removed.
 */
static void *res_trk_pmem_alloc
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size;
	struct ddl_context *ddl_context;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	/* Publish this buffer's pool for res_trk_get_mem_type() below. */
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		/* Lazily acquire the shared video ION client. */
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
				__func__);
			goto bail_out;
		}
		/* ION works on page granularity: round up to 4K. */
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
				ddl_context->video_ion_client,
				alloc_size, SZ_4K,
				res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
				__func__);
			/* Don't leave an ERR_PTR behind for later cleanup
			 * code to mistake for a valid handle. */
			addr->alloc_handle = NULL;
			goto bail_out;
		}
		return (void *) addr->alloc_handle;
	} else {
		/* Non-ION fallback: contiguous physical memory, unmapped. */
		addr->alloced_phys_addr = (phys_addr_t)
			allocate_contiguous_memory_nomap(alloc_size,
				res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
				__func__, alloc_size);
			goto bail_out;
		}
		addr->buffer_size = sz;
		return (void *)addr->alloced_phys_addr;
	}
bail_out:
	return NULL;
}
/*
 * msm_ion_get_base - carve out a contiguous region for an ION heap.
 *
 * @size:        number of bytes to reserve.
 * @memory_type: ION_EBI_TYPE, ION_SMI_TYPE or (when the pmem rebase
 *               config is enabled) ION_ADSP_TYPE.
 * @align:       required alignment of the region.
 *
 * Returns the base address of the reservation, or 0 for an unknown
 * memory type (logged via pr_err).
 */
static unsigned long msm_ion_get_base(unsigned long size, int memory_type,
					unsigned int align)
{
	unsigned long base;

	if (memory_type == ION_EBI_TYPE) {
		base = allocate_contiguous_ebi_nomap(size, align);
	} else if (memory_type == ION_SMI_TYPE) {
		base = allocate_contiguous_memory_nomap(size,
						MEMTYPE_SMI_ION, align);
#ifdef CONFIG_SEC_KERNEL_REBASE_FOR_PMEM_OPTIMIZATION
	} else if (memory_type == ION_ADSP_TYPE) {
		base = allocate_contiguous_memory_nomap(size,
						MEMTYPE_PMEM_ADSP, align);
#endif
	} else {
		pr_err("%s: Unknown memory type %d\n", __func__, memory_type);
		base = 0;
	}
	return base;
}
/*
 * msm_ion_get_base - carve out a page-aligned contiguous region for an
 * ION heap.
 *
 * @size:        number of bytes to reserve.
 * @memory_type: ION_EBI_TYPE or ION_SMI_TYPE.
 *
 * Returns the base address of the reservation, or 0 for an unknown
 * memory type.
 *
 * Improvements: the unknown-type case now logs an error, matching the
 * sibling variants of this helper in this file instead of failing
 * silently; the unreachable break-after-return statements are gone.
 */
static unsigned long msm_ion_get_base(unsigned long size, int memory_type)
{
	switch (memory_type) {
	case ION_EBI_TYPE:
		return allocate_contiguous_ebi_nomap(size, PAGE_SIZE);
	case ION_SMI_TYPE:
		return allocate_contiguous_memory_nomap(size, MEMTYPE_SMI,
							PAGE_SIZE);
	default:
		pr_err("%s: Unknown memory type %d\n", __func__, memory_type);
		return 0;
	}
}
/*
 * msm_ion_get_base - carve out a contiguous region for an ION heap.
 *
 * @size:        number of bytes to reserve.
 * @memory_type: ION_EBI_TYPE or ION_SMI_TYPE.
 * @align:       required alignment of the region.
 *
 * Returns the base address of the reservation, or 0 for an unknown
 * memory type (logged via pr_err).
 */
static unsigned long msm_ion_get_base(unsigned long size, int memory_type,
					unsigned int align)
{
	unsigned long base;

	if (memory_type == ION_EBI_TYPE) {
		base = allocate_contiguous_ebi_nomap(size, align);
	} else if (memory_type == ION_SMI_TYPE) {
		base = allocate_contiguous_memory_nomap(size, MEMTYPE_SMI,
							align);
	} else {
		pr_err("%s: Unknown memory type %d\n", __func__, memory_type);
		base = 0;
	}
	return base;
}
/*
 * res_trk_init - one-time initialisation of the resource tracker.
 *
 * Rejects re-initialisation (device or irq already recorded) and a NULL
 * @device. Otherwise zeroes the tracker state, records the device, IRQ
 * and (under bus scaling) the bus client platform data, selects the
 * 1080p core, and — for EBI1 pmem — pre-allocates and ioremaps the
 * firmware region.
 *
 * @device: owning platform device; its platform_data is the bus-scale
 *          pdata when CONFIG_MSM_BUS_SCALING is set.
 * @irq:    interrupt number to record.
 *
 * NOTE(review): the firmware allocation uses resource_context.memtype,
 * which was just zeroed by the memset and is never assigned before this
 * call — confirm that memtype 0 is the intended pool here.
 * NOTE(review): `memorytype == PMEM_MEMTYPE_EBI1` compares two macros,
 * so the branch is decided at compile time; verify PMEM_MEMTYPE is
 * meant to vary per build.
 */
void res_trk_init(struct device *device, u32 irq)
{
	u32 memorytype = PMEM_MEMTYPE;
	if (resource_context.device || resource_context.irq_num ||
		!device) {
		VCDRES_MSG_ERROR("%s() Resource Tracker Init error\n",
			__func__);
	} else {
		memset(&resource_context, 0, sizeof(resource_context));
		mutex_init(&resource_context.lock);
		resource_context.device = device;
		resource_context.irq_num = irq;
#ifdef CONFIG_MSM_BUS_SCALING
		resource_context.vidc_bus_client_pdata =
			(struct msm_bus_scale_pdata *)device->platform_data;
#endif
		resource_context.core_type = VCD_CORE_1080P;
		if (memorytype == PMEM_MEMTYPE_EBI1) {
			/* Pre-allocate the firmware region and map it into
			 * the kernel; undo the allocation if the ioremap
			 * fails so the tracker stays consistent. */
			resource_context.device_addr = (phys_addr_t)
				allocate_contiguous_memory_nomap(VIDC_FW_SIZE,
					resource_context.memtype, SZ_4K);
			if (resource_context.device_addr) {
				resource_context.base_addr = (u8 *)
					ioremap((unsigned long)
						resource_context.device_addr,
						VIDC_FW_SIZE);
				if (!resource_context.base_addr) {
					free_contiguous_memory_by_paddr(
						(unsigned long)
						resource_context.device_addr);
					resource_context.device_addr =
						(phys_addr_t)NULL;
				}
			}
		}
	}
}
/*
 * ddl_pmem_alloc - allocate and kernel-map a DDL buffer (ION or pmem).
 *
 * Adds guard bytes sized by the alignment class (linear vs tiled),
 * allocates either an ION buffer (resolving its physical address and
 * mapping it uncached into the kernel) or a contiguous pmem region
 * (mapped via the msm subsystem mapper), zeroes it, and records base
 * and aligned physical/virtual addresses in @buff_addr. On any failure
 * @buff_addr is zeroed after releasing whatever was acquired.
 *
 * @buff_addr: descriptor to fill; zeroed on failure.
 * @sz:        requested payload size in bytes.
 * @align:     DDL_LINEAR_BUFFER_ALIGN_BYTES selects the 32-byte linear
 *             class, anything else the tiled guard/mask macros.
 *
 * FIXES versus the previous revision:
 *  - ion_alloc() signals failure with an ERR_PTR (or NULL); the old
 *    `!alloc_handle` test let error pointers flow into ion_phys().
 *    Every sibling allocator in this file already uses IS_ERR_OR_NULL.
 *  - When ion_map_kernel() fails, no kernel mapping exists, so the
 *    error path now frees the buffer directly instead of first calling
 *    ion_unmap_kernel() on a handle that was never mapped (the now
 *    unused unmap_ion_buffer label is removed).
 */
void ddl_pmem_alloc(struct ddl_buf_addr *buff_addr, size_t sz, u32 align)
{
	u32 guard_bytes, align_mask;
	u32 physical_addr;
	u32 align_offset;
	u32 alloc_size, flags = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long *kernel_vaddr = NULL;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int ret = -EINVAL;

	if (!buff_addr) {
		ERR("\n%s() Invalid Parameters\n", __func__);
		return;
	}
	/* Guard bytes give room to align; the mask rounds up within them. */
	if (align == DDL_LINEAR_BUFFER_ALIGN_BYTES) {
		guard_bytes = 31;
		align_mask = 0xFFFFFFE0U;
	} else {
		guard_bytes = DDL_TILE_BUF_ALIGN_GUARD_BYTES;
		align_mask = DDL_TILE_BUF_ALIGN_MASK;
	}
	ddl_context = ddl_get_context();
	alloc_size = sz + guard_bytes;
	if (res_trk_get_enable_ion()) {
		/* Lazily acquire the shared video ION client. */
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			ERR("\n%s(): DDL ION Client Invalid handle\n",
				__func__);
			goto bailout;
		}
		buff_addr->mem_type = res_trk_get_mem_type();
		buff_addr->alloc_handle = ion_alloc(
				ddl_context->video_ion_client,
				alloc_size, SZ_4K,
				buff_addr->mem_type);
		if (IS_ERR_OR_NULL(buff_addr->alloc_handle)) {
			ERR("\n%s(): DDL ION alloc failed\n", __func__);
			goto bailout;
		}
		ret = ion_phys(ddl_context->video_ion_client,
				buff_addr->alloc_handle, &phyaddr, &len);
		if (ret || !phyaddr) {
			ERR("\n%s(): DDL ION client physical failed\n",
				__func__);
			goto free_ion_buffer;
		}
		buff_addr->physical_base_addr = (u32 *)phyaddr;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
				ddl_context->video_ion_client,
				buff_addr->alloc_handle, UNCACHED);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			ERR("\n%s(): DDL ION map failed\n", __func__);
			goto free_ion_buffer;
		}
		buff_addr->virtual_base_addr = (u32 *)kernel_vaddr;
		DBG("ddl_ion_alloc: handle(0x%x), mem_type(0x%x), "\
			"phys(0x%x), virt(0x%x), size(%u), align(%u), "\
			"alloced_len(%u)", (u32)buff_addr->alloc_handle,
			(u32)buff_addr->mem_type,
			(u32)buff_addr->physical_base_addr,
			(u32)buff_addr->virtual_base_addr,
			alloc_size, align, len);
	} else {
		physical_addr = (u32)
			allocate_contiguous_memory_nomap(alloc_size,
				ddl_context->memtype, SZ_4K);
		if (!physical_addr) {
			ERR("\n%s(): DDL pmem allocate failed\n",
				__func__);
			goto bailout;
		}
		buff_addr->physical_base_addr = (u32 *) physical_addr;
		flags = MSM_SUBSYSTEM_MAP_KADDR;
		buff_addr->mapped_buffer =
			msm_subsystem_map_buffer(
				(unsigned long)physical_addr,
				alloc_size, flags, NULL, 0);
		if (IS_ERR(buff_addr->mapped_buffer)) {
			ERR("\n%s() buffer map failed\n", __func__);
			goto free_pmem_buffer;
		}
		mapped_buffer = buff_addr->mapped_buffer;
		if (!mapped_buffer->vaddr) {
			ERR("\n%s() mapped virtual address is NULL\n",
				__func__);
			goto unmap_pmem_buffer;
		}
		buff_addr->virtual_base_addr = mapped_buffer->vaddr;
		DBG("ddl_pmem_alloc: mem_type(0x%x), phys(0x%x),"\
			" virt(0x%x), sz(%u), align(%u)",
			(u32)buff_addr->mem_type,
			(u32)buff_addr->physical_base_addr,
			(u32)buff_addr->virtual_base_addr,
			alloc_size, SZ_4K);
	}
	/* Common tail: zero the buffer and record aligned addresses. */
	memset(buff_addr->virtual_base_addr, 0 , sz + guard_bytes);
	buff_addr->buffer_size = sz;
	buff_addr->align_physical_addr = (u32 *)
		(((u32)buff_addr->physical_base_addr + guard_bytes) &
		align_mask);
	align_offset = (u32) (buff_addr->align_physical_addr) -
		(u32)buff_addr->physical_base_addr;
	buff_addr->align_virtual_addr =
		(u32 *) ((u32) (buff_addr->virtual_base_addr) +
		align_offset);
	DBG("%s(): phys(0x%x) align_phys(0x%x), virt(0x%x),"\
		" align_virt(0x%x)", __func__,
		(u32)buff_addr->physical_base_addr,
		(u32)buff_addr->align_physical_addr,
		(u32)buff_addr->virtual_base_addr,
		(u32)buff_addr->align_virtual_addr);
	return;

unmap_pmem_buffer:
	if (buff_addr->mapped_buffer)
		msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
free_pmem_buffer:
	if (buff_addr->physical_base_addr)
		free_contiguous_memory_by_paddr((unsigned long)
			buff_addr->physical_base_addr);
	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
	return;

free_ion_buffer:
	if (ddl_context->video_ion_client) {
		if (buff_addr->alloc_handle)
			ion_free(ddl_context->video_ion_client,
				buff_addr->alloc_handle);
	}
bailout:
	/* Zeroing also clears any ERR_PTR left in alloc_handle. */
	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
}
/*
 * ddl_pmem_alloc - allocate a DDL buffer (ION or contiguous) and map it
 * through the msm subsystem mapper.
 *
 * Allocates (sz + alignment) bytes either from ION (resolving the
 * physical address via ion_phys) or from unmapped contiguous memory,
 * then maps the physical region for both IOVA and kernel access and
 * records base and aligned addresses in @addr.
 *
 * @addr:      descriptor to fill.
 * @sz:        requested payload size in bytes.
 * @alignment: alignment of the usable region; selects the MMU table
 *             entry (index 1 for 128KB) and the 8K-IOVA flag above 4K.
 *
 * Returns the kernel virtual base address on success, NULL on failure.
 *
 * FIX: the error path at free_acm_ion_alloc used to decide what to
 * release by testing video_ion_client for NULL. When the non-ION
 * allocation path failed during mapping while an ION client happened
 * to exist, that leaked the contiguous allocation and fed a stale
 * handle to ion_free(). Cleanup now keys on res_trk_get_enable_ion(),
 * the exact predicate the allocation itself used.
 */
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0, flags = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	int rc = -EINVAL;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		/* Lazily acquire the shared video ION client. */
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
				__func__);
			goto bail_out;
		}
		/* res_trk_get_mem_type() yields a heap id; ion_alloc
		 * takes a heap mask, hence the shift. */
		addr->alloc_handle = ion_alloc(
				ddl_context->video_ion_client,
				alloc_size, SZ_4K,
				(1<<res_trk_get_mem_type()));
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
				__func__);
			goto bail_out;
		}
		rc = ion_phys(ddl_context->video_ion_client,
			addr->alloc_handle, &phyaddr, &len);
		if (rc || !phyaddr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
				__func__);
			goto free_acm_ion_alloc;
		}
		addr->alloced_phys_addr = phyaddr;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
			allocate_contiguous_memory_nomap(alloc_size,
				res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
				__func__, alloc_size);
			goto bail_out;
		}
	}
	/* Map for both device (IOVA) and kernel access. */
	flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
	if (alignment == DDL_KILO_BYTE(128))
		index = 1;
	else if (alignment > SZ_4K)
		flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
	addr->mapped_buffer =
		msm_subsystem_map_buffer(
			(unsigned long)addr->alloced_phys_addr,
			alloc_size, flags, &vidc_mmu_subsystem[index],
			sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
	if (IS_ERR(addr->mapped_buffer)) {
		pr_err(" %s() buffer map failed", __func__);
		goto free_acm_ion_alloc;
	}
	mapped_buffer = addr->mapped_buffer;
	if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
		pr_err("%s() map buffers failed\n", __func__);
		goto free_map_buffers;
	}
	/* physical_base_addr is the device-visible IOVA. */
	addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
	addr->virtual_base_addr = mapped_buffer->vaddr;
	addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
	offset = (u32)(addr->align_physical_addr -
			addr->physical_base_addr);
	addr->align_virtual_addr = addr->virtual_base_addr + offset;
	addr->buffer_size = sz;
	return addr->virtual_base_addr;

free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_ion_alloc:
	/* Release via whichever allocator actually provided the memory. */
	if (res_trk_get_enable_ion()) {
		if (addr->alloc_handle) {
			ion_free(ddl_context->video_ion_client,
				addr->alloc_handle);
			addr->alloc_handle = NULL;
		}
	} else {
		free_contiguous_memory_by_paddr(
			(unsigned long)addr->alloced_phys_addr);
		addr->alloced_phys_addr = (phys_addr_t)NULL;
	}
bail_out:
	return NULL;
}
/*
 * ddl_pmem_alloc - allocate and map a DDL buffer, IOMMU/secure aware.
 *
 * ION path: 4K-rounded ION allocation, kernel-mapped cached or uncached
 * (uncached for secure sessions and firmware memory); the device address
 * comes from ion_phys() for secure sessions or ion_map_iommu()
 * otherwise. Non-ION path: contiguous allocation mapped through the msm
 * subsystem mapper for IOVA + kernel access.
 *
 * @addr:      descriptor to fill; mem_type selects pool and caching.
 * @sz:        requested payload size in bytes.
 * @alignment: alignment of the usable region; 128KB selects MMU table
 *             entry 1, values above 4K request 8K IOVA alignment.
 *
 * Returns the kernel virtual base address on success, NULL on failure.
 *
 * NOTE(review): the ION branch records buffer_size = alloc_size (the
 * rounded-up size) while the non-ION branch records sz — confirm the
 * asymmetry is intentional.
 */
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0 ;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	unsigned long ionflag = 0;
	unsigned long flags = 0;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	/* Publish this buffer's pool for res_trk_get_mem_type() below. */
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		/* Lazily acquire the shared video ION client. */
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
				__func__);
			goto bail_out;
		}
		/* ION works on page granularity: round up to 4K. */
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
				ddl_context->video_ion_client,
				alloc_size, SZ_4K,
				res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
				__func__);
			goto bail_out;
		}
		/* Secure sessions and firmware memory must be uncached. */
		if (res_trk_check_for_sec_session() ||
			addr->mem_type == DDL_FW_MEM)
			ionflag = UNCACHED;
		else
			ionflag = CACHED;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
				ddl_context->video_ion_client,
				addr->alloc_handle, ionflag);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			DDL_MSG_ERROR("%s() :DDL ION map failed\n",
				__func__);
			goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			/* Secure: device uses the real physical address. */
			rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr, &len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
					__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			/* Non-secure: device address is an IOMMU mapping
			 * in the video domain's main pool. */
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K,
					0,
					&iova,
					&buffer_size,
					UNCACHED, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
					__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
				__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
			allocate_contiguous_memory_nomap(alloc_size,
				res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
				__func__, alloc_size);
			goto bail_out;
		}
		/* Map for both device (IOVA) and kernel access. */
		flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
		if (alignment == DDL_KILO_BYTE(128))
			index = 1;
		else if (alignment > SZ_4K)
			flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
		addr->mapped_buffer =
			msm_subsystem_map_buffer(
				(unsigned long)addr->alloced_phys_addr,
				alloc_size, flags,
				&vidc_mmu_subsystem[index],
				sizeof(vidc_mmu_subsystem[index])/
				sizeof(unsigned int));
		if (IS_ERR(addr->mapped_buffer)) {
			pr_err(" %s() buffer map failed", __func__);
			goto free_acm_alloc;
		}
		mapped_buffer = addr->mapped_buffer;
		if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
			pr_err("%s() map buffers failed\n", __func__);
			goto free_map_buffers;
		}
		addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
		addr->virtual_base_addr = mapped_buffer->vaddr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;
free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_alloc:
	free_contiguous_memory_by_paddr(
		(unsigned long)addr->alloced_phys_addr);
	addr->alloced_phys_addr = (phys_addr_t)NULL;
	return NULL;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}
/*
 * ddl_pmem_alloc - allocate, kernel-map and zero a contiguous DDL buffer.
 *
 * Adds guard bytes sized by the alignment class (31/0xFFFFFFE0 for the
 * linear class, tile macros otherwise), allocates contiguous memory
 * from ddl_context->memtype, maps it into the kernel via the msm
 * subsystem mapper, zeroes it, and records base and aligned
 * physical/virtual addresses in @buff_addr.
 *
 * @buff_addr: descriptor to fill; on any failure all of its fields
 *             touched here are reset to NULL/0 at bailout.
 * @sz:        requested payload size in bytes.
 * @align:     DDL_LINEAR_BUFFER_ALIGN_BYTES selects the linear guard
 *             class, anything else the tiled guard/mask macros.
 *
 * Returns nothing; success is indicated by a non-NULL
 * buff_addr->virtual_base_addr.
 */
void ddl_pmem_alloc(struct ddl_buf_addr *buff_addr, size_t sz, u32 align)
{
	u32 guard_bytes, align_mask;
	u32 physical_addr;
	u32 align_offset;
	u32 alloc_size, flags = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	if (!buff_addr) {
		ERR("\n%s() Invalid Parameters", __func__);
		return;
	}
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	/* Guard bytes give room to align; the mask rounds up within them. */
	if (align == DDL_LINEAR_BUFFER_ALIGN_BYTES) {
		guard_bytes = 31;
		align_mask = 0xFFFFFFE0U;
	} else {
		guard_bytes = DDL_TILE_BUF_ALIGN_GUARD_BYTES;
		align_mask = DDL_TILE_BUF_ALIGN_MASK;
	}
	ddl_context = ddl_get_context();
	alloc_size = sz + guard_bytes;
	physical_addr = (u32)
		allocate_contiguous_memory_nomap(alloc_size,
			ddl_context->memtype, SZ_4K);
	if (!physical_addr) {
		pr_err("%s(): could not allocate kernel pmem buffers\n",
			__func__);
		goto bailout;
	}
	buff_addr->physical_base_addr = (u32 *) physical_addr;
	/* Kernel mapping only — no IOVA requested on this variant. */
	flags = MSM_SUBSYSTEM_MAP_KADDR;
	buff_addr->mapped_buffer =
		msm_subsystem_map_buffer((unsigned long)physical_addr,
			alloc_size, flags, NULL, 0);
	if (IS_ERR(buff_addr->mapped_buffer)) {
		pr_err(" %s() buffer map failed", __func__);
		goto free_acm_alloc;
	}
	mapped_buffer = buff_addr->mapped_buffer;
	if (!mapped_buffer->vaddr) {
		pr_err("%s() mapped virtual address is NULL", __func__);
		goto free_map_buffers;
	}
	buff_addr->virtual_base_addr = mapped_buffer->vaddr;
	memset(buff_addr->virtual_base_addr, 0 , sz + guard_bytes);
	buff_addr->buffer_size = sz;
	buff_addr->align_physical_addr = (u32 *)
		((physical_addr + guard_bytes) & align_mask);
	/* Apply the same alignment offset to the virtual view. */
	align_offset = (u32) (buff_addr->align_physical_addr) -
		physical_addr;
	buff_addr->align_virtual_addr = (u32 *)
		((u32) (buff_addr->virtual_base_addr) + align_offset);
	DBG_PMEM("\n%s() OUT: phy_addr(%p) ker_addr(%p) size(%u)",
		__func__, buff_addr->physical_base_addr,
		buff_addr->virtual_base_addr, buff_addr->buffer_size);
	return;
free_map_buffers:
	msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
free_acm_alloc:
	free_contiguous_memory_by_paddr((unsigned long) physical_addr);
bailout:
	/* Leave the descriptor in a clean "no buffer" state. */
	buff_addr->physical_base_addr = NULL;
	buff_addr->virtual_base_addr = NULL;
	buff_addr->buffer_size = 0;
	buff_addr->mapped_buffer = NULL;
}