/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area, or 0 for any.
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: if true, reserve exactly at @base; otherwise anywhere in
 *         the [@base, @limit) range.
 *
 * Reserves memory from the early allocator for a device-specific
 * contiguous region. Must be called by arch-specific code once the
 * early allocator (memblock or bootmem) is active and all other
 * subsystems have finished reserving memory.
 *
 * Return: 0 on success, negative errno from cma_declare_contiguous()
 * on failure.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int err = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
					 res_cma);

	if (err)
		return err;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				   cma_get_size(*res_cma));

	return 0;
}
static int check_smlog_alloc(struct device *dev, struct htc_smem_type *smem) { dma_addr_t addr = 0; int ret = 0; int smlog_buf_size = 0; if(!dev->cma_area){ pr_err("[smem]%s: CMA reserved fail.\n", __func__); cma_reserved = false; ret = -ENOMEM; goto alloc_fail; } cma_reserved = true; smlog_enabled = is_smlog_enabled(); if(smlog_enabled){ smlog_buf_size = cma_get_size(dev); smlog_base_vaddr = dma_alloc_writecombine(dev, smlog_buf_size, &addr, GFP_KERNEL); if (!smlog_base_vaddr) { pr_err("[smem]%s: cannot alloc memory for smlog.\n", __func__); ret = -ENOMEM; goto alloc_fail; } pr_info("[smem]%s: smlog is enabled.\n", __func__); }else pr_info("[smem]%s: smlog is disabled.\n", __func__); set_smlog_magic(smlog_enabled, smem, addr, smlog_buf_size); return ret; alloc_fail: smlog_enabled = false; set_smlog_magic(smlog_enabled, smem, addr, smlog_buf_size); return ret; }