int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma)
{
	struct cma *cma = &cma_areas[cma_area_count];

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->free_count = cma->count;
	*res_cma = cma;
	cma_area_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
}
/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function makes it possible to create custom reserved areas for
 * specific devices.
 *
 * If @fixed is true, reserve the contiguous area at exactly @base. If false,
 * reserve anywhere in the range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				   cma_get_size(*res_cma));

	return 0;
}
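/*
 * Usage sketch (illustrative only, not from the source above): how arch
 * setup code might call dma_contiguous_reserve_area() while memblock is
 * still active. The names example_cma_area and example_dma_limit, and the
 * 16 MiB size, are assumptions made for this example only.
 */
static struct cma *example_cma_area;

static void __init example_reserve_cma(phys_addr_t example_dma_limit)
{
	int ret;

	/* Ask for 16 MiB anywhere below the DMA limit (fixed == false). */
	ret = dma_contiguous_reserve_area(16 * SZ_1M, 0, example_dma_limit,
					  &example_cma_area, false);
	if (ret)
		pr_warn("CMA: example reservation failed: %d\n", ret);
}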
/**
 * dma_declare_contiguous() - reserve area for contiguous memory handling
 *			      for particular device
 * @dev:   Pointer to device structure.
 * @size:  Size of the reserved memory.
 * @base:  Start address of the reserved memory (optional, 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory for the specified device. It should be
 * called by board specific code while the early allocator (memblock or
 * bootmem) is still active.
 */
int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
				  phys_addr_t base, phys_addr_t limit)
{
	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
	phys_addr_t alignment;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			base = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			base = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	r->start = base;
	r->size = size;
	r->dev = dev;
	cma_reserved_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return base;
}
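/*
 * Usage sketch (illustrative only, not from the source above): board code
 * declaring a dedicated CMA region for one device from a machine .reserve
 * callback, while memblock is still active. The platform device
 * example_camera_device is an assumption made for this example only.
 */
extern struct platform_device example_camera_device;	/* assumed to exist */

static void __init example_board_reserve(void)
{
	/* 16 MiB for the camera, placed anywhere (base == 0, limit == 0). */
	if (dma_declare_contiguous(&example_camera_device.dev, SZ_16M, 0, 0))
		pr_warn("CMA: could not reserve area for the camera device\n");
}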
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma)
{
	struct cma *cma = &cma_areas[cma_area_count];
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	*res_cma = cma;
	cma_area_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	return ret;
}