/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	mutex_lock(&cma_mutex);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	free_contig_range(pfn, count);
	adjust_managed_cma_page_count(page_zone(pages), count);
	mutex_unlock(&cma_mutex);

	return true;
}
/*
 * Return the still-unallocated (clear-bit) ranges of the device's CMA area
 * below @idx_until to the page allocator and drop the area's isolated flag.
 */
static void dma_contiguous_deisolate_until(struct device *dev, int idx_until)
{
	struct cma *cma = dev_get_cma_area(dev);
	int idx;

	if (!cma || !idx_until)
		return;

	mutex_lock(&cma_mutex);

	if (!cma->isolated) {
		mutex_unlock(&cma_mutex);
		dev_err(dev, "Not isolated!\n");
		return;
	}

	/* Walk alternating runs of clear (free) and set (allocated) bits,
	 * freeing each run of clear bits back to the buddy allocator. */
	idx = find_first_zero_bit(cma->bitmap, idx_until);
	while (idx < idx_until) {
		int idx_set;

		idx_set = find_next_bit(cma->bitmap, idx_until, idx);
		free_contig_range(cma->base_pfn + idx, idx_set - idx);
		idx = find_next_zero_bit(cma->bitmap, idx_until, idx_set);
	}

	cma->isolated = false;

	mutex_unlock(&cma_mutex);
}
/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages) {
		pr_err("%s: no CMA area or pages is NULL\n", __func__);
		return false;
	}

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_err("pfn:%lx cma->base_pfn:%lx cma->count:%lx\n",
		       pfn, cma->base_pfn, cma->count);
		return false;
	}

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	mutex_lock(&cma_mutex);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	free_contig_range(pfn, count);
	mutex_unlock(&cma_mutex);

	return true;
}
/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	clear_cma_bitmap(cma, pfn, count);

	return true;
}
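All of the dma_release_from_contiguous() variants above are the release half of a pair with dma_alloc_from_contiguous(). As a minimal sketch of how a driver might use that pair, assuming the older three-argument dma_alloc_from_contiguous(dev, count, align) prototype these variants were written against (the buffer size, alignment choice, and error handling here are illustrative, not taken from the source):

#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/mm.h>

/* Illustrative only: allocate a physically contiguous buffer from the
 * device's CMA area and release it again. 'count' is in pages and
 * 'align' is an order (log2 of the requested alignment in pages). */
static int example_cma_roundtrip(struct device *dev)
{
	int count = 16;		/* 16 pages, e.g. 64 KiB with 4 KiB pages */
	unsigned int align = get_order(count << PAGE_SHIFT);
	struct page *pages;

	pages = dma_alloc_from_contiguous(dev, count, align);
	if (!pages)
		return -ENOMEM;

	/* ... map and use the buffer ... */

	if (!dma_release_from_contiguous(dev, pages, count))
		dev_warn(dev, "pages were not part of the CMA area\n");

	return 0;
}

The release call returns false only when the pages lie outside the device's CMA area, so a caller that mixes CMA and fallback allocations (e.g. alloc_pages()) can use the return value to decide how to free the buffer.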