/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 * be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;
	DEFINE_DMA_ATTRS(attrs);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
	/* Skip the CPU cache sync on unmap; presumably the vb2 core performs
	 * explicit syncs in its prepare/finish hooks — TODO confirm. The attr
	 * API only exists on >= 3.6, hence the version guard. */
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#endif
	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	/* Undo the DMA mapping before touching or releasing the pages. */
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   &attrs);
	/* Drop the contiguous kernel mapping, if one was ever created. */
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	/* NOTE(review): the unmap above uses &buf->sg_table while the free
	 * uses buf->dma_sgt — presumably dma_sgt points at &buf->sg_table for
	 * userptr buffers; verify against the get_userptr/alloc path. */
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		/* Pages the device may have written into must be marked
		 * dirty before being handed back. */
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
		/* Pages from an I/O vma were not refcounted when pinned, so
		 * do not drop a reference on them. */
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}
/* Release one reference on a MMAP buffer; the last reference tears down the
 * DMA mapping, the kernel mapping, and the backing pages. */
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		DEFINE_DMA_ATTRS(attrs);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
		/* Skip the CPU cache sync on unmap; presumably the vb2 core
		 * syncs explicitly in prepare/finish — TODO confirm. */
		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#endif
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		/* Unmap from the device before freeing anything. */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			buf->dma_dir, &attrs);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		/* NOTE(review): unmap uses &buf->sg_table but the free uses
		 * buf->dma_sgt — presumably dma_sgt points at &buf->sg_table;
		 * verify against the alloc path. */
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		/* Balances the get_device() taken when the buffer was
		 * allocated — presumably in the alloc callback; confirm. */
		put_device(buf->dev);
		kfree(buf);
	}
}
/* Tear down an nvos_pagemap: drop its kernel mapping, release every backing
 * page, then free the descriptor itself. */
static void nv_free_pages(struct nvos_pagemap *pm)
{
	unsigned int idx;

	/* The contiguous kernel mapping may never have been created. */
	if (pm->addr)
		vm_unmap_ram(pm->addr, pm->nr_pages);

	for (idx = 0; idx < pm->nr_pages; idx++) {
		/* Reserved flag was set at allocation time; clear it so the
		 * page allocator will take the page back. */
		ClearPageReserved(pm->pages[idx]);
		__free_page(pm->pages[idx]);
	}

	kfree(pm);
}
/* Drop one reference on a MMAP buffer; the final reference frees the kernel
 * mapping, the scatter-gather table, the pages, and the descriptor. */
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int page;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
		buf->num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(&buf->sg_table);
	/* Release the pages in reverse allocation order. */
	for (page = buf->num_pages - 1; page >= 0; page--)
		__free_page(buf->pages[page]);
	kfree(buf->pages);
	kfree(buf);
}
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 * be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int idx;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(&buf->sg_table);
	/* Walk the page array backwards, dirtying and unpinning as we go. */
	for (idx = buf->num_pages - 1; idx >= 0; idx--) {
		/* Writable buffers may have been filled by the device. */
		if (buf->write)
			set_page_dirty_lock(buf->pages[idx]);
		/* Pages from an I/O vma were not refcounted when pinned. */
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[idx]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}
static void vb2_vmalloc_put_userptr(void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK; unsigned int i; if (buf->pages) { if (vaddr) vm_unmap_ram((void *)vaddr, buf->n_pages); for (i = 0; i < buf->n_pages; ++i) { if (buf->dma_dir == DMA_FROM_DEVICE) set_page_dirty_lock(buf->pages[i]); put_page(buf->pages[i]); } kfree(buf->pages); } else { vb2_put_vma(buf->vma); iounmap((__force void __iomem *)buf->vaddr); } kfree(buf); }
/* Release a USERPTR buffer: unpin its user pages when page-backed, otherwise
 * drop the (optional) vma reference and undo the ioremap. */
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long start = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int pg;

	if (!buf->pages) {
		/* Non-page-backed region mapped via ioremap. */
		if (buf->vma)
			vb2_put_vma(buf->vma);
		iounmap(buf->vaddr);
		kfree(buf);
		return;
	}

	if (start)
		vm_unmap_ram((void *)start, buf->n_pages);
	for (pg = 0; pg < buf->n_pages; ++pg) {
		/* Writable buffers may have been filled by the device. */
		if (buf->write)
			set_page_dirty_lock(buf->pages[pg]);
		put_page(buf->pages[pg]);
	}
	kfree(buf->pages);
	kfree(buf);
}
static void vb2_vmalloc_put_userptr(void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK; unsigned int i; struct page **pages; unsigned int n_pages; if (!buf->vec->is_pfns) { n_pages = frame_vector_count(buf->vec); pages = frame_vector_pages(buf->vec); if (vaddr) vm_unmap_ram((void *)vaddr, n_pages); if (buf->dma_dir == DMA_FROM_DEVICE) for (i = 0; i < n_pages; i++) set_page_dirty_lock(pages[i]); } else { iounmap((__force void __iomem *)buf->vaddr); } vb2_destroy_framevec(buf->vec); kfree(buf); }
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 * be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;
	DEFINE_DMA_ATTRS(attrs);

	/* Skip the CPU cache sync on unmap; presumably the vb2 core performs
	 * explicit syncs in its prepare/finish hooks — TODO confirm. */
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	/* Undo the DMA mapping before releasing anything else. */
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   &attrs);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	/* NOTE(review): the unmap above uses &buf->sg_table while the free
	 * uses buf->dma_sgt — presumably dma_sgt points at &buf->sg_table for
	 * userptr buffers; verify against the get_userptr path. */
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		/* Pages the device may have written into must be marked
		 * dirty before the frame vector releases them. */
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
	}
	/* Unpins the user pages and frees the vector. */
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
/*
 * Copy the boot-time kernel reserve info out of the reserved physical region
 * into a kmalloc'd copy (the global @kinfo), then return the reserved memory
 * to the system.
 *
 * Returns 0 on success, -ENOMEM on any allocation/mapping failure.
 *
 * Fixes over the previous version:
 *  - no longer leaks @kinfo when the page-array allocation fails;
 *  - no longer leaks @kinfo and @pages when vm_map_ram() fails;
 *  - printk now uses %zu for sizeof (size_t), not %d;
 *  - the mapping-failure message names vm_map_ram, which is what failed
 *    (it previously said "ioremap");
 *  - the mapping local is a plain void * — vm_map_ram() returns a normal
 *    kernel pointer, not an __iomem one.
 */
int __init owl_kinfo_init(void)
{
	struct page **pages;
	unsigned int pages_count, pfn, i;
	void *kinfo_vaddr;

	kinfo = kmalloc(sizeof(struct kernel_reserve_info), GFP_KERNEL);
	if (kinfo == NULL) {
		printk(KERN_ALERT "%s, kmalloc(%zu) for kinfo failed!\n",
		       __func__, sizeof(struct kernel_reserve_info));
		return -ENOMEM;
	}

	pages_count = owl_kinfo_size >> PAGE_SHIFT;
	pages = kmalloc(sizeof *pages * pages_count, GFP_KERNEL);
	if (!pages) {
		printk(KERN_ALERT "%s, kmalloc(%zu) for pages failed!\n",
		       __func__, sizeof *pages * pages_count);
		goto err_free_kinfo;
	}

	/* The reserved region is physically contiguous: build a page array
	 * for it so it can be mapped into the kernel address space. */
	pfn = PFN_DOWN(owl_kinfo_start);
	for (i = 0; i < pages_count; ++i)
		pages[i] = pfn_to_page(pfn + i);

	kinfo_vaddr = vm_map_ram(pages, pages_count, -1, PAGE_KERNEL);
	if (kinfo_vaddr == NULL) {
		printk(KERN_ALERT "%s, vm_map_ram(0x%x, 0x%x) for kinfo failed!\n",
		       __func__, owl_kinfo_start, owl_kinfo_size);
		goto err_free_pages;
	}

	memcpy(kinfo, kinfo_vaddr, sizeof(struct kernel_reserve_info));

	/* The mapping and the page array were only needed for the copy. */
	vm_unmap_ram(kinfo_vaddr, pages_count);
	kfree(pages);
	free_owl_reserved_memory(owl_kinfo_start, owl_kinfo_size);
	return 0;

err_free_pages:
	kfree(pages);
err_free_kinfo:
	kfree(kinfo);
	kinfo = NULL;	/* don't leave a dangling global on failure */
	return -ENOMEM;
}
/* dma_buf vunmap callback: tear down the kernel mapping that the matching
 * vmap callback created with vm_map_ram() — presumably over mock->npages
 * pages; confirm against the vmap implementation. */
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	vm_unmap_ram(vaddr, mock->npages);
}