static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction dir) { struct mock_dmabuf *mock = to_mock(attachment->dmabuf); struct sg_table *st; struct scatterlist *sg; int i, err; st = kmalloc(sizeof(*st), GFP_KERNEL); if (!st) return ERR_PTR(-ENOMEM); err = sg_alloc_table(st, mock->npages, GFP_KERNEL); if (err) goto err_free; sg = st->sgl; for (i = 0; i < mock->npages; i++) { sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0); sg = sg_next(sg); } if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { err = -ENOMEM; goto err_st; } return st; err_st: sg_free_table(st); err_free: kfree(st); return ERR_PTR(err); }
/*
 * dma_buf .release hook: drop the reference held on each backing page,
 * then free the mock container itself.
 */
static void mock_dmabuf_release(struct dma_buf *dma_buf)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);
	int n;

	for (n = 0; n < mock->npages; n++)
		put_page(mock->pages[n]);

	kfree(mock);
}
/*
 * dma_buf .kunmap hook: release the kernel mapping created by the
 * corresponding kmap of page @page_num.  @addr is unused; the page is
 * looked up from the mock's page array instead.
 *
 * Fix: the original wrote `return kunmap(...)`.  Returning a void
 * expression from a void function is an ISO C constraint violation
 * (C11 6.8.6.4) accepted only as a GNU extension — call it plainly.
 */
static void mock_dmabuf_kunmap(struct dma_buf *dma_buf,
			       unsigned long page_num, void *addr)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	kunmap(mock->pages[page_num]);
}
/*
 * dma_buf .vunmap hook: tear down the contiguous kernel mapping of all
 * npages pages previously established by the vmap hook.
 */
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	vm_unmap_ram(vaddr, mock->npages);
}
/*
 * dma_buf .vmap hook: map all backing pages into one contiguous kernel
 * virtual range.  Returns the base address, or NULL on failure
 * (vm_map_ram's failure convention).
 */
static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
}
/*
 * dma_buf .kmap_atomic hook: atomically map a single backing page and
 * return its kernel virtual address.  Caller must pair with the atomic
 * unmap before sleeping.
 */
static void *mock_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
				     unsigned long page_num)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	return kmap_atomic(mock->pages[page_num]);
}