static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}
/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must call i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj = NULL;
	struct drm_i915_gem_object *tmp, *next;
	struct list_head *list;
	int n;

	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (tmp->active)
			break;

		/* While we're looping, do some clean up */
		if (tmp->madv == __I915_MADV_PURGED) {
			list_del(&tmp->batch_pool_link);
			drm_gem_object_unreference(&tmp->base);
			continue;
		}

		if (tmp->base.size >= size) {
			obj = tmp;
			break;
		}
	}

	if (obj == NULL) {
		int ret;

		obj = i915_gem_alloc_object(pool->dev, size);
		if (obj == NULL)
			return ERR_PTR(-ENOMEM);

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			/* Drop the reference taken by i915_gem_alloc_object()
			 * so the freshly allocated object is not leaked on
			 * failure (struct_mutex is held, as required).
			 */
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}

		obj->madv = I915_MADV_DONTNEED;
	}

	list_move_tail(&obj->batch_pool_link, list);
	i915_gem_object_pin_pages(obj);
	return obj;
}
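/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * that already holds struct_mutex fetches a pooled batch object and,
 * per the kerneldoc above, must drop the page pin when finished.  The
 * function and parameter names here are hypothetical.
 */
static int example_fill_batch(struct i915_gem_batch_pool *pool,
			      size_t batch_len)
{
	struct drm_i915_gem_object *batch;

	/* struct_mutex must already be held, as the WARN_ON above checks */
	batch = i915_gem_batch_pool_get(pool, batch_len);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	/* ... emit commands into the batch ... */

	/* the pool returned the object with its pages pinned; unpin here */
	i915_gem_object_unpin_pages(batch);
	return 0;
}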
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		st = ERR_PTR(ret);
		goto out;
	}

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret) {
		kfree(st);
		st = ERR_PTR(ret);
		goto out;
	}

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		sg_free_table(st);
		kfree(st);
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	i915_gem_object_pin_pages(obj);

out:
	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;
}
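/*
 * For context, a sketch of how the exporter callback above is hooked
 * up: it is installed as the .map_dma_buf entry of a struct
 * dma_buf_ops table.  The counterpart callbacks shown here
 * (i915_gem_unmap_dma_buf, i915_gem_dmabuf_vunmap) do not appear in
 * this excerpt and are assumed names; treat the table as an
 * illustration, not the driver's full ops definition.
 */
static const struct dma_buf_ops i915_dmabuf_ops_sketch = {
	.map_dma_buf	= i915_gem_map_dma_buf,
	.unmap_dma_buf	= i915_gem_unmap_dma_buf,	/* assumed counterpart */
	.release	= drm_gem_dmabuf_release,
	.vmap		= i915_gem_dmabuf_vmap,		/* defined below */
	.vunmap		= i915_gem_dmabuf_vunmap,	/* assumed counterpart */
};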
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	struct scatterlist *sg;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto error;

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
	if (pages == NULL)
		goto error;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
		pages[i] = sg_page(sg);

	obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto error;

	obj->vmapping_count = 1;
	i915_gem_object_pin_pages(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;
error:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}
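/*
 * Importer-side sketch (illustrative only): consumers do not call
 * i915_gem_dmabuf_vmap() directly.  They call dma_buf_vmap(), and the
 * dma-buf core dispatches to the exporter's .vmap callback above.
 * The function name below is hypothetical.
 */
static int example_cpu_access(struct dma_buf *buf)
{
	void *vaddr;

	vaddr = dma_buf_vmap(buf);	/* ends up in i915_gem_dmabuf_vmap() */
	if (!vaddr)
		return -ENOMEM;

	/* ... CPU reads/writes through vaddr ... */

	dma_buf_vunmap(buf, vaddr);	/* drops the vmapping_count reference */
	return 0;
}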