static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	/* Already mapped: just take another reference. */
	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	/* Flatten the sg table into a page array for vmap(). */
	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}
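/*
 * For context, a minimal sketch of the matching dma-buf vunmap callback,
 * assuming the same struct_mutex locking and vmapping_count refcounting
 * scheme as the vmap path above; the in-tree i915 implementation may
 * differ in detail.
 */
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	/* Drop one vmap reference; tear down the mapping on the last one. */
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;
		/* Balances the pin taken when the mapping was created. */
		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}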
void drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		struct sg_page_iter sg_iter;

		/* Flush each page with clflush, fenced by memory barriers. */
		mb();
		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
			drm_clflush_page(sg_page_iter_page(&sg_iter));
		mb();

		return;
	}

	/* No clflush: fall back to a cache-flush IPI on every CPU. */
	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
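/*
 * A sketch of the per-page helper invoked above, assuming x86 and the
 * kmap_atomic interface, with the cache-line stride taken from
 * boot_cpu_data; the actual drm_cache.c helper may differ slightly.
 */
static void drm_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	if (unlikely(page == NULL))
		return;

	/* Map the page and flush it one cache line at a time. */
	page_virtual = kmap_atomic(page);
	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);
	kunmap_atomic(page_virtual);
}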
static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
	return sg_page_iter_page(&viter->iter);
}
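/*
 * __vmw_piter_sg_page() is one backend of the vmwgfx page iterator. A
 * minimal sketch of the matching "next" callback and a caller loop,
 * assuming vmw_piter carries function pointers named next and page;
 * treat these names as illustrative rather than the exact in-tree API.
 */
static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	return __sg_page_iter_next(&viter->iter);
}

/* Typical traversal: advance the iterator, then fetch the current page. */
static void vmw_walk_pages_example(struct vmw_piter *viter)
{
	while (viter->next(viter)) {
		struct page *page = viter->page(viter);
		/* ... operate on page ... */
		(void)page;
	}
}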