/*
 * Early variant: look up the GEM object under struct_mutex and return the
 * fake mmap offset that userspace passes to mmap().
 */
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	mutex_lock(&drm->struct_mutex);

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = tegra_bo_get_mmap_offset(bo);

	drm_gem_object_unreference(gem);
	mutex_unlock(&drm->struct_mutex);

	return 0;
}
/*
 * Page fault handler for mmap()ed buffers that are backed by individual
 * pages rather than a contiguous (CMA) allocation.
 */
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	/* translate the faulting address into an index into bo->pages */
	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		 PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}
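/*
 * Hedged sketch (not part of the listing above): the fault handler only
 * takes effect once it is wired into the vm_operations_struct that
 * drm_gem_mmap() installs on the VMA. The structure below mirrors the usual
 * wiring for a driver of this vintage; the name tegra_bo_vm_ops is an
 * assumption.
 */
const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};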
static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}
/*
 * Early variant of the GEM free callback: drop the mmap offset if one was
 * allocated, release the GEM core state and free the backing memory.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (gem->map_list.map)
		drm_gem_free_mmap_offset(gem);

	drm_gem_object_release(gem);
	tegra_bo_destroy(gem->dev, bo);

	kfree(bo);
}
int tegra_uapi_gem_set_tiling(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		/* block-linear mode only accepts block heights 0 through 5 */
		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put_unlocked(gem);

	return 0;
}
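/*
 * Hedged sketch: the per-BO tiling state that the set/get tiling ioctls
 * operate on. This mirrors the bookkeeping kept in the driver's gem.h; the
 * exact definition may differ, so treat it as an illustration only.
 */
enum tegra_bo_tiling_mode {
	TEGRA_BO_TILING_MODE_PITCH,	/* linear, pitch-ordered */
	TEGRA_BO_TILING_MODE_TILED,	/* legacy tiled layout */
	TEGRA_BO_TILING_MODE_BLOCK,	/* block-linear layout */
};

struct tegra_bo_tiling {
	enum tegra_bo_tiling_mode mode;
	unsigned long value;		/* block height for block-linear mode */
};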
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* only page-backed buffers were DMA-mapped by the map callback */
	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}
struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
					struct drm_file *file,
					const struct drm_mode_fb_cmd2 *cmd)
{
	unsigned int hsub, vsub, i;
	struct tegra_bo *planes[4];
	struct drm_gem_object *gem;
	struct tegra_fb *fb;
	int err;

	hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format);
	vsub = drm_format_vert_chroma_subsampling(cmd->pixel_format);

	for (i = 0; i < drm_format_num_planes(cmd->pixel_format); i++) {
		unsigned int width = cmd->width / (i ? hsub : 1);
		unsigned int height = cmd->height / (i ? vsub : 1);
		unsigned int size, bpp;

		gem = drm_gem_object_lookup(file, cmd->handles[i]);
		if (!gem) {
			err = -ENXIO;
			goto unreference;
		}

		/* check that the GEM object is large enough for this plane */
		bpp = drm_format_plane_cpp(cmd->pixel_format, i);

		size = (height - 1) * cmd->pitches[i] +
		       width * bpp + cmd->offsets[i];

		if (gem->size < size) {
			err = -EINVAL;
			goto unreference;
		}

		planes[i] = to_tegra_bo(gem);
	}

	fb = tegra_fb_alloc(drm, cmd, planes, i);
	if (IS_ERR(fb)) {
		err = PTR_ERR(fb);
		goto unreference;
	}

	return &fb->base;

unreference:
	while (i--)
		drm_gem_object_put_unlocked(&planes[i]->gem);

	return ERR_PTR(err);
}
/*
 * Variant of the GEM free callback that also handles PRIME-imported
 * buffers: the attachment mapping is released instead of freeing the
 * backing memory, which remains owned by the exporter.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_destroy(gem->dev, bo);
	}

	drm_gem_free_mmap_offset(gem);
	drm_gem_object_release(gem);

	kfree(bo);
}
/*
 * Later variant of the GEM free callback: additionally tears down the
 * IOMMU mapping when the driver uses an IOMMU domain.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}
/*
 * Early mmap() implementation for contiguous (CMA) buffers: the physical
 * pages are remapped directly into the VMA prepared by drm_gem_mmap().
 */
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
/*
 * Later variant of the dumb map-offset lookup: no struct_mutex is needed
 * because the offset is read straight from the VMA manager node.
 */
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}
int tegra_uapi_gem_cpu_prep(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_cpu_prep *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	unsigned long timeout;
	bool write;
	int ret;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem) {
		DRM_ERROR("failed to find bo handle %u\n", args->handle);
		return -ENOENT;
	}

	bo = to_tegra_bo(gem);

	write = !!(args->flags & DRM_TEGRA_CPU_PREP_WRITE);
	timeout = usecs_to_jiffies(args->timeout);

	/* for writes, wait on all fences; for reads, only on the exclusive one */
	ret = reservation_object_wait_timeout_rcu(bo->resv, write, true,
						  timeout);

	drm_gem_object_put_unlocked(gem);

	if (ret == 0) {
		DRM_DEBUG_DRIVER("bo handle %u is busy\n", args->handle);
		return timeout == 0 ? -EBUSY : -ETIMEDOUT;
	}

	if (ret < 0) {
		DRM_ERROR("failed to await bo handle %u: %d\n",
			  args->handle, ret);
		return ret;
	}

	DRM_DEBUG_DRIVER("bo handle %u is idling\n", args->handle);

	return 0;
}
/*
 * PRIME export: build an sg_table describing the buffer. Page-backed
 * buffers are DMA-mapped for the importing device; contiguous buffers are
 * described by a single entry pointing at their physical address.
 */
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}
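/*
 * Hedged sketch: how the PRIME callbacks in this listing would typically be
 * tied together for export. The structure name and the omitted handlers are
 * assumptions; only .map_dma_buf, .unmap_dma_buf and .vmap refer to
 * functions shown above.
 */
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.vmap = tegra_gem_prime_vmap,
	/* .release, .mmap and .vunmap handlers omitted for brevity */
};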
int tegra_uapi_gem_get_tiling(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put_unlocked(gem);

	return err;
}
/*
 * Early variant of the PRIME map callback: only contiguous (CMA) buffers
 * are supported, so a single-entry sg_table is sufficient.
 */
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
		kfree(sgt);
		return NULL;
	}

	sg_dma_address(sgt->sgl) = bo->paddr;
	sg_dma_len(sgt->sgl) = gem->size;

	return sgt;
}
/*
 * Later mmap() implementation: contiguous buffers are mapped with
 * dma_mmap_writecombine(), while page-backed buffers are faulted in lazily
 * through tegra_bo_fault() via a write-combined mixed map.
 */
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;

		/*
		 * dma_mmap_writecombine() interprets vm_pgoff as an offset
		 * into the buffer, so clear the fake GEM offset around the
		 * call and restore it afterwards.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
					    bo->paddr, gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
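/*
 * Hedged sketch: where tegra_drm_mmap() plugs into the driver. The name
 * tegra_drm_fops and the surrounding handlers are assumptions based on the
 * usual DRM file-operations boilerplate; only the .mmap entry refers to
 * code shown above.
 */
static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = noop_llseek,
};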
int tegra_uapi_gem_set_flags(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}
int tegra_uapi_gem_get_flags(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	/* a multi-entry sg_table means the buffer is not contiguous */
	if (bo->sgt->nents > 1)
		args->flags |= DRM_TEGRA_GEM_SPARSE;

	drm_gem_object_put_unlocked(gem);

	return 0;
}
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
				    unsigned int index)
{
	return to_tegra_bo(drm_gem_fb_get_obj(framebuffer, index));
}
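/*
 * Hedged usage sketch: how a display-controller update might use
 * tegra_fb_get_plane() to resolve the DMA address of each colour plane.
 * The function name example_program_fb and the hardware-programming step
 * are illustrative only, not part of the driver.
 */
static void example_program_fb(struct drm_framebuffer *fb)
{
	unsigned int i;

	for (i = 0; i < fb->format->num_planes; i++) {
		struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
		dma_addr_t base = bo->paddr + fb->offsets[i];

		/* "base" would be written to the plane's base-address register */
		(void)base;
	}
}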