/*
 * nvhost_nvmap_pin - pin an nvmap handle and wrap its DMA address
 * in a single-entry sg_table.
 *
 * mgr and handle are opaque mem_mgr/mem_handle wrappers that are really
 * an nvmap_client and nvmap_handle_ref (see the casts below).
 *
 * Returns the sg_table on success, or an ERR_PTR on allocation/pin
 * failure.  Caller owns the returned table (single kfree releases both
 * the header and the embedded scatterlist).
 */
struct sg_table *nvhost_nvmap_pin(struct mem_mgr *mgr,
		struct mem_handle *handle)
{
	int err = 0;
	dma_addr_t ret = 0;
	/* One allocation holds the table header plus its one scatterlist. */
	struct sg_table *sgt = kmalloc(sizeof(*sgt) + sizeof(*sgt->sgl),
			GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	/*
	 * Hack: smuggle the pre-allocated scatterlist (sgt+1) through the
	 * gfp_mask argument; the sg_kmalloc callback casts it back and
	 * returns it instead of allocating, so no second allocation (and
	 * no sg_free_table) is ever needed — plain kfree(sgt) cleans up.
	 */
	err = __sg_alloc_table(sgt, 1, 1, (gfp_t)(sgt+1), sg_kmalloc);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	ret = nvmap_pin((struct nvmap_client *)mgr,
			(struct nvmap_handle_ref *)handle);
	/* nvmap_pin encodes errors as negative values in the dma_addr_t */
	if (IS_ERR_VALUE(ret)) {
		kfree(sgt);
		return ERR_PTR(ret);
	}
	sg_dma_address(sgt->sgl) = ret;
	return sgt;
}
/*
 * nvhost_gr3d_t30_ctxhandler_init - allocate and initialize the Tegra30
 * 3D context-save handler.
 *
 * Allocates the host1x_hwctx_handler, sizes and allocates the context
 * save buffer from nvmap, maps and pins it, and wires up the v1 hwctx
 * callbacks.  Returns the embedded nvhost_hwctx_handler, or NULL on
 * failure.
 *
 * Fixes vs. original: the kmalloc'd handler `p` was leaked on every
 * error path, and the nvmap_pin() result was never checked.
 */
struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
		u32 syncpt, u32 waitbase,
		struct nvhost_channel *ch)
{
	struct nvmap_client *nvmap;
	u32 *save_ptr;
	struct host1x_hwctx_handler *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	nvmap = nvhost_get_host(ch->dev)->nvmap;

	/* register_sets is a file-scope global shared with setup_save() */
	register_sets = tegra_gpu_register_sets();
	BUG_ON(register_sets == 0 || register_sets > 2);

	p->syncpt = syncpt;
	p->waitbase = waitbase;

	/* first pass with NULL only computes p->save_size */
	setup_save(p, NULL);

	p->save_buf = nvmap_alloc(nvmap, p->save_size * 4, 32,
			NVMAP_HANDLE_WRITE_COMBINE, 0);
	if (IS_ERR(p->save_buf))
		goto err_free_p;		/* was: leaked p */

	p->save_slots = 6;
	if (register_sets == 2)
		p->save_slots += 2;

	save_ptr = nvmap_mmap(p->save_buf);
	if (!save_ptr)
		goto err_free_buf;		/* was: leaked p */

	p->save_phys = nvmap_pin(nvmap, p->save_buf);
	/* was: pin result silently ignored */
	if (IS_ERR_VALUE(p->save_phys))
		goto err_unmap;

	/* second pass emits the actual save command stream */
	setup_save(p, save_ptr);

	p->h.alloc = ctx3d_alloc_v1;
	p->h.save_push = save_push_v1;
	p->h.save_service = NULL;
	p->h.get = nvhost_3dctx_get;
	p->h.put = nvhost_3dctx_put;

	return &p->h;

err_unmap:
	nvmap_munmap(p->save_buf, save_ptr);
err_free_buf:
	nvmap_free(nvmap, p->save_buf);
err_free_p:
	kfree(p);
	return NULL;
}
/*
 * tegra_dc_ext_pin_window - resolve a user nvmap id into a pinned handle
 * owned by the dc_ext driver.
 *
 * @user:      dc_ext user context (supplies the user's nvmap client)
 * @id:        user-space nvmap handle id; 0 means "no buffer"
 * @handle:    out: duplicated handle ref held by ext->nvmap (NULL if !id)
 * @phys_addr: out: pinned DMA address (-1 if !id)
 *
 * Returns 0 on success, -EACCES if the user cannot access the buffer,
 * or a negative errno from duplication/pinning.
 *
 * Fix vs. original: nvmap_pin() returns a dma_addr_t, not a pointer;
 * the old code's own "XXX this isn't correct for non-pointers" cast to
 * void * is replaced by IS_ERR_VALUE on the integer value.
 */
int tegra_dc_ext_pin_window(struct tegra_dc_ext_user *user, u32 id,
			    struct nvmap_handle_ref **handle,
			    dma_addr_t *phys_addr)
{
	struct tegra_dc_ext *ext = user->ext;
	struct nvmap_handle_ref *win_dup;
	ulong win_handle_id;
	dma_addr_t phys;

	if (!id) {
		*handle = NULL;
		*phys_addr = -1;
		return 0;
	}

	/*
	 * Take a reference to the buffer using the user's nvmap context, to
	 * make sure they have permissions to access it.
	 */
	win_handle_id = nvmap_get_handle_user_id(user->nvmap, id);
	if (!win_handle_id)
		return -EACCES;

	/*
	 * Duplicate the buffer's handle into the dc_ext driver's nvmap
	 * context, to ensure that the handle won't be freed as long as it is
	 * in use by display.
	 */
	win_dup = nvmap_duplicate_handle_user_id(ext->nvmap, id);

	/* Release the reference we took in the user's context above */
	nvmap_put_handle_user_id(win_handle_id);

	if (IS_ERR(win_dup))
		return PTR_ERR(win_dup);

	phys = nvmap_pin(ext->nvmap, win_dup);
	/* error is encoded as a negative value in the dma_addr_t */
	if (IS_ERR_VALUE(phys)) {
		nvmap_free(ext->nvmap, win_dup);
		return (int)phys;
	}

	*phys_addr = phys;
	*handle = win_dup;

	return 0;
}
/*
 * nvhost_3dctx_alloc_common - allocate a 3D hardware context.
 *
 * @ch:          channel the context belongs to
 * @map_restore: when true, the restore buffer is write-combined and
 *               kernel-mapped (restore_virt); otherwise uncacheable and
 *               left unmapped.
 *
 * Allocates the context struct and its restore buffer, optionally maps
 * it, and pins it.  Returns the context, or NULL on failure.
 *
 * Fix vs. original: the fail path passed ctx->restore to nvmap_free()
 * even when nvmap_alloc() had returned an ERR_PTR; the error branch now
 * clears ctx->restore first and the fail path skips NULL.
 */
struct nvhost_hwctx *nvhost_3dctx_alloc_common(struct nvhost_channel *ch,
		bool map_restore)
{
	struct nvmap_client *nvmap = ch->dev->nvmap;
	struct nvhost_hwctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->restore = nvmap_alloc(nvmap, nvhost_3dctx_restore_size * 4, 32,
		map_restore ? NVMAP_HANDLE_WRITE_COMBINE
			    : NVMAP_HANDLE_UNCACHEABLE);
	if (IS_ERR_OR_NULL(ctx->restore)) {
		/* don't hand an ERR_PTR to nvmap_free in the fail path */
		ctx->restore = NULL;
		goto fail;
	}

	if (map_restore) {
		ctx->restore_virt = nvmap_mmap(ctx->restore);
		if (!ctx->restore_virt)
			goto fail;
	} else
		ctx->restore_virt = NULL;

	kref_init(&ctx->ref);
	ctx->channel = ch;
	ctx->valid = false;
	ctx->save = nvhost_3dctx_save_buf;
	ctx->save_incrs = nvhost_3dctx_save_incrs;
	ctx->save_thresh = nvhost_3dctx_save_thresh;
	ctx->save_slots = nvhost_3dctx_save_slots;

	ctx->restore_phys = nvmap_pin(nvmap, ctx->restore);
	if (IS_ERR_VALUE(ctx->restore_phys))
		goto fail;

	ctx->restore_size = nvhost_3dctx_restore_size;
	ctx->restore_incrs = nvhost_3dctx_restore_incrs;
	return ctx;

fail:
	if (map_restore && ctx->restore_virt) {
		nvmap_munmap(ctx->restore, ctx->restore_virt);
		ctx->restore_virt = NULL;
	}
	if (ctx->restore)
		nvmap_free(nvmap, ctx->restore);
	ctx->restore = NULL;
	kfree(ctx);
	return NULL;
}
/*
 * nvhost_gr3d_t30_ctxhandler_init - initialize the Tegra30 3D context
 * handler (global-state variant).
 *
 * Sizes and allocates the shared context-save buffer from the channel's
 * nvmap client, maps and pins it, generates the save command stream, and
 * installs the v1 hwctx callbacks on @h.  Returns 0 or a negative errno.
 *
 * Fix vs. original: the nvmap_pin() result (save_phys) was never
 * checked; on pin failure we now unwind the map/alloc and fail cleanly
 * instead of continuing with an unpinned save buffer.
 */
int __init nvhost_gr3d_t30_ctxhandler_init(struct nvhost_hwctx_handler *h)
{
	struct nvhost_channel *ch;
	struct nvmap_client *nvmap;
	u32 *save_ptr;

	ch = container_of(h, struct nvhost_channel, ctxhandler);
	nvmap = ch->dev->nvmap;

	/* register_sets is a file-scope global shared with setup_save() */
	register_sets = tegra_gpu_register_sets();
	BUG_ON(register_sets == 0 || register_sets > 2);

	/* first pass with NULL only computes save_size */
	setup_save(NULL);

	nvhost_3dctx_save_buf = nvmap_alloc(nvmap, save_size * 4, 32,
					NVMAP_HANDLE_WRITE_COMBINE);
	if (IS_ERR(nvhost_3dctx_save_buf)) {
		int err = PTR_ERR(nvhost_3dctx_save_buf);
		nvhost_3dctx_save_buf = NULL;
		return err;
	}

	nvhost_3dctx_save_slots = 6;
	if (register_sets == 2)
		nvhost_3dctx_save_slots += 2;

	save_ptr = nvmap_mmap(nvhost_3dctx_save_buf);
	if (!save_ptr) {
		nvmap_free(nvmap, nvhost_3dctx_save_buf);
		nvhost_3dctx_save_buf = NULL;
		return -ENOMEM;
	}

	save_phys = nvmap_pin(nvmap, nvhost_3dctx_save_buf);
	/* was: pin result silently ignored */
	if (IS_ERR_VALUE(save_phys)) {
		nvmap_munmap(nvhost_3dctx_save_buf, save_ptr);
		nvmap_free(nvmap, nvhost_3dctx_save_buf);
		nvhost_3dctx_save_buf = NULL;
		return -ENOMEM;
	}

	/* second pass emits the actual save command stream */
	setup_save(save_ptr);

	h->alloc = ctx3d_alloc_v1;
	h->save_push = save_push_v1;
	h->save_service = NULL;
	h->get = nvhost_3dctx_get;
	h->put = nvhost_3dctx_put;

	return 0;
}
/* Overlay window manipulation */ static int tegra_overlay_pin_window(struct tegra_overlay_info *overlay, struct tegra_overlay_flip_win *flip_win, struct nvmap_client *user_nvmap) { struct nvmap_handle_ref *win_dupe; struct nvmap_handle *win_handle; unsigned long buff_id = flip_win->attr.buff_id; if (!buff_id) return 0; win_handle = nvmap_get_handle_id(user_nvmap, buff_id); if (win_handle == NULL) { dev_err(&overlay->ndev->dev, "%s: flip invalid " "handle %08lx\n", current->comm, buff_id); return -EPERM; } /* duplicate the new framebuffer's handle into the fb driver's * nvmap context, to ensure that the handle won't be freed as * long as it is in-use by the fb driver */ win_dupe = nvmap_duplicate_handle_id(overlay->overlay_nvmap, buff_id); nvmap_handle_put(win_handle); if (IS_ERR(win_dupe)) { dev_err(&overlay->ndev->dev, "couldn't duplicate handle\n"); return PTR_ERR(win_dupe); } flip_win->handle = win_dupe; flip_win->phys_addr = nvmap_pin(overlay->overlay_nvmap, win_dupe); if (IS_ERR((void *)flip_win->phys_addr)) { dev_err(&overlay->ndev->dev, "couldn't pin handle\n"); nvmap_free(overlay->overlay_nvmap, win_dupe); return PTR_ERR((void *)flip_win->phys_addr); } return 0; }
/*
 * tegra_pcm_preallocate_dma_buffer - preallocate the substream's DMA
 * buffer, either through nvmap/SMMU (TEGRA30_USE_SMMU) or plain
 * write-combined DMA memory.
 *
 * Returns 0 on success or -ENOMEM.
 *
 * Fixes vs. original: the SMMU path dereferenced the kzalloc'd ptsd and
 * every nvmap result (client, handle, mapping, pin) without any checks.
 */
static int tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream,
					    size_t size)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;
#if TEGRA30_USE_SMMU
	unsigned char *vaddr;
	phys_addr_t paddr;
	struct tegra_smmu_data *ptsd;

	ptsd = kzalloc(sizeof(struct tegra_smmu_data), GFP_KERNEL);
	if (!ptsd)
		return -ENOMEM;

	ptsd->pcm_nvmap_client = nvmap_create_client(nvmap_dev, "Audio_SMMU");
	if (IS_ERR_OR_NULL(ptsd->pcm_nvmap_client))
		goto err_free_ptsd;

	ptsd->pcm_nvmap_handle = nvmap_alloc(ptsd->pcm_nvmap_client, size, 32,
					     NVMAP_HANDLE_WRITE_COMBINE,
					     NVMAP_HEAP_IOVMM);
	if (IS_ERR_OR_NULL(ptsd->pcm_nvmap_handle))
		goto err_free_ptsd;

	vaddr = (unsigned char *) nvmap_mmap(ptsd->pcm_nvmap_handle);
	if (!vaddr)
		goto err_free_handle;

	paddr = nvmap_pin(ptsd->pcm_nvmap_client, ptsd->pcm_nvmap_handle);
	/* error is encoded as a negative value in the address */
	if (IS_ERR_VALUE(paddr)) {
		nvmap_munmap(ptsd->pcm_nvmap_handle, vaddr);
		goto err_free_handle;
	}

	buf->area = vaddr;
	buf->addr = paddr;
	buf->private_data = ptsd;
#else
	buf->area = dma_alloc_writecombine(pcm->card->dev, size,
					   &buf->addr, GFP_KERNEL);
	if (!buf->area)
		return -ENOMEM;
	buf->private_data = NULL;
#endif
	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = pcm->card->dev;
	buf->bytes = size;
	return 0;

#if TEGRA30_USE_SMMU
err_free_handle:
	nvmap_free(ptsd->pcm_nvmap_client, ptsd->pcm_nvmap_handle);
err_free_ptsd:
	/*
	 * NOTE(review): no nvmap client-release API is visible in this
	 * file, so a successfully created client may leak on this error
	 * path — confirm the proper teardown call and add it.
	 */
	kfree(ptsd);
	return -ENOMEM;
#endif
}