static struct nvhost_hwctx *ctx3d_alloc(struct nvhost_channel *ch)
{
	struct nvhost_hwctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	ctx->restore = nvmap_alloc(context_restore_size * 4, 32,
				   NVMEM_HANDLE_WRITE_COMBINE,
				   (void **)&ctx->save_cpu_data);
	if (IS_ERR_OR_NULL(ctx->restore)) {
		kfree(ctx);
		return NULL;
	}

	setup_restore(ctx->save_cpu_data, NVWAITBASE_3D);
	ctx->channel = ch;
	ctx->restore_phys = nvmap_pin_single(ctx->restore);
	ctx->restore_size = context_restore_size;
	ctx->save = context_save_buf;
	ctx->save_phys = context_save_phys;
	ctx->save_size = context_save_size;
	ctx->save_incrs = 3;
	ctx->restore_incrs = 1;
	ctx->valid = false;
	kref_init(&ctx->ref);
	return ctx;
}
struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
		u32 syncpt, u32 waitbase, struct nvhost_channel *ch)
{
	struct nvmap_client *nvmap;
	u32 *save_ptr;
	struct host1x_hwctx_handler *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	nvmap = nvhost_get_host(ch->dev)->nvmap;
	register_sets = tegra_gpu_register_sets();
	BUG_ON(register_sets == 0 || register_sets > 2);

	p->syncpt = syncpt;
	p->waitbase = waitbase;

	setup_save(p, NULL);

	p->save_buf = nvmap_alloc(nvmap, p->save_size * 4, 32,
				  NVMAP_HANDLE_WRITE_COMBINE, 0);
	if (IS_ERR(p->save_buf)) {
		p->save_buf = NULL;
		kfree(p);	/* don't leak the handler on failure */
		return NULL;
	}

	p->save_slots = 6;
	if (register_sets == 2)
		p->save_slots += 2;

	save_ptr = nvmap_mmap(p->save_buf);
	if (!save_ptr) {
		nvmap_free(nvmap, p->save_buf);
		p->save_buf = NULL;
		kfree(p);	/* don't leak the handler on failure */
		return NULL;
	}

	p->save_phys = nvmap_pin(nvmap, p->save_buf);

	setup_save(p, save_ptr);

	p->h.alloc = ctx3d_alloc_v1;
	p->h.save_push = save_push_v1;
	p->h.save_service = NULL;
	p->h.get = nvhost_3dctx_get;
	p->h.put = nvhost_3dctx_put;

	return &p->h;
}
struct nvhost_hwctx *nvhost_3dctx_alloc_common(struct nvhost_channel *ch,
		bool map_restore)
{
	struct nvmap_client *nvmap = ch->dev->nvmap;
	struct nvhost_hwctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	ctx->restore = nvmap_alloc(nvmap, nvhost_3dctx_restore_size * 4, 32,
			map_restore ? NVMAP_HANDLE_WRITE_COMBINE
				    : NVMAP_HANDLE_UNCACHEABLE);
	if (IS_ERR_OR_NULL(ctx->restore))
		goto fail;

	if (map_restore) {
		ctx->restore_virt = nvmap_mmap(ctx->restore);
		if (!ctx->restore_virt)
			goto fail;
	} else {
		ctx->restore_virt = NULL;
	}

	kref_init(&ctx->ref);
	ctx->channel = ch;
	ctx->valid = false;
	ctx->save = nvhost_3dctx_save_buf;
	ctx->save_incrs = nvhost_3dctx_save_incrs;
	ctx->save_thresh = nvhost_3dctx_save_thresh;
	ctx->save_slots = nvhost_3dctx_save_slots;
	ctx->restore_phys = nvmap_pin(nvmap, ctx->restore);
	if (IS_ERR_VALUE(ctx->restore_phys))
		goto fail;

	ctx->restore_size = nvhost_3dctx_restore_size;
	ctx->restore_incrs = nvhost_3dctx_restore_incrs;
	return ctx;

fail:
	if (map_restore && ctx->restore_virt) {
		nvmap_munmap(ctx->restore, ctx->restore_virt);
		ctx->restore_virt = NULL;
	}
	/* ctx->restore may hold an ERR_PTR here; don't pass that to free */
	if (!IS_ERR_OR_NULL(ctx->restore))
		nvmap_free(nvmap, ctx->restore);
	ctx->restore = NULL;
	kfree(ctx);
	return NULL;
}
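/*
 * Hedged sketch (not part of the original file): ctx3d_alloc_v1 is
 * installed as the ->alloc hook elsewhere in this section but its body
 * is not shown. A minimal implementation would wrap
 * nvhost_3dctx_alloc_common(); passing map_restore = false (on the
 * assumption that the v1 path has hardware write the restore image, so
 * no CPU mapping is needed) is itself an assumption.
 */
static struct nvhost_hwctx *ctx3d_alloc_v1(struct nvhost_channel *ch)
{
	/* assumed: v1 needs no CPU mapping of the restore buffer */
	return nvhost_3dctx_alloc_common(ch, false);
}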
int __init nvhost_3dctx_handler_init(struct nvhost_hwctx_handler *h)
{
	setup_save(NULL, &context_save_size, &context_restore_size, 0, 0);

	context_save_buf = nvmap_alloc(context_save_size * 4, 32,
				       NVMEM_HANDLE_WRITE_COMBINE,
				       (void **)&context_save_ptr);
	if (IS_ERR_OR_NULL(context_save_buf)) {
		/* PTR_ERR(NULL) is 0, which would be reported as success */
		int err = context_save_buf ? PTR_ERR(context_save_buf)
					   : -ENOMEM;
		context_save_buf = NULL;
		return err;
	}

	context_save_phys = nvmap_pin_single(context_save_buf);
	setup_save(context_save_ptr, NULL, NULL, NVSYNCPT_3D, NVWAITBASE_3D);

	h->init = ctx3d_init;
	h->deinit = ctx3d_deinit;
	h->save_service = ctx3d_save_service;
	return 0;
}
int __init nvhost_gr3d_t30_ctxhandler_init(struct nvhost_hwctx_handler *h)
{
	struct nvhost_channel *ch;
	struct nvmap_client *nvmap;
	u32 *save_ptr;

	ch = container_of(h, struct nvhost_channel, ctxhandler);
	nvmap = ch->dev->nvmap;

	register_sets = tegra_gpu_register_sets();
	BUG_ON(register_sets == 0 || register_sets > 2);

	setup_save(NULL);

	nvhost_3dctx_save_buf = nvmap_alloc(nvmap, save_size * 4, 32,
					    NVMAP_HANDLE_WRITE_COMBINE);
	if (IS_ERR(nvhost_3dctx_save_buf)) {
		int err = PTR_ERR(nvhost_3dctx_save_buf);
		nvhost_3dctx_save_buf = NULL;
		return err;
	}

	nvhost_3dctx_save_slots = 6;
	if (register_sets == 2)
		nvhost_3dctx_save_slots += 2;

	save_ptr = nvmap_mmap(nvhost_3dctx_save_buf);
	if (!save_ptr) {
		nvmap_free(nvmap, nvhost_3dctx_save_buf);
		nvhost_3dctx_save_buf = NULL;
		return -ENOMEM;
	}

	save_phys = nvmap_pin(nvmap, nvhost_3dctx_save_buf);

	setup_save(save_ptr);

	h->alloc = ctx3d_alloc_v1;
	h->save_push = save_push_v1;
	h->save_service = NULL;
	h->get = nvhost_3dctx_get;
	h->put = nvhost_3dctx_put;
	return 0;
}
static int ctx3d_init(struct nvhost_hwctx *ctx)
{
	ctx->restore = nvmap_alloc(context_restore_size * 4, 32,
				   NVMEM_HANDLE_WRITE_COMBINE,
				   (void **)&ctx->save_cpu_data);
	if (IS_ERR_OR_NULL(ctx->restore)) {
		/* PTR_ERR(NULL) is 0, which would be reported as success */
		int err = ctx->restore ? PTR_ERR(ctx->restore) : -ENOMEM;
		ctx->restore = NULL;
		return err;
	}

	setup_restore(ctx->save_cpu_data, NVWAITBASE_3D);
	ctx->restore_phys = nvmap_pin_single(ctx->restore);
	ctx->restore_size = context_restore_size;
	ctx->save = context_save_buf;
	ctx->save_phys = context_save_phys;
	ctx->save_size = context_save_size;
	ctx->save_incrs = 3;
	ctx->restore_incrs = 1;
	ctx->valid = false;
	return 0;
}
static int alloc_gathers(struct nvhost_job *job, int num_cmdbufs)
{
	int err = 0;

	job->gather_mem = NULL;
	job->gathers = NULL;
	job->gather_mem_size = 0;

	if (num_cmdbufs) {
		/* Allocate memory */
		job->gather_mem = nvmap_alloc(job->nvmap,
					      gather_size(num_cmdbufs),
					      32, NVMAP_HANDLE_CACHEABLE, 0);
		if (IS_ERR_OR_NULL(job->gather_mem)) {
			err = PTR_ERR(job->gather_mem);
			if (!job->gather_mem)
				err = -ENOMEM;
			job->gather_mem = NULL;
			goto error;
		}
		job->gather_mem_size = gather_size(num_cmdbufs);

		/* Map memory to kernel */
		job->gathers = nvmap_mmap(job->gather_mem);
		if (IS_ERR_OR_NULL(job->gathers)) {
			err = PTR_ERR(job->gathers);
			if (!job->gathers)
				err = -ENOMEM;
			job->gathers = NULL;
			goto error;
		}
	}

	return 0;

error:
	free_gathers(job);
	return err;
}
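/*
 * Hedged sketch (not shown in this file): free_gathers() is the error
 * path used by alloc_gathers() above. A minimal counterpart, assuming
 * the nvmap_munmap(handle, vaddr) and nvmap_free(client, handle)
 * signatures used elsewhere in this section, might look like:
 */
static void free_gathers(struct nvhost_job *job)
{
	/* both pointers may legitimately be NULL on early failures */
	if (job->gathers)
		nvmap_munmap(job->gather_mem, job->gathers);
	job->gathers = NULL;
	if (job->gather_mem)
		nvmap_free(job->nvmap, job->gather_mem);
	job->gather_mem = NULL;
	job->gather_mem_size = 0;
}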
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;

	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(ch->desc->name);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch, NULL);
		return -ENOMEM;
	}
	filp->private_data = priv;
	priv->ch = ch;
	nvhost_module_add_client(ch->dev, &ch->mod, priv);

	priv->gather_mem = nvmap_alloc(ch->dev->nvmap,
				       sizeof(u32) * 2 * NVHOST_MAX_GATHERS,
				       32, NVMAP_HANDLE_CACHEABLE);
	if (IS_ERR(priv->gather_mem))
		goto fail;

	if (ch->ctxhandler.alloc) {
		priv->hwctx = ch->ctxhandler.alloc(ch);
		if (!priv->hwctx)
			goto fail;
		priv->hwctx->timeout = &priv->timeout;
	}

	priv->gathers = nvmap_mmap(priv->gather_mem);
	if (!priv->gathers)	/* mapping can fail; don't report success */
		goto fail;

	return 0;

fail:
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}
static int tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream,
					    size_t size)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;
#if TEGRA30_USE_SMMU
	unsigned char *vaddr;
	phys_addr_t paddr;
	struct tegra_smmu_data *ptsd;

	ptsd = kzalloc(sizeof(struct tegra_smmu_data), GFP_KERNEL);
	if (!ptsd)	/* was dereferenced unchecked */
		return -ENOMEM;
	ptsd->pcm_nvmap_client = nvmap_create_client(nvmap_dev, "Audio_SMMU");
	ptsd->pcm_nvmap_handle = nvmap_alloc(ptsd->pcm_nvmap_client, size, 32,
					     NVMAP_HANDLE_WRITE_COMBINE,
					     NVMAP_HEAP_IOVMM);
	vaddr = (unsigned char *)nvmap_mmap(ptsd->pcm_nvmap_handle);
	paddr = nvmap_pin(ptsd->pcm_nvmap_client, ptsd->pcm_nvmap_handle);
	buf->area = vaddr;
	buf->addr = paddr;
	buf->private_data = ptsd;
#else
	buf->area = dma_alloc_writecombine(pcm->card->dev, size,
					   &buf->addr, GFP_KERNEL);
	if (!buf->area)
		return -ENOMEM;
	buf->private_data = NULL;
#endif
	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = pcm->card->dev;
	buf->bytes = size;
	return 0;
}
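/*
 * Hedged sketch (not part of the original file): the SMMU path above
 * pins and maps an nvmap buffer but no teardown is shown. A plausible
 * counterpart, assuming this nvmap version pairs nvmap_pin()/nvmap_mmap()
 * with nvmap_unpin()/nvmap_munmap() and frees with nvmap_free(client,
 * handle) as the other snippets here do, might look like:
 */
static void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf;

	if (!substream)
		return;
	buf = &substream->dma_buffer;
#if TEGRA30_USE_SMMU
	{
		struct tegra_smmu_data *ptsd = buf->private_data;

		if (ptsd) {
			nvmap_unpin(ptsd->pcm_nvmap_client,
				    ptsd->pcm_nvmap_handle);
			nvmap_munmap(ptsd->pcm_nvmap_handle, buf->area);
			nvmap_free(ptsd->pcm_nvmap_client,
				   ptsd->pcm_nvmap_handle);
			kfree(ptsd);
			buf->private_data = NULL;
		}
	}
#else
	if (buf->area)
		dma_free_writecombine(pcm->card->dev, buf->bytes,
				      buf->area, buf->addr);
#endif
	buf->area = NULL;
}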
struct mem_handle *nvhost_nvmap_alloc(struct mem_mgr *mgr, size_t size,
				      size_t align, int flags)
{
	return (struct mem_handle *)nvmap_alloc((struct nvmap_client *)mgr,
						size, align, flags, 0);
}
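/*
 * Hedged sketch (not part of the original file): the mem_mgr wrapper
 * above presumably has a matching release hook. Assuming nvmap_alloc()
 * returns a struct nvmap_handle_ref and nvmap_free() takes
 * (client, handle) as in the other snippets in this section, a
 * counterpart could be:
 */
void nvhost_nvmap_put(struct mem_mgr *mgr, struct mem_handle *handle)
{
	nvmap_free((struct nvmap_client *)mgr,
		   (struct nvmap_handle_ref *)handle);
}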