/* * Write cmdbuf to ftrace output. Checks if cmdbuf contents should be output * and mmaps the cmdbuf contents if required. */ static void trace_write_cmdbufs(struct nvhost_job *job) { struct nvmap_handle_ref handle; void *mem = NULL; int i = 0; for (i = 0; i < job->num_gathers; i++) { struct nvhost_channel_gather *gather = &job->gathers[i]; if (nvhost_debug_trace_cmdbuf) { handle.handle = nvmap_id_to_handle(gather->mem_id); mem = nvmap_mmap(&handle); if (IS_ERR_OR_NULL(mem)) mem = NULL; }; if (mem) { u32 i; /* * Write in batches of 128 as there seems to be a limit * of how much you can output to ftrace at once. */ for (i = 0; i < gather->words; i += TRACE_MAX_LENGTH) { trace_nvhost_channel_write_cmdbuf_data( job->ch->desc->name, gather->mem_id, min(gather->words - i, TRACE_MAX_LENGTH), gather->offset + i * sizeof(u32), mem); } nvmap_munmap(&handle, mem); } } }
/*
 * Allocate and initialize a t30 3D hwctx handler.
 *
 * Returns the embedded nvhost_hwctx_handler on success, NULL on failure.
 * Fix: the kmalloc'd handler 'p' was leaked on both error paths
 * (save-buffer allocation failure and nvmap_mmap failure).
 */
struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
		u32 syncpt, u32 waitbase,
		struct nvhost_channel *ch)
{
	struct nvmap_client *nvmap;
	u32 *save_ptr;
	struct host1x_hwctx_handler *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	nvmap = nvhost_get_host(ch->dev)->nvmap;

	register_sets = tegra_gpu_register_sets();
	BUG_ON(register_sets == 0 || register_sets > 2);

	p->syncpt = syncpt;
	p->waitbase = waitbase;

	setup_save(p, NULL);

	p->save_buf = nvmap_alloc(nvmap, p->save_size * 4, 32,
				NVMAP_HANDLE_WRITE_COMBINE, 0);
	if (IS_ERR(p->save_buf)) {
		p->save_buf = NULL;
		kfree(p);	/* was leaked here */
		return NULL;
	}

	p->save_slots = 6;
	if (register_sets == 2)
		p->save_slots += 2;

	save_ptr = nvmap_mmap(p->save_buf);
	if (!save_ptr) {
		nvmap_free(nvmap, p->save_buf);
		p->save_buf = NULL;
		kfree(p);	/* was leaked here */
		return NULL;
	}

	p->save_phys = nvmap_pin(nvmap, p->save_buf);

	setup_save(p, save_ptr);

	p->h.alloc = ctx3d_alloc_v1;
	p->h.save_push = save_push_v1;
	p->h.save_service = NULL;
	p->h.get = nvhost_3dctx_get;
	p->h.put = nvhost_3dctx_put;

	return &p->h;
}
/*
 * Common allocation path for a 3D hardware context.
 *
 * Allocates the context struct and its restore buffer; optionally maps the
 * restore buffer into the kernel (map_restore) and pins it.
 * Returns the context on success, NULL on failure.
 *
 * Fix: when nvmap_alloc returned an ERR_PTR, the fail path passed that
 * ERR_PTR straight to nvmap_free. The handle is now cleared first and the
 * fail path frees only a valid handle.
 */
struct nvhost_hwctx *nvhost_3dctx_alloc_common(struct nvhost_channel *ch,
					bool map_restore)
{
	struct nvmap_client *nvmap = ch->dev->nvmap;
	struct nvhost_hwctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->restore = nvmap_alloc(nvmap, nvhost_3dctx_restore_size * 4, 32,
		map_restore ? NVMAP_HANDLE_WRITE_COMBINE
			: NVMAP_HANDLE_UNCACHEABLE);
	if (IS_ERR_OR_NULL(ctx->restore)) {
		/* don't let an ERR_PTR reach nvmap_free in the fail path */
		ctx->restore = NULL;
		goto fail;
	}

	if (map_restore) {
		ctx->restore_virt = nvmap_mmap(ctx->restore);
		if (!ctx->restore_virt)
			goto fail;
	} else
		ctx->restore_virt = NULL;

	kref_init(&ctx->ref);
	ctx->channel = ch;
	ctx->valid = false;
	ctx->save = nvhost_3dctx_save_buf;
	ctx->save_incrs = nvhost_3dctx_save_incrs;
	ctx->save_thresh = nvhost_3dctx_save_thresh;
	ctx->save_slots = nvhost_3dctx_save_slots;

	ctx->restore_phys = nvmap_pin(nvmap, ctx->restore);
	if (IS_ERR_VALUE(ctx->restore_phys))
		goto fail;

	ctx->restore_size = nvhost_3dctx_restore_size;
	ctx->restore_incrs = nvhost_3dctx_restore_incrs;
	return ctx;

fail:
	if (map_restore && ctx->restore_virt) {
		nvmap_munmap(ctx->restore, ctx->restore_virt);
		ctx->restore_virt = NULL;
	}
	if (ctx->restore) {
		nvmap_free(nvmap, ctx->restore);
		ctx->restore = NULL;
	}
	kfree(ctx);
	return NULL;
}
/*
 * Initialize the t30 3D context handler (global-state variant).
 *
 * Allocates, maps and pins the shared context-save buffer and installs the
 * handler callbacks. Returns 0 on success, negative errno on failure.
 *
 * Fix: nvmap_pin's result was previously unchecked; on failure we now
 * unmap/free the save buffer and return -ENOMEM, matching the
 * IS_ERR_VALUE pin check used in nvhost_3dctx_alloc_common.
 */
int __init nvhost_gr3d_t30_ctxhandler_init(struct nvhost_hwctx_handler *h)
{
	struct nvhost_channel *ch;
	struct nvmap_client *nvmap;
	u32 *save_ptr;

	ch = container_of(h, struct nvhost_channel, ctxhandler);
	nvmap = ch->dev->nvmap;

	register_sets = tegra_gpu_register_sets();
	BUG_ON(register_sets == 0 || register_sets > 2);

	setup_save(NULL);

	nvhost_3dctx_save_buf = nvmap_alloc(nvmap, save_size * 4, 32,
				NVMAP_HANDLE_WRITE_COMBINE);
	if (IS_ERR(nvhost_3dctx_save_buf)) {
		int err = PTR_ERR(nvhost_3dctx_save_buf);
		nvhost_3dctx_save_buf = NULL;
		return err;
	}

	nvhost_3dctx_save_slots = 6;
	if (register_sets == 2)
		nvhost_3dctx_save_slots += 2;

	save_ptr = nvmap_mmap(nvhost_3dctx_save_buf);
	if (!save_ptr) {
		nvmap_free(nvmap, nvhost_3dctx_save_buf);
		nvhost_3dctx_save_buf = NULL;
		return -ENOMEM;
	}

	save_phys = nvmap_pin(nvmap, nvhost_3dctx_save_buf);
	if (IS_ERR_VALUE(save_phys)) {
		/* was previously unchecked */
		nvmap_munmap(nvhost_3dctx_save_buf, save_ptr);
		nvmap_free(nvmap, nvhost_3dctx_save_buf);
		nvhost_3dctx_save_buf = NULL;
		return -ENOMEM;
	}

	setup_save(save_ptr);

	h->alloc = ctx3d_alloc_v1;
	h->save_push = save_push_v1;
	h->save_service = NULL;
	h->get = nvhost_3dctx_get;
	h->put = nvhost_3dctx_put;

	return 0;
}
static int alloc_gathers(struct nvhost_job *job, int num_cmdbufs) { int err = 0; job->gather_mem = NULL; job->gathers = NULL; job->gather_mem_size = 0; if (num_cmdbufs) { /* Allocate memory */ job->gather_mem = nvmap_alloc(job->nvmap, gather_size(num_cmdbufs), 32, NVMAP_HANDLE_CACHEABLE, 0); if (IS_ERR_OR_NULL(job->gather_mem)) { err = PTR_ERR(job->gather_mem); if (!job->gather_mem) err = -ENOMEM; job->gather_mem = NULL; goto error; } job->gather_mem_size = gather_size(num_cmdbufs); /* Map memory to kernel */ job->gathers = nvmap_mmap(job->gather_mem); if (IS_ERR_OR_NULL(job->gathers)) { err = PTR_ERR(job->gathers); if (!job->gathers) err = -ENOMEM; job->gathers = NULL; goto error; } } return 0; error: free_gathers(job); return err; }
static int nvhost_channelopen(struct inode *inode, struct file *filp) { struct nvhost_channel_userctx *priv; struct nvhost_channel *ch; ch = container_of(inode->i_cdev, struct nvhost_channel, cdev); ch = nvhost_getchannel(ch); if (!ch) return -ENOMEM; trace_nvhost_channel_open(ch->desc->name); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { nvhost_putchannel(ch, NULL); return -ENOMEM; } filp->private_data = priv; priv->ch = ch; nvhost_module_add_client(ch->dev, &ch->mod, priv); priv->gather_mem = nvmap_alloc(ch->dev->nvmap, sizeof(u32) * 2 * NVHOST_MAX_GATHERS, 32, NVMAP_HANDLE_CACHEABLE); if (IS_ERR(priv->gather_mem)) goto fail; if (ch->ctxhandler.alloc) { priv->hwctx = ch->ctxhandler.alloc(ch); if (!priv->hwctx) goto fail; priv->hwctx->timeout = &priv->timeout; } priv->gathers = nvmap_mmap(priv->gather_mem); return 0; fail: nvhost_channelrelease(inode, filp); return -ENOMEM; }
/*
 * Preallocate the DMA buffer for a PCM stream.
 *
 * SMMU builds allocate through nvmap (IOVMM heap) and record the client /
 * handle in buf->private_data; non-SMMU builds use dma_alloc_writecombine.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * Fixes: kzalloc, nvmap_create_client, nvmap_alloc, nvmap_mmap and
 * nvmap_pin results were all previously unchecked (NULL deref on OOM,
 * garbage buf->area/buf->addr on mapping/pinning failure).
 */
static int tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream,
				size_t size)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;
#if TEGRA30_USE_SMMU
	unsigned char *vaddr;
	phys_addr_t paddr;
	struct tegra_smmu_data *ptsd;

	ptsd = kzalloc(sizeof(struct tegra_smmu_data), GFP_KERNEL);
	if (!ptsd)
		return -ENOMEM;

	ptsd->pcm_nvmap_client = nvmap_create_client(nvmap_dev, "Audio_SMMU");
	if (IS_ERR_OR_NULL(ptsd->pcm_nvmap_client)) {
		kfree(ptsd);
		return -ENOMEM;
	}

	ptsd->pcm_nvmap_handle = nvmap_alloc(ptsd->pcm_nvmap_client, size, 32,
					NVMAP_HANDLE_WRITE_COMBINE,
					NVMAP_HEAP_IOVMM);
	if (IS_ERR_OR_NULL(ptsd->pcm_nvmap_handle)) {
		/* TODO(review): no client-teardown API is visible in this
		 * file; the client object is not destroyed here */
		kfree(ptsd);
		return -ENOMEM;
	}

	vaddr = (unsigned char *) nvmap_mmap(ptsd->pcm_nvmap_handle);
	if (IS_ERR_OR_NULL(vaddr)) {
		nvmap_free(ptsd->pcm_nvmap_client, ptsd->pcm_nvmap_handle);
		kfree(ptsd);
		return -ENOMEM;
	}

	paddr = nvmap_pin(ptsd->pcm_nvmap_client, ptsd->pcm_nvmap_handle);
	if (IS_ERR_VALUE(paddr)) {
		nvmap_munmap(ptsd->pcm_nvmap_handle, vaddr);
		nvmap_free(ptsd->pcm_nvmap_client, ptsd->pcm_nvmap_handle);
		kfree(ptsd);
		return -ENOMEM;
	}

	buf->area = vaddr;
	buf->addr = paddr;
	buf->private_data = ptsd;
#else
	buf->area = dma_alloc_writecombine(pcm->card->dev, size,
					&buf->addr, GFP_KERNEL);
	if (!buf->area)
		return -ENOMEM;
	buf->private_data = NULL;
#endif
	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = pcm->card->dev;
	buf->bytes = size;
	return 0;
}
/*
 * nvmap backend for the generic mem_handle mmap hook: the opaque
 * mem_handle is really an nvmap_handle_ref, so map it via nvmap.
 */
void *nvhost_nvmap_mmap(struct mem_handle *handle)
{
	struct nvmap_handle_ref *ref = (struct nvmap_handle_ref *)handle;

	return nvmap_mmap(ref);
}