int nvhost_job_pin(struct nvhost_job *job, struct nvhost_syncpt *sp) { int err = 0, i = 0; phys_addr_t gather_phys = 0; void *gather_addr = NULL; unsigned long waitchk_mask = job->waitchk_mask; /* get current syncpt values for waitchk */ for_each_set_bit(i, &waitchk_mask, sizeof(job->waitchk_mask)) nvhost_syncpt_update_min(sp, i); /* pin gathers */ for (i = 0; i < job->num_gathers; i++) { struct nvhost_job_gather *g = &job->gathers[i]; /* process each gather mem only once */ if (!g->ref) { g->ref = mem_op().get(job->memmgr, job->gathers[i].mem_id); if (IS_ERR(g->ref)) { err = PTR_ERR(g->ref); g->ref = NULL; break; } gather_phys = mem_op().pin(job->memmgr, g->ref); if (IS_ERR((void *)gather_phys)) { mem_op().put(job->memmgr, g->ref); err = gather_phys; break; } /* store the gather ref into unpin array */ job->unpins[job->num_unpins++] = g->ref; gather_addr = mem_op().mmap(g->ref); if (!gather_addr) { err = -ENOMEM; break; } err = do_relocs(job, g->mem_id, gather_addr); if (!err) err = do_waitchks(job, sp, g->mem_id, gather_addr); mem_op().munmap(g->ref, gather_addr); if (err) break; } g->mem = gather_phys + g->offset; } wmb(); return err; }
/* Release the pinned MSENC ucode buffer, if one was ever allocated. */
void nvhost_msenc_deinit(struct nvhost_device *dev)
{
	struct msenc *m = get_msenc(dev);

	/* unpin, free ucode memory */
	if (m->mem_r) {
		struct mem_mgr *memmgr = nvhost_get_host(dev)->memmgr;

		mem_op().unpin(memmgr, m->mem_r, m->pa);
		mem_op().put(memmgr, m->mem_r);
		m->mem_r = 0;
	}
}
void nvhost_3dctx_free(struct kref *ref) { struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref); struct host1x_hwctx *ctx = to_host1x_hwctx(nctx); struct mem_mgr *memmgr = nvhost_get_host(nctx->channel->dev)->memmgr; if (ctx->restore_virt) mem_op().munmap(ctx->restore, ctx->restore_virt); mem_op().unpin(memmgr, ctx->restore, ctx->restore_sgt); mem_op().put(memmgr, ctx->restore); kfree(ctx); }
/*
 * Unpin and release every memory handle recorded in job->unpins by
 * nvhost_job_pin() / do_relocs(), then reset the unpin list.
 */
void nvhost_job_unpin(struct nvhost_job *job)
{
	int idx;

	for (idx = 0; idx < job->num_unpins; idx++) {
		struct mem_handle *handle = job->unpins[idx];

		mem_op().unpin(job->memmgr, handle);
		mem_op().put(job->memmgr, handle);
	}

	/* poison the stale handle pointers to catch use-after-unpin */
	memset(job->unpins, BAD_MAGIC,
			job->num_unpins * sizeof(struct mem_handle *));
	job->num_unpins = 0;
}
/*
 * Set up the Tegra30 3D context handler: allocate the save buffer,
 * fill it with the context-save command stream and wire up the v1
 * context ops.  Returns the embedded nvhost handler or NULL on failure.
 *
 * Fix: the original returned NULL on alloc/mmap failure without freeing
 * the kmalloc'd handler, leaking it; use goto cleanup like the t20
 * variant does.
 */
struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
		u32 syncpt, u32 waitbase, struct nvhost_channel *ch)
{
	struct mem_mgr *memmgr;
	u32 *save_ptr;
	struct host1x_hwctx_handler *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	memmgr = nvhost_get_host(ch->dev)->memmgr;

	p->syncpt = syncpt;
	p->waitbase = waitbase;

	/* first pass computes p->save_size without emitting anything */
	setup_save(p, NULL);

	p->save_buf = mem_op().alloc(memmgr, p->save_size * 4, 32,
				mem_mgr_flag_write_combine);
	if (IS_ERR_OR_NULL(p->save_buf))
		goto fail_alloc;

	p->save_slots = 8;

	save_ptr = mem_op().mmap(p->save_buf);
	if (!save_ptr)
		goto fail_mmap;

	/* NOTE(review): pin result is not error-checked here (matches the
	 * original); confirm whether a failure path is needed. */
	p->save_phys = mem_op().pin(memmgr, p->save_buf);

	/* second pass writes the actual save command stream */
	setup_save(p, save_ptr);

	mem_op().munmap(p->save_buf, save_ptr);

	p->h.alloc = ctx3d_alloc_v1;
	p->h.save_push = save_push_v1;
	p->h.save_service = NULL;
	p->h.get = nvhost_3dctx_get;
	p->h.put = nvhost_3dctx_put;

	return &p->h;

fail_mmap:
	mem_op().put(memmgr, p->save_buf);
fail_alloc:
	kfree(p);
	return NULL;
}
/*** ctx3d ***/

/*
 * Allocate and initialize a host1x 3D context.
 *
 * Allocates the context-restore buffer, optionally CPU-maps it
 * (map_restore), pins it for DMA and copies the handler's save/restore
 * parameters into the new context.  The hwctx kref starts at 1.
 *
 * Returns the new context, or NULL on any failure (all partially
 * acquired resources are released via the goto chain below).
 */
struct host1x_hwctx *nvhost_3dctx_alloc_common(struct host1x_hwctx_handler *p,
		struct nvhost_channel *ch, bool map_restore)
{
	struct mem_mgr *memmgr = nvhost_get_host(ch->dev)->memmgr;
	struct host1x_hwctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/* restore_size is in 32-bit words, hence the * 4; write-combined
	 * only when the CPU will write the buffer through a mapping */
	ctx->restore = mem_op().alloc(memmgr, p->restore_size * 4, 32,
		map_restore ? mem_mgr_flag_write_combine
			    : mem_mgr_flag_uncacheable);
	if (IS_ERR_OR_NULL(ctx->restore))
		goto fail_alloc;

	if (map_restore) {
		ctx->restore_virt = mem_op().mmap(ctx->restore);
		if (IS_ERR_OR_NULL(ctx->restore_virt))
			goto fail_mmap;
	} else
		ctx->restore_virt = NULL;

	/* pin for device access and extract the DMA address */
	ctx->restore_sgt = mem_op().pin(memmgr, ctx->restore);
	if (IS_ERR_OR_NULL(ctx->restore_sgt))
		goto fail_pin;
	ctx->restore_phys = sg_dma_address(ctx->restore_sgt->sgl);

	kref_init(&ctx->hwctx.ref);
	ctx->hwctx.h = &p->h;
	ctx->hwctx.channel = ch;
	/* not valid until a context save has populated the buffer */
	ctx->hwctx.valid = false;
	ctx->save_incrs = p->save_incrs;
	ctx->save_thresh = p->save_thresh;
	ctx->save_slots = p->save_slots;
	ctx->restore_size = p->restore_size;
	ctx->restore_incrs = p->restore_incrs;
	return ctx;

fail_pin:
	/* pin failed: nothing to unpin, but undo mmap/alloc */
	if (map_restore)
		mem_op().munmap(ctx->restore, ctx->restore_virt);
fail_mmap:
	mem_op().put(memmgr, ctx->restore);
fail_alloc:
	kfree(ctx);
	return NULL;
}
struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch, struct nvhost_hwctx *hwctx, struct nvhost_submit_hdr_ext *hdr, struct mem_mgr *memmgr, int priority, int clientid) { struct nvhost_job *job = NULL; job = vzalloc(job_size(hdr)); if (!job) goto error; kref_init(&job->ref); job->ch = ch; job->hwctx = hwctx; if (hwctx) hwctx->h->get(hwctx); job->memmgr = memmgr ? mem_op().get_mgr(memmgr) : NULL; init_fields(job, hdr, priority, clientid); return job; error: if (job) nvhost_job_put(job); return NULL; }
void nvhost_msenc_deinit(struct platform_device *dev) { struct msenc *m = get_msenc(dev); /* unpin, free ucode memory */ if (m->mapped) { mem_op().munmap(m->mem_r, m->mapped); m->mapped = NULL; } if (m->pa) { mem_op().unpin(nvhost_get_host(dev)->memmgr, m->mem_r, m->pa); m->pa = NULL; } if (m->mem_r) { mem_op().put(nvhost_get_host(dev)->memmgr, m->mem_r); m->mem_r = NULL; } }
/*
 * Pin relocation targets and patch their device addresses into one
 * gather's CPU-mapped command buffer.
 *
 * Pinned target handles are appended to job->unpins for release by
 * nvhost_job_unpin().  Returns 0 on success or a negative errno.
 */
static int do_relocs(struct nvhost_job *job,
		u32 cmdbuf_mem, void *cmdbuf_addr)
{
	phys_addr_t target_phys = -EINVAL;
	int i;
	u32 mem_id = 0;
	struct mem_handle *target_ref = NULL;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct nvhost_reloc *reloc = &job->relocarray[i];
		struct nvhost_reloc_shift *shift = &job->relocshiftarray[i];

		/* skip all other gathers */
		if (cmdbuf_mem != reloc->cmdbuf_mem)
			continue;

		/* check if pin-mem is same as previous (avoid re-pinning
		 * consecutive relocs against the same target) */
		if (reloc->target != mem_id) {
			target_ref = mem_op().get(job->memmgr, reloc->target);
			if (IS_ERR(target_ref))
				return PTR_ERR(target_ref);

			target_phys = mem_op().pin(job->memmgr, target_ref);
			if (IS_ERR((void *)target_phys)) {
				mem_op().put(job->memmgr, target_ref);
				/* Fix: decode the encoded error via
				 * PTR_ERR instead of returning the raw
				 * phys value truncated to int. */
				return PTR_ERR((void *)target_phys);
			}

			mem_id = reloc->target;
			job->unpins[job->num_unpins++] = target_ref;
		}

		__raw_writel(
			(target_phys + reloc->target_offset) >> shift->shift,
			(cmdbuf_addr + reloc->cmdbuf_offset));

		/* Different gathers might have same mem_id. This ensures we
		 * perform reloc only once per gather memid. */
		reloc->cmdbuf_mem = 0;
	}

	return 0;
}
/*
 * kref release callback for a job: drop the hwctx references and the
 * memory-manager reference, then free the job allocation itself.
 */
static void job_free(struct kref *ref)
{
	struct nvhost_job *job = container_of(ref, struct nvhost_job, ref);

	/* release the context references taken while the job was live */
	if (job->hwctxref)
		job->hwctxref->h->put(job->hwctxref);
	if (job->hwctx)
		job->hwctx->h->put(job->hwctx);

	/* drop the memory manager client, then the job itself */
	if (job->memmgr)
		mem_op().put_mgr(job->memmgr);

	vfree(job);
}
/*
 * Set up the Tegra20 3D context handler: allocate and pin the
 * context-save buffer, fill it with the save command stream and wire
 * up the v0 context ops.
 *
 * Returns the embedded nvhost handler, or NULL on failure (all
 * partially acquired resources are released via the goto chain).
 */
struct nvhost_hwctx_handler *nvhost_gr3d_t20_ctxhandler_init(
		u32 syncpt, u32 waitbase, struct nvhost_channel *ch)
{
	struct mem_mgr *memmgr;
	u32 *save_ptr;
	struct host1x_hwctx_handler *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	memmgr = nvhost_get_host(ch->dev)->memmgr;

	p->syncpt = syncpt;
	p->waitbase = waitbase;

	/* first pass with a NULL pointer computes p->save_size */
	setup_save(p, NULL);

	p->save_buf = mem_op().alloc(memmgr, p->save_size * sizeof(u32), 32,
				mem_mgr_flag_write_combine);
	if (IS_ERR_OR_NULL(p->save_buf))
		goto fail_alloc;

	save_ptr = mem_op().mmap(p->save_buf);
	if (IS_ERR_OR_NULL(save_ptr))
		goto fail_mmap;

	/* pin for device access and extract the DMA address */
	p->save_sgt = mem_op().pin(memmgr, p->save_buf);
	if (IS_ERR_OR_NULL(p->save_sgt))
		goto fail_pin;
	p->save_phys = sg_dma_address(p->save_sgt->sgl);

	/* second pass writes the actual save command stream */
	setup_save(p, save_ptr);

	mem_op().munmap(p->save_buf, save_ptr);

	p->save_slots = 1;

	p->h.alloc = ctx3d_alloc_v0;
	p->h.save_push = save_push_v0;
	p->h.save_service = ctx3d_save_service;
	p->h.get = nvhost_3dctx_get;
	p->h.put = nvhost_3dctx_put;

	return &p->h;

fail_pin:
	mem_op().munmap(p->save_buf, save_ptr);
fail_mmap:
	mem_op().put(memmgr, p->save_buf);
fail_alloc:
	kfree(p);
	return NULL;
}
void nvhost_msenc_init(struct nvhost_device *dev) { int err = 0; struct msenc *m; char *fw_name; fw_name = msenc_get_fw_name(dev); if (!fw_name) { dev_err(&dev->dev, "couldn't determine firmware name"); return; } m = kzalloc(sizeof(struct msenc), GFP_KERNEL); if (!m) { dev_err(&dev->dev, "couldn't alloc ucode"); kfree(fw_name); return; } set_msenc(dev, m); err = msenc_read_ucode(dev, fw_name); kfree(fw_name); fw_name = 0; if (err || !m->valid) { dev_err(&dev->dev, "ucode not valid"); goto clean_up; } if (!&dev->can_powergate) { nvhost_module_busy(dev); msenc_boot(dev); nvhost_module_idle(dev); } return; clean_up: dev_err(&dev->dev, "failed"); mem_op().unpin(nvhost_get_host(dev)->memmgr, m->mem_r, m->pa); }
static int nvhost_channelrelease(struct inode *inode, struct file *filp) { struct nvhost_channel_userctx *priv = filp->private_data; trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev)); filp->private_data = NULL; nvhost_module_remove_client(priv->ch->dev, priv); nvhost_putchannel(priv->ch, priv->hwctx); if (priv->hwctx) priv->ch->ctxhandler->put(priv->hwctx); if (priv->job) nvhost_job_put(priv->job); mem_op().put_mgr(priv->memmgr); kfree(priv); return 0; }
int msenc_read_ucode(struct platform_device *dev, const char *fw_name) { struct msenc *m = get_msenc(dev); const struct firmware *ucode_fw; int err; ucode_fw = nvhost_client_request_firmware(dev, fw_name); if (IS_ERR_OR_NULL(ucode_fw)) { dev_err(&dev->dev, "failed to get msenc firmware\n"); err = -ENOENT; return err; } /* allocate pages for ucode */ m->mem_r = mem_op().alloc(nvhost_get_host(dev)->memmgr, roundup(ucode_fw->size, PAGE_SIZE), PAGE_SIZE, mem_mgr_flag_uncacheable); if (IS_ERR_OR_NULL(m->mem_r)) { dev_err(&dev->dev, "nvmap alloc failed"); err = -ENOMEM; goto clean_up; } m->pa = mem_op().pin(nvhost_get_host(dev)->memmgr, m->mem_r); if (IS_ERR_OR_NULL(m->pa)) { dev_err(&dev->dev, "nvmap pin failed for ucode"); err = PTR_ERR(m->pa); m->pa = NULL; goto clean_up; } m->mapped = mem_op().mmap(m->mem_r); if (IS_ERR_OR_NULL(m->mapped)) { dev_err(&dev->dev, "nvmap mmap failed"); err = -ENOMEM; goto clean_up; } err = msenc_setup_ucode_image(dev, (u32 *)m->mapped, ucode_fw); if (err) { dev_err(&dev->dev, "failed to parse firmware image\n"); return err; } m->valid = true; release_firmware(ucode_fw); return 0; clean_up: if (m->mapped) { mem_op().munmap(m->mem_r, (u32 *)m->mapped); m->mapped = NULL; } if (m->pa) { mem_op().unpin(nvhost_get_host(dev)->memmgr, m->mem_r, m->pa); m->pa = NULL; } if (m->mem_r) { mem_op().put(nvhost_get_host(dev)->memmgr, m->mem_r); m->mem_r = NULL; } release_firmware(ucode_fw); return err; }
/*
 * Probe the host1x controller (nvhost_device bus variant): map its
 * register aperture, allocate chip resources and the memory-manager
 * client, and bring up syncpoints, interrupts, userspace nodes and the
 * module framework.
 *
 * Fixes: the clock shutdown loop disabled clk[0] every iteration
 * instead of clk[i], leaving clk[1..n-1] enabled forever; a duplicate
 * "host->dev = dev" assignment is removed.
 */
static int __devinit nvhost_probe(struct nvhost_device *dev,
	struct nvhost_device_id *id_table)
{
	struct nvhost_master *host;
	struct resource *regs, *intr0, *intr1;
	int i, err;

	regs = nvhost_get_resource(dev, IORESOURCE_MEM, 0);
	intr0 = nvhost_get_resource(dev, IORESOURCE_IRQ, 0);
	intr1 = nvhost_get_resource(dev, IORESOURCE_IRQ, 1);

	if (!regs || !intr0 || !intr1) {
		dev_err(&dev->dev, "missing required platform resources\n");
		return -ENXIO;
	}

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	/* Register host1x device as bus master */
	host->dev = dev;

	/* Copy host1x parameters */
	memcpy(&host->info, dev->dev.platform_data,
			sizeof(struct host1x_device_info));

	host->reg_mem = request_mem_region(regs->start,
			resource_size(regs), dev->name);
	if (!host->reg_mem) {
		dev_err(&dev->dev, "failed to get host register memory\n");
		err = -ENXIO;
		goto fail;
	}

	host->aperture = ioremap(regs->start, resource_size(regs));
	if (!host->aperture) {
		dev_err(&dev->dev, "failed to remap host registers\n");
		err = -ENXIO;
		goto fail;
	}

	err = nvhost_alloc_resources(host);
	if (err) {
		dev_err(&dev->dev, "failed to init chip support\n");
		goto fail;
	}

	host->memmgr = mem_op().alloc_mgr();
	if (!host->memmgr) {
		dev_err(&dev->dev, "unable to create nvmap client\n");
		err = -EIO;
		goto fail;
	}

	/* Give pointer to host1x via driver */
	nvhost_set_drvdata(dev, host);

	nvhost_bus_add_host(host);

	err = nvhost_syncpt_init(dev, &host->syncpt);
	if (err)
		goto fail;

	err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
	if (err)
		goto fail;

	err = nvhost_user_init(host);
	if (err)
		goto fail;

	err = nvhost_module_init(dev);
	if (err)
		goto fail;

	/* clock the host briefly so the syncpoint state can be reset */
	for (i = 0; i < host->dev->num_clks; i++)
		clk_enable(host->dev->clk[i]);
	nvhost_syncpt_reset(&host->syncpt);
	/* Fix: disable the clock that was enabled (clk[i], not clk[0]) */
	for (i = 0; i < host->dev->num_clks; i++)
		clk_disable(host->dev->clk[i]);

	nvhost_debug_init(host);

	dev_info(&dev->dev, "initialized\n");
	return 0;

fail:
	nvhost_free_resources(host);
	if (host->memmgr)
		mem_op().put_mgr(host->memmgr);
	kfree(host);
	return err;
}
/*
 * Probe the host1x controller (platform_device variant): install the
 * common power/clock callbacks into the device data, map the register
 * aperture, allocate chip resources and the memory-manager client, and
 * bring up syncpoints, interrupts, userspace nodes, the module
 * framework and the device list.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources reachable from "fail" are released.
 */
static int __devinit nvhost_probe(struct platform_device *dev)
{
	struct nvhost_master *host;
	struct resource *regs, *intr0, *intr1;
	int i, err;
	struct nvhost_device_data *pdata =
		(struct nvhost_device_data *)dev->dev.platform_data;

	regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
	intr0 = platform_get_resource(dev, IORESOURCE_IRQ, 0);
	intr1 = platform_get_resource(dev, IORESOURCE_IRQ, 1);

	if (!regs || !intr0 || !intr1) {
		dev_err(&dev->dev, "missing required platform resources\n");
		return -ENXIO;
	}

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	/* publish the singleton master before any sub-init uses it */
	nvhost = host;

	host->dev = dev;

	/* Copy host1x parameters. The private_data gets replaced
	 * by nvhost_master later */
	memcpy(&host->info, pdata->private_data,
			sizeof(struct host1x_device_info));

	/* install the common host1x power/clock transition hooks */
	pdata->finalize_poweron = power_on_host;
	pdata->prepare_poweroff = power_off_host;
	pdata->prepare_clockoff = clock_off_host;
	pdata->finalize_clockon = clock_on_host;

	pdata->pdev = dev;

	/* set common host1x device data */
	platform_set_drvdata(dev, pdata);

	/* set private host1x device data */
	nvhost_set_private_data(dev, host);

	host->reg_mem = request_mem_region(regs->start,
			resource_size(regs), dev->name);
	if (!host->reg_mem) {
		dev_err(&dev->dev, "failed to get host register memory\n");
		err = -ENXIO;
		goto fail;
	}

	host->aperture = ioremap(regs->start, resource_size(regs));
	if (!host->aperture) {
		dev_err(&dev->dev, "failed to remap host registers\n");
		err = -ENXIO;
		goto fail;
	}

	err = nvhost_alloc_resources(host);
	if (err) {
		dev_err(&dev->dev, "failed to init chip support\n");
		goto fail;
	}

	host->memmgr = mem_op().alloc_mgr();
	if (!host->memmgr) {
		dev_err(&dev->dev, "unable to create nvmap client\n");
		err = -EIO;
		goto fail;
	}

	err = nvhost_syncpt_init(dev, &host->syncpt);
	if (err)
		goto fail;

	err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
	if (err)
		goto fail;

	err = nvhost_user_init(host);
	if (err)
		goto fail;

	err = nvhost_module_init(dev);
	if (err)
		goto fail;

	/* clock the host briefly so the syncpoint state can be reset */
	for (i = 0; i < pdata->num_clks; i++)
		clk_prepare_enable(pdata->clk[i]);
	nvhost_syncpt_reset(&host->syncpt);
	for (i = 0; i < pdata->num_clks; i++)
		clk_disable_unprepare(pdata->clk[i]);

	nvhost_device_list_init();
	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	nvhost_debug_init(host);

	dev_info(&dev->dev, "initialized\n");
	return 0;

fail:
	nvhost_free_resources(host);
	if (host->memmgr)
		mem_op().put_mgr(host->memmgr);
	kfree(host);
	return err;
}