/**
 * Increment syncpoint value from cpu, updating cache
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	/* Reserve the increment in the cached max value first so the
	 * cached max never lags behind the hardware value. */
	nvhost_syncpt_incr_max(sp, id, 1);
	/* Keep the host module powered across the register access. */
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
}
int nvhost_flcn_init(struct platform_device *dev) { int err = 0; struct nvhost_device_data *pdata = nvhost_get_devdata(dev); struct flcn *v = get_flcn(dev); nvhost_dbg_fn("in dev:%p v:%p", dev, v); v = kzalloc(sizeof(*v), GFP_KERNEL); if (!v) { dev_err(&dev->dev, "couldn't alloc flcn support"); err = -ENOMEM; goto clean_up; } set_flcn(dev, v); nvhost_dbg_fn("primed dev:%p v:%p", dev, v); err = flcn_read_ucode(dev, pdata->firmware_name); if (err || !v->valid) goto clean_up; nvhost_module_busy(dev); err = nvhost_flcn_boot(dev); nvhost_module_idle(dev); return 0; clean_up: nvhost_err(&dev->dev, "failed"); return err; }
/*
 * Take a power reference on the module, powering it up on the 0 -> 1
 * refcount transition. Serialized by mod->lock; also cancels any pending
 * deferred powerdown so the module cannot be switched off underneath us.
 */
void nvhost_module_busy(struct nvhost_module *mod)
{
	mutex_lock(&mod->lock);
	cancel_delayed_work(&mod->powerdown);
	if (mod->force_suspend) {
		/* busy during forced suspend indicates a driver bug;
		 * warn loudly but continue */
		pr_warn("tegra_grhost: module_busy despite %s force_suspend!\n",
			mod->name);
		WARN_ON(1);
	}
	if ((atomic_inc_return(&mod->refcount) == 1) && !mod->powered) {
		/* power the parent before this module */
		if (mod->parent)
			nvhost_module_busy(mod->parent);
		if (mod->powergate_id != -1) {
			/* powergated domains are expected to own exactly
			 * one clock */
			BUG_ON(mod->num_clks != 1);
			tegra_powergate_sequence_power_up(
				mod->powergate_id, mod->clk[0]);
		} else {
			int i;
			for (i = 0; i < mod->num_clks; i++)
				clk_enable(mod->clk[i]);
		}
		/* notify the module-specific power callback, then mark
		 * powered only after everything is on */
		if (mod->func)
			mod->func(mod, NVHOST_POWER_ACTION_ON);
		mod->powered = true;
	}
	mutex_unlock(&mod->lock);
}
/*
 * Reset and enable the per-channel tick/stall/xfer cycle counters.
 * Counters are cleared (hi word first) before the control bits are set,
 * so counting starts from zero. Always returns 0.
 */
static int host1x_tickctrl_init_channel(struct platform_device *dev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	void __iomem *regs = pdata->channel->aperture;

	/* keep host1x powered while poking channel registers */
	nvhost_module_busy(nvhost_get_parent(dev));

	/* Initialize counter */
	writel(0, regs + host1x_channel_tickcount_hi_r());
	writel(0, regs + host1x_channel_tickcount_lo_r());
	writel(0, regs + host1x_channel_stallcount_hi_r());
	writel(0, regs + host1x_channel_stallcount_lo_r());
	writel(0, regs + host1x_channel_xfercount_hi_r());
	writel(0, regs + host1x_channel_xfercount_lo_r());
	/* enable counting only after the counters are zeroed */
	writel(host1x_channel_channelctrl_enabletickcnt_f(1),
		regs + host1x_channel_channelctrl_r());
	writel(host1x_channel_stallctrl_enable_channel_stall_f(1),
		regs + host1x_channel_stallctrl_r());
	writel(host1x_channel_xferctrl_enable_channel_xfer_f(1),
		regs + host1x_channel_xferctrl_r());

	nvhost_module_idle(nvhost_get_parent(dev));
	host1x_tickctrl_debug_init(dev);

	return 0;
}
/*
 * Lock or unlock a module mutex on behalf of userspace.
 * args->lock is 1 to lock, 0 to unlock; args->id selects the mutex,
 * with id 0 special-cased as a plain module power reference.
 * Per-context state in ctx->mod_locks makes the operation idempotent:
 * re-locking an already-held mutex (or unlocking a free one) is a no-op.
 */
static int nvhost_ioctl_ctrl_module_mutex(struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_module_mutex_args *args)
{
	int err = 0;
	if (args->id >= nvhost_syncpt_nb_mlocks(&ctx->dev->syncpt) ||
	    args->lock > 1)
		return -EINVAL;

	trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
	if (args->lock && !ctx->mod_locks[args->id]) {
		/* id 0 only pins module power; other ids take a real
		 * hardware mlock which may fail with -EBUSY */
		if (args->id == 0)
			nvhost_module_busy(ctx->dev->dev);
		else
			err = nvhost_mutex_try_lock(&ctx->dev->syncpt,
					args->id);
		if (!err)
			ctx->mod_locks[args->id] = 1;
	} else if (!args->lock && ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_idle(ctx->dev->dev);
		else
			nvhost_mutex_unlock(&ctx->dev->syncpt, args->id);
		ctx->mod_locks[args->id] = 0;
	}
	return err;
}
/**
 * Increment syncpoint value from cpu, updating cache
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	/* only client-managed syncpoints track max in software;
	 * bump the cached max before touching hardware */
	if (nvhost_syncpt_client_managed(sp, id))
		nvhost_syncpt_incr_max(sp, id, 1);
	/* hold a power reference across the register write */
	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
}
/**
 * Read the current value of a syncpoint from hardware, refreshing the
 * cached minimum as a side effect.
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *host = syncpt_to_dev(sp);
	u32 min_val;

	/* power must be held while the chip ops touch the registers */
	nvhost_module_busy(host->dev);
	min_val = syncpt_op().update_min(sp, id);
	nvhost_module_idle(host->dev);

	return min_val;
}
/**
 * Read back the live value of a syncpoint, updating the cached minimum.
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *master = syncpt_to_dev(sp);
	u32 current_min;

	/* keep the host module powered for the register read */
	nvhost_module_busy(&master->mod);
	current_min = nvhost_syncpt_update_min(sp, id);
	nvhost_module_idle(&master->mod);

	return current_min;
}
/**
 * Refresh and return the cached wait base for a syncpoint.
 */
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *host = syncpt_to_dev(sp);
	u32 base;

	nvhost_module_busy(host->dev);
	/* chip op refreshes sp->base_val[id] from hardware */
	syncpt_op().read_wait_base(sp, id);
	base = sp->base_val[id];
	nvhost_module_idle(host->dev);

	return base;
}
/*
 * Dump complete debug state (mlocks, syncpoints, channels) to @o.
 * Module power is held for the whole dump so register reads are valid.
 */
static void show_all(struct nvhost_master *m, struct output *o)
{
	nvhost_module_busy(m->dev);
	m->op.debug.show_mlocks(m, o);
	show_syncpts(m, o);
	show_channels(m, o);
	nvhost_module_idle(m->dev);
}
/**
 * Refresh the wait base for syncpoint @id from hardware and return it.
 */
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *master = syncpt_to_dev(sp);
	u32 base;

	/* the chip must provide this op; anything else is a driver bug */
	BUG_ON(!syncpt_op(sp).read_wait_base);

	nvhost_module_busy(&master->mod);
	syncpt_op(sp).read_wait_base(sp, id);
	base = sp->base_val[id];
	nvhost_module_idle(&master->mod);

	return base;
}
/**
 * Read the current value of syncpoint @id into *@val.
 *
 * Returns 0 on success, or the negative errno from powering up the host
 * module.
 *
 * Fixes: the original collapsed any nvhost_module_busy() failure into
 * -EINVAL, misreporting a power-up error as an invalid-argument error;
 * the real error code is now propagated to the caller.
 */
int nvhost_syncpt_read_check(struct nvhost_syncpt *sp, u32 id, u32 *val)
{
	int err;

	err = nvhost_module_busy(syncpt_to_dev(sp)->dev);
	if (err)
		return err;

	*val = syncpt_op().update_min(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);

	return 0;
}
/*
 * Dump debug state (mlocks, syncpoints, channels) without touching the
 * channel FIFOs. Channels are iterated via the nvhost device list.
 */
static void show_all_no_fifo(struct nvhost_master *m, struct output *o)
{
	nvhost_module_busy(m->dev);
	nvhost_get_chip_ops()->debug.show_mlocks(m, o);
	show_syncpts(m, o);
	nvhost_debug_output(o, "---- channels ----\n");
	nvhost_device_list_for_all(o, show_channels_no_fifo);
	nvhost_module_idle(m->dev);
}
/*
 * Dump debug state (mlocks, syncpoints, channels) without touching the
 * channel FIFOs. Channels are iterated over the nvhost bus.
 */
static void show_all_no_fifo(struct nvhost_master *m, struct output *o)
{
	nvhost_module_busy(m->dev);
	nvhost_get_chip_ops()->debug.show_mlocks(m, o);
	show_syncpts(m, o);
	nvhost_debug_output(o, "---- channels ----\n");
	bus_for_each_dev(&(nvhost_bus_get())->nvhost_bus_type,
			NULL, o, show_channels_no_fifo);
	nvhost_module_idle(m->dev);
}
int nvhost_vic03_init(struct platform_device *dev) { int err = 0; struct nvhost_device_data *pdata = nvhost_get_devdata(dev); struct vic03 *v = get_vic03(dev); char *fw_name; nvhost_dbg_fn("in dev:%p v:%p", dev, v); fw_name = vic_get_fw_name(dev); if (!fw_name) { dev_err(&dev->dev, "couldn't determine firmware name"); return -EINVAL; } if (!v) { nvhost_dbg_fn("allocating vic03 support"); v = kzalloc(sizeof(*v), GFP_KERNEL); if (!v) { dev_err(&dev->dev, "couldn't alloc vic03 support"); err = -ENOMEM; goto clean_up; } set_vic03(dev, v); v->is_booted = false; } nvhost_dbg_fn("primed dev:%p v:%p", dev, v); v->host = nvhost_get_host(dev); if (!v->ucode.valid) err = vic03_read_ucode(dev, fw_name); if (err) goto clean_up; kfree(fw_name); fw_name = NULL; nvhost_module_busy(dev); err = vic03_boot(dev); nvhost_module_idle(dev); if (pdata->scaling_init) nvhost_scale_hw_init(dev); return 0; clean_up: kfree(fw_name); nvhost_err(&dev->dev, "failed"); return err; }
/*
 * Read the 64-bit channel transfer counter into *val.
 * Always returns 0.
 */
static int host1x_tickctrl_xfercount(struct platform_device *dev, u64 *val)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	void __iomem *regs = pdata->channel->aperture;

	nvhost_module_busy(nvhost_get_parent(dev));
	*val = readl64(regs + host1x_channel_xfercount_hi_r(),
			regs + host1x_channel_xfercount_lo_r());
	/* order the counter read before any dependent loads */
	rmb();
	nvhost_module_idle(nvhost_get_parent(dev));

	return 0;
}
/*
 * Write @count consecutive 32-bit values to the device aperture starting
 * at @offset, keeping the module powered for the duration.
 */
void nvhost_write_module_regs(struct nvhost_device *ndev, u32 offset,
			int count, const u32 *values)
{
	void __iomem *reg = ndev->aperture + offset;
	int i;

	nvhost_module_busy(ndev);
	for (i = 0; i < count; i++, reg += 4)
		writel(values[i], reg);
	/* make sure all register writes have been posted */
	wmb();
	nvhost_module_idle(ndev);
}
/**
 * Read the current syncpoint value from hardware.
 * Returns 0xffffffff as a sentinel if the host module cannot be powered.
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
	u32 result = 0xffffffff;

	/* failure to power up: hand back the sentinel value */
	if (nvhost_module_busy(syncpt_to_dev(sp)->dev) != 0)
		return result;

	result = syncpt_op().update_min(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);

	return result;
}
/*
 * Read @count consecutive 32-bit registers from the device aperture
 * starting at @offset into @values, keeping the module powered.
 */
void nvhost_read_module_regs(struct nvhost_device *ndev, u32 offset,
			int count, u32 *values)
{
	void __iomem *reg = ndev->aperture + offset;
	int i;

	nvhost_module_busy(ndev);
	for (i = 0; i < count; i++, reg += 4)
		values[i] = readl(reg);
	/* order the register reads before dependent loads */
	rmb();
	nvhost_module_idle(ndev);
}
/*
 * Disable the per-channel stall/xfer/tick cycle counters,
 * the inverse of host1x_tickctrl_init_channel().
 */
static void host1x_tickctrl_deinit_channel(struct platform_device *dev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	void __iomem *regs = pdata->channel->aperture;

	/* keep host1x powered while poking channel registers */
	nvhost_module_busy(nvhost_get_parent(dev));
	writel(host1x_channel_stallctrl_enable_channel_stall_f(0),
		regs + host1x_channel_stallctrl_r());
	writel(host1x_channel_xferctrl_enable_channel_xfer_f(0),
		regs + host1x_channel_xferctrl_r());
	writel(host1x_channel_channelctrl_enabletickcnt_f(0),
		regs + host1x_channel_channelctrl_r());
	nvhost_module_idle(nvhost_get_parent(dev));
}
/*
 * Try to take hardware module mutex @idx.
 * Returns 0 on success, -EBUSY if the mlock is already held.
 *
 * On success the module power reference taken here is deliberately NOT
 * released; it is held until the matching unlock drops it, keeping the
 * module powered while the mlock is owned.
 */
int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
{
	struct nvhost_master *host = syncpt_to_dev(sp);
	u32 reg;

	nvhost_module_busy(host->dev);
	/* chip op returns 0 when the mlock was acquired */
	reg = syncpt_op().mutex_try_lock(sp, idx);
	if (reg) {
		nvhost_module_idle(host->dev);
		return -EBUSY;
	}
	atomic_inc(&sp->lock_counts[idx]);
	return 0;
}
/**
 * Increment syncpoint value from cpu, updating cache
 *
 * Returns 0 on success, or the error from powering up the host module.
 */
int nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	int err;

	err = nvhost_module_busy(syncpt_to_dev(sp)->dev);
	if (err)
		return err;

	/* only client-managed syncpoints track max in software */
	if (nvhost_syncpt_client_managed(sp, id))
		nvhost_syncpt_incr_max(sp, id, 1);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return 0;
}
/*
 * Copy @size bytes of registers from module @module at @offset into
 * @values. @size must be a multiple of 4; the copy is done in 32-bit
 * register reads with the host module kept powered throughout.
 */
void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
			u32 offset, size_t size, void *values)
{
	struct nvhost_master *dev = cpuaccess_to_dev(ctx);
	void __iomem *src = ctx->regs[module] + offset;
	u32 *dst = values;
	size_t words;

	BUG_ON(size & 3);
	words = size >> 2;

	nvhost_module_busy(&dev->mod);
	for (; words; words--, src += 4)
		*dst++ = readl(src);
	/* order the register reads before dependent loads */
	rmb();
	nvhost_module_idle(&dev->mod);
}
/*
 * Try to take hardware module mutex @idx via the sync aperture.
 * Returns 0 on success, -EBUSY if the mlock is already held.
 *
 * On success the module power reference taken here is intentionally kept
 * until the corresponding unlock releases it.
 */
int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx)
{
	struct nvhost_master *dev = cpuaccess_to_dev(ctx);
	void __iomem *sync_regs = dev->sync_aperture;
	u32 reg;

	/* mlock registers returns 0 when the lock is acquired.
	 * writing 0 clears the lock. */
	nvhost_module_busy(&dev->mod);
	reg = readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
	if (reg) {
		nvhost_module_idle(&dev->mod);
		return -EBUSY;
	}
	return 0;
}
/*
 * Copy @size bytes from @values into the registers of module @module
 * starting at @offset. @size must be a multiple of 4; the copy is done
 * in 32-bit register writes with the host module kept powered.
 */
void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
			u32 offset, size_t size, const void *values)
{
	struct nvhost_master *dev = cpuaccess_to_dev(ctx);
	void __iomem *dst = ctx->regs[module] + offset;
	const u32 *src = values;
	size_t words;

	BUG_ON(size & 3);
	words = size >> 2;

	nvhost_module_busy(&dev->mod);
	for (; words; words--, dst += 4)
		writel(*src++, dst);
	/* make sure all register writes have been posted */
	wmb();
	nvhost_module_idle(&dev->mod);
}
/*
 * Take a power reference on the module, powering it up (parent first,
 * then clocks, then the module callback) on the 0 -> 1 refcount
 * transition. Cancels any pending deferred powerdown.
 */
void nvhost_module_busy(struct nvhost_module *mod)
{
	mutex_lock(&mod->lock);
	cancel_delayed_work(&mod->powerdown);
	if ((atomic_inc_return(&mod->refcount) == 1) && !mod->powered) {
		int i;
		/* the parent must be powered before this module */
		if (mod->parent)
			nvhost_module_busy(mod->parent);
		for (i = 0; i < mod->num_clks; i++)
			clk_enable(mod->clk[i]);
		if (mod->func)
			mod->func(mod, NVHOST_POWER_ACTION_ON);
		/* mark powered only after everything is switched on */
		mod->powered = true;
	}
	mutex_unlock(&mod->lock);
}
int nvhost_msenc_init(struct platform_device *dev) { struct nvhost_device_data *pdata = platform_get_drvdata(dev); int err = 0; struct msenc *m; char *fw_name; nvhost_dbg_fn("in dev:%p", dev); fw_name = msenc_get_fw_name(dev); if (!fw_name) { dev_err(&dev->dev, "couldn't determine firmware name"); return -EINVAL; } m = kzalloc(sizeof(struct msenc), GFP_KERNEL); if (!m) { dev_err(&dev->dev, "couldn't alloc ucode"); kfree(fw_name); return -ENOMEM; } set_msenc(dev, m); nvhost_dbg_fn("primed dev:%p", dev); err = msenc_read_ucode(dev, fw_name); kfree(fw_name); fw_name = 0; if (err || !m->valid) { dev_err(&dev->dev, "ucode not valid"); goto clean_up; } nvhost_module_busy(dev); msenc_boot(dev); nvhost_module_idle(dev); if (pdata->scaling_init) nvhost_scale_hw_init(dev); return 0; clean_up: dev_err(&dev->dev, "failed"); return err; }
/*
 * Take a power reference on the module. On the 0 -> 1 refcount
 * transition: power the parent first, then unpowergate, enable clocks
 * and run the module's finalize_poweron hook, in that order.
 */
void nvhost_module_busy(struct nvhost_module *mod)
{
	mutex_lock(&mod->lock);
	cancel_delayed_work(&mod->powerdown);
	/* per-module busy hook runs on every call, not just power-up */
	if (mod->desc->busy)
		mod->desc->busy(mod);
	if ((atomic_inc_return(&mod->refcount) == 1) && !mod->powered) {
		if (mod->parent)
			nvhost_module_busy(mod->parent);
		unpowergate(mod);
		clock_enable(mod);
		if (mod->desc->finalize_poweron)
			mod->desc->finalize_poweron(mod);
		/* mark powered only after the full sequence completes */
		mod->powered = true;
	}
	mutex_unlock(&mod->lock);
}
/*
 * Mask the CSI pixel-parser FIFO overflow interrupt and acknowledge any
 * pending status for this VI instance.
 *
 * ndev->id selects parser B (non-zero) or parser A (zero); the shared
 * IRQ line itself is only disabled on the master vi.0 device.
 *
 * Returns 0 on success, or the error from powering up the module.
 */
int vi_disable_irq(struct vi *tegra_vi)
{
	int val;
	int err = 0;

	err = nvhost_module_busy(tegra_vi->ndev);
	if (err)
		return err;

	if (tegra_vi->ndev->id) {
		/* Disable FIFO Overflow Interrupt */
		host1x_writel(tegra_vi->ndev,
			CSI_CSI_PIXEL_PARSER_B_INTERRUPT_MASK_0, 0);
		/* Reset status register */
		/* status bits are write-1-to-clear: read then write back */
		val = host1x_readl(tegra_vi->ndev,
			CSI_CSI_PIXEL_PARSER_B_STATUS_0);
		host1x_writel(tegra_vi->ndev,
			CSI_CSI_PIXEL_PARSER_B_STATUS_0, val);
	} else {
		/* interrupts are associated only with master dev vi.0 */
		disable_irq(tegra_vi->vi_irq);
		/* Disable FIFO Overflow Interrupt */
		host1x_writel(tegra_vi->ndev,
			CSI_CSI_PIXEL_PARSER_A_INTERRUPT_MASK_0, 0);
		/* Reset status register */
		val = host1x_readl(tegra_vi->ndev,
			CSI_CSI_PIXEL_PARSER_A_STATUS_0);
		host1x_writel(tegra_vi->ndev,
			CSI_CSI_PIXEL_PARSER_A_STATUS_0, val);
	}

	nvhost_module_idle(tegra_vi->ndev);

	return 0;
}
/*
 * Allocate MSENC state, load ucode and (when the device cannot be
 * powergated) boot the engine immediately.
 *
 * Returns nothing; failures are only logged. Note the invalid-ucode path
 * leaves the allocated msenc struct installed via set_msenc() —
 * presumably freed by the deinit path; TODO confirm against caller.
 */
void nvhost_msenc_init(struct platform_device *dev)
{
	struct nvhost_device_data *pdata =
		(struct nvhost_device_data *)dev->dev.platform_data;
	int err = 0;
	struct msenc *m;
	char *fw_name;

	fw_name = msenc_get_fw_name(dev);
	if (!fw_name) {
		dev_err(&dev->dev, "couldn't determine firmware name");
		return;
	}

	m = kzalloc(sizeof(struct msenc), GFP_KERNEL);
	if (!m) {
		dev_err(&dev->dev, "couldn't alloc ucode");
		kfree(fw_name);
		return;
	}
	set_msenc(dev, m);

	err = msenc_read_ucode(dev, fw_name);
	kfree(fw_name);
	fw_name = 0;

	if (err || !m->valid) {
		dev_err(&dev->dev, "ucode not valid");
		goto clean_up;
	}

	/* powergated devices defer boot until first real use */
	if (!pdata->can_powergate) {
		nvhost_module_busy(dev);
		msenc_boot(dev);
		nvhost_module_idle(dev);
	}

	return;

clean_up:
	dev_err(&dev->dev, "failed");
}