Example #1
static void podgov_enable(struct device *dev, int enable)
{
	struct platform_device *d = to_platform_device(dev);
	struct nvhost_device_data *pdata = platform_get_drvdata(d);
	struct devfreq *df = pdata->power_manager;
	struct podgov_info_rec *podgov;

	if (!df)
		return;

	/* make sure the device is alive before doing any scaling */
	nvhost_module_busy_noresume(d);

	mutex_lock(&df->lock);

	podgov = df->data;

	trace_podgov_enabled(enable);

	/* bad configuration. quit. */
	if (df->min_freq == df->max_freq)
		goto exit_unlock;

	/* store the enable information */
	podgov->enable = enable;

	/* skip local adjustment if we are enabling or the device is
	 * suspended */
	if (enable || !pm_runtime_active(&d->dev))
		goto exit_unlock;

	/* full speed */
	podgov->adjustment_frequency = df->max_freq;
	podgov->adjustment_type = ADJUSTMENT_LOCAL;
	update_devfreq(df);

	mutex_unlock(&df->lock);

	nvhost_module_idle(d);

	stop_podgov_workers(podgov);

	return;

exit_unlock:
	mutex_unlock(&df->lock);
	nvhost_module_idle(d);
}
Example #2
static void podgov_set_freq_request(struct device *dev, int freq_request)
{
	struct platform_device *d = to_platform_device(dev);
	struct nvhost_device_data *pdata = platform_get_drvdata(d);
	struct devfreq *df = pdata->power_manager;
	struct podgov_info_rec *podgov;

	if (!df)
		return;

	/* make sure the device is alive before doing any scaling */
	nvhost_module_busy_noresume(d);

	mutex_lock(&df->lock);

	podgov = df->data;

	trace_podgov_set_freq_request(freq_request);

	podgov->p_freq_request = freq_request;

	/* update the request only if podgov is enabled, device is turned on
	 * and the scaling is in user mode */
	if (podgov->enable && podgov->p_user &&
	    pm_runtime_active(&d->dev)) {
		podgov->adjustment_frequency = freq_request;
		podgov->adjustment_type = ADJUSTMENT_LOCAL;
		update_devfreq(df);
	}

	mutex_unlock(&df->lock);
	nvhost_module_idle(d);
}
Example #3
static int nvhost_ioctl_ctrl_module_mutex(struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_module_mutex_args *args)
{
	int err = 0;
	if (args->id >= nvhost_syncpt_nb_mlocks(&ctx->dev->syncpt) ||
	    args->lock > 1)
		return -EINVAL;

	trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
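	/* id 0 is special-cased: locking it takes a module power
	 * reference instead of a hardware mlock */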
	if (args->lock && !ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_busy(ctx->dev->dev);
		else
			err = nvhost_mutex_try_lock(&ctx->dev->syncpt,
					args->id);
		if (!err)
			ctx->mod_locks[args->id] = 1;
	} else if (!args->lock && ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_idle(ctx->dev->dev);
		else
			nvhost_mutex_unlock(&ctx->dev->syncpt, args->id);
		ctx->mod_locks[args->id] = 0;
	}
	return err;
}
Example #4
static void powerdown_handler(struct work_struct *work)
{
	struct nvhost_module *mod;

	mod = container_of(to_delayed_work(work),
			struct nvhost_module,
			powerdown);
	mutex_lock(&mod->lock);
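	/* power down only if no one holds a reference and the module
	 * is still powered */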
	if ((atomic_read(&mod->refcount) == 0) && mod->powered) {
		if (mod->desc->prepare_poweroff
			&& mod->desc->prepare_poweroff(mod)) {
			/* If poweroff fails, retry */
			mutex_unlock(&mod->lock);
			schedule_delayed_work(&mod->powerdown,
					msecs_to_jiffies(
						mod->desc->powerdown_delay));
			return;
		}
		clock_disable(mod);
		powergate(mod);
		mod->powered = false;
		if (mod->parent)
			nvhost_module_idle(mod->parent);
	}
	mutex_unlock(&mod->lock);
}
Example #5
static int host1x_tickctrl_init_channel(struct platform_device *dev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	void __iomem *regs = pdata->channel->aperture;

	nvhost_module_busy(nvhost_get_parent(dev));

	/* Initialize the counters */
	writel(0, regs + host1x_channel_tickcount_hi_r());
	writel(0, regs + host1x_channel_tickcount_lo_r());
	writel(0, regs + host1x_channel_stallcount_hi_r());
	writel(0, regs + host1x_channel_stallcount_lo_r());
	writel(0, regs + host1x_channel_xfercount_hi_r());
	writel(0, regs + host1x_channel_xfercount_lo_r());

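	/* Enable the tick, stall and transfer counters */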
	writel(host1x_channel_channelctrl_enabletickcnt_f(1),
			regs + host1x_channel_channelctrl_r());
	writel(host1x_channel_stallctrl_enable_channel_stall_f(1),
			regs + host1x_channel_stallctrl_r());
	writel(host1x_channel_xferctrl_enable_channel_xfer_f(1),
			regs + host1x_channel_xferctrl_r());

	nvhost_module_idle(nvhost_get_parent(dev));

	host1x_tickctrl_debug_init(dev);

	return 0;
}
Example #6
/**
 * Increment syncpoint value from cpu, updating cache
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	nvhost_syncpt_incr_max(sp, id, 1);
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
}
Example #7
int nvhost_flcn_init(struct platform_device *dev)
{
	int err = 0;
	struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
	struct flcn *v = get_flcn(dev);

	nvhost_dbg_fn("in dev:%p v:%p", dev, v);

	if (!v) {
		v = kzalloc(sizeof(*v), GFP_KERNEL);
		if (!v) {
			dev_err(&dev->dev, "couldn't alloc flcn support");
			err = -ENOMEM;
			goto clean_up;
		}
		set_flcn(dev, v);
	}
	nvhost_dbg_fn("primed dev:%p v:%p", dev, v);

	err = flcn_read_ucode(dev, pdata->firmware_name);
	if (!err && !v->valid)
		err = -ENOENT;
	if (err)
		goto clean_up;

	nvhost_module_busy(dev);
	err = nvhost_flcn_boot(dev);
	nvhost_module_idle(dev);

	return err;

 clean_up:
	nvhost_err(&dev->dev, "failed");
	return err;
}
Example #8
static void powerdown_handler(struct work_struct *work)
{
	struct nvhost_module *mod;
	int refcount;

	mod = container_of(to_delayed_work(work), struct nvhost_module, powerdown);
	mutex_lock(&mod->lock);
	refcount = atomic_read(&mod->refcount);
	if ((refcount == 0) && mod->powered) {
		int i;
		if (mod->func)
			mod->func(mod, NVHOST_POWER_ACTION_OFF);
		for (i = 0; i < mod->num_clks; i++)
			clk_disable(mod->clk[i]);
		if (mod->powergate_id != -1) {
			tegra_periph_reset_assert(mod->clk[0]);
			tegra_powergate_power_off(mod->powergate_id);
		}
		mod->powered = false;
		if (mod->parent)
			nvhost_module_idle(mod->parent);
	} else if (mod->force_suspend) {
		pr_warn("tegra_grhost: module %s (refcnt %d) force_suspend powerdown skipped!\n",
			mod->name, refcount);
	}
	mod->force_suspend = false;
	mutex_unlock(&mod->lock);
}
Example #9
void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx)
{
	struct nvhost_master *dev = cpuaccess_to_dev(ctx);
	void __iomem *sync_regs = dev->sync_aperture;
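	/* writing 0 releases the mlock */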
	writel(0, sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
	nvhost_module_idle(&dev->mod);
}
Example #10
/**
 * Get the current syncpoint value
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
	u32 val;
	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	val = syncpt_op().update_min(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return val;
}
Example #11
/**
 * Increment syncpoint value from cpu, updating cache
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	if (nvhost_syncpt_client_managed(sp, id))
		nvhost_syncpt_incr_max(sp, id, 1);
	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
}
Example #12
static void podgov_set_user_ctl(struct device *dev, int user)
{
	struct platform_device *d = to_platform_device(dev);
	struct nvhost_device_data *pdata = platform_get_drvdata(d);
	struct devfreq *df = pdata->power_manager;
	struct podgov_info_rec *podgov;
	int old_user;

	if (!df)
		return;

	/* make sure the device is alive before doing any scaling */
	nvhost_module_busy_noresume(d);

	mutex_lock(&df->lock);
	podgov = df->data;

	trace_podgov_set_user_ctl(user);

	/* store the new user value */
	old_user = podgov->p_user;
	podgov->p_user = user;

	/* skip the adjustment if scaling (or the whole device) is turned
	 * off, or if user mode is not being newly enabled */
	if (!pm_runtime_active(&d->dev) || !podgov->enable ||
	    !(user && !old_user))
		goto exit_unlock;

	/* write request */
	podgov->adjustment_frequency = podgov->p_freq_request;
	podgov->adjustment_type = ADJUSTMENT_LOCAL;
	update_devfreq(df);

	mutex_unlock(&df->lock);
	nvhost_module_idle(d);

	stop_podgov_workers(podgov);

	return;

exit_unlock:
	mutex_unlock(&df->lock);
	nvhost_module_idle(d);
}
Example #13
/**
 * Get the current syncpoint base
 */
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
	u32 val;
	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	syncpt_op().read_wait_base(sp, id);
	val = sp->base_val[id];
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return val;
}
Example #14
/**
 * Get the current syncpoint value
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
	u32 val;

	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
	val = nvhost_syncpt_update_min(sp, id);
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return val;
}
Example #15
/**
 * Return current syncpoint value on success
 */
int nvhost_syncpt_read_check(struct nvhost_syncpt *sp, u32 id, u32 *val)
{
	if (nvhost_module_busy(syncpt_to_dev(sp)->dev))
		return -EINVAL;

	*val = syncpt_op().update_min(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);

	return 0;
}
Example #16
static void show_all(struct nvhost_master *m, struct output *o)
{
	nvhost_module_busy(m->dev);

	m->op.debug.show_mlocks(m, o);
	show_syncpts(m, o);
	show_channels(m, o);

	nvhost_module_idle(m->dev);
}
Example #17
/**
 * Get the current syncpoint base
 */
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
	u32 val;
	BUG_ON(!syncpt_op(sp).read_wait_base);
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
	syncpt_op(sp).read_wait_base(sp, id);
	val = sp->base_val[id];
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return val;
}
Example #18
static void show_all_no_fifo(struct nvhost_master *m, struct output *o)
{
	nvhost_module_busy(m->dev);

	nvhost_get_chip_ops()->debug.show_mlocks(m, o);
	show_syncpts(m, o);
	nvhost_debug_output(o, "---- channels ----\n");
	nvhost_device_list_for_all(o, show_channels_no_fifo);

	nvhost_module_idle(m->dev);
}
Example #19
static void show_all_no_fifo(struct nvhost_master *m, struct output *o)
{
	nvhost_module_busy(m->dev);

	nvhost_get_chip_ops()->debug.show_mlocks(m, o);
	show_syncpts(m, o);
	nvhost_debug_output(o, "---- channels ----\n");
	bus_for_each_dev(&(nvhost_bus_get())->nvhost_bus_type, NULL, o,
			show_channels_no_fifo);

	nvhost_module_idle(m->dev);
}
Example #20
int nvhost_vic03_init(struct platform_device *dev)
{
	int err = 0;
	struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
	struct vic03 *v = get_vic03(dev);
	char *fw_name;

	nvhost_dbg_fn("in dev:%p v:%p", dev, v);

	fw_name = vic_get_fw_name(dev);
	if (!fw_name) {
		dev_err(&dev->dev, "couldn't determine firmware name");
		return -EINVAL;
	}

	if (!v) {
		nvhost_dbg_fn("allocating vic03 support");
		v = kzalloc(sizeof(*v), GFP_KERNEL);
		if (!v) {
			dev_err(&dev->dev, "couldn't alloc vic03 support");
			err = -ENOMEM;
			goto clean_up;
		}
		set_vic03(dev, v);
		v->is_booted = false;
	}
	nvhost_dbg_fn("primed dev:%p v:%p", dev, v);

	v->host = nvhost_get_host(dev);

	if (!v->ucode.valid)
		err = vic03_read_ucode(dev, fw_name);
	if (err)
		goto clean_up;

	kfree(fw_name);
	fw_name = NULL;

	nvhost_module_busy(dev);
	err = vic03_boot(dev);
	nvhost_module_idle(dev);
	if (err)
		goto clean_up;

	if (pdata->scaling_init)
		nvhost_scale_hw_init(dev);

	return 0;

 clean_up:
	kfree(fw_name);
	nvhost_err(&dev->dev, "failed");

	return err;
}
Example #21
/**
 * Get the current syncpoint value
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
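	/* 0xffffffff is returned if the module cannot be powered on */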
	u32 val = 0xffffffff;
	int err;

	err = nvhost_module_busy(syncpt_to_dev(sp)->dev);
	if (err)
		return val;

	val = syncpt_op().update_min(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return val;
}
Example #22
static int host1x_tickctrl_xfercount(struct platform_device *dev, u64 *val)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	void __iomem *regs = pdata->channel->aperture;

	nvhost_module_busy(nvhost_get_parent(dev));
	*val = readl64(regs + host1x_channel_xfercount_hi_r(),
		regs + host1x_channel_xfercount_lo_r());
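	/* order the counter reads before the module is idled */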
	rmb();
	nvhost_module_idle(nvhost_get_parent(dev));

	return 0;
}
Example #23
void nvhost_read_module_regs(struct nvhost_device *ndev,
			u32 offset, int count, u32 *values)
{
	void __iomem *p = ndev->aperture + offset;

	nvhost_module_busy(ndev);
	while (count--) {
		*(values++) = readl(p);
		p += 4;
	}
	rmb();
	nvhost_module_idle(ndev);
}
Example #24
void nvhost_write_module_regs(struct nvhost_device *ndev,
			u32 offset, int count, const u32 *values)
{
	void __iomem *p = ndev->aperture + offset;

	nvhost_module_busy(ndev);
	while (count--) {
		writel(*(values++), p);
		p += 4;
	}
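	/* order the register writes before the module is idled */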
	wmb();
	nvhost_module_idle(ndev);
}
Example #25
int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
{
	struct nvhost_master *host = syncpt_to_dev(sp);
	u32 reg;

	nvhost_module_busy(host->dev);
	reg = syncpt_op().mutex_try_lock(sp, idx);
	if (reg) {
		nvhost_module_idle(host->dev);
		return -EBUSY;
	}
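	/* lock acquired; keep the module reference until unlock */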
	atomic_inc(&sp->lock_counts[idx]);
	return 0;
}
Example #26
static void host1x_tickctrl_deinit_channel(struct platform_device *dev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	void __iomem *regs = pdata->channel->aperture;

	nvhost_module_busy(nvhost_get_parent(dev));
	writel(host1x_channel_stallctrl_enable_channel_stall_f(0),
			regs + host1x_channel_stallctrl_r());
	writel(host1x_channel_xferctrl_enable_channel_xfer_f(0),
			regs + host1x_channel_xferctrl_r());
	writel(host1x_channel_channelctrl_enabletickcnt_f(0),
			regs + host1x_channel_channelctrl_r());
	nvhost_module_idle(nvhost_get_parent(dev));
}
Example #27
/**
 * Increment syncpoint value from cpu, updating cache
 */
int nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	int err;

	err = nvhost_module_busy(syncpt_to_dev(sp)->dev);
	if (err)
		return err;

	if (nvhost_syncpt_client_managed(sp, id))
		nvhost_syncpt_incr_max(sp, id, 1);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);

	return 0;
}
Example #28
void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
			u32 offset, size_t size, void *values)
{
	struct nvhost_master *dev = cpuaccess_to_dev(ctx);
	void __iomem *p = ctx->regs[module] + offset;
	u32 *out = values;
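	/* register accesses are done as 32-bit words */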
	BUG_ON(size & 3);
	size >>= 2;
	nvhost_module_busy(&dev->mod);
	while (size--) {
		*(out++) = readl(p);
		p += 4;
	}
	rmb();
	nvhost_module_idle(&dev->mod);
}
Example #29
int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx)
{
	struct nvhost_master *dev = cpuaccess_to_dev(ctx);
	void __iomem *sync_regs = dev->sync_aperture;
	u32 reg;

	/* mlock registers return 0 when the lock is acquired.
	 * Writing 0 clears the lock. */
	nvhost_module_busy(&dev->mod);
	reg = readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
	if (reg) {
		nvhost_module_idle(&dev->mod);
		return -EBUSY;
	}
	return 0;
}
Example #30
void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
			u32 offset, size_t size, const void *values)
{
	struct nvhost_master *dev = cpuaccess_to_dev(ctx);
	void __iomem *p = ctx->regs[module] + offset;
	const u32 *in = values;
	BUG_ON(size & 3);
	size >>= 2;
	nvhost_module_busy(&dev->mod);
	while (size--) {
		writel(*(in++), p);
		p += 4;
	}
	wmb();
	nvhost_module_idle(&dev->mod);
}