Example #1
static void job_free(struct kref *ref)
{
	struct nvhost_job *job = container_of(ref, struct nvhost_job, ref);

	if (job->gathers)
		nvmap_munmap(job->gather_mem, job->gathers);
	if (job->gather_mem)
		nvmap_free(job->nvmap, job->gather_mem);
	if (job->nvmap)
		nvmap_client_put(job->nvmap);
	vfree(job);
}
Example #2
static int tegra_dc_ext_set_nvmap_fd(struct tegra_dc_ext_user *user,
				     int fd)
{
	struct nvmap_client *nvmap = NULL;

	if (fd >= 0) {
		nvmap = nvmap_client_get_file(fd);
		if (IS_ERR(nvmap))
			return PTR_ERR(nvmap);
	}

	if (user->nvmap)
		nvmap_client_put(user->nvmap);

	user->nvmap = nvmap;

	return 0;
}
Example #3
void tegra_dc_ext_unregister(struct tegra_dc_ext *ext)
{
	int i;

	for (i = 0; i < ext->dc->n_windows; i++) {
		struct tegra_dc_ext_win *win = &ext->win[i];

		flush_workqueue(win->flip_wq);
		destroy_workqueue(win->flip_wq);
	}

	nvmap_client_put(ext->nvmap);
	device_del(ext->dev);
	cdev_del(&ext->cdev);

	kfree(ext);

	head_count--;
}
Example #4
static int tegra_dc_release(struct inode *inode, struct file *filp)
{
	struct tegra_dc_ext_user *user = filp->private_data;
	struct tegra_dc_ext *ext = user->ext;
	unsigned int i;

	for (i = 0; i < DC_N_WINDOWS; i++) {
		if (ext->win[i].user == user)
			tegra_dc_ext_put_window(user, i);
	}
	if (ext->cursor.user == user)
		tegra_dc_ext_put_cursor(user);

	if (user->nvmap)
		nvmap_client_put(user->nvmap);

	kfree(user);

	return 0;
}
Example #5
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(priv->ch->desc->name);

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, &priv->ch->mod, priv);
	nvhost_putchannel(priv->ch, priv->hwctx);

	if (priv->hwctx)
		priv->ch->ctxhandler.put(priv->hwctx);

	if (priv->job)
		nvhost_job_put(priv->job);

	nvmap_client_put(priv->nvmap);
	kfree(priv);
	return 0;
}
Example #6
static int tegra_overlay_release(struct inode *inode, struct file *filp)
{
	struct overlay_client *client = filp->private_data;
	unsigned long flags;
	int i;

	mutex_lock(&client->dev->overlays_lock);
	for (i = 0; i < client->dev->dc->n_windows; i++)
		if (client->dev->overlays[i].owner == client)
			tegra_overlay_put_locked(client, i);
	mutex_unlock(&client->dev->overlays_lock);

	spin_lock_irqsave(&client->dev->clients_lock, flags);
	list_del(&client->list);
	spin_unlock_irqrestore(&client->dev->clients_lock, flags);

	nvmap_client_put(client->user_nvmap);
	put_task_struct(client->task);

	kfree(client);
	return 0;
}
Example #7
static int tegra_overlay_ioctl_set_nvmap_fd(struct overlay_client *client,
					    void __user *arg)
{
	int fd;
	struct nvmap_client *nvmap = NULL;

	if (copy_from_user(&fd, arg, sizeof(fd)))
		return -EFAULT;

	if (fd < 0)
		return -EINVAL;

	nvmap = nvmap_client_get_file(fd);
	if (IS_ERR(nvmap))
		return PTR_ERR(nvmap);

	if (client->user_nvmap)
		nvmap_client_put(client->user_nvmap);

	client->user_nvmap = nvmap;

	return 0;
}
Example #8
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(priv->ch->desc->name);

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, &priv->ch->mod, priv);
	nvhost_putchannel(priv->ch, priv->hwctx);

	if (priv->hwctx)
		priv->ch->ctxhandler.put(priv->hwctx);

	if (priv->gathers)
		nvmap_munmap(priv->gather_mem, priv->gathers);

	if (!IS_ERR_OR_NULL(priv->gather_mem))
		nvmap_free(priv->ch->dev->nvmap, priv->gather_mem);

	nvmap_client_put(priv->nvmap);
	kfree(priv);
	return 0;
}
Example #9
struct tegra_dc_ext *tegra_dc_ext_register(struct nvhost_device *ndev,
					   struct tegra_dc *dc)
{
	int ret;
	struct tegra_dc_ext *ext;
	int devno;

	ext = kzalloc(sizeof(*ext), GFP_KERNEL);
	if (!ext)
		return ERR_PTR(-ENOMEM);

	BUG_ON(!tegra_dc_ext_devno);
	devno = tegra_dc_ext_devno + head_count + 1;

	cdev_init(&ext->cdev, &tegra_dc_devops);
	ext->cdev.owner = THIS_MODULE;
	ret = cdev_add(&ext->cdev, devno, 1);
	if (ret) {
		dev_err(&ndev->dev, "Failed to create character device\n");
		goto cleanup_alloc;
	}

	ext->dev = device_create(tegra_dc_ext_class,
				 &ndev->dev,
				 devno,
				 NULL,
				 "tegra_dc_%d",
				 ndev->id);

	if (IS_ERR(ext->dev)) {
		ret = PTR_ERR(ext->dev);
		goto cleanup_cdev;
	}

	ext->dc = dc;

	ext->nvmap = nvmap_create_client(nvmap_dev, "tegra_dc_ext");
	if (!ext->nvmap) {
		ret = -ENOMEM;
		goto cleanup_device;
	}

	ret = tegra_dc_ext_setup_windows(ext);
	if (ret)
		goto cleanup_nvmap;

	mutex_init(&ext->cursor.lock);

	head_count++;

	return ext;

cleanup_nvmap:
	nvmap_client_put(ext->nvmap);

cleanup_device:
	device_del(ext->dev);

cleanup_cdev:
	cdev_del(&ext->cdev);

cleanup_alloc:
	kfree(ext);

	return ERR_PTR(ret);
}
Example #10
/**
 * For all sync queue entries that have already finished according to the
 * current sync point registers:
 *  - unpin & unref their mems
 *  - pop their push buffer slots
 *  - remove them from the sync queue
 * This is normally called from the host code's worker thread, but can be
 * called manually if necessary.
 * Must be called with the cdma lock held.
 */
static void update_cdma(struct nvhost_cdma *cdma)
{
	bool signal = false;
	struct nvhost_master *dev = cdma_to_dev(cdma);

	BUG_ON(!cdma->running);

	/*
	 * Walk the sync queue, reading the sync point registers as necessary,
	 * to consume as many sync queue entries as possible without blocking
	 */
	for (;;) {
		u32 syncpt_id, syncpt_val;
		u32 timeout;
		struct nvhost_userctx_timeout *timeout_ref = NULL;
		unsigned int nr_slots, nr_handles;
		struct nvhost_syncpt *sp = &dev->syncpt;
		struct nvmap_handle **handles;
		struct nvmap_client *nvmap;
		u32 *sync;

		sync = sync_queue_head(&cdma->sync_queue);
		if (!sync) {
			if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
				signal = true;
			break;
		}

		syncpt_id = sync[SQ_IDX_SYNCPT_ID];
		syncpt_val = sync[SQ_IDX_SYNCPT_VAL];
		timeout = sync[SQ_IDX_TIMEOUT];
		timeout_ref = (struct nvhost_userctx_timeout *)
				sync[SQ_IDX_TIMEOUT_CTX];

		BUG_ON(syncpt_id == NVSYNCPT_INVALID);

		/* Check whether this syncpt has completed, and bail if not */
		if (!nvhost_syncpt_min_cmp(sp, syncpt_id, syncpt_val)) {
			/* Start timer on next pending syncpt */
			if (timeout) {
				nvhost_cdma_start_timer(cdma, syncpt_id,
					syncpt_val, timeout_ref);
			}
			break;
		}

		/* Cancel timeout, when a buffer completes */
		if (cdma->timeout.ctx_timeout)
			stop_cdma_timer(cdma);

		nr_slots = sync[SQ_IDX_NUM_SLOTS];
		nr_handles = sync[SQ_IDX_NUM_HANDLES];
		nvmap = (struct nvmap_client *)sync[SQ_IDX_NVMAP_CTX];
		handles = (struct nvmap_handle **)&sync[SQ_IDX_HANDLES];

		BUG_ON(!nvmap);

		/* Unpin the memory */
		nvmap_unpin_handles(nvmap, handles, nr_handles);
		nvmap_client_put(nvmap);

		/* Pop push buffer slots */
		if (nr_slots) {
			struct push_buffer *pb = &cdma->push_buffer;
			BUG_ON(!cdma_pb_op(cdma).pop_from);
			cdma_pb_op(cdma).pop_from(pb, nr_slots);
			if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
				signal = true;
		}

		dequeue_sync_queue_head(&cdma->sync_queue);
		if (cdma->event == CDMA_EVENT_SYNC_QUEUE_SPACE)
			signal = true;
	}

	/* Wake up CdmaWait() if the requested event happened */
	if (signal) {
		cdma->event = CDMA_EVENT_NONE;
		up(&cdma->sem);
	}
}
Example #11
static int __devinit nvhost_probe(struct platform_device *pdev)
{
	struct nvhost_master *host;
	struct resource *regs, *intr0, *intr1;
	int i, err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!regs || !intr0 || !intr1) {
		dev_err(&pdev->dev, "missing required platform resources\n");
		return -ENXIO;
	}

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->pdev = pdev;

	host->nvmap = nvmap_create_client(nvmap_dev, "nvhost");
	if (!host->nvmap) {
		dev_err(&pdev->dev, "unable to create nvmap client\n");
		err = -EIO;
		goto fail;
	}

	host->reg_mem = request_mem_region(regs->start,
					resource_size(regs), pdev->name);
	if (!host->reg_mem) {
		dev_err(&pdev->dev, "failed to get host register memory\n");
		err = -ENXIO;
		goto fail;
	}
	host->aperture = ioremap(regs->start, resource_size(regs));
	if (!host->aperture) {
		dev_err(&pdev->dev, "failed to remap host registers\n");
		err = -ENXIO;
		goto fail;
	}

	err = nvhost_init_chip_support(host);
	if (err) {
		dev_err(&pdev->dev, "failed to init chip support\n");
		goto fail;
	}

	for (i = 0; i < host->nb_channels; i++) {
		struct nvhost_channel *ch = &host->channels[i];
		BUG_ON(!host_channel_op(host).init);
		err = host_channel_op(host).init(ch, host, i);
		if (err < 0) {
			dev_err(&pdev->dev, "failed to init channel %d\n", i);
			goto fail;
		}
	}

	err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
	if (err)
		goto fail;

	err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
	if (err)
		goto fail;

	err = nvhost_user_init(host);
	if (err)
		goto fail;

	err = nvhost_module_init(&host->mod, "host1x",
			&hostdesc, NULL, &pdev->dev);
	for (i = 0; i < host->nb_channels; i++) {
		struct nvhost_channel *ch = &host->channels[i];
		nvhost_module_preinit(ch->desc->name,
				&ch->desc->module);
	}

	if (err)
		goto fail;


	platform_set_drvdata(pdev, host);

	clk_enable(host->mod.clk[0]);
	nvhost_syncpt_reset(&host->syncpt);
	clk_disable(host->mod.clk[0]);

	nvhost_bus_register(host);

	nvhost_debug_init(host);

	dev_info(&pdev->dev, "initialized\n");
	return 0;

fail:
	nvhost_remove_chip_support(host);
	if (host->nvmap)
		nvmap_client_put(host->nvmap);
	kfree(host);
	return err;
}
Example #12
static long nvhost_channelctl(struct file *filp,
	unsigned int cmd, unsigned long arg)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
	int err = 0;

	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
		(_IOC_NR(cmd) == 0) ||
		(_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
		return -EFAULT;

	BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	switch (cmd) {
	case NVHOST_IOCTL_CHANNEL_FLUSH:
		err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
		break;
	case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
		err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
		break;
	case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
	{
		struct nvhost_submit_hdr_ext *hdr;

		if (priv->hdr.num_relocs ||
		    priv->num_relocshifts ||
		    priv->hdr.num_cmdbufs ||
		    priv->hdr.num_waitchks) {
			reset_submit(priv);
			dev_err(&priv->ch->dev->pdev->dev,
				"channel submit out of sync\n");
			err = -EIO;
			break;
		}

		hdr = (struct nvhost_submit_hdr_ext *)buf;
		if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
			dev_err(&priv->ch->dev->pdev->dev,
				"submit version %d > max supported %d\n",
				hdr->submit_version,
				NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
			err = -EINVAL;
			break;
		}
		memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
		err = set_submit(priv);
		trace_nvhost_ioctl_channel_submit(priv->ch->desc->name,
			priv->hdr.submit_version,
			priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
			priv->hdr.num_waitchks,
			priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
		/* the host syncpt ID is used by the RM and must never be given out */
		BUG_ON(priv->ch->desc->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST));
		((struct nvhost_get_param_args *)buf)->value =
			priv->ch->desc->syncpts;
		break;
	case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
		((struct nvhost_get_param_args *)buf)->value =
			priv->ch->desc->waitbases;
		break;
	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
		((struct nvhost_get_param_args *)buf)->value =
			priv->ch->desc->modulemutexes;
		break;
	case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
	{
		int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
		struct nvmap_client *new_client = nvmap_client_get_file(fd);

		if (IS_ERR(new_client)) {
			err = PTR_ERR(new_client);
			break;
		}

		if (priv->nvmap)
			nvmap_client_put(priv->nvmap);

		priv->nvmap = new_client;
		break;
	}
	case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
		err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
	{
		unsigned long rate;
		struct nvhost_clk_rate_args *arg =
				(struct nvhost_clk_rate_args *)buf;

		err = nvhost_module_get_rate(priv->ch->dev,
				&priv->ch->mod, &rate, 0);
		if (err == 0)
			arg->rate = rate;
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
	{
		struct nvhost_clk_rate_args *arg =
				(struct nvhost_clk_rate_args *)buf;
		unsigned long rate = (unsigned long)arg->rate;

		err = nvhost_module_set_rate(priv->ch->dev,
				&priv->ch->mod, priv, rate, 0);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
		priv->timeout =
			(u32)((struct nvhost_set_timeout_args *)buf)->timeout;
		dev_dbg(&priv->ch->dev->pdev->dev,
			"%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
			__func__, priv->timeout, priv);
		break;
	case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
		((struct nvhost_get_param_args *)buf)->value =
				priv->hwctx->has_timedout;
		break;
	case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
		priv->priority =
			(u32)((struct nvhost_set_priority_args *)buf)->priority;
		break;
	default:
		err = -ENOTTY;
		break;
	}

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));

	return err;
}
Example #13
void nvhost_nvmap_put_mgr(struct mem_mgr *mgr)
{
	nvmap_client_put((struct nvmap_client *)mgr);
}
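Taken together, the examples show a reference-counting discipline: a client acquired with nvmap_create_client() or nvmap_client_get_file() is balanced by exactly one nvmap_client_put(), either on an error path or in the release handler. The fragment below is a minimal sketch of that pairing, modeled on Examples 2 and 7; struct example_ctx and the function names are illustrative only, not part of any driver.

/* Illustrative sketch only: example_ctx and these function names are hypothetical. */
struct example_ctx {
	struct nvmap_client *nvmap;
};

static int example_set_client_fd(struct example_ctx *ctx, int fd)
{
	/* Takes a reference on the client behind the user-supplied fd. */
	struct nvmap_client *nvmap = nvmap_client_get_file(fd);

	if (IS_ERR(nvmap))
		return PTR_ERR(nvmap);

	/* Drop the reference to any previously installed client. */
	if (ctx->nvmap)
		nvmap_client_put(ctx->nvmap);

	ctx->nvmap = nvmap;
	return 0;
}

static void example_release(struct example_ctx *ctx)
{
	/* Balance the earlier get before freeing the context. */
	if (ctx->nvmap)
		nvmap_client_put(ctx->nvmap);
	kfree(ctx);
}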