Example #1
static void show_syncpts(struct nvhost_master *m, struct output *o)
{
	int i;
	BUG_ON(!nvhost_get_chip_ops()->syncpt.name);
	nvhost_debug_output(o, "---- syncpts ----\n");
	for (i = 0; i < nvhost_syncpt_nb_pts(&m->syncpt); i++) {
		u32 max = nvhost_syncpt_read_max(&m->syncpt, i);
		u32 min = nvhost_syncpt_update_min(&m->syncpt, i);
		if (!min && !max)
			continue;
		nvhost_debug_output(o, "id %d (%s) min %d max %d\n",
				i, nvhost_get_chip_ops()->syncpt.name(&m->syncpt, i),
				min, max);
	}

	for (i = 0; i < nvhost_syncpt_nb_pts(&m->syncpt); i++) {
		u32 base_val;
		base_val = nvhost_syncpt_read_wait_base(&m->syncpt, i);
		if (base_val)
			nvhost_debug_output(o, "waitbase id %d val %d\n",
					i, base_val);
	}

	nvhost_debug_output(o, "\n");
}
Example #2
/**
 * Get the current syncpoint value
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
	u32 val;

	nvhost_module_busy(&syncpt_to_dev(sp)->mod);
	val = nvhost_syncpt_update_min(sp, id);
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return val;
}
Example #3
int nvhost_job_pin(struct nvhost_job *job, struct nvhost_syncpt *sp)
{
	int err = 0, i = 0;
	phys_addr_t gather_phys = 0;
	void *gather_addr = NULL;
	unsigned long waitchk_mask = job->waitchk_mask;

	/* get current syncpt values for waitchk */
	for_each_set_bit(i, &waitchk_mask, sizeof(job->waitchk_mask))
		nvhost_syncpt_update_min(sp, i);

	/* pin gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct nvhost_job_gather *g = &job->gathers[i];

		/* process each gather mem only once */
		if (!g->ref) {
			g->ref = mem_op().get(job->memmgr,
					job->gathers[i].mem_id);
			if (IS_ERR(g->ref)) {
				err = PTR_ERR(g->ref);
				g->ref = NULL;
				break;
			}

			gather_phys = mem_op().pin(job->memmgr, g->ref);
			if (IS_ERR((void *)gather_phys)) {
				mem_op().put(job->memmgr, g->ref);
				err = gather_phys;
				break;
			}

			/* store the gather ref into unpin array */
			job->unpins[job->num_unpins++] = g->ref;

			gather_addr = mem_op().mmap(g->ref);
			if (!gather_addr) {
				err = -ENOMEM;
				break;
			}

			err = do_relocs(job, g->mem_id, gather_addr);
			if (!err)
				err = do_waitchks(job, sp,
						g->mem_id, gather_addr);
			mem_op().munmap(g->ref, gather_addr);

			if (err)
				break;
		}
		g->mem = gather_phys + g->offset;
	}
	wmb();

	return err;
}
Example #4
void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
{
	u32 i;
	for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
		u32 max = nvhost_syncpt_read_max(sp, i);
		if (!max)
			continue;
		dev_info(&syncpt_to_dev(sp)->pdev->dev,
			"id %d (%s) min %d max %d\n",
			i, nvhost_syncpt_name(i),
			nvhost_syncpt_update_min(sp, i), max);

	}
}
Example #5
/**
 * Updates sw shadow state for client managed registers
 */
void nvhost_syncpt_save(struct nvhost_syncpt *sp)
{
	u32 i;

	for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
		if (client_managed(i))
			nvhost_syncpt_update_min(sp, i);
		else
			BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
	}

	for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
		read_syncpt_wait_base(sp, i);
}
Example #6
/* check for old WAITs to be removed (avoiding a wrap) */
static int t20_syncpt_wait_check(struct nvhost_syncpt *sp,
				 struct nvmap_client *nvmap,
				 u32 waitchk_mask,
				 struct nvhost_waitchk *wait,
				 int num_waitchk)
{
	u32 idx;
	int err = 0;

	/* get current syncpt values */
	for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
		if (BIT(idx) & waitchk_mask)
			nvhost_syncpt_update_min(sp, idx);
	}

	BUG_ON(!wait && !num_waitchk);

	/* compare syncpt vs wait threshold */
	while (num_waitchk) {
		u32 override;

		BUG_ON(wait->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);
		trace_nvhost_syncpt_wait_check(wait->mem, wait->offset,
				wait->syncpt_id, wait->thresh);
		if (nvhost_syncpt_is_expired(sp,
					wait->syncpt_id, wait->thresh)) {
			/*
			 * NULL an already satisfied WAIT_SYNCPT host method,
			 * by patching its args in the command stream. The
			 * method data is changed to reference a reserved
			 * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
			 * syncpt with a matching threshold value of 0, so
			 * is guaranteed to be popped by the host HW.
			 */
			dev_dbg(&syncpt_to_dev(sp)->dev->dev,
			    "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
			    wait->syncpt_id,
			    syncpt_op(sp).name(sp, wait->syncpt_id),
			    wait->thresh,
			    nvhost_syncpt_read_min(sp, wait->syncpt_id));

			/* patch the wait */
			override = nvhost_class_host_wait_syncpt(
					NVSYNCPT_GRAPHICS_HOST, 0);
			err = nvmap_patch_word(nvmap,
					(struct nvmap_handle *)wait->mem,
					wait->offset, override);
			if (err)
				break;
		}

		/* advance to the next wait check */
		wait++;
		num_waitchk--;
	}

	return err;
}
Example #7
/**
 * Updates sw shadow state for client managed registers
 */
void nvhost_syncpt_save(struct nvhost_syncpt *sp)
{
	u32 i;

	for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
		if (client_managed(i))
			nvhost_syncpt_update_min(sp, i);
		else
			//BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
			if (!nvhost_syncpt_min_eq_max(sp, i)) //A temporary workaround....
				printk("BUG! sp=%p, i=%d\n", sp, i);
	}

	for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
		read_syncpt_wait_base(sp, i);
}
Example #8
/**
 * Updates sw shadow state for client managed registers
 */
void nvhost_syncpt_save(struct nvhost_syncpt *sp)
{
	u32 i;

	for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
		if (client_managed(i))
			nvhost_syncpt_update_min(sp, i);
		else
			BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
	}

	for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
		read_syncpt_wait_base(sp, i);
#ifdef CONFIG_MACH_N1
	sp->restore_needed = true;
#endif
}
Example #9
/**
 * Begin a cdma submit
 */
int nvhost_cdma_begin(struct nvhost_cdma *cdma,
		       struct nvhost_userctx_timeout *timeout)
{
	mutex_lock(&cdma->lock);

	if (timeout && timeout->has_timedout) {
		struct nvhost_master *dev = cdma_to_dev(cdma);
		u32 min, max;

		min = nvhost_syncpt_update_min(&dev->syncpt,
			cdma->timeout.syncpt_id);
		max = nvhost_syncpt_read_min(&dev->syncpt,
			cdma->timeout.syncpt_id);

		dev_dbg(&dev->pdev->dev,
			"%s: skip timed out ctx submit (min = %d, max = %d)\n",
			__func__, min, max);
		mutex_unlock(&cdma->lock);
		return -ETIMEDOUT;
	}
	if (timeout->timeout) {
		/* init state on first submit with timeout value */
		if (!cdma->timeout.initialized) {
			int err;
			BUG_ON(!cdma_op(cdma).timeout_init);
			err = cdma_op(cdma).timeout_init(cdma,
				timeout->syncpt_id);
			if (err) {
				mutex_unlock(&cdma->lock);
				return err;
			}
		}
	}
	if (!cdma->running) {
		BUG_ON(!cdma_op(cdma).start);
		cdma_op(cdma).start(cdma);
	}
	cdma->slots_free = 0;
	cdma->slots_used = 0;
	cdma->first_get = cdma_pb_op(cdma).putptr(&cdma->push_buffer);
	return 0;
}
Example #10
/* check for old WAITs to be removed (avoiding a wrap) */
int nvhost_syncpt_wait_check(struct nvmap_client *nvmap,
			struct nvhost_syncpt *sp, u32 waitchk_mask,
			struct nvhost_waitchk *waitp, u32 waitchks)
{
	u32 idx;
	int err = 0;

	/* get current syncpt values */
	for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
		if (BIT(idx) & waitchk_mask) {
			nvhost_syncpt_update_min(sp, idx);
		}
	}

	BUG_ON(!waitp);

	/* compare syncpt vs wait threshold */
	while (waitchks) {
		u32 syncpt, override;

		BUG_ON(waitp->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);

		syncpt = atomic_read(&sp->min_val[waitp->syncpt_id]);
		if (nvhost_syncpt_wrapping_comparison(syncpt, waitp->thresh)) {

			/* wait has completed already, so can be removed */
			dev_dbg(&syncpt_to_dev(sp)->pdev->dev,
					"drop WAIT id %d (%s) thresh 0x%x, syncpt 0x%x\n",
					waitp->syncpt_id,  nvhost_syncpt_name(waitp->syncpt_id),
					waitp->thresh, syncpt);

			/* move wait to a kernel reserved syncpt (that's always 0) */
			override = nvhost_class_host_wait_syncpt(NVSYNCPT_GRAPHICS_HOST, 0);

			/* patch the wait */
			err = nvmap_patch_wait(nvmap,
						(struct nvmap_handle *)waitp->mem,
						waitp->offset, override);
			if (err)
				break;
		}

		/* advance to the next wait check */
		waitp++;
		waitchks--;
	}

	return err;
}
Example #11
/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout, u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	int err = 0;

	if (value)
		*value = 0;

	BUG_ON(!check_max(sp, id, thresh));

	/* first check cache */
	if (nvhost_syncpt_min_cmp(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		return 0;
	}

	/* keep host alive */
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);

	if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
		/* try to read from register */
		u32 val = nvhost_syncpt_update_min(sp, id);
		if ((s32)(val - thresh) >= 0) {
			if (value)
				*value = val;
			goto done;
		}
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq, &ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
						nvhost_syncpt_min_cmp(sp, id, thresh),
						check);
		if (remain > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			err = 0;
			break;
		}
		if (remain < 0) {
			err = remain;
			break;
		}
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= check;
		if (timeout) {
			dev_warn(&syncpt_to_dev(sp)->pdev->dev,
				"syncpoint id %d (%s) stuck waiting %d\n",
				id, nvhost_syncpt_name(id), thresh);
			nvhost_syncpt_debug(sp);
		}
	}
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return err;
}
Example #12
/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	int err = 0;
	//struct nvhost_dev *dev = syncpt_to_dev(sp);

	BUG_ON(!check_max(sp, id, thresh));

	/* first check cache */
	if (nvhost_syncpt_min_cmp(sp, id, thresh))
		return 0;

	/* keep host alive */
	nvhost_module_busy(&syncpt_to_dev(sp)->mod);

	if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
		/* try to read from register */
		u32 val = nvhost_syncpt_update_min(sp, id);
		if ((s32)(val - thresh) >= 0)
			goto done;
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq, &ref);
	if (err)
		goto done;

	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		err = wait_event_interruptible_timeout(wq,
						nvhost_syncpt_min_cmp(sp, id, thresh),
						check);
		if (err != 0)
			break;
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= SYNCPT_CHECK_PERIOD;
		if (timeout) {
			dev_warn(&syncpt_to_dev(sp)->pdev->dev,
				"syncpoint id %d (%s) stuck waiting %d  timeout=%d\n",
				id, nvhost_syncpt_name(id), thresh, timeout);
			/* A wait queue in the nvhost driver may run frequently
			 * during early suspend/late resume. If these logs were
			 * printed, early suspend/late resume could be blocked,
			 * which would then trigger the early suspend/late
			 * resume watchdog, so these logs are disabled. */
			/*
			nvhost_syncpt_debug(sp);
			nvhost_channel_fifo_debug(dev);
			nvhost_sync_reg_dump(dev);
			*/
		}
	}
	if (err > 0)
		err = 0;
	else if (err == 0)
		err = -EAGAIN;
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
	nvhost_module_idle(&syncpt_to_dev(sp)->mod);
	return err;
}
Example #13
void nvhost_cdma_update_sync_queue(struct nvhost_cdma *cdma,
		struct nvhost_syncpt *syncpt, struct device *dev)
{
	u32 first_get, get_restart;
	u32 syncpt_incrs, nr_slots;
	bool clear_ctxsave, exec_ctxsave;
	struct sync_queue *queue = &cdma->sync_queue;
	u32 *sync = sync_queue_head(queue);
	u32 syncpt_val = nvhost_syncpt_update_min(syncpt,
			cdma->timeout.syncpt_id);

	dev_dbg(dev,
		"%s: starting cleanup (thresh %d, queue rd 0x%x wr 0x%x)\n",
		__func__,
		syncpt_val, queue->read, queue->write);

	/*
	 * Move the sync_queue read pointer to the first entry that hasn't
	 * completed based on the current HW syncpt value. It's likely there
	 * won't be any (i.e. we're still at the head), but covers the case
	 * where a syncpt incr happens just prior/during the teardown.
	 */

	dev_dbg(dev,
		"%s: skip completed buffers still in sync_queue\n",
		__func__);

	while (sync != (queue->buffer + queue->write)) {
		/* move read ptr to first blocked entry */
		if (syncpt_val < sync[SQ_IDX_SYNCPT_VAL])
			break;	/* not completed */

		dump_sync_queue_entry(cdma, sync);
		sync = advance_next_entry(cdma, sync);
	}

	/*
	 * Walk the sync_queue, first incrementing with the CPU syncpts that
	 * are partially executed (the first buffer) or fully skipped while
	 * still in the current context (slots are also NOP-ed).
	 *
	 * At the point contexts are interleaved, syncpt increments must be
	 * done inline with the pushbuffer from a GATHER buffer to maintain
	 * the order (slots are modified to be a GATHER of syncpt incrs).
	 *
	 * Note: save in get_restart the location where the timed out buffer
	 * started in the PB, so we can start the refetch from there (with the
	 * modified NOP-ed PB slots). This lets things appear to have completed
	 * properly for this buffer and resources are freed.
	 */

	dev_dbg(dev,
		"%s: perform CPU incr on pending same ctx buffers\n",
		__func__);

	get_restart = cdma->last_put;
	if (sync != (queue->buffer + queue->write))
		get_restart = sync[SQ_IDX_FIRST_GET];

	/* do CPU increments */
	while (sync != (queue->buffer + queue->write)) {

		/* different context, gets us out of this loop */
		if ((void *)sync[SQ_IDX_TIMEOUT_CTX] !=
				cdma->timeout.ctx_timeout)
			break;

		syncpt_incrs = (sync[SQ_IDX_SYNCPT_VAL] - syncpt_val);
		first_get = sync[SQ_IDX_FIRST_GET];
		nr_slots = sync[SQ_IDX_NUM_SLOTS];

		/* won't need a timeout when replayed */
		sync[SQ_IDX_TIMEOUT] = 0;

		dev_dbg(dev,
			"%s: CPU incr (%d)\n", __func__, syncpt_incrs);

		dump_sync_queue_entry(cdma, sync);

		/* safe to use CPU to incr syncpts */
		cdma_op(cdma).timeout_cpu_incr(cdma, first_get,
			syncpt_incrs, nr_slots);
		syncpt_val += syncpt_incrs;
		sync = advance_next_entry(cdma, sync);
	}

	dev_dbg(dev,
		"%s: GPU incr blocked interleaved ctx buffers\n",
		__func__);

	clear_ctxsave = true;
	exec_ctxsave = false;

	/* setup GPU increments */
	while (sync != (queue->buffer + queue->write)) {

		syncpt_incrs = (sync[SQ_IDX_SYNCPT_VAL] - syncpt_val);
		first_get = sync[SQ_IDX_FIRST_GET];
		nr_slots = sync[SQ_IDX_NUM_SLOTS];

		/* same context, increment in the pushbuffer */
		if ((void *)sync[SQ_IDX_TIMEOUT_CTX] ==
				cdma->timeout.ctx_timeout) {

			/* won't need a timeout when replayed */
			sync[SQ_IDX_TIMEOUT] = 0;

			/* update buffer's syncpts in the pushbuffer */
			cdma_op(cdma).timeout_pb_incr(cdma, first_get,
				syncpt_incrs, nr_slots, exec_ctxsave);

			clear_ctxsave = true;
			exec_ctxsave = false;
		} else {
			dev_dbg(dev,
				"%s: switch to a different userctx\n",
				__func__);
			/*
			 * If previous context was the timed out context
			 * then clear its CTXSAVE in this slot.
			 */
			if (clear_ctxsave) {
				cdma_op(cdma).timeout_clear_ctxsave(cdma,
					first_get, nr_slots);
				clear_ctxsave = false;
			}
			exec_ctxsave = true;
		}

		dump_sync_queue_entry(cdma, sync);

		syncpt_val = sync[SQ_IDX_SYNCPT_VAL];
		sync = advance_next_entry(cdma, sync);
	}

	dev_dbg(dev,
		"%s: finished sync_queue modification\n", __func__);

	/* roll back DMAGET and start up channel again */
	cdma_op(cdma).timeout_teardown_end(cdma, get_restart);

	cdma->timeout.ctx_timeout->has_timedout = true;
	mutex_unlock(&cdma->lock);
}