Example #1
/**
 * API to mark in-use syncpt as free
 */
void nvhost_free_syncpt(u32 id)
{
	struct nvhost_master *host = nvhost;
	struct nvhost_syncpt *sp = &host->syncpt;
	struct device *d = &host->dev->dev;

	/* first check if we are freeing a valid syncpt */
	if (!sp->assigned[id]) {
		nvhost_warn(d, "trying to free unused syncpt %u\n", id);
		return;
	}
	if (!nvhost_syncpt_client_managed(sp, id) &&
			!nvhost_syncpt_min_eq_max(sp, id)) {
		nvhost_err(d,
		    "trying to free host managed syncpt still in use %u\n", id);
		return;
	}

	mutex_lock(&sp->syncpt_mutex);

	/* set to default state */
	if (nvhost_syncpt_client_managed(sp, id))
		nvhost_syncpt_set_min_eq_max(sp, id);
	sp->assigned[id] = false;
	sp->client_managed[id] = false;
	kfree(sp->syncpt_names[id]);
	sp->syncpt_names[id] = NULL;

	mutex_unlock(&sp->syncpt_mutex);
}
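The two guards above encode the freeing invariant: a syncpoint may be released if it is client managed, or once every outstanding increment has completed (min == max). A standalone restatement of that predicate (a hypothetical helper written for this note, not part of the driver):

/*
 * Hypothetical restatement of the guards in nvhost_free_syncpt():
 * freeing is safe for client-managed syncpoints, or once the shadow
 * min value has caught up with max (no increments in flight).
 */
static bool syncpt_safe_to_free(bool client_managed, u32 min, u32 max)
{
	return client_managed || min == max;
}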
Example #2
/**
 * Increment syncpoint value from cpu, updating cache
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	if (nvhost_syncpt_client_managed(sp, id))
		nvhost_syncpt_incr_max(sp, id, 1);
	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
}
Example #3
/**
 * Returns true if syncpoint is expired, false if we may need to wait
 */
bool nvhost_syncpt_is_expired(
	struct nvhost_syncpt *sp,
	u32 id,
	u32 thresh)
{
	u32 current_val;
	u32 future_val;
	smp_rmb();
	current_val = (u32)atomic_read(&sp->min_val[id]);
	future_val = (u32)atomic_read(&sp->max_val[id]);

	/* Note the use of unsigned arithmetic here (mod 1<<32).
	 *
	 * c = current_val = min_val	= the current value of the syncpoint.
	 * t = thresh			= the value we are checking
	 * f = future_val  = max_val	= the value c will reach when all
	 *			   	  outstanding increments have completed.
	 *
	 * Note that c always chases f until it reaches f.
	 *
	 * Dtf = (f - t)
	 * Dtc = (c - t)
	 *
	 *  Consider all cases:
	 *
	 *	A) .....c..t..f.....	Dtf < Dtc	need to wait
	 *	B) .....c.....f..t..	Dtf > Dtc	expired
	 *	C) ..t..c.....f.....	Dtf > Dtc	expired	   (Dct very large)
	 *
	 *  Any case where f==c: always expired (for any t).  	Dtf == Dtc
	 *  Any case where t==c: always expired (for any f).  	Dtf >= Dtc (because Dtc==0)
	 *  Any case where t==f!=c: always wait.	 	Dtf <  Dtc (because Dtf==0,
	 *							Dtc!=0)
	 *
	 *  Other cases:
	 *
	 *	D) .....t..f..c.....	Dtf < Dtc	need to wait
	 *	E) .....f..c..t.....	Dtf < Dtc	need to wait
	 *	F) .....f..t..c.....	Dtf > Dtc	expired
	 *
	 *   So:
	 *	   Dtf >= Dtc implies EXPIRED	(return true)
	 *	   Dtf <  Dtc implies WAIT	(return false)
	 *
	 * Note: If t is expired then we *cannot* wait on it. We would wait
	 * forever (hang the system).
	 *
	 * Note: do NOT get clever and remove the -thresh from both sides. It
	 * is NOT the same.
	 *
	 * If the future value is zero, we have a client managed sync point.
	 * In that case we do a direct comparison.
	 */
	if (!nvhost_syncpt_client_managed(sp, id))
		return future_val - thresh >= current_val - thresh;
	else
		return (s32)(current_val - thresh) >= 0;
}
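To make the mod 1<<32 reasoning concrete, here is a small standalone restatement of the host-managed branch (test code written for this note, not part of the driver), exercising cases A, B and C from the comment plus a pair of values that straddle the 32-bit wrap:

#include <assert.h>
#include <stdint.h>

/* Host-managed branch of the check above: Dtf >= Dtc means expired. */
static int syncpt_expired(uint32_t c, uint32_t f, uint32_t t)
{
	return (uint32_t)(f - t) >= (uint32_t)(c - t);
}

int main(void)
{
	assert(!syncpt_expired(10, 30, 20));       /* A) c..t..f: wait    */
	assert(syncpt_expired(10, 20, 30));        /* B) c..f..t: expired */
	assert(syncpt_expired(20, 30, 10));        /* C) t..c..f: expired */
	assert(syncpt_expired(5, 5, 0xfffffff0u)); /* f==c across wrap    */
	return 0;
}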
Example #4
/**
 * Returns true if syncpoint is expired, false if we may need to wait
 */
bool nvhost_syncpt_is_expired(
	struct nvhost_syncpt *sp,
	u32 id,
	u32 thresh)
{
	u32 current_val = (u32)atomic_read(&sp->min_val[id]);
	u32 future_val = (u32)atomic_read(&sp->max_val[id]);
	bool has_future_val = !nvhost_syncpt_client_managed(sp, id);
	return _nvhost_syncpt_is_expired(current_val, future_val,
					 has_future_val, thresh);
}
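This is the same check as Example #3, refactored so the comparison can be exercised without reading hardware state. _nvhost_syncpt_is_expired() itself is not shown in this listing; judging from the original body above, it presumably reduces to:

static bool _nvhost_syncpt_is_expired(u32 current_val, u32 future_val,
				      bool has_future_val, u32 thresh)
{
	/* has_future_val == !client_managed, so this mirrors the
	 * two branches of the original function */
	if (has_future_val)
		return future_val - thresh >= current_val - thresh;
	else
		return (s32)(current_val - thresh) >= 0;
}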
Example #5
/**
 * Updates sw shadow state for client managed registers
 */
void nvhost_syncpt_save(struct nvhost_syncpt *sp)
{
	u32 i;

	for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
		if (nvhost_syncpt_client_managed(sp, i))
			syncpt_op().update_min(sp, i);
		else
			WARN_ON(!nvhost_syncpt_min_eq_max(sp, i));
	}

	for (i = 0; i < nvhost_syncpt_nb_bases(sp); i++)
		syncpt_op().read_wait_base(sp, i);
}
Example #6
/**
 * Returns -1 if a < b (a triggers before b)
 *	    0 if a = b (a and b trigger at the same time)
 *	    1 if a > b (b triggers before a)
 */
int nvhost_syncpt_compare(
	struct nvhost_syncpt *sp,
	u32 id,
	u32 thresh_a,
	u32 thresh_b)
{
	u32 current_val;
	u32 future_val;
	bool has_future_val = !nvhost_syncpt_client_managed(sp, id);

	current_val = (u32)atomic_read(&sp->min_val[id]);
	future_val = (u32)atomic_read(&sp->max_val[id]);
	return _nvhost_syncpt_compare(current_val, future_val,
				      has_future_val, thresh_a, thresh_b);
}
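As with the previous example, _nvhost_syncpt_compare() is not part of the listing. A plausible sketch, assuming it reuses _nvhost_syncpt_is_expired() and falls back to wrapping arithmetic when both thresholds sit on the same side of the current value:

static int _nvhost_syncpt_compare(u32 current_val, u32 future_val,
				  bool has_future_val,
				  u32 thresh_a, u32 thresh_b)
{
	bool a_expired, b_expired;

	if (thresh_a == thresh_b)
		return 0;

	a_expired = _nvhost_syncpt_is_expired(current_val, future_val,
					      has_future_val, thresh_a);
	b_expired = _nvhost_syncpt_is_expired(current_val, future_val,
					      has_future_val, thresh_b);
	if (a_expired && !b_expired)
		return -1;	/* a triggers before b */
	if (!a_expired && b_expired)
		return 1;	/* b triggers before a */

	/* both expired or both pending: compare mod 1<<32 */
	return ((s32)(thresh_a - thresh_b) < 0) ? -1 : 1;
}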
Example #7
static ssize_t syncpt_type_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct nvhost_syncpt_attr *syncpt_attr =
		container_of(attr, struct nvhost_syncpt_attr, attr);

	if (syncpt_attr->id < 0)
		return snprintf(buf, PAGE_SIZE, "non_client_managed\n");

	if (nvhost_syncpt_client_managed(&syncpt_attr->host->syncpt,
			syncpt_attr->id))
		return snprintf(buf, PAGE_SIZE, "%s\n", "client_managed");
	else
		return snprintf(buf, PAGE_SIZE, "%s\n", "non_client_managed");
}
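The container_of() call implies that struct nvhost_syncpt_attr embeds the kobj_attribute in a field named attr, alongside host and id. A sketch of how such an attribute could be declared and registered; the struct layout is inferred and the registration helper is hypothetical, only the standard sysfs calls are assumed:

struct nvhost_syncpt_attr {
	struct kobj_attribute attr;
	struct nvhost_master *host;
	int id;		/* negative when no specific syncpt is bound */
};

static int syncpt_type_attr_init(struct kobject *kobj,
				 struct nvhost_syncpt_attr *attr,
				 struct nvhost_master *host, int id)
{
	attr->host = host;
	attr->id = id;
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = "syncpt_type";
	attr->attr.attr.mode = S_IRUGO;
	attr->attr.show = syncpt_type_show;
	return sysfs_create_file(kobj, &attr->attr.attr);
}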
Example #8
/**
 * Increment syncpoint value from cpu, updating cache
 */
int nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	int err;

	err = nvhost_module_busy(syncpt_to_dev(sp)->dev);
	if (err)
		return err;

	if (nvhost_syncpt_client_managed(sp, id))
		nvhost_syncpt_incr_max(sp, id, 1);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);

	return 0;
}
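Unlike the void variant in Example #2, this version can fail before the syncpoint is touched (nvhost_module_busy() powers up the host and may return an error), so call sites must propagate the result. A hypothetical wrapper:

/* hypothetical call site: power-up failure must be propagated */
static int submit_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	int err = nvhost_syncpt_incr(sp, id);

	if (err)
		pr_err("nvhost: cpu incr of syncpt %u failed: %d\n", id, err);
	return err;
}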
Example #9
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
static void t20_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	u32 reg_offset = id / 32;

	if (!nvhost_syncpt_client_managed(sp, id)
			&& nvhost_syncpt_min_eq_max(sp, id)) {
		dev_err(&syncpt_to_dev(sp)->dev->dev,
			"Trying to increment syncpoint id %d beyond max\n",
			id);
		nvhost_debug_dump(syncpt_to_dev(sp));
		return;
	}
	writel(bit_mask(id), dev->sync_aperture +
			host1x_sync_syncpt_cpu_incr_r() + reg_offset * 4);
}
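bit_mask() is not defined in this listing. Given that reg_offset selects one 32-bit SYNCPT_CPU_INCR register per 32 syncpoints, it presumably picks the bit within that register, along these lines:

/* presumed helper: one bit per syncpoint inside each 32-bit
 * SYNCPT_CPU_INCR register (matching reg_offset = id / 32 above) */
static inline u32 bit_mask(u32 id)
{
	return 1U << (id % 32);
}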