Example 1
static int m_display_present(lua_State * L)
{
	struct ldisplay_t * display = luaL_checkudata(L, 1, MT_DISPLAY);
	cairo_t * cr;
	if(display->showfps)
	{
		char buf[32];
		ktime_t now = ktime_get();
		s64_t delta = ktime_ms_delta(now, display->stamp);
		if(delta > 0)
			display->fps = ((double)1000.0 / (double)delta) * 0.618 + display->fps * 0.382;
		display->frame++;
		display->stamp = now;
		cr = display->cr[display->index];
		cairo_save(cr);
		cairo_set_font_size(cr, 24);
		cairo_set_source_rgb(cr, 0.4, 0.4, 0.4);
		cairo_move_to(cr, 0, 24);
		snprintf(buf, sizeof(buf), "%.2f %d", display->fps, display->frame);
		cairo_show_text(cr, buf);
		cairo_restore(cr);
	}
	/* Present the current buffer, then flip to the other one (double buffering) */
	cairo_xboot_surface_present(display->cs[display->index]);
	display->index = (display->index + 1) % 2;
	/* Clear the new back buffer to opaque white for the next frame */
	cr = display->cr[display->index];
	cairo_save(cr);
	cairo_set_source_rgb(cr, 1, 1, 1);
	cairo_set_operator(cr, CAIRO_OPERATOR_SOURCE);
	cairo_paint(cr);
	cairo_restore(cr);
	return 0;
}
Example 2
static long calc_speed(void)
{
	uint64_t k;
	long ms;

	/* Elapsed time between the recorded start and finish timestamps, in milliseconds */
	ms = ktime_ms_delta(finish, start);
	if (ms == 0)
		return 0;
	/* KiB processed (good eraseblocks * KiB per eraseblock), scaled to a per-second rate */
	k = (uint64_t)goodebcnt * (mtd->erasesize / 1024) * 1000;
	do_div(k, ms);
	return k;
}
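For reference, here is a minimal, self-contained sketch of the elapsed-time pattern these examples share: take a ktime_t timestamp before the work, then compute the elapsed milliseconds with ktime_ms_delta(). The function name, the timed work, and the 100 ms threshold are illustrative, not taken from the examples above.

#include <linux/ktime.h>
#include <linux/printk.h>

static void timed_operation(void)
{
	ktime_t start = ktime_get();	/* monotonic start timestamp */
	s64 elapsed_ms;

	/* ... the work being measured ... */

	/* ktime_ms_delta(later, earlier) returns the difference in milliseconds */
	elapsed_ms = ktime_ms_delta(ktime_get(), start);
	if (elapsed_ms > 100)
		pr_debug("operation took %lld ms\n", elapsed_ms);
}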
Example 3
void display_present(struct display_t * disp, void * o, void (*draw)(struct display_t *, void *))
{
	cairo_t * cr;
	struct region_t rn, ro, * r;
	char fps[32];
	int count;
	int i;

	if(disp)
	{
		if(disp->cursor.show && disp->cursor.dirty)
		{
			region_init(&rn, disp->cursor.nx, disp->cursor.ny, disp->cursor.width, disp->cursor.height);
			region_init(&ro, disp->cursor.ox, disp->cursor.oy, disp->cursor.width, disp->cursor.height);
			display_region_list_add(disp, &rn);
			display_region_list_add(disp, &ro);
			disp->cursor.dirty = 0;
		}

		if(disp->fps.show)
		{
			ktime_t now = ktime_get();
			s64_t delta = ktime_ms_delta(now, disp->fps.stamp);
			if(delta > 0)
				disp->fps.rate = ((double)1000.0 / (double)delta) * 0.618 + disp->fps.rate * 0.382;
			disp->fps.frame++;
			disp->fps.stamp = now;
			int len = snprintf(fps, sizeof(fps), "%.2f %ld", disp->fps.rate, disp->fps.frame);
			region_init(&rn, 0, 0, len * (24 / 2), 24);
			display_region_list_add(disp, &rn);
		}

		if((count = disp->rl->count) > 0)
		{
			cr = disp->cr;

			cairo_reset_clip(cr);
			for(i = 0; i < count; i++)
			{
				r = &disp->rl->region[i];
				cairo_rectangle(cr, r->x, r->y, r->w, r->h);
			}
			cairo_clip(cr);
			cairo_save(cr);
			cairo_set_source_rgb(cr, 1, 1, 1);
			cairo_set_operator(cr, CAIRO_OPERATOR_OVER);
			cairo_paint(cr);
			cairo_restore(cr);

			if(draw)
				draw(disp, o);

			#if 0
			{
				static int flag = 0;
				cairo_save(cr);
				flag = !flag;
				if(flag)
					cairo_set_source_rgba(cr, 1, 0, 0, 0.7);
				else
					cairo_set_source_rgba(cr, 0, 1, 0, 0.7);
				cairo_set_operator(cr, CAIRO_OPERATOR_OVER);
				cairo_paint(cr);
				cairo_restore(cr);
			}
			#endif

			if(disp->cursor.show)
			{
				cairo_save(cr);
				cairo_set_source_surface(cr, disp->cursor.cs, disp->cursor.nx, disp->cursor.ny);
				cairo_paint(cr);
				cairo_restore(cr);
			}

			if(disp->fps.show)
			{
				cairo_save(cr);
				cairo_set_font_size(cr, 24);
				cairo_set_source_rgb(cr, 0.4, 0.4, 0.4);
				cairo_move_to(cr, 0, 24);
				cairo_show_text(cr, fps);
				cairo_restore(cr);
			}

			cairo_xboot_surface_present(disp->cs, disp->rl);
		}
	}
}
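Examples 1 and 3 smooth the displayed frame rate with an exponential moving average of the instantaneous rate (1000 / delta in milliseconds), using fixed 0.618/0.382 weights. A minimal sketch of that smoothing step follows; the helper name is illustrative.

static double smooth_fps(double prev_fps, long delta_ms)
{
	/* No elapsed time means no new information; keep the previous estimate */
	if (delta_ms <= 0)
		return prev_fps;
	/* Blend the instantaneous rate with the previous estimate (weights as in the examples) */
	return (1000.0 / (double)delta_ms) * 0.618 + prev_fps * 0.382;
}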
Example 4
/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock(pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * the platform.
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT)
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR)
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough that it can
	 * handle the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we follow the spec and simply
	 * do not send the request to the platform after hitting the MPAR limit in
	 * any 60-second window.
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd not sent due to MPAR limit");
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, ret);
		goto end;
	}

	/* Wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}
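Examples 4 and 5 enforce the MPAR limit by allowing at most pcc_mpar commands per 60-second window, counted from the first command of each window. Below is a standalone sketch of that rate-limiting check, assuming kernel context; the helper name and parameters are illustrative, not part of the original code.

#include <linux/ktime.h>
#include <linux/time64.h>
#include <linux/types.h>

static bool mpar_allows_send(unsigned int mpar, unsigned int *count, ktime_t *window_start)
{
	if (!mpar)
		return true;		/* 0 means no limitation */
	if (*count == 0) {
		s64 elapsed = ktime_ms_delta(ktime_get(), *window_start);
		/* Never block the very first command (window_start is still zero) */
		if (elapsed < 60 * MSEC_PER_SEC && ktime_to_ns(*window_start))
			return false;	/* budget for this 60 s window is used up */
		/* Start a new 60 s window with a fresh command budget */
		*window_start = ktime_get();
		*count = mpar;
	}
	(*count)--;
	return true;
}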
Example 5
static int send_pcc_cmd(u16 cmd)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
	static ktime_t last_cmd_cmpl_time, last_mpar_reset;
	static int mpar_count;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		ret = check_pcc_chan();
		if (ret)
			return ret;
	}

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT)
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
		if (pcc_mrtt > time_delta)
			udelay(pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR)
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough that it can
	 * handle the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we follow the spec and simply
	 * do not send the request to the platform after hitting the MPAR limit in
	 * any 60-second window.
	 */
	if (pcc_mpar) {
		if (mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
			if (time_delta < 60 * MSEC_PER_SEC) {
				pr_debug("PCC cmd not sent due to MPAR limit");
				return -EIO;
			}
			last_mpar_reset = ktime_get();
			mpar_count = pcc_mpar;
		}
		mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	/* Ring doorbell */
	ret = mbox_send_message(pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, ret);
		return ret;
	}

	/*
	 * For READs we need to ensure the cmd completed so that the
	 * ensuing read()s can proceed. For WRITEs we don't care, because
	 * the actual write()s are done before coming here and the next
	 * READ or WRITE will check whether the channel is busy/free at
	 * the entry of this call.
	 *
	 * If the Minimum Request Turnaround Time is non-zero, we need to
	 * record the completion time of both READ and WRITE commands for
	 * proper handling of MRTT, so we need to check for pcc_mrtt in
	 * addition to CMD_READ.
	 */
	if (cmd == CMD_READ || pcc_mrtt) {
		ret = check_pcc_chan();
		if (pcc_mrtt)
			last_cmd_cmpl_time = ktime_get();
	}

	mbox_client_txdone(pcc_channel, ret);
	return ret;
}