Пример #1
0
/*
 * libvncserver pointer-event callback: translate VNC cursor motion and
 * button state into arcan shmif IO events on vncctx.shmcont.
 *
 * NOTE(review): this excerpt is truncated — the closing of the button-
 * sync block, the vncctx.last_mask update and the function's closing
 * brace presumably follow outside this view; confirm against the full
 * file.
 */
static void server_pointer (int buttonMask,int x,int y,rfbClientPtr cl)
{
/*
 * synch cursor position on change only, could theoretically allow multiple
 * cursors here by differentiating subid with a cl- derived identifier
 */
	if (vncctx.last_x != x || vncctx.last_y != y){
/* template event: absolute (gotrel = false) single-value analog sample,
 * devid derived from our pid to keep a stable per-process device id */
		struct arcan_event mouse = {
			.category = EVENT_IO,
			.io = {
				.kind = EVENT_IO_AXIS_MOVE,
				.devid = getpid(),
				.datatype = EVENT_IDATATYPE_ANALOG,
				.devkind = EVENT_IDEVKIND_MOUSE,
				.input.analog.gotrel = false,
				.input.analog.nvalues = 1
			}
		};

/* x axis uses the default subid (0) */
		if (vncctx.last_x != x){
			mouse.io.input.analog.axisval[0] = x;
			arcan_shmif_enqueue(&vncctx.shmcont, &mouse);
		}

/* y axis is distinguished by subid = 1; reuses the same template */
		if (vncctx.last_y != y){
			mouse.io.input.analog.axisval[0] = y;
			mouse.io.subid = 1;
			arcan_shmif_enqueue(&vncctx.shmcont, &mouse);
		}

		vncctx.last_x = x;
		vncctx.last_y = y;
	}

/*
 * synch button state on change
 */
	if (buttonMask != vncctx.last_mask){
/* walk the 5 RFB button bits, emit a digital event per changed bit;
 * vnc_btn_to_shmif remaps the RFB button index to the shmif subid */
		for (size_t i = 0; i < 5; i++){
			if (((1 << i) & buttonMask) != ((1 << i) & vncctx.last_mask)){
				arcan_shmif_enqueue(&vncctx.shmcont, &(struct arcan_event){
					.category = EVENT_IO,
					.io = {
						.kind = EVENT_IO_BUTTON,
						.subid = vnc_btn_to_shmif(i),
						.datatype = EVENT_IDATATYPE_DIGITAL,
						.devkind = EVENT_IDEVKIND_MOUSE,
						.devid = getpid(),
						.input.digital.active = !!((1 << i & buttonMask) > 0)
					}
				});
			}
		}
Пример #2
0
/*
 * Timer/clock test client: requests a recurring tick plus a batch of
 * one-shot timers, then prints the STEPFRAME events that come back and
 * verifies that the custom timers fire in order.
 */
int main(int argc, char** argv)
{
	struct arg_arr* aarr;
	struct arcan_shmif_cont cont = arcan_shmif_open(
		SEGID_APPLICATION, SHMIF_ACQUIRE_FATALFAIL, &aarr);

/* recurring clock, every 2 ticks; "dynamic" on the command line switches
 * the clock source to the dynamic/display-driven one */
	arcan_event ev = {
		.ext.kind = ARCAN_EVENT(CLOCKREQ),
		.ext.clock.rate = 2,
		.ext.clock.dynamic = (argc > 1 && strcmp(argv[1], "dynamic") == 0)
	};
	arcan_shmif_enqueue(&cont, &ev);

/* one-shot timers at fixed offsets; id 0/1 are reserved so the custom
 * ones start at 2 */
	int tbl[] = {20, 40, 42, 44, 60, 80, 86, 88, 100, 120};
	int step = 0;

	ev.ext.clock.dynamic = false;
	ev.ext.clock.once = true;
	for (size_t ind = 0; ind < sizeof(tbl)/sizeof(tbl[0]); ind++){
		ev.ext.clock.rate = tbl[ind];
		ev.ext.clock.id = ind + 2; /* 0 index and 1 is reserved */
		arcan_shmif_enqueue(&cont, &ev);
	}

/* drain events until the server tells us to exit; ioevs[0] carries the
 * tick counter and ioevs[1] the timer id that fired */
	while (arcan_shmif_wait(&cont, &ev) != 0){
		if (ev.category != EVENT_TARGET)
			continue;

		if (ev.tgt.kind == TARGET_COMMAND_EXIT)
			break;

		if (ev.tgt.kind != TARGET_COMMAND_STEPFRAME)
			continue;

		printf("step: %d, source: %d\n",
			ev.tgt.ioevs[0].iv, ev.tgt.ioevs[1].iv);

/* custom (id > 1) timers are expected to fire in scheduling order */
		if (ev.tgt.ioevs[1].iv > 1){
			if (step == ev.tgt.ioevs[1].iv-2)
				printf("custom timer %d OK\n", step);
			else
				printf("timer out of synch, expected %d got %d\n",
				step, ev.tgt.ioevs[1].iv-2);
			step++;
		}
	}

	arcan_shmif_drop(&cont);
	return EXIT_SUCCESS;
}
Пример #3
0
/*
 * Build the output buffer and push/synch to an external recipient,
 * taking mapping function, alpha population functions, and timing-
 * related metadata.
 */
/*
 * Build the output buffer and push/synch to an external recipient,
 * taking mapping function, alpha population functions, and timing-
 * related metadata.
 */
static void ch_step(struct rwstat_ch* ch)
{
	struct rwstat_ch_priv* chp = ch->priv;

/* announce frame number / pts before the frame data itself */
	struct arcan_event outev = {
		.category = EVENT_EXTERNAL,
		.ext.kind = EVENT_EXTERNAL_FRAMESTATUS,
		.ext.framestatus.framenumber = ch->priv->cnt_local,
		.ext.framestatus.pts = ch->priv->cnt_total,
	};

/* total number of output pixels (the surface is base x base) */
	size_t ntw = chp->base * chp->base;
	ch->event(ch, &outev);

/*
 * Notify about the packing mode active for this frame. This is
 * needed for the parent to be able to determine what each byte
 * corresponds to.
 */
	if (chp->status_dirty){
/* pack / map / pack_sz are smuggled as letters in the langid field */
		outev.ext.kind = EVENT_EXTERNAL_STREAMINFO;
		outev.ext.streaminf.streamid = 0;
		outev.ext.streaminf.datakind = 0;
		outev.ext.streaminf.langid[0] = 'a' + chp->pack;
		outev.ext.streaminf.langid[1] = 'a' + chp->map;
		outev.ext.streaminf.langid[2] = 'a' + chp->pack_sz;
		chp->status_dirty = false;
		ch->event(ch, &outev);
	}

/* NOTE(review): comparing the masked value against 1 only fires when
 * RW_CLK_SLIDE is literally bit 0 — if the flag occupies any other bit
 * this branch is dead; confirm whether (chp->clock & RW_CLK_SLIDE) was
 * intended */
	if ( 1 == (chp->clock & (RW_CLK_SLIDE)) )
		rebuild_hgram(chp);

/* select how the alpha channel is populated for this frame */
	if (chp->amode == RW_ALPHA_ENTBASE)
		update_entalpha(chp, chp->ent_base);

	else if (chp->amode == RW_ALPHA_PTN)
		update_ptnalpha(chp);

/* pack the raw byte buffer into output pixels, pack_sz bytes a pixel */
	for (size_t i = 0; i < chp->buf_sz; i += chp->pack_sz)
		pack_bytes(chp, &chp->buf[i], i / chp->pack_sz);

/* publish the frame and remember where this frame ended */
	chp->cont->addr->vpts = ch->priv->cnt_total;
	arcan_shmif_signal(chp->cont, SHMIF_SIGVID);
	chp->cnt_local = chp->cnt_total;

/* non-sparse mappings require an output flush */
	if (chp->map == MAP_TUPLE || chp->map == MAP_TUPLE_ACC){
		shmif_pixel val = SHMIF_RGBA(0x00, 0x00, 0x00, 0xff);
		for (size_t i = 0; i < ntw; i++)
			chp->cont->vidp[i] = val;
	}
}

/* Forward an event over the shmif connection owned by this channel. */
static void ch_event(struct rwstat_ch* ch, arcan_event* ev)
{
	struct rwstat_ch_priv* chp = ch->priv;
	arcan_shmif_enqueue(chp->cont, ev);
}
Пример #4
0
/*
 * Use the current set of patterns to populate the alpha buffer
 * that is then sampled when building the final output.
 */
/*
 * Use the current set of patterns to populate the alpha buffer
 * that is then sampled when building the final output.
 */
static void update_ptnalpha(struct rwstat_ch_priv* chp)
{
/* default alpha; a FLAG_STATE pattern match switches this for all
 * subsequent bytes */
	uint8_t av = 0xff;

/* no patterns: flood-fill the whole alpha plane with the default */
	if (chp->n_patterns == 0){
		memset(chp->alpha, av, chp->base * chp->base);
		return;
	}

/* reset per-pattern match progress and event counters */
	for (size_t i = 0; i < chp->n_patterns; i++){
		chp->patterns[i].buf_pos = 0;
		chp->patterns[i].evc = 0;
	}

/* If ptn-match ever becomes a performance choke,
 * here is a good spot for adding parallelization. */
	for (size_t i = 0; i < chp->buf_sz; i++){
		chp->alpha[i] = av;

		for (size_t j = 0; j < chp->n_patterns; j++){
			struct pattern* ptn = &chp->patterns[j];

/* NOTE(review): buf_pos is not reset on a mismatch, so the bytes that
 * complete a pattern need not be adjacent — confirm this is intended */
			if (ptn->buf[ptn->buf_pos] == chp->buf[i]){
				if (++(ptn->buf_pos) == ptn->buf_sz){
					ptn->buf_pos = 0;

/* the match consumed buf_sz bytes ending at index i, so the marked
 * span starts at i + 1 - buf_sz (>= 0, as at least buf_sz bytes have
 * been processed). The previous form, i - buf_sz, was off by one and
 * wrapped the size_t index into an out-of-bounds write whenever a
 * pattern completed right at the start of the buffer. */
					memset(&chp->alpha[i + 1 - ptn->buf_sz],
						ptn->alpha, ptn->buf_sz);

					if ((ptn->flags & FLAG_STATE))
						av = ptn->alpha;
					if ((ptn->flags & FLAG_EVENT))
						ptn->evc++;
				}
			}
		}
	}

/* Check matched patterns and fire an event with the matching
 * identifier, and the number of times each event was matched
 * in the buffer window. Abuse the CURSORINPUT event for this */
	for (size_t i = 0; i < chp->n_patterns; i++)
		if (chp->patterns[i].evc){
			arcan_event ev = {
				.category = EVENT_EXTERNAL,
				.ext.kind = EVENT_EXTERNAL_CURSORINPUT,
				.ext.cursor.id = chp->patterns[i].id,
				.ext.cursor.x = chp->patterns[i].evc
			};
			arcan_shmif_enqueue(chp->cont, &ev);
			chp->patterns[i].evc = 0;
		}
}
Пример #5
0
/*
 * Deferred subsurface allocation handler: bind the acquired shmif
 * segment (con) to a new wl_subsurface resource for the requesting
 * surface, and announce the parent relation via a VIEWPORT event.
 * Returns false (and posts no_memory on the requester) on any failure.
 */
static bool subcomp_defer_handler(
	struct surface_request* req, struct arcan_shmif_cont* con)
{
/* upstream segment allocation failed */
	if (!con){
		trace(TRACE_SHELL, "reqfail");
		wl_resource_post_no_memory(req->target);
		return false;
	}

	struct wl_resource* subsurf = wl_resource_create(req->client->client,
		&wl_subsurface_interface, wl_resource_get_version(req->target), req->id);

	if (!subsurf){
		trace(TRACE_SHELL, "reqfail");
		wl_resource_post_no_memory(req->target);
		return false;
	}

/* validate the backing surface BEFORE registering it as the resource
 * implementation user data; the previous order handed a possibly-NULL
 * pointer to wl_resource_set_implementation first */
	struct comp_surf* surf = wl_resource_get_user_data(req->target);
	if (!surf){
		trace(TRACE_SHELL, "reqfail");
		wl_resource_post_no_memory(req->target);
		return false;
	}

	wl_resource_set_implementation(subsurf, &subsurf_if, surf, NULL);

	surf->acon = *con;
	surf->cookie = 0xfeedface;
	surf->shell_res = subsurf;
	surf->dispatch = subsurf_shmifev_handler;
	surf->sub_parent_res = req->parent;

	snprintf(surf->tracetag, SURF_TAGLEN, "subsurf");

/* tell the server about the parent/child relation so composition can
 * anchor the subsurface; guard against a parent without user data or
 * a dead parent segment */
	if (req->parent){
		struct comp_surf* psurf = wl_resource_get_user_data(req->parent);
		if (!psurf || !psurf->acon.addr){
			trace(TRACE_ALLOC, "bad subsurface, broken parent");
			return false;
		}
		surf->viewport.ext.kind = ARCAN_EVENT(VIEWPORT);
		surf->viewport.ext.viewport.parent = psurf->acon.segment_token;
		arcan_shmif_enqueue(&surf->acon, &surf->viewport);
	}

	trace(TRACE_ALLOC, "subsurface");
	return true;
}
Пример #6
0
/*
 * Per-client event callback: forward plain events from channel chid
 * over the shmif context, dropping events for unknown contexts and
 * descriptor-carrying events (not handled yet).
 */
static void on_cl_event(
	struct arcan_shmif_cont* cont, int chid, struct arcan_event* ev, void* tag)
{
	if (!cont){
		debug_print(1, "ignore incoming event on unknown context");
		return;
	}

/*
 * Events needed to be handled here:
 * NEWSEGMENT, map it, add it to the context channel list.
 */
	if (arcan_shmif_descrevent(ev)){
		debug_print(1, "incoming descr- event ignored");
		return;
	}

	debug_print(2, "client event: %s on ch %d",
		arcan_shmif_eventstr(ev, NULL, 0), chid);
	arcan_shmif_enqueue(cont, ev);
}