Пример #1
0
static void ch_pump(struct senseye_ch* ch)
{
	if (!ch || !ch->in_pr)
		return;

	struct senseye_priv* priv = ch->in_pr;
	struct arcan_event ev;

	while(arcan_shmif_poll(&priv->cont, &ev) != 0)
		dispatch_event(&ev, ch->in, priv);
}
Пример #2
0
/*
 * Test client: connect to an arcan display server, repeatedly fill the
 * shared segment with a rolling color gradient, and request a randomly
 * sized resize roughly every 200 frames, until told to exit.
 * NOTE(review): the #endif below closes a preprocessor conditional that
 * begins outside this view (presumably an ENABLE_FSRV_AVFEED alternate
 * entry point) — confirm against the full file.
 */
int main(int argc, char** argv)
#endif
{
	struct arg_arr* aarr;
	/* FATALFAIL: arcan_shmif_open terminates the process on failure,
	 * so cont is usable unconditionally afterwards */
	struct arcan_shmif_cont cont = arcan_shmif_open(
		SEGID_APPLICATION, SHMIF_ACQUIRE_FATALFAIL, &aarr);

	arcan_event ev;
	bool running = true;

	/* initial negotiated dimensions; return value deliberately ignored */
	arcan_shmif_resize(&cont, 640, 480);

	/* per-pixel gradient state, carried across frames so the pattern moves */
	uint8_t step_r = 0;
	uint8_t step_g = 0;
	uint8_t step_b = 255;

	int frames = 0;
	while(running){
		/* every ~200 frames, stress the resize path with a random size
		 * in [128, 1151] on each axis */
		if (frames++ > 200){
			printf("send resize\n");
			arcan_shmif_resize(&cont, 128 + (rand() % 1024), 128 + (rand() % 1024));
			printf("unlock resize\n");
			frames = 0;
		}

		printf("frame(%zu, %zu)\n", cont.w, cont.h);
		for (size_t row = 0; row < cont.h; row++)
			for (size_t col = 0; col < cont.w; col++){
				/* NOTE(review): row stride uses cont.addr->w while the loop
				 * bounds use cont.w/cont.h — assumed to agree after resize;
				 * confirm against shmif pitch/stride semantics */
				cont.vidp[ row * cont.addr->w + col ] = SHMIF_RGBA(step_r, step_g, step_b, 0xff);
				/* uint8_t wraparound is intentional; g/b advance one step
				 * each time the faster channel hits 255 */
				step_r++;
				step_g += step_r == 255;
				step_b += step_g == 255;
			}

		/* publish the frame (blocks until the server releases the buffer) */
		arcan_shmif_signal(&cont, SHMIF_SIGVID);

		/* drain the event queue without blocking; only EXIT is acted upon */
		int rv;
		while ( (rv = arcan_shmif_poll(&cont, &ev)) == 1){
			if (ev.category == EVENT_TARGET)
			switch (ev.tgt.kind){
			case TARGET_COMMAND_EXIT:
				running = false;
			break;
			default:
			break;
			}
		}
	}

#ifndef ENABLE_FSRV_AVFEED
	return EXIT_SUCCESS;
#endif
}
Пример #3
0
/*
 * Pump a single event from the context, either blocking until one arrives
 * or polling. Returns false when the connection died (wait failure or
 * poll returning -1) or an EXIT command was received; true otherwise.
 */
bool senseye_pump(struct senseye_cont* cont, bool block)
{
	struct senseye_priv* priv = cont->priv;
	arcan_event ev;

	/* non-blocking path: forward at most one pending event */
	if (!block){
		int status = arcan_shmif_poll(&priv->cont, &ev);
		if (status > 0)
			process_event(cont, &ev);
		return status != -1;
	}

	/* blocking path: a failed wait or an EXIT command ends the session */
	if (!arcan_shmif_wait(&priv->cont, &ev))
		return false;

	if (ev.category == EVENT_TARGET && ev.tgt.kind == TARGET_COMMAND_EXIT)
		return false;

	process_event(cont, &ev);
	return true;
}
Пример #4
0
/*
 * Bridge an a12 server-side state machine [S] to a local arcan display
 * server reached through connection point [cp] (falling back to, or
 * exporting into, ARCAN_CONNPATH). Multiplexes between the network
 * descriptors (fd_in for inbound a12 data, fd_out for outbound) and up
 * to 256 shmif segments until either side terminates.
 *
 * Returns 0 on normal termination, -ENOENT when no connection point can
 * be resolved or the shmif connection fails.
 */
int a12helper_a12srv_shmifcl(
	struct a12_state* S, const char* cp, int fd_in, int fd_out)
{
	if (!cp)
		cp = getenv("ARCAN_CONNPATH");
	else
		setenv("ARCAN_CONNPATH", cp, 1);

	if (!cp){
		debug_print(1, "No connection point was specified");
		return -ENOENT;
	}

/* Channel - connection mapping */
	struct cl_state cl_state = {};

/* Open / set the primary connection */
	cl_state.wnd[0] = arcan_shmif_open(SEGID_UNKNOWN, SHMIF_NOACTIVATE, NULL);
	if (!cl_state.wnd[0].addr){
		debug_print(1, "Couldn't connect to an arcan display server");
		return -ENOENT;
	}
	cl_state.n_segments = 1;
	debug_print(1, "Segment connected");

	a12_set_destination(S, &cl_state.wnd[0], 0);

/* set to non-blocking so reads below can return EAGAIN instead of stalling */
	int flags = fcntl(fd_in, F_GETFL);
	fcntl(fd_in, F_SETFL, flags | O_NONBLOCK);

	uint8_t* outbuf;
	size_t outbuf_sz = 0;
	debug_print(1, "got proxy connection, waiting for source");

/* main loop: poll the primary segment's event pipe, the inbound fd, and
 * (only while there is buffered output) the outbound fd; -1 ends the loop */
	int status;
	while (-1 != (status = a12helper_poll_triple(
		cl_state.wnd[0].epipe, fd_in, outbuf_sz ? fd_out : -1, 4))){

/* flush as much buffered a12 output as the descriptor accepts; a short
 * write just advances the buffer cursor for the next round */
		if (status & A12HELPER_WRITE_OUT){
			if (outbuf_sz || (outbuf_sz = a12_channel_flush(S, &outbuf))){
				ssize_t nw = write(fd_out, outbuf, outbuf_sz);
				if (nw > 0){
					outbuf += nw;
					outbuf_sz -= nw;
				}
			}
		}

		if (status & A12HELPER_DATA_IN){
			uint8_t inbuf[9000];
			ssize_t nr = read(fd_in, inbuf, 9000);
			if (-1 == nr && errno != EAGAIN && errno != EWOULDBLOCK && errno != EINTR){
				debug_print(1, "failed to read from input: %d", errno);
				break;
			}

/* NOTE(review): on EAGAIN/EINTR nr is still -1 here and gets passed to
 * a12_channel_unpack as a size — confirm the unpack routine tolerates
 * negative/huge sizes, otherwise guard with nr > 0 */
			debug_print(2, "unpack %zd bytes", nr);
			a12_channel_unpack(S, inbuf, nr, NULL, on_cl_event);
		}

/* 1 client can have multiple segments; [count] lets us stop scanning the
 * 256-slot table early once every live segment has been visited */
		for (size_t i = 0, count = cl_state.n_segments; i < 256 && count; i++){
			if (!cl_state.wnd[i].addr)
				continue;

			count--;
			struct arcan_event newev;
			int sc;
			while (( sc = arcan_shmif_poll(&cl_state.wnd[i], &newev)) > 0){
/* we got a descriptor passing event, some of these we could/should discard,
 * while others need to be forwarded as a binary- chunk stream and kept out-
 * of order on the other side */
				if (arcan_shmif_descrevent(&newev)){
					debug_print(1, "(cl:%zu) ign-descr-event: %s",
						i, arcan_shmif_eventstr(&newev, NULL, 0));
				}
				else {
					debug_print(2, "enqueue %s", arcan_shmif_eventstr(&newev, NULL, 0));
					a12_channel_enqueue(S, &newev);
				}
			}
		}

/* we might have gotten data to flush, so use that as feedback */
		if (!outbuf_sz){
			outbuf_sz = a12_channel_flush(S, &outbuf);
			if (outbuf_sz)
				debug_print(2, "output buffer size: %zu", outbuf_sz);
		}
	}

/* though a proper cleanup would cascade, it doesn't help being careful */
	for (size_t i = 0, count = cl_state.n_segments; i < 256 && count; i++){
		if (!cl_state.wnd[i].addr)
				continue;
		arcan_shmif_drop(&cl_state.wnd[i]);
		cl_state.wnd[i].addr = NULL;
	}

	return 0;
}
Пример #5
0
static bool client_inevq_process(apr_socket_t* outconn)
{
	arcan_event ev;
	uint16_t msgsz = sizeof(ev.net.message) / sizeof(ev.net.message[0]);

/* since we flush the entire eventqueue at once, it means that multiple
 * messages may possible be interleaved in one push (up to the 64k buffer)
 * before getting sent of to the TCP layer (thus not as wasteful as it might
 * initially seem).
 *
 * The real issue is buffer overruns though, which currently means that data
 * gets lost (for custommsg) or truncated State transfers won't ever overflow
 * and are only ever tucked on at the end */
	while ( 1 == arcan_shmif_poll(&clctx.shmcont, &ev) )
		if (ev.category == EVENT_NET){
			switch (ev.net.kind){
			case EVENT_NET_INPUTEVENT:
				LOG("(net-cl) inputevent unfinished, implement "
					"event_pack()/unpack(), ignored\n");
			break;

			case EVENT_NET_CUSTOMMSG:
				if (clctx.conn.connstate < CONN_CONNECTED)
					break;

				if (strlen(ev.net.message) + 1 < msgsz)
					msgsz = strlen(ev.net.message) + 1;

				return clctx.conn.pack(&clctx.conn, TAG_NETMSG, msgsz, ev.net.message);
			break;

			default:
			break;
			}
		}
		else if (ev.category == EVENT_TARGET){
			switch (ev.tgt.kind){
			case TARGET_COMMAND_EXIT:
				return false;
			break;

/*
 * new transfer (arcan->fsrv) requested, or pending
 * request to accept incoming transfer.
 * reject: transfer pending or non-authenticated
 * accept: switch to STATEXFER mode
 */
			case TARGET_COMMAND_NEWSEGMENT:
				net_newseg(&clctx.conn,	ev.tgt.ioevs[0].iv, ev.tgt.message);

/* output type? assume transfer request */
				if (ev.tgt.ioevs[0].iv == 0){
					char outbuf[4] = {
						clctx.conn.state_out.shmcont.addr->w,
						clctx.conn.state_out.shmcont.addr->w >> 8,
						clctx.conn.state_out.shmcont.addr->h,
						clctx.conn.state_out.shmcont.addr->h >> 8
					};
					clctx.conn.state_out.state = STATE_IMG;
					return (clctx.conn.pack(
						&clctx.conn, TAG_STATE_IMGOBJ, 4, outbuf));
				}
				else {
					if (clctx.conn.blocked){
						clctx.conn.blocked = false;
					}
				}

				close(clctx.tmphandle);
				clctx.tmphandle = 0;
			break;

/*
 * new transfer (fsrv<->fsrv) requested
 */
			case TARGET_COMMAND_STORE:
			break;

			case TARGET_COMMAND_RESTORE:
			break;

			case TARGET_COMMAND_STEPFRAME:
				queueout_data(&clctx.conn);
			break;

			default:
				; /* just ignore */
		}