Example 1
/* This function animates an accept request from `ol->accepted` queue */
static void bake_one_accepted(outlet_t *ol, proc_t *cont_proc)
{
#if LING_WITH_LWIP
	tcp_accepted(ol->tcp);	// not needed, really
#endif

	assert(ol->accepted != 0);
	acc_pend_t *pend = pop_from_ring(&ol->accepted);

	outlet_t *new_ol = ol_tcp_factory(cont_proc, 0);	// default bit options?
	if (new_ol == 0)
		scheduler_signal_exit_N(cont_proc, ol->oid, A_NO_MEMORY);
	else
	{
		ol_tcp_animate(new_ol, pend);

		// BEAM copies some options in the driver code and some in gen_tcp.
		// This creates an interplay that may result in the first packet getting
		// lost or interpreted as the wrong packet type. The following assignments
		// were determined by trial and error to let the right first packet
		// through.

		new_ol->active = INET_PASSIVE;
		new_ol->packet = ol->packet;
		new_ol->binary = ol->binary;

		inet_async2(ol->oid, cont_proc->pid, ASYNC_REF, A_OK, new_ol->oid);
	}

	reuse_pending(&ol->free_pends, pend);
}
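The `ol->accepted` queue that bake_one_accepted() drains, and the `ol->free_pends` list that the spent entry is returned to, behave like a small ring of pending accepts. The sketch below is hypothetical (the real acc_pend_t, pop_from_ring() and reuse_pending() are defined elsewhere in the LING sources); it only illustrates one plausible shape of such a ring, with the head pointing at the newest entry.

/* Hypothetical sketch only - not the actual LING acc_pend_t or helpers.
 * A circular singly linked list whose head points at the newest entry;
 * popping removes the oldest one. */
typedef struct pend_sketch {
	struct pend_sketch *next;
	void *pcb;		/* would hold the freshly accepted lwIP pcb */
} pend_sketch_t;

static pend_sketch_t *pop_from_ring_sketch(pend_sketch_t **ring)
{
	pend_sketch_t *last = *ring;		/* head points at the newest entry */
	pend_sketch_t *first = last->next;	/* the oldest entry follows it */
	if (first == last)
		*ring = 0;			/* the ring is now empty */
	else
		last->next = first->next;	/* unlink the oldest entry */
	return first;
}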
Example 2
// Turns the bytes accumulated in ol->recv_buffer into complete packets and
// delivers them either to the pending receive request or, in active mode,
// as messages to the controlling process.
// NB: called both from a callback and a BIF - do not send signals
static int recv_bake_packets(outlet_t *ol, proc_t *cont_proc)
{
	term_t reason = noval;
	term_t packet = noval;
	term_t active_tag = A_TCP;

more_packets:
	if (ol->cr_in_progress || ol->active != INET_PASSIVE)
	{
		if (ol->packet == TCP_PB_RAW &&
			ol->recv_expected_size != 0 &&
			ol->recv_buf_off < ol->recv_expected_size)
				packet = A_MORE;
		else
		{
			uint32_t more_len;

			uint32_t adj_len = ol->recv_buf_off;
			// take into account expected_size for raw packets
			if (ol->packet == TCP_PB_RAW && ol->recv_expected_size != 0)
				adj_len = ol->recv_expected_size;
				
			bits_t bs;
			bits_init_buf(ol->recv_buffer, adj_len, &bs);
			packet = decode_packet_N(ol->packet, &bs, noval, ol->binary,
						&reason, &more_len, ol->packet_size, 0, &cont_proc->hp);

			if (packet == A_MORE && more_len != 0 && more_len > ol->recv_bufsize)
				return -TOO_LONG;

			if (packet != A_MORE && packet != noval)
			{
				uint32_t left = (bs.ends -bs.starts) /8;
				uint32_t consumed = adj_len -left;
				memmove(ol->recv_buffer, ol->recv_buffer +consumed, ol->recv_buf_off -consumed);
				ol->recv_buf_off -= consumed;
				//debug("---> recv_bake_packets: consumed %d left %d cr_in_progress %d active %d\n",
				//		consumed, left, ol->cr_in_progress, ol->active);

				// Is it safe to acknowledge the data here, outside of the
				// receive callback?
				tcp_recved(ol->tcp, consumed);

				if (ol->packet == TCP_PB_HTTP || ol->packet == TCP_PB_HTTP_BIN)
					active_tag = A_HTTP;

				if (ol->packet == TCP_PB_HTTP)
					ol->packet = TCP_PB_HTTPH;
				else if (ol->packet == TCP_PB_HTTP_BIN)
					ol->packet = TCP_PB_HTTPH_BIN;
				else if (ol->packet == TCP_PB_HTTPH && packet == A_HTTP_EOH)
					ol->packet = TCP_PB_HTTP;
				else if (ol->packet == TCP_PB_HTTPH_BIN && packet == A_HTTP_EOH)
					ol->packet = TCP_PB_HTTP_BIN;
			}
		}
	}

	if (packet != A_MORE && ol->cr_in_progress)
	{
		cr_cancel_deferred(ol);
		term_t a = (packet == noval) ?A_ERROR :A_OK;
		term_t b = (packet == noval) ?reason :packet;
		inet_async2(ol->oid, ol->cr_reply_to, ASYNC_REF, a, b);
	}
	else if (packet != A_MORE && ol->active != INET_PASSIVE)
	{
		uint32_t *p = heap_alloc_N(&cont_proc->hp, 1 +3);
		if (p == 0)
			return -NO_MEMORY;
		p[0] = 3;
		p[1] = (packet == noval) ?A_TCP_ERROR :active_tag;
		p[2] = ol->oid;
		p[3] = (packet == noval) ?reason :packet;
		heap_set_top(&cont_proc->hp, p +1 +3);
		int x = scheduler_new_local_mail_N(cont_proc, tag_tuple(p));
		if (x < 0)
			return x;

		if (ol->active == INET_ONCE && !is_tuple(packet))	// http_eoh
			ol->active = INET_PASSIVE;
		else if (ol->recv_buf_off > 0 && packet != noval)
			goto more_packets;
	}

	return 0;
}
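recv_bake_packets() delegates the actual framing to decode_packet_N(), which understands the inet packet modes (TCP_PB_RAW, the fixed-length-header modes, the HTTP modes, and so on). As a hedged illustration of what one of those modes means, the sketch below shows {packet,2}-style framing: a two-byte big-endian length header followed by the body. frame_ready_pb2() and consume_frame_pb2() are hypothetical helpers, not part of LING; the memmove mirrors the buffer compaction done in the function above.

#include <stdint.h>
#include <string.h>

/* Returns the total frame length if a complete {packet,2} frame is present
 * in the first `off` bytes of `buf`, or 0 when more bytes are needed
 * (the "A_MORE" case in the code above). */
static uint32_t frame_ready_pb2(const uint8_t *buf, uint32_t off)
{
	if (off < 2)
		return 0;					/* length header incomplete */
	uint32_t body = ((uint32_t)buf[0] << 8) | buf[1];	/* big-endian body length */
	if (off < 2 + body)
		return 0;					/* body incomplete */
	return 2 + body;
}

/* Consume one complete frame from the front of the buffer, shifting any
 * remaining bytes down the same way recv_bake_packets() does with memmove(). */
static uint32_t consume_frame_pb2(uint8_t *buf, uint32_t *off)
{
	uint32_t total = frame_ready_pb2(buf, *off);
	if (total == 0)
		return 0;					/* need more data */
	memmove(buf, buf + total, *off - total);
	*off -= total;
	return total;
}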
Example 3
// lwIP receive callback for the TCP outlet: copies incoming pbuf data into the
// outlet's receive buffer and bakes packets from it; data == 0 means the peer
// has closed its side of the connection.
static err_t recv_cb(void *arg, struct tcp_pcb *tcp, struct pbuf *data, err_t err)
{
	phase_expected(PHASE_EVENTS);

	outlet_t *ol = (outlet_t *)arg;
	if (ol == 0)
		return ERR_OK;		// outlet has gone already
	//debug("---> recv_cb(arg 0x%pp, tcp 0x%pp, %d, err %d)\n",
	//				arg, tcp, (data == 0) ?0: data->tot_len, err);
	assert(ol->tcp == tcp);

	term_t pid = (ol->cr_in_progress) ?ol->cr_reply_to :ol->owner;
	proc_t *cont_proc = scheduler_lookup(pid);
	if (cont_proc == 0)
	{
		//debug("recv_cb: nowhere to send - discard\n");
		if (data != 0)
			pbuf_free(data);
		return ERR_OK;
	}

	if (data == 0)
	{
		if (ol->active != INET_PASSIVE)
		{
			// deliver {tcp_closed,Sock}
			uint32_t *p = heap_alloc_N(&cont_proc->hp, 1 +2);
			if (p == 0)
				scheduler_signal_exit_N(cont_proc, ol->oid, A_NO_MEMORY);
			else
			{
				p[0] = 2;
				p[1] = A_TCP_CLOSED;
				p[2] = ol->oid;
				heap_set_top(&cont_proc->hp, p +1 +2);
				int x = scheduler_new_local_mail_N(cont_proc, tag_tuple(p));
				if (x < 0)
					scheduler_signal_exit_N(cont_proc, ol->oid, err_to_term(x));
			}

			if (ol->active == INET_ONCE)
				ol->active = INET_PASSIVE;
		}
		else if (ol->cr_in_progress)
		{
			cr_cancel_deferred(ol);
			inet_async2(ol->oid, ol->cr_reply_to, ASYNC_REF, A_ERROR, A_CLOSED);
		}

		// No more data will be received, but the connection otherwise remains
		// usable (half-closed). No need to call tcp_close() or anything here.
		ol->peer_close_detected = 1;
	}
	else
	{
		uint16_t len = data->tot_len;
		if (len > ol->recv_bufsize -ol->recv_buf_off)
		{
			debug("recv_cb: len %d recv_bufsize %d recv_buf_off %d\n",
									len, ol->recv_bufsize, ol->recv_buf_off);
			debug("recv_cb: received data do not fit recv_buffer - truncated\n");
			len = ol->recv_bufsize -ol->recv_buf_off;	// truncation
		}

		//debug("---> recv_cb: recv_bufsize %d recv_buf_off %d\n\t\ttot_len %d len %d\n", 
		//		ol->recv_bufsize, ol->recv_buf_off, data->tot_len, len);
		pbuf_copy_partial(data, ol->recv_buffer +ol->recv_buf_off, len, 0);
		ol->recv_buf_off += len;

		// A more natural place to acknowledge the data when complete packets
		// are baked.
		//tcp_recved(ol->tcp, len);

		pbuf_free(data);
		int x = recv_bake_packets(ol, cont_proc);
		if (x < 0)
			scheduler_signal_exit_N(cont_proc, ol->oid, err_to_term(x));
	}

	return ERR_OK;
}
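recv_cb() has the signature lwIP expects of a raw-API receive callback: it is invoked with a pbuf on incoming data and with data == 0 when the peer closes, and it is responsible for freeing the pbuf and, eventually, acknowledging the consumed bytes with tcp_recved(). The registration itself is not shown in this example; a minimal sketch, assuming it lives in the same file as recv_cb() above (attach_outlet is a hypothetical name), could look like this:

#include "lwip/tcp.h"

static void attach_outlet(struct tcp_pcb *pcb, outlet_t *ol)
{
	tcp_arg(pcb, ol);	/* becomes the `arg` passed to recv_cb() */
	tcp_recv(pcb, recv_cb);	/* lwIP calls recv_cb() on data and on close (data == 0) */
}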