Example #1
0
static void disk_async(proc_t *caller,
			term_t reply, term_t oid, uint16_t tag, term_t result)
{
	// Deliver the async disk reply {Reply,Oid,Tag,Result} to the caller's
	// mailbox. No marshalling is needed: reply is always an atom and result
	// is either an atom or a binary already allocated on the caller's heap.
	uint32_t *htop = heap_alloc_N(&caller->hp, 1 +4);
	if (htop == 0)
	{
		scheduler_signal_exit_N(caller, oid, A_NO_MEMORY);
		return;
	}
	term_t msg = tag_tuple(htop);
	*htop++ = 4;
	*htop++ = reply;
	*htop++ = oid;
	*htop++ = tag_int(tag);
	*htop++ = result;
	heap_set_top(&caller->hp, htop);

	int rc = scheduler_new_local_mail_N(caller, msg);
	if (rc < 0)
		scheduler_signal_exit_N(caller, oid, err_to_term(rc));
}
Example #2
0
File: tube.c Project: EarlGray/ling
void slots_reply(term_t oid, term_t reply_to, term_t avail)
{
	// Send {can_send,Tube,Avail} to the process identified by reply_to;
	// drop the notification silently if that process no longer exists.
	proc_t *caller = scheduler_lookup(reply_to);
	if (caller == 0)
		return;

	uint32_t *htop = heap_alloc_N(&caller->hp, 1 +3);
	if (htop == 0)
	{
		scheduler_signal_exit_N(caller, oid, A_NO_MEMORY);
		return;
	}
	term_t msg = tag_tuple(htop);
	*htop++ = 3;
	*htop++ = A_CAN_SEND;
	*htop++ = oid;
	*htop++ = avail;
	heap_set_top(&caller->hp, htop);

	int rc = scheduler_new_local_mail_N(caller, msg);
	if (rc < 0)
		scheduler_signal_exit_N(caller, oid, err_to_term(rc));
}
Example #3
0
File: pore.c Project: EarlGray/ling
static void pore_universal_handler(uint32_t evtchn, void *data)
{
	// Event-channel handler shared by all pores: notify the owning process
	// with an {irq,Pore} message, or exit it with no_memory on failure.
	assert(data != 0);
	pore_t *pore = (pore_t *)data;
	proc_t *proc = scheduler_lookup(pore->owner);
	if (proc == 0)
		return;	// owner is gone - drop the event

	uint32_t *htop = heap_alloc_N(&proc->hp, 1 +2);
	if (htop != 0)
	{
		heap_set_top(&proc->hp, htop +1 +2);
		htop[0] = 2;
		htop[1] = A_IRQ;
		htop[2] = pore->eid;

		if (scheduler_new_local_mail_N(proc, tag_tuple(htop)) >= 0)
			return;
	}

	scheduler_signal_exit_N(proc, pore->eid, A_NO_MEMORY);
}
Example #4
0
term_t heap_tuple(heap_t *hp, int size)
{
	// Allocate a tuple of the given arity on the heap; the elements are
	// left uninitialized and must be filled in by the caller.
	int needed = sizeof(tuple_t) + size*sizeof(term_t);
	tuple_t *t = (tuple_t *)heap_alloc(hp, needed);
	t->size = size;
	return tag_tuple(t);
}
Example #5
0
term_t heap_tuple1(heap_t *hp, term_t e1)
{
	// Build a 1-tuple {e1} on the heap.
	tuple_t *t = (tuple_t *)heap_alloc(hp, sizeof(*t) + sizeof(term_t));
	t->size = 1;
	t->elts[0] = e1;
	return tag_tuple(t);
}
Example #6
0
// Fires an expired Erlang timer: delivers tm->msg (wrapped as
// {timeout,TRef,Msg} if tm->enveloped) to the destination process, then
// releases the sender's pending-timer reference, possibly destroying a
// zombie sender. Returns 0 or a negative error code.
static int erlang_fire(etimer_t *tm)
{
	// the destination may be a registered name (atom) or a pid
	proc_t *to_proc = (is_atom(tm->dst))
		?scheduler_process_by_name(tm->dst)
		:scheduler_lookup(tm->dst);

	int rc = 0;
	if (to_proc != 0)
	{
		term_t marsh_msg = tm->msg;
		if (tm->sender != to_proc)	// tm->sender may be zero
		{
			// the message lives on the sender's heap - copy it over
			rc = heap_copy_terms_N(&to_proc->hp, &marsh_msg, 1);
			if (rc < 0)
				goto error;
		}

		term_t env_msg = marsh_msg;	// {timeout,TRef,Msg} or Msg
		if (tm->enveloped)
		{
			// rebuild the timer reference on the destination heap
			term_t tref = heap_remake_local_ref_N(&to_proc->hp, tm->ref_id);
			if (tref == noval)
			{
				rc = -NO_MEMORY;
				goto error;
			}

			uint32_t *htop = heap_alloc_N(&to_proc->hp, 1 +3);
			if (htop == 0)
			{
				rc = -NO_MEMORY;
				goto error;
			}
			heap_set_top(&to_proc->hp, htop +1 +3);
			env_msg = tag_tuple(htop);
			htop[0] = 3;
			htop[1] = A_TIMEOUT;
			htop[2] = tref;
			htop[3] = marsh_msg;
		}

		rc = scheduler_new_local_mail_N(to_proc, env_msg);
	}

	// NB: the success path falls through here too - the sender's
	// pending-timer count must be decremented in all cases
error:
	if (tm->sender != 0)
	{
		assert(tm->sender->pending_timers > 0);
		tm->sender->pending_timers--;
		if (tm->sender->pending_timers == 0 &&
				tm->sender->my_queue == MY_QUEUE_PENDING_TIMERS)
			proc_destroy(tm->sender);	// destroy a zombie process
	}

	return rc;
}
Example #7
0
term_t heap_tuple2(heap_t *hp, term_t e1, term_t e2)
{
	// Build a 2-tuple {e1,e2} on the heap.
	tuple_t *t = (tuple_t *)heap_alloc(hp, sizeof(*t) + 2*sizeof(term_t));
	t->size = 2;
	term_t elts[] = { e1, e2 };
	for (int i = 0; i < 2; i++)
		t->elts[i] = elts[i];
	return tag_tuple(t);
}
Example #8
0
term_t heap_tuple3(heap_t *hp, term_t e1, term_t e2, term_t e3)
{
	// Build a 3-tuple {e1,e2,e3} on the heap.
	tuple_t *t = (tuple_t *)heap_alloc(hp, sizeof(*t) + 3*sizeof(term_t));
	t->size = 3;
	term_t elts[] = { e1, e2, e3 };
	for (int i = 0; i < 3; i++)
		t->elts[i] = elts[i];
	return tag_tuple(t);
}
Example #9
0
term_t heap_tuple4(heap_t *hp, term_t e1, term_t e2, term_t e3, term_t e4)
{
	// Build a 4-tuple {e1,e2,e3,e4} on the heap.
	tuple_t *t = (tuple_t *)heap_alloc(hp, sizeof(*t) + 4*sizeof(term_t));
	t->size = 4;
	term_t elts[] = { e1, e2, e3, e4 };
	for (int i = 0; i < 4; i++)
		t->elts[i] = elts[i];
	return tag_tuple(t);
}
Example #10
0
// maps:put/3 [21]
term_t cbif_put3(proc_t *proc, term_t *regs)
{
	term_t Key   = regs[0];
	term_t Value = regs[1];
	term_t Map   = regs[2];

	if (!is_boxed_map(Map))
		badarg(Map);

	t_map_t *m0 = (t_map_t *)peel_boxed(Map);
	int index = map_key_index(Key, m0->keys);
	if (index >= 0)
	{
		// same as update/3
		int size = map_size(m0);
		int needed = WSIZE(t_map_t) +size;
		uint32_t *p = heap_alloc(&proc->hp, needed);
		t_map_t *m1 = (t_map_t *)p;
		box_map(p, size, m0->keys);
		heap_set_top(&proc->hp, p);

		memcpy(m1->values, m0->values, size *sizeof(term_t));
		m1->values[index] = Value;

		return tag_boxed(m1);
	}
	else
	{
		uint32_t *q = peel_tuple(m0->keys);
		int size = *q++;
		term_t *ks = q;
	
		term_t kvs[] = {Key,Value};

		int needed = 1 +size+1 +2 +size+1;
		uint32_t *p = heap_alloc(&proc->hp, needed);
		term_t keys = tag_tuple(p);
		*p++ = size+1;
		term_t *ks1 = p;
		p += size+1;
		term_t out = tag_boxed(p);
		term_t *vs1 = p +WSIZE(t_map_t);
		box_map(p, size+1, keys);
		heap_set_top(&proc->hp, p);

		int size1 = map_merge(ks, m0->values, size, kvs, 1, ks1, vs1);
		assert(size1 == size+1);
		
		return out;
	}
}
Example #11
0
term_t heap_tuple6(heap_t *hp, term_t e1, term_t e2, term_t e3, term_t e4, term_t e5, term_t e6)
{
	// Build a 6-tuple {e1,...,e6} on the heap.
	tuple_t *t = (tuple_t *)heap_alloc(hp, sizeof(*t) + 6*sizeof(term_t));
	t->size = 6;
	term_t elts[] = { e1, e2, e3, e4, e5, e6 };
	for (int i = 0; i < 6; i++)
		t->elts[i] = elts[i];
	return tag_tuple(t);
}
Example #12
0
term_t heap_tuple8(heap_t *hp, term_t e1, term_t e2, term_t e3, term_t e4, term_t e5, term_t e6, term_t e7, term_t e8)
{
	// Build an 8-tuple {e1,...,e8} on the heap.
	tuple_t *t = (tuple_t *)heap_alloc(hp, sizeof(*t) + 8*sizeof(term_t));
	t->size = 8;
	term_t elts[] = { e1, e2, e3, e4, e5, e6, e7, e8 };
	for (int i = 0; i < 8; i++)
		t->elts[i] = elts[i];
	return tag_tuple(t);
}
Example #13
0
// Delivers an exit signal from 'source' to 'proc'. A 'kill' reason
// terminates the process unconditionally (exit reason becomes 'killed').
// Otherwise a process trapping exits receives {'EXIT',Source,Reason} as a
// message, while a non-trapping process exits unless the reason is
// 'normal'. Returns 0 on success or a negative error code.
int scheduler_signal_exit_N(proc_t *proc, term_t source, term_t reason)
{
	//printk("scheduler_signal_exit_N: pid %pt src %pt reason %pt\n", T(proc->pid), T(source), T(reason));
	if (reason == A_KILL)
	{
		// 'kill' cannot be trapped
		scheduler_dequeue_process(proc);
		scheduler_exit_process(proc, A_KILLED);
		return 0;
	}

	if (proc->trap_exit == A_TRUE)
	{
		// the reason may live on another heap - copy it over
		term_t marshalled_reason = reason;
		int x = heap_copy_terms_N(&proc->hp, &marshalled_reason, 1);
		if (x < 0)
		{
			printk("Cannot marshal the exit reason, replacing with 'undefined'\n");
			marshalled_reason = A_UNDEFINED;
		}

		// build {'EXIT',Pid,Reason}
		uint32_t *htop = heap_alloc_N(&proc->hp, 1 +3);
		if (htop == 0)
			return -NO_MEMORY;
		term_t msg = tag_tuple(htop);
		*htop++ = 3;
		*htop++ = AEXIT__;
		*htop++ = source;
		*htop++ = marshalled_reason;
		heap_set_top(&proc->hp, htop);

		x = scheduler_new_local_mail_N(proc, msg);
		if (x < 0)
			return x;
	}
	else
	{
		// not trapping: a 'normal' exit signal is silently ignored
		if (reason != A_NORMAL)
		{
			scheduler_dequeue_process(proc);
			scheduler_exit_process(proc, reason);
			return 0;
		}
	}

	return 0;
}
Example #14
0
// Walks the active monitor list and removes every monitor that involves
// the late process. When the late process was the monitored side (pid2),
// sends {'DOWN',Ref,process,Pid,Reason} to the watcher (pid1).
// Returns 0 on success or a negative error code.
int notify_monitors_N(term_t late, term_t reason)
{
	// p points at the link being examined, enabling in-place unlinking
	monitor_t **p = &active_monitors;
	while ((*p) != 0)
	{
		if ((*p)->pid1 == late || (*p)->pid2 == late)
		{
			monitor_t *m = *p;
			*p = (*p)->next;
			// NOTE(review): the unlinked monitor_t is not freed here -
			// confirm it is reclaimed elsewhere

			if (m->pid2 == late)
			{
				// notify the watcher
				proc_t *watcher = scheduler_lookup(m->pid1);
				assert(watcher != 0);

				// rebuild the monitor reference on the watcher's heap
				term_t ref = heap_remake_local_ref_N(&watcher->hp, m->ref_id);
				if (ref == noval)
					return -NO_MEMORY;

				// the reason may live on another heap - copy it over
				term_t marshalled_reason = reason;
				int x = heap_copy_terms_N(&watcher->hp, &marshalled_reason, 1);
				if (x < 0)
					return x;
				// build {'DOWN',Ref,process,Pid,Reason}
				uint32_t *htop = heap_alloc_N(&watcher->hp, 1 +5);
				if (htop == 0)
					return -NO_MEMORY;
				heap_set_top(&watcher->hp, htop +1 +5);
				htop[0] = 5;
				htop[1] = ADOWN__;
				htop[2] = ref;
				htop[3] = A_PROCESS;
				htop[4] = late;
				htop[5] = marshalled_reason;

				x = scheduler_new_local_mail_N(watcher, tag_tuple(htop));
				if (x < 0)
					return x;
			}
		}
		else
			p = &(*p)->next;
	}

	return 0;
}
Example #15
0
// maps:remove/2 [20]
term_t cbif_remove2(proc_t *proc, term_t *regs)
{
	term_t Key = regs[0];
	term_t Map = regs[1];

	if (!is_boxed_map(Map))
		badarg(Map);

	t_map_t *m = (t_map_t *)peel_boxed(Map);
	int index = map_key_index(Key, m->keys);
	if (index < 0)
		return Map;

	uint32_t *p = peel_tuple(m->keys);
	int size = *p++;
	term_t *ks = p;

	int needed = 1 +size-1 +WSIZE(t_map_t) +size-1;
	uint32_t *htop = heap_alloc(&proc->hp, needed);
	term_t keys = tag_tuple(htop);
	*htop++ = size-1;
	memcpy(htop, ks, index *sizeof(term_t));
	htop += index;
	memcpy(htop, ks +index +1, (size -index -1) *sizeof(term_t));
	htop += (size -index -1);
	term_t out = tag_boxed(htop);
	t_map_t *m1 = (t_map_t *)htop;
	box_map(htop, size-1, keys);
	heap_set_top(&proc->hp, htop);

	memcpy(m1->values, m->values, index *sizeof(term_t));
	memcpy(m1->values +index +1,
		    m->values +index +1, (size -index -1) *sizeof(term_t));
 
	return out;	
}
Example #16
0
// lwIP receive callback for a TCP outlet. data == 0 means the peer closed
// the connection; otherwise the received bytes are appended to the
// outlet's receive buffer and recv_bake_packets() delivers any complete
// packets. Always returns ERR_OK - errors are reported to the controlling
// process via exit signals instead.
static err_t recv_cb(void *arg, struct tcp_pcb *tcp, struct pbuf *data, err_t err)
{
	phase_expected(PHASE_EVENTS);

	outlet_t *ol = (outlet_t *)arg;
	if (ol == 0)
		return ERR_OK;		// outlet has gone already
	//debug("---> recv_cb(arg 0x%pp, tcp 0x%pp, %d, err %d)\n",
	//				arg, tcp, (data == 0) ?0: data->tot_len, err);
	assert(ol->tcp == tcp);

	// a pending gen_tcp:recv caller takes precedence over the owner
	term_t pid = (ol->cr_in_progress) ?ol->cr_reply_to :ol->owner;
	proc_t *cont_proc = scheduler_lookup(pid);
	if (cont_proc == 0)
	{
		//debug("recv_cb: nowhere to send - discard\n");
		if (data != 0)
			pbuf_free(data);
		return ERR_OK;
	}

	if (data == 0)
	{
		// the peer closed the connection
		if (ol->active != INET_PASSIVE)
		{
			// deliver {tcp_closed,Sock}
			uint32_t *p = heap_alloc_N(&cont_proc->hp, 1 +2);
			if (p == 0)
				scheduler_signal_exit_N(cont_proc, ol->oid, A_NO_MEMORY);
			else
			{
				p[0] = 2;
				p[1] = A_TCP_CLOSED;
				p[2] = ol->oid;
				heap_set_top(&cont_proc->hp, p +1 +2);
				int x = scheduler_new_local_mail_N(cont_proc, tag_tuple(p));
				if (x < 0)
					scheduler_signal_exit_N(cont_proc, ol->oid, err_to_term(x));
			}

			// {active,once} reverts to passive after one message
			if (ol->active == INET_ONCE)
				ol->active = INET_PASSIVE;
		}
		else if (ol->cr_in_progress)
		{
			// fail the pending recv with {error,closed}
			cr_cancel_deferred(ol);
			inet_async2(ol->oid, ol->cr_reply_to, ASYNC_REF, A_ERROR, A_CLOSED);
		}

		// No more data will be received, otherwise it is a normal connection.
		// No need to do tcp_close() or anything.
		ol->peer_close_detected = 1;
	}
	else
	{
		uint16_t len = data->tot_len;
		if (len > ol->recv_bufsize -ol->recv_buf_off)
		{
			debug("recv_cb: len %d recv_bufsize %d recv_buf_off %d\n",
									len, ol->recv_bufsize, ol->recv_buf_off);
			debug("recv_cb: received data do not fit recv_buffer - truncated\n");
			len = ol->recv_bufsize -ol->recv_buf_off;	// truncation
		}

		//debug("---> recv_cb: recv_bufsize %d recv_buf_off %d\n\t\ttot_len %d len %d\n", 
		//		ol->recv_bufsize, ol->recv_buf_off, data->tot_len, len);
		// append the (possibly truncated) data to the receive buffer
		pbuf_copy_partial(data, ol->recv_buffer +ol->recv_buf_off, len, 0);
		ol->recv_buf_off += len;

		// A more natural place to acknowledge the data when complete packets
		// are baked.
		//tcp_recved(ol->tcp, len);

		pbuf_free(data);
		int x = recv_bake_packets(ol, cont_proc);
		if (x < 0)
			scheduler_signal_exit_N(cont_proc, ol->oid, err_to_term(x));
	}

	return ERR_OK;
}
Example #17
0
// maps:from_list/1 [26]
term_t cbif_from_list1(proc_t *proc, term_t *regs)
{
	term_t List = regs[0];
	if (!is_list(List))
		badarg(List);

	int len = list_len(List);
	term_t ks[len];		//XXX: imminent stack overflow
	term_t vs[len];
	int n = 0;

	term_t l = List;
	while (is_cons(l))
	{
		term_t *cons = peel_cons(l);
		if (!is_tuple(cons[0]))
			badarg(List);
		uint32_t *p = peel_tuple(cons[0]);
		if (*p++ != 2)
			badarg(List);
		term_t k = *p++;
		term_t v = *p++;

		if (n == 0 || is_term_smaller(k, ks[0]))
		{
			memmove(ks +1, ks, n *sizeof(term_t));
			memmove(vs +1, vs, n *sizeof(term_t));
			ks[0] = k;
			vs[0] = v;
			n++;
		}
		else
		{
			term_t *alpha = ks;
			term_t *beta = ks +n;
			// *alpha =< k
			while (beta > alpha+1)
			{
				term_t *mid = alpha + (beta -alpha +1)/2;
				if (is_term_smaller(k, *mid))
					beta = mid;
				else
					alpha = mid;
			}
			assert(beta == alpha+1);
			int index = alpha -ks;
			if (k == *alpha || are_terms_equal(k, *alpha, 1))
				vs[index] = v;
			else
			{
				index++;	// ks[index] > k now
				memmove(ks +index +1, ks +index, (n -index) *sizeof(term_t));
				memmove(vs +index +1, vs +index, (n -index) *sizeof(term_t));
				ks[index] = k;
				vs[index] = v;
				n++;
			}
		}
		l = cons[1];
	}

	if (!is_nil(l))
		badarg(List);

	int needed = 1 +n + WSIZE(t_map_t) +n;
	uint32_t *htop = heap_alloc(&proc->hp, needed);
	term_t keys = tag_tuple(htop);
	*htop++ = n;
	memcpy(htop, ks, n *sizeof(term_t));
	htop += n;
	term_t out = tag_boxed(htop);
	term_t *values = htop +WSIZE(t_map_t);
	box_map(htop, n, keys);
	heap_set_top(&proc->hp, htop);
	memcpy(values, vs, n *sizeof(term_t));

	return out;	
}
Example #18
0
// maps:merge/2 [23]
term_t cbif_merge2(proc_t *proc, term_t *regs)
{
	term_t Map1 = regs[0];
	term_t Map2 = regs[1];

	if (!is_boxed_map(Map1))
		badarg(Map1);
	if (!is_boxed_map(Map2))
		badarg(Map2);

	t_map_t *m1 = (t_map_t *)peel_boxed(Map1);
	uint32_t *p1 = peel_tuple(m1->keys);
	int size1 = *p1++;
	term_t *ks1 = p1;
	term_t *vs1 = m1->values;
	t_map_t *m2 = (t_map_t *)peel_boxed(Map2);
	uint32_t *p2 = peel_tuple(m2->keys);
	int size2 = *p2++;
	term_t *ks2 = p2;
	term_t *vs2 = m2->values;

	term_t mks[size1+size2];	//XXX: stack overflow
	term_t mvs[size1+size2];

	term_t *ks3 = mks;
	term_t *vs3 = mvs;

	int ss1 = size1;
	int ss2 = size2;

	int size = 0;
	while (size1 > 0 && size2 > 0)
	{
		term_t a = *ks1;
		term_t b = *ks2;
		if (is_term_smaller(a, b))
		{
			*ks3++ = *ks1++;
			*vs3++ = *vs1++;
			size1--;
		}
		else if (a == b || are_terms_equal(a, b, 1))
		{
			ks1++; vs1++;
			size1--;
			*ks3++ = *ks2++;
			*vs3++ = *vs2++;
			size2--;
		}
		else
		{
			*ks3++ = *ks2++;
			*vs3++ = *vs2++;
			size2--;
		}
		size++;
	}

	while (size1-- > 0)
	{
		*ks3++ = *ks1++;
		*vs3++ = *vs1++;
		size++;
	}

	while (size2-- > 0)
	{
		*ks3++ = *ks2++;
		*vs3++ = *vs2++;
		size++;
	}

	if (size == ss1 || size == ss2)
	{
		// reuse keys
		term_t keys = (size == ss1) ?m1->keys :m2->keys;
		int needed = WSIZE(t_map_t) +size;
		uint32_t *p = heap_alloc(&proc->hp, needed);
		term_t out = tag_boxed(p);
		term_t *values = p +WSIZE(t_map_t);
		box_map(p, size, keys);
		heap_set_top(&proc->hp, p);
		memcpy(values, mvs, size *sizeof(term_t));
		return out;
	}
	else
	{
		// new keys
		int needed = 1 +size +WSIZE(t_map_t) +size;
		uint32_t *p = heap_alloc(&proc->hp, needed);
		term_t keys = tag_tuple(p);
		*p++ = size;
		memcpy(p, mks, size *sizeof(term_t));
		term_t out = tag_boxed(p);
		term_t *values = p +WSIZE(t_map_t);
		box_map(p, size, keys);
		heap_set_top(&proc->hp, p);
		memcpy(values, mvs, size *sizeof(term_t));
		return out;
	}
}
Example #19
0
// lwIP sent callback for a TCP outlet: acknowledges 'len' bytes of the
// send buffer, writes the next chunk if more data is queued, and replies
// to a pending send or empty-queue request when complete. Replies are
// deferred to local variables because inet_reply() may close the outlet.
static err_t sent_cb(void *arg, struct tcp_pcb *tcp, uint16_t len)
{
	phase_expected(PHASE_EVENTS);

	outlet_t *ol = (outlet_t *)arg;
	if (ol == 0)
		return ERR_OK;		// outlet has gone already
	assert(ol->tcp == tcp);

	//debug("sent_cb: len %d\n", len);
	
	// inet_reply() may close the outlet
	term_t saved_oid = ol->oid;
	term_t send_reply_to = noval;
	term_t send_error = noval;
	term_t empty_queue_reply_to = noval;

	// account for the newly acknowledged bytes
	assert(ol->send_in_progress);
	assert(ol->send_buf_left >= len);
	ol->send_buf_left -= len;
	ol->send_buf_ack += len;
	assert(ol->send_buf_ack <= ol->send_buf_off);

	// everything written so far has been acknowledged
	if (ol->send_buf_ack == ol->send_buf_off)
	{
		if (ol->send_buf_left > 0)
		{
			// write more
			uint16_t write_len = (ol->send_buf_left > TCP_SND_BUF)
				?TCP_SND_BUF
				:ol->send_buf_left;

			ol->send_buf_off += write_len;

			//debug("ol_tcp_send: tcp_write(%d)\n", write_len);
			int rc = tcp_write(ol->tcp, ol->send_buffer +ol->send_buf_ack, write_len, TCP_WRITE_FLAG_COPY);
			if (rc != ERR_OK)
			{
				// report the write failure to the sender
				send_cancel_deferred(ol);
				send_reply_to = ol->send_reply_to;
				send_error = lwip_err_to_term(rc);
			}

			// Kick the TCP/IP stack
			tcp_output(ol->tcp);
		}
		else
		{
			// the whole buffer was sent - acknowledge the send
			send_cancel_deferred(ol);
			send_reply_to = ol->send_reply_to;
		}
	}

	if (ol->empty_queue_in_progress && tcp_sndqueuelen(tcp) == 0)
	{
		ol->empty_queue_in_progress = 0;
		empty_queue_reply_to = ol->empty_queue_reply_to;
	}

	// deliver the deferred replies now that outlet state is consistent
	if (send_reply_to != noval && send_error != noval)
		inet_reply_error(saved_oid, send_reply_to, send_error);
	else if (send_reply_to != noval)
		inet_reply(saved_oid, send_reply_to, A_OK);
	
	if (empty_queue_reply_to != noval)
	{
		// non-standard reply
		proc_t *caller = scheduler_lookup(empty_queue_reply_to);
		if (caller != 0)
		{
			// {empty_out_q,S}
			uint32_t *p = heap_alloc_N(&caller->hp, 1 +2);
			if (p == 0)
				scheduler_signal_exit_N(caller, saved_oid, A_NO_MEMORY);
			else
			{
				heap_set_top(&caller->hp, p +1 +2);
				p[0] = 2;
				p[1] = A_EMPTY_OUT_Q;
				p[2] = saved_oid;
				term_t msg = tag_tuple(p);

				int x = scheduler_new_local_mail_N(caller, msg);
				if (x < 0)
					scheduler_signal_exit_N(caller, saved_oid, err_to_term(x));
			}
		}
	}

	return ERR_OK;
}
Example #20
0
// Deep-copies 'num' terms into the heap region starting at htop, updating
// the terms array in place. Recursion is avoided by pushing sub-term runs
// onto an explicit stack (DEFER_COPY) and draining them iteratively.
// Copied proc_bins are linked onto *pbs for refcounting. Returns the new
// heap top; the caller is responsible for having reserved enough space.
static uint32_t *terms_copy(stack_t *stack, term_t *terms, int num,
								uint32_t *htop, t_proc_bin_t **pbs)
{
next_term:
	// current run exhausted - resume a deferred run, if any
	if (num == 0)
	{
		if (stack_is_empty(stack))
			return htop;
		ets_deferred_copy_t *pop = (ets_deferred_copy_t *)stack_pop(stack);
		terms = pop->terms;
		num = pop->num;
		goto next_term;
	}

	term_t t = terms[0];
	// immediates are copied verbatim - nothing to allocate
	if (is_immed(t))
	{
		terms++;
		num--;
		goto next_term;
	}

	term_t copy = noval;
	if (is_cons(t))
	{
		// copy the list spine iteratively; heads and non-cons tails are
		// deferred for later copying
		term_t *cons = peel_cons(t);
		copy = tag_cons(htop);
		term_t *new_cons = htop;
		do {
			new_cons[0] = cons[0];
			new_cons[1] = cons[1];
			htop += 2;

			if (!is_immed(new_cons[0]))
				DEFER_COPY(stack, new_cons, 1);

			term_t tail = new_cons[1];
			if (is_immed(tail))
				break;

			if (!is_cons(tail))
			{
				DEFER_COPY(stack, new_cons +1, 1);
				break;
			}

			// the next cons cell will be built right after this one
			new_cons[1] = tag_cons(htop);

			cons = peel_cons(tail);
			new_cons = htop;
		} while (1);
	}
	else if (is_tuple(t))
	{
		uint32_t *p = peel_tuple(t);
		int arity = *p++;
		if (arity == 0)
			copy = ZERO_TUPLE;	// the empty tuple is shared, not copied
		else
		{
			// copy elements shallowly, then defer their deep copy
			copy = tag_tuple(htop);
			*htop++ = arity;
			memcpy(htop, p, arity *sizeof(term_t));
			DEFER_COPY(stack, htop, arity);
			htop += arity;
		}
	}
	else
	{
		// boxed terms - dispatch on the subtag
		assert(is_boxed(t));
		uint32_t *tdata = peel_boxed(t);
		copy = tag_boxed(htop);
		switch (boxed_tag(tdata))
		{
		case SUBTAG_POS_BIGNUM:
		case SUBTAG_NEG_BIGNUM:
		{
			// flat copy; size derived from the number of 16-bit digits
			bignum_t *bn = (bignum_t *)tdata;
			int wsize = WSIZE(bignum_t) + (bn->used*sizeof(uint16_t) +3) /4;
			memcpy(htop, tdata, wsize *sizeof(uint32_t));
			htop += wsize;
			break;
		}
		case SUBTAG_FLOAT:
			EASY_COPY(t_float_t);
			break;

		case SUBTAG_FUN:
		{
			// flat copy, then defer the free variables
			t_fun_t *new_fun = (t_fun_t *)htop;
			int num_free = fun_num_free(tdata);
			int wsize = WSIZE(t_fun_t) + num_free;
			memcpy(new_fun, tdata, wsize *sizeof(uint32_t));
			DEFER_COPY(stack, new_fun->frozen, num_free);
			htop += wsize;
			break;
		}
		case SUBTAG_EXPORT:
			EASY_COPY(t_export_t);
			break;

		case SUBTAG_PID:
			EASY_COPY(t_long_pid_t);
			break;

		case SUBTAG_OID:
			EASY_COPY(t_long_oid_t);
			break;

		case SUBTAG_REF:
			EASY_COPY(t_long_ref_t);
			break;

		case SUBTAG_PROC_BIN:
		{
			t_proc_bin_t *pb = (t_proc_bin_t *)htop;
			memcpy(htop, tdata, sizeof(t_proc_bin_t));

			// 1+ bin node refc
			proc_bin_link(pbs, pb, 0);

			htop += WSIZE(t_proc_bin_t);
			break;
		}
		case SUBTAG_HEAP_BIN:
		{
			t_heap_bin_t *hb = (t_heap_bin_t *)tdata;
			int wsize = WSIZE(t_heap_bin_t) + (hb->byte_size +3) /4;
			memcpy(htop, tdata, wsize*sizeof(uint32_t));
			htop += wsize;
			break;
		}
		case SUBTAG_MATCH_CTX:
		{
			// flat copy, then defer the parent binary
			t_match_ctx_t *new_mc = (t_match_ctx_t *)htop;
			memcpy(new_mc, tdata, sizeof(t_match_ctx_t));
			DEFER_COPY(stack, &new_mc->parent, 1);
			htop += WSIZE(t_match_ctx_t);
			break;
		}
		default: // SUBTAG_SUB_BIN
		{
			assert(boxed_tag(tdata) == SUBTAG_SUB_BIN);
			t_sub_bin_t *new_sb = (t_sub_bin_t *)htop;
			memcpy(new_sb, tdata, sizeof(t_sub_bin_t));
			DEFER_COPY(stack, &new_sb->parent, 1);
			htop += WSIZE(t_sub_bin_t);
			break;
		}
		}
	}

	// patch the original slot to point at the copy, then continue the run
	assert(copy != noval);
	*terms++ = copy;
	num--;
	goto next_term;
}
Example #21
0
//NB: called both from a callback and a BIF - do not send signals
static int recv_bake_packets(outlet_t *ol, proc_t *cont_proc)
{
	term_t reason = noval;
	term_t packet = noval;
	term_t active_tag = A_TCP;

more_packets:
	if (ol->cr_in_progress || ol->active != INET_PASSIVE)
	{
		if (ol->packet == TCP_PB_RAW &&
			ol->recv_expected_size != 0 &&
			ol->recv_buf_off < ol->recv_expected_size)
				packet = A_MORE;
		else
		{
			uint32_t more_len;

			uint32_t adj_len = ol->recv_buf_off;
			// take into account expected_size for raw packets
			if (ol->packet == TCP_PB_RAW && ol->recv_expected_size != 0)
				adj_len = ol->recv_expected_size;
				
			bits_t bs;
			bits_init_buf(ol->recv_buffer, adj_len, &bs);
			packet = decode_packet_N(ol->packet, &bs, noval, ol->binary,
						&reason, &more_len, ol->packet_size, 0, &cont_proc->hp);

			if (packet == A_MORE && more_len != 0 && more_len > ol->recv_bufsize)
				return -TOO_LONG;

			if (packet != A_MORE && packet != noval)
			{
				uint32_t left = (bs.ends -bs.starts) /8;
				uint32_t consumed = adj_len -left;
				memmove(ol->recv_buffer, ol->recv_buffer +consumed, ol->recv_buf_off -consumed);
				ol->recv_buf_off -= consumed;
				//debug("---> recv_bake_packets: consumed %d left %d cr_in_progress %d active %d\n",
				//		consumed, left, ol->cr_in_progress, ol->active);

				// Is it safe to acknowledge the data here, outside of the
				// receive callback?
				tcp_recved(ol->tcp, consumed);

				if (ol->packet == TCP_PB_HTTP || ol->packet == TCP_PB_HTTP_BIN)
					active_tag = A_HTTP;

				if (ol->packet == TCP_PB_HTTP)
					ol->packet = TCP_PB_HTTPH;
				else if (ol->packet == TCP_PB_HTTP_BIN)
					ol->packet = TCP_PB_HTTPH_BIN;
				else if (ol->packet == TCP_PB_HTTPH && packet == A_HTTP_EOH)
					ol->packet = TCP_PB_HTTP;
				else if (ol->packet == TCP_PB_HTTPH_BIN && packet == A_HTTP_EOH)
					ol->packet = TCP_PB_HTTP_BIN;
			}
		}
	}

	if (packet != A_MORE && ol->cr_in_progress)
	{
		cr_cancel_deferred(ol);
		term_t a = (packet == noval) ?A_ERROR :A_OK;
		term_t b = (packet == noval) ?reason :packet;
		inet_async2(ol->oid, ol->cr_reply_to, ASYNC_REF, a, b);
	}
	else if (packet != A_MORE && ol->active != INET_PASSIVE)
	{
		uint32_t *p = heap_alloc_N(&cont_proc->hp, 1 +3);
		if (p == 0)
			return -NO_MEMORY;
		p[0] = 3;
		p[1] = (packet == noval) ?A_TCP_ERROR :active_tag;
		p[2] = ol->oid;
		p[3] = (packet == noval) ?reason :packet;
		heap_set_top(&cont_proc->hp, p +1 +3);
		int x = scheduler_new_local_mail_N(cont_proc, tag_tuple(p));
		if (x < 0)
			return x;

		if (ol->active == INET_ONCE && !is_tuple(packet))	// http_eoh
			ol->active = INET_PASSIVE;
		else if (ol->recv_buf_off > 0 && packet != noval)
			goto more_packets;
	}

	return 0;
}