Example 1
0
void print_opnd_type(Opnd o) {
  if(is_reg(o)) {
    if(is_hard_reg(o)) {
      std::cout << "(is hard reg)";
    }
    else if(is_virtual_reg(o)) {
      std::cout << "(is virtual reg)[$vr"<<get_reg(o)<<"]";
    }
    else {
      std::cout << "(is undeterminate reg)";
    }
  }
  else if(is_immed(o)) {
    if(is_immed_integer(o)) {
      std::cout << "(is immed integer)";
    }
    else if(is_immed_string(o)) {
      std::cout << "(is immed string)";
    }
    else {
      std::cout << "(is undeterminate immed)";
    }
  }
  else if(is_addr(o)) {
    if(is_addr_sym(o)) {
      FormattedText ft;
      Sym *symbol = get_sym(o);
      symbol->print(ft);
      // Avoid the const-stripping C cast; operator<< accepts a const char*.
      const char* addr_name = (symbol->get_name()).c_str();

      std::cout << "(is addr sym)["<<addr_name<<"]";
    }
    else if(is_addr_exp(o)) {
      std::cout << "(is addr exp)";
    }
    else {
      std::cout << "(is undeterminate addr)";
    }
  }
  else if(is_var(o)) {
    FormattedText ft;
    VarSym *vsym = get_var(o);
    vsym->print(ft);
    // Same as above: no need to cast away const from c_str().
    const char* var_name = (vsym->get_name()).c_str();

    std::cout << "(is var)["<<var_name<<"]";
  }
  else if(is_null(o)) {
    std::cout << "(is null)";
  }
  else {
    std::cout << "(I don't know) !!!)";
  }

  return;
}
Example 2
0
BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
{
    /*
     * This is not a signal. See comment above.
     */
    Eterm res;
    erts_aint_t data;
    Port* prt;

    prt = data_lookup_port(BIF_P, BIF_ARG_1);
    if (!prt)
        BIF_ERROR(BIF_P, BADARG);

    data = erts_smp_atomic_read_ddrb(&prt->data);
    if (data == (erts_aint_t)NULL)
        BIF_ERROR(BIF_P, BADARG);  /* Port terminated by racing thread */

    if ((data & 0x3) != 0) {
	res = (Eterm) (UWord) data;
	ASSERT(is_immed(res));
    }
    else {
	ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
	Eterm *hp = HAlloc(BIF_P, pdhp->hsize);
	res = copy_struct(pdhp->data, pdhp->hsize, &hp, &MSO(BIF_P));
    }

    BIF_RET(res);
}
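
The `(data & 0x3) != 0` test in `port_get_data_1` (and in the other port-data functions further down) relies on the fact that an immediate Eterm carries a non-zero tag in its two lowest bits, while the alternative value stored in `prt->data` is a pointer obtained from the allocator, which is word-aligned and therefore has both low bits clear. Below is a minimal, self-contained sketch of that low-bit tagging idea; it is not ERTS code, and every name in it is invented for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* A slot that holds either a small "immediate" value (tagged in the low
 * two bits) or a pointer to heap-allocated data (aligned, low bits 0). */
typedef uintptr_t slot_t;

#define TAG_IMMED 0x3u

static slot_t make_immed(unsigned v)  { return ((uintptr_t)v << 2) | TAG_IMMED; }
static slot_t make_boxed(void *p)     { assert(((uintptr_t)p & 0x3) == 0); return (uintptr_t)p; }
static int    is_immed_slot(slot_t s) { return (s & 0x3) != 0; }
static unsigned immed_value(slot_t s) { return (unsigned)(s >> 2); }
static void  *boxed_ptr(slot_t s)     { return (void *)s; }

int main(void)
{
    int *heap_val = malloc(sizeof *heap_val);   /* malloc returns aligned memory */
    *heap_val = 42;

    slot_t a = make_immed(7);
    slot_t b = make_boxed(heap_val);

    /* Same discrimination as (data & 0x3) != 0 in the BIFs above. */
    printf("a: %s %u\n", is_immed_slot(a) ? "immed" : "boxed", immed_value(a));
    printf("b: %s %d\n", is_immed_slot(b) ? "immed" : "boxed", *(int *)boxed_ptr(b));

    free(heap_val);
    return 0;
}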
Example 3
0
static void
delete_table(Process* c_p, HashTable* table)
{
    Uint idx = table->first_to_delete;
    Uint n = table->num_to_delete;

    /*
     * There are no longer any references to this hash table.
     *
     * Any literals it points to can be queued for deletion,
     * and the table itself can be deallocated.
     */

#ifdef DEBUG
    if (n == 1) {
        ASSERT(is_tuple_arity(table->term[idx], 2));
    }
#endif

    while (n > 0) {
        Eterm term = table->term[idx];

        if (is_tuple_arity(term, 2)) {
            if (is_immed(tuple_val(term)[2])) {
                erts_release_literal_area(term_to_area(term));
            } else {
                erts_queue_release_literals(c_p, term_to_area(term));
            }
        }
        idx++, n--;
    }
    erts_free(ERTS_ALC_T_PERSISTENT_TERM, table);
}
Example 4
0
static void
dump_element(int to, void *to_arg, Eterm x)
{
    if (is_list(x)) {
	erts_print(to, to_arg, "H" WORD_FMT, list_val(x));
    } else if (is_boxed(x)) {
	erts_print(to, to_arg, "H" WORD_FMT, boxed_val(x));
    } else if (is_immed(x)) {
	if (is_atom(x)) {
	    unsigned char* s = atom_tab(atom_val(x))->name;
	    int len = atom_tab(atom_val(x))->len;
	    int i;

	    erts_print(to, to_arg, "A%X:", atom_tab(atom_val(x))->len);
	    for (i = 0; i < len; i++) {
		erts_putc(to, to_arg, *s++);
	    }
	} else if (is_small(x)) {
	    erts_print(to, to_arg, "I%T", x);
	} else if (is_pid(x)) {
	    erts_print(to, to_arg, "P%T", x);
	} else if (is_port(x)) {
	    erts_print(to, to_arg, "p<%bpu.%bpu>",
		       port_channel_no(x), port_number(x));
	} else if (is_nil(x)) {
	    erts_putc(to, to_arg, 'N');
	}
    }
}
Example 5
0
void
erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra)
{
    lck->id = erts_lc_get_lock_order_id(name);
    lck->extra = extra;
    ASSERT(is_immed(lck->extra));
    lck->flags = flags;
    lck->inited = ERTS_LC_INITITALIZED;
}
Example 6
0
int etimer_add(uint64_t ref_id, uint64_t timeout,
			term_t dst, term_t msg, proc_t *sender, int enveloped)
{
	assert(is_atom(dst) || is_short_pid(dst));

	if (free_timers == 0)
	{
		memnode_t *node = nalloc_N(QUICK_SIZE -sizeof(memnode_t));
		if (node == 0)
			return -NO_MEMORY;

		node->next = etimer_nodes;
		etimer_nodes = node;

		etimer_t *ptr = (etimer_t *)node->starts;
		while (ptr +1 <= (etimer_t *)node->ends)
		{
			ptr->next = free_timers;
			free_timers = ptr;
			ptr++;
		}
		assert(free_timers != 0);
	}

	etimer_t *tm = free_timers;
	free_timers = tm->next;

	tm->ref_id = ref_id;
	tm->timeout = timeout;
	tm->dst = dst;
	tm->msg = msg;

	if (is_immed(msg))
		tm->sender = 0;
	else
	{
		sender->pending_timers++;
		tm->sender = sender;
	}

	tm->enveloped = enveloped;
	tm->fire = erlang_fire;

	etimer_t **ref = &active_timers;
	etimer_t *ptr = active_timers;

	while (ptr != 0 && ptr->timeout < timeout)
	{
		ref = &ptr->next;
		ptr = ptr->next;
	}

	tm->next = ptr;
	*ref = tm;

	return 0;
}
Example 7
0
void
erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Eterm extra)
{
    lck->id = erts_lc_get_lock_order_id(name);
    lck->check_order = erts_lc_is_check_order(name);
    lck->extra = extra;
    ASSERT(is_immed(lck->extra));
    lck->flags = flags;
    lck->taken_options = 0;
    lck->inited = ERTS_LC_INITITALIZED;
}
Example 8
0
Uint
erts_port_data_size(Port *prt)
{
    erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data);

    if ((data & 0x3) != 0) {
	ASSERT(is_immed((Eterm) (UWord) data));
	return (Uint) 0;
    }
    else {
	ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
	return (Uint) sizeof(ErtsPortDataHeap) + (pdhp->hsize-1)*sizeof(Eterm);
    }
}
Example 9
0
ErlOffHeap *
erts_port_data_offheap(Port *prt)
{
    erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data);

    if ((data & 0x3) != 0) {
	ASSERT(is_immed((Eterm) (UWord) data));
	return NULL;
    }
    else {
	ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
	return &pdhp->off_heap;
    }
}
Example 10
0
static ERTS_INLINE void
cleanup_old_port_data(erts_aint_t data)
{
    if ((data & 0x3) != 0) {
	ASSERT(is_immed((Eterm) data));
    }
    else {
	ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
	size_t size;
	ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
	size = sizeof(ErtsPortDataHeap) + (pdhp->hsize-1)*sizeof(Eterm);
	erts_schedule_thr_prgr_later_cleanup_op(free_port_data_heap,
						(void *) pdhp,
						&pdhp->later_op,
						size);
    }
}
Example 11
0
BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
{
    /*
     * This is not a signal. See comment above.
     */
    erts_aint_t data;
    Port* prt;

    prt = data_lookup_port(BIF_P, BIF_ARG_1);
    if (!prt)
        BIF_ERROR(BIF_P, BADARG);

    if (is_immed(BIF_ARG_2)) {
	data = (erts_aint_t) BIF_ARG_2;
	ASSERT((data & 0x3) != 0);
    }
    else {
	ErtsPortDataHeap *pdhp;
	Uint hsize;
	Eterm *hp;

	hsize = size_object(BIF_ARG_2);
	pdhp = erts_alloc(ERTS_ALC_T_PORT_DATA_HEAP,
			  sizeof(ErtsPortDataHeap) + (hsize-1)*sizeof(Eterm));
	hp = &pdhp->heap[0];
	pdhp->off_heap.first = NULL;
	pdhp->off_heap.overhead = 0;
	pdhp->hsize = hsize;
	pdhp->data = copy_struct(BIF_ARG_2, hsize, &hp, &pdhp->off_heap);
	data = (erts_aint_t) pdhp;
	ASSERT((data & 0x3) == 0);
    }

    data = erts_smp_atomic_xchg_wb(&prt->data, data);

    if (data == (erts_aint_t)NULL) {
	/* Port terminated by racing thread */
	data = erts_smp_atomic_xchg_wb(&prt->data, data);
	ASSERT(data != (erts_aint_t)NULL);
	cleanup_old_port_data(data);
	BIF_ERROR(BIF_P, BADARG);
    }
    cleanup_old_port_data(data);
    BIF_RET(am_true);
}
Example 12
0
Eterm erts_port_data_read(Port* prt)
{
    Eterm res;
    erts_aint_t data;

    data = erts_smp_atomic_read_ddrb(&prt->data);
    if (data == (erts_aint_t)NULL)
        return am_undefined;  /* Port terminated by racing thread */

    if ((data & 0x3) != 0) {
	res = (Eterm) (UWord) data;
	ASSERT(is_immed(res));
    }
    else {
	ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
	res = pdhp->data;
    }
    return res;
}
Example 13
0
static int
dirty_send_message(Process *c_p, Eterm to, Eterm tag)
{
    ErtsProcLocks c_p_locks, rp_locks;
    Process *rp, *real_c_p;
    Eterm msg, *hp;
    ErlOffHeap *ohp;
    ErtsMessage *mp;

    ASSERT(is_immed(tag));

    real_c_p = erts_proc_shadow2real(c_p);
    if (real_c_p != c_p)
	c_p_locks = 0;
    else
	c_p_locks = ERTS_PROC_LOCK_MAIN;

    ASSERT(real_c_p->common.id == c_p->common.id);

    rp = erts_pid2proc_opt(real_c_p, c_p_locks,
			   to, 0,
			   ERTS_P2P_FLG_INC_REFC);

    if (!rp)
	return 0;

    rp_locks = 0;
    mp = erts_alloc_message_heap(rp, &rp_locks, 3, &hp, &ohp);

    msg = TUPLE2(hp, tag, c_p->common.id);
    erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);

    if (rp == real_c_p)
	rp_locks &= ~c_p_locks;
    if (rp_locks)
	erts_proc_unlock(rp, rp_locks);

    erts_proc_dec_refc(rp);

    return 1;
}
Example 14
0
//cons - tuple - binary - fun
term_t heap_marshal(term_t t, heap_t *hp)
{
	term_box_t *box;
	if (is_immed(t))
		return t;
	box = peel(t);

	if (is_cons(t))
	{
		term_t first = nil;
		term_t last = nil;

		do {
			term_box_t *cb = peel(t);
			term_t v = heap_marshal(cb->cons.head, hp);
			cons_up(first, last, v, hp);
			t = cb->cons.tail;
		} while (is_cons(t));
		
		if (t != nil)
			peel(last)->cons.tail = heap_marshal(t, hp);

		return first;
	}
	else if (is_tuple(t))
	{
		int n = box->tuple.size;
		term_t tuple = heap_tuple(hp, n);
		term_box_t *tb = peel(tuple);
		int i;
		for (i = 0; i < n; i++)
			tb->tuple.elts[i] = heap_marshal(box->tuple.elts[i], hp);

		return tuple;
	}
	else if (is_binary(t))
	{
		//NB: for shared binaries parent not copied; shared becomes root

		term_t binary = heap_binary(hp, box->binary.bit_size, box->binary.data);
		return binary;
	}
	else if (is_bignum(t))
	{
		bignum_t *bb = (bignum_t *)peel(t);
		term_t biggie = heap_bignum(hp, bb->sign, bb->used, bb->dp);
		return biggie;
	}
	else if (is_float(t))
	{
		term_t f = heap_float(hp, float_value(t));
		return f;
	}
	else if (is_fun(t))
	{
		term_t fun = heap_fun(hp,
			box->fun.module,
			box->fun.function,
			box->fun.arity,
			box->fun.uniq,
			box->fun.index,
			heap_marshal(box->fun.frozen, hp));
		return fun;
	}
	else // long_id
	{
		term_t id;
		assert(is_long_id(t));
		id = heap_long_id(hp,
			box->long_id.node,
			box->long_id.serial,
			box->long_id.tag_creation);
		return id;
	}
}
Example 15
0
/*
 * Moves content of message buffer attached to a message into a heap.
 * The message buffer is deallocated.
 */
void
erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
{
    struct erl_off_heap_header* oh;
    Eterm term, token, *fhp, *hp;
    Sint offs;
    Uint sz;
    ErlHeapFragment *bp;
#ifdef USE_VM_PROBES
    Eterm utag;
#endif

#ifdef HARD_DEBUG
    struct erl_off_heap_header* dbg_oh_start = off_heap->first;
    Eterm dbg_term, dbg_token;
    ErlHeapFragment *dbg_bp;
    Uint *dbg_hp, *dbg_thp_start;
    Uint dbg_term_sz, dbg_token_sz;
#ifdef USE_VM_PROBES
    Eterm dbg_utag;
    Uint dbg_utag_sz;
#endif
#endif

    bp = msg->data.heap_frag;
    term = ERL_MESSAGE_TERM(msg);
    token = ERL_MESSAGE_TOKEN(msg);
#ifdef USE_VM_PROBES
    utag = ERL_MESSAGE_DT_UTAG(msg);
#endif
    if (!bp) {
#ifdef USE_VM_PROBES
	ASSERT(is_immed(term) && is_immed(token) && is_immed(utag));
#else
	ASSERT(is_immed(term) && is_immed(token));
#endif
	return;
    }

#ifdef HARD_DEBUG
    dbg_term_sz = size_object(term);
    dbg_token_sz = size_object(token);
    dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz);
#ifdef USE_VM_PROBES
    dbg_utag_sz = size_object(utag);
    dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz + dbg_utag_sz );
#endif
    /*ASSERT(dbg_term_sz + dbg_token_sz == erts_msg_used_frag_sz(msg));
      Copied size may be smaller due to removed SubBins or garbage.
      Copied size may be larger due to duplicated shared terms.
    */
    dbg_hp = dbg_bp->mem;
    dbg_term = copy_struct(term, dbg_term_sz, &dbg_hp, &dbg_bp->off_heap);
    dbg_token = copy_struct(token, dbg_token_sz, &dbg_hp, &dbg_bp->off_heap);
#ifdef USE_VM_PROBES
    dbg_utag = copy_struct(utag, dbg_utag_sz, &dbg_hp, &dbg_bp->off_heap);
#endif
   dbg_thp_start = *hpp;
#endif

    if (bp->next != NULL) {
	move_multi_frags(hpp, off_heap, bp, msg->m, 
#ifdef USE_VM_PROBES
			 3
#else
			 2
#endif
			 );
	goto copy_done;
    }

    OH_OVERHEAD(off_heap, bp->off_heap.overhead);
    sz = bp->used_size;

    ASSERT(is_immed(term) || in_heapfrag(ptr_val(term),bp));
    ASSERT(is_immed(token) || in_heapfrag(ptr_val(token),bp));

    fhp = bp->mem;
    hp = *hpp;
    offs = hp - fhp;

    oh = NULL;
    while (sz--) {
	Uint cpy_sz;
	Eterm val = *fhp++;

	switch (primary_tag(val)) {
	case TAG_PRIMARY_IMMED1:
	    *hp++ = val;
	    break;
	case TAG_PRIMARY_LIST:
	case TAG_PRIMARY_BOXED:
	    ASSERT(in_heapfrag(ptr_val(val), bp));
	    *hp++ = offset_ptr(val, offs);
	    break;
	case TAG_PRIMARY_HEADER:
	    *hp++ = val;
	    switch (val & _HEADER_SUBTAG_MASK) {
	    case ARITYVAL_SUBTAG:
		break;
	    case REFC_BINARY_SUBTAG:
	    case FUN_SUBTAG:
	    case EXTERNAL_PID_SUBTAG:
	    case EXTERNAL_PORT_SUBTAG:
	    case EXTERNAL_REF_SUBTAG:
		oh = (struct erl_off_heap_header*) (hp-1);
		cpy_sz = thing_arityval(val);
		goto cpy_words;
	    default:
		cpy_sz = header_arity(val);

	    cpy_words:
		ASSERT(sz >= cpy_sz);
		sz -= cpy_sz;
		while (cpy_sz >= 8) {
		    cpy_sz -= 8;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		}
		switch (cpy_sz) {
		case 7: *hp++ = *fhp++;
		case 6: *hp++ = *fhp++;
		case 5: *hp++ = *fhp++;
		case 4: *hp++ = *fhp++;
		case 3: *hp++ = *fhp++;
		case 2: *hp++ = *fhp++;
		case 1: *hp++ = *fhp++;
		default: break;
		}
		if (oh) {
		    /* Add to offheap list */
		    oh->next = off_heap->first;
		    off_heap->first = oh;
		    ASSERT(*hpp <= (Eterm*)oh);
		    ASSERT(hp > (Eterm*)oh);
		    oh = NULL;
		}
		break;
	    }
	    break;
	}
    }

    ASSERT(bp->used_size == hp - *hpp);
    *hpp = hp;

    if (is_not_immed(token)) {
	ASSERT(in_heapfrag(ptr_val(token), bp));
	ERL_MESSAGE_TOKEN(msg) = offset_ptr(token, offs);
#ifdef HARD_DEBUG
	ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TOKEN(msg)));
	ASSERT(hp > ptr_val(ERL_MESSAGE_TOKEN(msg)));
#endif
    }

    if (is_not_immed(term)) {
	ASSERT(in_heapfrag(ptr_val(term),bp));
	ERL_MESSAGE_TERM(msg) = offset_ptr(term, offs);
#ifdef HARD_DEBUG
	ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TERM(msg)));
	ASSERT(hp > ptr_val(ERL_MESSAGE_TERM(msg)));
#endif
    }
#ifdef USE_VM_PROBES
    if (is_not_immed(utag)) {
	ASSERT(in_heapfrag(ptr_val(utag), bp));
	ERL_MESSAGE_DT_UTAG(msg) = offset_ptr(utag, offs);
#ifdef HARD_DEBUG
	ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_DT_UTAG(msg)));
	ASSERT(hp > ptr_val(ERL_MESSAGE_DT_UTAG(msg)));
#endif
    }
#endif

copy_done:

#ifdef HARD_DEBUG
    {
	int i, j;
	ErlHeapFragment* frag;
	{
	    struct erl_off_heap_header* dbg_oh = off_heap->first;
	    i = j = 0;
	    while (dbg_oh != dbg_oh_start) {
		dbg_oh = dbg_oh->next;
		i++;
	    }
	    for (frag=bp; frag; frag=frag->next) {
		dbg_oh = frag->off_heap.first;
		while (dbg_oh) {
		    dbg_oh = dbg_oh->next;
		    j++;
		}
	    }
	    ASSERT(i == j);
	}
    }
#endif
	    

    bp->off_heap.first = NULL;
    free_message_buffer(bp);
    msg->data.heap_frag = NULL;

#ifdef HARD_DEBUG
    ASSERT(eq(ERL_MESSAGE_TERM(msg), dbg_term));
    ASSERT(eq(ERL_MESSAGE_TOKEN(msg), dbg_token));
#ifdef USE_VM_PROBES
    ASSERT(eq(ERL_MESSAGE_DT_UTAG(msg), dbg_utag));
#endif
    free_message_buffer(dbg_bp);
#endif

}
Example 16
0
int is_term_smaller(term_t a, term_t b)
{
	if (a == b)
		return 0;

	if (are_both_immed(a, b))
	{
		if (are_both_int(a, b))
			return int_value(a) < int_value(b);

		if (is_int(a))	// !is_int(b)
			return 1;

		if (is_nil(a))	// !is_nil(b)
			return 0;
		if (is_nil(b))	// !is_nil(a)
			return 1;

		if (is_atom(a))
		{
			if (is_int(b))
				return 0;
			else if (is_atom(b))
			{
				uint8_t *print1 = atoms_get(atom_index(a));
				uint8_t *print2 = atoms_get(atom_index(b));
				int short_len = (print1[0] < print2[0])
					? print1[0]
					: print2[0];
				int d = memcmp(print1+1, print2+1, short_len);
				if (d == 0)
					return print1[0] < print2[0];
				return d < 0;
			}
			else
				return 1;
		}
		else if (is_short_oid(a))
		{
			if (is_int(b) || is_atom(b))
				return 0;
			else if (is_short_oid(b))
				return short_oid_id(a) < short_oid_id(b);
			else
				return 1;
		}
		else if (is_short_pid(a))
		{
			if (is_int(b) || is_atom(b) || is_short_oid(b))
				return 0;
			else
			{
				assert(is_short_pid(b));
				return short_pid_id(a) < short_pid_id(b);
			}
		}
	}

	//TODO: comparison of bignum and float: docs mention the
	// number 9007199254740992.0 and a loss of transitivity
	
	if (!is_immed(a) && !is_immed(b) &&
				primary_tag(a) == primary_tag(b))
	{
		if (is_cons(a))
			return is_term_smaller_1(a, b);
		else if (is_tuple(a))
			return is_term_smaller_2(a, b);
		else
		{
			assert(is_boxed(a) && is_boxed(b));
			uint32_t *adata = peel_boxed(a);
			uint32_t *bdata = peel_boxed(b);
			if (boxed_tag(adata) == boxed_tag(bdata) ||
					(is_binary(adata) && is_binary(bdata)) ||
					(is_bignum(adata) && is_bignum(bdata)))
			{
				switch(boxed_tag(adata))
				{
				case SUBTAG_POS_BIGNUM:
				case SUBTAG_NEG_BIGNUM:
					return bignum_compare((bignum_t *)adata,
										  (bignum_t *)bdata) < 0;
				case SUBTAG_FUN:
					return fun_compare((t_fun_t *)adata,
									   (t_fun_t *)bdata) < 0;
				case SUBTAG_EXPORT:
					return export_compare((t_export_t *)adata,
									   	  (t_export_t *)bdata) < 0;

				case SUBTAG_PID:
					return pid_compare((t_long_pid_t *)adata,
									   (t_long_pid_t *)bdata) < 0;

				case SUBTAG_OID:
					return oid_compare((t_long_oid_t *)adata,
									   (t_long_oid_t *)bdata) < 0;

				case SUBTAG_REF:
					return ref_compare((t_long_ref_t *)adata,
									   (t_long_ref_t *)bdata) < 0;

				case SUBTAG_PROC_BIN:
				case SUBTAG_HEAP_BIN:
				case SUBTAG_MATCH_CTX:
				case SUBTAG_SUB_BIN:
					return is_term_smaller_3(adata, bdata);

				default:
					assert(boxed_tag(adata) == SUBTAG_FLOAT);
					return float_value(adata) < float_value(bdata);
				}
			}
		}
	}

	// Number comparison with (mandatory) coercion
	//
	int use_float = (is_boxed(a) && boxed_tag(peel_boxed(a)) == SUBTAG_FLOAT) ||
					(is_boxed(b) && boxed_tag(peel_boxed(b)) == SUBTAG_FLOAT);

	if (use_float)
	{
		if (is_int(a))	// b is always float
			return (double)int_value(a) < float_value(peel_boxed(b));
		else if (is_boxed(a))
		{
			uint32_t *adata = peel_boxed(a);
			if (is_bignum(adata))	// b is always float
				return bignum_to_double((bignum_t *)adata) < float_value(peel_boxed(b));

			if (boxed_tag(adata) == SUBTAG_FLOAT)
			{
				if (is_int(b))
					return float_value(adata) < (double)int_value(b);
				if (is_boxed(b))
				{
					uint32_t *bdata = peel_boxed(b);
					if (is_bignum(bdata))
						return float_value(adata) < bignum_to_double((bignum_t *)bdata);
				}
			}
		}
	}
	else	// use integer
	{
		if (is_int(a))
		{
			if (is_boxed(b))
			{
				uint32_t *bdata = peel_boxed(b);
				if (is_bignum(bdata))
				{
					bignum_t *bbn = (bignum_t *)bdata;
					return !bignum_is_neg(bbn);
				}
				assert(boxed_tag(bdata) != SUBTAG_FLOAT);
			}
		}
		else if (is_boxed(a))
		{
			uint32_t *adata = peel_boxed(a);
			if (is_bignum(adata))
			{
				bignum_t *abn = (bignum_t *)adata;
				if (is_int(b))
					return bignum_is_neg(abn);

				if (is_boxed(b))
				{
					uint32_t *bdata = peel_boxed(b);
					if (is_bignum(bdata))
						return bignum_compare(abn, (bignum_t *)bdata) < 0;
					assert(boxed_tag(bdata) != SUBTAG_FLOAT);
				}
			}

			assert(boxed_tag(adata) != SUBTAG_FLOAT);
		}
	}

	// a and b are guaranteed to have different types
	// 
	
	return term_order(a) < term_order(b);
}
Example 17
0
int are_terms_equal(term_t a, term_t b, int exact)
{
	assert(a != b);		// should be checked elsewhere

	if (is_immed(a) || is_immed(b))
	{
		if (exact)
			return 0;
		if (is_int(a) && is_boxed(b))
		{
			uint32_t *term_data = peel_boxed(b);
			return (boxed_tag(term_data) == SUBTAG_FLOAT)
				&& (double)int_value(a) == float_value(term_data);
		}
		else if (is_boxed(a) && is_int(b))
		{
			uint32_t *term_data = peel_boxed(a);
			return (boxed_tag(term_data) == SUBTAG_FLOAT)
				&& (float_value(term_data) == (double)int_value(b));
		}

		return 0;
	}

	if (is_cons(a))
	{
		if (is_cons(b))
		{
			do {
				uint32_t *cons1 = peel_cons(a);
				uint32_t *cons2 = peel_cons(b);

				if (cons1[0] != cons2[0]
						&& !are_terms_equal(cons1[0], cons2[0], exact))
					return 0;
				a = cons1[1];
				b = cons2[1];
			} while (is_cons(a) && is_cons(b));

			return (a == b) || are_terms_equal(a, b, exact);
		}
		else
			return 0;
	}
	else if (is_tuple(a))
	{
		if (is_tuple(b))
		{
			uint32_t *data1 = peel_tuple(a);
			uint32_t *data2 = peel_tuple(b);

			if (data1[0] != data2[0])
				return 0;

			for (int i = 1; i <= data1[0]; i++)
				if (data1[i] != data2[i]
						&& !are_terms_equal(data1[i], data2[i], exact))
					return 0;

			return 1;
		}
		else
			return 0;
	}
	else
	{
		assert(is_boxed(a));
		if (!is_boxed(b))
			return 0;

		uint32_t *term_data1 = peel_boxed(a);
		uint32_t *term_data2 = peel_boxed(b);

		uint32_t subtag = boxed_tag(term_data1);

		if (!exact && subtag == SUBTAG_FLOAT && is_bignum(term_data2))
			return float_value(term_data1) == bignum_to_double((bignum_t *)term_data2);

		if (!exact && is_bignum(term_data1) && boxed_tag(term_data2) == SUBTAG_FLOAT)
			return bignum_to_double((bignum_t *)term_data1) == float_value(term_data2);

		if (subtag != boxed_tag(term_data2) &&
				!(is_binary(term_data1) && is_binary(term_data2)))
			return 0;

		switch (subtag)
		{
		case SUBTAG_POS_BIGNUM:
		case SUBTAG_NEG_BIGNUM:
		{
			bignum_t *bn1 = (bignum_t *)term_data1;
			bignum_t *bn2 = (bignum_t *)term_data2;
			return bignum_compare(bn1, bn2) == 0;
		}
		case SUBTAG_FUN:
		{
			t_fun_t *f1 = (t_fun_t *)term_data1;
			t_fun_t *f2 = (t_fun_t *)term_data2;
			if (f1->module != f2->module ||
				f1->index != f2->index ||
				f1->old_uniq != f2->old_uniq)
					return 0;
			int num_free = fun_num_free(term_data1);
			assert(num_free == fun_num_free(term_data2));
			for (int i = 0; i < num_free; i++)
			{
				term_t v1 = f1->frozen[i];
				term_t v2 = f2->frozen[i];
				if (v1 != v2 && !are_terms_equal(v1, v2, exact))
					return 0;
			}
			return 1;
		}
		case SUBTAG_EXPORT:
		{
			export_t *e1 = ((t_export_t *)term_data1)->e;
			export_t *e2 = ((t_export_t *)term_data2)->e;
			return e1->module == e2->module &&
			   	   e1->function == e2->function &&
				   e1->arity == e2->arity;
		}		
		case SUBTAG_PID:
		{
			t_long_pid_t *pid1 = (t_long_pid_t *)term_data1;
			t_long_pid_t *pid2 = (t_long_pid_t *)term_data2;
			return pid1->node == pid2->node &&
				   pid1->serial == pid2->serial &&
				   opr_hdr_id(pid1) == opr_hdr_id(pid2) &&
				   opr_hdr_creat(pid1) == opr_hdr_creat(pid2);
		}
		case SUBTAG_OID:
		{
			t_long_oid_t *oid1 = (t_long_oid_t *)term_data1;
			t_long_oid_t *oid2 = (t_long_oid_t *)term_data2;
			return oid1->node == oid2->node &&
				   opr_hdr_id(oid1) == opr_hdr_id(oid2) &&
				   opr_hdr_creat(oid1) == opr_hdr_creat(oid2);
		}
		case SUBTAG_REF:
		{
			t_long_ref_t *ref1 = (t_long_ref_t *)term_data1;
			t_long_ref_t *ref2 = (t_long_ref_t *)term_data2;
			return ref1->node == ref2->node &&
				   ref1->id1 == ref2->id1 &&
				   ref1->id2 == ref2->id2 &&
				   opr_hdr_id(ref1) == opr_hdr_id(ref2) &&
				   opr_hdr_creat(ref1) == opr_hdr_creat(ref2);
		}
		case SUBTAG_PROC_BIN:
		case SUBTAG_HEAP_BIN:
		case SUBTAG_MATCH_CTX:
		case SUBTAG_SUB_BIN:
		{
			bits_t bs1, bs2;
			bits_get_real(term_data1, &bs1);
			bits_get_real(term_data2, &bs2);
			return (bits_compare(&bs1, &bs2) == 0);
		}
		default:
			assert(subtag == SUBTAG_FLOAT);
			return float_value(term_data1) == float_value(term_data2);
		}
		return 1;
	}
}
Example 18
0
static Eterm
keyfind(int Bif, Process* p, Eterm Key, Eterm Pos, Eterm List)
{
    int max_iter = 10 * CONTEXT_REDS;
    Sint pos;
    Eterm term;

    if (!is_small(Pos) || (pos = signed_val(Pos)) < 1) {
	BIF_ERROR(p, BADARG);
    }

    if (is_small(Key)) {
	double float_key = (double) signed_val(Key);

	while (is_list(List)) {
	    if (--max_iter < 0) {
		BUMP_ALL_REDS(p);
		BIF_TRAP3(bif_export[Bif], p, Key, Pos, List);
	    }
	    term = CAR(list_val(List));
	    List = CDR(list_val(List));
	    if (is_tuple(term)) {
		Eterm *tuple_ptr = tuple_val(term);
		if (pos <= arityval(*tuple_ptr)) {
		    Eterm element = tuple_ptr[pos];
		    if (Key == element) {
			return term;
		    } else if (is_float(element)) {
			FloatDef f;

			GET_DOUBLE(element, f);
			if (f.fd == float_key) {
			    return term;
			}
		    }
		}
	    }
	}
    } else if (is_immed(Key)) {
	while (is_list(List)) {
	    if (--max_iter < 0) {
		BUMP_ALL_REDS(p);
		BIF_TRAP3(bif_export[Bif], p, Key, Pos, List);
	    }
	    term = CAR(list_val(List));
	    List = CDR(list_val(List));
	    if (is_tuple(term)) {
		Eterm *tuple_ptr = tuple_val(term);
		if (pos <= arityval(*tuple_ptr)) {
		    Eterm element = tuple_ptr[pos];
		    if (Key == element) {
			return term;
		    }
		}
	    }
	}
    } else {
	while (is_list(List)) {
	    if (--max_iter < 0) {
		BUMP_ALL_REDS(p);
		BIF_TRAP3(bif_export[Bif], p, Key, Pos, List);
	    }
	    term = CAR(list_val(List));
	    List = CDR(list_val(List));
	    if (is_tuple(term)) {
		Eterm *tuple_ptr = tuple_val(term);
		if (pos <= arityval(*tuple_ptr)) {
		    Eterm element = tuple_ptr[pos];
		    if (CMP(Key, element) == 0) {
			return term;
		    }
		}
	    }
	}
    }

    if (is_not_nil(List))  {
	BIF_ERROR(p, BADARG);
    }
    return am_false;
}
Example 19
0
ErlHeapFragment*
erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
			   Eterm *brefs, Uint brefs_size)
{
#ifdef DEBUG
    int i;
#endif
#ifdef HARD_DEBUG
    ErlHeapFragment *dbg_bp;
    Eterm *dbg_brefs;
    Uint dbg_size;
    Uint dbg_tot_size;
    Eterm *dbg_hp;
#endif
    ErlHeapFragment* nbp;

#ifdef DEBUG
    {
	Uint off_sz = size < bp->used_size ? size : bp->used_size;
	for (i = 0; i < brefs_size; i++) {
	    Eterm *ptr;
	    if (is_immed(brefs[i]))
		continue;
	    ptr = ptr_val(brefs[i]);
	    ASSERT(&bp->mem[0] <= ptr && ptr < &bp->mem[0] + off_sz);

	}
    }
#endif

    if (size >= (bp->used_size - bp->used_size / 16)) {
        bp->used_size = size;
	return bp;
    }

#ifdef HARD_DEBUG
    dbg_brefs = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(Eterm *)*brefs_size);
    dbg_bp = new_message_buffer(bp->used_size);
    dbg_hp = dbg_bp->mem;
    dbg_tot_size = 0;
    for (i = 0; i < brefs_size; i++) {
	dbg_size = size_object(brefs[i]);
	dbg_tot_size += dbg_size;
	dbg_brefs[i] = copy_struct(brefs[i], dbg_size, &dbg_hp,
				   &dbg_bp->off_heap);
    }
    ASSERT(dbg_tot_size == (size < bp->used_size ? size : bp->used_size));
#endif

    nbp = (ErlHeapFragment*) ERTS_HEAP_REALLOC(ERTS_ALC_T_HEAP_FRAG,
					       (void *) bp,
					       ERTS_HEAP_FRAG_SIZE(bp->alloc_size),
					       ERTS_HEAP_FRAG_SIZE(size));
    if (bp != nbp) {
	Uint off_sz = size < nbp->used_size ? size : nbp->used_size;
	Eterm *sp = &bp->mem[0];
	Eterm *ep = sp + off_sz;
	Sint offs = &nbp->mem[0] - sp;
	erts_offset_off_heap(&nbp->off_heap, offs, sp, ep);
	erts_offset_heap(&nbp->mem[0], off_sz, offs, sp, ep);
	if (brefs && brefs_size)
	    erts_offset_heap_ptr(brefs, brefs_size, offs, sp, ep);
#ifdef DEBUG
	for (i = 0; i < brefs_size; i++) {
	    Eterm *ptr;
	    if (is_immed(brefs[i]))
		continue;
	    ptr = ptr_val(brefs[i]);
	    ASSERT(&nbp->mem[0] <= ptr && ptr < &nbp->mem[0] + off_sz);
	}
#endif
    }
    nbp->alloc_size = size;
    nbp->used_size = size;

#ifdef HARD_DEBUG
    for (i = 0; i < brefs_size; i++)
	ASSERT(eq(dbg_brefs[i], brefs[i]));
    free_message_buffer(dbg_bp);
    erts_free(ERTS_ALC_T_UNDEF, dbg_brefs);
#endif

    return nbp;
}
Example 20
0
/*
 * Garbage collect a process.
 *
 * p: Pointer to the process structure.
 * need: Number of Eterm words needed on the heap.
 * objv: Array of terms to add to rootset; that is to preserve.
 * nobj: Number of objects in objv.
 */
int
erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
{
    int ret;
    struct slave_syscall_gc *cmd = erts_alloc(ERTS_ALC_T_TMP, sizeof(*cmd));
    Eterm *dram_objv;
    int copy_objv = nobj != 0 && !epiphany_in_dram(objv);

    if (copy_objv) {
	dram_objv = erts_alloc(ERTS_ALC_T_TMP, nobj*sizeof(Eterm));
	memcpy(dram_objv, objv, nobj*sizeof(Eterm));
    } else {
	dram_objv = objv;
    }

#ifdef DEBUG
    {
	int i;
	for (i = 0; i < nobj; i++)
	    ASSERT(is_immed(objv[i]) || epiphany_in_dram(ptr_val(objv[i])));
    }
#endif
    cmd->need = need;
    cmd->objv = dram_objv;
    cmd->nobj = nobj;
    slave_state_swapout(p, &cmd->state);

    erts_master_syscall(SLAVE_SYSCALL_GC, cmd);

    if (copy_objv) {
	memcpy(objv, dram_objv, nobj*sizeof(Eterm));
	erts_free(ERTS_ALC_T_TMP, dram_objv);
    }

    /*
     * The garbage collector will have set mbuf to NULL without freeing it. We
     * do so here. See remove_message_buffers in the master.
     */
    if (MBUF(p) != NULL) {
	free_message_buffer(MBUF(p));
	ASSERT(cmd->state.mbuf == NULL);
    }

    slave_state_swapin(p, &cmd->state);

#ifdef CHECK_FOR_HOLES
    /*
     * We intentionally do not rescan the areas copied by the GC.
     * We trust the GC not to leave any holes.
     */
    p->last_htop = p->htop;
    p->last_mbuf = 0;
#endif

#ifdef DEBUG
    /*
     * The scanning for pointers from the old_heap into the new_heap or
     * heap fragments turned out to be costly, so we remember how far we
     * have scanned this time and will start scanning there next time.
     * (We will not detect wild writes into the old heap, or modifications
     * of the old heap in-between garbage collections.)
     */
    p->last_old_htop = p->old_htop;
#endif

    ret = cmd->ret;
    erts_free(ERTS_ALC_T_TMP, cmd);
    return ret;
}
Example 21
0
Sint
erts_send_message(Process* sender,
		  Process* receiver,
		  ErtsProcLocks *receiver_locks,
		  Eterm message,
		  unsigned flags)
{
    Uint msize;
    ErtsMessage* mp;
    ErlOffHeap *ohp;
    Eterm token = NIL;
    Sint res = 0;
#ifdef USE_VM_PROBES
    DTRACE_CHARBUF(sender_name, 64);
    DTRACE_CHARBUF(receiver_name, 64);
    Sint tok_label = 0;
    Sint tok_lastcnt = 0;
    Sint tok_serial = 0;
    Eterm utag = NIL;
#endif
    erts_aint32_t receiver_state;
#ifdef SHCOPY_SEND
    erts_shcopy_t info;
#else
    erts_literal_area_t litarea;
    INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif

#ifdef USE_VM_PROBES
    *sender_name = *receiver_name = '\0';
    if (DTRACE_ENABLED(message_send)) {
        erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
		      "%T", sender->common.id);
        erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
		      "%T", receiver->common.id);
    }
#endif

    receiver_state = erts_atomic32_read_nob(&receiver->state);

    if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
        Eterm* hp;
	Eterm stoken = SEQ_TRACE_TOKEN(sender);
	Uint seq_trace_size = 0;
#ifdef USE_VM_PROBES
	Uint dt_utag_size = 0;
#endif

        /* SHCOPY corrupts the heap between
         * copy_shared_calculate, and
         * copy_shared_perform. (it inserts move_markers like the gc).
         * Make sure we don't use the heap between those instances.
         */
        if (have_seqtrace(stoken)) {
	    seq_trace_update_send(sender);
	    seq_trace_output(stoken, message, SEQ_TRACE_SEND,
			     receiver->common.id, sender);
	    seq_trace_size = 6; /* TUPLE5 */
	}
#ifdef USE_VM_PROBES
        if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
            dt_utag_size = size_object(DT_UTAG(sender));
        } else if (stoken == am_have_dt_utag ) {
            stoken = NIL;
        }
#endif

#ifdef SHCOPY_SEND
        INITIALIZE_SHCOPY(info);
        msize = copy_shared_calculate(message, &info);
#else
        msize = size_object_litopt(message, &litarea);
#endif
        mp = erts_alloc_message_heap_state(receiver,
                                           &receiver_state,
                                           receiver_locks,
                                           (msize
#ifdef USE_VM_PROBES
                                            + dt_utag_size
#endif
                                            + seq_trace_size),
                                           &hp,
                                           &ohp);

#ifdef SHCOPY_SEND
	if (is_not_immed(message))
            message = copy_shared_perform(message, msize, &info, &hp, ohp);
        DESTROY_SHCOPY(info);
#else
	if (is_not_immed(message))
            message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
	if (is_immed(stoken))
	    token = stoken;
	else
	    token = copy_struct(stoken, seq_trace_size, &hp, ohp);

#ifdef USE_VM_PROBES
	if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
	    if (is_immed(DT_UTAG(sender)))
		utag = DT_UTAG(sender);
	    else
		utag = copy_struct(DT_UTAG(sender), dt_utag_size, &hp, ohp);
	}
        if (DTRACE_ENABLED(message_send)) {
            if (have_seqtrace(stoken)) {
		tok_label = signed_val(SEQ_TRACE_T_LABEL(stoken));
		tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken));
		tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken));
	    }
	    DTRACE6(message_send, sender_name, receiver_name,
		    msize, tok_label, tok_lastcnt, tok_serial);
        }
#endif
    } else {
        Eterm *hp;

	if (receiver == sender && !(receiver_state & ERTS_PSFLG_OFF_HEAP_MSGQ)) {
	    mp = erts_alloc_message(0, NULL);
	    msize = 0;
	}
	else {
#ifdef SHCOPY_SEND
            INITIALIZE_SHCOPY(info);
            msize = copy_shared_calculate(message, &info);
#else
            msize = size_object_litopt(message, &litarea);
#endif
	    mp = erts_alloc_message_heap_state(receiver,
					       &receiver_state,
					       receiver_locks,
					       msize,
					       &hp,
					       &ohp);
#ifdef SHCOPY_SEND
            if (is_not_immed(message))
                message = copy_shared_perform(message, msize, &info, &hp, ohp);
            DESTROY_SHCOPY(info);
#else
            if (is_not_immed(message))
                message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
	}
#ifdef USE_VM_PROBES
        DTRACE6(message_send, sender_name, receiver_name,
                msize, tok_label, tok_lastcnt, tok_serial);
#endif
    }

    ERL_MESSAGE_TOKEN(mp) = token;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = utag;
#endif
    res = queue_message(receiver,
			&receiver_state,
			*receiver_locks,
			mp, message,
                        sender->common.id);

    return res;
}
Example 22
0
static int ets_terms_copy_size2(stack_t *st)
{
	int hsize = 0;
pop_term:
	if (stack_is_empty(st))
		return hsize;
	term_t t = (term_t)*stack_pop(st);
tail_recur:
	if (is_immed(t))
		goto pop_term;

	if (is_cons(t))
	{
		while (is_cons(t))
		{
			hsize += 2;
			term_t *cons = peel_cons(t);
			uint32_t *push = stack_push_N(st);
			if (push == 0)
				return -NO_MEMORY;
			*push = cons[0];
			t = cons[1];
		}
		if (t != nil)
			goto tail_recur;

		goto pop_term;
	}

	if (is_tuple(t))
	{
		uint32_t *p = peel_tuple(t);
		int arity = *p++;
		if (arity == 0)
			goto pop_term; // no heap frag
		hsize += 1 +arity;
		for (int i = 0; i < arity -1; i++)
		{
			uint32_t *push = stack_push_N(st);
			if (push == 0)
				return -NO_MEMORY;
			*push = p[i];
		}

		t = p[arity -1];
		goto tail_recur;
	}

	assert(is_boxed(t));
	uint32_t *tdata = peel_boxed(t);
	switch (boxed_tag(tdata))
	{
	case SUBTAG_POS_BIGNUM:
	case SUBTAG_NEG_BIGNUM:
	{
		bignum_t *bn = (bignum_t *)tdata;
		hsize += WSIZE(bignum_t) + (bn->used*sizeof(uint16_t) +3) /4;
		goto pop_term;
	}
	case SUBTAG_FLOAT:
		hsize += WSIZE(t_float_t);
		goto pop_term;

	case SUBTAG_FUN:
	{
		t_fun_t *fun = (t_fun_t *)tdata;
		int num_free = fun_num_free(tdata);
		hsize += WSIZE(t_fun_t) + num_free;

		for (int i = 0; i < num_free -1; i++)
		{
			uint32_t *push = stack_push_N(st);
			if (push == 0)
				return -NO_MEMORY;
			*push = fun->frozen[i];
		}
		if (num_free == 0)
			goto pop_term;

		t = fun->frozen[num_free -1];
		goto tail_recur;
	}
	case SUBTAG_EXPORT:
		hsize += WSIZE(t_export_t);
		goto pop_term;

	case SUBTAG_PID:
		hsize += WSIZE(t_long_pid_t);
		goto pop_term;

	case SUBTAG_OID:
		hsize += WSIZE(t_long_oid_t);
		goto pop_term;

	case SUBTAG_REF:
		hsize += WSIZE(t_long_ref_t);
		goto pop_term;

	case SUBTAG_PROC_BIN:
		hsize += WSIZE(t_proc_bin_t);
		goto pop_term;

	case SUBTAG_HEAP_BIN:
	{
		t_heap_bin_t *hb = (t_heap_bin_t *)tdata;
		hsize += WSIZE(t_heap_bin_t) + (hb->byte_size +3) /4;
		goto pop_term;
	}
	case SUBTAG_MATCH_CTX:
	{
		t_match_ctx_t *mc = (t_match_ctx_t *)tdata;
		int num_slots = match_ctx_num_slots(tdata);

		hsize += WSIZE(t_match_ctx_t) + num_slots*sizeof(int64_t) /4;

		t = mc->parent;
		goto tail_recur;
	}
	default: // SUBTAG_SUB_BIN
	{
		assert(boxed_tag(tdata) == SUBTAG_SUB_BIN);
		t_sub_bin_t *sb = (t_sub_bin_t *)tdata;
		
		hsize += WSIZE(t_sub_bin_t);

		t = sb->parent;
		goto tail_recur;
	}
	}
}
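
ets_terms_copy_size2 walks a term with an explicit stack and goto-based tail calls instead of recursing, so deeply nested lists and tuples cannot overflow the C stack. The following stand-alone sketch shows the same explicit-stack pattern on a toy tree structure; it is illustrative only, and every type and helper in it is invented.

#include <stdio.h>
#include <stdlib.h>

/* Toy term: a node with zero or more children (a leaf has nkids == 0). */
typedef struct node {
    int nkids;
    struct node **kids;
} node_t;

/* Count cells without recursion: push children on an explicit stack. */
static long count_cells(node_t *root)
{
    long cells = 0;
    int cap = 16, top = 0;
    node_t **stack = malloc(cap * sizeof *stack);
    if (!stack) return -1;
    stack[top++] = root;

    while (top > 0) {
        node_t *t = stack[--top];
        cells++;                         /* account for this cell */
        for (int i = 0; i < t->nkids; i++) {
            if (top == cap) {            /* grow the stack on demand */
                cap *= 2;
                node_t **ns = realloc(stack, cap * sizeof *ns);
                if (!ns) { free(stack); return -1; }
                stack = ns;
            }
            stack[top++] = t->kids[i];
        }
    }
    free(stack);
    return cells;
}

int main(void)
{
    node_t leaf = { 0, NULL };
    node_t *kids[3] = { &leaf, &leaf, &leaf };
    node_t root = { 3, kids };
    printf("%ld cells\n", count_cells(&root));   /* prints 4 */
    return 0;
}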
Example 23
0
Sint
erts_move_messages_off_heap(Process *c_p)
{
    int reds = 1;
    /*
     * Move all messages off heap. This *only* occurs when the
     * process had off heap message disabled and just enabled
     * it...
     */
    ErtsMessage *mp;

    reds += c_p->msg.len / 10;

    ASSERT(erts_atomic32_read_nob(&c_p->state)
	   & ERTS_PSFLG_OFF_HEAP_MSGQ);
    ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);

    for (mp = c_p->msg.first; mp; mp = mp->next) {
	Uint msg_sz, token_sz;
#ifdef USE_VM_PROBES
	Uint utag_sz;
#endif
	Eterm *hp;
	ErlHeapFragment *hfrag;

	if (mp->data.attached)
	    continue;

	if (is_immed(ERL_MESSAGE_TERM(mp))
#ifdef USE_VM_PROBES
	    && is_immed(ERL_MESSAGE_DT_UTAG(mp))
#endif
	    && is_immed(ERL_MESSAGE_TOKEN(mp)))
	    continue;

	/*
	 * The message refers into the heap. Copy the message
	 * from the heap into a heap fragment and attach
	 * it to the message...
	 */
	msg_sz = size_object(ERL_MESSAGE_TERM(mp));
#ifdef USE_VM_PROBES
	utag_sz = size_object(ERL_MESSAGE_DT_UTAG(mp));
#endif
	token_sz = size_object(ERL_MESSAGE_TOKEN(mp));

	hfrag = new_message_buffer(msg_sz
#ifdef USE_VM_PROBES
				   + utag_sz
#endif
				   + token_sz);
	hp = hfrag->mem;
	if (is_not_immed(ERL_MESSAGE_TERM(mp)))
	    ERL_MESSAGE_TERM(mp) = copy_struct(ERL_MESSAGE_TERM(mp),
                                               msg_sz, &hp,
                                               &hfrag->off_heap);
	if (is_not_immed(ERL_MESSAGE_TOKEN(mp)))
	    ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp),
						token_sz, &hp,
						&hfrag->off_heap);
#ifdef USE_VM_PROBES
	if (is_not_immed(ERL_MESSAGE_DT_UTAG(mp)))
	    ERL_MESSAGE_DT_UTAG(mp) = copy_struct(ERL_MESSAGE_DT_UTAG(mp),
						  utag_sz, &hp,
						  &hfrag->off_heap);
#endif
	mp->data.heap_frag = hfrag;
	reds += 1;
    }

    return reds;
}
Example 24
0
static uint32_t *terms_copy(stack_t *stack, term_t *terms, int num,
								uint32_t *htop, t_proc_bin_t **pbs)
{
next_term:
	if (num == 0)
	{
		if (stack_is_empty(stack))
			return htop;
		ets_deferred_copy_t *pop = (ets_deferred_copy_t *)stack_pop(stack);
		terms = pop->terms;
		num = pop->num;
		goto next_term;
	}

	term_t t = terms[0];
	if (is_immed(t))
	{
		terms++;
		num--;
		goto next_term;
	}

	term_t copy = noval;
	if (is_cons(t))
	{
		term_t *cons = peel_cons(t);
		copy = tag_cons(htop);
		term_t *new_cons = htop;
		do {
			new_cons[0] = cons[0];
			new_cons[1] = cons[1];
			htop += 2;

			if (!is_immed(new_cons[0]))
				DEFER_COPY(stack, new_cons, 1);

			term_t tail = new_cons[1];
			if (is_immed(tail))
				break;

			if (!is_cons(tail))
			{
				DEFER_COPY(stack, new_cons +1, 1);
				break;
			}

			new_cons[1] = tag_cons(htop);

			cons = peel_cons(tail);
			new_cons = htop;
		} while (1);
	}
	else if (is_tuple(t))
	{
		uint32_t *p = peel_tuple(t);
		int arity = *p++;
		if (arity == 0)
			copy = ZERO_TUPLE;
		else
		{
			copy = tag_tuple(htop);
			*htop++ = arity;
			memcpy(htop, p, arity *sizeof(term_t));
			DEFER_COPY(stack, htop, arity);
			htop += arity;
		}
	}
	else
	{
		assert(is_boxed(t));
		uint32_t *tdata = peel_boxed(t);
		copy = tag_boxed(htop);
		switch (boxed_tag(tdata))
		{
		case SUBTAG_POS_BIGNUM:
		case SUBTAG_NEG_BIGNUM:
		{
			bignum_t *bn = (bignum_t *)tdata;
			int wsize = WSIZE(bignum_t) + (bn->used*sizeof(uint16_t) +3) /4;
			memcpy(htop, tdata, wsize *sizeof(uint32_t));
			htop += wsize;
			break;
		}
		case SUBTAG_FLOAT:
			EASY_COPY(t_float_t);
			break;

		case SUBTAG_FUN:
		{
			t_fun_t *new_fun = (t_fun_t *)htop;
			int num_free = fun_num_free(tdata);
			int wsize = WSIZE(t_fun_t) + num_free;
			memcpy(new_fun, tdata, wsize *sizeof(uint32_t));
			DEFER_COPY(stack, new_fun->frozen, num_free);
			htop += wsize;
			break;
		}
		case SUBTAG_EXPORT:
			EASY_COPY(t_export_t);
			break;

		case SUBTAG_PID:
			EASY_COPY(t_long_pid_t);
			break;

		case SUBTAG_OID:
			EASY_COPY(t_long_oid_t);
			break;

		case SUBTAG_REF:
			EASY_COPY(t_long_ref_t);
			break;

		case SUBTAG_PROC_BIN:
		{
			t_proc_bin_t *pb = (t_proc_bin_t *)htop;
			memcpy(htop, tdata, sizeof(t_proc_bin_t));

			// 1+ bin node refc
			proc_bin_link(pbs, pb, 0);

			htop += WSIZE(t_proc_bin_t);
			break;
		}
		case SUBTAG_HEAP_BIN:
		{
			t_heap_bin_t *hb = (t_heap_bin_t *)tdata;
			int wsize = WSIZE(t_heap_bin_t) + (hb->byte_size +3) /4;
			memcpy(htop, tdata, wsize*sizeof(uint32_t));
			htop += wsize;
			break;
		}
		case SUBTAG_MATCH_CTX:
		{
			t_match_ctx_t *new_mc = (t_match_ctx_t *)htop;
			memcpy(new_mc, tdata, sizeof(t_match_ctx_t));
			DEFER_COPY(stack, &new_mc->parent, 1);
			htop += WSIZE(t_match_ctx_t);
			break;
		}
		default: // SUBTAG_SUB_BIN
		{
			assert(boxed_tag(tdata) == SUBTAG_SUB_BIN);
			t_sub_bin_t *new_sb = (t_sub_bin_t *)htop;
			memcpy(new_sb, tdata, sizeof(t_sub_bin_t));
			DEFER_COPY(stack, &new_sb->parent, 1);
			htop += WSIZE(t_sub_bin_t);
			break;
		}
		}
	}

	assert(copy != noval);
	*terms++ = copy;
	num--;
	goto next_term;
}
Example 25
0
void seek_live(term_t *tp, apr_memnode_t *newest, heap_t *hp)
{
	term_t t = *tp;
	apr_memnode_t *node;
	term_box_t *ptr;

	// newest node - the node of the last generation the term may belong to
	// the node chain starts with the newest and goes to hp->gc_spot

	if (is_immed(t))
		return;
	ptr = peel(t);

	node = newest;
	while (node != hp->gc_spot)
	{
		if (node_contains(node, ptr))
		{
			// the term belongs to the newer generation
			// of terms; recurse to find possible references
			// to live terms in hp->gc_spot

			// only tuples, conses, funs (frozen)
			// and binaries (data, parent) contain references

			// order of popularity:
			// cons - tuple - binary - fun

			if (is_cons(t))
			{
				seek_live(&ptr->cons.head, node, hp);
				seek_live(&ptr->cons.tail, node, hp);
			}
			else if (is_tuple(t))
			{
				int i;
				int n = ptr->tuple.size;
				for (i = 0; i < n; i++)
					seek_live(&ptr->tuple.elts[i], node, hp);
			}
			else if (is_binary(t))
			{
				if (ptr->binary.parent != noval)
				{
					term_box_t *parent;
					seek_live(&ptr->binary.parent, node, hp);
					parent = peel(ptr->binary.parent);
					ptr->binary.data = parent->binary.data + ptr->binary.offset;
				}
			}
			else if (is_fun(t))
			{
				seek_live(&ptr->fun.frozen, node, hp);
			}

			return;
		}
		node = node->next;
	}

	if (node_contains(hp->gc_spot, ptr))
	{
		// the term should be recreated

		// the term may have already been moved
		// and the term value has been replaced with
		// the buried reference to the new location

		if (is_grave(t))
		{
			*tp = ptr->grave.skeleton;
			return;
		}

		// list - tuple - binary - fun - bignum - pid - float

		if (is_list(t))
		{
			term_t cons = heap_cons2(hp, ptr->cons.head, ptr->cons.tail);
			term_box_t *box = peel(cons);
			seek_live(&box->cons.head, hp->gc_spot, hp);
			seek_live(&box->cons.tail, hp->gc_spot, hp);
			*tp = cons;
		}
		else if (is_tuple(t))
		{
			term_t tuple = heap_tuple(hp, ptr->tuple.size);
			term_box_t *box = peel(tuple);
			int i;
			for (i = 0; i < ptr->tuple.size; i++)
			{
				box->tuple.elts[i] = ptr->tuple.elts[i];
				seek_live(&box->tuple.elts[i], hp->gc_spot, hp);
			}
			*tp = tuple;
		}
		else if (is_binary(t))
		{
			term_t parent = ptr->binary.parent;
			term_t b;
			if (parent == noval)
				b = heap_binary(hp, ptr->binary.bit_size, ptr->binary.data);
			else
			{
				apr_byte_t *data;
				seek_live(&parent, hp->gc_spot, hp);
				data = peel(parent)->binary.data + ptr->binary.offset;
				b = heap_binary_shared(hp, ptr->binary.bit_size, data, parent);
			}
			*tp = b;
		}
		else if (is_fun(t))
		{
			term_t f = heap_fun(hp,
				ptr->fun.module, ptr->fun.function, ptr->fun.arity,
				ptr->fun.uniq, ptr->fun.index, ptr->fun.frozen);
			seek_live(&peel(f)->fun.frozen, hp->gc_spot, hp);
			*tp = f;
		}
		else if (is_bignum(t))
		{
			mp_int ma = bignum_to_mp(t);
			*tp = heap_bignum(hp, SIGN(&ma), USED(&ma), DIGITS(&ma));
		}
		else if (is_long_id(t))
		{
			*tp = heap_long_id(hp,
				ptr->long_id.node,
				ptr->long_id.serial,
				ptr->long_id.tag_creation);
		}
		else	// if (is_float(t))
		{
			assert(is_float(t));
			*tp = heap_float(hp, float_value(t));
		}

		// bury the term
		ptr->grave.cross = MAGIC_CROSS;
		ptr->grave.skeleton = *tp;

		return;
	}
	else
	{
		// the term belongs to the older generation or
		// to the literal pool of the module -- ignore

		return;
	}
}
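
seek_live "buries" each term it has moved by overwriting the old cell with a grave (a magic marker plus the new location), so any later reference to the old cell is redirected rather than copied a second time, which preserves sharing. Here is a minimal stand-alone illustration of that forwarding-pointer idea; it is not teeterl code, and the types and names are invented.

#include <assert.h>
#include <stdio.h>

#define MAGIC_FORWARDED 0xDEADBEEFu

/* Old-space cell: either live payload or a forwarding record. */
typedef struct cell {
    unsigned marker;          /* MAGIC_FORWARDED once the cell was moved */
    struct cell *forward;     /* new location, valid only when forwarded */
    int payload;
} cell_t;

static cell_t new_space[16];
static int new_top = 0;

/* Copy a cell into new space, or follow its forwarding pointer if it
 * has already been moved (this is what keeps sharing intact). */
static cell_t *evacuate(cell_t *old)
{
    if (old->marker == MAGIC_FORWARDED)
        return old->forward;                 /* already copied: redirect */

    cell_t *copy = &new_space[new_top++];
    copy->marker = 0;
    copy->payload = old->payload;

    old->marker = MAGIC_FORWARDED;           /* "bury" the old cell */
    old->forward = copy;
    return copy;
}

int main(void)
{
    cell_t shared = { 0, NULL, 7 };
    cell_t *a = evacuate(&shared);
    cell_t *b = evacuate(&shared);           /* second reference to the same cell */
    assert(a == b);                          /* copied once, then forwarded */
    printf("payload=%d, copies=%d\n", a->payload, new_top);
    return 0;
}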
Example 26
0
/*
 * Moves content of message buffer attached to a message into a heap.
 * The message buffer is deallocated.
 */
void
erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
{
    /* Unions for typecasts avoids warnings about type-punned pointers and aliasing */
    union {
	Uint** upp;
	ProcBin **pbpp;
	ErlFunThing **efpp;
	ExternalThing **etpp;
    } oh_list_pp, oh_el_next_pp;
    union {
	Uint *up;
	ProcBin *pbp;
	ErlFunThing *efp;
	ExternalThing *etp;
    } oh_el_p;
    Eterm term, token, *fhp, *hp;
    Sint offs;
    Uint sz;
    ErlHeapFragment *bp;

#ifdef HARD_DEBUG
    ProcBin *dbg_mso_start = off_heap->mso;
    ErlFunThing *dbg_fun_start = off_heap->funs;
    ExternalThing *dbg_external_start = off_heap->externals;
    Eterm dbg_term, dbg_token;
    ErlHeapFragment *dbg_bp;
    Uint *dbg_hp, *dbg_thp_start;
    Uint dbg_term_sz, dbg_token_sz;
#endif

    bp = msg->data.heap_frag;
    term = ERL_MESSAGE_TERM(msg);
    token = ERL_MESSAGE_TOKEN(msg);
    if (!bp) {
	ASSERT(is_immed(term) && is_immed(token));
	return;
    }

#ifdef HARD_DEBUG
    dbg_term_sz = size_object(term);
    dbg_token_sz = size_object(token);
    ASSERT(bp->size == dbg_term_sz + dbg_token_sz);

    dbg_bp = new_message_buffer(bp->size);
    dbg_hp = dbg_bp->mem;
    dbg_term = copy_struct(term, dbg_term_sz, &dbg_hp, &dbg_bp->off_heap);
    dbg_token = copy_struct(token, dbg_token_sz, &dbg_hp, &dbg_bp->off_heap);
    dbg_thp_start = *hpp;
#endif

    ASSERT(bp);
    msg->data.attached = NULL;

    off_heap->overhead += bp->off_heap.overhead;
    sz = bp->size;

#ifdef DEBUG
    if (is_not_immed(term)) {
	ASSERT(bp->mem <= ptr_val(term));
	ASSERT(bp->mem + bp->size > ptr_val(term));
    }

    if (is_not_immed(token)) {
	ASSERT(bp->mem <= ptr_val(token));
	ASSERT(bp->mem + bp->size > ptr_val(token));
    }
#endif

    fhp = bp->mem;
    hp = *hpp;
    offs = hp - fhp;

    oh_list_pp.upp = NULL;
    oh_el_next_pp.upp = NULL; /* Shut up compiler warning */
    oh_el_p.up = NULL; /* Shut up compiler warning */
    while (sz--) {
	Uint cpy_sz;
	Eterm val = *fhp++;

	switch (primary_tag(val)) {
	case TAG_PRIMARY_IMMED1:
	    *hp++ = val;
	    break;
	case TAG_PRIMARY_LIST:
	case TAG_PRIMARY_BOXED:
	    ASSERT(bp->mem <= ptr_val(val));
	    ASSERT(bp->mem + bp->size > ptr_val(val));
	    *hp++ = offset_ptr(val, offs);
	    break;
	case TAG_PRIMARY_HEADER:
	    *hp++ = val;
	    switch (val & _HEADER_SUBTAG_MASK) {
	    case ARITYVAL_SUBTAG:
		break;
	    case REFC_BINARY_SUBTAG:
		oh_list_pp.pbpp = &off_heap->mso;
		oh_el_p.up = (hp-1);
		oh_el_next_pp.pbpp = &(oh_el_p.pbp)->next;
		cpy_sz = thing_arityval(val);
		goto cpy_words;
	    case FUN_SUBTAG:
#ifndef HYBRID
		oh_list_pp.efpp = &off_heap->funs;
		oh_el_p.up = (hp-1);
		oh_el_next_pp.efpp = &(oh_el_p.efp)->next;
#endif
		cpy_sz = thing_arityval(val);
		goto cpy_words;
	    case EXTERNAL_PID_SUBTAG:
	    case EXTERNAL_PORT_SUBTAG:
	    case EXTERNAL_REF_SUBTAG:
		oh_list_pp.etpp = &off_heap->externals;
		oh_el_p.up = (hp-1);
		oh_el_next_pp.etpp =  &(oh_el_p.etp)->next;
		cpy_sz = thing_arityval(val);
		goto cpy_words;
	    default:
		cpy_sz = header_arity(val);

	    cpy_words:
		sz -= cpy_sz;
		while (cpy_sz >= 8) {
		    cpy_sz -= 8;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		    *hp++ = *fhp++;
		}
		switch (cpy_sz) {
		case 7: *hp++ = *fhp++;
		case 6: *hp++ = *fhp++;
		case 5: *hp++ = *fhp++;
		case 4: *hp++ = *fhp++;
		case 3: *hp++ = *fhp++;
		case 2: *hp++ = *fhp++;
		case 1: *hp++ = *fhp++;
		default: break;
		}
		if (oh_list_pp.upp) {
#ifdef HARD_DEBUG
		    Uint *dbg_old_oh_list_p = *oh_list_pp.upp;
#endif
		    /* Add to offheap list */
		    *oh_el_next_pp.upp = *oh_list_pp.upp;
		    *oh_list_pp.upp = oh_el_p.up;
		    ASSERT(*hpp <= oh_el_p.up);
		    ASSERT(hp > oh_el_p.up);
#ifdef HARD_DEBUG
		    switch (val & _HEADER_SUBTAG_MASK) {
		    case REFC_BINARY_SUBTAG:
			ASSERT(off_heap->mso == *oh_list_pp.pbpp);
			ASSERT(off_heap->mso->next
			       == (ProcBin *) dbg_old_oh_list_p);
			break;
#ifndef HYBRID
		    case FUN_SUBTAG:
			ASSERT(off_heap->funs == *oh_list_pp.efpp);
			ASSERT(off_heap->funs->next
			       == (ErlFunThing *) dbg_old_oh_list_p);
			break;
#endif
		    case EXTERNAL_PID_SUBTAG:
		    case EXTERNAL_PORT_SUBTAG:
		    case EXTERNAL_REF_SUBTAG:
			ASSERT(off_heap->externals
			       == *oh_list_pp.etpp);
			ASSERT(off_heap->externals->next
			       == (ExternalThing *) dbg_old_oh_list_p);
			break;
		    default:
			ASSERT(0);
		    }
#endif
		    oh_list_pp.upp = NULL;


		}
		break;
	    }
	    break;
	}
    }

    ASSERT(bp->size == hp - *hpp);
    *hpp = hp;

    if (is_not_immed(token)) {
	ASSERT(bp->mem <= ptr_val(token));
	ASSERT(bp->mem + bp->size > ptr_val(token));
	ERL_MESSAGE_TOKEN(msg) = offset_ptr(token, offs);
#ifdef HARD_DEBUG
	ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TOKEN(msg)));
	ASSERT(hp > ptr_val(ERL_MESSAGE_TOKEN(msg)));
#endif
    }

    if (is_not_immed(term)) {
	ASSERT(bp->mem <= ptr_val(term));
	ASSERT(bp->mem + bp->size > ptr_val(term));
	ERL_MESSAGE_TERM(msg) = offset_ptr(term, offs);
#ifdef HARD_DEBUG
	ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TERM(msg)));
	ASSERT(hp > ptr_val(ERL_MESSAGE_TERM(msg)));
#endif
    }


#ifdef HARD_DEBUG
    {
	int i, j;
	{
	    ProcBin *mso = off_heap->mso;
	    i = j = 0;
	    while (mso != dbg_mso_start) {
		mso = mso->next;
		i++;
	    }
	    mso = bp->off_heap.mso;
	    while (mso) {
		mso = mso->next;
		j++;
	    }
	    ASSERT(i == j);
	}
	{
	    ErlFunThing *fun = off_heap->funs;
	    i = j = 0;
	    while (fun != dbg_fun_start) {
		fun = fun->next;
		i++;
	    }
	    fun = bp->off_heap.funs;
	    while (fun) {
		fun = fun->next;
		j++;
	    }
	    ASSERT(i == j);
	}
	{
	    ExternalThing *external = off_heap->externals;
	    i = j = 0;
	    while (external != dbg_external_start) {
		external = external->next;
		i++;
	    }
	    external = bp->off_heap.externals;
	    while (external) {
		external = external->next;
		j++;
	    }
	    ASSERT(i == j);
	}
    }
#endif
	    

    bp->off_heap.mso = NULL;
#ifndef HYBRID
    bp->off_heap.funs = NULL;
#endif
    bp->off_heap.externals = NULL;
    free_message_buffer(bp);

#ifdef HARD_DEBUG
    ASSERT(eq(ERL_MESSAGE_TERM(msg), dbg_term));
    ASSERT(eq(ERL_MESSAGE_TOKEN(msg), dbg_token));
    free_message_buffer(dbg_bp);
#endif

}