Example #1
ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
{
    ErlNode *res;
    ErlNode ne;
    ne.sysname = sysname;
    ne.creation = creation;

    erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
    res = hash_get(&erts_node_table, (void *) &ne);
    if (res && res != erts_this_node) {
	erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
	if (refc < 2) /* New or pending delete */
	    erts_refc_inc(&res->refc, 1);
    }
    erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
    if (res)
	return res;

    erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
    res = hash_put(&erts_node_table, (void *) &ne);
    ASSERT(res);
    if (res != erts_this_node) {
	erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
	if (refc < 2) /* New or pending delete */
	    erts_refc_inc(&res->refc, 1);
    }
    erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
    return res;
}
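All of these examples revolve around the same small family of erts_refc_* helpers. A minimal sketch of their calling convention, written here with C11 atomics rather than the actual ERTS implementation: the second argument is the minimum value the counter is expected to hold after the operation (checked only in debug builds), and the *_inctest/*_dectest variants return the new count so callers can test for "new or pending delete" (< 2) or "last reference dropped" (== 0).

/* Illustrative sketch only; refc_t and these helpers are stand-ins for
 * erts_refc_t and the erts_refc_* functions, not their real definitions. */
#include <assert.h>
#include <stdatomic.h>

typedef atomic_long refc_t;

static inline void refc_init(refc_t *r, long val) { atomic_init(r, val); }

static inline void refc_inc(refc_t *r, long min_val)
{
    long after = atomic_fetch_add(r, 1) + 1;
    assert(after >= min_val);               /* debug-only sanity check */
    (void)after; (void)min_val;
}

static inline long refc_inctest(refc_t *r, long min_val)
{
    long after = atomic_fetch_add(r, 1) + 1;
    assert(after >= min_val); (void)min_val;
    return after;                           /* "< 2" => new or pending delete */
}

static inline long refc_dectest(refc_t *r, long min_val)
{
    long after = atomic_fetch_sub(r, 1) - 1;
    assert(after >= min_val); (void)min_val;
    return after;                           /* "== 0" => safe to free */
}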
Example #2
DistEntry *
erts_sysname_to_connected_dist_entry(Eterm sysname)
{
    DistEntry de;
    DistEntry *res_dep;
    de.sysname = sysname;
  
    if(erts_this_dist_entry->sysname == sysname) {
	erts_refc_inc(&erts_this_dist_entry->refc, 2);
	return erts_this_dist_entry;
    }

    erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
    res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de);
    if (res_dep) {
	erts_aint_t refc = erts_refc_inctest(&res_dep->refc, 1);
	if (refc < 2) /* Pending delete */
	    erts_refc_inc(&res_dep->refc, 1);
    }
    erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
    if (res_dep) {
	int deref;
	erts_smp_rwmtx_rlock(&res_dep->rwmtx);
	deref = is_nil(res_dep->cid);
	erts_smp_rwmtx_runlock(&res_dep->rwmtx);
	if (deref) {
	    erts_deref_dist_entry(res_dep);
	    res_dep = NULL;
	}
    }
    return res_dep;
}
Example #3
ErlNifResourceType*
enif_open_resource_type(ErlNifEnv* env,
			const char* module_str, 
			const char* name_str, 
			ErlNifResourceDtor* dtor,
			ErlNifResourceFlags flags,
			ErlNifResourceFlags* tried)
{
    ErlNifResourceType* type = NULL;
    ErlNifResourceFlags op = flags;
    Eterm module_am, name_am;

    ASSERT(erts_smp_thr_progress_is_blocking());
    ASSERT(module_str == NULL); /* for now... */
    module_am = make_atom(env->mod_nif->mod->module);
    name_am = enif_make_atom(env, name_str);

    type = find_resource_type(module_am, name_am);
    if (type == NULL) {
	if (flags & ERL_NIF_RT_CREATE) {
	    type = erts_alloc(ERTS_ALC_T_NIF,
			      sizeof(struct enif_resource_type_t)); 
	    type->dtor = dtor;
	    type->module = module_am;
	    type->name = name_am;
	    erts_refc_init(&type->refc, 1);
	    type->owner = env->mod_nif;
	    type->prev = &resource_type_list;
	    type->next = resource_type_list.next;
	    type->next->prev = type;
	    type->prev->next = type;
	    op = ERL_NIF_RT_CREATE;
	}
    }
    else {
	if (flags & ERL_NIF_RT_TAKEOVER) {	
	    steal_resource_type(type);
	    op = ERL_NIF_RT_TAKEOVER;
	}
	else {
	    type = NULL;
	}
    }
    if (type != NULL) {
	type->owner = env->mod_nif;
	type->dtor = dtor;
	if (type->dtor != NULL) {
	    erts_refc_inc(&type->owner->rt_dtor_cnt, 1);
	}
	erts_refc_inc(&type->owner->rt_cnt, 1);    
    }
    if (tried != NULL) {
	*tried = op;
    }
    return type;
}
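For context, enif_open_resource_type() is normally called from a NIF library's load() callback. A hedged usage sketch from the NIF author's side (my_resource_type and my_counter_dtor are hypothetical names, not part of the example above):

#include <erl_nif.h>

static ErlNifResourceType *my_resource_type;

static void my_counter_dtor(ErlNifEnv *env, void *obj)
{
    /* nothing extra to release for a plain int payload */
    (void)env; (void)obj;
}

static int load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info)
{
    ErlNifResourceFlags tried;
    (void)priv_data; (void)load_info;

    /* module_str must be NULL, as the ASSERT in the example enforces */
    my_resource_type =
        enif_open_resource_type(env, NULL, "my_counter", my_counter_dtor,
                                ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER,
                                &tried);
    return my_resource_type == NULL;        /* non-zero return fails the load */
}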
Example #4
void enif_keep_resource(void* obj)
{
    ErlNifResource* resource = DATA_TO_RESOURCE(obj);
    ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource);

    ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
#ifdef DEBUG
    erts_refc_inc(&resource->nif_refc, 1);
#endif
    erts_refc_inc(&bin->binary.refc, 2);
}
Example #5
void* enif_alloc_resource(ErlNifResourceType* type, size_t size)
{
    Binary* bin = erts_create_magic_binary(SIZEOF_ErlNifResource(size), &nif_resource_dtor);
    ErlNifResource* resource = ERTS_MAGIC_BIN_DATA(bin);
    resource->type = type;
    erts_refc_inc(&bin->refc, 1);
#ifdef DEBUG
    erts_refc_init(&resource->nif_refc, 1);
#endif
    erts_refc_inc(&resource->type->refc, 2);
    return resource->data;
}
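A matching allocation sketch from the NIF author's side, assuming the resource type opened in the load() sketch above; enif_make_resource() and enif_release_resource() are the public counterparts of the refc manipulation shown in examples #4 and #5:

#include <erl_nif.h>

static ErlNifResourceType *my_resource_type;  /* as opened in load() above */

static ERL_NIF_TERM new_counter(ErlNifEnv *env, int argc,
                                const ERL_NIF_TERM argv[])
{
    int *state = enif_alloc_resource(my_resource_type, sizeof(int));
    ERL_NIF_TERM term;
    (void)argc; (void)argv;

    *state = 0;
    term = enif_make_resource(env, state);   /* the term now holds a reference */
    enif_release_resource(state);            /* drop the reference from alloc  */
    return term;
}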
Example #6
void erts_ddll_reference_driver(DE_Handle *dh)
{
    assert_drv_list_locked();
    if (erts_refc_inctest(&(dh->refc),1) == 1) {
	erts_refc_inc(&(dh->refc),2); /* add a reference for the scheduled operation */
    }
}
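The same pattern in compressed form (plain C11 atomics, not the DE_Handle machinery): erts_refc_inctest() returning 1 means the count was 0 before the call, i.e. the driver handle was pending unload, so a second increment is taken on behalf of the scheduled operation.

#include <stdatomic.h>

static void reference_driver(atomic_long *refc)
{
    if (atomic_fetch_add(refc, 1) + 1 == 1)  /* was 0: pending unload       */
        atomic_fetch_add(refc, 1);           /* extra ref for scheduled op  */
}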
Example #7
/* Put elements from vec at q head */
int erts_ioq_pushqv(ErtsIOQueue *q, ErtsIOVec* vec, Uint skipbytes)
{
    int n;
    Uint len;
    Uint size = vec->common.size - skipbytes;
    SysIOVec* iov;
    ErtsIOQBinary** binv;
    ErtsIOQBinary* b;

    if (q == NULL)
	return -1;

    ASSERT(vec->common.size >= skipbytes);
    if (vec->common.size <= skipbytes)
	return 0;

    n = skip(vec, skipbytes, &iov, &binv, &len);

    if (n < 0)
        return n;

    if (q->v_head - n < q->v_start)
	if (expandq(q, n, 0))
            return -1;

    /* Queue and reference all binaries (remove zero length items) */
    iov += (n-1);  /* move to end */
    binv += (n-1); /* move to end */
    while(n--) {
	if ((len = iov->iov_len) > 0) {
	    if ((b = *binv) == NULL) { /* special case create binary ! */
                if (q->driver) {
                    ErlDrvBinary *bin = driver_alloc_binary(len);
                    if (!bin) return -1;
                    sys_memcpy(bin->orig_bytes, iov->iov_base, len);
                    b = (ErtsIOQBinary *)bin;
                    q->v_head->iov_base = bin->orig_bytes;
                }
		*--q->b_head = b;
		q->v_head--;
		q->v_head->iov_len = len;
	    }
	    else {
                if (q->driver)
                    driver_binary_inc_refc(&b->driver);
                else
                    erts_refc_inc(&b->nif.intern.refc, 1);
		*--q->b_head = b;
		*--q->v_head = *iov;
	    }
	}
	iov--;
	binv--;
    }
    q->size += size;      /* update total size in queue */
    return 0;
}
Example #8
static void restore_process_references(DE_Handle *dh) 
{
    DE_ProcEntry *p;
    assert_drv_list_rwlocked();
    ASSERT(erts_refc_read(&(dh->refc),0) == 0);
    for(p  = dh->procs;p != NULL; p = p->next) {
	if (p->awaiting_status == ERL_DE_PROC_LOADED) {
	    ASSERT(p->flags & ERL_DE_FL_DEREFERENCED);
	    erts_refc_inc(&(dh->refc),1);
	    p->flags &= ~ERL_DE_FL_DEREFERENCED;
	}
    }
}
Example #9
/* Put elements from vec at q tail */
int erts_ioq_enqv(ErtsIOQueue *q, ErtsIOVec *eiov, Uint skipbytes)
{
    int n;
    Uint len;
    Uint size = eiov->common.size - skipbytes;
    SysIOVec *iov;
    ErtsIOQBinary** binv;
    ErtsIOQBinary*  b;

    if (q == NULL)
	return -1;

    ASSERT(eiov->common.size >= skipbytes);
    if (eiov->common.size <= skipbytes)
	return 0;

    n = skip(eiov, skipbytes, &iov, &binv, &len);

    if (n < 0)
        return n;

    if (q->v_tail + n >= q->v_end)
	if (expandq(q, n, 1))
            return -1;

    /* Queue and reference all binaries (remove zero length items) */
    while(n--) {
	if ((len = iov->iov_len) > 0) {
	    if ((b = *binv) == NULL) { /* special case create binary ! */
		b = alloc_binary(len, iov->iov_base, (void**)&q->v_tail->iov_base,
                                 q->driver);
                if (!b) return -1;
		*q->b_tail++ = b;
		q->v_tail->iov_len = len;
		q->v_tail++;
	    }
	    else {
                if (q->driver)
                    driver_binary_inc_refc(&b->driver);
                else
                    erts_refc_inc(&b->nif.intern.refc, 1);
		*q->b_tail++ = b;
		*q->v_tail++ = *iov;
	    }
	}
	iov++;
	binv++;
    }
    q->size += size;      /* update total size in queue */
    return 0;
}
Example #10
DistEntry *erts_find_or_insert_dist_entry(Eterm sysname)
{
    DistEntry *res;
    DistEntry de;
    long refc;
    de.sysname = sysname;
    erts_smp_mtx_lock(&erts_dist_table_mtx);
    res = hash_put(&erts_dist_table, (void *) &de);
    refc = erts_refc_inctest(&res->refc, 0);
    if (refc < 2) /* New or pending delete */
	erts_refc_inc(&res->refc, 1);
    erts_smp_mtx_unlock(&erts_dist_table_mtx);
    return res;
}
Example #11
DistEntry *
erts_sysname_to_connected_dist_entry(Eterm sysname)
{
    DistEntry de;
    DistEntry *res_dep;
    de.sysname = sysname;
  
    if(erts_this_dist_entry->sysname == sysname) {
	erts_refc_inc(&erts_this_dist_entry->refc, 2);
	return erts_this_dist_entry;
    }

    erts_smp_mtx_lock(&erts_dist_table_mtx);
    res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de);
    if (res_dep) {
	long refc = erts_refc_inctest(&res_dep->refc, 1);
	if (refc < 2) /* Pending delete */
	    erts_refc_inc(&res_dep->refc, 1);
    }
    erts_smp_mtx_unlock(&erts_dist_table_mtx);
    if (res_dep) {
	erts_smp_mtx_t *mtxp;
#ifdef ERTS_SMP
	mtxp = res_dep->mtxp;
#else
	mtxp = NULL;
#endif
	erts_smp_mtx_lock(mtxp);
	if (is_nil(res_dep->cid)) {
	    erts_deref_dist_entry(res_dep);
	    res_dep = NULL;
	}
	erts_smp_mtx_unlock(mtxp);
    }
    return res_dep;
}
Example #12
DistEntry *erts_find_dist_entry(Eterm sysname)
{
    DistEntry *res;
    DistEntry de;
    de.sysname = sysname;
    erts_smp_mtx_lock(&erts_dist_table_mtx);
    res = hash_get(&erts_dist_table, (void *) &de);
    if (res) {
	long refc = erts_refc_inctest(&res->refc, 1);
	if (refc < 2) /* Pending delete */
	    erts_refc_inc(&res->refc, 1);
    }
    erts_smp_mtx_unlock(&erts_dist_table_mtx);
    return res;
}
Example #13
void
erts_set_this_node(Eterm sysname, Uint creation)
{
    ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking());
    ASSERT(erts_refc_read(&erts_this_dist_entry->refc, 2));

    if (erts_refc_dectest(&erts_this_node->refc, 0) == 0)
        try_delete_node(erts_this_node);

    if (erts_refc_dectest(&erts_this_dist_entry->refc, 0) == 0)
        try_delete_dist_entry(erts_this_dist_entry);

    erts_this_node = NULL; /* to make sure refc is bumped for this node */
    erts_this_node = erts_find_or_insert_node(sysname, creation);
    erts_this_dist_entry = erts_this_node->dist_entry;

    erts_refc_inc(&erts_this_dist_entry->refc, 2);

    erts_this_node_sysname = erts_this_node_sysname_BUFFER;
    erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER),
		  "%T", sysname);
}
Example #14
DistEntry *
erts_channel_no_to_dist_entry(Uint cno)
{
/*
 * For this node (and previous incarnations of this node),
 * ERST_INTERNAL_CHANNEL_NO (will always be 0 I guess) is used as
 * channel no. For other nodes, the atom index of the atom corresponding
 * to the node name is used as channel no.
 */
    if(cno == ERST_INTERNAL_CHANNEL_NO) {
	erts_refc_inc(&erts_this_dist_entry->refc, 2);
	return erts_this_dist_entry;
    }

    if((cno > MAX_ATOM_INDEX)
       || (cno >= atom_table_size())
       ||  (atom_tab(cno) == NULL))
	return NULL;
        
    /* cno is a valid atom index; find corresponding dist entry (if there
       is one) */
    return erts_find_dist_entry(make_atom(cno));
}
Example #15
void
erts_slave_serve_refc(struct master_command_refc *cmd)
{
    switch (cmd->op) {
    case MASTER_REFC_OP_INIT:
	erts_refc_init(cmd->refcp, cmd->arg);
	break;
    case MASTER_REFC_OP_INC:
	erts_refc_inc(cmd->refcp, cmd->min_val);
	break;
    case MASTER_REFC_OP_DEC:
	erts_refc_dec(cmd->refcp, cmd->min_val);
	break;
    case MASTER_REFC_OP_ADD:
	erts_refc_add(cmd->refcp, cmd->arg, cmd->min_val);
	break;
    case MASTER_REFC_OP_DECFREE:
	erts_refc_decfree(cmd->refcp, cmd->min_val, cmd->kind, cmd->objp);
	break;
    default:
	erl_exit(1, "Unexpected op %d in erts_slave_serve_refc", cmd->op);
    }
}
Example #16
/*
 * Copy a term that is guaranteed to be contained in a single
 * heap block. The heap block is copied word by word, and any
 * pointers are offsetted to point correctly in the new location.
 *
 * Typically used to copy a term from an ets table.
 *
 * NOTE: Assumes that term is a tuple (ptr is an untagged tuple ptr).
 */
Eterm
copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
{
    Eterm* tp = ptr;
    Eterm* hp = *hpp;
    Sint offs = hp - tp;

    while (sz--) {
	Eterm val = *tp++;

	switch (primary_tag(val)) {
	case TAG_PRIMARY_IMMED1:
	    *hp++ = val;
	    break;
	case TAG_PRIMARY_LIST:
	case TAG_PRIMARY_BOXED:
	    *hp++ = offset_ptr(val, offs);
	    break;
	case TAG_PRIMARY_HEADER:
	    *hp++ = val;
	    switch (val & _HEADER_SUBTAG_MASK) {
	    case ARITYVAL_SUBTAG:
		break;
	    case REFC_BINARY_SUBTAG:
		{
		    ProcBin* pb = (ProcBin *) (hp-1);
		    int tari = thing_arityval(val);

		    sz -= tari;
		    while (tari--) {
			*hp++ = *tp++;
		    }
		    erts_refc_inc(&pb->val->refc, 2);
		    pb->next = off_heap->mso;
		    off_heap->mso = pb;
		    off_heap->overhead += pb->size / sizeof(Eterm);
		}
		break;
	    case FUN_SUBTAG:
		{
#ifndef HYBRID /* FIND ME! */
		    ErlFunThing* funp = (ErlFunThing *) (hp-1);
#endif
		    int tari = thing_arityval(val);

		    sz -= tari;
		    while (tari--) {
			*hp++ = *tp++;
		    }
#ifndef HYBRID /* FIND ME! */
		    funp->next = off_heap->funs;
		    off_heap->funs = funp;
		    erts_refc_inc(&funp->fe->refc, 2);
#endif
		}
		break;
	    case EXTERNAL_PID_SUBTAG:
	    case EXTERNAL_PORT_SUBTAG:
	    case EXTERNAL_REF_SUBTAG:
		{
		    ExternalThing* etp = (ExternalThing *) (hp-1);
		    int tari = thing_arityval(val);

		    sz -= tari;
		    while (tari--) {
			*hp++ = *tp++;
		    }
		    etp->next = off_heap->externals;
		    off_heap->externals = etp;
		    erts_refc_inc(&etp->node->refc, 2);
		}
		break;
	    default:
		{
		    int tari = header_arity(val);

		    sz -= tari;
		    while (tari--) {
			*hp++ = *tp++;
		    }
		}
		break;
	    }
	    break;
	}
    }
    *hpp = hp;
    return make_tuple(ptr + offs); 
}
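Why the copy bumps the refcount at all: after a shallow copy there are two ProcBin headers pointing at one shared, reference-counted buffer, so the buffer must stay alive until both copies are released. A self-contained toy illustration (not ERTS code; shared_bin and proc_bin are stand-ins for Binary and ProcBin):

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

typedef struct { atomic_long refc; char data[]; } shared_bin;  /* like Binary  */
typedef struct { shared_bin *val; } proc_bin;                  /* like ProcBin */

static shared_bin *bin_new(const char *s)
{
    shared_bin *b = malloc(sizeof *b + strlen(s) + 1);
    if (!b) abort();
    atomic_init(&b->refc, 1);
    strcpy(b->data, s);
    return b;
}

static void bin_release(shared_bin *b)
{
    if (atomic_fetch_sub(&b->refc, 1) == 1)   /* we held the last reference */
        free(b);
}

int main(void)
{
    proc_bin a = { bin_new("shared payload") };
    proc_bin copy = a;                     /* shallow copy of the header            */
    atomic_fetch_add(&copy.val->refc, 1);  /* the erts_refc_inc(&pb->val->refc, 2) step */
    bin_release(a.val);                    /* either owner may release first        */
    bin_release(copy.val);                 /* last release frees the payload        */
    return 0;
}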
Example #17
void erts_ddll_reference_referenced_driver(DE_Handle *dh)
{
    erts_refc_inc(&(dh->refc),2);
}
Example #18
BIF_RETTYPE
finish_loading_1(BIF_ALIST_1)
{
    Sint i;
    Sint n;
    struct m* p = NULL;
    Uint exceptions;
    Eterm res;
    int is_blocking = 0;
    int do_commit = 0;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
	ERTS_BIF_YIELD1(bif_export[BIF_finish_loading_1], BIF_P, BIF_ARG_1);
    }

    /*
     * Validate the argument before we start loading; it must be a
     * proper list where each element is a magic binary containing
     * prepared (not previously loaded) code.
     *
     * First count the number of elements and allocate an array
     * to keep the elements in.
     */

    n = erts_list_length(BIF_ARG_1);
    if (n < 0) {
    badarg:
	if (p) {
	    erts_free(ERTS_ALC_T_LOADER_TMP, p);
	}
	erts_release_code_write_permission();
	BIF_ERROR(BIF_P, BADARG);
    }
    p = erts_alloc(ERTS_ALC_T_LOADER_TMP, n*sizeof(struct m));

    /*
     * We now know that the argument is a proper list. Validate
     * and collect the binaries into the array.
     */

    for (i = 0; i < n; i++) {
	Eterm* cons = list_val(BIF_ARG_1);
	Eterm term = CAR(cons);
	ProcBin* pb;

	if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
	    goto badarg;
	}
	pb = (ProcBin*) binary_val(term);
	p[i].code = pb->val;
	p[i].module = erts_module_for_prepared_code(p[i].code);
	if (p[i].module == NIL) {
	    goto badarg;
	}
	BIF_ARG_1 = CDR(cons);
    }

    /*
     * Since we cannot handle atomic loading of a group of modules
     * if one or more of them uses on_load, we will only allow
     * more than one element in the list if none of the modules
     * have an on_load function.
     */

    if (n > 1) {
	for (i = 0; i < n; i++) {
	    if (erts_has_code_on_load(p[i].code) == am_true) {
		erts_free(ERTS_ALC_T_LOADER_TMP, p);
		erts_release_code_write_permission();
		BIF_ERROR(BIF_P, SYSTEM_LIMIT);
	    }
	}
    }

    /*
     * All types are correct. There cannot be a BADARG from now on.
     * Before we can start loading, we must check whether any of
     * the modules already has old code. To avoid a race, we must
     * not allow other process to initiate a code loading operation
     * from now on.
     */

    res = am_ok;
    erts_start_staging_code_ix(n);

    for (i = 0; i < n; i++) {
	p[i].modp = erts_put_module(p[i].module);
	p[i].modp->seen = 0;
    }

    exceptions = 0;
    for (i = 0; i < n; i++) {
	p[i].exception = 0;
	if (p[i].modp->seen) {
	    p[i].exception = 1;
	    exceptions++;
	}
	p[i].modp->seen = 1;
    }
    if (exceptions) {
	res = exception_list(BIF_P, am_duplicated, p, exceptions);
	goto done;
    }

    for (i = 0; i < n; i++) {
	if (p[i].modp->curr.num_breakpoints > 0 ||
	    p[i].modp->curr.num_traced_exports > 0 ||
	    erts_is_default_trace_enabled()) {
	    /* tracing involved, fallback with thread blocking */
	    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
	    erts_smp_thr_progress_block();
	    is_blocking = 1;
	    break;
	}
    }

    if (is_blocking) {
	for (i = 0; i < n; i++) {
	    if (p[i].modp->curr.num_breakpoints) {
		erts_clear_module_break(p[i].modp);
		ASSERT(p[i].modp->curr.num_breakpoints == 0);
	    }
	}
    }

    exceptions = 0;
    for (i = 0; i < n; i++) {
	p[i].exception = 0;
	if (p[i].modp->curr.code_hdr && p[i].modp->old.code_hdr) {
	    p[i].exception = 1;
	    exceptions++;
	}
    }

    if (exceptions) {
	res = exception_list(BIF_P, am_not_purged, p, exceptions);
    } else {
	/*
	 * Now we can load all code. This can't fail.
	 */

	exceptions = 0;
	for (i = 0; i < n; i++) {
	    Eterm mod;
	    Eterm retval;

	    erts_refc_inc(&p[i].code->refc, 1);
	    retval = erts_finish_loading(p[i].code, BIF_P, 0, &mod);
	    ASSERT(retval == NIL || retval == am_on_load);
	    if (retval == am_on_load) {
		p[i].exception = 1;
		exceptions++;
	    }
	}

	/*
	 * Check whether any module has an on_load_handler.
	 */

	if (exceptions) {
	    res = exception_list(BIF_P, am_on_load, p, exceptions);
	}
	do_commit = 1;
    }

done:
    return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n);
}
Example #19
#if HALFWORD_HEAP
Eterm copy_struct_rel(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap,
                      Eterm* src_base, Eterm* dst_base)
#else
Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
#endif
{
    char* hstart;
    Uint hsize;
    Eterm* htop;
    Eterm* hbot;
    Eterm* hp;
    Eterm* objp;
    Eterm* tp;
    Eterm  res;
    Eterm  elem;
    Eterm* tailp;
    Eterm* argp;
    Eterm* const_tuple;
    Eterm hdr;
    int i;
#ifdef DEBUG
    Eterm org_obj = obj;
    Uint org_sz = sz;
#endif

    if (IS_CONST(obj))
	return obj;

    DTRACE1(copy_struct, (int32_t)sz);

    hp = htop = *hpp;
    hbot   = htop + sz;
    hstart = (char *)htop;
    hsize = (char*) hbot - hstart;
    const_tuple = 0;

    /* Copy the object onto the heap */
    switch (primary_tag(obj)) {
    case TAG_PRIMARY_LIST:
	argp = &res;
	objp = list_val_rel(obj,src_base);
	goto L_copy_list;
    case TAG_PRIMARY_BOXED: argp = &res; goto L_copy_boxed;
    default:
	erl_exit(ERTS_ABORT_EXIT,
		 "%s, line %d: Internal error in copy_struct: 0x%08x\n",
		 __FILE__, __LINE__,obj);
    }

 L_copy:
    while (hp != htop) {
	obj = *hp;

	switch (primary_tag(obj)) {
	case TAG_PRIMARY_IMMED1:
	    hp++;
	    break;
	case TAG_PRIMARY_LIST:
	    objp = list_val_rel(obj,src_base);
	#if !HALFWORD_HEAP || defined(DEBUG)
	    if (in_area(objp,hstart,hsize)) {
		ASSERT(!HALFWORD_HEAP);
		hp++;
		break;
	    }
	#endif
	    argp = hp++;
	    /* Fall through */

	L_copy_list:
	    tailp = argp;
	    for (;;) {
		tp = tailp;
		elem = CAR(objp);
		if (IS_CONST(elem)) {
		    hbot -= 2;
		    CAR(hbot) = elem;
		    tailp = &CDR(hbot);
		}
		else {
		    CAR(htop) = elem;
		#if HALFWORD_HEAP
		    CDR(htop) = CDR(objp);
		    *tailp = make_list_rel(htop,dst_base);
		    htop += 2;
		    goto L_copy;
		#else
		    tailp = &CDR(htop);
		    htop += 2;
		#endif
		}
		ASSERT(!HALFWORD_HEAP || tp < hp || tp >= hbot);
		*tp = make_list_rel(tailp - 1, dst_base);
		obj = CDR(objp);
		if (!is_list(obj)) {
		    break;
		}
		objp = list_val_rel(obj,src_base);
	    }
	    switch (primary_tag(obj)) {
	    case TAG_PRIMARY_IMMED1: *tailp = obj; goto L_copy;
	    case TAG_PRIMARY_BOXED: argp = tailp; goto L_copy_boxed;
	    default:
		erl_exit(ERTS_ABORT_EXIT,
			 "%s, line %d: Internal error in copy_struct: 0x%08x\n",
			 __FILE__, __LINE__,obj);
	    }
	    
	case TAG_PRIMARY_BOXED:
	#if !HALFWORD_HEAP || defined(DEBUG)
	    if (in_area(boxed_val_rel(obj,src_base),hstart,hsize)) {
		ASSERT(!HALFWORD_HEAP);
		hp++;
		break;
	    }
	#endif
	    argp = hp++;

	L_copy_boxed:
	    objp = boxed_val_rel(obj, src_base);
	    hdr = *objp;
	    switch (hdr & _TAG_HEADER_MASK) {
	    case ARITYVAL_SUBTAG:
		{
		    int const_flag = 1; /* assume constant tuple */
		    i = arityval(hdr);
		    *argp = make_tuple_rel(htop, dst_base);
		    tp = htop;	/* tp is pointer to new arity value */
		    *htop++ = *objp++; /* copy arity value */
		    while (i--) {
			elem = *objp++;
			if (!IS_CONST(elem)) {
			    const_flag = 0;
			}
			*htop++ = elem;
		    }
		    if (const_flag) {
			const_tuple = tp; /* this is the latest const_tuple */
		    }
		}
		break;
	    case MAP_SUBTAG:
		{
		    i = map_get_size(objp) + 3;
		    *argp = make_map_rel(htop, dst_base);
		    while (i--) {
			*htop++ = *objp++;
		    }
		}
		break;
	    case REFC_BINARY_SUBTAG:
		{
                    EPIPHANY_STUB(copy_struct);
		}
		break;
	    case SUB_BINARY_SUBTAG:
		{
                    EPIPHANY_STUB(copy_struct);
		}
		break;
	    case FUN_SUBTAG:
		{
		    ErlFunThing* funp = (ErlFunThing *) objp;

		    i =  thing_arityval(hdr) + 2 + funp->num_free;
		    tp = htop;
		    while (i--)  {
			*htop++ = *objp++;
		    }
		    funp = (ErlFunThing *) tp;
		    funp->next = off_heap->first;
		    off_heap->first = (struct erl_off_heap_header*) funp;
		    erts_refc_inc(&funp->fe->refc, 2);
		    *argp = make_fun_rel(tp, dst_base);
		}
		break;
	    case EXTERNAL_PID_SUBTAG:
	    case EXTERNAL_PORT_SUBTAG:
	    case EXTERNAL_REF_SUBTAG:
		{
		  ExternalThing *etp = (ExternalThing *) htop;

		  i =  thing_arityval(hdr) + 1;
		  tp = htop;

		  while (i--)  {
		    *htop++ = *objp++;
		  }

		  etp->next = off_heap->first;
		  off_heap->first = (struct erl_off_heap_header*)etp;
		  erts_refc_inc(&etp->node->refc, 2);

		  *argp = make_external_rel(tp, dst_base);
		}
		break;
	    case BIN_MATCHSTATE_SUBTAG:
		erl_exit(ERTS_ABORT_EXIT,
			 "copy_struct: matchstate term not allowed");
	    default:
		i = thing_arityval(hdr)+1;
		hbot -= i;
		tp = hbot;
		*argp = make_boxed_rel(hbot, dst_base);
		while (i--) {
		    *tp++ = *objp++;
		}
	    }
	    break;
	case TAG_PRIMARY_HEADER:
	    if (header_is_thing(obj) || hp == const_tuple) {
		hp += header_arity(obj) + 1;
	    } else {
		hp++;
	    }
	    break;
	}
    }

#ifdef DEBUG
    if (htop != hbot)
	erl_exit(ERTS_ABORT_EXIT,
		 "Internal error in copy_struct() when copying %T:"
		 " htop=%p != hbot=%p (sz=%beu)\n",
		 org_obj, htop, hbot, org_sz); 
#else
    if (htop > hbot) {
	erl_exit(ERTS_ABORT_EXIT,
		 "Internal error in copy_struct(): htop, hbot overrun\n");
    }
#endif
    *hpp = (Eterm *) (hstart+hsize);
    return res;
}
Example #20
#if HALFWORD_HEAP
Eterm copy_shallow_rel(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap,
                       Eterm* src_base)
#else
Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
#endif
{
    Eterm* tp = ptr;
    Eterm* hp = *hpp;
    const Eterm res = make_tuple(hp);
#if HALFWORD_HEAP
    const Sint offs = COMPRESS_POINTER(hp - (tp - src_base));
#else
    const Sint offs = (hp - tp) * sizeof(Eterm);
#endif

    while (sz--) {
	Eterm val = *tp++;

	switch (primary_tag(val)) {
	case TAG_PRIMARY_IMMED1:
	    *hp++ = val;
	    break;
	case TAG_PRIMARY_LIST:
	case TAG_PRIMARY_BOXED:
	    *hp++ = byte_offset_ptr(val, offs);
	    break;
	case TAG_PRIMARY_HEADER:
	    *hp++ = val;
	    switch (val & _HEADER_SUBTAG_MASK) {
	    case ARITYVAL_SUBTAG:
		break;
	    case REFC_BINARY_SUBTAG:
		{
		    ProcBin* pb = (ProcBin *) (tp-1);
		    erts_refc_inc(&pb->val->refc, 2);
		    OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
		}
		goto off_heap_common;

	    case FUN_SUBTAG:
		{
		    ErlFunThing* funp = (ErlFunThing *) (tp-1);
		    erts_refc_inc(&funp->fe->refc, 2);
		}
		goto off_heap_common;

	    case MAP_SUBTAG:
		*hp++ = *tp++;
		sz--;
		break;
	    case EXTERNAL_PID_SUBTAG:
	    case EXTERNAL_PORT_SUBTAG:
	    case EXTERNAL_REF_SUBTAG:
		{
		    ExternalThing* etp = (ExternalThing *) (tp-1);
		    erts_refc_inc(&etp->node->refc, 2);
		}
	    off_heap_common:
		{
		    struct erl_off_heap_header* ohh = (struct erl_off_heap_header*)(hp-1);
		    int tari = thing_arityval(val);
		    
		    sz -= tari;
		    while (tari--) {
			*hp++ = *tp++;
		    }
		    ohh->next = off_heap->first;
		    off_heap->first = ohh;
		}
		break;
	    default:
		{
		    int tari = header_arity(val);
    
		    sz -= tari;
		    while (tari--) {
			*hp++ = *tp++;
		    }
		}
		break;
	    }
	    break;
	}
    }
    *hpp = hp;

    return res;
}
Example #21
static void
consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
{
    GenericBp* g = (GenericBp *) pc[-4];
    GenericBpData* src;
    GenericBpData* dst;
    Uint flags;

    if (g == 0) {
	return;
    }

    src = &g->data[erts_active_bp_ix()];
    dst = &g->data[erts_staging_bp_ix()];

    /*
     * The contents of the staging area may be out of date.
     * Decrement all reference pointers.
     */

    flags = dst->flags;
    if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
	MatchSetUnref(dst->local_ms);
    }
    if (flags & ERTS_BPF_META_TRACE) {
	bp_meta_unref(dst->meta_tracer);
	MatchSetUnref(dst->meta_ms);
    }
    if (flags & ERTS_BPF_COUNT) {
	bp_count_unref(dst->count);
    }
    if (flags & ERTS_BPF_TIME_TRACE) {
	bp_time_unref(dst->time);
    }

    /*
     * If all flags are zero, deallocate all breakpoint data.
     */

    flags = dst->flags = src->flags;
    if (flags == 0) {
	if (modp) {
	    if (local) {
		modp->curr.num_breakpoints--;
	    } else {
		modp->curr.num_traced_exports--;
	    }
	    ASSERT(modp->curr.num_breakpoints >= 0);
	    ASSERT(modp->curr.num_traced_exports >= 0);
	    ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint));
	}
	pc[-4] = 0;
	Free(g);
	return;
    }

    /*
     * Copy the active data to the staging area (making it ready
     * for the next time it will be used).
     */

    if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
	dst->local_ms = src->local_ms;
	MatchSetRef(dst->local_ms);
    }
    if (flags & ERTS_BPF_META_TRACE) {
	dst->meta_tracer = src->meta_tracer;
	erts_refc_inc(&dst->meta_tracer->refc, 1);
	dst->meta_ms = src->meta_ms;
	MatchSetRef(dst->meta_ms);
    }
    if (flags & ERTS_BPF_COUNT) {
	dst->count = src->count;
	erts_refc_inc(&dst->count->refc, 1);
    }
    if (flags & ERTS_BPF_TIME_TRACE) {
	dst->time = src->time;
	erts_refc_inc(&dst->time->refc, 1);
	ASSERT(dst->time->hash);
    }
}
Example #22
/*
 *  Copy a structure to a heap.
 */
Eterm
copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
{
    char* hstart;
    Uint hsize;
    Eterm* htop;
    Eterm* hbot;
    Eterm* hp;
    Eterm* objp;
    Eterm* tp;
    Eterm  res;
    Eterm  elem;
    Eterm* tailp;
    Eterm* argp;
    Eterm* const_tuple;
    Eterm hdr;
    int i;
#ifdef DEBUG
    Eterm org_obj = obj;
    Uint org_sz = sz;
#endif

    if (IS_CONST(obj))
	return obj;

    hp = htop = *hpp;
    hbot   = htop + sz;
    hstart = (char *)htop;
    hsize = (char*) hbot - hstart;
    const_tuple = 0;

    /* Copy the object onto the heap */
    switch (primary_tag(obj)) {
    case TAG_PRIMARY_LIST: argp = &res; goto L_copy_list;
    case TAG_PRIMARY_BOXED: argp = &res; goto L_copy_boxed;
    default:
	erl_exit(ERTS_ABORT_EXIT,
		 "%s, line %d: Internal error in copy_struct: 0x%08x\n",
		 __FILE__, __LINE__,obj);
    }

 L_copy:
    while (hp != htop) {
	obj = *hp;

	switch (primary_tag(obj)) {
	case TAG_PRIMARY_IMMED1:
	    hp++;
	    break;
	case TAG_PRIMARY_LIST:
	    objp = list_val(obj);
	    if (in_area(objp,hstart,hsize)) {
		hp++;
		break;
	    }
	    argp = hp++;
	    /* Fall through */

	L_copy_list:
	    tailp = argp;
	    while (is_list(obj)) {
		objp = list_val(obj);
		tp = tailp;
		elem = *objp;
		if (IS_CONST(elem)) {
		    *(hbot-2) = elem;
		    tailp = hbot-1;
		    hbot -= 2;
		}
		else {
		    *htop = elem;
		    tailp = htop+1;
		    htop += 2;
		}
		*tp = make_list(tailp - 1);
		obj = *(objp+1);
	    }
	    switch (primary_tag(obj)) {
	    case TAG_PRIMARY_IMMED1: *tailp = obj; goto L_copy;
	    case TAG_PRIMARY_BOXED: argp = tailp; goto L_copy_boxed;
	    default:
		erl_exit(ERTS_ABORT_EXIT,
			 "%s, line %d: Internal error in copy_struct: 0x%08x\n",
			 __FILE__, __LINE__,obj);
	    }
	    
	case TAG_PRIMARY_BOXED:
	    if (in_area(boxed_val(obj),hstart,hsize)) {
		hp++;
		break;
	    }
	    argp = hp++;

	L_copy_boxed:
	    objp = boxed_val(obj);
	    hdr = *objp;
	    switch (hdr & _TAG_HEADER_MASK) {
	    case ARITYVAL_SUBTAG:
		{
		    int const_flag = 1; /* assume constant tuple */
		    i = arityval(hdr);
		    *argp = make_tuple(htop);
		    tp = htop;	/* tp is pointer to new arity value */
		    *htop++ = *objp++; /* copy arity value */
		    while (i--) {
			elem = *objp++;
			if (!IS_CONST(elem)) {
			    const_flag = 0;
			}
			*htop++ = elem;
		    }
		    if (const_flag) {
			const_tuple = tp; /* this is the latest const_tuple */
		    }
		}
		break;
	    case REFC_BINARY_SUBTAG:
		{
		    ProcBin* pb;

		    pb = (ProcBin *) objp;
		    if (pb->flags) {
			erts_emasculate_writable_binary(pb);
		    }
		    i = thing_arityval(*objp) + 1;
		    hbot -= i;
		    tp = hbot;
		    while (i--)  {
			*tp++ = *objp++;
		    }
		    *argp = make_binary(hbot);
		    pb = (ProcBin*) hbot;
		    erts_refc_inc(&pb->val->refc, 2);
		    pb->next = off_heap->mso;
		    pb->flags = 0;
		    off_heap->mso = pb;
		    off_heap->overhead += pb->size / sizeof(Eterm);
		}
		break;
	    case SUB_BINARY_SUBTAG:
		{
		    ErlSubBin* sb = (ErlSubBin *) objp;
		    Eterm real_bin = sb->orig;
		    Uint bit_offset = sb->bitoffs;
		    Uint bit_size = sb -> bitsize;
		    Uint offset = sb->offs;
		    size_t size = sb->size;
		    Uint extra_bytes;
		    Uint real_size;
		    if ((bit_size + bit_offset) > 8) {
			extra_bytes = 2;
		    } else if ((bit_size + bit_offset) > 0) {
			extra_bytes = 1;
		    } else {
			extra_bytes = 0;
		    } 
		    real_size = size+extra_bytes;
		    objp = binary_val(real_bin);
		    if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
			ErlHeapBin* from = (ErlHeapBin *) objp;
			ErlHeapBin* to;
			i = heap_bin_size(real_size);
			hbot -= i;
			to = (ErlHeapBin *) hbot;
			to->thing_word = header_heap_bin(real_size);
			to->size = real_size;
			sys_memcpy(to->data, ((byte *)from->data)+offset, real_size);
		    } else {
			ProcBin* from = (ProcBin *) objp;
			ProcBin* to;
			
			ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
			if (from->flags) {
			    erts_emasculate_writable_binary(from);
			}
			hbot -= PROC_BIN_SIZE;
			to = (ProcBin *) hbot;
			to->thing_word = HEADER_PROC_BIN;
			to->size = real_size;
			to->val = from->val;
			erts_refc_inc(&to->val->refc, 2);
			to->bytes = from->bytes + offset;
			to->next = off_heap->mso;
			to->flags = 0;
			off_heap->mso = to;
			off_heap->overhead += to->size / sizeof(Eterm);
		    }
		    *argp = make_binary(hbot);
		    if (extra_bytes != 0) {
			ErlSubBin* res;
			hbot -= ERL_SUB_BIN_SIZE;
			res = (ErlSubBin *) hbot;
			res->thing_word = HEADER_SUB_BIN;
			res->size = size;
			res->bitsize = bit_size;
			res->bitoffs = bit_offset;
			res->offs = 0;
			res->is_writable = 0;
			res->orig = *argp;
			*argp = make_binary(hbot);
		    }
		    break;
		}
		break;
	    case FUN_SUBTAG:
		{
		    ErlFunThing* funp = (ErlFunThing *) objp;

		    i =  thing_arityval(hdr) + 2 + funp->num_free;
		    tp = htop;
		    while (i--)  {
			*htop++ = *objp++;
		    }
#ifndef HYBRID /* FIND ME! */
		    funp = (ErlFunThing *) tp;
		    funp->next = off_heap->funs;
		    off_heap->funs = funp;
		    erts_refc_inc(&funp->fe->refc, 2);
#endif
		    *argp = make_fun(tp);
		}
		break;
	    case EXTERNAL_PID_SUBTAG:
	    case EXTERNAL_PORT_SUBTAG:
	    case EXTERNAL_REF_SUBTAG:
		{
		  ExternalThing *etp = (ExternalThing *) htop;

		  i =  thing_arityval(hdr) + 1;
		  tp = htop;

		  while (i--)  {
		    *htop++ = *objp++;
		  }

		  etp->next = off_heap->externals;
		  off_heap->externals = etp;
		  erts_refc_inc(&etp->node->refc, 2);

		  *argp = make_external(tp);
		}
		break;
	    case BIN_MATCHSTATE_SUBTAG:
		erl_exit(ERTS_ABORT_EXIT,
			 "copy_struct: matchstate term not allowed");
	    default:
		i = thing_arityval(hdr)+1;
		hbot -= i;
		tp = hbot;
		*argp = make_boxed(hbot);
		while (i--) {
		    *tp++ = *objp++;
		}
	    }
	    break;
	case TAG_PRIMARY_HEADER:
	    if (header_is_thing(obj) || hp == const_tuple) {
		hp += header_arity(obj) + 1;
	    } else {
		hp++;
	    }
	    break;
	}
    }

#ifdef DEBUG
    if (htop != hbot)
	erl_exit(ERTS_ABORT_EXIT,
		 "Internal error in copy_struct() when copying %T:"
		 " htop=%p != hbot=%p (sz=%bpu)\n",
		 org_obj, htop, hbot, org_sz); 
#else
    if (htop > hbot) {
	erl_exit(ERTS_ABORT_EXIT,
		 "Internal error in copy_struct(): htop, hbot overrun\n");
    }
#endif
    *hpp = (Eterm *) (hstart+hsize);
    return res;
}
Example #23
/* Copy a message to the message area. */
Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
{
    Eterm obj;
    Eterm dest;
#ifdef INCREMENTAL
    int alloc_old = 0;
#else
    int total_need = 0;
#endif

    VERBOSE(DEBUG_MESSAGES,
            ("COPY START; %T is sending a message @ 0x%016x\n%T\n",
             from->id, orig, orig));

#ifndef INCREMENTAL
 copy_start:
#endif
    MA_STACK_PUSH(src,orig);
    MA_STACK_PUSH(dst,&dest);
    MA_STACK_PUSH(offset,offs);

    while (ma_src_top > 0) {
        obj = MA_STACK_POP(src);

        /* copy_struct_lazy should never be called with something that
         * do not need to be copied. Within the loop, nothing that do
         * not need copying should be placed in the src-stack.
         */
        ASSERT(!NO_COPY(obj));

        switch (primary_tag(obj)) {
        case TAG_PRIMARY_LIST: {
            Eterm *hp;
            Eterm *objp;

            GlobalAlloc(from,2,hp);
            objp = list_val(obj);

            MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_list(hp));
            MA_STACK_POP(dst);

            /* TODO: Swap the order below so that the CDR is pushed first. */

            if (NO_COPY(*objp)) {
                hp[0] = *objp;
#ifdef INCREMENTAL
                if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend))
                    INC_STORE(gray,hp,2);
#endif
            } else {
                MA_STACK_PUSH(src,*objp);
                MA_STACK_PUSH(dst,hp);
                MA_STACK_PUSH(offset,0);
            }

            objp++;

            if (NO_COPY(*objp)) {
                hp[1] = *objp;
#ifdef INCREMENTAL
                if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend))
                    INC_STORE(gray,hp,2);
#endif
            }
            else {
                MA_STACK_PUSH(src,*objp);
                MA_STACK_PUSH(dst,hp);
                MA_STACK_PUSH(offset,1);
            }
            continue;
        }

        case TAG_PRIMARY_BOXED: {
            Eterm *objp = boxed_val(obj);

            switch (*objp & _TAG_HEADER_MASK) {
            case ARITYVAL_SUBTAG: {
                Uint ari = arityval(*objp);
                Uint i;
                Eterm *hp;
                GlobalAlloc(from,ari + 1,hp);
                /* A GC above might invalidate the value of objp */
                objp = boxed_val(obj);
                MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_tuple(hp));
                MA_STACK_POP(dst);
                *hp = *objp++;
                for (i = 1; i <= ari; i++) {
                    switch (primary_tag(*objp)) {
                    case TAG_PRIMARY_LIST:
                    case TAG_PRIMARY_BOXED:
                        if (NO_COPY(*objp)) {
                            hp[i] = *objp;
#ifdef INCREMENTAL
                            if (ptr_within(ptr_val(*objp),
                                           inc_fromspc,inc_fromend))
                                INC_STORE(gray,hp,BOXED_NEED(hp,*hp));
#endif
                            objp++;
                        } else {
                            MA_STACK_PUSH(src,*objp++);
                            MA_STACK_PUSH(dst,hp);
                            MA_STACK_PUSH(offset,i);
                        }
                        break;
                    default:
                        hp[i] = *objp++;
                    }
                }
                continue;
            }

            case REFC_BINARY_SUBTAG: {
                ProcBin *pb;
                Uint i = thing_arityval(*objp) + 1;
                Eterm *hp;
                GlobalAlloc(from,i,hp);
                /* A GC above might invalidate the value of objp */
                objp = boxed_val(obj);
                MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_binary(hp));
                MA_STACK_POP(dst);
                pb = (ProcBin*) hp;
                while (i--) {
                    *hp++ = *objp++;
                }
                erts_refc_inc(&pb->val->refc, 2);
                pb->next = erts_global_offheap.mso;
                erts_global_offheap.mso = pb;
                erts_global_offheap.overhead += pb->size / sizeof(Eterm);
                continue;
            }

            case FUN_SUBTAG: {
                ErlFunThing *funp = (ErlFunThing*) objp;
                Uint i = thing_arityval(*objp) + 1;
                Uint j = i + 1 + funp->num_free;
                Uint k = i;
                Eterm *hp, *hp_start;
                GlobalAlloc(from,j,hp);
                /* A GC above might invalidate the value of objp */
                objp = boxed_val(obj);
                hp_start = hp;
                MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_fun(hp));
                MA_STACK_POP(dst);
                funp = (ErlFunThing*) hp;
                while (i--) {
                    *hp++ = *objp++;
                }
#ifndef HYBRID // FIND ME!
                funp->next = erts_global_offheap.funs;
                erts_global_offheap.funs = funp;
                erts_refc_inc(&funp->fe->refc, 2);
#endif
                for (i = k; i < j; i++) {
                    switch (primary_tag(*objp)) {
                    case TAG_PRIMARY_LIST:
                    case TAG_PRIMARY_BOXED:
                        if (NO_COPY(*objp)) {
#ifdef INCREMENTAL
                            if (ptr_within(ptr_val(*objp),
                                           inc_fromspc,inc_fromend))
                                INC_STORE(gray,hp,BOXED_NEED(hp,*hp));
#endif
                            *hp++ = *objp++;
                        } else {
                            MA_STACK_PUSH(src,*objp++);
                            MA_STACK_PUSH(dst,hp_start);
                            MA_STACK_PUSH(offset,i);
                            hp++;
                        }
                        break;
                    default:
                        *hp++ = *objp++;
                    }
                }
                continue;
            }

            case EXTERNAL_PID_SUBTAG:
            case EXTERNAL_PORT_SUBTAG:
            case EXTERNAL_REF_SUBTAG: {
                ExternalThing *etp;
                Uint i =  thing_arityval(*objp) + 1;
                Eterm *hp;
                GlobalAlloc(from,i,hp);
                /* A GC above might invalidate the value of objp */
                objp = boxed_val(obj);
                MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_external(hp));
                MA_STACK_POP(dst);
                etp = (ExternalThing*) hp;
                while (i--)  {
                    *hp++ = *objp++;
                }

                etp->next = erts_global_offheap.externals;
                erts_global_offheap.externals = etp;
		erts_refc_inc(&etp->node->refc, 2);
                continue;
            }

            case SUB_BINARY_SUBTAG: {
                ErlSubBin *sb = (ErlSubBin *) objp;
		Eterm *hp;
		Eterm res_binary;
                Eterm real_bin = sb->orig;
                Uint bit_offset = sb->bitoffs;
		Uint bit_size = sb -> bitsize;
		Uint sub_offset = sb->offs;
                size_t size = sb->size;
		Uint extra_bytes;
		Uint real_size;
		Uint sub_binary_heapneed;
		if ((bit_size + bit_offset) > 8) {
		    extra_bytes = 2;
		    sub_binary_heapneed = ERL_SUB_BIN_SIZE;
		} else if ((bit_size + bit_offset) > 0) {
		    extra_bytes = 1;
		    sub_binary_heapneed = ERL_SUB_BIN_SIZE;
		} else {
		    extra_bytes = 0;
		    sub_binary_heapneed = 0;
		}
		
		real_size = size+extra_bytes;
                objp = binary_val(real_bin);
                if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
                    ErlHeapBin *from_bin;
                    ErlHeapBin *to_bin;
                    Uint i = heap_bin_size(real_size);
                    GlobalAlloc(from,i+sub_binary_heapneed,hp);
                    from_bin = (ErlHeapBin *) objp;
                    to_bin = (ErlHeapBin *) hp;
                    to_bin->thing_word = header_heap_bin(real_size);
                    to_bin->size = real_size;
                    sys_memcpy(to_bin->data, ((byte *)from_bin->data) +
                               sub_offset, real_size);
		    res_binary = make_binary(to_bin);
		    hp += i;
                } else {
                    ProcBin *from_bin;
                    ProcBin *to_bin;
                    
                    ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
		    from_bin = (ProcBin *) objp;
		    erts_refc_inc(&from_bin->val->refc, 2);
                    GlobalAlloc(from,PROC_BIN_SIZE+sub_binary_heapneed,hp);
                    to_bin = (ProcBin *) hp;
                    to_bin->thing_word = HEADER_PROC_BIN;
                    to_bin->size = real_size;
                    to_bin->val = from_bin->val;
                    to_bin->bytes = from_bin->bytes + sub_offset;
                    to_bin->next = erts_global_offheap.mso;
                    erts_global_offheap.mso = to_bin;
                    erts_global_offheap.overhead += to_bin->size / sizeof(Eterm);
		    res_binary=make_binary(to_bin);
		    hp += PROC_BIN_SIZE;
                }
		if (extra_bytes != 0) {
		    ErlSubBin* res;
		    res = (ErlSubBin *) hp;
		    res->thing_word = HEADER_SUB_BIN;
		    res->size = size;
		    res->bitsize = bit_size;
		    res->bitoffs = bit_offset;
		    res->offs = 0;
		    res->is_writable = 0;
		    res->orig = res_binary;
		    res_binary = make_binary(hp);
		}
		MA_STACK_UPDATE(dst,MA_STACK_POP(offset),res_binary);
		MA_STACK_POP(dst);
                continue;
            }

	    case BIN_MATCHSTATE_SUBTAG:
		erl_exit(ERTS_ABORT_EXIT,
			 "copy_struct_lazy: matchstate term not allowed");

            default: {
                Uint size = thing_arityval(*objp) + 1;
                Eterm *hp;
                GlobalAlloc(from,size,hp);
                /* A GC above might invalidate the value of objp */
                objp = boxed_val(obj);
                MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_boxed(hp));
                MA_STACK_POP(dst);
                while (size--) {
                    *hp++ = *objp++;
                }
                continue;
            }
            }
            continue;
        }

        case TAG_PRIMARY_HEADER:
        ASSERT((obj & _TAG_HEADER_MASK) == ARITYVAL_SUBTAG);
        {
            Eterm *objp = &obj;
            Uint ari = arityval(obj);
            Uint i;
            Eterm *hp;
            GlobalAlloc(from,ari + 1,hp);
            MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_tuple(hp));
            MA_STACK_POP(dst);
            *hp = *objp++;
            for (i = 1; i <= ari; i++) {
                switch (primary_tag(*objp)) {
                case TAG_PRIMARY_LIST:
                case TAG_PRIMARY_BOXED:
                    if (NO_COPY(*objp)) {
#ifdef INCREMENTAL
                        if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend))
                            INC_STORE(gray,hp,ari + 1);
#endif
                        hp[i] = *objp++;
                    } else {
                        MA_STACK_PUSH(src,*objp++);
                        MA_STACK_PUSH(dst,hp);
                        MA_STACK_PUSH(offset,i);
                    }
                    break;
                default:
                    hp[i] = *objp++;
                }
            }
            continue;
        }

        default:
            erl_exit(ERTS_ABORT_EXIT,
		     "%s, line %d: Internal error in copy_struct_lazy: 0x%08x\n",
                     __FILE__, __LINE__,obj);
        }
    }

    VERBOSE(DEBUG_MESSAGES,
            ("Copy allocated @ 0x%08lx:\n%T\n",
             (unsigned long)ptr_val(dest),dest));

    ma_gc_flags &= ~GC_CYCLE_START;

    ASSERT(eq(orig, dest));
    ASSERT(ma_src_top == 0);
    ASSERT(ma_dst_top == 0);
    ASSERT(ma_offset_top == 0);
    return dest;
}
Example #24
#if HALFWORD_HEAP
Eterm copy_struct_rel(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap,
                      Eterm* src_base, Eterm* dst_base)
#else
Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
#endif
{
    char* hstart;
    Uint hsize;
    Eterm* htop;
    Eterm* hbot;
    Eterm* hp;
    Eterm* objp;
    Eterm* tp;
    Eterm  res;
    Eterm  elem;
    Eterm* tailp;
    Eterm* argp;
    Eterm* const_tuple;
    Eterm hdr;
    int i;
#ifdef DEBUG
    Eterm org_obj = obj;
    Uint org_sz = sz;
#endif

    if (IS_CONST(obj))
	return obj;

    DTRACE1(copy_struct, (int32_t)sz);

    hp = htop = *hpp;
    hbot   = htop + sz;
    hstart = (char *)htop;
    hsize = (char*) hbot - hstart;
    const_tuple = 0;

    /* Copy the object onto the heap */
    switch (primary_tag(obj)) {
    case TAG_PRIMARY_LIST:
	argp = &res;
	objp = list_val_rel(obj,src_base);
	goto L_copy_list;
    case TAG_PRIMARY_BOXED: argp = &res; goto L_copy_boxed;
    default:
	erl_exit(ERTS_ABORT_EXIT,
		 "%s, line %d: Internal error in copy_struct: 0x%08x\n",
		 __FILE__, __LINE__,obj);
    }

 L_copy:
    while (hp != htop) {
	obj = *hp;

	switch (primary_tag(obj)) {
	case TAG_PRIMARY_IMMED1:
	    hp++;
	    break;
	case TAG_PRIMARY_LIST:
	    objp = list_val_rel(obj,src_base);
	#if !HALFWORD_HEAP || defined(DEBUG)
	    if (in_area(objp,hstart,hsize)) {
		ASSERT(!HALFWORD_HEAP);
		hp++;
		break;
	    }
	#endif
	    argp = hp++;
	    /* Fall through */

	L_copy_list:
	    tailp = argp;
	    for (;;) {
		tp = tailp;
		elem = CAR(objp);
		if (IS_CONST(elem)) {
		    hbot -= 2;
		    CAR(hbot) = elem;
		    tailp = &CDR(hbot);
		}
		else {
		    CAR(htop) = elem;
		#if HALFWORD_HEAP
		    CDR(htop) = CDR(objp);
		    *tailp = make_list_rel(htop,dst_base);
		    htop += 2;
		    goto L_copy;
		#else
		    tailp = &CDR(htop);
		    htop += 2;
		#endif
		}
		ASSERT(!HALFWORD_HEAP || tp < hp || tp >= hbot);
		*tp = make_list_rel(tailp - 1, dst_base);
		obj = CDR(objp);
		if (!is_list(obj)) {
		    break;
		}
		objp = list_val_rel(obj,src_base);
	    }
	    switch (primary_tag(obj)) {
	    case TAG_PRIMARY_IMMED1: *tailp = obj; goto L_copy;
	    case TAG_PRIMARY_BOXED: argp = tailp; goto L_copy_boxed;
	    default:
		erl_exit(ERTS_ABORT_EXIT,
			 "%s, line %d: Internal error in copy_struct: 0x%08x\n",
			 __FILE__, __LINE__,obj);
	    }
	    
	case TAG_PRIMARY_BOXED:
	#if !HALFWORD_HEAP || defined(DEBUG)
	    if (in_area(boxed_val_rel(obj,src_base),hstart,hsize)) {
		ASSERT(!HALFWORD_HEAP);
		hp++;
		break;
	    }
	#endif
	    argp = hp++;

	L_copy_boxed:
	    objp = boxed_val_rel(obj, src_base);
	    hdr = *objp;
	    switch (hdr & _TAG_HEADER_MASK) {
	    case ARITYVAL_SUBTAG:
		{
		    int const_flag = 1; /* assume constant tuple */
		    i = arityval(hdr);
		    *argp = make_tuple_rel(htop, dst_base);
		    tp = htop;	/* tp is pointer to new arity value */
		    *htop++ = *objp++; /* copy arity value */
		    while (i--) {
			elem = *objp++;
			if (!IS_CONST(elem)) {
			    const_flag = 0;
			}
			*htop++ = elem;
		    }
		    if (const_flag) {
			const_tuple = tp; /* this is the latest const_tuple */
		    }
		}
		break;
	    case REFC_BINARY_SUBTAG:
		{
		    ProcBin* pb;

		    pb = (ProcBin *) objp;
		    if (pb->flags) {
			erts_emasculate_writable_binary(pb);
		    }
		    i = thing_arityval(*objp) + 1;
		    hbot -= i;
		    tp = hbot;
		    while (i--)  {
			*tp++ = *objp++;
		    }
		    *argp = make_binary_rel(hbot, dst_base);
		    pb = (ProcBin*) hbot;
		    erts_refc_inc(&pb->val->refc, 2);
		    pb->next = off_heap->first;
		    pb->flags = 0;
		    off_heap->first = (struct erl_off_heap_header*) pb;
		    OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
		}
		break;
	    case SUB_BINARY_SUBTAG:
		{
		    ErlSubBin* sb = (ErlSubBin *) objp;
		    Eterm real_bin = sb->orig;
		    Uint bit_offset = sb->bitoffs;
		    Uint bit_size = sb -> bitsize;
		    Uint offset = sb->offs;
		    size_t size = sb->size;
		    Uint extra_bytes;
		    Uint real_size;
		    if ((bit_size + bit_offset) > 8) {
			extra_bytes = 2;
		    } else if ((bit_size + bit_offset) > 0) {
			extra_bytes = 1;
		    } else {
			extra_bytes = 0;
		    } 
		    real_size = size+extra_bytes;
		    objp = binary_val_rel(real_bin,src_base);
		    if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
			ErlHeapBin* from = (ErlHeapBin *) objp;
			ErlHeapBin* to;
			i = heap_bin_size(real_size);
			hbot -= i;
			to = (ErlHeapBin *) hbot;
			to->thing_word = header_heap_bin(real_size);
			to->size = real_size;
			sys_memcpy(to->data, ((byte *)from->data)+offset, real_size);
		    } else {
			ProcBin* from = (ProcBin *) objp;
			ProcBin* to;
			
			ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
			if (from->flags) {
			    erts_emasculate_writable_binary(from);
			}
			hbot -= PROC_BIN_SIZE;
			to = (ProcBin *) hbot;
			to->thing_word = HEADER_PROC_BIN;
			to->size = real_size;
			to->val = from->val;
			erts_refc_inc(&to->val->refc, 2);
			to->bytes = from->bytes + offset;
			to->next = off_heap->first;
			to->flags = 0;
			off_heap->first = (struct erl_off_heap_header*) to;
			OH_OVERHEAD(off_heap, to->size / sizeof(Eterm));
		    }
		    *argp = make_binary_rel(hbot, dst_base);
		    if (extra_bytes != 0) {
			ErlSubBin* res;
			hbot -= ERL_SUB_BIN_SIZE;
			res = (ErlSubBin *) hbot;
			res->thing_word = HEADER_SUB_BIN;
			res->size = size;
			res->bitsize = bit_size;
			res->bitoffs = bit_offset;
			res->offs = 0;
			res->is_writable = 0;
			res->orig = *argp;
			*argp = make_binary_rel(hbot, dst_base);
		    }
		    break;
		}
		break;
	    case FUN_SUBTAG:
		{
		    ErlFunThing* funp = (ErlFunThing *) objp;

		    i =  thing_arityval(hdr) + 2 + funp->num_free;
		    tp = htop;
		    while (i--)  {
			*htop++ = *objp++;
		    }
		    funp = (ErlFunThing *) tp;
		    funp->next = off_heap->first;
		    off_heap->first = (struct erl_off_heap_header*) funp;
		    erts_refc_inc(&funp->fe->refc, 2);
		    *argp = make_fun_rel(tp, dst_base);
		}
		break;
	    case EXTERNAL_PID_SUBTAG:
	    case EXTERNAL_PORT_SUBTAG:
	    case EXTERNAL_REF_SUBTAG:
		{
		  ExternalThing *etp = (ExternalThing *) htop;

		  i  = thing_arityval(hdr) + 1;
		  tp = htop;

		  while (i--)  {
		    *htop++ = *objp++;
		  }

		  etp->next = off_heap->first;
		  off_heap->first = (struct erl_off_heap_header*)etp;
		  erts_refc_inc(&etp->node->refc, 2);

		  *argp = make_external_rel(tp, dst_base);
		}
		break;
	    case MAP_SUBTAG:
		tp = htop;
		switch (MAP_HEADER_TYPE(hdr)) {
		    case MAP_HEADER_TAG_FLATMAP_HEAD :
                        i = flatmap_get_size(objp) + 3;
                        *argp = make_flatmap_rel(htop, dst_base);
                        while (i--) {
                            *htop++ = *objp++;
                        }
			break;
		    case MAP_HEADER_TAG_HAMT_HEAD_BITMAP :
		    case MAP_HEADER_TAG_HAMT_HEAD_ARRAY :
			*htop++ = *objp++;
		    case MAP_HEADER_TAG_HAMT_NODE_BITMAP :
			i = 1 + hashmap_bitcount(MAP_HEADER_VAL(hdr));
			while (i--)  { *htop++ = *objp++; }
			*argp = make_hashmap_rel(tp, dst_base);
			break;
		    default:
			erl_exit(ERTS_ABORT_EXIT, "copy_struct: bad hashmap type %d\n", MAP_HEADER_TYPE(hdr));
		}
		break;
	    case BIN_MATCHSTATE_SUBTAG:
		erl_exit(ERTS_ABORT_EXIT,
			 "copy_struct: matchstate term not allowed");
	    default:
		i = thing_arityval(hdr)+1;
		hbot -= i;
		tp = hbot;
		*argp = make_boxed_rel(hbot, dst_base);
		while (i--) {
		    *tp++ = *objp++;
		}
	    }
	    break;
	case TAG_PRIMARY_HEADER:
	    if (header_is_thing(obj) || hp == const_tuple) {
		hp += header_arity(obj) + 1;
	    } else {
		hp++;
	    }
	    break;
	}
    }

#ifdef DEBUG
    if (htop != hbot)
	erl_exit(ERTS_ABORT_EXIT,
		 "Internal error in copy_struct() when copying %T:"
		 " htop=%p != hbot=%p (sz=%beu)\n",
		 org_obj, htop, hbot, org_sz); 
#else
    if (htop > hbot) {
	erl_exit(ERTS_ABORT_EXIT,
		 "Internal error in copy_struct(): htop, hbot overrun\n");
    }
#endif
    *hpp = (Eterm *) (hstart+hsize);
    return res;
}