ERL_NIF_TERM enif_make_resource_binary(ErlNifEnv* env, void* obj,
                                       const void* data, size_t size)
{
    Eterm bin = enif_make_resource(env, obj);
    ProcBin* pb = (ProcBin*) binary_val(bin);
    pb->bytes = (byte*) data;
    pb->size = size;
    return bin;
}
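/*
 * Illustrative sketch (not part of the original source): a NIF that exposes
 * a buffer owned by a resource as a binary term without copying it, using
 * enif_make_resource_binary() above. The resource type my_buf_type, the
 * struct my_buf and the function name are hypothetical.
 */
#if 0
typedef struct { size_t len; unsigned char data[256]; } my_buf;
static ErlNifResourceType* my_buf_type; /* assumed to be opened in load() */

static ERL_NIF_TERM my_buf_to_binary(ErlNifEnv* env, int argc,
                                     const ERL_NIF_TERM argv[])
{
    my_buf* buf = enif_alloc_resource(my_buf_type, sizeof(my_buf));
    ERL_NIF_TERM bin;

    buf->len = 5;
    memcpy(buf->data, "hello", buf->len);

    /* The returned binary keeps the resource alive; no data is copied. */
    bin = enif_make_resource_binary(env, buf, buf->data, buf->len);
    enif_release_resource(buf); /* the term now holds the only reference */
    return bin;
}
#endif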
BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
{
#if !defined(HIPE)
    BIF_ERROR(BIF_P, EXC_NOTSUP);
#else
    Module* modp;
    Eterm res, mod;

    if (!ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_1) ||
        is_not_atom(mod = erts_module_for_prepared_code
                    (((ProcBin*)binary_val(BIF_ARG_1))->val))) {
        BIF_ERROR(BIF_P, BADARG);
    }

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD3(bif_export[BIF_code_make_stub_module_3],
                        BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
    }

    erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_smp_thr_progress_block();

    modp = erts_get_module(mod, erts_active_code_ix());

    if (modp && modp->curr.num_breakpoints > 0) {
        ASSERT(modp->curr.code_hdr != NULL);
        erts_clear_module_break(modp);
        ASSERT(modp->curr.num_breakpoints == 0);
    }

    erts_start_staging_code_ix(1);

    res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);

    if (res == mod) {
        erts_end_staging_code_ix();
        erts_commit_staging_code_ix();
        if (!modp)
            modp = erts_get_module(mod, erts_active_code_ix());
        hipe_redirect_to_module(modp);
    }
    else {
        erts_abort_staging_code_ix();
    }
    erts_smp_thr_progress_unblock();
    erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
    erts_release_code_write_permission();
    return res;
#endif
}
static ERTS_INLINE int
io_list_vec_count(Eterm obj, Uint *v_size,
                  Uint *c_size, Uint *b_size, Uint *in_clist,
                  Uint *p_v_size, Uint *p_c_size, Uint *p_in_clist,
                  Uint blimit)
{
    Uint size = binary_size(obj);
    Eterm real;
    ERTS_DECLARE_DUMMY(Uint offset);
    int bitoffs;
    int bitsize;

    ERTS_GET_REAL_BIN(obj, real, offset, bitoffs, bitsize);
    if (bitsize != 0)
        return 1;
    if (thing_subtag(*binary_val(real)) == REFC_BINARY_SUBTAG && bitoffs == 0) {
        *b_size += size;
        if (*b_size < size)
            return 2;
        *in_clist = 0;
        ++*v_size;
        /* If iov_len is smaller than Uint we split the binary into
         * multiple smaller (2GB) elements in the iolist. */
        *v_size += size / MAX_SYSIOVEC_IOVLEN;
        if (size >= blimit) {
            *p_in_clist = 0;
            ++*p_v_size;
        }
        else {
            *p_c_size += size;
            if (!*p_in_clist) {
                *p_in_clist = 1;
                ++*p_v_size;
            }
        }
    }
    else {
        *c_size += size;
        if (*c_size < size)
            return 2;
        if (!*in_clist) {
            *in_clist = 1;
            ++*v_size;
        }
        *p_c_size += size;
        if (!*p_in_clist) {
            *p_in_clist = 1;
            ++*p_v_size;
        }
    }
    return 0;
}
BIF_RETTYPE has_prepared_code_on_load_1(BIF_ALIST_1)
{
    Eterm res;
    ProcBin* pb;

    if (!ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_1)) {
    error:
        BIF_ERROR(BIF_P, BADARG);
    }

    pb = (ProcBin*) binary_val(BIF_ARG_1);
    res = erts_has_code_on_load(pb->val);
    if (res == NIL) {
        goto error;
    }
    BIF_RET(res);
}
int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term,
                      ErlNifResourceType* type, void** objp)
{
    ProcBin* pb;
    Binary* mbin;
    ErlNifResource* resource;

    if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
        return 0;
    }
    pb = (ProcBin*) binary_val(term);
    /*if (pb->size != 0) {
        return 0; / * Or should we allow "resource binaries" as handles? * /
    }*/
    mbin = pb->val;
    resource = (ErlNifResource*) ERTS_MAGIC_BIN_DATA(mbin);
    if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != &nif_resource_dtor
        || resource->type != type) {
        return 0;
    }
    *objp = resource->data;
    return 1;
}
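/*
 * Illustrative sketch (not part of the original source): the typical caller
 * side of enif_get_resource() above. A NIF receives a previously created
 * resource term in argv[0] and recovers the C pointer, returning badarg if
 * the term is not a resource of the expected type. my_buf_type and my_buf
 * are the hypothetical names from the sketch further up.
 */
#if 0
static ERL_NIF_TERM my_buf_size(ErlNifEnv* env, int argc,
                                const ERL_NIF_TERM argv[])
{
    my_buf* buf;

    /* Fails (returns 0) for non-resources and for resources of other types. */
    if (!enif_get_resource(env, argv[0], my_buf_type, (void**)&buf)) {
        return enif_make_badarg(env);
    }
    return enif_make_uint64(env, buf->len);
}
#endif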
BIF_RETTYPE finish_loading_1(BIF_ALIST_1)
{
    Sint i;
    Sint n;
    struct m* p = NULL;
    Uint exceptions;
    Eterm res;
    int is_blocking = 0;
    int do_commit = 0;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD1(bif_export[BIF_finish_loading_1], BIF_P, BIF_ARG_1);
    }

    /*
     * Validate the argument before we start loading; it must be a
     * proper list where each element is a magic binary containing
     * prepared (not previously loaded) code.
     *
     * First count the number of elements and allocate an array
     * to keep the elements in.
     */

    n = erts_list_length(BIF_ARG_1);
    if (n < 0) {
    badarg:
        if (p) {
            erts_free(ERTS_ALC_T_LOADER_TMP, p);
        }
        erts_release_code_write_permission();
        BIF_ERROR(BIF_P, BADARG);
    }
    p = erts_alloc(ERTS_ALC_T_LOADER_TMP, n*sizeof(struct m));

    /*
     * We now know that the argument is a proper list. Validate
     * and collect the binaries into the array.
     */

    for (i = 0; i < n; i++) {
        Eterm* cons = list_val(BIF_ARG_1);
        Eterm term = CAR(cons);
        ProcBin* pb;

        if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
            goto badarg;
        }
        pb = (ProcBin*) binary_val(term);
        p[i].code = pb->val;
        p[i].module = erts_module_for_prepared_code(p[i].code);
        if (p[i].module == NIL) {
            goto badarg;
        }
        BIF_ARG_1 = CDR(cons);
    }

    /*
     * Since we cannot handle atomic loading of a group of modules
     * if one or more of them uses on_load, we will only allow
     * more than one element in the list if none of the modules
     * have an on_load function.
     */

    if (n > 1) {
        for (i = 0; i < n; i++) {
            if (erts_has_code_on_load(p[i].code) == am_true) {
                erts_free(ERTS_ALC_T_LOADER_TMP, p);
                erts_release_code_write_permission();
                BIF_ERROR(BIF_P, SYSTEM_LIMIT);
            }
        }
    }

    /*
     * All types are correct. There cannot be a BADARG from now on.
     * Before we can start loading, we must check whether any of
     * the modules already has old code. To avoid a race, we must
     * not allow other processes to initiate a code loading operation
     * from now on.
     */

    res = am_ok;
    erts_start_staging_code_ix(n);

    for (i = 0; i < n; i++) {
        p[i].modp = erts_put_module(p[i].module);
        p[i].modp->seen = 0;
    }

    exceptions = 0;
    for (i = 0; i < n; i++) {
        p[i].exception = 0;
        if (p[i].modp->seen) {
            p[i].exception = 1;
            exceptions++;
        }
        p[i].modp->seen = 1;
    }
    if (exceptions) {
        res = exception_list(BIF_P, am_duplicated, p, exceptions);
        goto done;
    }

    for (i = 0; i < n; i++) {
        if (p[i].modp->curr.num_breakpoints > 0 ||
            p[i].modp->curr.num_traced_exports > 0 ||
            erts_is_default_trace_enabled()) {
            /* tracing involved, fallback with thread blocking */
            erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
            erts_smp_thr_progress_block();
            is_blocking = 1;
            break;
        }
    }

    if (is_blocking) {
        for (i = 0; i < n; i++) {
            if (p[i].modp->curr.num_breakpoints) {
                erts_clear_module_break(p[i].modp);
                ASSERT(p[i].modp->curr.num_breakpoints == 0);
            }
        }
    }

    exceptions = 0;
    for (i = 0; i < n; i++) {
        p[i].exception = 0;
        if (p[i].modp->curr.code_hdr && p[i].modp->old.code_hdr) {
            p[i].exception = 1;
            exceptions++;
        }
    }

    if (exceptions) {
        res = exception_list(BIF_P, am_not_purged, p, exceptions);
    } else {
        /*
         * Now we can load all code. This can't fail.
         */

        exceptions = 0;
        for (i = 0; i < n; i++) {
            Eterm mod;
            Eterm retval;

            erts_refc_inc(&p[i].code->refc, 1);
            retval = erts_finish_loading(p[i].code, BIF_P, 0, &mod);
            ASSERT(retval == NIL || retval == am_on_load);
            if (retval == am_on_load) {
                p[i].exception = 1;
                exceptions++;
            }
        }

        /*
         * Check whether any module has an on_load_handler.
         */

        if (exceptions) {
            res = exception_list(BIF_P, am_on_load, p, exceptions);
        }
        do_commit = 1;
    }

done:
    return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n);
}
Uint size_object(Eterm obj)
{
    Uint sum = 0;
    Eterm* ptr;
    int arity;

    DECLARE_ESTACK(s);
    for (;;) {
        switch (primary_tag(obj)) {
        case TAG_PRIMARY_LIST:
            sum += 2;
            ptr = list_val(obj);
            obj = *ptr++;
            if (!IS_CONST(obj)) {
                ESTACK_PUSH(s, obj);
            }
            obj = *ptr;
            break;
        case TAG_PRIMARY_BOXED:
            {
                Eterm hdr = *boxed_val(obj);
                ASSERT(is_header(hdr));
                switch (hdr & _TAG_HEADER_MASK) {
                case ARITYVAL_SUBTAG:
                    ptr = tuple_val(obj);
                    arity = header_arity(hdr);
                    sum += arity + 1;
                    if (arity == 0) { /* Empty tuple -- unusual. */
                        goto pop_next;
                    }
                    while (arity-- > 1) {
                        obj = *++ptr;
                        if (!IS_CONST(obj)) {
                            ESTACK_PUSH(s, obj);
                        }
                    }
                    obj = *++ptr;
                    break;
                case FUN_SUBTAG:
                    {
                        Eterm* bptr = fun_val(obj);
                        ErlFunThing* funp = (ErlFunThing *) bptr;
                        unsigned eterms = 1 /* creator */ + funp->num_free;
                        unsigned sz = thing_arityval(hdr);

                        sum += 1 /* header */ + sz + eterms;
                        bptr += 1 /* header */ + sz;
                        while (eterms-- > 1) {
                            obj = *bptr++;
                            if (!IS_CONST(obj)) {
                                ESTACK_PUSH(s, obj);
                            }
                        }
                        obj = *bptr;
                        break;
                    }
                case SUB_BINARY_SUBTAG:
                    {
                        Eterm real_bin;
                        Uint offset; /* Not used. */
                        Uint bitsize;
                        Uint bitoffs;
                        Uint extra_bytes;
                        Eterm hdr;
                        ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
                        if ((bitsize + bitoffs) > 8) {
                            sum += ERL_SUB_BIN_SIZE;
                            extra_bytes = 2;
                        } else if ((bitsize + bitoffs) > 0) {
                            sum += ERL_SUB_BIN_SIZE;
                            extra_bytes = 1;
                        } else {
                            extra_bytes = 0;
                        }
                        hdr = *binary_val(real_bin);
                        if (thing_subtag(hdr) == REFC_BINARY_SUBTAG) {
                            sum += PROC_BIN_SIZE;
                        } else {
                            sum += heap_bin_size(binary_size(obj)+extra_bytes);
                        }
                        goto pop_next;
                    }
                    break;
                case BIN_MATCHSTATE_SUBTAG:
                    erl_exit(ERTS_ABORT_EXIT,
                             "size_object: matchstate term not allowed");
                default:
                    sum += thing_arityval(hdr) + 1;
                    goto pop_next;
                }
            }
            break;
        case TAG_PRIMARY_IMMED1:
        pop_next:
            if (ESTACK_ISEMPTY(s)) {
                DESTROY_ESTACK(s);
                return sum;
            }
            obj = ESTACK_POP(s);
            break;
        default:
            erl_exit(ERTS_ABORT_EXIT, "size_object: bad tag for %#x\n", obj);
        }
    }
}
/* Copy a message to the message area. */
Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
{
    Eterm obj;
    Eterm dest;
#ifdef INCREMENTAL
    int alloc_old = 0;
#else
    int total_need = 0;
#endif

    VERBOSE(DEBUG_MESSAGES,
            ("COPY START; %T is sending a message @ 0x%016x\n%T\n",
             from->id, orig, orig));

#ifndef INCREMENTAL
 copy_start:
#endif
    MA_STACK_PUSH(src,orig);
    MA_STACK_PUSH(dst,&dest);
    MA_STACK_PUSH(offset,offs);

    while (ma_src_top > 0) {
        obj = MA_STACK_POP(src);

        /* copy_struct_lazy should never be called with something that
         * does not need to be copied. Within the loop, nothing that does
         * not need copying should be placed in the src-stack.
         */
        ASSERT(!NO_COPY(obj));

        switch (primary_tag(obj)) {
        case TAG_PRIMARY_LIST: {
            Eterm *hp;
            Eterm *objp;

            GlobalAlloc(from,2,hp);
            objp = list_val(obj);

            MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_list(hp));
            MA_STACK_POP(dst);

            /* TODO: Change the order below so that the CDR is pushed first. */

            if (NO_COPY(*objp)) {
                hp[0] = *objp;
#ifdef INCREMENTAL
                if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend))
                    INC_STORE(gray,hp,2);
#endif
            } else {
                MA_STACK_PUSH(src,*objp);
                MA_STACK_PUSH(dst,hp);
                MA_STACK_PUSH(offset,0);
            }

            objp++;

            if (NO_COPY(*objp)) {
                hp[1] = *objp;
#ifdef INCREMENTAL
                if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend))
                    INC_STORE(gray,hp,2);
#endif
            } else {
                MA_STACK_PUSH(src,*objp);
                MA_STACK_PUSH(dst,hp);
                MA_STACK_PUSH(offset,1);
            }
            continue;
        }

        case TAG_PRIMARY_BOXED: {
            Eterm *objp = boxed_val(obj);

            switch (*objp & _TAG_HEADER_MASK) {
            case ARITYVAL_SUBTAG: {
                Uint ari = arityval(*objp);
                Uint i;
                Eterm *hp;

                GlobalAlloc(from,ari + 1,hp);
                /* A GC above might invalidate the value of objp */
                objp = boxed_val(obj);
                MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_tuple(hp));
                MA_STACK_POP(dst);
                *hp = *objp++;
                for (i = 1; i <= ari; i++) {
                    switch (primary_tag(*objp)) {
                    case TAG_PRIMARY_LIST:
                    case TAG_PRIMARY_BOXED:
                        if (NO_COPY(*objp)) {
                            hp[i] = *objp;
#ifdef INCREMENTAL
                            if (ptr_within(ptr_val(*objp),
                                           inc_fromspc,inc_fromend))
                                INC_STORE(gray,hp,BOXED_NEED(hp,*hp));
#endif
                            objp++;
                        } else {
                            MA_STACK_PUSH(src,*objp++);
                            MA_STACK_PUSH(dst,hp);
                            MA_STACK_PUSH(offset,i);
                        }
                        break;
                    default:
                        hp[i] = *objp++;
                    }
                }
                continue;
            }

            case REFC_BINARY_SUBTAG: {
                ProcBin *pb;
                Uint i = thing_arityval(*objp) + 1;
                Eterm *hp;

                GlobalAlloc(from,i,hp);
                /* A GC above might invalidate the value of objp */
                objp = boxed_val(obj);
                MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_binary(hp));
                MA_STACK_POP(dst);
                pb = (ProcBin*) hp;
                while (i--) {
                    *hp++ = *objp++;
                }
                erts_refc_inc(&pb->val->refc, 2);
                pb->next = erts_global_offheap.mso;
                erts_global_offheap.mso = pb;
                erts_global_offheap.overhead += pb->size / sizeof(Eterm);
                continue;
            }

            case FUN_SUBTAG: {
                ErlFunThing *funp = (ErlFunThing*) objp;
                Uint i = thing_arityval(*objp) + 1;
                Uint j = i + 1 + funp->num_free;
                Uint k = i;
                Eterm *hp, *hp_start;

                GlobalAlloc(from,j,hp);
                /* A GC above might invalidate the value of objp */
                objp = boxed_val(obj);
                hp_start = hp;
                MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_fun(hp));
                MA_STACK_POP(dst);
                funp = (ErlFunThing*) hp;
                while (i--) {
                    *hp++ = *objp++;
                }
#ifndef HYBRID /* FIND ME! */
                funp->next = erts_global_offheap.funs;
                erts_global_offheap.funs = funp;
                erts_refc_inc(&funp->fe->refc, 2);
#endif
                for (i = k; i < j; i++) {
                    switch (primary_tag(*objp)) {
                    case TAG_PRIMARY_LIST:
                    case TAG_PRIMARY_BOXED:
                        if (NO_COPY(*objp)) {
#ifdef INCREMENTAL
                            if (ptr_within(ptr_val(*objp),
                                           inc_fromspc,inc_fromend))
                                INC_STORE(gray,hp,BOXED_NEED(hp,*hp));
#endif
                            *hp++ = *objp++;
                        } else {
                            MA_STACK_PUSH(src,*objp++);
                            MA_STACK_PUSH(dst,hp_start);
                            MA_STACK_PUSH(offset,i);
                            hp++;
                        }
                        break;
                    default:
                        *hp++ = *objp++;
                    }
                }
                continue;
            }

            case EXTERNAL_PID_SUBTAG:
            case EXTERNAL_PORT_SUBTAG:
            case EXTERNAL_REF_SUBTAG: {
                ExternalThing *etp;
                Uint i = thing_arityval(*objp) + 1;
                Eterm *hp;

                GlobalAlloc(from,i,hp);
                /* A GC above might invalidate the value of objp */
                objp = boxed_val(obj);
                MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_external(hp));
                MA_STACK_POP(dst);
                etp = (ExternalThing*) hp;
                while (i--) {
                    *hp++ = *objp++;
                }
                etp->next = erts_global_offheap.externals;
                erts_global_offheap.externals = etp;
                erts_refc_inc(&etp->node->refc, 2);
                continue;
            }

            case SUB_BINARY_SUBTAG: {
                ErlSubBin *sb = (ErlSubBin *) objp;
                Eterm *hp;
                Eterm res_binary;
                Eterm real_bin = sb->orig;
                Uint bit_offset = sb->bitoffs;
                Uint bit_size = sb->bitsize;
                Uint sub_offset = sb->offs;
                size_t size = sb->size;
                Uint extra_bytes;
                Uint real_size;
                Uint sub_binary_heapneed;

                if ((bit_size + bit_offset) > 8) {
                    extra_bytes = 2;
                    sub_binary_heapneed = ERL_SUB_BIN_SIZE;
                } else if ((bit_size + bit_offset) > 0) {
                    extra_bytes = 1;
                    sub_binary_heapneed = ERL_SUB_BIN_SIZE;
                } else {
                    extra_bytes = 0;
                    sub_binary_heapneed = 0;
                }

                real_size = size+extra_bytes;
                objp = binary_val(real_bin);
                if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
                    ErlHeapBin *from_bin;
                    ErlHeapBin *to_bin;
                    Uint i = heap_bin_size(real_size);
                    GlobalAlloc(from,i+sub_binary_heapneed,hp);
                    from_bin = (ErlHeapBin *) objp;
                    to_bin = (ErlHeapBin *) hp;
                    to_bin->thing_word = header_heap_bin(real_size);
                    to_bin->size = real_size;
                    sys_memcpy(to_bin->data,
                               ((byte *)from_bin->data) + sub_offset,
                               real_size);
                    res_binary = make_binary(to_bin);
                    hp += i;
                } else {
                    ProcBin *from_bin;
                    ProcBin *to_bin;

                    ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
                    from_bin = (ProcBin *) objp;
                    erts_refc_inc(&from_bin->val->refc, 2);
                    GlobalAlloc(from,PROC_BIN_SIZE+sub_binary_heapneed,hp);
                    to_bin = (ProcBin *) hp;
                    to_bin->thing_word = HEADER_PROC_BIN;
                    to_bin->size = real_size;
                    to_bin->val = from_bin->val;
                    to_bin->bytes = from_bin->bytes + sub_offset;
                    to_bin->next = erts_global_offheap.mso;
                    erts_global_offheap.mso = to_bin;
                    erts_global_offheap.overhead += to_bin->size / sizeof(Eterm);
                    res_binary=make_binary(to_bin);
                    hp += PROC_BIN_SIZE;
                }

                if (extra_bytes != 0) {
                    ErlSubBin* res;
                    res = (ErlSubBin *) hp;
                    res->thing_word = HEADER_SUB_BIN;
                    res->size = size;
                    res->bitsize = bit_size;
                    res->bitoffs = bit_offset;
                    res->offs = 0;
                    res->is_writable = 0;
                    res->orig = res_binary;
                    res_binary = make_binary(hp);
                }

                MA_STACK_UPDATE(dst,MA_STACK_POP(offset),res_binary);
                MA_STACK_POP(dst);
                continue;
            }

            case BIN_MATCHSTATE_SUBTAG:
                erl_exit(ERTS_ABORT_EXIT,
                         "copy_struct_lazy: matchstate term not allowed");

            default: {
                Uint size = thing_arityval(*objp) + 1;
                Eterm *hp;

                GlobalAlloc(from,size,hp);
                /* A GC above might invalidate the value of objp */
                objp = boxed_val(obj);
                MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_boxed(hp));
                MA_STACK_POP(dst);
                while (size--) {
                    *hp++ = *objp++;
                }
                continue;
            }
            }
            continue;
        }

        case TAG_PRIMARY_HEADER:
            ASSERT((obj & _TAG_HEADER_MASK) == ARITYVAL_SUBTAG);
        {
            Eterm *objp = &obj;
            Uint ari = arityval(obj);
            Uint i;
            Eterm *hp;

            GlobalAlloc(from,ari + 1,hp);
            MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_tuple(hp));
            MA_STACK_POP(dst);
            *hp = *objp++;
            for (i = 1; i <= ari; i++) {
                switch (primary_tag(*objp)) {
                case TAG_PRIMARY_LIST:
                case TAG_PRIMARY_BOXED:
                    if (NO_COPY(*objp)) {
#ifdef INCREMENTAL
                        if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend))
                            INC_STORE(gray,hp,ari + 1);
#endif
                        hp[i] = *objp++;
                    } else {
                        MA_STACK_PUSH(src,*objp++);
                        MA_STACK_PUSH(dst,hp);
                        MA_STACK_PUSH(offset,i);
                    }
                    break;
                default:
                    hp[i] = *objp++;
                }
            }
            continue;
        }

        default:
            erl_exit(ERTS_ABORT_EXIT,
                     "%s, line %d: Internal error in copy_struct_lazy: 0x%08x\n",
                     __FILE__, __LINE__,obj);
        }
    }

    VERBOSE(DEBUG_MESSAGES,
            ("Copy allocated @ 0x%08lx:\n%T\n",
             (unsigned long)ptr_val(dest),dest));

    ma_gc_flags &= ~GC_CYCLE_START;

    ASSERT(eq(orig, dest));
    ASSERT(ma_src_top == 0);
    ASSERT(ma_dst_top == 0);
    ASSERT(ma_offset_top == 0);
    return dest;
}
/*
 * Copy a structure to a heap.
 */
Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
{
    char* hstart;
    Uint hsize;
    Eterm* htop;
    Eterm* hbot;
    Eterm* hp;
    Eterm* objp;
    Eterm* tp;
    Eterm res;
    Eterm elem;
    Eterm* tailp;
    Eterm* argp;
    Eterm* const_tuple;
    Eterm hdr;
    int i;
#ifdef DEBUG
    Eterm org_obj = obj;
    Uint org_sz = sz;
#endif

    if (IS_CONST(obj))
        return obj;

    hp = htop = *hpp;
    hbot = htop + sz;
    hstart = (char *)htop;
    hsize = (char*) hbot - hstart;
    const_tuple = 0;

    /* Copy the object onto the heap */
    switch (primary_tag(obj)) {
    case TAG_PRIMARY_LIST:
        argp = &res;
        goto L_copy_list;
    case TAG_PRIMARY_BOXED:
        argp = &res;
        goto L_copy_boxed;
    default:
        erl_exit(ERTS_ABORT_EXIT,
                 "%s, line %d: Internal error in copy_struct: 0x%08x\n",
                 __FILE__, __LINE__,obj);
    }

 L_copy:
    while (hp != htop) {
        obj = *hp;

        switch (primary_tag(obj)) {
        case TAG_PRIMARY_IMMED1:
            hp++;
            break;
        case TAG_PRIMARY_LIST:
            objp = list_val(obj);
            if (in_area(objp,hstart,hsize)) {
                hp++;
                break;
            }
            argp = hp++;
            /* Fall through */

        L_copy_list:
            tailp = argp;
            while (is_list(obj)) {
                objp = list_val(obj);
                tp = tailp;
                elem = *objp;
                if (IS_CONST(elem)) {
                    *(hbot-2) = elem;
                    tailp = hbot-1;
                    hbot -= 2;
                }
                else {
                    *htop = elem;
                    tailp = htop+1;
                    htop += 2;
                }
                *tp = make_list(tailp - 1);
                obj = *(objp+1);
            }
            switch (primary_tag(obj)) {
            case TAG_PRIMARY_IMMED1: *tailp = obj; goto L_copy;
            case TAG_PRIMARY_BOXED: argp = tailp; goto L_copy_boxed;
            default:
                erl_exit(ERTS_ABORT_EXIT,
                         "%s, line %d: Internal error in copy_struct: 0x%08x\n",
                         __FILE__, __LINE__,obj);
            }

        case TAG_PRIMARY_BOXED:
            if (in_area(boxed_val(obj),hstart,hsize)) {
                hp++;
                break;
            }
            argp = hp++;

        L_copy_boxed:
            objp = boxed_val(obj);
            hdr = *objp;
            switch (hdr & _TAG_HEADER_MASK) {
            case ARITYVAL_SUBTAG:
                {
                    int const_flag = 1; /* assume constant tuple */
                    i = arityval(hdr);
                    *argp = make_tuple(htop);
                    tp = htop;          /* tp is pointer to new arity value */
                    *htop++ = *objp++;  /* copy arity value */
                    while (i--) {
                        elem = *objp++;
                        if (!IS_CONST(elem)) {
                            const_flag = 0;
                        }
                        *htop++ = elem;
                    }
                    if (const_flag) {
                        const_tuple = tp; /* this is the latest const_tuple */
                    }
                }
                break;
            case REFC_BINARY_SUBTAG:
                {
                    ProcBin* pb;

                    pb = (ProcBin *) objp;
                    if (pb->flags) {
                        erts_emasculate_writable_binary(pb);
                    }
                    i = thing_arityval(*objp) + 1;
                    hbot -= i;
                    tp = hbot;
                    while (i--) {
                        *tp++ = *objp++;
                    }
                    *argp = make_binary(hbot);
                    pb = (ProcBin*) hbot;
                    erts_refc_inc(&pb->val->refc, 2);
                    pb->next = off_heap->mso;
                    pb->flags = 0;
                    off_heap->mso = pb;
                    off_heap->overhead += pb->size / sizeof(Eterm);
                }
                break;
            case SUB_BINARY_SUBTAG:
                {
                    ErlSubBin* sb = (ErlSubBin *) objp;
                    Eterm real_bin = sb->orig;
                    Uint bit_offset = sb->bitoffs;
                    Uint bit_size = sb->bitsize;
                    Uint offset = sb->offs;
                    size_t size = sb->size;
                    Uint extra_bytes;
                    Uint real_size;
                    if ((bit_size + bit_offset) > 8) {
                        extra_bytes = 2;
                    } else if ((bit_size + bit_offset) > 0) {
                        extra_bytes = 1;
                    } else {
                        extra_bytes = 0;
                    }
                    real_size = size+extra_bytes;
                    objp = binary_val(real_bin);
                    if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
                        ErlHeapBin* from = (ErlHeapBin *) objp;
                        ErlHeapBin* to;
                        i = heap_bin_size(real_size);
                        hbot -= i;
                        to = (ErlHeapBin *) hbot;
                        to->thing_word = header_heap_bin(real_size);
                        to->size = real_size;
                        sys_memcpy(to->data, ((byte *)from->data)+offset, real_size);
                    } else {
                        ProcBin* from = (ProcBin *) objp;
                        ProcBin* to;

                        ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
                        if (from->flags) {
                            erts_emasculate_writable_binary(from);
                        }
                        hbot -= PROC_BIN_SIZE;
                        to = (ProcBin *) hbot;
                        to->thing_word = HEADER_PROC_BIN;
                        to->size = real_size;
                        to->val = from->val;
                        erts_refc_inc(&to->val->refc, 2);
                        to->bytes = from->bytes + offset;
                        to->next = off_heap->mso;
                        to->flags = 0;
                        off_heap->mso = to;
                        off_heap->overhead += to->size / sizeof(Eterm);
                    }
                    *argp = make_binary(hbot);
                    if (extra_bytes != 0) {
                        ErlSubBin* res;
                        hbot -= ERL_SUB_BIN_SIZE;
                        res = (ErlSubBin *) hbot;
                        res->thing_word = HEADER_SUB_BIN;
                        res->size = size;
                        res->bitsize = bit_size;
                        res->bitoffs = bit_offset;
                        res->offs = 0;
                        res->is_writable = 0;
                        res->orig = *argp;
                        *argp = make_binary(hbot);
                    }
                    break;
                }
                break;
            case FUN_SUBTAG:
                {
                    ErlFunThing* funp = (ErlFunThing *) objp;

                    i = thing_arityval(hdr) + 2 + funp->num_free;
                    tp = htop;
                    while (i--) {
                        *htop++ = *objp++;
                    }
#ifndef HYBRID /* FIND ME! */
                    funp = (ErlFunThing *) tp;
                    funp->next = off_heap->funs;
                    off_heap->funs = funp;
                    erts_refc_inc(&funp->fe->refc, 2);
#endif
                    *argp = make_fun(tp);
                }
                break;
            case EXTERNAL_PID_SUBTAG:
            case EXTERNAL_PORT_SUBTAG:
            case EXTERNAL_REF_SUBTAG:
                {
                    ExternalThing *etp = (ExternalThing *) htop;

                    i = thing_arityval(hdr) + 1;
                    tp = htop;

                    while (i--) {
                        *htop++ = *objp++;
                    }

                    etp->next = off_heap->externals;
                    off_heap->externals = etp;
                    erts_refc_inc(&etp->node->refc, 2);

                    *argp = make_external(tp);
                }
                break;
            case BIN_MATCHSTATE_SUBTAG:
                erl_exit(ERTS_ABORT_EXIT,
                         "copy_struct: matchstate term not allowed");
            default:
                i = thing_arityval(hdr)+1;
                hbot -= i;
                tp = hbot;
                *argp = make_boxed(hbot);
                while (i--) {
                    *tp++ = *objp++;
                }
            }
            break;
        case TAG_PRIMARY_HEADER:
            if (header_is_thing(obj) || hp == const_tuple) {
                hp += header_arity(obj) + 1;
            } else {
                hp++;
            }
            break;
        }
    }

#ifdef DEBUG
    if (htop != hbot)
        erl_exit(ERTS_ABORT_EXIT,
                 "Internal error in copy_struct() when copying %T:"
                 " htop=%p != hbot=%p (sz=%bpu)\n",
                 org_obj, htop, hbot, org_sz);
#else
    if (htop > hbot) {
        erl_exit(ERTS_ABORT_EXIT,
                 "Internal error in copy_struct(): htop, hbot overrun\n");
    }
#endif
    *hpp = (Eterm *) (hstart+hsize);
    return res;
}
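/*
 * Illustrative sketch (not part of the original source): size_object() and
 * copy_struct() are normally used as a pair. The caller first measures the
 * term, allocates exactly that many words on the destination heap, and then
 * copies. The sketch below assumes a Process* p with an ordinary heap and
 * uses HAlloc()/MSO() as they appear elsewhere in the emulator; the function
 * name is hypothetical.
 */
#if 0
static Eterm copy_term_to_process(Process *p, Eterm term)
{
    Uint sz;
    Eterm *hp;

    if (IS_CONST(term))       /* immediates need no copy */
        return term;

    sz = size_object(term);   /* words needed on the target heap */
    hp = HAlloc(p, sz);       /* may GC; real call sites keep term valid */
    return copy_struct(term, sz, &hp, &MSO(p));
}
#endif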
static int iol2v_append_binary(iol2v_state_t *state, Eterm bin_term) {
    int is_acc_small, is_bin_small;
    UWord combined_size;
    UWord binary_size;

    Uint byte_offset, bit_offset, bit_size;
    byte *binary_data;

    Eterm *parent_header;
    Eterm parent_binary;

    ASSERT(state->bytereds_available > state->bytereds_spent);

    ERTS_GET_REAL_BIN(bin_term, parent_binary, byte_offset, bit_offset, bit_size);
    parent_header = binary_val(parent_binary);
    binary_size = binary_size(bin_term);

    if (bit_size != 0) {
        return 0;
    } else if (binary_size == 0) {
        state->bytereds_spent += 1;
        return 1;
    }

    is_acc_small = state->acc_size < IOL2V_SMALL_BIN_LIMIT;
    is_bin_small = binary_size < IOL2V_SMALL_BIN_LIMIT;
    combined_size = binary_size + state->acc_size;

    if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) {
        ProcBin *pb = (ProcBin*)parent_header;

        if (pb->flags) {
            erts_emasculate_writable_binary(pb);
        }

        binary_data = &((byte*)pb->bytes)[byte_offset];
    } else {
        ErlHeapBin *hb = (ErlHeapBin*)parent_header;

        ASSERT(thing_subtag(*parent_header) == HEAP_BINARY_SUBTAG);
        ASSERT(is_bin_small);

        binary_data = &((byte*)&hb->data)[byte_offset];
    }

    if (!is_bin_small && (state->acc_size == 0 || !is_acc_small)) {
        /* Avoid combining if we encounter an acceptably large binary while the
         * accumulator is either empty or large enough to be returned on its
         * own. */
        if (state->acc_size != 0) {
            iol2v_enqueue_result(state, iol2v_promote_acc(state));
        }

        iol2v_enqueue_result(state, bin_term);
    } else if (is_bin_small || combined_size < (IOL2V_SMALL_BIN_LIMIT * 2)) {
        /* If the candidate is small or we can't split the combination in two,
         * then just copy it into the accumulator. */
        iol2v_expand_acc(state, binary_size);

        if (ERTS_LIKELY(bit_offset == 0)) {
            sys_memcpy(&(state->acc)->orig_bytes[state->acc_size],
                       binary_data, binary_size);
        } else {
            ASSERT(binary_size <= ERTS_UWORD_MAX / 8);

            erts_copy_bits(binary_data, bit_offset, 1,
                           (byte*)&(state->acc)->orig_bytes[state->acc_size],
                           0, 1, binary_size * 8);
        }

        state->acc_size += binary_size;
    } else {
        /* Otherwise, append enough data for the accumulator to be valid, and
         * then return the rest as a sub-binary. */
        UWord spill = IOL2V_SMALL_BIN_LIMIT - state->acc_size;
        Eterm binary_tail;

        iol2v_expand_acc(state, spill);

        if (ERTS_LIKELY(bit_offset == 0)) {
            sys_memcpy(&(state->acc)->orig_bytes[state->acc_size],
                       binary_data, spill);
        } else {
            ASSERT(binary_size <= ERTS_UWORD_MAX / 8);

            erts_copy_bits(binary_data, bit_offset, 1,
                           (byte*)&(state->acc)->orig_bytes[state->acc_size],
                           0, 1, spill * 8);
        }

        state->acc_size += spill;

        binary_tail = iol2v_make_sub_bin(state, bin_term, spill,
                                         binary_size - spill);

        iol2v_enqueue_result(state, iol2v_promote_acc(state));
        iol2v_enqueue_result(state, binary_tail);
    }

    return 1;
}
int
erts_ioq_iolist_to_vec(Eterm obj,                 /* io-list */
                       SysIOVec* iov,             /* io vector */
                       ErtsIOQBinary** binv,      /* binary reference vector */
                       ErtsIOQBinary* cbin,       /* binary to store characters */
                       Uint bin_limit,            /* small binaries limit */
                       int driver)
{
    DECLARE_ESTACK(s);
    Eterm* objp;
    byte *buf = NULL;
    Uint len = 0;
    Uint csize = 0;
    int vlen = 0;
    byte* cptr;

    if (cbin) {
        if (driver) {
            buf = (byte*)cbin->driver.orig_bytes;
            len = cbin->driver.orig_size;
        } else {
            buf = (byte*)cbin->nif.orig_bytes;
            len = cbin->nif.orig_size;
        }
    }
    cptr = buf;

    goto L_jump_start;  /* avoid push */

    while (!ESTACK_ISEMPTY(s)) {
        obj = ESTACK_POP(s);
    L_jump_start:
        if (is_list(obj)) {
        L_iter_list:
            objp = list_val(obj);
            obj = CAR(objp);
            if (is_byte(obj)) {
                if (len == 0)
                    goto L_overflow;
                *buf++ = unsigned_val(obj);
                csize++;
                len--;
            } else if (is_binary(obj)) {
                ESTACK_PUSH(s, CDR(objp));
                goto handle_binary;
            } else if (is_list(obj)) {
                ESTACK_PUSH(s, CDR(objp));
                goto L_iter_list;    /* on head */
            } else if (!is_nil(obj)) {
                goto L_type_error;
            }
            obj = CDR(objp);
            if (is_list(obj))
                goto L_iter_list;    /* on tail */
            else if (is_binary(obj)) {
                goto handle_binary;
            } else if (!is_nil(obj)) {
                goto L_type_error;
            }
        } else if (is_binary(obj)) {
            Eterm real_bin;
            Uint offset;
            Eterm* bptr;
            Uint size;
            int bitoffs;
            int bitsize;

        handle_binary:
            size = binary_size(obj);
            ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
            ASSERT(bitsize == 0);
            bptr = binary_val(real_bin);
            if (*bptr == HEADER_PROC_BIN) {
                ProcBin* pb = (ProcBin *) bptr;
                if (bitoffs != 0) {
                    if (len < size) {
                        goto L_overflow;
                    }
                    erts_copy_bits(pb->bytes+offset, bitoffs, 1,
                                   (byte *) buf, 0, 1, size*8);
                    csize += size;
                    buf += size;
                    len -= size;
                } else if (bin_limit && size < bin_limit) {
                    if (len < size) {
                        goto L_overflow;
                    }
                    sys_memcpy(buf, pb->bytes+offset, size);
                    csize += size;
                    buf += size;
                    len -= size;
                } else {
                    ErtsIOQBinary *qbin;
                    if (csize != 0) {
                        io_list_to_vec_set_vec(&iov, &binv, cbin,
                                               cptr, csize, &vlen);
                        cptr = buf;
                        csize = 0;
                    }
                    if (pb->flags) {
                        erts_emasculate_writable_binary(pb);
                    }
                    if (driver)
                        qbin = (ErtsIOQBinary*)Binary2ErlDrvBinary(pb->val);
                    else
                        qbin = (ErtsIOQBinary*)pb->val;
                    io_list_to_vec_set_vec(
                        &iov, &binv, qbin,
                        pb->bytes+offset, size, &vlen);
                }
            } else {
                ErlHeapBin* hb = (ErlHeapBin *) bptr;
                if (len < size) {
                    goto L_overflow;
                }
                copy_binary_to_buffer(buf, 0,
                                      ((byte *) hb->data)+offset, bitoffs,
                                      8*size);
                csize += size;
                buf += size;
                len -= size;
            }
        } else if (!is_nil(obj)) {
            goto L_type_error;
        }
    }

    if (csize != 0) {
        io_list_to_vec_set_vec(&iov, &binv, cbin, cptr, csize, &vlen);
    }

    DESTROY_ESTACK(s);
    return vlen;

 L_type_error:
    DESTROY_ESTACK(s);
    return -2;

 L_overflow:
    DESTROY_ESTACK(s);
    return -1;
}
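/*
 * Illustrative sketch (not part of the original source): a consumer of the
 * vector filled in above. SysIOVec mirrors struct iovec (iov_base/iov_len),
 * so once erts_ioq_iolist_to_vec() has returned a non-negative vlen, a caller
 * can, for example, compute the total byte count before handing the vector to
 * a scatter/gather write. The function name is hypothetical.
 */
#if 0
static size_t total_iov_bytes(const SysIOVec *iov, int vlen)
{
    size_t total = 0;
    int i;

    /* vlen is the return value of erts_ioq_iolist_to_vec(); negative values
     * mean a type error (-2) or that the character buffer overflowed (-1). */
    for (i = 0; i < vlen; i++) {
        total += iov[i].iov_len;
    }
    return total;
}
#endif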