// erlang:spawn_link/1: spawn a zero-argument fun and atomically link
// the new process to the caller. Fails with system_limit or
// not_spawned if the process cannot be created or linked.
term_t cbif_spawn_link1(proc_t *proc, term_t *regs)
{
	term_t Fun = regs[0];
	if (!is_boxed(Fun))
		badarg(Fun);
	uint32_t *fdata = peel_boxed(Fun);
	if (boxed_tag(fdata) != SUBTAG_FUN)
		badarg(Fun);
	t_fun_t *f = (t_fun_t *)fdata;
	if (f->fe == 0)
		not_implemented("unloaded funs");

	proc_t *new_proc = proc_make(proc->group_leader);
	int x = proc_spawn_fun0_N(new_proc, f);
	// establish the link on both sides; any failure below aborts the
	// whole spawn
	if (x == 0)
		x = inter_link_establish_N(&new_proc->links, proc->pid);
	if (x == 0)
		x = inter_link_establish_N(&proc->links, new_proc->pid);
	if (x < 0)
	{
		proc_destroy(new_proc);
		// no need to unlink, new_proc might have a link to proc but it
		// was destroyed anyway
		if (x == -TOO_DEEP)
			fail(A_SYSTEM_LIMIT);
		else
			fail(A_NOT_SPAWNED);
	}
	return new_proc->pid;
}
// maps:update/3 [18] term_t cbif_update3(proc_t *proc, term_t *regs) { term_t Key = regs[0]; term_t Value = regs[1]; term_t Map = regs[2]; if (!is_boxed_map(Map)) badarg(Map); t_map_t *m0 = (t_map_t *)peel_boxed(Map); int index = map_key_index(Key, m0->keys); if (index < 0) badarg(Key); int size = map_size(m0); int needed = WSIZE(t_map_t) +size; uint32_t *p = heap_alloc(&proc->hp, needed); t_map_t *m1 = (t_map_t *)p; box_map(p, size, m0->keys); heap_set_top(&proc->hp, p); memcpy(m1->values, m0->values, size *sizeof(term_t)); m1->values[index] = Value; return tag_boxed(m1); }
// erlang:spawn_monitor/1: spawn a zero-argument fun and atomically
// start monitoring it. Returns {Pid,MonitorRef}.
term_t cbif_spawn_monitor1(proc_t *proc, term_t *regs)
{
	term_t Fun = regs[0];
	if (!is_boxed(Fun))
		badarg(Fun);
	uint32_t *fdata = peel_boxed(Fun);
	if (boxed_tag(fdata) != SUBTAG_FUN)
		badarg(Fun);
	t_fun_t *f = (t_fun_t *)fdata;
	if (f->fe == 0)
		not_implemented("unloaded funs");

	term_t ref = heap_make_ref(&proc->hp);
	proc_t *new_proc = proc_make(proc->group_leader);
	int x = proc_spawn_fun0_N(new_proc, f);
	if (x == 0)
	{
		uint64_t ref_id = local_ref_id(ref);
		x = monitor(ref_id, proc->pid, new_proc->pid);
	}
	if (x < 0)
	{
		// no need to demonitor
		proc_destroy(new_proc);
		if (x == -TOO_DEEP)
			fail(A_SYSTEM_LIMIT);
		else
			fail(A_NOT_SPAWNED);
	}
	return heap_tuple2(&proc->hp, new_proc->pid, ref);
}
// erlang:spawn/1: create a new process that runs a zero-argument fun.
// Fails with system_limit or not_spawned when the process cannot be
// created.
term_t cbif_spawn1(proc_t *proc, term_t *regs)
{
	term_t Fun = regs[0];

	// Fun must be a loaded fun taking no extra arguments
	if (!is_boxed(Fun))
		badarg(Fun);
	uint32_t *fdata = peel_boxed(Fun);
	if (boxed_tag(fdata) != SUBTAG_FUN)
		badarg(Fun);
	t_fun_t *f = (t_fun_t *)fdata;
	if (f->fe == 0)
		not_implemented("unloaded funs");
	if (fun_arity(fdata) != fun_num_free(fdata))
		badarg();

	proc_t *child = proc_make(proc->group_leader);
	int rc = proc_spawn_fun0_N(child, f);
	if (rc >= 0)
		return child->pid;

	proc_destroy(child);
	fail((rc == -TOO_DEEP) ?A_SYSTEM_LIMIT :A_NOT_SPAWNED);
}
// erlang:spawn/3: spawn apply(m, f, args) as a new process.
term_t cbif_spawn3(proc_t *proc, term_t *regs)
{
	term_t m = regs[0];
	term_t f = regs[1];
	term_t args = regs[2];

	if (!is_atom(m))
		badarg(m);
	if (!is_atom(f))
		badarg(f);
	if (!is_list(args))
		badarg(args);
	if (list_len(args) < 0)		// improper list
		badarg(args);

	proc_t *child = proc_make(proc->group_leader);
	int rc = proc_spawn_N(child, m, f, args);
	if (rc < 0)
	{
		proc_destroy(child);	// safe
		fail(err_to_term(rc));
	}
	return child->pid;
}
// erlang:spawn_link/3: spawn apply(m, f, args) and atomically link the
// new process to the caller.
term_t cbif_spawn_link3(proc_t *proc, term_t *regs)
{
	term_t m = regs[0];
	term_t f = regs[1];
	term_t args = regs[2];
	if (!is_atom(m))
		badarg(m);
	if (!is_atom(f))
		badarg(f);
	if (!is_list(args))
		badarg(args);
	if (list_len(args) < 0)
		badarg(args); // too odd
	proc_t *new_proc = proc_make(proc->group_leader);
	int x = proc_spawn_N(new_proc, m, f, args);
	// establish the link on both sides; any failure aborts the spawn
	if (x == 0)
		x = inter_link_establish_N(&new_proc->links, proc->pid);
	if (x == 0)
		x = inter_link_establish_N(&proc->links, new_proc->pid);
	if (x < 0)
	{
		proc_destroy(new_proc);
		// no need to unlink, new_proc might have a link to proc but it is
		// destroyed anyway
		fail(err_to_term(x));
	}
	return new_proc->pid;
}
// erlang:monitor/2: monitor a process given by pid or registered name.
// Only Type =:= process is supported. If the target is already dead,
// the 'DOWN' message is delivered immediately with reason noproc.
term_t cbif_monitor2(proc_t *proc, term_t *regs)
{
	term_t Type = regs[0];
	term_t Item = regs[1];
	if (Type != A_PROCESS)
		badarg(Type);
	if (!is_short_pid(Item) && !is_atom(Item))
		badarg(Item);

	term_t ref = heap_make_ref(&proc->hp);
	proc_t *target = (is_short_pid(Item))
			?scheduler_lookup(Item)
			:scheduler_process_by_name(Item);
	if (target == 0)
	{
		// the process is gone already - send notification immediately
		// {'DOWN',#Ref<0.0.0.38>,process,<0.34.0>,noproc}
		term_t msg = heap_tuple5(&proc->hp, ADOWN__, ref, A_PROCESS, Item, A_NOPROC);
		int x = scheduler_new_local_mail_N(proc, msg);
		if (x < 0)
			fail(A_NO_MEMORY);
	}
	else
	{
		uint64_t ref_id = local_ref_id(ref);
		if (monitor(ref_id, proc->pid, target->pid) < 0)
			fail(A_NO_MEMORY);
	}
	return ref;
}
// crypto: feed more data into an ongoing SHA-1 computation.
// Context is a binary holding a raw struct sha1_ctx snapshot; Data is
// an iolist or binary. Returns the new context binary.
term_t cbif_sha_update2(proc_t *proc, term_t *regs)
{
	term_t Context = regs[0];
	term_t Data = regs[1];
	if (!is_boxed_binary(Context))
		badarg(Context);
	// the context binary must be exactly sizeof(struct sha1_ctx) bytes
	bits_t bs, dst;
	bits_get_real(peel_boxed(Context), &bs);
	if (bs.ends -bs.starts != sizeof(struct sha1_ctx) *8)
		badarg(Context);
	struct sha1_ctx ctx;
	bits_init_buf((uint8_t *)&ctx, sizeof(ctx), &dst);
	bits_copy(&bs, &dst);

	if (!is_boxed_binary(Data) && !is_list(Data))
		badarg(Data);
	int sz = iolist_size(Data);
	if (sz < 0)
		badarg(Data);
	assert(sz <= 65536); //TODO: use heap_tmp_buf for larger Data
	uint8_t buf[sz];
	iolist_flatten(Data, buf);
	sha1_update(&ctx, sz, buf);

	// pack the updated hashing state back into a fresh binary
	uint8_t *ptr;
	term_t bin = heap_make_bin(&proc->hp, sizeof(ctx), &ptr);
	memcpy(ptr, &ctx, sizeof(ctx));
	return bin;
}
// crypto:exor/2: byte-wise XOR of two iolists/binaries of equal size.
term_t cbif_exor2(proc_t *proc, term_t *regs)
{
	term_t Bin1 = regs[0];
	term_t Bin2 = regs[1];
	if (!is_list(Bin1) && !is_boxed_binary(Bin1))
		badarg(Bin1);
	if (!is_list(Bin2) && !is_boxed_binary(Bin2))
		badarg(Bin2);

	int size1 = iolist_size(Bin1);
	if (size1 < 0)
		badarg(Bin1);
	int size2 = iolist_size(Bin2);
	if (size2 != size1)		// also rejects improper Bin2 (negative size)
		badarg(Bin2);
	assert(size1 <= 65536); //TODO: use heap_tmp_buf for larger binaries

	uint8_t buf1[size1];
	iolist_flatten(Bin1, buf1);
	uint8_t buf2[size2];
	iolist_flatten(Bin2, buf2);

	uint8_t *out;
	term_t result = heap_make_bin(&proc->hp, size1, &out);
	int off = 0;
	while (off < size1)
	{
		out[off] = buf1[off] ^ buf2[off];
		off++;
	}
	return result;
}
// erlang:experimental/2 (internal): grab-bag of diagnostics selected by
// the atom What. Each case is compiled in only when the corresponding
// EXP_* macro is defined; otherwise it is a silent no-op.
term_t cbif_experimental2(proc_t *proc, term_t *regs)
{
	term_t What = regs[0];
	UNUSED term_t Arg = regs[1];
	if (!is_atom(What))
		badarg(What);
	switch (What)
	{
	// Cloudozer's 2nd anniversary -- remove in 2016
	case A_CLOUDOZER:
		if (Arg == tag_int(2))
			cloudozer2();
		break;
	// Cloudozer's 2nd anniversary
	case A_MODULE_SIZE:
#ifdef EXP_RUNTIME_METRICS
		print_loaded_module_sizes();
#endif // EXP_RUNTIME_METRICS
		break;
	case A_VARIANT_SIZE:
#ifdef EXP_RUNTIME_METRICS
		print_variant_code_sizes();
#endif // EXP_RUNTIME_METRICS
		break;
	case A_COUNT_IOPS:
#ifdef EXP_COUNT_IOPS
		print_iop_counters();
#endif // EXP_COUNT_IOPS
		break;
	case A_PROCESSING_DELAY:
#ifdef EXP_LINC_LATENCY
		if (Arg == A_HELP)
			printk("ping -Q 42 -q -n -c 25000 -f <ip>\n");
		else
			linc_display();
#endif // EXP_LINC_LATENCY
		break;
	case A_LLSTAT:
#ifdef EXP_LINC_LLSTAT
		if (is_int(Arg))
			llstat_restart(int_value(Arg));
		else if (Arg == A_STOP)
			llstat_stop();
		else
			llstat_display();
#endif // EXP_LINC_LLSTAT
		break;
	default:
		badarg(What);
	}
	return A_OK;
}
// erlang:process_flag/2: set a flag of the calling process; returns
// the flag's previous value. An unknown flag/value pair is a badarg.
term_t cbif_process_flag2(proc_t *proc, term_t *regs)
{
	term_t Flag = regs[0];
	term_t Value = regs[1];
	if (!is_atom(Flag))
		badarg(Flag);
	term_t prev = proc_set_flag(proc, Flag, Value);
	if (prev == noval)
		badarg(Value);
	return prev;
}
// erlang:demonitor/1: remove the monitor identified by a local
// reference. Always returns true on success.
term_t cbif_demonitor1(proc_t *proc, term_t *regs)
{
	term_t MonRef = regs[0];
	if (!is_boxed(MonRef))
		badarg(MonRef);
	if (boxed_tag(peel_boxed(MonRef)) != SUBTAG_REF)
		badarg(MonRef);
	if (!ref_is_local(MonRef))
		badarg(MonRef);
	if (demonitor(local_ref_id(MonRef), proc->pid) < 0)
		badarg(MonRef);
	return A_TRUE;
}
// experimental tracing BIF: set the tracing mask and the traced
// module; a no-op unless built with TRACE_HARNESS.
// NOTE(review): an identical definition of cbif_trace2() appears again
// later in this file; a translation unit cannot define the same
// function twice - one of the two copies must be removed.
term_t cbif_trace2(proc_t *proc, term_t *regs)
{
	term_t Mask = regs[0];
	term_t Mod = regs[1];
	if (!is_int(Mask))
		badarg(Mask);
	if (!is_atom(Mod))
		badarg(Mod);
#ifdef TRACE_HARNESS
	trace_mask = int_value(Mask);
	trace_module = Mod;
#endif
	return A_OK;
}
// crypto:rand_bytes/1: return a binary of N random bytes drawn from
// the Mersenne Twister generator.
// NOTE(review): mt_lrand() is a plain PRNG, not a CSPRNG - confirm the
// intended security level of this BIF.
term_t cbif_rand_bytes1(proc_t *proc, term_t *regs)
{
	term_t N = regs[0];
	if (!is_int(N) || int_value(N) < 0)
		badarg(N);
	int len = int_value(N);
	uint8_t *ptr;
	term_t bin = heap_make_bin(&proc->hp, len, &ptr);

	// Fill whole 32-bit words first. The bounds use index arithmetic:
	// the original condition 'p <= ptr +len -4' computed a pointer
	// before the start of the buffer when len < 4, which is undefined
	// behavior in C.
	int off = 0;
	while (len -off >= 4)
	{
		uint32_t rnd = mt_lrand();
		PUT_UINT_32(ptr +off, rnd);
		off += 4;
	}

	// Fill the trailing 0-3 bytes from one more random word (drawn
	// unconditionally, exactly as before).
	uint32_t last = mt_lrand();
	switch (len -off)
	{
	case 3: ptr[off++] = (uint8_t)(last >> 16);
		/* fall through */
	case 2: ptr[off++] = (uint8_t)(last >> 8);
		/* fall through */
	case 1: ptr[off++] = (uint8_t)last;
		/* fall through */
	case 0: break;
	}
	return bin;
}
term_t cbif_trace2(proc_t *proc, term_t *regs)
{
	// Cloudozer's 2nd anniversary -- remove in 2016
	// NOTE(review): this duplicates an earlier definition of
	// cbif_trace2() in this file; having both in one translation unit
	// will not compile - one copy should be deleted.
	term_t Mask = regs[0];
	term_t Mod = regs[1];
	if (!is_int(Mask))
		badarg(Mask);
	if (!is_atom(Mod))
		badarg(Mod);
#ifdef TRACE_HARNESS
	trace_mask = int_value(Mask);
	trace_module = Mod;
#endif
	return A_OK;
}
// Self-tests for UTF-8-aware formatting: set up a UTF-8 locale, then
// run table-driven cases via one(name, input, limits..., expected).
// NOTE(review): this function looks unrelated to the BIF code around
// it (it resembles a utf8 formatting test driver) - confirm it belongs
// in this file.
void tests(void)
{
	char *loc;

	TEST_START("utf8_setlocale");
	loc = setlocale(LC_CTYPE, "en_US.UTF-8");
	ASSERT_PTR_NE(loc, NULL);
	TEST_DONE();

	// A stray 'badarg();' removed here: badarg() is the BIF
	// argument-error macro and has no meaning inside this test driver.

	one("null", NULL, 8, 6, 6, "(null)");
	one("empty", "", 2, 0, 0, "");
	one("ascii", "x", -2, -2, -2, "x");
	one("newline", "a\nb", -2, -2, -2, "a\nb");
	one("cr", "a\rb", -2, -2, -2, "a\rb");
	one("tab", "a\tb", -2, -2, -2, "a\tb");
	one("esc", "\033x", -2, -2, -2, "\\033x");
	one("inv_badbyte", "\377x", -2, -2, -2, "\\377x");
	one("inv_nocont", "\341x", -2, -2, -2, "\\341x");
	one("inv_nolead", "a\200b", -2, -2, -2, "a\\200b");
	one("sz_ascii", "1234567890123456", -2, -2, 16, "123456789012345");
	one("sz_esc", "123456789012\033", -2, -2, 16, "123456789012");
	one("width_ascii", "123", 2, 2, -1, "12");
	one("width_double", "a\343\201\201", 2, 1, -1, "a");
	one("double_fit", "a\343\201\201", 3, 3, 4, "a\343\201\201");
	one("double_spc", "a\343\201\201", 4, 3, 4, "a\343\201\201");
}
// erlang:unlink/1: dissolve the link between the caller and a process
// or port (outlet). Succeeds (returns true) even when the peer is
// dead or no link exists.
term_t cbif_unlink1(proc_t *proc, term_t *regs)
{
	term_t PidOid = regs[0];
	if (!is_short_pid(PidOid) && !is_short_oid(PidOid))
		badarg(PidOid);

	// resolve the peer's link set, whether it is a process or an outlet
	inter_links_t *peer_links = 0;
	if (is_short_pid(PidOid))
	{
		proc_t *peer = scheduler_lookup(PidOid);
		if (peer != 0)
			peer_links = &peer->links;
	}
	else
	{
		outlet_t *peer = outlet_lookup(PidOid);
		if (peer != 0)
			peer_links = &peer->links;
	}

	// a missing peer or a missing link makes unlink a no-op
	if (peer_links != 0 && are_inter_linked(&proc->links, PidOid))
	{
		inter_link_break(&proc->links, PidOid);
		inter_link_break(peer_links, proc->pid);
	}
	return A_TRUE;
}
// erlang:group_leader/2: make Leader the group leader of Pid.
term_t cbif_group_leader2(proc_t *proc, term_t *rs)
{
	term_t Leader = rs[0];
	term_t Pid = rs[1];
	if (!is_short_pid(Leader) && !is_atom(Leader))
		badarg(Leader);
	if (!is_short_pid(Pid))
		badarg(Pid);

	proc_t *target = scheduler_lookup(Pid);
	if (target == 0)
		badarg(Pid);	// the process must be alive
	target->group_leader = Leader;
	return A_TRUE;
}
// internal: replace the whole process dictionary with the given list.
term_t cbif_set_dictionary1(proc_t *proc, term_t *regs)
{
	term_t NewDict = regs[0];
	if (!is_list(NewDict))
		badarg(NewDict);
	proc->dictionary = NewDict;
	return A_OK;
}
// erlang:process_flag/3: set a flag of another (live) process; returns
// the flag's previous value.
term_t cbif_process_flag3(proc_t *proc, term_t *regs)
{
	term_t Pid = regs[0];
	term_t Flag = regs[1];
	term_t Value = regs[2];
	if (!is_short_pid(Pid))
		badarg(Pid);

	proc_t *target = scheduler_lookup(Pid);
	if (target == 0)
		badarg(Pid);
	if (!is_atom(Flag))
		badarg(Flag);

	term_t prev = proc_set_flag(target, Flag, Value);
	if (prev == noval)
		badarg(Value);
	return prev;
}
// maps:keys/1 [24] term_t cbif_keys1(proc_t *proc, term_t *regs) { term_t Map = regs[0]; if (!is_boxed_map(Map)) badarg(Map); t_map_t *m = (t_map_t *)peel_boxed(Map); uint32_t *p = peel_tuple(m->keys); return heap_vector_to_list(&proc->hp, p+1, *p); }
// erlang:link/1: link the calling process to a process or port.
// Linking to self is a no-op. If the peer is dead, a trapping caller
// receives a noproc exit signal; otherwise the call fails with noproc.
term_t cbif_link1(proc_t *proc, term_t *regs)
{
	term_t PidOid = regs[0];
	if (!is_short_pid(PidOid) && !is_short_oid(PidOid))
		badarg(PidOid);
	if (PidOid == proc->pid)
		return A_TRUE;

	proc_t *peer_proc = 0;
	outlet_t *peer_outlet = 0;
	if (is_short_pid(PidOid))
		peer_proc = scheduler_lookup(PidOid);
	else
		peer_outlet = outlet_lookup(PidOid);
	inter_links_t *peer_links = 0;
	if (peer_proc != 0)
		peer_links = &peer_proc->links;
	else if (peer_outlet != 0)
		peer_links = &peer_outlet->links;

	if (peer_proc == 0 && peer_outlet == 0)
	{
		// peer does not exist
		if (proc->trap_exit == A_TRUE)
		{
			int x = scheduler_signal_exit_N(proc, proc->pid, A_NOPROC);
			// does not destroy the proc
			if (x < 0)
				fail(A_NOT_LINKED);
			return A_TRUE;
		}
		else
			fail(A_NOPROC);
	}
	else
	{
		// establish the link on both sides, unless it already exists
		if (!are_inter_linked(&proc->links, PidOid))
		{
			int x = inter_link_establish_N(&proc->links, PidOid);
			if (x < 0)
				fail(A_NOT_LINKED);
			x = inter_link_establish_N(peer_links, proc->pid);
			if (x < 0)
			{
				// roll back the half-established link
				inter_link_break(&proc->links, PidOid);
				fail(A_NOT_LINKED);
			}
		}
		return A_TRUE;
	}
}
// erlang:is_process_alive/1: true iff the pid names a live local
// process.
term_t cbif_is_process_alive1(proc_t *proc, term_t *rs)
{
	term_t Pid = rs[0];
	if (!is_short_pid(Pid))
		badarg(Pid);
	return (scheduler_lookup(Pid) != 0) ?A_TRUE :A_FALSE;
}
// crypto: AES-CBC encrypt or decrypt.
// Key - iolist/binary, AES_MIN_KEY_SIZE..AES_MAX_KEY_SIZE bytes;
// IVec - binary of exactly one AES block; Data - iolist/binary;
// Dir - true to encrypt, false to decrypt.
term_t cbif_aes_cbc_crypt4(proc_t *proc, term_t *regs)
{
	term_t Key = regs[0];
	term_t IVec = regs[1];
	term_t Data = regs[2];
	term_t Dir = regs[3];
	if (!is_list(Key) && !is_boxed_binary(Key))
		badarg(Key);
	if (!is_boxed_binary(IVec))
		badarg(IVec);
	if (!is_list(Data) && !is_boxed_binary(Data))
		badarg(Data);
	if (!is_bool(Dir))
		badarg(Dir);

	int key_size = iolist_size(Key);
	if (key_size < AES_MIN_KEY_SIZE || key_size > AES_MAX_KEY_SIZE)
		badarg(Key);
	uint8_t key_buf[key_size];
	iolist_flatten(Key, key_buf);

	// the IV must be exactly one AES block (AES_BLOCK_SIZE bytes)
	bits_t src, dst;
	bits_get_real(peel_boxed(IVec), &src);
	if (src.ends -src.starts != AES_BLOCK_SIZE *8)
		badarg(IVec);
	uint8_t ivec_buf[AES_BLOCK_SIZE];
	bits_init_buf(ivec_buf, AES_BLOCK_SIZE, &dst);
	bits_copy(&src, &dst);

	int data_size = iolist_size(Data);
	if (data_size < 0)
		badarg(Data);
	assert(data_size <= 65536); //TODO: use heap_tmp_buf for larger Data
	uint8_t data_buf[data_size];
	iolist_flatten(Data, data_buf);

	// CBC context (nettle-style macros); the direction selects the key
	// schedule
	struct CBC_CTX(struct aes_ctx, AES_BLOCK_SIZE) ctx;
	if (Dir == A_TRUE)
		aes_set_encrypt_key((struct aes_ctx *)&ctx, key_size, key_buf);
	else
		aes_set_decrypt_key((struct aes_ctx *)&ctx, key_size, key_buf);
	CBC_SET_IV(&ctx, ivec_buf);

	uint8_t *ptr;
	term_t cipher_text = heap_make_bin(&proc->hp, data_size, &ptr);
	if (Dir == A_TRUE)
		CBC_ENCRYPT(&ctx, aes_encrypt, data_size, ptr, data_buf);
	else
		CBC_DECRYPT(&ctx, aes_decrypt, data_size, ptr, data_buf);
	return cipher_text;
}
// maps:is_key/2 [25] term_t cbif_is_key2(proc_t *proc, term_t *regs) { term_t Key = regs[0]; term_t Map = regs[1]; if (!is_boxed_map(Map)) badarg(Map); t_map_t *m = (t_map_t *)peel_boxed(Map); int index = map_key_index(Key, m->keys); return (index < 0) ?A_FALSE :A_TRUE; }
// experimental tracing BIF: set the tracing mask and clear the module
// filter; a no-op unless built with TRACE_HARNESS.
term_t cbif_trace1(proc_t *proc, term_t *regs)
{
	term_t NewMask = regs[0];
	if (!is_int(NewMask))
		badarg(NewMask);
#ifdef TRACE_HARNESS
	trace_mask = int_value(NewMask);
	trace_module = noval;
#endif
	return A_OK;
}
// crypto: finalize a SHA-1 computation and return the digest binary.
// Context is a binary holding a raw struct sha1_ctx snapshot.
term_t cbif_sha_final1(proc_t *proc, term_t *regs)
{
	term_t Context = regs[0];
	if (!is_boxed_binary(Context))
		badarg(Context);

	// unpack the saved hashing state from the context binary
	bits_t src, unpack;
	bits_get_real(peel_boxed(Context), &src);
	if (src.ends -src.starts != sizeof(struct sha1_ctx) *8)
		badarg(Context);
	struct sha1_ctx ctx;
	bits_init_buf((uint8_t *)&ctx, sizeof(ctx), &unpack);
	bits_copy(&src, &unpack);

	uint8_t *digest;
	term_t bin = heap_make_bin(&proc->hp, SHA1_DIGEST_SIZE, &digest);
	sha1_digest(&ctx, SHA1_DIGEST_SIZE, digest);
	return bin;
}
// maps:put/3 [21] term_t cbif_put3(proc_t *proc, term_t *regs) { term_t Key = regs[0]; term_t Value = regs[1]; term_t Map = regs[2]; if (!is_boxed_map(Map)) badarg(Map); t_map_t *m0 = (t_map_t *)peel_boxed(Map); int index = map_key_index(Key, m0->keys); if (index >= 0) { // same as update/3 int size = map_size(m0); int needed = WSIZE(t_map_t) +size; uint32_t *p = heap_alloc(&proc->hp, needed); t_map_t *m1 = (t_map_t *)p; box_map(p, size, m0->keys); heap_set_top(&proc->hp, p); memcpy(m1->values, m0->values, size *sizeof(term_t)); m1->values[index] = Value; return tag_boxed(m1); } else { uint32_t *q = peel_tuple(m0->keys); int size = *q++; term_t *ks = q; term_t kvs[] = {Key,Value}; int needed = 1 +size+1 +2 +size+1; uint32_t *p = heap_alloc(&proc->hp, needed); term_t keys = tag_tuple(p); *p++ = size+1; term_t *ks1 = p; p += size+1; term_t out = tag_boxed(p); term_t *vs1 = p +WSIZE(t_map_t); box_map(p, size+1, keys); heap_set_top(&proc->hp, p); int size1 = map_merge(ks, m0->values, size, kvs, 1, ks1, vs1); assert(size1 == size+1); return out; } }
// erlang:spawn_monitor/3: spawn apply(m, f, args) and atomically start
// monitoring the new process. Returns {Pid,MonitorRef}.
term_t cbif_spawn_monitor3(proc_t *proc, term_t *regs)
{
	term_t m = regs[0];
	term_t f = regs[1];
	term_t args = regs[2];
	if (!is_atom(m))
		badarg(m);
	if (!is_atom(f))
		badarg(f);
	if (!is_list(args))
		badarg(args);
	if (list_len(args) < 0)
		badarg(args); // too odd

	term_t ref = heap_make_ref(&proc->hp);
	proc_t *new_proc = proc_make(proc->group_leader);
	int x = proc_spawn_N(new_proc, m, f, args);
	if (x == 0)
	{
		uint64_t ref_id = local_ref_id(ref);
		x = monitor(ref_id, proc->pid, new_proc->pid);
	}
	if (x < 0)
	{
		//NB: no need to demonitor
		proc_destroy(new_proc);
		if (x == -TOO_DEEP)
			fail(A_SYSTEM_LIMIT);
		else
			fail(A_NOT_SPAWNED);
	}
	return heap_tuple2(&proc->hp, new_proc->pid, ref);
}
// maps:get/2 [28] term_t cbif_get2(proc_t *proc, term_t *regs) { term_t Key = regs[0]; term_t Map = regs[1]; if (!is_boxed_map(Map)) badarg(Map); t_map_t *m = (t_map_t *)peel_boxed(Map); int index = map_key_index(Key, m->keys); if (index < 0) fail(A_BAD_KEY); return m->values[index]; }