//
// Flatten the valid iolist to the buffer of
// appropriate size pointed to by ptr
//
uint8_t *iolist_flatten(term_t l, uint8_t *ptr)
{
    if (is_nil(l))
        return ptr;
    if (is_cons(l))
    {
        do {
            uint32_t *term_data = peel_cons(l);
            term_t e = term_data[0];
            if (is_int(e))
                *ptr++ = int_value(e);
            else
            {
                assert(is_list(e) || (is_boxed(e) && is_binary(peel_boxed(e))));
                ptr = iolist_flatten(e, ptr);
            }
            l = term_data[1];
            if (is_boxed(l) && is_binary(peel_boxed(l)))
                return iolist_flatten(l, ptr);
        } while (is_cons(l));
        assert(is_nil(l));
    }
    else    // is_binary()
    {
        bits_t bs, to;
        bits_get_real(peel_boxed(l), &bs);
        bits_init_buf(ptr, (bs.ends +7) /8, &to);
        ptr += (bs.ends - bs.starts) /8;
        bits_copy(&bs, &to);
        assert(bs.starts == bs.ends);
    }
    return ptr;
}
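// A minimal usage sketch of the iolist_size()/iolist_flatten() pair, the
// pattern repeated by the BIFs below. The demo name and the explicit cap are
// illustrative only; assumes iolist_size() returns the flattened byte count
// or a negative value for a malformed iolist, as its uses below suggest.
static void iolist_demo(term_t Data)
{
    int sz = iolist_size(Data);
    if (sz < 0)
        badarg(Data);
    assert(sz <= 65536);        // same stack-VLA cap as the BIFs below
    uint8_t buf[sz];            // VLA sized by the iolist byte count
    uint8_t *end = iolist_flatten(Data, buf);
    assert(end == buf +sz);     // flattening fills the buffer exactly
    (void)end;
}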
term_t cbif_sha_update2(proc_t *proc, term_t *regs)
{
    term_t Context = regs[0];
    term_t Data = regs[1];
    if (!is_boxed_binary(Context))
        badarg(Context);
    bits_t bs, dst;
    bits_get_real(peel_boxed(Context), &bs);
    if (bs.ends -bs.starts != sizeof(struct sha1_ctx) *8)
        badarg(Context);
    struct sha1_ctx ctx;
    bits_init_buf((uint8_t *)&ctx, sizeof(ctx), &dst);
    bits_copy(&bs, &dst);

    if (!is_boxed_binary(Data) && !is_list(Data))
        badarg(Data);
    int sz = iolist_size(Data);
    if (sz < 0)
        badarg(Data);
    assert(sz <= 65536);    //TODO: use heap_tmp_buf for larger Data
    uint8_t buf[sz];
    iolist_flatten(Data, buf);

    sha1_update(&ctx, sz, buf);

    uint8_t *ptr;
    term_t bin = heap_make_bin(&proc->hp, sizeof(ctx), &ptr);
    memcpy(ptr, &ctx, sizeof(ctx));

    return bin;
}
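// cbif_sha_update2 above and cbif_sha_final1 below both recover the C context
// by copying the context binary byte-for-byte into a struct sha1_ctx. That
// idiom could be factored out; a sketch using only the bits_* primitives
// already seen above (bin_to_struct is a hypothetical name):
static int bin_to_struct(term_t bin, void *out, uint32_t size)
{
    bits_t bs, dst;
    bits_get_real(peel_boxed(bin), &bs);
    if (bs.ends -bs.starts != size *8)
        return 0;               // binary is not exactly 'size' bytes
    bits_init_buf((uint8_t *)out, size, &dst);
    bits_copy(&bs, &dst);
    return 1;
}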
term_t cbif_aes_cbc_crypt4(proc_t *proc, term_t *regs)
{
    term_t Key = regs[0];
    term_t IVec = regs[1];
    term_t Data = regs[2];
    term_t Dir = regs[3];

    if (!is_list(Key) && !is_boxed_binary(Key))
        badarg(Key);
    if (!is_boxed_binary(IVec))
        badarg(IVec);
    if (!is_list(Data) && !is_boxed_binary(Data))
        badarg(Data);
    if (!is_bool(Dir))
        badarg(Dir);

    int key_size = iolist_size(Key);
    if (key_size < AES_MIN_KEY_SIZE || key_size > AES_MAX_KEY_SIZE)
        badarg(Key);
    uint8_t key_buf[key_size];
    iolist_flatten(Key, key_buf);

    bits_t src, dst;
    bits_get_real(peel_boxed(IVec), &src);
    if (src.ends -src.starts != AES_BLOCK_SIZE *8)
        badarg(IVec);
    uint8_t ivec_buf[AES_BLOCK_SIZE];
    bits_init_buf(ivec_buf, AES_BLOCK_SIZE, &dst);
    bits_copy(&src, &dst);

    int data_size = iolist_size(Data);
    if (data_size < 0)
        badarg(Data);
    assert(data_size <= 65536);    //TODO: use heap_tmp_buf for larger Data
    uint8_t data_buf[data_size];
    iolist_flatten(Data, data_buf);

    struct CBC_CTX(struct aes_ctx, AES_BLOCK_SIZE) ctx;
    if (Dir == A_TRUE)
        aes_set_encrypt_key((struct aes_ctx *)&ctx, key_size, key_buf);
    else
        aes_set_decrypt_key((struct aes_ctx *)&ctx, key_size, key_buf);
    CBC_SET_IV(&ctx, ivec_buf);

    uint8_t *ptr;
    term_t cipher_text = heap_make_bin(&proc->hp, data_size, &ptr);
    if (Dir == A_TRUE)
        CBC_ENCRYPT(&ctx, aes_encrypt, data_size, ptr, data_buf);
    else
        CBC_DECRYPT(&ctx, aes_decrypt, data_size, ptr, data_buf);

    return cipher_text;
}
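// Note that nettle's CBC mode works on whole blocks only: CBC_ENCRYPT and
// CBC_DECRYPT expect a length that is a multiple of the block size. The BIF
// above relies on the caller to honor this; a stricter variant might reject
// ragged input explicitly, e.g.:
//
//    if (data_size % AES_BLOCK_SIZE != 0)
//        badarg(Data);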
term_t cbif_sha_final1(proc_t *proc, term_t *regs)
{
    term_t Context = regs[0];
    if (!is_boxed_binary(Context))
        badarg(Context);
    bits_t bs, dst;
    bits_get_real(peel_boxed(Context), &bs);
    if (bs.ends -bs.starts != sizeof(struct sha1_ctx) *8)
        badarg(Context);
    struct sha1_ctx ctx;
    bits_init_buf((uint8_t *)&ctx, sizeof(ctx), &dst);
    bits_copy(&bs, &dst);

    uint8_t *ptr;
    term_t bin = heap_make_bin(&proc->hp, SHA1_DIGEST_SIZE, &ptr);
    sha1_digest(&ctx, SHA1_DIGEST_SIZE, ptr);

    return bin;
}
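// Because sha_final works on a copy of the context, the Erlang-side context
// binary stays intact (binaries are immutable). For reference, the streamed
// update/final pair above feeds the same nettle primitives as a one-shot
// digest would; a self-contained sketch (sha1_oneshot is a hypothetical
// helper; nettle 2.x declares these in <nettle/sha.h> instead):
#include <nettle/sha1.h>

static void sha1_oneshot(const uint8_t *msg, size_t len,
                         uint8_t digest[SHA1_DIGEST_SIZE])
{
    struct sha1_ctx ctx;
    sha1_init(&ctx);                                // fresh context
    sha1_update(&ctx, len, msg);                    // absorb the whole message
    sha1_digest(&ctx, SHA1_DIGEST_SIZE, digest);    // finalize and extract
}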
//NB: called both from a callback and a BIF - do not send signals
static int recv_bake_packets(outlet_t *ol, proc_t *cont_proc)
{
    term_t reason = noval;
    term_t packet = noval;
    term_t active_tag = A_TCP;

more_packets:
    if (ol->cr_in_progress || ol->active != INET_PASSIVE)
    {
        if (ol->packet == TCP_PB_RAW &&
                ol->recv_expected_size != 0 &&
                ol->recv_buf_off < ol->recv_expected_size)
            packet = A_MORE;
        else
        {
            uint32_t more_len;
            uint32_t adj_len = ol->recv_buf_off;
            // take into account expected_size for raw packets
            if (ol->packet == TCP_PB_RAW && ol->recv_expected_size != 0)
                adj_len = ol->recv_expected_size;

            bits_t bs;
            bits_init_buf(ol->recv_buffer, adj_len, &bs);
            packet = decode_packet_N(ol->packet, &bs, noval, ol->binary,
                        &reason, &more_len, ol->packet_size, 0, &cont_proc->hp);

            if (packet == A_MORE && more_len != 0 && more_len > ol->recv_bufsize)
                return -TOO_LONG;

            if (packet != A_MORE && packet != noval)
            {
                uint32_t left = (bs.ends -bs.starts) /8;
                uint32_t consumed = adj_len -left;
                memmove(ol->recv_buffer, ol->recv_buffer +consumed,
                                            ol->recv_buf_off -consumed);
                ol->recv_buf_off -= consumed;

                //debug("---> recv_bake_packets: consumed %d left %d cr_in_progress %d active %d\n",
                //      consumed, left, ol->cr_in_progress, ol->active);

                // Is it safe to acknowledge the data here, outside of the
                // receive callback?
                tcp_recved(ol->tcp, consumed);

                if (ol->packet == TCP_PB_HTTP || ol->packet == TCP_PB_HTTP_BIN)
                    active_tag = A_HTTP;

                if (ol->packet == TCP_PB_HTTP)
                    ol->packet = TCP_PB_HTTPH;
                else if (ol->packet == TCP_PB_HTTP_BIN)
                    ol->packet = TCP_PB_HTTPH_BIN;
                else if (ol->packet == TCP_PB_HTTPH && packet == A_HTTP_EOH)
                    ol->packet = TCP_PB_HTTP;
                else if (ol->packet == TCP_PB_HTTPH_BIN && packet == A_HTTP_EOH)
                    ol->packet = TCP_PB_HTTP_BIN;
            }
        }
    }

    if (packet != A_MORE && ol->cr_in_progress)
    {
        cr_cancel_deferred(ol);
        term_t a = (packet == noval) ?A_ERROR :A_OK;
        term_t b = (packet == noval) ?reason :packet;
        inet_async2(ol->oid, ol->cr_reply_to, ASYNC_REF, a, b);
    }
    else if (packet != A_MORE && ol->active != INET_PASSIVE)
    {
        uint32_t *p = heap_alloc_N(&cont_proc->hp, 1 +3);
        if (p == 0)
            return -NO_MEMORY;
        p[0] = 3;
        p[1] = (packet == noval) ?A_TCP_ERROR :active_tag;
        p[2] = ol->oid;
        p[3] = (packet == noval) ?reason :packet;
        heap_set_top(&cont_proc->hp, p +1 +3);
        int x = scheduler_new_local_mail_N(cont_proc, tag_tuple(p));
        if (x < 0)
            return x;

        if (ol->active == INET_ONCE && !is_tuple(packet))    // http_eoh
            ol->active = INET_PASSIVE;
        else if (ol->recv_buf_off > 0 && packet != noval)
            goto more_packets;
    }

    return 0;
}
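// The packet-mode juggling above is the http packet state machine: decoding a
// request/status line flips the outlet into header mode, and http_eoh flips
// it back for the next message. A self-contained model of just those
// transitions (hypothetical names, mirroring the TCP_PB_HTTP*/TCP_PB_HTTPH*
// pairs above):
enum pb_http { PB_HTTP, PB_HTTPH };    // message line vs. header lines

static enum pb_http next_http_mode(enum pb_http mode, int is_http_eoh)
{
    if (mode == PB_HTTP)
        return PB_HTTPH;    // request/status line decoded; headers follow
    if (is_http_eoh)
        return PB_HTTP;     // end of headers; expect the next message
    return PB_HTTPH;        // still inside the header block
}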