/* Hash-table value destructor for hook points: releases the currently
 * published head (if one exists) and then the hook point record. */
static void hookpoint_delete(void *val)
{
  ph_hook_point_t *hp = *(ph_hook_point_t**)val;

  if (hp->head) {
    ph_mem_free(mt.head, hp->head);
  }
  ph_mem_free(mt.hookpoint, hp);
}
/* Release a decoded DNS response.  Each answer's name string is freed
 * unless it aliases resp->name (shared names are not duplicated per
 * answer).  NOTE(review): resp->name itself is not freed separately —
 * presumably it lives inside the resp allocation; confirm in the
 * make_*_resp builders. */
void ph_dns_query_response_free(struct ph_dns_query_response *resp)
{
  int idx;

  for (idx = 0; idx < resp->num_answers; idx++) {
    if (resp->answer[idx].name == resp->name) {
      continue;
    }
    ph_mem_free(mt.string, resp->answer[idx].name);
  }
  ph_mem_free(mt.aresp, resp);
}
/* Release all resources owned by a DNS channel: the underlying ares
 * channel, the socket->job map, its mutex, and finally the record
 * itself.  NOTE(review): the teardown order looks deliberate (ares is
 * destroyed while the sock_map it drives is still alive) — confirm
 * before reordering. */
static void free_chan(ph_dns_channel_t *chan)
{
  ares_destroy(chan->chan);
  ph_ht_destroy(&chan->sock_map);
  pthread_mutex_destroy(&chan->chanlock);
  ph_mem_free(mt.chan, chan);
}
/* Epoch reclamation callback for a retired hook point head.  The
 * static assert guarantees `entry` is the first member, so the epoch
 * entry pointer is also the address of the head allocation itself and
 * can be passed straight to the allocator. */
static void free_head(ck_epoch_entry_t *ent)
{
  ph_static_assert(ph_offsetof(ph_hook_point_head_t, entry) == 0,
      entry_must_be_first);
  ph_mem_free(mt.head, ent);
}
/* Look up the hook point registered under `name`, optionally creating
 * it when absent.
 *
 * Fast path holds only the read lock.  If the entry is missing and
 * `create` is true, we take the write lock and re-check before
 * inserting (check-lock-check), because another thread may have
 * created the entry while no lock was held.
 *
 * Returns the hook point, or NULL when it is absent and !create, or
 * on allocation / hash insertion failure.
 *
 * NOTE(review): the new hp is published without explicit field
 * initialization — presumably mt.hookpoint zeroes its allocations
 * (PH_MEM_FLAGS_ZERO); confirm at the memtype definition. */
ph_hook_point_t *ph_hook_point_get(ph_string_t *name, bool create)
{
  ph_hook_point_t *hp = 0;

  ck_rwlock_read_lock(&rwlock);
  {
    ph_ht_lookup(&hook_hash, &name, &hp, false);
  }
  ck_rwlock_read_unlock(&rwlock);

  if (hp || !create) {
    return hp;
  }

  ck_rwlock_write_lock(&rwlock);
  {
    // Look again: someone may have populated while we were unlocked
    ph_ht_lookup(&hook_hash, &name, &hp, false);
    if (!hp) {
      hp = ph_mem_alloc(mt.hookpoint);
      if (hp) {
        if (ph_ht_set(&hook_hash, &name, &hp) != PH_OK) {
          // insertion failed: undo the allocation and report failure
          ph_mem_free(mt.hookpoint, hp);
          hp = NULL;
        }
      }
    }
  }
  ck_rwlock_write_unlock(&rwlock);
  return hp;
}
/* ares invokes this whenever the set of I/O events it cares about for
 * socket_fd changes.  A zero event set means ares is finished with the
 * socket entirely. */
static void sock_state_cb(void *data, ares_socket_t socket_fd,
    int readable, int writable)
{
  ph_dns_channel_t *chan = data;
  ph_job_t *job;
  ph_iomask_t mask = (readable ? PH_IOMASK_READ : 0) |
                     (writable ? PH_IOMASK_WRITE : 0);

  if (ph_ht_lookup(&chan->sock_map, &socket_fd, &job, false) != PH_OK) {
    ph_panic("job for socket %d was not found in ares sock_state_cb",
        socket_fd);
  }

  if (mask == 0) {
    /* ares no longer wants events on this fd: detach the job from the
     * NBIO scheduler, forget the mapping, and release the job */
    ph_job_set_nbio(job, 0, NULL);
    ph_ht_del(&chan->sock_map, &socket_fd);
    ph_mem_free(mt.job, job);
    return;
  }

  apply_mask(chan, job, mask);
}
/* Drop one reference to `str`; when the final reference is released,
 * free the backing buffer (if heap-backed), release any slice parent,
 * and free the string record unless it lives on the stack. */
void ph_string_delref(ph_string_t *str)
{
  if (ph_refcnt_del(&str->ref)) {
    /* last reference gone: tear everything down */
    if (str->mt > 0) {
      /* heap-backed buffer owned by this string */
      ph_mem_free(str->mt, str->buf);
      str->mt = PH_MEMTYPE_INVALID;
    }
    if (str->slice) {
      /* release our hold on the parent we slice into */
      ph_string_delref(str->slice);
      str->slice = 0;
    }
    str->buf = 0;
    if (!str->onstack) {
      ph_mem_free(mt_string, str);
    }
  }
}
/* Drop one reference to a variant; on the final release, recursively
 * release any contained values and free the variant itself.  The
 * TRUE/FALSE/NULL singletons must never hit refcount zero. */
void ph_var_delref(ph_variant_t *var)
{
  uint32_t idx;

  if (!ph_refcnt_del(&var->ref)) {
    return;
  }

  switch (var->type) {
    case PH_VAR_TRUE:
    case PH_VAR_FALSE:
    case PH_VAR_NULL:
      /* shared singletons are never supposed to be freed */
      ph_panic("You have a refcounting problem");

    case PH_VAR_ARRAY:
      if (var->u.aval.arr) {
        /* release every element, then the element vector */
        for (idx = 0; idx < var->u.aval.len; idx++) {
          ph_var_delref(var->u.aval.arr[idx]);
        }
        ph_mem_free(mt.arr, var->u.aval.arr);
        var->u.aval.arr = 0;
      }
      break;

    case PH_VAR_OBJECT:
      /* the table's value destructor releases the contained variants */
      ph_ht_destroy(&var->u.oval);
      break;

    case PH_VAR_STRING:
      if (var->u.sval) {
        ph_string_delref(var->u.sval);
        var->u.sval = 0;
      }
      break;

    default:
      /* scalar types carry no owned storage */
      ;
  }
  ph_mem_free(mt.var, var);
}
// Called each time the session wakes up. // The `why` parameter indicates why we were woken up static void echo_processor(ph_sock_t *sock, ph_iomask_t why, void *arg) { struct echo_state *state = arg; ph_buf_t *buf; // If the socket encountered an error, or if the timeout was reached // (there's a default timeout, even if we didn't override it), then // we tear down the session if (why & (PH_IOMASK_ERR|PH_IOMASK_TIME)) { ph_log(PH_LOG_ERR, "disconnecting `P{sockaddr:%p}", (void*)&sock->peername); ph_sock_shutdown(sock, PH_SOCK_SHUT_RDWR); ph_mem_free(mt_state, state); ph_sock_free(sock); return; } // We loop because echo_processor is only triggered by newly arriving // data or events from the kernel. If we have data buffered and only // partially consume it, we won't get woken up until the next data // arrives, if ever. while (1) { // Try to read a line of text. // This returns a slice over the underlying buffer (if the line was // smaller than a buffer) or a freshly made contiguous buffer (if the // line was larger than our buffer segment size). Either way, we // own a reference to the returned buffer and should treat it as // a read-only slice. buf = ph_sock_read_line(sock); if (!buf) { // Not available yet, we'll try again later return; } // We got a line; update our state state->num_lines++; // Send our response. The data is buffered and automatically sent // to the client as it becomes writable, so we don't need to handle // partial writes or EAGAIN here. // If this was a "real" server, we would still check the return value // from the writes and proceed to tear down the session if things failed. // Note that buf includes the trailing CRLF, so our response // will implicitly end with CRLF too. ph_stm_printf(sock->stream, "You said [%d]: ", state->num_lines); ph_stm_write(sock->stream, ph_buf_mem(buf), ph_buf_len(buf), NULL); // We're done with buf, so we must release it ph_buf_delref(buf); } }
/* Epoch-deferred callback that fires a hook item's unregistration
 * function once no reader can still observe the item, then releases
 * the carrier record. */
static void call_unreg(ck_epoch_entry_t *ent)
{
  struct ph_hook_item_free *item;

  /* `entry` must be the first member so the epoch entry pointer is
   * also the address of the containing record */
  ph_static_assert(ph_offsetof(struct ph_hook_item_free, entry) == 0,
      entry_must_be_first);

  item = (struct ph_hook_item_free*)ent;
  item->unreg(item->closure, item->func);
  ph_mem_free(mt.unreg, item);
}
/* Allocate an empty string with a `size`-byte backing buffer drawn
 * from memtype `mt`.  Returns NULL on allocation failure. */
ph_string_t *ph_string_make_empty(ph_memtype_t mt, uint32_t size)
{
  ph_string_t *str;
  char *storage = ph_mem_alloc_size(mt, size);

  if (storage == NULL) {
    return NULL;
  }

  str = ph_string_make_claim(mt, storage, 0, size);
  if (str == NULL) {
    /* claim failed, so ownership of the buffer never transferred;
     * release it ourselves */
    ph_mem_free(mt, storage);
  }
  return str;
}
static void result_cb(void *arg, int status, int timeouts, unsigned char *abuf, int alen) { struct ph_dns_query *q = arg; struct ph_dns_query_response *resp = NULL; switch (q->qtype) { case PH_DNS_QUERY_NONE: q->func.raw(q->arg, status, timeouts, abuf, alen); break; case PH_DNS_QUERY_MX: if (status == ARES_SUCCESS) { resp = make_mx_resp(abuf, alen); } q->func.func(q->arg, status, timeouts, abuf, alen, resp); break; case PH_DNS_QUERY_A: if (status == ARES_SUCCESS) { resp = make_a_resp(abuf, alen); } q->func.func(q->arg, status, timeouts, abuf, alen, resp); break; case PH_DNS_QUERY_SRV: if (status == ARES_SUCCESS) { resp = make_srv_resp(abuf, alen); } q->func.func(q->arg, status, timeouts, abuf, alen, resp); break; case PH_DNS_QUERY_AAAA: if (status == ARES_SUCCESS) { resp = make_aaaa_resp(abuf, alen); } q->func.func(q->arg, status, timeouts, abuf, alen, resp); break; default: ph_panic("invalid qtype %d", q->qtype); } ph_mem_free(mt.query, q); }
/* Create a new object (hash) variant pre-sized for `nelems` entries.
 * Returns NULL on allocation or hash-table initialization failure. */
ph_variant_t *ph_var_object(uint32_t nelems)
{
  ph_variant_t *var = ph_mem_alloc(mt.var);

  if (var == NULL) {
    return NULL;
  }
  var->ref = 1;
  var->type = PH_VAR_OBJECT;

  if (ph_ht_init(&var->u.oval, nelems, &ph_ht_string_key_def,
        &var_val_def) != PH_OK) {
    /* table init failed; undo the variant allocation */
    ph_mem_free(mt.var, var);
    return NULL;
  }
  return var;
}
ph_variant_t *ph_var_array(uint32_t nelems) { ph_variant_t *var; var = ph_mem_alloc(mt.var); if (!var) { return NULL; } var->ref = 1; var->type = PH_VAR_ARRAY; var->u.aval.len = 0; var->u.aval.alloc = nelems; var->u.aval.arr = ph_mem_alloc_size(mt.arr, nelems * sizeof(ph_variant_t*)); if (!var->u.aval.arr) { ph_mem_free(mt.var, var); return NULL; } return var; }
/* ares calls this when it opens a new socket; wrap the fd in a job so
 * the NBIO scheduler can drive it, and remember the fd->job mapping.
 * Returns 0 on success, -1 to tell ares the socket cannot be used. */
static int sock_create_cb(ares_socket_t socket_fd, int type, void *data)
{
  ph_dns_channel_t *chan = data;
  ph_job_t *job;

  ph_unused_parameter(type);

  job = ph_mem_alloc(mt.job);
  if (job == NULL) {
    return -1;
  }
  ph_job_init(job);
  job->callback = process_ares;
  job->data = chan;
  job->fd = socket_fd;

  if (ph_ht_set(&chan->sock_map, &socket_fd, &job) != PH_OK) {
    /* couldn't record the mapping; release the job and fail */
    ph_mem_free(mt.job, job);
    return -1;
  }
  return 0;
}
/* Resize a variable-size allocation belonging to memtype `mt`,
 * preserving realloc's edge-case semantics: size==0 frees and returns
 * NULL; ptr==NULL behaves as a fresh allocation.  Accounts the byte
 * delta and realloc count against the memtype's counters, and zeroes
 * any newly grown region when the memtype requests zeroed memory.
 *
 * Returns the (possibly moved) user pointer, or NULL on failure
 * (the original allocation remains valid in that case). */
void *ph_mem_realloc(ph_memtype_t mt, void *ptr, uint64_t size)
{
  struct mem_type *mem_type;
  ph_counter_block_t *block;
  static const uint8_t slots[2] = { SLOT_BYTES, SLOT_REALLOC };
  int64_t values[2];
  struct sized_header *hdr;
  uint64_t orig_size;
  void *new_ptr;

  // realloc(p, 0): free and return NULL
  if (size == 0) {
    ph_mem_free(mt, ptr);
    return NULL;
  }
  // realloc(NULL, n): plain allocation
  if (ptr == NULL) {
    return ph_mem_alloc_size(mt, size);
  }

  mem_type = resolve_mt(mt);
  if (mem_type->def.item_size) {
    memory_panic(
      "mem_type %s is not vsize and cannot be used with ph_mem_realloc",
      mem_type->def.name);
    return NULL;
  }

  // Step back to the hidden header that precedes the user data
  hdr = ptr;
  hdr--;
  ptr = hdr;
  if (hdr->mt != mt) {
    memory_panic("ph_mem_realloc: hdr->mt %d != caller provided mt %d %s",
        hdr->mt, mt, mem_type->def.name);
  }
  orig_size = hdr->size;
  if (orig_size == size) {
    // BUGFIX: `ptr` was rewound to the header above; returning it here
    // handed callers the header address instead of their data pointer.
    return hdr + 1;
  }

  hdr = realloc(ptr, size + HEADER_RESERVATION);
  if (!hdr) {
    ph_counter_scope_add(mem_type->scope,
        mem_type->first_slot + SLOT_OOM, 1);
    if (mem_type->def.flags & PH_MEM_FLAGS_PANIC) {
      ph_panic("OOM while allocating %" PRIu64 " bytes of %s/%s memory",
          size + HEADER_RESERVATION,
          mem_type->def.facility, mem_type->def.name);
    }
    // original allocation is untouched on realloc failure
    return NULL;
  }
  new_ptr = hdr + 1;
  hdr->size = size;

  // Record the byte delta (may be negative when shrinking) and one
  // realloc event in a single bulk counter update
  block = ph_counter_block_open(mem_type->scope);
  values[0] = size - orig_size;
  values[1] = 1;
  ph_counter_block_bulk_add(block, 2, slots, values);
  ph_counter_block_delref(block);

  // Zero the newly exposed tail when the memtype promises zeroed memory
  if (size > orig_size && (mem_type->def.flags & PH_MEM_FLAGS_ZERO)) {
    memset((char*)new_ptr + orig_size, 0, size - orig_size);
  }

  return new_ptr;
}
/* Remove a previously registered (func, closure) pair from hook point
 * `hp`.
 *
 * Under the writer lock we build a fresh head with the matching item
 * removed and publish it; the old head is epoch-deferred because
 * lock-free readers may still be traversing it.  If the removed item
 * carried an unreg callback, its invocation is likewise epoch-deferred
 * so it only fires after all such readers have drained.
 *
 * Returns PH_OK when the item was found and removed; PH_ERR when the
 * hook point has no head, the item is not registered, or allocation of
 * the replacement head / unreg carrier fails. */
static ph_result_t do_unregister(ph_hook_point_t *hp, ph_hook_func func,
    void *closure)
{
  ph_hook_point_head_t *old_head, *new_head;
  ph_result_t res = PH_ERR;
  uint16_t off = 0;
  bool found = false;
  struct ph_hook_item_free *unreg = 0;

  ck_rwlock_write_lock(&rwlock);
  {
    old_head = hp->head;
    if (!old_head) {
      goto done;
    }

    /* locate the registration to remove */
    for (off = 0; off < old_head->nitems; off++) {
      if (old_head->items[off].func == func &&
          old_head->items[off].closure == closure) {
        found = true;
        break;
      }
    }
    if (!found) {
      goto done;
    }

    /* replacement head holds one item fewer */
    new_head = ph_mem_alloc_size(mt.head, sizeof(*new_head) +
        (sizeof(ph_hook_item_t) * (old_head->nitems-1)));
    if (!new_head) {
      goto done;
    }

    if (old_head->items[off].unreg) {
      /* capture the callback data in a heap record: it must outlive
       * this stack frame because it is invoked via epoch defer */
      unreg = ph_mem_alloc(mt.unreg);
      if (!unreg) {
        ph_mem_free(mt.head, new_head);
        goto done;
      }
      unreg->closure = old_head->items[off].closure;
      unreg->func = old_head->items[off].func;
      unreg->unreg = old_head->items[off].unreg;
    }

    new_head->nitems = old_head->nitems - 1;
    // Copy before the item
    if (off) {
      memcpy(new_head->items, old_head->items,
          off * sizeof(ph_hook_item_t));
    }
    // Copy after the item.  (off < nitems at this point, so the
    // condition always holds; the memcpy is zero-length when off was
    // the final slot.  NOTE(review): `<` would state the intent more
    // directly than `<=`.)
    if (off + 1 <= old_head->nitems) {
      memcpy(new_head->items + off, old_head->items + off + 1,
          (old_head->nitems - (off+1)) * sizeof(ph_hook_item_t));
    }
    // Don't need to re-sort, since we simply removed that item
    hp->head = new_head;
    /* readers may still hold old_head; reclaim after a grace period */
    ph_thread_epoch_defer(&old_head->entry, free_head);

    // Arrange to unregister
    if (unreg) {
      ph_thread_epoch_defer(&unreg->entry, call_unreg);
    }
    res = PH_OK;
  }
done:
  ck_rwlock_write_unlock(&rwlock);
  return res;
}