/**
 * Callback invoked by the background worker when a resolution finishes.
 * Encodes error rcodes into a reply packet and queues the answer for the
 * foreground via add_bg_result.
 * @param arg: the ctx_query for this lookup (cast from void*).
 * @param rcode: DNS rcode; nonzero means an error reply is encoded.
 * @param buf: buffer with the answer packet, or NULL (scratch buffer used).
 * @param s: DNSSEC security status of the result.
 * @param why_bogus: explanation string when validation found it bogus.
 */
void libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s, char* why_bogus)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	if(q->cancelled || q->w->back->want_to_quit) {
		if(q->w->is_bg_thread) {
			/* delete it now */
			/* thread mode shares the context with the
			 * foreground: unregister the query under the
			 * cfglock before freeing it */
			struct ub_ctx* ctx = q->w->ctx;
			lock_basic_lock(&ctx->cfglock);
			(void)rbtree_delete(&ctx->queries, q->node.key);
			ctx->num_async--;
			context_query_delete(q);
			lock_basic_unlock(&ctx->cfglock);
		}
		/* cancelled, do not give answer */
		/* NOTE(review): in forked mode q is presumably cleaned up
		 * by the owning process elsewhere — confirm */
		return;
	}
	q->msg_security = s;
	/* no answer packet supplied: encode into the scratch buffer */
	if(!buf) buf = q->w->env->scratch_buffer;
	if(rcode != 0) {
		/* turn the error rcode into a minimal error reply packet */
		error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
	}
	add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus);
}
/**
 * Callback for event-based (libevent-style) lookups: removes the query
 * from the context, then delivers the answer to the application callback
 * unless the query was cancelled.
 * @param arg: the ctx_query for this lookup (cast from void*).
 * @param rcode: DNS rcode of the answer.
 * @param buf: buffer holding the answer packet.
 * @param s: DNSSEC security status, mapped to 0/1/2 for the callback.
 * @param why_bogus: explanation string when validation found it bogus.
 */
void libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s, char* why_bogus)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	struct ub_ctx* ctx = q->w->ctx;
	/* copy out everything needed before q is destroyed */
	ub_event_callback_type cb = (ub_event_callback_type)q->cb;
	void* cb_arg = q->cb_arg;
	int was_cancelled = q->cancelled;

	/* delete it now: unregister and free under the context lock */
	lock_basic_lock(&ctx->cfglock);
	(void)rbtree_delete(&ctx->queries, q->node.key);
	ctx->num_async--;
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);

	if(was_cancelled)
		return; /* cancelled: the application gets no answer */

	/* security status for the callback: 0 insecure, 1 bogus, 2 secure */
	int sec = (s == sec_status_secure) ? 2 :
		((s == sec_status_bogus) ? 1 : 0);
	(*cb)(cb_arg, rcode, (void*)sldns_buffer_begin(buf),
		(int)sldns_buffer_limit(buf), sec, why_bogus);
}
/**
 * Add result to the bg worker result queue (the rr_pipe back to the
 * foreground). Consumes the query in forked mode (unregistered and
 * deleted here); in threaded mode q stays registered and only its
 * serialized answer is queued.
 * @param w: the library worker that produced the answer.
 * @param q: the answered (or failed) query.
 * @param pkt: answer packet, or NULL when only an error code applies.
 * @param err: UB_* error code to report; UB_NOERROR on success.
 * @param reason: why_bogus explanation string, or NULL.
 */
static void add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt, int err, char* reason)
{
	uint8_t* msg = NULL;
	uint32_t len = 0;
	if(w->want_quit) {
		/* worker is shutting down; drop the answer */
		context_query_delete(q);
		return;
	}
	/* serialize and delete unneeded q */
	if(w->is_bg_thread) {
		/* thread mode: the context is shared, so protect q->res
		 * and the serialization with the cfglock */
		lock_basic_lock(&w->ctx->cfglock);
		/* NOTE(review): strdup failure leaves why_bogus NULL;
		 * presumably acceptable best-effort — confirm */
		if(reason) q->res->why_bogus = strdup(reason);
		if(pkt) {
			/* keep a private copy of the packet in q so the
			 * caller's buffer can be reused */
			q->msg_len = sldns_buffer_remaining(pkt);
			q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len);
			/* copy failed: report out-of-memory instead */
			if(!q->msg) msg = context_serialize_answer(q, UB_NOMEM, NULL, &len);
			else msg = context_serialize_answer(q, err, NULL, &len);
		} else msg = context_serialize_answer(q, err, NULL, &len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		/* forked mode: no lock taken here — presumably this
		 * process owns its own copy of the context (confirm);
		 * serialize straight from pkt, then unregister and free q */
		if(reason) q->res->why_bogus = strdup(reason);
		msg = context_serialize_answer(q, err, pkt, &len);
		(void)rbtree_delete(&w->ctx->queries, q->node.key);
		w->ctx->num_async--;
		context_query_delete(q);
	}
	if(!msg) {
		log_err("out of memory for async answer");
		return;
	}
	if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
		log_err("out of memory for async answer");
		return;
	}
}
int ub_resolve(struct ub_ctx* ctx, char* name, int rrtype, int rrclass, struct ub_result** result) { struct ctx_query* q; int r; *result = NULL; lock_basic_lock(&ctx->cfglock); if(!ctx->finalized) { r = context_finalize(ctx); if(r) { lock_basic_unlock(&ctx->cfglock); return r; } } /* create new ctx_query and attempt to add to the list */ lock_basic_unlock(&ctx->cfglock); q = context_new(ctx, name, rrtype, rrclass, NULL, NULL); if(!q) return UB_NOMEM; /* become a resolver thread for a bit */ r = libworker_fg(ctx, q); if(r) { lock_basic_lock(&ctx->cfglock); (void)rbtree_delete(&ctx->queries, q->node.key); context_query_delete(q); lock_basic_unlock(&ctx->cfglock); return r; } q->res->answer_packet = q->msg; q->res->answer_len = (int)q->msg_len; q->msg = NULL; *result = q->res; q->res = NULL; lock_basic_lock(&ctx->cfglock); (void)rbtree_delete(&ctx->queries, q->node.key); context_query_delete(q); lock_basic_unlock(&ctx->cfglock); return UB_NOERROR; }
/**
 * Cancel an outstanding asynchronous query.
 * @param ctx: the library context.
 * @param async_id: id of the query, from ub_resolve_async.
 * @return UB_NOERROR on success, UB_NOID if the query is not (or no
 *	longer) registered, UB_NOMEM or UB_PIPE on failure.
 */
int ub_cancel(struct ub_ctx* ctx, int async_id)
{
	struct ctx_query* q;
	uint8_t* msg = NULL;
	uint32_t len = 0;
	lock_basic_lock(&ctx->cfglock);
	q = (struct ctx_query*)rbtree_search(&ctx->queries, &async_id);
	if(!q || !q->async) {
		/* it is not there, so nothing to do */
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOID;
	}
	log_assert(q->async);
	q->cancelled = 1;
	/* delete it */
	if(!ctx->dothread) { /* if forked */
		/* forked mode: remove the query here and notify the
		 * background process over the command pipe */
		(void)rbtree_delete(&ctx->queries, q->node.key);
		ctx->num_async--;
		msg = context_serialize_cancel(q, &len);
		context_query_delete(q);
		lock_basic_unlock(&ctx->cfglock);
		if(!msg) {
			return UB_NOMEM;
		}
		/* send cancel to background worker */
		lock_basic_lock(&ctx->qqpipe_lock);
		if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
			lock_basic_unlock(&ctx->qqpipe_lock);
			free(msg);
			return UB_PIPE;
		}
		lock_basic_unlock(&ctx->qqpipe_lock);
		free(msg);
	} else {
		/* threaded mode: leave q registered; the cancelled flag
		 * makes the done-callback discard the answer and delete
		 * the query (see libworker_bg_done_cb) */
		lock_basic_unlock(&ctx->cfglock);
	}
	return UB_NOERROR;
}
/**
 * Start an asynchronous resolve. Starts the background worker on first
 * use, registers the query, and ships it to the worker over the command
 * pipe.
 * @param ctx: the library context; finalized here if not yet done.
 * @param name: domain name to look up.
 * @param rrtype: RR type of the query.
 * @param rrclass: RR class of the query.
 * @param mydata: opaque pointer passed back to the callback.
 * @param callback: called when the answer (or error) arrives.
 * @param async_id: if not NULL, receives the id for ub_cancel
 *	(set to 0 first so it is defined on every error path).
 * @return UB_NOERROR or a UB_* error code.
 */
int ub_resolve_async(struct ub_ctx* ctx, char* name, int rrtype, int rrclass, void* mydata, ub_callback_t callback, int* async_id)
{
	struct ctx_query* q;
	uint8_t* msg = NULL;
	uint32_t len = 0;
	if(async_id)
		*async_id = 0;
	lock_basic_lock(&ctx->cfglock);
	if(!ctx->finalized) {
		int r = context_finalize(ctx);
		if(r) {
			lock_basic_unlock(&ctx->cfglock);
			return r;
		}
	}
	if(!ctx->created_bg) {
		int r;
		/* set the flag before dropping the lock so a concurrent
		 * caller does not also try to start the worker */
		ctx->created_bg = 1;
		lock_basic_unlock(&ctx->cfglock);
		r = libworker_bg(ctx);
		if(r) {
			/* worker failed to start: roll back the flag so a
			 * later call can retry */
			lock_basic_lock(&ctx->cfglock);
			ctx->created_bg = 0;
			lock_basic_unlock(&ctx->cfglock);
			return r;
		}
	} else {
		lock_basic_unlock(&ctx->cfglock);
	}
	/* create new ctx_query and attempt to add to the list */
	q = context_new(ctx, name, rrtype, rrclass, callback, mydata);
	if(!q)
		return UB_NOMEM;
	/* write over pipe to background worker */
	lock_basic_lock(&ctx->cfglock);
	msg = context_serialize_new_query(q, &len);
	if(!msg) {
		/* serialization failed: unregister and free the query */
		(void)rbtree_delete(&ctx->queries, q->node.key);
		ctx->num_async--;
		context_query_delete(q);
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOMEM;
	}
	/* hand out the id while still locked, before the worker can
	 * possibly answer */
	if(async_id)
		*async_id = q->querynum;
	lock_basic_unlock(&ctx->cfglock);
	lock_basic_lock(&ctx->qqpipe_lock);
	if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
		lock_basic_unlock(&ctx->qqpipe_lock);
		free(msg);
		return UB_PIPE;
	}
	lock_basic_unlock(&ctx->qqpipe_lock);
	free(msg);
	return UB_NOERROR;
}
/**
 * Process an answer message received from the bg worker over the result
 * pipe. Deserializes it back onto its ctx_query, parses the packet into
 * the ub_result, and unregisters the query.
 * @param ctx: the library context holding the query list.
 * @param msg: serialized answer from the worker.
 * @param len: length of msg.
 * @param cb: receives the application callback (NULL when cancelled).
 * @param cbarg: receives the callback argument.
 * @param err: receives the UB_* error code from the answer.
 * @param res: receives the filled ub_result (caller owns it if *cb set).
 * @return 0 on bad data, 1 when there is nothing to call back,
 *	2 when *cb/*cbarg/*res should be delivered to the application.
 */
static int
process_answer_detail(struct ub_ctx* ctx, uint8_t* msg, uint32_t len,
	ub_callback_t* cb, void** cbarg, int* err, struct ub_result** res)
{
	struct ctx_query* q;
	if(context_serial_getcmd(msg, len) != UB_LIBCMD_ANSWER) {
		log_err("error: bad data from bg worker %d",
			(int)context_serial_getcmd(msg, len));
		return 0;
	}
	lock_basic_lock(&ctx->cfglock);
	q = context_deserialize_answer(ctx, msg, len, err);
	if(!q) {
		lock_basic_unlock(&ctx->cfglock);
		/* probably simply the lookup that failed, i.e.
		 * response returned before cancel was sent out, so noerror */
		return 1;
	}
	log_assert(q->async);

	/* grab cb while locked */
	if(q->cancelled) {
		*cb = NULL;
		*cbarg = NULL;
	} else {
		*cb = q->cb;
		*cbarg = q->cb_arg;
	}
	if(*err) {
		/* worker reported an error; no packet to parse */
		*res = NULL;
		ub_resolve_free(q->res);
	} else {
		/* parse the message, extract rcode, fill result */
		/* use the sldns_buffer API consistently with the rest of
		 * this file (was ldns_buffer_*, a missed rename) */
		sldns_buffer* buf = sldns_buffer_new(q->msg_len);
		struct regional* region = regional_create();
		*res = q->res;
		/* default to SERVFAIL in case parsing cannot proceed */
		(*res)->rcode = LDNS_RCODE_SERVFAIL;
		if(region && buf) {
			sldns_buffer_clear(buf);
			sldns_buffer_write(buf, q->msg, q->msg_len);
			sldns_buffer_flip(buf);
			libworker_enter_result(*res, buf, region,
				q->msg_security);
		}
		/* hand the raw packet to the caller */
		(*res)->answer_packet = q->msg;
		(*res)->answer_len = (int)q->msg_len;
		q->msg = NULL;
		sldns_buffer_free(buf);
		regional_destroy(region);
	}
	q->res = NULL;
	/* delete the q from list */
	(void)rbtree_delete(&ctx->queries, q->node.key);
	ctx->num_async--;
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);

	if(*cb) return 2;
	/* cancelled: free the result instead of delivering it */
	ub_resolve_free(*res);
	return 1;
}
/** rbtree traversal helper: frees one ctx_query node */
static void
delq(rbnode_t* n, void* ATTR_UNUSED(arg))
{
	context_query_delete((struct ctx_query*)n);
}