/**
 * Assemble the rrsets in the anchors, ready for use by validator.
 * Walks the whole anchor tree under the main lock; anchors whose
 * DS/DNSKEY algorithms are all unsupported are deleted from the tree.
 * @param anchors: trust anchor storage.
 * @return: false on error (out of memory during assemble).
 */
static int anchors_assemble_rrsets(struct val_anchors* anchors)
{
	struct trust_anchor* ta;
	struct trust_anchor* next;
	size_t nods, nokey;
	lock_basic_lock(&anchors->lock);
	ta=(struct trust_anchor*)rbtree_first(anchors->tree);
	while((rbnode_type*)ta != RBTREE_NULL) {
		/* fetch successor first: ta may be deleted below */
		next = (struct trust_anchor*)rbtree_next(&ta->node);
		lock_basic_lock(&ta->lock);
		/* autotrust anchors assemble themselves; empty anchors
		 * (insecure points) have nothing to assemble */
		if(ta->autr || (ta->numDS == 0 && ta->numDNSKEY == 0)) {
			lock_basic_unlock(&ta->lock);
			ta = next; /* skip */
			continue;
		}
		if(!anchors_assemble(ta)) {
			log_err("out of memory");
			/* release both locks before the error return */
			lock_basic_unlock(&ta->lock);
			lock_basic_unlock(&anchors->lock);
			return 0;
		}
		/* count records whose algorithms we cannot validate */
		nods = anchors_ds_unsupported(ta);
		nokey = anchors_dnskey_unsupported(ta);
		if(nods) {
			log_nametypeclass(0, "warning: unsupported "
				"algorithm for trust anchor",
				ta->name, LDNS_RR_TYPE_DS, ta->dclass);
		}
		if(nokey) {
			log_nametypeclass(0, "warning: unsupported "
				"algorithm for trust anchor",
				ta->name, LDNS_RR_TYPE_DNSKEY, ta->dclass);
		}
		/* if nothing at all is usable, drop the anchor entirely */
		if(nods == ta->numDS && nokey == ta->numDNSKEY) {
			char b[257]; /* large enough for a dname string */
			dname_str(ta->name, b);
			log_warn("trust anchor %s has no supported algorithms,"
				" the anchor is ignored (check if you need to"
				" upgrade unbound and "
#ifdef HAVE_LIBRESSL
				"libressl"
#else
				"openssl"
#endif
				")", b);
			(void)rbtree_delete(anchors->tree, &ta->node);
			lock_basic_unlock(&ta->lock);
			/* do not leave a dangling dlv pointer behind */
			if(anchors->dlv_anchor == ta)
				anchors->dlv_anchor = NULL;
			anchors_delfunc(&ta->node, NULL);
			ta = next;
			continue;
		}
		lock_basic_unlock(&ta->lock);
		ta = next;
	}
	lock_basic_unlock(&anchors->lock);
	return 1;
}
/**
 * Remove an entry from the hash table by hash and key.
 * If no such entry exists this is a no-op.  The entry's key and data
 * are freed via the table's delete callbacks, outside of the table lock.
 */
void lruhash_remove(struct lruhash *table, hashvalue_t hash, void *key)
{
	struct lruhash_entry *entry;
	struct lruhash_bucket *bucket;
	void *d;
	lock_basic_lock(&table->lock);
	bucket = &table->array[hash & table->size_mask];
	if((entry=bucket_find_entry(table, bucket, hash, key))) {
		/* unlink from both the bucket chain and the LRU list */
		bucket_overflow_remove(bucket, entry);
		lru_remove(table, entry);
	} else {
		/* not present; nothing to do */
		lock_basic_unlock(&table->lock);
		return;
	}
	table->num--;
	/* briefly hold the entry lock so no reader is still using it
	 * while we account for its size */
	lock_basic_lock(&entry->lock);
	table->space_used -= (*table->sizefunc)(entry->key, entry->data);
	lock_basic_unlock(&entry->lock);
	lock_basic_unlock(&table->lock);
	/* delete key and data outside of the table lock; the entry is
	 * no longer reachable from the table at this point */
	d = entry->data;
	(*table->delkeyfunc)(entry->key);
	(*table->deldatafunc)(d);
}
/**
 * Remove an insecure (empty) trust point for the given name and class.
 * If the anchor holds keys or is autotrust-managed it is left alone.
 */
void anchors_delete_insecure(struct val_anchors* anchors, uint16_t c,
	uint8_t* nm)
{
	struct trust_anchor key;
	struct trust_anchor* ta;
	/* build a lookup key on the stack; node.key must self-reference
	 * for the rbtree comparison to work */
	key.node.key = &key;
	key.name = nm;
	key.namelabs = dname_count_size_labels(nm, &key.namelen);
	key.dclass = c;
	lock_basic_lock(&anchors->lock);
	if(!(ta=(struct trust_anchor*)rbtree_search(anchors->tree, &key))) {
		lock_basic_unlock(&anchors->lock);
		/* nothing there */
		return;
	}
	/* lock it to drive away other threads that use it */
	lock_basic_lock(&ta->lock);
	/* see if its really an insecure point */
	if(ta->keylist || ta->autr || ta->numDS || ta->numDNSKEY) {
		lock_basic_unlock(&anchors->lock);
		lock_basic_unlock(&ta->lock);
		/* its not an insecure point, do not remove it */
		return;
	}
	/* remove from tree */
	(void)rbtree_delete(anchors->tree, &ta->node);
	/* parent pointers of the remaining anchors must be recomputed
	 * while the main lock is still held */
	anchors_init_parents_locked(anchors);
	lock_basic_unlock(&anchors->lock);
	/* actual free of data */
	lock_basic_unlock(&ta->lock);
	anchors_delfunc(&ta->node, NULL);
}
/**
 * Handle signals.
 * Records receipt of SIGHUP/SIGTERM in counters and wakes the engine's
 * signal condition so the main loop can act on it.
 * NOTE(review): both cases share identical wake-up code; also, taking a
 * mutex and calling the logger from a signal handler is not
 * async-signal-safe in general — presumably the locking primitives used
 * here tolerate it; confirm against the platform lock implementation.
 */
void signal_handler(sig_atomic_t sig)
{
	switch (sig) {
	case SIGHUP:
		ods_log_debug("[%s] SIGHUP received", signal_str);
		signal_hup_recvd++;
		if (signal_engine) {
			lock_basic_lock(&signal_engine->signal_lock);
			/* [LOCK] signal */
			lock_basic_alarm(&signal_engine->signal_cond);
			/* [UNLOCK] signal */
			lock_basic_unlock(&signal_engine->signal_lock);
		}
		break;
	case SIGTERM:
		ods_log_debug("[%s] SIGTERM received", signal_str);
		signal_term_recvd++;
		if (signal_engine) {
			lock_basic_lock(&signal_engine->signal_lock);
			/* [LOCK] signal */
			lock_basic_alarm(&signal_engine->signal_cond);
			/* [UNLOCK] signal */
			lock_basic_unlock(&signal_engine->signal_lock);
		}
		break;
	default:
		/* other signals are ignored */
		break;
	}
	return;
}
/**
 * Wait for all outstanding async queries to complete.
 * Holds the rrpipe lock while waiting for and parsing results;
 * user callbacks are invoked without any lock held.
 * @param ctx: the library context.
 * @return UB_NOERROR when all async queries are done, or an error code.
 */
int ub_wait(struct ub_ctx* ctx)
{
	int err;
	ub_callback_t cb;
	void* cbarg;
	struct ub_result* res;
	int r;
	uint8_t* msg;
	uint32_t len;
	/* this is basically the same loop as _process(), but with changes.
	 * holds the rrpipe lock and waits with tube_wait */
	while(1) {
		lock_basic_lock(&ctx->rrpipe_lock);
		/* cfglock protects num_async; check for completion */
		lock_basic_lock(&ctx->cfglock);
		if(ctx->num_async == 0) {
			lock_basic_unlock(&ctx->cfglock);
			lock_basic_unlock(&ctx->rrpipe_lock);
			break;
		}
		lock_basic_unlock(&ctx->cfglock);
		/* keep rrpipe locked, while
		 * 	o waiting for pipe readable
		 * 	o parsing message
		 * 	o possibly decrementing num_async
		 * do callback without lock */
		r = tube_wait(ctx->rr_pipe);
		if(r) {
			r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
			if(r == 0) {
				/* pipe closed or error */
				lock_basic_unlock(&ctx->rrpipe_lock);
				return UB_PIPE;
			}
			if(r == -1) {
				/* nonblocking read had nothing; retry */
				lock_basic_unlock(&ctx->rrpipe_lock);
				continue;
			}
			r = process_answer_detail(ctx, msg, len,
				&cb, &cbarg, &err, &res);
			lock_basic_unlock(&ctx->rrpipe_lock);
			free(msg);
			if(r == 0)
				return UB_PIPE;
			if(r == 2)
				/* a full answer: deliver it, unlocked */
				(*cb)(cbarg, err, res);
		} else {
			lock_basic_unlock(&ctx->rrpipe_lock);
		}
	}
	return UB_NOERROR;
}
/**
 * Run engine, run!.
 * Main loop of the signer engine: starts workers/drudgers, then loops
 * capturing signals until asked to exit or reload.  With single_run set,
 * exits once all zones have been processed.
 */
static void
engine_run(engine_type* engine, int single_run)
{
    if (!engine) {
        return;
    }
    engine_start_workers(engine);
    engine_start_drudgers(engine);

    lock_basic_lock(&engine->signal_lock);
    engine->signal = SIGNAL_RUN;
    lock_basic_unlock(&engine->signal_lock);

    /* NOTE(review): need_to_exit/need_to_reload are read here without the
     * signal lock — presumably set only from this thread or benignly racy;
     * confirm against the engine's threading model. */
    while (!engine->need_to_exit && !engine->need_to_reload) {
        lock_basic_lock(&engine->signal_lock);
        engine->signal = signal_capture(engine->signal);
        switch (engine->signal) {
            case SIGNAL_RUN:
                ods_log_assert(1);
                break;
            case SIGNAL_RELOAD:
                engine->need_to_reload = 1;
                break;
            case SIGNAL_SHUTDOWN:
                engine->need_to_exit = 1;
                break;
            default:
                /* BUGFIX: print the captured signal value; the previous
                 * code passed the identifier `signal` (the libc function)
                 * to %d, which is undefined behavior. */
                ods_log_warning("[%s] invalid signal %d captured, "
                    "keep running", engine_str, engine->signal);
                engine->signal = SIGNAL_RUN;
                break;
        }
        lock_basic_unlock(&engine->signal_lock);

        if (single_run) {
            engine->need_to_exit = engine_all_zones_processed(engine);
        }

        /* nothing to do: sleep on the signal condition (up to an hour),
         * a signal handler alarm wakes us early */
        lock_basic_lock(&engine->signal_lock);
        if (engine->signal == SIGNAL_RUN && !single_run) {
           ods_log_debug("[%s] taking a break", engine_str);
           lock_basic_sleep(&engine->signal_cond, &engine->signal_lock, 3600);
        }
        lock_basic_unlock(&engine->signal_lock);
    }
    ods_log_debug("[%s] signer halted", engine_str);
    engine_stop_drudgers(engine);
    engine_stop_workers(engine);
    /* refresh the HSM connection for the next run; result intentionally
     * ignored here */
    (void)lhsm_reopen(engine->config->cfg_filename);
    return;
}
/**
 * Look up an entry by hash and key.
 * On a hit the entry is moved to the LRU front and returned with its
 * own lock held; the caller must unlock entry->lock when done.
 * Returns NULL when the key is not present.
 */
struct lruhash_entry *lruhash_lookup(struct lruhash *table,
	hashvalue_t hash, void *key)
{
	struct lruhash_entry *entry;
	struct lruhash_bucket *bucket;
	lock_basic_lock(&table->lock);
	bucket = &table->array[hash & table->size_mask];
	if((entry=bucket_find_entry(table, bucket, hash, key))) {
		/* mark as recently used */
		lru_touch(table, entry);
		/* acquire the entry lock while still holding the table
		 * lock, so the entry cannot be deleted underneath us */
		lock_basic_lock(&entry->lock);
	}
	lock_basic_unlock(&table->lock);
	return entry;
}
/** check if negative cache is still valid */ static void check_neg_invariants(struct val_neg_cache* neg) { struct val_neg_zone* z; /* check structure of LRU list */ lock_basic_lock(&neg->lock); check_lru(neg); unit_assert(neg->max == 1024*1024); unit_assert(neg->nsec3_max_iter == 1500); unit_assert(neg->tree.cmp == &val_neg_zone_compare); if(neg->tree.count == 0) { /* empty */ unit_assert(neg->tree.count == 0); unit_assert(neg->first == NULL); unit_assert(neg->last == NULL); unit_assert(neg->use == 0); lock_basic_unlock(&neg->lock); return; } unit_assert(neg->first != NULL); unit_assert(neg->last != NULL); RBTREE_FOR(z, struct val_neg_zone*, &neg->tree) { check_zone_invariants(neg, z); } lock_basic_unlock(&neg->lock); }
/** extended bg result callback, this function is ub_callback_t */ static void ext_callback(void* mydata, int err, struct ub_result* result) { struct track_id* my_id = (struct track_id*)mydata; int doprint = 0; if(my_id) { /* I have an id, make sure we are not cancelled */ lock_basic_lock(&my_id->lock); if(doprint) printf("cb %d: ", my_id->id); if(my_id->cancel) { printf("error: query id=%d returned, but was cancelled\n", my_id->id); abort(); exit(1); } lock_basic_unlock(&my_id->lock); } ext_check_result("ext_callback", err, result); log_assert(result); if(doprint) { struct lookinfo pi; pi.name = result?result->qname:"noname"; pi.result = result; pi.err = 0; print_result(&pi); } ub_resolve_free(result); }
//check table static void check_table(struct lruhash *table) { struct lruhash_entry *p; size_t c = 0; lock_basic_lock(&table->lock); unit_assert(table->num <= table->size); unit_assert(table->size_mask == (int)table->size - 1); unit_assert((table->lru_head && table->lru_tail) || (!table->lru_head && !table->lru_tail)); unit_assert(table->space_used <= table->space_max); if(table->lru_head) unit_assert(table->lru_head->prev == NULL); if(table->lru_tail) unit_assert(table->lru_tail->next == NULL); p = table->lru_head; while(p) { if(p->prev) { unit_assert(p->prev->next == p); } if(p->next) { unit_assert(p->next->prev == p); } c++; p = p->next; } unit_assert(c == table->num); unit_assert(table->space_used == table->num * sizefunc(NULL, NULL)); lock_basic_unlock(&table->lock); }
/** queue a result message on the tube's answer list and signal the
 * event handle; takes ownership of msg (freed on failure).
 * @return 1 on success, 0 on out of memory. */
int tube_queue_item(struct tube* tube, uint8_t* msg, size_t len)
{
	struct tube_res_list* item;
	item = (struct tube_res_list*)malloc(sizeof(*item));
	verbose(VERB_ALGO, "tube queue_item len %d", (int)len);
	if(item == NULL) {
		free(msg);
		log_err("out of memory for async answer");
		return 0;
	}
	item->buf = msg;
	item->len = len;
	item->next = NULL;
	lock_basic_lock(&tube->res_lock);
	/* add at back of list, since the first one may be partially written */
	if(tube->res_last == NULL)
		tube->res_list = item;
	else	tube->res_last->next = item;
	tube->res_last = item;
	/* signal the eventhandle */
	if(!WSASetEvent(tube->event)) {
		log_err("WSASetEvent: %s", wsa_strerror(WSAGetLastError()));
	}
	lock_basic_unlock(&tube->res_lock);
	return 1;
}
/** lookup the single stored (key,data) element of the test backend.
 * @return 1 and fill result_buffer on a key match that fits,
 *	0 when not found or when the data is too large for the buffer. */
static int
testframe_lookup(struct module_env* env, struct cachedb_env* cachedb_env,
	char* key, struct sldns_buffer* result_buffer)
{
	struct testframe_moddata* d = (struct testframe_moddata*)
		cachedb_env->backend_data;
	int found = 0;
	(void)env;
	verbose(VERB_ALGO, "testframe_lookup of %s", key);
	lock_basic_lock(&d->lock);
	if(d->stored_key != NULL && strcmp(d->stored_key, key) == 0) {
		if(d->stored_datalen > sldns_buffer_capacity(result_buffer)) {
			/* stored element does not fit the caller's buffer */
			lock_basic_unlock(&d->lock);
			return 0; /* too large */
		}
		verbose(VERB_ALGO, "testframe_lookup found %d bytes",
			(int)d->stored_datalen);
		sldns_buffer_clear(result_buffer);
		sldns_buffer_write(result_buffer, d->stored_data,
			d->stored_datalen);
		sldns_buffer_flip(result_buffer);
		found = 1;
	}
	lock_basic_unlock(&d->lock);
	return found;
}
/** initialise parent pointers in the tree, taking the anchors lock
 * around the locked-variant helper */
static void
init_parents(struct val_anchors* anchors)
{
	lock_basic_lock(&anchors->lock);
	anchors_init_parents_locked(anchors);
	lock_basic_unlock(&anchors->lock);
}
/** create new trust anchor object.
 * Allocates from the anchors' regional (so no explicit free on error),
 * copies the name, initialises the anchor's lock and inserts the node
 * into the tree under the main anchors lock.
 * @return the new anchor, or NULL on allocation failure. */
static struct trust_anchor* anchor_new_ta(struct val_anchors* anchors,
	uint8_t* name, int namelabs, size_t namelen, uint16_t dclass)
{
#ifdef UNBOUND_DEBUG
	rbnode_t* r;
#endif
	struct trust_anchor* ta = (struct trust_anchor*)regional_alloc(
		anchors->region, sizeof(struct trust_anchor));
	if(!ta)
		return NULL;
	memset(ta, 0, sizeof(*ta));
	/* node.key self-reference is required by the rbtree comparison */
	ta->node.key = ta;
	ta->name = regional_alloc_init(anchors->region, name, namelen);
	if(!ta->name)
		return NULL;
	ta->namelabs = namelabs;
	ta->namelen = namelen;
	ta->dclass = dclass;
	lock_basic_init(&ta->lock);
	lock_basic_lock(&anchors->lock);
	/* insert result only captured in debug builds for the assert */
#ifdef UNBOUND_DEBUG
	r =
#endif
	rbtree_insert(anchors->tree, &ta->node);
	lock_basic_unlock(&anchors->lock);
	/* a NULL insert result would mean a duplicate name+class */
	log_assert(r != NULL);
	return ta;
}
/**
 * Background-thread completion callback for a context query.
 * For cancelled queries (or when the worker wants to quit) the query is
 * deleted (bg thread case) and no answer is delivered; otherwise the
 * security status and rcode are recorded and the result queued.
 */
void libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus)
{
	struct ctx_query* q = (struct ctx_query*)arg;

	if(q->cancelled || q->w->back->want_to_quit) {
		if(q->w->is_bg_thread) {
			/* delete it now */
			struct ub_ctx* ctx = q->w->ctx;
			lock_basic_lock(&ctx->cfglock);
			(void)rbtree_delete(&ctx->queries, q->node.key);
			ctx->num_async--;
			context_query_delete(q);
			lock_basic_unlock(&ctx->cfglock);
		}
		/* cancelled, do not give answer */
		return;
	}
	q->msg_security = s;
	/* no answer buffer: use the scratch buffer for the error encode */
	if(!buf)
		buf = q->w->env->scratch_buffer;
	if(rcode != 0) {
		error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
	}
	add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus);
}
/**
 * Initialise the parent pointers of all trust anchors in the tree.
 * Caller must hold the main anchors lock.  Relies on rbtree sort order
 * (canonical dname order per class) so that a node's parent is always
 * an earlier node in the iteration.
 */
void anchors_init_parents_locked(struct val_anchors* anchors)
{
	struct trust_anchor* node, *prev = NULL, *p;
	int m;
	/* nobody else can grab locks because we hold the main lock.
	 * Thus the previous items, after unlocked, are not deleted */
	RBTREE_FOR(node, struct trust_anchor*, anchors->tree) {
		lock_basic_lock(&node->lock);
		node->parent = NULL;
		/* first node of a class has no parent candidate */
		if(!prev || prev->dclass != node->dclass) {
			prev = node;
			lock_basic_unlock(&node->lock);
			continue;
		}
		/* m = number of labels shared with the previous name */
		(void)dname_lab_cmp(prev->name, prev->namelabs, node->name,
			node->namelabs, &m); /* we know prev is smaller */
		/* sort order like: . com. bla.com. zwb.com. net. */
		/* find the previous, or parent-parent-parent */
		for(p = prev; p; p = p->parent)
			/* looking for name with few labels, a parent */
			if(p->namelabs <= m) {
				/* ==: since prev matched m, this is closest*/
				/* <: prev matches more, but is not a parent,
				 * this one is a (grand)parent */
				node->parent = p;
				break;
			}
		lock_basic_unlock(&node->lock);
		prev = node;
	}
}
/**
 * Make sure that no appointed jobs have failed.
 * Inspects the worker's job counters under the worker lock and reports
 * an error status when jobs failed, were left unfinished, or the worker
 * was asked to exit.
 */
static ods_status
worker_check_jobs(worker_type* worker, task_type* task)
{
    ods_status status = ODS_STATUS_OK;
    ods_log_assert(worker);
    ods_log_assert(task);
    lock_basic_lock(&worker->worker_lock);
    if (worker->jobs_failed) {
        ods_log_error("[%s[%i]] sign zone %s failed: %u RRsets failed",
            worker2str(worker->type), worker->thread_num,
            task_who2str(task), worker->jobs_failed);
        status = ODS_STATUS_ERR;
    } else if (worker->jobs_completed != worker->jobs_appointed) {
        ods_log_error("[%s[%i]] sign zone %s failed: processed %u of %u "
            "RRsets", worker2str(worker->type), worker->thread_num,
            task_who2str(task), worker->jobs_completed,
            worker->jobs_appointed);
        status = ODS_STATUS_ERR;
    } else if (worker->need_to_exit) {
        ods_log_debug("[%s[%i]] sign zone %s failed: worker needs to exit",
            worker2str(worker->type), worker->thread_num,
            task_who2str(task));
        status = ODS_STATUS_ERR;
    } else {
        ods_log_debug("[%s[%i]] sign zone %s ok: %u of %u RRsets "
            "succeeded", worker2str(worker->type), worker->thread_num,
            task_who2str(task), worker->jobs_completed,
            worker->jobs_appointed);
        ods_log_assert(worker->jobs_appointed == worker->jobs_completed);
    }
    /* single unlock point for all branches */
    lock_basic_unlock(&worker->worker_lock);
    return status;
}
/**
 * Create worker.
 * Allocates a worker from the given allocator, initialises its lock and
 * alarm, and zeroes its bookkeeping fields under the fresh lock.
 * @return the new worker, or NULL on failure.
 */
worker_type*
worker_create(allocator_type* allocator, int num, worker_id type)
{
    worker_type* w = NULL;
    if (!allocator) {
        return NULL;
    }
    w = (worker_type*) allocator_alloc(allocator, sizeof(worker_type));
    if (!w) {
        return NULL;
    }
    ods_log_debug("[%s[%i]] create", worker2str(type), num+1);
    lock_basic_init(&w->worker_lock);
    lock_basic_set(&w->worker_alarm);
    lock_basic_lock(&w->worker_lock);
    /* identity */
    w->allocator = allocator;
    w->thread_num = num +1;
    w->type = type;
    /* links, filled in later by the engine */
    w->engine = NULL;
    w->task = NULL;
    w->working_with = TASK_NONE;
    /* state and job counters start at rest */
    w->need_to_exit = 0;
    w->clock_in = 0;
    w->jobs_appointed = 0;
    w->jobs_completed = 0;
    w->jobs_failed = 0;
    w->sleeping = 0;
    w->waiting = 0;
    lock_basic_unlock(&w->worker_lock);
    return w;
}
struct alloc_cache* context_obtain_alloc(struct ub_ctx* ctx, int locking) { struct alloc_cache* a; int tnum = 0; if(locking) { lock_basic_lock(&ctx->cfglock); } a = ctx->alloc_list; if(a) ctx->alloc_list = a->super; /* snip off list */ else tnum = ctx->thr_next_num++; if(locking) { lock_basic_unlock(&ctx->cfglock); } if(a) { a->super = &ctx->superalloc; return a; } a = (struct alloc_cache*)calloc(1, sizeof(*a)); if(!a) return NULL; alloc_init(a, &ctx->superalloc, tnum); return a; }
/** store a single (key,data) element in the test backend, replacing any
 * previously stored element.  On out of memory the cache is left empty. */
static void
testframe_store(struct module_env* env, struct cachedb_env* cachedb_env,
	char* key, uint8_t* data, size_t data_len)
{
	struct testframe_moddata* d = (struct testframe_moddata*)
		cachedb_env->backend_data;
	(void)env;
	lock_basic_lock(&d->lock);
	verbose(VERB_ALGO, "testframe_store %s (%d bytes)", key, (int)data_len);
	/* drop the old element (if any) before storing the new one */
	free(d->stored_key);
	d->stored_key = NULL;
	free(d->stored_data);
	d->stored_data = NULL;
	d->stored_datalen = 0;
	d->stored_data = memdup(data, data_len);
	if(d->stored_data == NULL) {
		lock_basic_unlock(&d->lock);
		log_err("out of memory");
		return;
	}
	d->stored_datalen = data_len;
	d->stored_key = strdup(key);
	if(d->stored_key == NULL) {
		/* roll back the data copy; leave the cache empty */
		free(d->stored_data);
		d->stored_data = NULL;
		d->stored_datalen = 0;
	}
	lock_basic_unlock(&d->lock);
	/* (key,data) successfully stored (unless rolled back above) */
}
/**
 * Event-based completion callback for a context query.
 * Snapshots the callback, its argument and the cancelled flag, deletes
 * the query bookkeeping under the cfglock, and then (if not cancelled)
 * invokes the user callback outside of any lock.
 * NOTE(review): on the non-cancelled path buf is dereferenced; callers
 * presumably always supply a buffer here — confirm upstream.
 */
void libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	/* snapshot before deletion frees q */
	ub_event_callback_type cb = (ub_event_callback_type)q->cb;
	void* cb_arg = q->cb_arg;
	int cancelled = q->cancelled;

	/* delete it now */
	struct ub_ctx* ctx = q->w->ctx;
	lock_basic_lock(&ctx->cfglock);
	(void)rbtree_delete(&ctx->queries, q->node.key);
	ctx->num_async--;
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);

	if(!cancelled) {
		/* call callback */
		int sec = 0;
		/* map sec_status to the public 0/1/2 security level */
		if(s == sec_status_bogus)
			sec = 1;
		else if(s == sec_status_secure)
			sec = 2;
		(*cb)(cb_arg, rcode, (void*)sldns_buffer_begin(buf),
			(int)sldns_buffer_limit(buf), sec, why_bogus);
	}
}
/** OpenSSL locking callback: lock or unlock the static lock with the
 * given index, depending on the CRYPTO_LOCK bit of mode. */
static void
sdns_openssl_lock_cb(int mode, int type, const char *file, int line)
{
	if((mode&CRYPTO_LOCK))
		lock_basic_lock(&sdns_openssl_locks[type]);
	else
		lock_basic_unlock(&sdns_openssl_locks[type]);
}
/**
 * Notify a worker.
 * Signals one waiter on the condition, holding the lock around the
 * signal so the wakeup cannot be lost.
 */
void
worker_notify(lock_basic_type* lock, cond_basic_type* condition)
{
    lock_basic_lock(lock);
    lock_basic_alarm(condition);
    lock_basic_unlock(lock);
}
/** get memory in use by the negative cache: the structure itself plus
 * the tracked usage, read under the cache lock */
size_t val_neg_get_mem(struct val_neg_cache* neg)
{
	size_t total;
	lock_basic_lock(&neg->lock);
	total = sizeof(*neg) + neg->use;
	lock_basic_unlock(&neg->lock);
	return total;
}
/**
 * Notify all workers.
 * Broadcasts on the condition, holding the lock around the broadcast
 * so no wakeup is lost.
 */
void
worker_notify_all(lock_basic_type* lock, cond_basic_type* condition)
{
    lock_basic_lock(lock);
    lock_basic_broadcast(condition);
    lock_basic_unlock(lock);
}
/**
 * Insert (or update) an entry in the hash table.
 * If the key already exists, the existing entry's data is replaced and
 * the caller's key is deleted.  May reclaim LRU entries when over the
 * space limit, or grow the table when over the count limit; reclaimed
 * entries are freed outside of the table lock.
 */
void lruhash_insert(struct lruhash *table, hashvalue_t hash,
	struct lruhash_entry *entry, void *data)
{
	struct lruhash_bucket *bucket;
	struct lruhash_entry *found, *reclaimlist=NULL;
	size_t need_size;
	/* size accounting uses the (key,new-data) pair */
	need_size = table->sizefunc(entry->key, data);

	/* find bucket */
	lock_basic_lock(&table->lock);
	bucket = &table->array[hash & table->size_mask];

	/* see if entry exists */
	if(!(found=bucket_find_entry(table, bucket, hash, entry->key))) {
		/* not found: push the new entry onto the bucket's
		 * overflow chain and the LRU front */
		entry->overflow_next = bucket->overflow_list;
		bucket->overflow_list = entry;
		lru_front(table, entry);
		table->num++;
		table->space_used += need_size;
	} else {
		/* found: replace data; account for the size delta and
		 * delete the caller's now-unneeded key */
		table->space_used += need_size -
			(*table->sizefunc)(found->key, found->data);
		(*table->delkeyfunc)(entry->key);
		lru_touch(table, found);
		/* swap data under the entry lock so readers never see a
		 * half-updated entry */
		lock_basic_lock(&found->lock);
		(*table->deldatafunc)(found->data);
		found->data = data;
		lock_basic_unlock(&found->lock);
	}
	if(table->space_used > table->space_max)
		reclaim_space(table, &reclaimlist);
	if(table->num >= table->size)
		table_grow(table);
	lock_basic_unlock(&table->lock);

	/* delete reclaimed entries without the table lock held; they
	 * are already unlinked from the table */
	while(reclaimlist) {
		struct lruhash_entry *n = reclaimlist->overflow_next;
		void *d = reclaimlist->data;
		(*table->delkeyfunc)(reclaimlist->key);
		(*table->deldatafunc)(d);
		reclaimlist = n;
	}
}
/**
 * Work.
 * Main loop of a worker thread: pop tasks from the schedule and perform
 * them; when the schedule is empty, compute a backoff timeout (bounded
 * by ODS_SE_MAX_BACKOFF) and sleep until woken or the timeout expires.
 */
void
worker_start(worker_type* worker)
{
    time_t now, timeout = 1;
    task_type *task_that_was_worked_on;
    ods_log_assert(worker);
    while (worker->need_to_exit == 0) {
        ods_log_debug("[worker[%i]]: report for duty", worker->thread_num);
        lock_basic_lock(&worker->engine->taskq->schedule_lock);
        /* [LOCK] schedule */
        worker->task = schedule_pop_task(worker->engine->taskq);
        if (worker->task) {
            /* [UNLOCK] schedule */
            lock_basic_unlock(&worker->engine->taskq->schedule_lock);
            ods_log_debug("[worker[%i]] start working", worker->thread_num);
            worker->clock_in = time(NULL);
            worker_perform_task(worker);
            task_that_was_worked_on = worker->task;
            worker->task = NULL;
            ods_log_debug("[worker[%i]] finished working", worker->thread_num);
            /* reschedule the task for its next run */
            if (task_that_was_worked_on)
                (void) lock_and_schedule_task(worker->engine->taskq,
                   task_that_was_worked_on, 1);
            /* reset backoff after useful work */
            timeout = 1;
        } else {
            ods_log_debug("[worker[%i]] nothing to do", worker->thread_num);
            worker->task = schedule_get_first_task(worker->engine->taskq);
            /* [UNLOCK] schedule */
            lock_basic_unlock(&worker->engine->taskq->schedule_lock);
            now = time_now();
            /* NOTE(review): worker->task->when is read here after the
             * schedule lock was released — presumably the task cannot be
             * freed concurrently; confirm against the scheduler's
             * ownership rules. */
            if (worker->task && !worker->engine->taskq->loading) {
                timeout = (worker->task->when - now);
            } else {
                /* exponential backoff while idle, capped */
                timeout *= 2;
                if (timeout > ODS_SE_MAX_BACKOFF) {
                    timeout = ODS_SE_MAX_BACKOFF;
                }
            }
            worker->task = NULL;
            worker_sleep(worker, timeout);
        }
    }
    return;
}
/** set verbosity of the context (both the global level and the value
 * stored in the context's config), under the cfglock */
int ub_ctx_debuglevel(struct ub_ctx* ctx, int d)
{
	lock_basic_lock(&ctx->cfglock);
	ctx->env->cfg->verbosity = d;
	verbosity = d;
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}
/** redirect the library's log output to the given FILE*, marking the
 * context as having an overridden logfile */
int ub_ctx_debugout(struct ub_ctx* ctx, void* out)
{
	lock_basic_lock(&ctx->cfglock);
	log_file((FILE*)out);
	ctx->log_out = out;
	ctx->logfile_override = 1;
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}
/**
 * Worker waiting.
 * Blocks on the condition for at most timeout seconds, with the lock
 * held around the wait as the condition variable protocol requires.
 */
void
worker_wait_timeout(lock_basic_type* lock, cond_basic_type* condition,
    time_t timeout)
{
    lock_basic_lock(lock);
    lock_basic_sleep(condition, lock, timeout);
    lock_basic_unlock(lock);
}