/**
 * Clean up zone.
 *
 */
void
zone_cleanup(zone_type* zone)
{
    allocator_type* allocator;
    lock_basic_type zone_lock;
    lock_basic_type xfr_lock;
    if (!zone) {
        return;
    }
    allocator = zone->allocator;
    /* copy the locks out by value, so they can still be destroyed
     * after the zone structure itself has been deallocated */
    zone_lock = zone->zone_lock;
    xfr_lock = zone->xfr_lock;
    ldns_rdf_deep_free(zone->apex);
    adapter_cleanup(zone->adinbound);
    adapter_cleanup(zone->adoutbound);
    namedb_cleanup(zone->db);
    ixfr_cleanup(zone->ixfr);
    xfrd_cleanup(zone->xfrd);
    notify_cleanup(zone->notify);
    signconf_cleanup(zone->signconf);
    stats_cleanup(zone->stats);
    allocator_deallocate(allocator, (void*) zone->notify_command);
    allocator_deallocate(allocator, (void*) zone->notify_args);
    allocator_deallocate(allocator, (void*) zone->policy_name);
    allocator_deallocate(allocator, (void*) zone->signconf_filename);
    allocator_deallocate(allocator, (void*) zone->name);
    allocator_deallocate(allocator, (void*) zone);
    allocator_cleanup(allocator);
    lock_basic_destroy(&xfr_lock);
    lock_basic_destroy(&zone_lock);
    return;
}
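/*
 * Several cleanup routines in this collection (zone_cleanup above, and
 * worker_cleanup, zonelist_free, fifoq_cleanup and the allocator-based
 * engine_cleanup below) share one pattern: the lock is copied out of
 * the structure by value before the structure is deallocated, and the
 * surviving copy is destroyed last. A minimal sketch of that pattern,
 * using plain pthreads in place of lock_basic_type and a hypothetical
 * struct holder (both assumptions, not from the source):
 */
#include <pthread.h>
#include <stdlib.h>

struct holder {
    pthread_mutex_t lock;
    char* name; /* example member owned by the structure */
};

static void
holder_cleanup(struct holder* h)
{
    pthread_mutex_t lock;
    if (!h) {
        return;
    }
    lock = h->lock;   /* copy the lock out by value */
    free(h->name);
    free(h);          /* h->lock is gone together with the structure */
    /* destroy the surviving copy; note that POSIX only guarantees
     * operations on the original mutex object, so this mirrors the
     * source's reliance on lock_basic_type tolerating a copy */
    pthread_mutex_destroy(&lock);
}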
void
ub_ctx_delete(struct ub_ctx* ctx)
{
    struct alloc_cache* a, *na;
    int do_stop = 1;
    if(!ctx) return;

    /* see if bg thread is created and if threads have been killed */
    /* no locks, because those may be held by terminated threads */
    /* for processes the read pipe is closed and we see that on read */
#ifdef HAVE_PTHREAD
    if(ctx->created_bg && ctx->dothread) {
        if(pthread_kill(ctx->bg_tid, 0) == ESRCH) {
            /* thread has been killed */
            do_stop = 0;
        }
    }
#endif /* HAVE_PTHREAD */
    if(do_stop)
        ub_stop_bg(ctx);
    libworker_delete_event(ctx->event_worker);

    modstack_desetup(&ctx->mods, ctx->env);
    a = ctx->alloc_list;
    while(a) {
        na = a->super;
        a->super = &ctx->superalloc;
        alloc_clear(a);
        free(a);
        a = na;
    }
    local_zones_delete(ctx->local_zones);
    lock_basic_destroy(&ctx->qqpipe_lock);
    lock_basic_destroy(&ctx->rrpipe_lock);
    lock_basic_destroy(&ctx->cfglock);
    tube_delete(ctx->qq_pipe);
    tube_delete(ctx->rr_pipe);
    if(ctx->env) {
        slabhash_delete(ctx->env->msg_cache);
        rrset_cache_delete(ctx->env->rrset_cache);
        infra_delete(ctx->env->infra_cache);
        config_delete(ctx->env->cfg);
        edns_known_options_delete(ctx->env);
        auth_zones_delete(ctx->env->auth_zones);
        free(ctx->env);
    }
    ub_randfree(ctx->seed_rnd);
    alloc_clear(&ctx->superalloc);
    traverse_postorder(&ctx->queries, delq, NULL);
    free(ctx);
#ifdef USE_WINSOCK
    WSACleanup();
#endif
}
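/*
 * For context, a minimal libunbound lifecycle sketch: every context made
 * with ub_ctx_create() is eventually torn down by the ub_ctx_delete()
 * above. Error handling is abbreviated; the queried name is arbitrary.
 */
#include <stdio.h>
#include <unbound.h>

int
main(void)
{
    struct ub_ctx* ctx = ub_ctx_create();
    struct ub_result* result = NULL;
    int r;
    if(!ctx) {
        fprintf(stderr, "could not create unbound context\n");
        return 1;
    }
    /* rrtype 1 = A, rrclass 1 = IN */
    r = ub_resolve(ctx, "www.example.com", 1, 1, &result);
    if(r == 0 && result && result->havedata)
        printf("resolved %s\n", result->qname);
    ub_resolve_free(result);
    ub_ctx_delete(ctx); /* stops the bg worker, frees caches and locks */
    return 0;
}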
/**
 * Clean up engine.
 *
 */
void
engine_cleanup(engine_type* engine)
{
    size_t i = 0;
    if (!engine) {
        return;
    }
    if (engine->workers && engine->config) {
        for (i=0; i < (size_t) engine->config->num_worker_threads; i++) {
            worker_cleanup(engine->workers[i]);
        }
        free(engine->workers);
    }
    if (engine->drudgers && engine->config) {
        for (i=0; i < (size_t) engine->config->num_signer_threads; i++) {
            worker_cleanup(engine->drudgers[i]);
        }
        free(engine->drudgers);
    }
    zonelist_cleanup(engine->zonelist);
    schedule_cleanup(engine->taskq);
    fifoq_cleanup(engine->signq);
    cmdhandler_cleanup(engine->cmdhandler);
    dnshandler_cleanup(engine->dnshandler);
    xfrhandler_cleanup(engine->xfrhandler);
    engine_config_cleanup(engine->config);
    lock_basic_destroy(&engine->signal_lock);
    lock_basic_off(&engine->signal_cond);
    free(engine);
}
struct lruhash*
lruhash_create(size_t size, size_t maxmem, lruhash_sizefunc_t sizefunc,
    lruhash_compfunc_t compfunc, lruhash_delkeyfunc_t delkeyfunc,
    lruhash_deldatafunc_t deldatafunc)
{
    struct lruhash* table = (struct lruhash*)calloc(1,
        sizeof(struct lruhash));
    if(!table)
        return NULL;
    lock_basic_init(&table->lock);
    table->sizefunc = sizefunc;
    table->compfunc = compfunc;
    table->delkeyfunc = delkeyfunc;
    table->deldatafunc = deldatafunc;
    table->size = size;
    /* size must be a power of 2 for this mask to select a bucket */
    table->size_mask = (int)(size-1);
    table->lru_head = NULL;
    table->lru_tail = NULL;
    table->num = 0;
    table->space_used = 0;
    table->space_max = maxmem;
    table->array = calloc(table->size, sizeof(struct lruhash_bucket));
    if(!table->array) {
        lock_basic_destroy(&table->lock);
        free(table);
        return NULL;
    }
    return table;
}
void
neg_cache_delete(struct val_neg_cache* neg)
{
    if(!neg) return;
    lock_basic_destroy(&neg->lock);
    /* delete all the zones in the tree */
    traverse_postorder(&neg->tree, &neg_clear_zones, NULL);
    free(neg);
}
/** destroy locks in tree and delete autotrust anchors */
static void
anchors_delfunc(rbnode_t* elem, void* ATTR_UNUSED(arg))
{
    struct trust_anchor* ta = (struct trust_anchor*)elem;
    if(ta->autr) {
        autr_point_delete(ta);
    } else {
        lock_basic_destroy(&ta->lock);
    }
}
void
lruhash_delete(struct lruhash* table)
{
    size_t i;
    if(!table)
        return;
    lock_basic_destroy(&table->lock);
    for(i=0; i<table->size; i++)
        bucket_delete(table, &table->array[i]);
    free(table->array);
    free(table);
}
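/*
 * A hedged usage sketch for the lruhash_create()/lruhash_delete() pair
 * above. The excerpt only shows the callback typedef names, so the
 * (key/data, user-argument) parameter lists below are assumptions; the
 * lruhash declarations are presumed in scope (e.g. via lruhash.h).
 */
#include <stdlib.h>

static size_t my_size(void* key, void* data)
{ (void)key; (void)data; return 64; /* assumed fixed cost per entry */ }

static int my_comp(void* k1, void* k2)
{ return (k1 == k2) ? 0 : ((k1 < k2) ? -1 : 1); }

static void my_delkey(void* key, void* arg)
{ (void)arg; free(key); }

static void my_deldata(void* data, void* arg)
{ (void)arg; free(data); }

static void
example_table(void)
{
    /* 1024 buckets (a power of 2, as the size_mask requires) and
     * roughly a 1 MB memory cap */
    struct lruhash* table = lruhash_create(1024, 1024*1024,
        my_size, my_comp, my_delkey, my_deldata);
    if(!table)
        return; /* allocation failed */
    /* ... insert and look up entries ... */
    lruhash_delete(table); /* destroys the table lock and all buckets */
}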
void
sdns_openssl_lock_delete(void)
{
    int i;
    if(!sdns_openssl_locks)
        return;
    CRYPTO_set_id_callback(NULL);
    CRYPTO_set_locking_callback(NULL);
    for(i=0; i<CRYPTO_num_locks(); i++) {
        lock_basic_destroy(&sdns_openssl_locks[i]);
    }
    free(sdns_openssl_locks);
}
/**
 * Assemble the rrsets in the anchors, ready for use by validator.
 * @param anchors: trust anchor storage.
 * @return: false on error.
 */
static int
anchors_assemble_rrsets(struct val_anchors* anchors)
{
    struct trust_anchor* ta;
    struct trust_anchor* next;
    size_t nods, nokey;
    lock_basic_lock(&anchors->lock);
    ta = (struct trust_anchor*)rbtree_first(anchors->tree);
    while((rbnode_t*)ta != RBTREE_NULL) {
        next = (struct trust_anchor*)rbtree_next(&ta->node);
        lock_basic_lock(&ta->lock);
        if(ta->autr || (ta->numDS == 0 && ta->numDNSKEY == 0)) {
            lock_basic_unlock(&ta->lock);
            ta = next; /* skip */
            continue;
        }
        if(!anchors_assemble(anchors, ta)) {
            log_err("out of memory");
            lock_basic_unlock(&ta->lock);
            lock_basic_unlock(&anchors->lock);
            return 0;
        }
        nods = anchors_ds_unsupported(ta);
        nokey = anchors_dnskey_unsupported(ta);
        if(nods) {
            log_nametypeclass(0, "warning: unsupported "
                "algorithm for trust anchor",
                ta->name, LDNS_RR_TYPE_DS, ta->dclass);
        }
        if(nokey) {
            log_nametypeclass(0, "warning: unsupported "
                "algorithm for trust anchor",
                ta->name, LDNS_RR_TYPE_DNSKEY, ta->dclass);
        }
        if(nods == ta->numDS && nokey == ta->numDNSKEY) {
            char b[257];
            dname_str(ta->name, b);
            log_warn("trust anchor %s has no supported algorithms,"
                " the anchor is ignored (check if you need to"
                " upgrade unbound and openssl)", b);
            (void)rbtree_delete(anchors->tree, &ta->node);
            lock_basic_unlock(&ta->lock);
            lock_basic_destroy(&ta->lock);
            ta = next;
            continue;
        }
        lock_basic_unlock(&ta->lock);
        ta = next;
    }
    lock_basic_unlock(&anchors->lock);
    return 1;
}
void
tube_delete(struct tube* tube)
{
    if(!tube) return;
    tube_remove_bg_listen(tube);
    tube_remove_bg_write(tube);
    tube_close_read(tube);
    tube_close_write(tube);
    if(!WSACloseEvent(tube->event))
        log_err("WSACloseEvent: %s", wsa_strerror(WSAGetLastError()));
    lock_basic_destroy(&tube->res_lock);
    verbose(VERB_ALGO, "tube deleted");
    free(tube);
}
static void
testframe_deinit(struct module_env* env, struct cachedb_env* cachedb_env)
{
    struct testframe_moddata* d = (struct testframe_moddata*)
        cachedb_env->backend_data;
    (void)env;
    verbose(VERB_ALGO, "testframe_deinit");
    if(!d)
        return;
    lock_basic_destroy(&d->lock);
    free(d->stored_key);
    free(d->stored_data);
    free(d);
}
void
ub_openssl_lock_delete(void)
{
#if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && \
    !defined(THREADS_DISABLED) && defined(CRYPTO_LOCK) && \
    OPENSSL_VERSION_NUMBER < 0x10100000L
    int i;
    if(!ub_openssl_locks)
        return;
    CRYPTO_set_id_callback(NULL);
    CRYPTO_set_locking_callback(NULL);
    for(i=0; i<CRYPTO_num_locks(); i++) {
        lock_basic_destroy(&ub_openssl_locks[i]);
    }
    free(ub_openssl_locks);
#endif /* HAVE_SSL && OPENSSL_THREADS && !THREADS_DISABLED && CRYPTO_LOCK */
}
void
anchors_delete(struct val_anchors* anchors)
{
    if(!anchors)
        return;
    /* with lock checking compiled in, unprotect the covered memory
     * areas before destroying the lock; a no-op otherwise */
    lock_unprotect(&anchors->lock, anchors->autr);
    lock_unprotect(&anchors->lock, anchors);
    lock_basic_destroy(&anchors->lock);
    traverse_postorder(anchors->tree, anchors_delfunc, NULL);
    free(anchors->tree);
    regional_destroy(anchors->region);
    autr_global_delete(anchors->autr);
    free(anchors);
}
/**
 * Clean up worker.
 *
 */
void
worker_cleanup(worker_type* worker)
{
    allocator_type* allocator;
    cond_basic_type worker_cond;
    lock_basic_type worker_lock;
    if (!worker) {
        return;
    }
    allocator = worker->allocator;
    /* copy the condition and lock out by value, so they survive the
     * deallocation of the worker structure (see zone_cleanup) */
    worker_cond = worker->worker_alarm;
    worker_lock = worker->worker_lock;
    allocator_deallocate(allocator, (void*) worker);
    lock_basic_destroy(&worker_lock);
    lock_basic_off(&worker_cond);
    return;
}
/**
 * Free zonelist.
 *
 */
void
zonelist_free(zonelist_type* zl)
{
    allocator_type* allocator;
    lock_basic_type zl_lock;
    if (!zl) {
        return;
    }
    if (zl->zones) {
        node_delfunc(zl->zones->root);
        ldns_rbtree_free(zl->zones);
        zl->zones = NULL;
    }
    allocator = zl->allocator;
    zl_lock = zl->zl_lock;
    allocator_deallocate(allocator, (void*) zl);
    lock_basic_destroy(&zl_lock);
    return;
}
/**
 * Clean up a zonelist.
 *
 */
void
zonelist_cleanup(zonelist_type* zl)
{
    allocator_type* allocator;
    lock_basic_type zl_lock;
    if (!zl) {
        return;
    }
    ods_log_debug("[%s] cleanup zonelist", zl_str);
    if (zl->zones) {
        zone_delfunc(zl->zones->root);
        ldns_rbtree_free(zl->zones);
        zl->zones = NULL;
    }
    allocator = zl->allocator;
    zl_lock = zl->zl_lock;
    allocator_deallocate(allocator, (void*) zl);
    lock_basic_destroy(&zl_lock);
    return;
}
/**
 * Clean up queue.
 *
 */
void
fifoq_cleanup(fifoq_type* q)
{
    allocator_type* allocator;
    lock_basic_type q_lock;
    cond_basic_type q_threshold;
    cond_basic_type q_nonfull;
    if (!q) {
        return;
    }
    allocator = q->allocator;
    q_lock = q->q_lock;
    q_threshold = q->q_threshold;
    q_nonfull = q->q_nonfull;
    allocator_deallocate(allocator, (void*) q);
    lock_basic_off(&q_threshold);
    lock_basic_off(&q_nonfull);
    lock_basic_destroy(&q_lock);
    return;
}
/**
 * Clean up engine.
 *
 */
void
engine_cleanup(engine_type* engine)
{
    size_t i = 0;
    allocator_type* allocator;
    cond_basic_type signal_cond;
    lock_basic_type signal_lock;
    if (!engine) {
        return;
    }
    allocator = engine->allocator;
    signal_cond = engine->signal_cond;
    signal_lock = engine->signal_lock;
    if (engine->workers && engine->config) {
        for (i=0; i < (size_t) engine->config->num_worker_threads; i++) {
            worker_cleanup(engine->workers[i]);
        }
        allocator_deallocate(allocator, (void*) engine->workers);
    }
#if HAVE_DRUDGERS
    if (engine->drudgers && engine->config) {
        for (i=0; i < (size_t) engine->config->num_signer_threads; i++) {
            worker_cleanup(engine->drudgers[i]);
        }
        allocator_deallocate(allocator, (void*) engine->drudgers);
    }
#endif
    schedule_cleanup(engine->taskq);
    fifoq_cleanup(engine->signq);
    cmdhandler_cleanup(engine->cmdhandler);
    engine_config_cleanup(engine->config);
    allocator_deallocate(allocator, (void*) engine);
    lock_basic_destroy(&signal_lock);
    lock_basic_off(&signal_cond);
    allocator_cleanup(allocator);
    return;
}
/** destroy locks in tree and delete autotrust anchors */
static void
anchors_delfunc(rbnode_t* elem, void* ATTR_UNUSED(arg))
{
    struct trust_anchor* ta = (struct trust_anchor*)elem;
    if(!ta) return;
    if(ta->autr) {
        autr_point_delete(ta);
    } else {
        struct ta_key* p, *np;
        lock_basic_destroy(&ta->lock);
        free(ta->name);
        p = ta->keylist;
        while(p) {
            np = p->next;
            free(p->data);
            free(p);
            p = np;
        }
        assembled_rrset_delete(ta->ds_rrset);
        assembled_rrset_delete(ta->dnskey_rrset);
        free(ta);
    }
}
void
ub_ctx_delete(struct ub_ctx* ctx)
{
    struct alloc_cache* a, *na;
    if(!ctx) return;

    /* stop the bg thread */
    lock_basic_lock(&ctx->cfglock);
    if(ctx->created_bg) {
        uint8_t* msg;
        uint32_t len;
        uint32_t cmd = UB_LIBCMD_QUIT;
        lock_basic_unlock(&ctx->cfglock);
        lock_basic_lock(&ctx->qqpipe_lock);
        (void)tube_write_msg(ctx->qq_pipe, (uint8_t*)&cmd,
            (uint32_t)sizeof(cmd), 0);
        lock_basic_unlock(&ctx->qqpipe_lock);
        lock_basic_lock(&ctx->rrpipe_lock);
        while(tube_read_msg(ctx->rr_pipe, &msg, &len, 0)) {
            /* discard all results except a quit confirm */
            if(context_serial_getcmd(msg, len) == UB_LIBCMD_QUIT) {
                free(msg);
                break;
            }
            free(msg);
        }
        lock_basic_unlock(&ctx->rrpipe_lock);

        /* if bg worker is a thread, wait for it to exit, so that all
         * resources are really gone. */
        lock_basic_lock(&ctx->cfglock);
        if(ctx->dothread) {
            lock_basic_unlock(&ctx->cfglock);
            ub_thread_join(ctx->bg_tid);
        } else {
            lock_basic_unlock(&ctx->cfglock);
        }
    } else {
        lock_basic_unlock(&ctx->cfglock);
    }

    modstack_desetup(&ctx->mods, ctx->env);
    a = ctx->alloc_list;
    while(a) {
        na = a->super;
        a->super = &ctx->superalloc;
        alloc_clear(a);
        free(a);
        a = na;
    }
    local_zones_delete(ctx->local_zones);
    lock_basic_destroy(&ctx->qqpipe_lock);
    lock_basic_destroy(&ctx->rrpipe_lock);
    lock_basic_destroy(&ctx->cfglock);
    tube_delete(ctx->qq_pipe);
    tube_delete(ctx->rr_pipe);
    if(ctx->env) {
        slabhash_delete(ctx->env->msg_cache);
        rrset_cache_delete(ctx->env->rrset_cache);
        infra_delete(ctx->env->infra_cache);
        config_delete(ctx->env->cfg);
        free(ctx->env);
    }
    ub_randfree(ctx->seed_rnd);
    alloc_clear(&ctx->superalloc);
    traverse_postorder(&ctx->queries, delq, NULL);
    free(ctx);
#ifdef USE_WINSOCK
    WSACleanup();
#endif
}
/** extended thread worker */
static void*
ext_thread(void* arg)
{
    struct ext_thr_info* inf = (struct ext_thr_info*)arg;
    int i, r;
    struct ub_result* result;
    struct track_id* async_ids = NULL;
    log_thread_set(&inf->thread_num);
    if(inf->thread_num > NUMTHR*2/3) {
        async_ids = (struct track_id*)calloc((size_t)inf->numq,
            sizeof(struct track_id));
        if(!async_ids) {
            printf("out of memory\n");
            exit(1);
        }
        for(i=0; i<inf->numq; i++) {
            lock_basic_init(&async_ids[i].lock);
        }
    }
    for(i=0; i<inf->numq; i++) {
        if(async_ids) {
            r = ub_resolve_async(inf->ctx, inf->argv[i%inf->argc],
                LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, &async_ids[i],
                ext_callback, &async_ids[i].id);
            checkerr("ub_resolve_async", r);
            if(i > 100) {
                lock_basic_lock(&async_ids[i-100].lock);
                r = ub_cancel(inf->ctx, async_ids[i-100].id);
                if(r != UB_NOID)
                    async_ids[i-100].cancel = 1;
                lock_basic_unlock(&async_ids[i-100].lock);
                if(r != UB_NOID)
                    checkerr("ub_cancel", r);
            }
        } else if(inf->thread_num > NUMTHR/2) {
            /* async */
            r = ub_resolve_async(inf->ctx, inf->argv[i%inf->argc],
                LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, NULL,
                ext_callback, NULL);
            checkerr("ub_resolve_async", r);
        } else {
            /* blocking */
            r = ub_resolve(inf->ctx, inf->argv[i%inf->argc],
                LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, &result);
            ext_check_result("ub_resolve", r, result);
            ub_resolve_free(result);
        }
    }
    if(inf->thread_num > NUMTHR/2) {
        r = ub_wait(inf->ctx);
        checkerr("ub_ctx_wait", r);
    }
    if(async_ids) {
        for(i=0; i<inf->numq; i++) {
            lock_basic_destroy(&async_ids[i].lock);
        }
    }
    free(async_ids);
    return NULL;
}
static void
delkey(void* k1)
{
    testkey* k = (testkey*)k1;
    lock_basic_destroy(&k->entry.lock);
    free(k);
}
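/*
 * Hypothetical wiring of the delkey() test callback above into the
 * lruhash table from earlier. Assumption (not shown in this excerpt):
 * the table's delkeyfunc receives (key, user_arg), so a small adapter
 * drops the unused user argument before delegating.
 */
static void
delkey_cb(void* key, void* arg)
{
    (void)arg;     /* user argument unused in this sketch */
    delkey(key);   /* destroys entry.lock, then frees the key */
}

/* e.g.: lruhash_create(1024, 1024*1024, my_size, my_comp,
 *                      delkey_cb, my_deldata); */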