/** create context functionality, but no pipes.
 * Initializes logging, (on windows) winsock, locks, allocator, RNG,
 * module environment and library config.  On any allocation failure,
 * everything built so far is torn down, errno is set to ENOMEM, and
 * NULL is returned. */
static struct ub_ctx*
ub_ctx_create_nopipe(void)
{
	struct ub_ctx* ctx;
	unsigned int seed;
#ifdef USE_WINSOCK
	int r;
	WSADATA wsa_data;
#endif
	log_init(NULL, 0, NULL); /* logs to stderr */
	log_ident_set("libunbound");
#ifdef USE_WINSOCK
	if((r = WSAStartup(MAKEWORD(2,2), &wsa_data)) != 0) {
		log_err("could not init winsock. WSAStartup: %s",
			wsa_strerror(r));
		return NULL;
	}
#endif
	verbosity = 0; /* errors only */
	checklock_start();
	ctx = (struct ub_ctx*)calloc(1, sizeof(*ctx));
	if(!ctx) {
		errno = ENOMEM;
		return NULL;
	}
	alloc_init(&ctx->superalloc, NULL, 0);
	/* seed the RNG from wallclock and pid; the local copy of the
	 * seed is zeroed again as soon as it has been consumed */
	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid();
	if(!(ctx->seed_rnd = ub_initstate(seed, NULL))) {
		seed = 0;
		/* seed_rnd is NULL here; presumably ub_randfree(NULL) is a
		 * no-op — confirm against its implementation */
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	seed = 0;
	lock_basic_init(&ctx->qqpipe_lock);
	lock_basic_init(&ctx->rrpipe_lock);
	lock_basic_init(&ctx->cfglock);
	ctx->env = (struct module_env*)calloc(1, sizeof(*ctx->env));
	if(!ctx->env) {
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	ctx->env->cfg = config_create_forlib();
	if(!ctx->env->cfg) {
		free(ctx->env);
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	ctx->env->alloc = &ctx->superalloc;
	ctx->env->worker = NULL;
	ctx->env->need_to_validate = 0;
	modstack_init(&ctx->mods);
	rbtree_init(&ctx->queries, &context_query_cmp);
	return ctx;
}
/**
 * Create worker.
 *
 * \param allocator memory allocator to use; must not be NULL.
 * \param num zero-based worker number; stored and logged as num+1.
 * \return new worker with all counters and pointers cleared and its
 *         lock/alarm initialized, or NULL on failure.
 */
worker_type*
worker_create(allocator_type* allocator, int num)
{
    worker_type* worker;
    if (!allocator) {
        return NULL;
    }
    /* (removed redundant ods_log_assert(allocator): it could never
     * fire after the explicit NULL guard above) */
    worker = (worker_type*) allocator_alloc(allocator, sizeof(worker_type));
    if (!worker) {
        return NULL;
    }
    ods_log_debug("create worker[%i]", num +1);
    worker->allocator = allocator;
    worker->thread_num = num +1;
    worker->engine = NULL;
    worker->task = NULL;
    worker->need_to_exit = 0;
    worker->clock_in = 0;
    worker->jobs_appointed = 0;
    worker->jobs_completed = 0;
    worker->jobs_failed = 0;
    worker->sleeping = 0;
    worker->waiting = 0;
    lock_basic_init(&worker->worker_lock);
    lock_basic_set(&worker->worker_alarm);
    return worker;
}
/**
 * Create worker.
 *
 */
worker_type*
worker_create(allocator_type* allocator, int num, worker_id type)
{
    worker_type* w = NULL;
    if (!allocator) {
        return NULL;
    }
    w = (worker_type*) allocator_alloc(allocator, sizeof(worker_type));
    if (!w) {
        return NULL;
    }
    ods_log_debug("[%s[%i]] create", worker2str(type), num+1);
    /* initialize the lock and alarm, then fill in the fields while
     * holding the worker lock */
    lock_basic_init(&w->worker_lock);
    lock_basic_set(&w->worker_alarm);
    lock_basic_lock(&w->worker_lock);
    w->allocator = allocator;
    w->thread_num = num + 1;
    w->type = type;
    w->engine = NULL;
    w->task = NULL;
    w->working_with = TASK_NONE;
    w->need_to_exit = 0;
    w->clock_in = 0;
    w->jobs_appointed = 0;
    w->jobs_completed = 0;
    w->jobs_failed = 0;
    w->sleeping = 0;
    w->waiting = 0;
    lock_basic_unlock(&w->worker_lock);
    return w;
}
/**
 * Create new schedule.
 *
 */
schedule_type*
schedule_create(allocator_type* allocator)
{
    schedule_type* sched = NULL;
    if (!allocator) {
        return NULL;
    }
    sched = (schedule_type*) allocator_alloc(allocator,
        sizeof(schedule_type));
    if (!sched) {
        ods_log_error("[%s] unable to create schedule: allocator_alloc() "
            "failed", schedule_str);
        return NULL;
    }
    sched->allocator = allocator;
    sched->loading = 0;
    sched->flushcount = 0;
    /* tasks live in a red-black tree ordered by task_compare */
    sched->tasks = ldns_rbtree_create(task_compare);
    if (!sched->tasks) {
        ods_log_error("[%s] unable to create schedule: ldns_rbtree_create() "
            "failed", schedule_str);
        allocator_deallocate(allocator, (void*) sched);
        return NULL;
    }
    lock_basic_init(&sched->schedule_lock);
    return sched;
}
/** create new trust anchor object.
 * Allocated in the anchors region (freed with the region); inserted
 * into anchors->tree under anchors->lock.  Returns NULL on alloc
 * failure. */
static struct trust_anchor*
anchor_new_ta(struct val_anchors* anchors, uint8_t* name, int namelabs,
	size_t namelen, uint16_t dclass)
{
#ifdef UNBOUND_DEBUG
	rbnode_t* r;
#endif
	struct trust_anchor* ta = (struct trust_anchor*)regional_alloc(
		anchors->region, sizeof(struct trust_anchor));
	if(!ta)
		return NULL;
	memset(ta, 0, sizeof(*ta));
	ta->node.key = ta;
	ta->name = regional_alloc_init(anchors->region, name, namelen);
	if(!ta->name)
		return NULL;
	ta->namelabs = namelabs;
	ta->namelen = namelen;
	ta->dclass = dclass;
	lock_basic_init(&ta->lock);
	lock_basic_lock(&anchors->lock);
	/* in non-debug builds the insert result is explicitly discarded;
	 * previously it was silently dropped (now consistent with the
	 * lockit variant of this function) */
#ifdef UNBOUND_DEBUG
	r =
#else
	(void)
#endif
	rbtree_insert(anchors->tree, &ta->node);
	lock_basic_unlock(&anchors->lock);
	log_assert(r != NULL);
	return ta;
}
/* Allocate and initialize the trust-anchor store: region, tree,
 * autotrust state, and a protected lock.  NULL on failure. */
struct val_anchors*
anchors_create(void)
{
	struct val_anchors* anch;
	anch = (struct val_anchors*)calloc(1, sizeof(*anch));
	if(anch == NULL)
		return NULL;
	anch->region = regional_create();
	if(anch->region == NULL) {
		free(anch);
		return NULL;
	}
	anch->tree = rbtree_create(anchor_cmp);
	if(anch->tree == NULL) {
		anchors_delete(anch);
		return NULL;
	}
	anch->autr = autr_global_create();
	if(anch->autr == NULL) {
		anchors_delete(anch);
		return NULL;
	}
	lock_basic_init(&anch->lock);
	lock_protect(&anch->lock, anch, sizeof(*anch));
	lock_protect(&anch->lock, anch->autr, sizeof(*anch->autr));
	return anch;
}
/* Create an LRU hash table with the given number of buckets and
 * memory limit; the callbacks size/compare/delete entries.
 * NULL on allocation failure. */
struct lruhash*
lruhash_create(size_t size, size_t maxmem, lruhash_sizefunc_t sizefunc,
	lruhash_compfunc_t compfunc, lruhash_delkeyfunc_t delkeyfunc,
	lruhash_deldatafunc_t deldatafunc)
{
	struct lruhash* h;
	h = (struct lruhash*)calloc(1, sizeof(struct lruhash));
	if(h == NULL)
		return NULL;
	lock_basic_init(&h->lock);
	h->sizefunc = sizefunc;
	h->compfunc = compfunc;
	h->delkeyfunc = delkeyfunc;
	h->deldatafunc = deldatafunc;
	h->size = size;
	/* usable as a bucket mask when size is a power of two —
	 * presumably a caller invariant, confirm at call sites */
	h->size_mask = (int)(size-1);
	h->lru_head = NULL;
	h->lru_tail = NULL;
	h->num = 0;
	h->space_used = 0;
	h->space_max = maxmem;
	h->array = calloc(h->size, sizeof(struct lruhash_bucket));
	if(h->array == NULL) {
		lock_basic_destroy(&h->lock);
		free(h);
		return NULL;
	}
	return h;
}
/**
 * Create engine.
 *
 * Allocates the engine on a fresh allocator, NULL/default-initializes
 * all handler and state fields, publishes the initial signal value
 * under the signal lock, and creates the zonelist, task queue and
 * signal queue.  On any sub-create failure the partially built engine
 * is released with engine_cleanup() and NULL is returned.
 */
static engine_type*
engine_create(void)
{
    engine_type* engine;
    allocator_type* allocator = allocator_create(malloc, free);
    if (!allocator) {
        ods_log_error("[%s] unable to create engine: allocator_create() "
            "failed", engine_str);
        return NULL;
    }
    engine = (engine_type*) allocator_alloc(allocator, sizeof(engine_type));
    if (!engine) {
        ods_log_error("[%s] unable to create engine: allocator_alloc() "
            "failed", engine_str);
        allocator_cleanup(allocator);
        return NULL;
    }
    engine->allocator = allocator;
    engine->config = NULL;
    engine->workers = NULL;
    engine->drudgers = NULL;
    engine->cmdhandler = NULL;
    engine->cmdhandler_done = 0;
    engine->dnshandler = NULL;
    engine->xfrhandler = NULL;
    engine->pid = -1;
    engine->uid = -1;
    engine->gid = -1;
    engine->daemonize = 0;
    engine->need_to_exit = 0;
    engine->need_to_reload = 0;
    lock_basic_init(&engine->signal_lock);
    lock_basic_set(&engine->signal_cond);
    /* set the initial signal state while holding its lock */
    lock_basic_lock(&engine->signal_lock);
    engine->signal = SIGNAL_INIT;
    lock_basic_unlock(&engine->signal_lock);
    engine->zonelist = zonelist_create(engine->allocator);
    if (!engine->zonelist) {
        engine_cleanup(engine);
        return NULL;
    }
    engine->taskq = schedule_create(engine->allocator);
    if (!engine->taskq) {
        engine_cleanup(engine);
        return NULL;
    }
    engine->signq = fifoq_create(engine->allocator);
    if (!engine->signq) {
        engine_cleanup(engine);
        return NULL;
    }
    return engine;
}
/* Allocate a zeroed test key with the given id; its hash-entry points
 * back at itself and gets a fresh lock.  Exits on allocation failure
 * (test-only code). */
static testkey*
newkey(int id)
{
	testkey* key;
	key = (testkey*)calloc(1, sizeof(testkey));
	if(key == NULL) {
		printf("calloc testkey: out of memory\n");
		exit(1);
	}
	key->id = id;
	key->entry.key = key;
	key->entry.hash = simplehash(id);
	lock_basic_init(&key->entry.lock);
	return key;
}
/* Install the OpenSSL locking callbacks (only needed for OpenSSL
 * older than 1.1.0).  Returns 0 on allocation failure, 1 otherwise. */
int
ub_openssl_lock_init(void)
{
#if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && !defined(THREADS_DISABLED) && defined(CRYPTO_LOCK) && OPENSSL_VERSION_NUMBER < 0x10100000L
	int i;
	/* one lock per slot OpenSSL asks for; reallocarray checks the
	 * count*size multiplication for overflow */
	ub_openssl_locks = (lock_basic_type*)reallocarray(
		NULL, (size_t)CRYPTO_num_locks(), sizeof(lock_basic_type));
	if(ub_openssl_locks == NULL)
		return 0;
	for(i=0; i<CRYPTO_num_locks(); i++)
		lock_basic_init(&ub_openssl_locks[i]);
	CRYPTO_set_id_callback(&ub_crypto_id_cb);
	CRYPTO_set_locking_callback(&ub_crypto_lock_cb);
#endif /* OPENSSL_THREADS */
	return 1;
}
/* Install the OpenSSL locking callbacks.
 * Returns 0 on allocation failure, 1 otherwise. */
int
ub_openssl_lock_init(void)
{
#if defined(HAVE_SSL) && defined(OPENSSL_THREADS) && !defined(THREADS_DISABLED)
	int i;
	/* calloc instead of malloc(n*size): the multiplication
	 * CRYPTO_num_locks()*sizeof(lock_basic_t) was unchecked for
	 * overflow; calloc performs that check itself (the other
	 * variant of this function uses reallocarray for the same
	 * reason).  Zero-initialization is harmless: every slot is
	 * lock_basic_init'ed below. */
	ub_openssl_locks = (lock_basic_t*)calloc(
		(size_t)CRYPTO_num_locks(), sizeof(lock_basic_t));
	if(!ub_openssl_locks)
		return 0;
	for(i=0; i<CRYPTO_num_locks(); i++) {
		lock_basic_init(&ub_openssl_locks[i]);
	}
	CRYPTO_set_id_callback(&ub_crypto_id_cb);
	CRYPTO_set_locking_callback(&ub_crypto_lock_cb);
#endif /* OPENSSL_THREADS */
	return 1;
}
/* Create the negative cache; size from cfg->neg_cache_size when a
 * config is given, otherwise 1M.  NULL (with log) on alloc failure. */
struct val_neg_cache*
val_neg_create(struct config_file* cfg, size_t maxiter)
{
	struct val_neg_cache* neg;
	neg = (struct val_neg_cache*)calloc(1, sizeof(*neg));
	if(neg == NULL) {
		log_err("Could not create neg cache: out of memory");
		return NULL;
	}
	neg->nsec3_max_iter = maxiter;
	/* default 1 M is thousands of entries */
	neg->max = cfg ? cfg->neg_cache_size : 1024*1024;
	rbtree_init(&neg->tree, &val_neg_zone_compare);
	lock_basic_init(&neg->lock);
	lock_protect(&neg->lock, neg, sizeof(*neg));
	return neg;
}
/** Initialize the testframe cachedb backend: allocate the module data,
 * store it in cachedb_env->backend_data and set up its lock.
 * \return 1 on success, 0 on out of memory (backend_data left NULL). */
static int
testframe_init(struct module_env* env, struct cachedb_env* cachedb_env)
{
	struct testframe_moddata* d;
	(void)env;
	verbose(VERB_ALGO, "testframe_init");
	d = (struct testframe_moddata*)calloc(1,
		sizeof(struct testframe_moddata));
	cachedb_env->backend_data = (void*)d;
	/* check the allocation result directly instead of re-reading it
	 * back through cachedb_env->backend_data */
	if(!d) {
		log_err("out of memory");
		return 0;
	}
	lock_basic_init(&d->lock);
	lock_protect(&d->lock, d, sizeof(*d));
	return 1;
}
int sdns_openssl_lock_init(void) { int i; sdns_openssl_locks = (lock_basic_t*)malloc( sizeof(lock_basic_t)*CRYPTO_num_locks()); if(!sdns_openssl_locks) return 0; for(i=0; i<CRYPTO_num_locks(); i++) { lock_basic_init(&sdns_openssl_locks[i]); } //openssl 1.0.0 CRYPTO_set_id_callback was replaced by CRYPTO_THREADID_set_callback openssl_set_id_callback(sdns_openssl_id_cb); CRYPTO_set_locking_callback(&sdns_openssl_lock_cb); return 1; }
/** * Create engine. * */ static engine_type* engine_create(void) { engine_type* engine; allocator_type* allocator = allocator_create(malloc, free); if (!allocator) { return NULL; } engine = (engine_type*) allocator_alloc(allocator, sizeof(engine_type)); if (!engine) { allocator_cleanup(allocator); return NULL; } engine->allocator = allocator; engine->config = NULL; engine->workers = NULL; engine->drudgers = NULL; engine->cmdhandler = NULL; engine->cmdhandler_done = 0; engine->pid = -1; engine->uid = -1; engine->gid = -1; engine->daemonize = 0; engine->need_to_exit = 0; engine->need_to_reload = 0; engine->signal = SIGNAL_INIT; lock_basic_init(&engine->signal_lock); lock_basic_set(&engine->signal_cond); engine->taskq = schedule_create(engine->allocator); if (!engine->taskq) { engine_cleanup(engine); return NULL; } engine->signq = fifoq_create(engine->allocator); if (!engine->signq) { engine_cleanup(engine); return NULL; } return engine; }
/**
 * Create engine.
 *
 * Allocates the engine with plain malloc (CHECKALLOC is a project
 * macro handling the failure case — its exact behavior is defined
 * elsewhere, not visible here), NULL/default-initializes every field,
 * publishes the initial signal value under the signal lock, and
 * creates the zonelist, task queue and signal queue; on any of those
 * failing, the partial engine is released with engine_cleanup().
 */
static engine_type*
engine_create(void)
{
    engine_type* engine;
    CHECKALLOC(engine = (engine_type*) malloc(sizeof(engine_type)));
    engine->config = NULL;
    engine->workers = NULL;
    engine->drudgers = NULL;
    engine->cmdhandler = NULL;
    engine->cmdhandler_done = 0;
    engine->dnshandler = NULL;
    engine->xfrhandler = NULL;
    engine->taskq = NULL;
    engine->signq = NULL;
    engine->pid = -1;
    engine->uid = -1;
    engine->gid = -1;
    engine->daemonize = 0;
    engine->need_to_exit = 0;
    engine->need_to_reload = 0;
    lock_basic_init(&engine->signal_lock);
    lock_basic_set(&engine->signal_cond);
    /* set the initial signal state while holding its lock */
    lock_basic_lock(&engine->signal_lock);
    engine->signal = SIGNAL_INIT;
    lock_basic_unlock(&engine->signal_lock);
    engine->zonelist = zonelist_create();
    if (!engine->zonelist) {
        engine_cleanup(engine);
        return NULL;
    }
    engine->taskq = schedule_create();
    if (!engine->taskq) {
        engine_cleanup(engine);
        return NULL;
    }
    engine->signq = fifoq_create();
    if (!engine->signq) {
        engine_cleanup(engine);
        return NULL;
    }
    return engine;
}
/**
 * Create new FIFO queue.
 *
 */
fifoq_type*
fifoq_create(allocator_type* allocator)
{
    fifoq_type* q = NULL;
    if (!allocator) {
        return NULL;
    }
    q = (fifoq_type*) allocator_alloc(allocator, sizeof(fifoq_type));
    if (!q) {
        ods_log_error("[%s] unable to create fifoq: allocator_alloc() failed",
            fifoq_str);
        return NULL;
    }
    q->allocator = allocator;
    /* start from a clean, empty queue */
    fifoq_wipe(q);
    lock_basic_init(&q->q_lock);
    lock_basic_set(&q->q_threshold);
    lock_basic_set(&q->q_nonfull);
    return q;
}
/** create new trust anchor object.
 * Heap-allocated (malloc + memdup'd name, freed by the anchor's owner);
 * inserted into anchors->tree, taking anchors->lock only when lockit
 * is nonzero — presumably lockit==0 means the caller already holds the
 * lock; confirm at call sites.  Returns NULL on alloc failure. */
static struct trust_anchor*
anchor_new_ta(struct val_anchors* anchors, uint8_t* name, int namelabs,
	size_t namelen, uint16_t dclass, int lockit)
{
#ifdef UNBOUND_DEBUG
	rbnode_t* r;
#endif
	struct trust_anchor* ta = (struct trust_anchor*)malloc(
		sizeof(struct trust_anchor));
	if(!ta)
		return NULL;
	memset(ta, 0, sizeof(*ta));
	ta->node.key = ta;
	/* own copy of the wire-format name */
	ta->name = memdup(name, namelen);
	if(!ta->name) {
		free(ta);
		return NULL;
	}
	ta->namelabs = namelabs;
	ta->namelen = namelen;
	ta->dclass = dclass;
	lock_basic_init(&ta->lock);
	if(lockit) {
		lock_basic_lock(&anchors->lock);
	}
	/* capture the insert result only in debug builds; otherwise the
	 * value is explicitly discarded (log_assert below then presumably
	 * compiles away — confirm its definition) */
#ifdef UNBOUND_DEBUG
	r =
#else
	(void)
#endif
	rbtree_insert(anchors->tree, &ta->node);
	if(lockit) {
		lock_basic_unlock(&anchors->lock);
	}
	log_assert(r != NULL);
	return ta;
}
/**
 * Create a new zone list.
 *
 */
zonelist_type*
zonelist_create(allocator_type* allocator)
{
    zonelist_type* zl = NULL;
    /* a NULL allocator falls through to the same error path as a
     * failed allocation (both log and return NULL) */
    if (allocator) {
        zl = (zonelist_type*) allocator_alloc(allocator,
            sizeof(zonelist_type));
    }
    if (!zl) {
        ods_log_error("[%s] unable to create zonelist: allocator_alloc() "
            "failed", zl_str);
        return NULL;
    }
    zl->allocator = allocator;
    zl->zones = ldns_rbtree_create(zone_compare);
    if (!zl->zones) {
        ods_log_error("[%s] unable to create zonelist: ldns_rbtree_create() "
            "failed", zl_str);
        allocator_deallocate(allocator, (void*) zl);
        return NULL;
    }
    zl->last_modified = 0;
    lock_basic_init(&zl->zl_lock);
    return zl;
}
struct tube* tube_create(void) { /* windows does not have forks like unix, so we only support * threads on windows. And thus the pipe need only connect * threads. We use a mutex and a list of datagrams. */ struct tube* tube = (struct tube*)calloc(1, sizeof(*tube)); if(!tube) { int err = errno; log_err("tube_create: out of memory"); errno = err; return NULL; } tube->event = WSACreateEvent(); if(tube->event == WSA_INVALID_EVENT) { free(tube); log_err("WSACreateEvent: %s", wsa_strerror(WSAGetLastError())); } if(!WSAResetEvent(tube->event)) { log_err("WSAResetEvent: %s", wsa_strerror(WSAGetLastError())); } lock_basic_init(&tube->res_lock); verbose(VERB_ALGO, "tube created"); return tube; }
/**
 * Create a new zone.
 *
 * \param name zone name; a trailing dot is stripped in place.
 * \param klass RR class; must be nonzero.
 * \return new zone on its own allocator, or NULL on error (partial
 *         zones are released with zone_cleanup()).
 */
zone_type*
zone_create(char* name, ldns_rr_class klass)
{
    allocator_type* allocator = NULL;
    zone_type* zone = NULL;
    if (!name || !klass) {
        return NULL;
    }
    allocator = allocator_create(malloc, free);
    if (!allocator) {
        ods_log_error("[%s] unable to create zone %s: allocator_create() "
            "failed", zone_str, name);
        return NULL;
    }
    zone = (zone_type*) allocator_alloc(allocator, sizeof(zone_type));
    if (!zone) {
        /* BUGFIX: "failed" was passed as a format argument (consuming
         * the first %s) instead of being concatenated into the format
         * string, so the log printed the wrong fields */
        ods_log_error("[%s] unable to create zone %s: allocator_alloc() "
            "failed", zone_str, name);
        allocator_cleanup(allocator);
        return NULL;
    }
    zone->allocator = allocator;
    /* [start] PS 9218653: Drop trailing dot in domain name */
    if (strlen(name) > 1 && name[strlen(name)-1] == '.') {
        name[strlen(name)-1] = '\0';
    }
    /* [end] PS 9218653 */
    zone->name = allocator_strdup(allocator, name);
    if (!zone->name) {
        ods_log_error("[%s] unable to create zone %s: allocator_strdup() "
            "failed", zone_str, name);
        zone_cleanup(zone);
        return NULL;
    }
    zone->klass = klass;
    zone->default_ttl = 3600; /* TODO: configure --default-ttl option? */
    zone->apex = ldns_dname_new_frm_str(name); /* check zone->apex? */
    zone->notify_command = NULL;
    zone->notify_ns = NULL;
    zone->notify_args = NULL;
    zone->policy_name = NULL;
    zone->signconf_filename = NULL;
    zone->adinbound = NULL;
    zone->adoutbound = NULL;
    zone->zl_status = ZONE_ZL_OK;
    zone->task = NULL;
    zone->xfrd = NULL;
    zone->notify = NULL;
    zone->db = namedb_create((void*)zone);
    if (!zone->db) {
        ods_log_error("[%s] unable to create zone %s: namedb_create() "
            "failed", zone_str, name);
        zone_cleanup(zone);
        return NULL;
    }
    zone->ixfr = ixfr_create((void*)zone);
    if (!zone->ixfr) {
        ods_log_error("[%s] unable to create zone %s: ixfr_create() "
            "failed", zone_str, name);
        zone_cleanup(zone);
        return NULL;
    }
    zone->signconf = signconf_create();
    if (!zone->signconf) {
        ods_log_error("[%s] unable to create zone %s: signconf_create() "
            "failed", zone_str, name);
        zone_cleanup(zone);
        return NULL;
    }
    /* NOTE(review): stats_create() result is not checked — confirm
     * whether a NULL stats pointer is tolerated downstream */
    zone->stats = stats_create();
    lock_basic_init(&zone->zone_lock);
    lock_basic_init(&zone->xfr_lock);
    return zone;
}
/** extended thread worker.
 * Issues inf->numq queries over inf->argv names.  Threads in the top
 * third of NUMTHR use async resolution with per-query tracked ids and
 * cancel the query issued 100 iterations earlier; the middle threads
 * use plain async; the rest resolve blocking.  Async threads then wait
 * for outstanding callbacks.  arg is a struct ext_thr_info*. */
static void*
ext_thread(void* arg)
{
	struct ext_thr_info* inf = (struct ext_thr_info*)arg;
	int i, r;
	struct ub_result* result;
	struct track_id* async_ids = NULL;
	log_thread_set(&inf->thread_num);
	if(inf->thread_num > NUMTHR*2/3) {
		/* tracked-async threads: one lock-protected id per query */
		async_ids = (struct track_id*)calloc((size_t)inf->numq,
			sizeof(struct track_id));
		if(!async_ids) {
			printf("out of memory\n");
			exit(1);
		}
		for(i=0; i<inf->numq; i++) {
			lock_basic_init(&async_ids[i].lock);
		}
	}
	for(i=0; i<inf->numq; i++) {
		if(async_ids) {
			r = ub_resolve_async(inf->ctx,
				inf->argv[i%inf->argc], LDNS_RR_TYPE_A,
				LDNS_RR_CLASS_IN, &async_ids[i],
				ext_callback, &async_ids[i].id);
			checkerr("ub_resolve_async", r);
			if(i > 100) {
				/* cancel the query from 100 iterations ago;
				 * UB_NOID means it already completed */
				lock_basic_lock(&async_ids[i-100].lock);
				r = ub_cancel(inf->ctx, async_ids[i-100].id);
				if(r != UB_NOID)
					async_ids[i-100].cancel=1;
				lock_basic_unlock(&async_ids[i-100].lock);
				if(r != UB_NOID)
					checkerr("ub_cancel", r);
			}
		} else if(inf->thread_num > NUMTHR/2) {
			/* async */
			r = ub_resolve_async(inf->ctx,
				inf->argv[i%inf->argc], LDNS_RR_TYPE_A,
				LDNS_RR_CLASS_IN, NULL, ext_callback, NULL);
			checkerr("ub_resolve_async", r);
		} else {
			/* blocking */
			r = ub_resolve(inf->ctx, inf->argv[i%inf->argc],
				LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, &result);
			ext_check_result("ub_resolve", r, result);
			ub_resolve_free(result);
		}
	}
	if(inf->thread_num > NUMTHR/2) {
		r = ub_wait(inf->ctx);
		checkerr("ub_ctx_wait", r);
	}
	/* if these locks are destroyed, or if the async_ids is freed,
	 * then a use-after-free happens in another thread.  The
	 * allocation is only part of this test, though — the cleanup is
	 * deliberately left disabled: */
	/*
	if(async_ids) {
		for(i=0; i<inf->numq; i++) {
			lock_basic_destroy(&async_ids[i].lock);
		}
	}
	free(async_ids);
	*/
	return NULL;
}