/** Do the cron action and wait for result exit value */ static void* win_do_cron(void* ATTR_UNUSED(arg)) { int mynum=65; char* cronaction; log_thread_set(&mynum); cronaction = lookup_reg_str("Software\\Unbound", "CronAction"); if(cronaction && strlen(cronaction)>0) { STARTUPINFO sinfo; PROCESS_INFORMATION pinfo; memset(&pinfo, 0, sizeof(pinfo)); memset(&sinfo, 0, sizeof(sinfo)); sinfo.cb = sizeof(sinfo); verbose(VERB_ALGO, "cronaction: %s", cronaction); if(!CreateProcess(NULL, cronaction, NULL, NULL, 0, CREATE_NO_WINDOW, NULL, NULL, &sinfo, &pinfo)) log_err("CreateProcess error"); else { waitforit(&pinfo); CloseHandle(pinfo.hProcess); CloseHandle(pinfo.hThread); } } free(cronaction); /* stop self */ CloseHandle(cron_thread); cron_thread = NULL; return NULL; }
/** main routine for threaded hash table test */
static void*
test_thr_main(void* arg)
{
	struct test_thr* self = (struct test_thr*)arg;
	int iter;
	log_thread_set(&self->num);
	for(iter = 0; iter < 1000; iter++) {
		/* random op mix: add is twice as likely as remove/lookup */
		switch(random() % 4) {
		case 0:
		case 3:
			testadd_unlim(self->table, NULL);
			break;
		case 1:
			testremove_unlim(self->table, NULL);
			break;
		case 2:
			testlookup_unlim(self->table, NULL);
			break;
		default:
			unit_assert(0);
		}
		if(0)
			lruhash_status(self->table, "hashtest", 1);
		/* because of locking, not all the time */
		if(iter % 100 == 0)
			check_table(self->table);
	}
	check_table(self->table);
	return NULL;
}
/** * Function to start one thread. * @param arg: user argument. * @return: void* user return value could be used for thread_join results. */ static void* thread_start(void* arg) { struct worker* worker = (struct worker*)arg; int port_num = 0; log_thread_set(&worker->thread_num); ub_thread_blocksigs(); #ifdef THREADS_DISABLED /* close pipe ends used by main */ tube_close_write(worker->cmd); close_other_pipes(worker->daemon, worker->thread_num); #endif #ifdef SO_REUSEPORT if(worker->daemon->cfg->so_reuseport) port_num = worker->thread_num % worker->daemon->num_ports; else port_num = 0; #endif if(!worker_init(worker, worker->daemon->cfg, worker->daemon->ports[port_num], 0)) fatal_exit("Could not initialize thread"); worker_work(worker); return NULL; }
void
daemon_cleanup(struct daemon* daemon)
{
	int w;
	log_assert(daemon);
	/* before stopping main worker, handle signals ourselves, so we
	 * don't die on multiple reload signals for example. */
	signal_handling_record();
	log_thread_set(NULL);
	/* clean up caches because
	 * a) RRset IDs will be recycled after a reload, causing collisions
	 * b) validation config can change, thus rrset, msg, keycache clear */
	slabhash_clear(&daemon->env->rrset_cache->table);
	slabhash_clear(daemon->env->msg_cache);
	local_zones_delete(daemon->local_zones);
	daemon->local_zones = NULL;
	/* key cache is cleared by module desetup during next daemon_fork() */
	daemon_remote_clear(daemon->rc);
	for(w = 0; w < daemon->num; w++)
		worker_delete(daemon->workers[w]);
	free(daemon->workers);
	daemon->workers = NULL;
	daemon->num = 0;
#ifdef USE_DNSTAP
	dt_delete(daemon->dtenv);
#endif
	daemon->cfg = NULL;
}
/** the background thread func */ static void* libworker_dobg(void* arg) { /* setup */ uint32_t m; struct libworker* w = (struct libworker*)arg; struct ub_ctx* ctx; if(!w) { log_err("libunbound bg worker init failed, nomem"); return NULL; } ctx = w->ctx; log_thread_set(&w->thread_num); #ifdef THREADS_DISABLED /* we are forked */ w->is_bg_thread = 0; /* close non-used parts of the pipes */ tube_close_write(ctx->qq_pipe); tube_close_read(ctx->rr_pipe); #endif if(!tube_setup_bg_listen(ctx->qq_pipe, w->base, libworker_handle_control_cmd, w)) { log_err("libunbound bg worker init failed, no bglisten"); return NULL; } if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) { log_err("libunbound bg worker init failed, no bgwrite"); return NULL; } /* do the work */ comm_base_dispatch(w->base); /* cleanup */ m = UB_LIBCMD_QUIT; w->want_quit = 1; tube_remove_bg_listen(w->ctx->qq_pipe); tube_remove_bg_write(w->ctx->rr_pipe); libworker_delete(w); (void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m, (uint32_t)sizeof(m), 0); #ifdef THREADS_DISABLED /* close pipes from forked process before exit */ tube_close_read(ctx->qq_pipe); tube_close_write(ctx->rr_pipe); #endif return NULL; }
/** extended thread worker */ static void* ext_thread(void* arg) { struct ext_thr_info* inf = (struct ext_thr_info*)arg; int i, r; struct ub_result* result; struct track_id* async_ids = NULL; log_thread_set(&inf->thread_num); if(inf->thread_num > NUMTHR*2/3) { async_ids = (struct track_id*)calloc((size_t)inf->numq, sizeof(struct track_id)); if(!async_ids) { printf("out of memory\n"); exit(1); } for(i=0; i<inf->numq; i++) { lock_basic_init(&async_ids[i].lock); } } for(i=0; i<inf->numq; i++) { if(async_ids) { r = ub_resolve_async(inf->ctx, inf->argv[i%inf->argc], LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, &async_ids[i], ext_callback, &async_ids[i].id); checkerr("ub_resolve_async", r); if(i > 100) { lock_basic_lock(&async_ids[i-100].lock); r = ub_cancel(inf->ctx, async_ids[i-100].id); if(r != UB_NOID) async_ids[i-100].cancel=1; lock_basic_unlock(&async_ids[i-100].lock); if(r != UB_NOID) checkerr("ub_cancel", r); } } else if(inf->thread_num > NUMTHR/2) { /* async */ r = ub_resolve_async(inf->ctx, inf->argv[i%inf->argc], LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, NULL, ext_callback, NULL); checkerr("ub_resolve_async", r); } else { /* blocking */ r = ub_resolve(inf->ctx, inf->argv[i%inf->argc], LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, &result); ext_check_result("ub_resolve", r, result); ub_resolve_free(result); } } if(inf->thread_num > NUMTHR/2) { r = ub_wait(inf->ctx); checkerr("ub_ctx_wait", r); } /* if these locks are destroyed, or if the async_ids is freed, then a use-after-free happens in another thread. The allocation is only part of this test, though. */ /* if(async_ids) { for(i=0; i<inf->numq; i++) { lock_basic_destroy(&async_ids[i].lock); } } free(async_ids); */ return NULL; }