Example #1
/** send a command over the worker's command tube */
void
worker_send_cmd(struct worker* worker, enum worker_commands cmd)
{
	uint32_t c = (uint32_t)htonl(cmd);
	if(!tube_write_msg(worker->cmd, (uint8_t*)&c, sizeof(c), 0)) {
		log_err("worker send cmd %d failed", (int)cmd);
	}
}
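The command travels as a single 4-byte value in network byte order. For illustration, a minimal receive-side counterpart could look like the sketch below (worker_recv_cmd is a hypothetical helper, not part of the source; it assumes the blocking tube_read_msg call used in the later examples, plus <string.h> and <arpa/inet.h> for memcpy and ntohl):
/* Hypothetical counterpart to worker_send_cmd: read one command from the
 * tube. tube_read_msg allocates the message buffer and returns nonzero on
 * success, as in the read loops in the examples below. */
static int
worker_recv_cmd(struct tube* cmd_tube, enum worker_commands* cmd)
{
	uint8_t* msg = NULL;
	uint32_t len = 0, c;
	if(!tube_read_msg(cmd_tube, &msg, &len, 0))
		return 0; /* pipe closed or read error */
	if(len != (uint32_t)sizeof(c)) {
		free(msg); /* unexpected message size */
		return 0;
	}
	memcpy(&c, msg, sizeof(c)); /* copy out to avoid unaligned access */
	free(msg);
	*cmd = (enum worker_commands)ntohl(c);
	return 1;
}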
Example #2
/** compile statistics and send them to worker 0 over its command tube */
void server_stats_reply(struct worker* worker, int reset)
{
	struct stats_info s;
	server_stats_compile(worker, &s, reset);
	verbose(VERB_ALGO, "write stats replymsg");
	if(!tube_write_msg(worker->daemon->workers[0]->cmd, 
		(uint8_t*)&s, sizeof(s), 0))
		fatal_exit("could not write stat values over cmd channel");
}
Example #3
/** the background thread func */
static void*
libworker_dobg(void* arg)
{
	/* setup */
	uint32_t m;
	struct libworker* w = (struct libworker*)arg;
	struct ub_ctx* ctx;
	if(!w) {
		log_err("libunbound bg worker init failed, nomem");
		return NULL;
	}
	ctx = w->ctx;
	log_thread_set(&w->thread_num);
#ifdef THREADS_DISABLED
	/* we are forked */
	w->is_bg_thread = 0;
	/* close non-used parts of the pipes */
	tube_close_write(ctx->qq_pipe);
	tube_close_read(ctx->rr_pipe);
#endif
	if(!tube_setup_bg_listen(ctx->qq_pipe, w->base, 
		libworker_handle_control_cmd, w)) {
		log_err("libunbound bg worker init failed, no bglisten");
		return NULL;
	}
	if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
		log_err("libunbound bg worker init failed, no bgwrite");
		return NULL;
	}

	/* do the work */
	comm_base_dispatch(w->base);

	/* cleanup */
	m = UB_LIBCMD_QUIT;
	w->want_quit = 1;
	tube_remove_bg_listen(w->ctx->qq_pipe);
	tube_remove_bg_write(w->ctx->rr_pipe);
	libworker_delete(w);
	(void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m, 
		(uint32_t)sizeof(m), 0);
#ifdef THREADS_DISABLED
	/* close pipes from forked process before exit */
	tube_close_read(ctx->qq_pipe);
	tube_close_write(ctx->rr_pipe);
#endif
	return NULL;
}
Example #4
/** stop the bg thread */
static void ub_stop_bg(struct ub_ctx* ctx)
{
	/* stop the bg thread */
	lock_basic_lock(&ctx->cfglock);
	if(ctx->created_bg) {
		uint8_t* msg;
		uint32_t len;
		uint32_t cmd = UB_LIBCMD_QUIT;
		lock_basic_unlock(&ctx->cfglock);
		lock_basic_lock(&ctx->qqpipe_lock);
		(void)tube_write_msg(ctx->qq_pipe, (uint8_t*)&cmd, 
			(uint32_t)sizeof(cmd), 0);
		lock_basic_unlock(&ctx->qqpipe_lock);
		lock_basic_lock(&ctx->rrpipe_lock);
		while(tube_read_msg(ctx->rr_pipe, &msg, &len, 0)) {
			/* discard all results except a quit confirm */
			if(context_serial_getcmd(msg, len) == UB_LIBCMD_QUIT) {
				free(msg);
				break;
			}
			free(msg);
		}
		lock_basic_unlock(&ctx->rrpipe_lock);

		/* if bg worker is a thread, wait for it to exit, so that all
		 * resources are really gone. */
		lock_basic_lock(&ctx->cfglock);
		if(ctx->dothread) {
			lock_basic_unlock(&ctx->cfglock);
			ub_thread_join(ctx->bg_tid);
		} else {
			lock_basic_unlock(&ctx->cfglock);
#ifndef UB_ON_WINDOWS
			if(waitpid(ctx->bg_pid, NULL, 0) == -1) {
				if(verbosity > 2)
					log_err("waitpid: %s", strerror(errno));
			}
#endif
		}
	}
	else {
		lock_basic_unlock(&ctx->cfglock);
	}
}
Example #5
/** cancel an outstanding asynchronous query by its async_id; in forked
 * mode the cancel is also relayed to the background process */
int
ub_cancel(struct ub_ctx* ctx, int async_id)
{
    struct ctx_query* q;
    uint8_t* msg = NULL;
    uint32_t len = 0;
    lock_basic_lock(&ctx->cfglock);
    q = (struct ctx_query*)rbtree_search(&ctx->queries, &async_id);
    if(!q || !q->async) {
        /* it is not there, so nothing to do */
        lock_basic_unlock(&ctx->cfglock);
        return UB_NOID;
    }
    log_assert(q->async);
    q->cancelled = 1;

    /* delete it */
    if(!ctx->dothread) { /* if forked */
        (void)rbtree_delete(&ctx->queries, q->node.key);
        ctx->num_async--;
        msg = context_serialize_cancel(q, &len);
        context_query_delete(q);
        lock_basic_unlock(&ctx->cfglock);
        if(!msg) {
            return UB_NOMEM;
        }
        /* send cancel to background worker */
        lock_basic_lock(&ctx->qqpipe_lock);
        if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
            lock_basic_unlock(&ctx->qqpipe_lock);
            free(msg);
            return UB_PIPE;
        }
        lock_basic_unlock(&ctx->qqpipe_lock);
        free(msg);
    } else {
        lock_basic_unlock(&ctx->cfglock);
    }
    return UB_NOERROR;
}
Example #6
/** start an asynchronous resolve: create the background worker on first
 * use, register the query, and hand its serialized form to the worker
 * over the qq pipe */
int
ub_resolve_async(struct ub_ctx* ctx, char* name, int rrtype,
                 int rrclass, void* mydata, ub_callback_t callback, int* async_id)
{
    struct ctx_query* q;
    uint8_t* msg = NULL;
    uint32_t len = 0;

    if(async_id)
        *async_id = 0;
    lock_basic_lock(&ctx->cfglock);
    if(!ctx->finalized) {
        int r = context_finalize(ctx);
        if(r) {
            lock_basic_unlock(&ctx->cfglock);
            return r;
        }
    }
    if(!ctx->created_bg) {
        int r;
        ctx->created_bg = 1;
        lock_basic_unlock(&ctx->cfglock);
        r = libworker_bg(ctx);
        if(r) {
            lock_basic_lock(&ctx->cfglock);
            ctx->created_bg = 0;
            lock_basic_unlock(&ctx->cfglock);
            return r;
        }
    } else {
        lock_basic_unlock(&ctx->cfglock);
    }

    /* create new ctx_query and attempt to add to the list */
    q = context_new(ctx, name, rrtype, rrclass, callback, mydata);
    if(!q)
        return UB_NOMEM;

    /* write over pipe to background worker */
    lock_basic_lock(&ctx->cfglock);
    msg = context_serialize_new_query(q, &len);
    if(!msg) {
        (void)rbtree_delete(&ctx->queries, q->node.key);
        ctx->num_async--;
        context_query_delete(q);
        lock_basic_unlock(&ctx->cfglock);
        return UB_NOMEM;
    }
    if(async_id)
        *async_id = q->querynum;
    lock_basic_unlock(&ctx->cfglock);

    lock_basic_lock(&ctx->qqpipe_lock);
    if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
        lock_basic_unlock(&ctx->qqpipe_lock);
        free(msg);
        return UB_PIPE;
    }
    lock_basic_unlock(&ctx->qqpipe_lock);
    free(msg);
    return UB_NOERROR;
}
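For context on how the asynchronous entry points above are driven from an application, here is an illustrative caller. This is only a sketch of typical libunbound usage (ub_ctx_create, ub_wait, ub_resolve_free and the example domain name are assumptions for illustration; it needs #include <unbound.h>):
/* Illustrative application-side use of ub_resolve_async / ub_cancel. */
static void my_cb(void* mydata, int err, struct ub_result* result)
{
    (void)mydata;
    if(err == 0 && result) {
        /* inspect result->havedata and result->data[] here */
        ub_resolve_free(result);
    }
}

int main(void)
{
    struct ub_ctx* ctx = ub_ctx_create();
    int async_id = 0;
    if(!ctx)
        return 1;
    if(ub_resolve_async(ctx, "www.example.com", 1 /* TYPE A */,
        1 /* CLASS IN */, NULL, my_cb, &async_id) != 0) {
        ub_ctx_delete(ctx);
        return 1;
    }
    /* either wait for the callback to run ... */
    ub_wait(ctx);
    /* ... or cancel the outstanding query instead: ub_cancel(ctx, async_id); */
    ub_ctx_delete(ctx);
    return 0;
}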
Example #7
/** delete the context: ask the background worker to quit, wait for it,
 * then free caches, pipes, locks and remaining context state */
void
ub_ctx_delete(struct ub_ctx* ctx)
{
    struct alloc_cache* a, *na;
    if(!ctx) return;
    /* stop the bg thread */
    lock_basic_lock(&ctx->cfglock);
    if(ctx->created_bg) {
        uint8_t* msg;
        uint32_t len;
        uint32_t cmd = UB_LIBCMD_QUIT;
        lock_basic_unlock(&ctx->cfglock);
        lock_basic_lock(&ctx->qqpipe_lock);
        (void)tube_write_msg(ctx->qq_pipe, (uint8_t*)&cmd,
                             (uint32_t)sizeof(cmd), 0);
        lock_basic_unlock(&ctx->qqpipe_lock);
        lock_basic_lock(&ctx->rrpipe_lock);
        while(tube_read_msg(ctx->rr_pipe, &msg, &len, 0)) {
            /* discard all results except a quit confirm */
            if(context_serial_getcmd(msg, len) == UB_LIBCMD_QUIT) {
                free(msg);
                break;
            }
            free(msg);
        }
        lock_basic_unlock(&ctx->rrpipe_lock);

        /* if bg worker is a thread, wait for it to exit, so that all
         * resources are really gone. */
        lock_basic_lock(&ctx->cfglock);
        if(ctx->dothread) {
            lock_basic_unlock(&ctx->cfglock);
            ub_thread_join(ctx->bg_tid);
        } else {
            lock_basic_unlock(&ctx->cfglock);
        }
    }
    else {
        lock_basic_unlock(&ctx->cfglock);
    }


    modstack_desetup(&ctx->mods, ctx->env);
    a = ctx->alloc_list;
    while(a) {
        na = a->super;
        a->super = &ctx->superalloc;
        alloc_clear(a);
        free(a);
        a = na;
    }
    local_zones_delete(ctx->local_zones);
    lock_basic_destroy(&ctx->qqpipe_lock);
    lock_basic_destroy(&ctx->rrpipe_lock);
    lock_basic_destroy(&ctx->cfglock);
    tube_delete(ctx->qq_pipe);
    tube_delete(ctx->rr_pipe);
    if(ctx->env) {
        slabhash_delete(ctx->env->msg_cache);
        rrset_cache_delete(ctx->env->rrset_cache);
        infra_delete(ctx->env->infra_cache);
        config_delete(ctx->env->cfg);
        free(ctx->env);
    }
    ub_randfree(ctx->seed_rnd);
    alloc_clear(&ctx->superalloc);
    traverse_postorder(&ctx->queries, delq, NULL);
    free(ctx);
#ifdef USE_WINSOCK
    WSACleanup();
#endif
}