long int
ub_random_max(struct ub_randstate* state, long int x)
{
	/* make sure we fetch in a range that is divisible by x. ignore
	 * values from d .. MAX_VALUE and draw a new number instead */
	long int d = MAX_VALUE - (MAX_VALUE % x); /* d is divisible by x */
	long int v = ub_random(state);
	while(d <= v)
		v = ub_random(state);
	return (v % x);
}
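/*
 * Illustrative sketch (not part of the original source): the same
 * rejection-sampling idea with the standard C rand()/RAND_MAX instead of
 * ub_random()/MAX_VALUE. Results at or above the largest multiple of x
 * are redrawn, so every residue 0..x-1 is equally likely and the usual
 * modulo bias is avoided.
 */
#include <stdlib.h>

static int
rand_max_sketch(int x)
{
	int d = RAND_MAX - (RAND_MAX % x); /* d is divisible by x */
	int v = rand();
	while(d <= v)
		v = rand(); /* reject the uneven tail, draw again */
	return v % x;
}
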
/**
 * Allocate empty worker structures, with back pointer and thread number
 * initialised for threads 0..numthread. Used as user arguments to new threads.
 * Creates the daemon random generator if it does not exist yet.
 * The random generator stays existing between reloads with a unique state.
 * @param daemon: the daemon with (new) config settings.
 */
static void 
daemon_create_workers(struct daemon* daemon)
{
	int i, numport;
	int* shufport;
	log_assert(daemon && daemon->cfg);
	if(!daemon->rand) {
		unsigned int seed = (unsigned int)time(NULL) ^ 
			(unsigned int)getpid() ^ 0x438;
		daemon->rand = ub_initstate(seed, NULL);
		if(!daemon->rand)
			fatal_exit("could not init random generator");
		hash_set_raninit((uint32_t)ub_random(daemon->rand));
	}
	shufport = (int*)calloc(65536, sizeof(int));
	if(!shufport)
		fatal_exit("out of memory during daemon init");
	numport = daemon_get_shufport(daemon, shufport);
	verbose(VERB_ALGO, "total of %d outgoing ports available", numport);
	
	daemon->num = (daemon->cfg->num_threads?daemon->cfg->num_threads:1);
	if(daemon->reuseport && (int)daemon->num < (int)daemon->num_ports) {
		log_warn("cannot reduce num-threads to %d because so-reuseport "
			"so continuing with %d threads.", (int)daemon->num,
			(int)daemon->num_ports);
		daemon->num = (int)daemon->num_ports;
	}
	daemon->workers = (struct worker**)calloc((size_t)daemon->num, 
		sizeof(struct worker*));
	if(!daemon->workers)
		fatal_exit("out of memory during daemon init");
	if(daemon->cfg->dnstap) {
#ifdef USE_DNSTAP
		daemon->dtenv = dt_create(daemon->cfg->dnstap_socket_path,
			(unsigned int)daemon->num);
		if (!daemon->dtenv)
			fatal_exit("dt_create failed");
		dt_apply_cfg(daemon->dtenv, daemon->cfg);
#else
		fatal_exit("dnstap enabled in config but not built with dnstap support");
#endif
	}
	for(i=0; i<daemon->num; i++) {
		if(!(daemon->workers[i] = worker_create(daemon, i,
			shufport+numport*i/daemon->num, 
			numport*(i+1)/daemon->num - numport*i/daemon->num)))
			/* the above is not ports/numthr, due to rounding */
			fatal_exit("could not create worker");
	}
	free(shufport);
}
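/*
 * Illustrative sketch (not part of the original source): the slice handed
 * to worker i above runs from index numport*i/num up to, but not
 * including, numport*(i+1)/num. Scaling first and dividing afterwards
 * keeps the slices contiguous, non-overlapping and covering all numport
 * entries even when numport does not divide evenly over the threads,
 * which is what the "not ports/numthr" remark refers to.
 */
#include <stdio.h>

static void
print_port_slices(int numport, int numthread)
{
	int i;
	for(i = 0; i < numthread; i++) {
		int start = numport*i/numthread;
		int count = numport*(i+1)/numthread - start;
		printf("worker %d: offset %d, %d ports\n", i, start, count);
	}
}

/* print_port_slices(10, 4) reports slices of 2, 3, 2 and 3 ports. */
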
/** reseed random generator */
static void
ub_arc4random_stir(struct ub_randstate* s, struct ub_randstate* from)
{
	unsigned char rand_buf[SEED_SIZE];
	int i;

	memset(&s->rc4, 0, sizeof(s->rc4));
	memset(rand_buf, 0xc, sizeof(rand_buf));
	if (from) {
		for(i=0; i<SEED_SIZE; i++)
			rand_buf[i] = (unsigned char)ub_random(from);
	} else {
		if(!RAND_status())
			ub_systemseed((unsigned)getpid()^(unsigned)time(NULL));
		if (RAND_bytes(rand_buf, (int)sizeof(rand_buf)) <= 0) {
			/* very unlikely that this happens, since we seeded
			 * above; if it does, complain and keep going */
			log_err("Couldn't obtain random bytes (error %lu)",
				ERR_get_error());
			s->rc4_ready = 256;
			return;
		}
	}
	RC4_set_key(&s->rc4, SEED_SIZE, rand_buf);

	/*
	 * Discard early keystream, as per recommendations in:
	 * http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
	 */
	for(i = 0; i <= 256; i += sizeof(rand_buf))
		RC4(&s->rc4, sizeof(rand_buf), rand_buf, rand_buf);

	memset(rand_buf, 0, sizeof(rand_buf));

	s->rc4_ready = REKEY_BYTES;
}
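/*
 * Illustrative sketch (assumed usage, based only on the calls visible in
 * this listing): per-thread generators can be derived from one existing
 * state. With a non-NULL "from" argument the stir above fills the RC4 key
 * from ub_random(from) instead of asking RAND_bytes() again, so the
 * derived states are distinct without hitting the system RNG once per
 * thread. Error handling is omitted; callers should check for a NULL
 * return from ub_initstate().
 */
#include <time.h>
#include <unistd.h>

static void
seed_thread_states(struct ub_randstate** out, int numthread,
	struct ub_randstate* parent)
{
	int i;
	for(i = 0; i < numthread; i++) {
		/* mix the thread number into the seed, as libworker_setup
		 * does below, so seeds differ even within one second */
		unsigned int seed = (unsigned int)time(NULL) ^
			(unsigned int)getpid() ^ ((unsigned int)i<<17);
		out[i] = ub_initstate(seed, parent);
	}
}
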
/** setup fresh libworker struct */
static struct libworker*
libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb)
{
	unsigned int seed;
	struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
	struct config_file* cfg = ctx->env->cfg;
	int* ports;
	int numports;
	if(!w) return NULL;
	w->is_bg = is_bg;
	w->ctx = ctx;
	w->env = (struct module_env*)malloc(sizeof(*w->env));
	if(!w->env) {
		free(w);
		return NULL;
	}
	*w->env = *ctx->env;
	w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
	if(!w->env->alloc) {
		libworker_delete(w);
		return NULL;
	}
	w->thread_num = w->env->alloc->thread_num;
	alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
	w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
	w->env->fwds = forwards_create();
	if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) { 
		forwards_delete(w->env->fwds);
		w->env->fwds = NULL;
	}
	w->env->hints = hints_create();
	if(w->env->hints && !hints_apply_cfg(w->env->hints, cfg)) { 
		hints_delete(w->env->hints);
		w->env->hints = NULL;
	}
	if(cfg->ssl_upstream) {
		w->sslctx = connect_sslctx_create(NULL, NULL,
			cfg->tls_cert_bundle);
		if(!w->sslctx) {
			/* to make the setup fail after unlock */
			hints_delete(w->env->hints);
			w->env->hints = NULL;
		}
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds ||
		!w->env->hints) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker = (struct worker*)w;
	w->env->probe_timer = NULL;
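	/* per-worker seed: wall clock time, pid, thread number and the
	 * allocator's next_id xor-ed together so every worker starts its
	 * generator from a different value */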
	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid() ^
		(((unsigned int)w->thread_num)<<17);
	seed ^= (unsigned int)w->env->alloc->next_id;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	if(!(w->env->rnd = ub_initstate(seed, ctx->seed_rnd))) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		seed = 0;
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(1) {
		/* primitive lockout for threading: if one thread overwrites
		 * another thread's value, the effect is like wiping the
		 * cache (which is likely empty at the start anyway) */
		/* note we hold the ctx lock in the normal threaded case, so
		 * that is handled properly; only many ctx objects used from
		 * different threads can clash here */
		static int done_raninit = 0;
		if(!done_raninit) {
			done_raninit = 1;
			hash_set_raninit((uint32_t)ub_random(w->env->rnd));
		}
	}
	seed = 0;

	if(eb)
		w->base = comm_base_create_event(eb);
	else	w->base = comm_base_create(0);
	if(!w->base) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker_base = w->base;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	numports = cfg_condense_ports(cfg, &ports);
	if(numports == 0) {
		int locked = !w->is_bg || w->is_bg_thread;
		libworker_delete(w);
		if(locked) {
			lock_basic_unlock(&ctx->cfglock);
		}
		return NULL;
	}
	w->back = outside_network_create(w->base, cfg->msg_buffer_size,
		(size_t)cfg->outgoing_num_ports, cfg->out_ifs,
		cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6, 
		cfg->do_tcp?cfg->outgoing_num_tcp:0,
		w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
		ports, numports, cfg->unwanted_threshold,
		cfg->outgoing_tcp_mss, &libworker_alloc_cleanup, w,
		cfg->do_udp || cfg->udp_upstream_without_downstream, w->sslctx,
		cfg->delay_close, NULL);
	w->env->outnet = w->back;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	free(ports);
	if(!w->back) {
		libworker_delete(w);
		return NULL;
	}
	w->env->mesh = mesh_create(&ctx->mods, w->env);
	if(!w->env->mesh) {
		libworker_delete(w);
		return NULL;
	}
	w->env->send_query = &libworker_send_query;
	w->env->detach_subs = &mesh_detach_subs;
	w->env->attach_sub = &mesh_attach_sub;
	w->env->add_sub = &mesh_add_sub;
	w->env->kill_sub = &mesh_state_delete;
	w->env->detect_cycle = &mesh_detect_cycle;
	comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
	return w;
}
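
/*
 * Illustrative sketch (hypothetical helper, not in the original source):
 * the repeated "!w->is_bg || w->is_bg_thread" test above means "this
 * worker shares the context with other threads in the process, so
 * ctx->cfglock must be taken around config reads". Only a forked
 * background worker (is_bg set, is_bg_thread clear) has its own copy of
 * the context and can skip the lock.
 */
static int
libworker_needs_cfglock(struct libworker* w)
{
	return !w->is_bg || w->is_bg_thread;
}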