Example #1
struct mesh_area* 
mesh_create(struct module_stack* stack, struct module_env* env)
{
	struct mesh_area* mesh = calloc(1, sizeof(struct mesh_area));
	if(!mesh) {
		log_err("mesh area alloc: out of memory");
		return NULL;
	}
	mesh->histogram = timehist_setup();
	mesh->qbuf_bak = ldns_buffer_new(env->cfg->msg_buffer_size);
	if(!mesh->histogram || !mesh->qbuf_bak) {
		/* free whichever of the two allocations succeeded */
		if(mesh->histogram) timehist_delete(mesh->histogram);
		if(mesh->qbuf_bak) ldns_buffer_free(mesh->qbuf_bak);
		free(mesh);
		log_err("mesh area alloc: out of memory");
		return NULL;
	}
	mesh->mods = *stack;
	mesh->env = env;
	rbtree_init(&mesh->run, &mesh_state_compare);
	rbtree_init(&mesh->all, &mesh_state_compare);
	mesh->num_reply_addrs = 0;
	mesh->num_reply_states = 0;
	mesh->num_detached_states = 0;
	mesh->num_forever_states = 0;
	mesh->stats_jostled = 0;
	mesh->stats_dropped = 0;
	mesh->max_reply_states = env->cfg->num_queries_per_thread;
	mesh->max_forever_states = (mesh->max_reply_states+1)/2;
#ifndef S_SPLINT_S
	mesh->jostle_max.tv_sec = (time_t)(env->cfg->jostle_time / 1000);
	mesh->jostle_max.tv_usec = (time_t)((env->cfg->jostle_time % 1000)
		*1000);
#endif
	return mesh;
}
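The Unbound examples in this listing all follow the same intrusive pattern: the caller embeds an rbnode_t in its own struct, points node.key back at the containing struct, and hands rbtree_init a comparator. A minimal sketch of that pattern against Unbound's util/rbtree.h API (rbtree_init, rbtree_insert); the my_item type and its comparator are hypothetical:

#include "util/rbtree.h"  /* Unbound's intrusive red-black tree */

/* Hypothetical element type: the rbnode_t is embedded first, and
 * node.key points back at the containing struct, so the comparator
 * sees the whole item. */
struct my_item {
	rbnode_t node;
	int id;
};

static int
my_item_compare(const void* a, const void* b)
{
	const struct my_item* x = (const struct my_item*)a;
	const struct my_item* y = (const struct my_item*)b;
	if(x->id < y->id) return -1;
	if(x->id > y->id) return 1;
	return 0;
}

static void
demo(void)
{
	rbtree_t tree;
	struct my_item it;
	rbtree_init(&tree, &my_item_compare);
	it.node.key = &it;  /* same self-key trick as mesh_state_create below */
	it.id = 7;
	rbtree_insert(&tree, &it.node);
}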
Example #2
struct mesh_state* 
mesh_state_create(struct module_env* env, struct query_info* qinfo, 
	uint16_t qflags, int prime)
{
	struct regional* region = alloc_reg_obtain(env->alloc);
	struct mesh_state* mstate;
	int i;
	if(!region)
		return NULL;
	mstate = (struct mesh_state*)regional_alloc(region, 
		sizeof(struct mesh_state));
	if(!mstate) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	memset(mstate, 0, sizeof(*mstate));
	mstate->node = *RBTREE_NULL;
	mstate->run_node = *RBTREE_NULL;
	mstate->node.key = mstate;
	mstate->run_node.key = mstate;
	mstate->reply_list = NULL;
	mstate->list_select = mesh_no_list;
	mstate->replies_sent = 0;
	rbtree_init(&mstate->super_set, &mesh_state_ref_compare);
	rbtree_init(&mstate->sub_set, &mesh_state_ref_compare);
	mstate->num_activated = 0;
	/* init module qstate */
	mstate->s.qinfo.qtype = qinfo->qtype;
	mstate->s.qinfo.qclass = qinfo->qclass;
	mstate->s.qinfo.qname_len = qinfo->qname_len;
	mstate->s.qinfo.qname = regional_alloc_init(region, qinfo->qname,
		qinfo->qname_len);
	if(!mstate->s.qinfo.qname) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	/* remove all weird bits from qflags */
	mstate->s.query_flags = (qflags & (BIT_RD|BIT_CD));
	mstate->s.is_priming = prime;
	mstate->s.reply = NULL;
	mstate->s.region = region;
	mstate->s.curmod = 0;
	mstate->s.return_msg = 0;
	mstate->s.return_rcode = LDNS_RCODE_NOERROR;
	mstate->s.env = env;
	mstate->s.mesh_info = mstate;
	mstate->s.prefetch_leeway = 0;
	/* init modules */
	for(i=0; i<env->mesh->mods.num; i++) {
		mstate->s.minfo[i] = NULL;
		mstate->s.ext_state[i] = module_state_initial;
	}
	return mstate;
}
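The mstate->node.key = mstate self-reference above is what makes searches work: a lookup fills in a throwaway key struct the same way and passes it to rbtree_search, exactly as the lookup variable in mesh_detach_subs (Example #27) does. A hedged sketch, reusing the hypothetical my_item type from the sketch under Example #1:

static struct my_item*
find_item(rbtree_t* tree, int id)
{
	struct my_item lookup;
	rbnode_t* n;
	lookup.node.key = &lookup;  /* comparator dereferences the key pointer */
	lookup.id = id;
	n = rbtree_search(tree, &lookup);
	return n ? (struct my_item*)n->key : NULL;
}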
Example #3
int main()
{
    struct rbtree* tree = rbtree_init(compare);
    int ret = 0;
    if(tree == NULL)
    {
        fprintf(stderr,"malloc tree failed\n");
        return -1;
    }

    int i = 0;
    ULL* array = malloc(SIZE * sizeof(ULL));
    if(array == NULL)
    {
        fprintf(stderr,"malloc failed\n");
        return -1;
    }
 //   srand(time(NULL));
    for(i = 0;i<SIZE;i++)
    {
        array[i] = rand()%1000;
        ret = rbtree_insert(tree, &array[i], &array[i]); /* -1 means node allocation failed,
                                                            -2 means a node with the same key exists */
        void * data = rbtree_lookup(tree,&array[i]);
        if(ret == 0)
            assert(data == &array[i]);
    }

    print_tree(tree);
    tree2dot(tree,"tree.dot");
    return 0;
}
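Example #3 leaves compare, ULL, and SIZE to the surrounding file. A plausible set of definitions, assuming this allocating rbtree variant passes the two stored key pointers to the comparator; all three names and the comparator signature are assumptions:

typedef unsigned long long ULL;
#define SIZE 64

/* Assumed comparator: keys are pointers to ULL values. */
static int compare(const void* a, const void* b)
{
    ULL x = *(const ULL*)a;
    ULL y = *(const ULL*)b;
    if(x < y) return -1;
    if(x > y) return 1;
    return 0;
}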
Example #4
enum sec_status
nsec3_prove_nxornodata(struct module_env* env, struct val_env* ve,
	struct ub_packed_rrset_key** list, size_t num, 
	struct query_info* qinfo, struct key_entry_key* kkey, int* nodata)
{
	enum sec_status sec, secnx;
	rbtree_t ct;
	struct nsec3_filter flt;
	*nodata = 0;

	if(!list || num == 0 || !kkey || !key_entry_isgood(kkey))
		return sec_status_bogus; /* no valid NSEC3s, bogus */
	rbtree_init(&ct, &nsec3_hash_cmp); /* init names-to-hash cache */
	filter_init(&flt, list, num, qinfo); /* init RR iterator */
	if(!flt.zone)
		return sec_status_bogus; /* no RRs */
	if(nsec3_iteration_count_high(ve, &flt, kkey))
		return sec_status_insecure; /* iteration count too high */

	/* try nxdomain and nodata one after the other, while keeping
	 * the hash cache intact */

	secnx = nsec3_do_prove_nameerror(env, &flt, &ct, qinfo);
	if(secnx==sec_status_secure)
		return sec_status_secure;
	sec = nsec3_do_prove_nodata(env, &flt, &ct, qinfo);
	if(sec==sec_status_secure) {
		*nodata = 1;
	} else if(sec == sec_status_insecure) {
		*nodata = 1;
	} else if(secnx == sec_status_insecure) {
		sec = sec_status_insecure;
	}
	return sec;
}
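Examples #4, #20 and #22 share an idiom worth noting: the names-to-hash cache is a plain rbtree_t on the stack, seeded with rbtree_init and never explicitly torn down, because every node placed in it lives in memory that outlives the call (Unbound reclaims such scratch allocations with regional_free_all). A minimal sketch of the idiom, with hypothetical names:

static void
with_scratch_tree(struct regional* region)
{
	rbtree_t scratch;
	rbtree_init(&scratch, &my_item_compare);
	/* ... allocate nodes from region, insert, query ... */
	/* no tree teardown: regional_free_all(region) reclaims the nodes */
}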
Example #5
static void
_reset (EtnEncoder *e)
{
    ASSERT (e);
    _freeTree (&e->addrToIndex);
    rbtree_init (&e->addrToIndex, _compareFn, 0);
}
Example #6
/** create context functionality, but no pipes */
static struct ub_ctx* ub_ctx_create_nopipe(void)
{
	struct ub_ctx* ctx;
	unsigned int seed;
#ifdef USE_WINSOCK
	int r;
	WSADATA wsa_data;
#endif
	
	log_init(NULL, 0, NULL); /* logs to stderr */
	log_ident_set("libunbound");
#ifdef USE_WINSOCK
	if((r = WSAStartup(MAKEWORD(2,2), &wsa_data)) != 0) {
		log_err("could not init winsock. WSAStartup: %s",
			wsa_strerror(r));
		return NULL;
	}
#endif
	verbosity = 0; /* errors only */
	checklock_start();
	ctx = (struct ub_ctx*)calloc(1, sizeof(*ctx));
	if(!ctx) {
		errno = ENOMEM;
		return NULL;
	}
	alloc_init(&ctx->superalloc, NULL, 0);
	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid();
	if(!(ctx->seed_rnd = ub_initstate(seed, NULL))) {
		seed = 0;
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	seed = 0;
	lock_basic_init(&ctx->qqpipe_lock);
	lock_basic_init(&ctx->rrpipe_lock);
	lock_basic_init(&ctx->cfglock);
	ctx->env = (struct module_env*)calloc(1, sizeof(*ctx->env));
	if(!ctx->env) {
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	ctx->env->cfg = config_create_forlib();
	if(!ctx->env->cfg) {
		free(ctx->env);
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	ctx->env->alloc = &ctx->superalloc;
	ctx->env->worker = NULL;
	ctx->env->need_to_validate = 0;
	modstack_init(&ctx->mods);
	rbtree_init(&ctx->queries, &context_query_cmp);
	return ctx;
}
Example #7
/** create a new localzone */
static struct local_zone*
local_zone_create(uint8_t* nm, size_t len, int labs, 
	enum localzone_type t, uint16_t dclass)
{
	struct local_zone* z = (struct local_zone*)calloc(1, sizeof(*z));
	if(!z) {
		return NULL;
	}
	z->node.key = z;
	z->dclass = dclass;
	z->type = t;
	z->name = nm;
	z->namelen = len;
	z->namelabs = labs;
	lock_rw_init(&z->lock);
	z->region = regional_create();
	if(!z->region) {
		free(z);
		return NULL;
	}
	rbtree_init(&z->data, &local_data_cmp);
	lock_protect(&z->lock, &z->parent, sizeof(*z)-sizeof(rbnode_t));
	/* also the zones->lock protects node, parent, name*, class */
	return z;
}
Example #8
// Generic reset; should only be called by subclasses.
void
etnEncoderReset(EtnEncoder *e, int (*write) (EtnEncoder *e, uint8_t *data, EtnLength length), void (*flush) (EtnEncoder *e))
{
    ASSERT (e);
    e->write = write;
    e->flush = flush;
    rbtree_init (&e->addrToIndex, _compareFn, 0);
}
Example #9
static inline char *mmrun_init(int slot_count, int slot_size, char *run)
{
    rbtree_init(run);
    mmrun_set_bitmap((1 << slot_count) - 1, run);
    mmrun_set_slotcount(slot_count, run);
    mmrun_set_slotsize(slot_size, run);
    
    return run;
}
Example #10
static bool
_predicate (void)
{
	int i;
	KeyValuePair_t n;
	struct rbtree tree;
	KeyValuePair_t *node;
	struct rbtree_node *result;

	rbtree_init (&tree, _compareFn, 0);

	for (i = 0; i < TreeSize; i++) {
		node = malloc (sizeof (KeyValuePair_t));

		node->key = i;
		node->val = TreeSize + i;

		rbtree_insert ((struct rbtree_node *) &node->node, &tree);
	}

	// Lookup the nodes.
	for (i = 0; i < TreeSize; i++) {
		KeyValuePair_t *kvResult;
		n.key = i;
		kvResult = rbtree_container_of (rbtree_lookup ((struct rbtree_node *) &n.node, &tree), KeyValuePair_t, node);
		if (kvResult->key != i || kvResult->val != TreeSize + i) {
			return false;
		}
	}

	// This lookup should fail.
	n.key = TreeSize;
	result = rbtree_lookup ((struct rbtree_node *) &n.node, &tree);
	if (result != NULL) {
		return false;
	}

	//iterate (rbtree_first(&tree), iterateFn);
	result = rbtree_first(&tree);
	while (result) {
		KeyValuePair_t *kvResult = rbtree_container_of (result, KeyValuePair_t, node);
		struct rbtree_node *n = result;
		result = rbtree_next (result);
		rbtree_remove (n, &tree);
		free (kvResult);
	}

	// This lookup should fail because we just cleared the tree.
	n.key = 0;
	result = rbtree_lookup ((struct rbtree_node *) &n.node, &tree);
	if (result != NULL) {
		return false;
	}

	return true;
}
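The test above relies on a KeyValuePair_t that embeds the library's node, and on rbtree_container_of to recover the pair from a node. A sketch of what those presumably look like; the struct layout and the comparator signature are assumptions, since neither appears in the excerpt:

typedef struct {
	struct rbtree_node node;  /* embedded tree linkage */
	int key;
	int val;
} KeyValuePair_t;

/* Assumed comparator: orders nodes by the containing pair's key. */
static int
_compareFn (struct rbtree_node *a, struct rbtree_node *b)
{
	KeyValuePair_t *ka = rbtree_container_of (a, KeyValuePair_t, node);
	KeyValuePair_t *kb = rbtree_container_of (b, KeyValuePair_t, node);
	return (ka->key > kb->key) - (ka->key < kb->key);
}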
Example #11
File: rbt.c  Project: 1587/ltp
rb_tree *rbtree_construct()
{
	rb_tree *tree = (rb_tree *) malloc(sizeof(rb_tree));
	if (!tree) {
		fprintf(stderr, "Memory Issue - Shortage Exists!\n");
		return NULL;
	}
	rbtree_init(tree);
	return tree;
}
Example #12
void
mesh_delete_all(struct mesh_area* mesh)
{
	/* free all query states */
	while(mesh->all.count)
		mesh_delete_helper(mesh->all.root);
	mesh->stats_dropped += mesh->num_reply_addrs;
	/* clear mesh area references */
	rbtree_init(&mesh->run, &mesh_state_compare);
	rbtree_init(&mesh->all, &mesh_state_compare);
	mesh->num_reply_addrs = 0;
	mesh->num_reply_states = 0;
	mesh->num_detached_states = 0;
	mesh->num_forever_states = 0;
	mesh->forever_first = NULL;
	mesh->forever_last = NULL;
	mesh->jostle_first = NULL;
	mesh->jostle_last = NULL;
}
Example #13
static bool linearInit()
{
	auto blk = MemBlock::Create((u8*)__ctru_linear_heap, __ctru_linear_heap_size);
	if (blk)
	{
		sLinearPool.AddBlock(blk);
		rbtree_init(&sAddrMap, addrMapNodeComparator);
		return true;
	}
	return false;
}
Example #14
/**
 * Create canonical form of rrset in the scratch buffer.
 * @param region: temporary region.
 * @param buf: the buffer to use.
 * @param k: the rrset to insert.
 * @param sig: RRSIG rdata to include.
 * @param siglen: RRSIG rdata len excluding signature field, but inclusive
 * 	signer name length.
 * @param sortree: if NULL is passed a new sorted rrset tree is built.
 * 	Otherwise it is reused.
 * @return false on alloc error.
 */
static int
rrset_canonical(struct regional* region, sldns_buffer* buf, 
	struct ub_packed_rrset_key* k, uint8_t* sig, size_t siglen,
	struct rbtree_t** sortree)
{
	struct packed_rrset_data* d = (struct packed_rrset_data*)k->entry.data;
	uint8_t* can_owner = NULL;
	size_t can_owner_len = 0;
	struct canon_rr* walk;
	struct canon_rr* rrs;

	if(!*sortree) {
		*sortree = (struct rbtree_t*)regional_alloc(region, 
			sizeof(rbtree_t));
		if(!*sortree)
			return 0;
		if(d->count > RR_COUNT_MAX)
			return 0; /* integer overflow protection */
		rrs = regional_alloc(region, sizeof(struct canon_rr)*d->count);
		if(!rrs) {
			*sortree = NULL;
			return 0;
		}
		rbtree_init(*sortree, &canonical_tree_compare);
		canonical_sort(k, d, *sortree, rrs);
	}

	sldns_buffer_clear(buf);
	sldns_buffer_write(buf, sig, siglen);
	/* canonicalize signer name */
	query_dname_tolower(sldns_buffer_begin(buf)+18); 
	RBTREE_FOR(walk, struct canon_rr*, (*sortree)) {
		/* see if there is enough space left in the buffer */
		if(sldns_buffer_remaining(buf) < can_owner_len + 2 + 2 + 4
			+ d->rr_len[walk->rr_idx]) {
			log_err("verify: failed to canonicalize, "
				"rrset too big");
			return 0;
		}
		/* determine canonical owner name */
		if(can_owner)
			sldns_buffer_write(buf, can_owner, can_owner_len);
		else	insert_can_owner(buf, k, sig, &can_owner, 
				&can_owner_len);
		sldns_buffer_write(buf, &k->rk.type, 2);
		sldns_buffer_write(buf, &k->rk.rrset_class, 2);
		sldns_buffer_write(buf, sig+4, 4);
		sldns_buffer_write(buf, d->rr_data[walk->rr_idx], 
			d->rr_len[walk->rr_idx]);
		canonicalize_rdata(buf, k, d->rr_len[walk->rr_idx]);
	}
	sldns_buffer_flip(buf);
	return 1;
}
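RBTREE_FOR, used in the loop above, is Unbound's in-order traversal macro; it requires the rbnode_t to be the first member of the element type so its pointer casts are valid. For reference, a hedged sketch iterating the hypothetical my_item tree from Example #1's sketch:

#include <stdio.h>

static void
dump_items(rbtree_t* tree)
{
	struct my_item* it;
	RBTREE_FOR(it, struct my_item*, tree) {
		printf("id=%d\n", it->id);
	}
}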
Example #15
struct local_zones* 
local_zones_create(void)
{
	struct local_zones* zones = (struct local_zones*)calloc(1, 
		sizeof(*zones));
	if(!zones)
		return NULL;
	rbtree_init(&zones->ztree, &local_zone_cmp);
	lock_rw_init(&zones->lock);
	lock_protect(&zones->lock, &zones->ztree, sizeof(zones->ztree));
	/* also lock protects the rbnode's in struct local_zone */
	return zones;
}
Example #16
RBTree *
rbtree_new(CompareFunc compare_keys, FreeFunc free_key, FreeFunc free_value, Allocator *allocator)
{
    RBTree *tree;

    if(!(tree = (RBTree *)malloc(sizeof(RBTree))))
    {
        fprintf(stderr, "Couldn't allocate memory.\n");
        abort();
    }

    rbtree_init(tree, compare_keys, free_key, free_value, allocator);

    return tree;
}
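A usage sketch for this allocating constructor; treating CompareFunc as taking two const void* keys, a NULL Allocator as "use the default", and free as a suitable destructor for both keys and values are all assumptions:

#include <stdlib.h>
#include <string.h>

/* Assumed CompareFunc-compatible comparator over string keys. */
static int cmp_str(const void *a, const void *b)
{
    return strcmp((const char *)a, (const char *)b);
}

/* Tree mapping heap-allocated strings to heap-allocated values. */
RBTree *tree = rbtree_new(cmp_str, free, free, NULL);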
Example #17
int start_profiling(void) {
#if DUMP_OVERHEAD
   time_spent_in_migration = 0;
   rdtscll(time_start_profiling);
#endif

   rbtree_init();
#if ENABLE_THREAD_PLACEMENT
   tids_init();
#endif
   carrefour_init();

   consider_L1L2 = 1;
   ibs_start();
   return 0;
} 
Example #18
struct val_neg_cache* val_neg_create(struct config_file* cfg, size_t maxiter)
{
	struct val_neg_cache* neg = (struct val_neg_cache*)calloc(1, 
		sizeof(*neg));
	if(!neg) {
		log_err("Could not create neg cache: out of memory");
		return NULL;
	}
	neg->nsec3_max_iter = maxiter;
	neg->max = 1024*1024; /* 1 M is thousands of entries */
	if(cfg) neg->max = cfg->neg_cache_size;
	rbtree_init(&neg->tree, &val_neg_zone_compare);
	lock_basic_init(&neg->lock);
	lock_protect(&neg->lock, neg, sizeof(*neg));
	return neg;
}
Example #19
channel_spooler_t *start_spooler(channel_spooler_t *spl, ngx_str_t *chid, chanhead_pubsub_status_t *channel_status, nchan_store_t *store) {
  if(!spl->running) {
    ngx_memzero(spl, sizeof(*spl));
    rbtree_init(&spl->spoolseed, "spooler msg_id tree", spool_rbtree_node_id, spool_rbtree_bucketer, spool_rbtree_compare);
    
    spl->fn=&spooler_fn;
    //spl->prev_msg_id.time=0;
    //spl->prev_msg_id.tag=0;
    
    DBG("start SPOOLER %p", *spl);
    
    spl->chid = chid;
    spl->store = store;
    
    spl->channel_status = channel_status;
    
    spl->running = 1;
    //spl->want_to_stop = 0;
    return spl;
  }
  else {
    ERR("looks like spooler is already running. make sure spooler->running=0 befire starting.");
    assert(0);
    return NULL;
  }
  
  
  /*
  nchan_msg_id_t        id = {0,0};
  subscriber_pool_t     *spl;
  
  spl = get_spool(spl, &id);
  
  */
}
Example #20
enum sec_status
nsec3_prove_wildcard(struct module_env* env, struct val_env* ve,
        struct ub_packed_rrset_key** list, size_t num,
	struct query_info* qinfo, struct key_entry_key* kkey, uint8_t* wc)
{
	rbtree_t ct;
	struct nsec3_filter flt;
	struct ce_response ce;
	uint8_t* nc;
	size_t nc_len;
	size_t wclen;
	(void)dname_count_size_labels(wc, &wclen);

	if(!list || num == 0 || !kkey || !key_entry_isgood(kkey))
		return sec_status_bogus; /* no valid NSEC3s, bogus */
	rbtree_init(&ct, &nsec3_hash_cmp); /* init names-to-hash cache */
	filter_init(&flt, list, num, qinfo); /* init RR iterator */
	if(!flt.zone)
		return sec_status_bogus; /* no RRs */
	if(nsec3_iteration_count_high(ve, &flt, kkey))
		return sec_status_insecure; /* iteration count too high */

	/* We know what the (purported) closest encloser is by just 
	 * looking at the supposed generating wildcard. 
	 * The *. has already been removed from the wc name.
	 */
	memset(&ce, 0, sizeof(ce));
	ce.ce = wc;
	ce.ce_len = wclen;

	/* Now we still need to prove that the original data did not exist.
	 * Otherwise, we need to show that the next closer name is covered. */
	next_closer(qinfo->qname, qinfo->qname_len, ce.ce, &nc, &nc_len);
	if(!find_covering_nsec3(env, &flt, &ct, nc, nc_len, 
		&ce.nc_rrset, &ce.nc_rr)) {
		verbose(VERB_ALGO, "proveWildcard: did not find a covering "
			"NSEC3 that covered the next closer name.");
		return sec_status_bogus;
	}
	if(ce.nc_rrset && nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
		verbose(VERB_ALGO, "proveWildcard: NSEC3 optout");
		return sec_status_insecure;
	}
	return sec_status_secure;
}
Example #21
/*!
 * Construct a red-black tree with a comparison object
 *
 * \param comp Comparison function to be used by the tree
 *
 * \return The newly constructed  tree
 */
rbtree_t * rbtree_create
    (
    rb_compare comp
    )
    {
    rbtree_t * tree = (rbtree_t *) kmalloc(sizeof(rbtree_t));

    if (!tree)
        {
        printk("Not enough memory!\n");

        return NULL;
        }

    rbtree_init(tree, comp);

    return tree;
    }
Example #22
enum sec_status
nsec3_prove_nodata(struct module_env* env, struct val_env* ve,
	struct ub_packed_rrset_key** list, size_t num,
	struct query_info* qinfo, struct key_entry_key* kkey)
{
	rbtree_t ct;
	struct nsec3_filter flt;

	if(!list || num == 0 || !kkey || !key_entry_isgood(kkey))
		return sec_status_bogus; /* no valid NSEC3s, bogus */
	rbtree_init(&ct, &nsec3_hash_cmp); /* init names-to-hash cache */
	filter_init(&flt, list, num, qinfo); /* init RR iterator */
	if(!flt.zone)
		return sec_status_bogus; /* no RRs */
	if(nsec3_iteration_count_high(ve, &flt, kkey))
		return sec_status_insecure; /* iteration count too high */
	return nsec3_do_prove_nodata(env, &flt, &ct, qinfo);
}
Example #23
rbtree_head * rbtree_insert(rbtree_tree *tree, rbtree_head *node)
{
   unsigned long key;
   rbtree_head *p;
   assert(tree && node);
   if (!tree || !node)
      return null_head;
   rbtree_init(node);

   if (tree->root == null_head) { /*first add node*/
      node->color = RB_BLACK;
      tree->root = node;
      return node;
   }
   key = __RBTREE_KEY(tree, node);
   p = tree->root;
   do {
      int __cmp = __RBTREE_CMP(tree, key, __RBTREE_KEY(tree, p));
      if (__cmp == 0 && tree->unique)
         return null_head;

      if (__cmp <= 0) {
         if (p->left == null_head) {
            p->left = node;
            node->parent = p;
            break;
         }
         p = p->left;
      } else {
         if (p->right == null_head) {
            p->right = node;
            node->parent = p;
            break;
         }
         p = p->right;
      }
   } while (p);

   __rebalance_for_add(node, &tree->root);

   return node;
}
Example #24
/**
 * Initialize an ebb_server structure.  After calling ebb_server_init set
 * the callback server->new_connection and, optionally, callback data
 * server->data.  The new connection MUST be initialized with
 * ebb_connection_init before returning it to the server.
 *
 * @param server the server to initialize
 * @param loop a libev loop
 */
void 
ebb_server_init(ebb_server *server, struct ev_loop *loop)
{
  server->loop = loop;
  server->listening = FALSE;
  server->port[0] = '\0';
  server->fd = -1;
  server->connection_watcher.data = server;
  ev_init (&server->connection_watcher, on_connection);
  server->secure = FALSE;

#ifdef HAVE_GNUTLS
  rbtree_init(&server->session_cache, session_cache_compare);
  server->credentials = NULL;
#endif

  server->new_connection = NULL;
  server->start_connection = ebb_connection_start;
  server->data = NULL;
}
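The doc comment spells out the call order; a hedged usage sketch, where on_new_connection is hypothetical and the use of libev's EV_DEFAULT loop macro is an assumption:

ebb_server server;
ebb_server_init(&server, EV_DEFAULT);
server.new_connection = on_new_connection;  /* MUST be set per the doc comment */
server.data = NULL;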
Example #25
/**
 * Create a single zone node
 * @param nm: name for zone (copied)
 * @param nm_len: length of name
 * @param labs: labels in name.
 * @param dclass: class of zone, host order.
 * @return new zone or NULL on failure
 */
static struct val_neg_zone* neg_setup_zone_node(
	uint8_t* nm, size_t nm_len, int labs, uint16_t dclass)
{
	struct val_neg_zone* zone = 
		(struct val_neg_zone*)calloc(1, sizeof(*zone));
	if(!zone) {
		return NULL;
	}
	zone->node.key = zone;
	zone->name = memdup(nm, nm_len);
	if(!zone->name) {
		free(zone);
		return NULL;
	}
	zone->len = nm_len;
	zone->labs = labs;
	zone->dclass = dclass;

	rbtree_init(&zone->tree, &val_neg_data_compare);
	return zone;
}
Example #26
channel_spooler_t *start_spooler(channel_spooler_t *spl, ngx_str_t *chid, chanhead_pubsub_status_t *channel_status, nchan_store_t *store, nchan_loc_conf_t *cf, spooler_fetching_strategy_t fetching_strategy, channel_spooler_handlers_t *handlers, void *handlers_privdata) {
  if(!spl->running) {
    ngx_memzero(spl, sizeof(*spl));
    rbtree_init(&spl->spoolseed, "spooler msg_id tree", spool_rbtree_node_id, spool_rbtree_bucketer, spool_rbtree_compare);
    
    spl->fn=&spooler_fn;
    //spl->prev_msg_id.time=0;
    //spl->prev_msg_id.tag=0;
    
    DBG("start SPOOLER %p", *spl);
    
    spl->chid = chid;
    spl->store = store;
    
    spl->channel_status = channel_status;
    
    spl->running = 1;
    //spl->want_to_stop = 0;
    spl->publish_events = 1;
    spl->fetching_strategy = fetching_strategy;
    
    init_spool(spl, &spl->current_msg_spool, &latest_msg_id);
    spl->current_msg_spool.msg_status = MSG_EXPECTED;
    
    spl->handlers = handlers;
    spl->handlers_privdata = handlers_privdata;
    
    spl->cf = cf;
    
    return spl;
  }
  else {
    ERR("looks like spooler is already running. make sure spooler->running=0 before starting.");
    assert(0);
    return NULL;
  }
}
Example #27
void mesh_detach_subs(struct module_qstate* qstate)
{
	struct mesh_area* mesh = qstate->env->mesh;
	struct mesh_state_ref* ref, lookup;
#ifdef UNBOUND_DEBUG
	struct rbnode_t* n;
#endif
	lookup.node.key = &lookup;
	lookup.s = qstate->mesh_info;
	RBTREE_FOR(ref, struct mesh_state_ref*, &qstate->mesh_info->sub_set) {
#ifdef UNBOUND_DEBUG
		n =
#endif
		rbtree_delete(&ref->s->super_set, &lookup);
		log_assert(n != NULL); /* must have been present */
		if(!ref->s->reply_list && !ref->s->cb_list
			&& ref->s->super_set.count == 0) {
			mesh->num_detached_states++;
			log_assert(mesh->num_detached_states + 
				mesh->num_reply_states <= mesh->all.count);
		}
	}
	rbtree_init(&qstate->mesh_info->sub_set, &mesh_state_ref_compare);
}
Example #28
/** Read file to test NSEC3 hash algo */
static void
nsec3_hash_test(const char* fname)
{
	/* 
	 * The list contains a list of ldns-testpkts entries.
	 * Every entry is a test.
	 * 	The qname is hashed.
	 * 	The answer section AAAA RR name is the required result.
	 * 	The auth section NSEC3 is used to get hash parameters.
	 * The hash cache is maintained per file.
	 *
	 * The test does not perform canonicalization during the compare.
	 */
	rbtree_t ct;
	struct regional* region = regional_create();
	struct alloc_cache alloc;
	ldns_buffer* buf = ldns_buffer_new(65535);
	struct entry* e;
	struct entry* list = read_datafile(fname);

	if(!list)
		fatal_exit("could not read %s: %s", fname, strerror(errno));
	rbtree_init(&ct, &nsec3_hash_cmp);
	alloc_init(&alloc, NULL, 1);
	unit_assert(region && buf);

	/* ready to go! */
	for(e = list; e; e = e->next) {
		nsec3_hash_test_entry(e, &ct, &alloc, region, buf);
	}

	delete_entry(list);
	regional_destroy(region);
	alloc_clear(&alloc);
	ldns_buffer_free(buf);
}
Example #29
adlb_code
xlb_workq_init(int work_types, const xlb_layout *layout)
{
  assert(work_types >= 1);
  DEBUG("xlb_workq_init(work_types=%i)", work_types);

  adlb_code ac;

  bool ok = ptr_array_init(&wu_array, WU_ARRAY_INIT_SIZE);
  CHECK_MSG(ok, "wu_array initialisation failed");

  targeted_work_size = targeted_work_entries(work_types,
                                    layout->my_workers);
  ac = init_work_heaps(&targeted_work, targeted_work_size);
  ADLB_CHECK(ac);

  host_targeted_work_size = targeted_work_entries(work_types,
                                          layout->my_worker_hosts);
  ac = init_work_heaps(&host_targeted_work, host_targeted_work_size);
  ADLB_CHECK(ac);

  ac = init_work_heaps(&untargeted_work, work_types);
  ADLB_CHECK(ac);

  parallel_work = malloc(sizeof(parallel_work[0]) * (size_t)work_types);
  xlb_workq_parallel_task_count = 0;
  valgrind_assert(parallel_work != NULL);
  for (int i = 0; i < work_types; i++)
  {
    rbtree_init(&parallel_work[i]);
  }

  if (xlb_s.perfc_enabled)
  {
    DEBUG("PERF COUNTERS ENABLED");
    xlb_task_counters = malloc(sizeof(*xlb_task_counters) *
                               (size_t)work_types);
    valgrind_assert(xlb_task_counters != NULL);
    for (int i = 0; i < work_types; i++)
    {
      xlb_task_counters[i].targeted_enqueued = 0;
      xlb_task_counters[i].targeted_bypass = 0;
      xlb_task_counters[i].single_enqueued = 0;
      xlb_task_counters[i].single_bypass = 0;
      xlb_task_counters[i].single_stolen = 0;
      xlb_task_counters[i].parallel_enqueued = 0;
      xlb_task_counters[i].parallel_bypass = 0;
      xlb_task_counters[i].parallel_stolen = 0;

      xlb_task_counters[i].targeted_data_wait = 0;
      xlb_task_counters[i].targeted_data_no_wait = 0;
      xlb_task_counters[i].single_data_wait = 0;
      xlb_task_counters[i].single_data_no_wait = 0;
      xlb_task_counters[i].parallel_data_wait = 0;
      xlb_task_counters[i].parallel_data_no_wait = 0;
    }
  }
  else
  {
    xlb_task_counters = NULL;
  }

  return ADLB_SUCCESS;
}
Example #30
void
tb_init(struct rbtree *rb)
{
  rbtree_init(rb, &tc_less, NULL);
}