/**
 * Create the global trust-anchor store.
 * Allocates the structure, its memory region, the anchor tree and the
 * autotrust global state, then initializes and registers the lock.
 * @return the new store, or NULL on allocation failure (partially built
 *	state is torn down before returning).
 */
struct val_anchors* anchors_create(void)
{
	struct val_anchors* anch;

	anch = (struct val_anchors*)calloc(1, sizeof(*anch));
	if(!anch)
		return NULL;
	anch->region = regional_create();
	if(!anch->region) {
		/* nothing else allocated yet; plain free suffices */
		free(anch);
		return NULL;
	}
	anch->tree = rbtree_create(anchor_cmp);
	if(!anch->tree) {
		/* NOTE(review): anchors_delete runs before lock_basic_init
		 * here — presumably it tolerates the uninitialized lock;
		 * confirm against anchors_delete. */
		anchors_delete(anch);
		return NULL;
	}
	anch->autr = autr_global_create();
	if(!anch->autr) {
		anchors_delete(anch);
		return NULL;
	}
	lock_basic_init(&anch->lock);
	/* register the memory areas guarded by the lock for the
	 * lock-checking builds */
	lock_protect(&anch->lock, anch, sizeof(*anch));
	lock_protect(&anch->lock, anch->autr, sizeof(*anch->autr));
	return anch;
}
/**
 * Initialize an allocation cache.
 * @param alloc: the cache to initialize (zeroed first).
 * @param super: parent cache, or NULL for the shared top-level cache.
 * @param thread_num: owning thread number, used to carve out this
 *	thread's private id range.
 */
void alloc_init(struct alloc_cache* alloc, struct alloc_cache* super,
	int thread_num)
{
	memset(alloc, 0, sizeof(*alloc));
	alloc->super = super;
	alloc->thread_num = thread_num;

	/* Compute [next_id .. last_id] for this thread in small steps so
	 * that every operation happens in the width of the *_id field
	 * itself; this avoids 64-bit constants and implicit 'int'
	 * arithmetic, for compiler portability. */
	alloc->next_id = (uint64_t)thread_num;
	alloc->next_id <<= THRNUM_SHIFT;
	alloc->last_id = 1;
	alloc->last_id <<= THRNUM_SHIFT;
	alloc->last_id -= 1;
	alloc->last_id |= alloc->next_id;
	/* skip id 0: it is reserved as a special value */
	alloc->next_id += 1;

	alloc->max_reg_blocks = 100;
	alloc->num_reg_blocks = 0;
	alloc->reg_list = NULL;
	alloc->cleanup = NULL;
	alloc->cleanup_arg = NULL;

	/* per-thread caches prefill their free list from the parent */
	if(alloc->super)
		prealloc_blocks(alloc, alloc->max_reg_blocks);
	/* the top-level cache is shared between threads and needs a lock */
	if(!alloc->super) {
		lock_quick_init(&alloc->lock);
		lock_protect(&alloc->lock, alloc, sizeof(*alloc));
	}
}
/** create a new localzone */ static struct local_zone* local_zone_create(uint8_t* nm, size_t len, int labs, enum localzone_type t, uint16_t dclass) { struct local_zone* z = (struct local_zone*)calloc(1, sizeof(*z)); if(!z) { return NULL; } z->node.key = z; z->dclass = dclass; z->type = t; z->name = nm; z->namelen = len; z->namelabs = labs; lock_rw_init(&z->lock); z->region = regional_create(); if(!z->region) { free(z); return NULL; } rbtree_init(&z->data, &local_data_cmp); lock_protect(&z->lock, &z->parent, sizeof(*z)-sizeof(rbnode_t)); /* also the zones->lock protects node, parent, name*, class */ return z; }
/**
 * Package a query and its reply into a hash-table entry.
 * The query contents (including the qname pointer) are copied into the
 * entry and q->qname is set to NULL, so qname ownership moves to the
 * entry on success.
 * @param q: query info; its qname pointer is taken over.
 * @param r: reply info, stored as the entry data.
 * @param h: precomputed hash value for the entry.
 * @return the new entry, or NULL on allocation failure (q unchanged).
 */
struct msgreply_entry* query_info_entrysetup(struct query_info* q,
	struct reply_info* r, hashvalue_t h)
{
	struct msgreply_entry* e;

	e = (struct msgreply_entry*)malloc(sizeof(struct msgreply_entry));
	if(!e)
		return NULL;
	memcpy(&e->key, q, sizeof(*q));
	e->entry.hash = h;
	e->entry.key = e;
	e->entry.data = r;
	lock_rw_init(&e->entry.lock);
	/* register the lock-guarded areas for lock-checking builds */
	lock_protect(&e->entry.lock, &e->key, sizeof(e->key));
	/* hash, key and data are adjacent fields; one span covers them */
	lock_protect(&e->entry.lock, &e->entry.hash, sizeof(e->entry.hash)
		+ sizeof(e->entry.key) + sizeof(e->entry.data));
	lock_protect(&e->entry.lock, e->key.qname, e->key.qname_len);
	/* ownership of the qname buffer has moved into the entry */
	q->qname = NULL;
	return e;
}
/**
 * Create the collection of local zones.
 * @return new, empty zones structure, or NULL on allocation failure.
 */
struct local_zones* local_zones_create(void)
{
	struct local_zones* zs;

	zs = (struct local_zones*)calloc(1, sizeof(*zs));
	if(!zs)
		return NULL;
	rbtree_init(&zs->ztree, &local_zone_cmp);
	lock_rw_init(&zs->lock);
	/* the zones lock guards the zone tree;
	 * also lock protects the rbnode's in struct local_zone */
	lock_protect(&zs->lock, &zs->ztree, sizeof(zs->ztree));
	return zs;
}
/**
 * Create the negative cache for the validator.
 * @param cfg: config; when non-NULL its neg_cache_size overrides the
 *	built-in default maximum.
 * @param maxiter: maximum NSEC3 iteration count to accept.
 * @return new negative cache, or NULL on allocation failure (logged).
 */
struct val_neg_cache* val_neg_create(struct config_file* cfg,
	size_t maxiter)
{
	struct val_neg_cache* negcache;

	negcache = (struct val_neg_cache*)calloc(1, sizeof(*negcache));
	if(!negcache) {
		log_err("Could not create neg cache: out of memory");
		return NULL;
	}
	negcache->nsec3_max_iter = maxiter;
	/* default size; 1 M is thousands of entries */
	negcache->max = 1024*1024;
	if(cfg)
		negcache->max = cfg->neg_cache_size;
	rbtree_init(&negcache->tree, &val_neg_zone_compare);
	lock_basic_init(&negcache->lock);
	lock_protect(&negcache->lock, negcache, sizeof(*negcache));
	return negcache;
}
/**
 * Initialize the testframe cachedb backend.
 * Allocates the module data, stores it in the cachedb environment and
 * sets up its lock.
 * @param env: module environment (unused).
 * @param cachedb_env: cachedb environment; backend_data is set here
 *	(to NULL when allocation fails).
 * @return 1 on success, 0 on out-of-memory (logged).
 */
static int testframe_init(struct module_env* env,
	struct cachedb_env* cachedb_env)
{
	struct testframe_moddata* d;
	(void)env;
	verbose(VERB_ALGO, "testframe_init");
	d = (struct testframe_moddata*)calloc(1,
		sizeof(struct testframe_moddata));
	/* store the (possibly NULL) pointer before the check, so the
	 * environment always reflects the allocation outcome */
	cachedb_env->backend_data = (void*)d;
	if(!d) {
		log_err("out of memory");
		return 0;
	}
	lock_basic_init(&d->lock);
	lock_protect(&d->lock, d, sizeof(*d));
	return 1;
}