/** check integrity of hash table */ static void check_table(struct lruhash* table) { struct lruhash_entry* p; size_t c = 0; lock_quick_lock(&table->lock); unit_assert( table->num <= table->size); unit_assert( table->size_mask == (int)table->size-1 ); unit_assert( (table->lru_start && table->lru_end) || (!table->lru_start && !table->lru_end) ); unit_assert( table->space_used <= table->space_max ); /* check lru list integrity */ if(table->lru_start) unit_assert(table->lru_start->lru_prev == NULL); if(table->lru_end) unit_assert(table->lru_end->lru_next == NULL); p = table->lru_start; while(p) { if(p->lru_prev) { unit_assert(p->lru_prev->lru_next == p); } if(p->lru_next) { unit_assert(p->lru_next->lru_prev == p); } c++; p = p->lru_next; } unit_assert(c == table->num); /* this assertion is specific to the unit test */ unit_assert( table->space_used == table->num * test_slabhash_sizefunc(NULL, NULL) ); lock_quick_unlock(&table->lock); }
/** Clear an alloc cache: hand quarantined special entries back to the
 * super (global) alloc when one exists, otherwise delete them, and
 * free the cached regional blocks. NULL alloc is a no-op. */
void alloc_clear(struct alloc_cache* alloc)
{
	alloc_special_type* p;
	struct regional* r, *nr;
	if(!alloc)
		return;
	if(!alloc->super) {
		/* this cache is the super itself; it owns a real lock,
		 * destroyed here as part of teardown */
		lock_quick_destroy(&alloc->lock);
	}
	if(alloc->super && alloc->quar) {
		/* push entire list into super */
		p = alloc->quar;
		while(alloc_special_next(p)) /* find last */
			p = alloc_special_next(p);
		lock_quick_lock(&alloc->super->lock);
		/* splice our whole quarantine list in front of the
		 * super's list, in one lock acquisition */
		alloc_set_special_next(p, alloc->super->quar);
		alloc->super->quar = alloc->quar;
		alloc->super->num_quar += alloc->num_quar;
		lock_quick_unlock(&alloc->super->lock);
	} else {
		alloc_clear_special_list(alloc);
	}
	alloc->quar = 0;
	alloc->num_quar = 0;
	/* free the singly linked list of cached regional blocks */
	r = alloc->reg_list;
	while(r) {
		nr = (struct regional*)r->next;
		free(r);
		r = nr;
	}
	alloc->reg_list = NULL;
	alloc->num_reg_blocks = 0;
}
size_t slabhash_get_size(struct slabhash* sl) { size_t i, total = 0; for(i=0; i<sl->size; i++) { lock_quick_lock(&sl->array[i]->lock); total += sl->array[i]->space_max; lock_quick_unlock(&sl->array[i]->lock); } return total; }
/** total number of entries currently stored across all slabs */
size_t count_slabhash_entries(struct slabhash* sh)
{
	size_t total = 0;
	size_t i = 0;
	while(i < sh->size) {
		lock_quick_lock(&sh->array[i]->lock);
		total += sh->array[i]->num;
		lock_quick_unlock(&sh->array[i]->lock);
		i++;
	}
	return total;
}
/** Delete all quarantined special entries from the cache.
 * Only the super alloc (no parent) holds a real lock, so the lock
 * is taken only in that case. */
void alloc_clear_special(struct alloc_cache* alloc)
{
	int is_super = (alloc->super == NULL);
	if(is_super) {
		lock_quick_lock(&alloc->lock);
	}
	alloc_clear_special_list(alloc);
	alloc->quar = 0;
	alloc->num_quar = 0;
	if(is_super) {
		lock_quick_unlock(&alloc->lock);
	}
}
/** dump msg cache */
static int dump_msg_cache(SSL* ssl, struct worker* worker)
{
	struct slabhash* sh = worker->env.msg_cache;
	size_t i;
	if(!ssl_printf(ssl, "START_MSG_CACHE\n"))
		return 0;
	for(i = 0; i < sh->size; i++) {
		struct lruhash* h = sh->array[i];
		int ok;
		/* hold the slab lock only while dumping that slab */
		lock_quick_lock(&h->lock);
		ok = dump_msg_lruhash(ssl, worker, h);
		lock_quick_unlock(&h->lock);
		if(!ok)
			return 0;
	}
	return ssl_printf(ssl, "END_MSG_CACHE\n");
}
size_t alloc_get_mem(struct alloc_cache* alloc) { alloc_special_type* p; size_t s = sizeof(*alloc); if(!alloc->super) { lock_quick_lock(&alloc->lock); /* superalloc needs locking */ } s += sizeof(alloc_special_type) * alloc->num_quar; for(p = alloc->quar; p; p = alloc_special_next(p)) { s += lock_get_mem(&p->entry.lock); } s += alloc->num_reg_blocks * ALLOC_REG_SIZE; if(!alloc->super) { lock_quick_unlock(&alloc->lock); } return s; }
/** dump rrset cache */
static int dump_rrset_cache(SSL* ssl, struct worker* worker)
{
	struct rrset_cache* rc = worker->env.rrset_cache;
	size_t i;
	if(!ssl_printf(ssl, "START_RRSET_CACHE\n"))
		return 0;
	for(i = 0; i < rc->table.size; i++) {
		struct lruhash* h = rc->table.array[i];
		int ok;
		/* hold the slab lock only while dumping that slab */
		lock_quick_lock(&h->lock);
		ok = dump_rrset_lruhash(ssl, h, *worker->env.now);
		lock_quick_unlock(&h->lock);
		if(!ok)
			return 0;
	}
	return ssl_printf(ssl, "END_RRSET_CACHE\n");
}
/** test lru_front lru_remove: exercises every add/remove position in
 * the doubly linked lru list (empty, single element, front, back). */
static void test_lru(struct lruhash* table)
{
	testkey_t* k = newkey(12);
	testkey_t* k2 = newkey(14);
	lock_quick_lock(&table->lock);
	unit_assert( table->lru_start == NULL && table->lru_end == NULL);
	/* removing from an empty list must be a harmless no-op */
	lru_remove(table, &k->entry);
	unit_assert( table->lru_start == NULL && table->lru_end == NULL);

	/* add one */
	lru_front(table, &k->entry);
	unit_assert( table->lru_start == &k->entry &&
		table->lru_end == &k->entry);
	/* remove it */
	lru_remove(table, &k->entry);
	unit_assert( table->lru_start == NULL && table->lru_end == NULL);

	/* add two */
	lru_front(table, &k->entry);
	unit_assert( table->lru_start == &k->entry &&
		table->lru_end == &k->entry);
	lru_front(table, &k2->entry);
	unit_assert( table->lru_start == &k2->entry &&
		table->lru_end == &k->entry);
	/* remove first in list */
	lru_remove(table, &k2->entry);
	unit_assert( table->lru_start == &k->entry &&
		table->lru_end == &k->entry);
	lru_front(table, &k2->entry);
	unit_assert( table->lru_start == &k2->entry &&
		table->lru_end == &k->entry);
	/* remove last in list */
	lru_remove(table, &k->entry);
	unit_assert( table->lru_start == &k2->entry &&
		table->lru_end == &k2->entry);
	/* empty the list */
	lru_remove(table, &k2->entry);
	unit_assert( table->lru_start == NULL && table->lru_end == NULL);
	lock_quick_unlock(&table->lock);
	delkey(k);
	delkey(k2);
}
/** Remove a zone */
static void do_zone_remove(SSL* ssl, struct worker* worker, char* arg)
{
	uint8_t* nm;
	int nmlabs;
	size_t nmlen;
	struct local_zone* z;
	struct local_zones* zones = worker->daemon->local_zones;
	if(!parse_arg_name(ssl, arg, &nm, &nmlen, &nmlabs))
		return;
	lock_quick_lock(&zones->lock);
	z = local_zones_find(zones, nm, nmlen, nmlabs, LDNS_RR_CLASS_IN);
	if(z != NULL) {
		/* present in tree */
		local_zones_del_zone(zones, z);
	}
	lock_quick_unlock(&zones->lock);
	free(nm);
	send_ok(ssl);
}
/** entry to reply info conversion: parse the packet of test entry e
 * into query info qi and reply info rep, allocated in region.
 * Fails the unit test (with a diagnostic) if parsing fails. */
static void entry_to_repinfo(struct entry* e, struct alloc_cache* alloc,
	struct regional* region, ldns_buffer* pkt, struct query_info* qi,
	struct reply_info** rep)
{
	int ret;
	struct edns_data edns;
	entry_to_buf(e, pkt);
	/* lock alloc lock to please lock checking software.
	 * alloc_special_obtain assumes it is talking to a ub-alloc,
	 * and does not need to perform locking. Here the alloc is
	 * the only one, so we lock it here */
	lock_quick_lock(&alloc->lock);
	ret = reply_info_parse(pkt, alloc, qi, rep, region, &edns);
	lock_quick_unlock(&alloc->lock);
	if(ret != 0) {
		printf("parse code %d: %s\n", ret,
			ldns_lookup_by_id(ldns_rcodes, ret)->name);
		/* fail the test on parse error. The original code
		 * asserted ret != 0 here, which is always true inside
		 * this branch and so could never trigger. */
		unit_assert(ret == 0);
	}
}
/** Add a new zone via remote control. arg is "<name> <type>"; if the
 * zone already exists only its type is updated. Replies ok or an
 * error string on the ssl channel. */
static void do_zone_add(SSL* ssl, struct worker* worker, char* arg)
{
	uint8_t* nm;
	int nmlabs;
	size_t nmlen;
	char* arg2;
	enum localzone_type t;
	struct local_zone* z;
	/* split arg into the zone name and the type string arg2 */
	if(!find_arg2(ssl, arg, &arg2))
		return;
	if(!parse_arg_name(ssl, arg, &nm, &nmlen, &nmlabs))
		return;
	if(!local_zone_str2type(arg2, &t)) {
		ssl_printf(ssl, "error not a zone type. %s\n", arg2);
		free(nm);
		return;
	}
	lock_quick_lock(&worker->daemon->local_zones->lock);
	if((z=local_zones_find(worker->daemon->local_zones, nm, nmlen,
		nmlabs, LDNS_RR_CLASS_IN))) {
		/* already present in tree */
		lock_rw_wrlock(&z->lock);
		z->type = t; /* update type anyway */
		lock_rw_unlock(&z->lock);
		free(nm);
		lock_quick_unlock(&worker->daemon->local_zones->lock);
		send_ok(ssl);
		return;
	}
	/* NOTE(review): nm is not freed on either branch below;
	 * presumably local_zones_add_zone takes ownership of it
	 * (also on failure) -- confirm against its contract */
	if(!local_zones_add_zone(worker->daemon->local_zones, nm, nmlen,
		nmlabs, LDNS_RR_CLASS_IN, t)) {
		lock_quick_unlock(&worker->daemon->local_zones->lock);
		ssl_printf(ssl, "error out of memory\n");
		return;
	}
	lock_quick_unlock(&worker->daemon->local_zones->lock);
	send_ok(ssl);
}
/** Obtain a special-type entry: first from the local (lock-free)
 * quarantine list, then from the shared super alloc under its lock,
 * and finally by allocating a fresh one. A new id is assigned.
 * @param alloc: per-thread alloc cache, must not be NULL.
 * @return the entry, or NULL on out of memory. */
alloc_special_type* alloc_special_obtain(struct alloc_cache* alloc)
{
	alloc_special_type* p;
	log_assert(alloc);
	/* see if in local cache */
	if(alloc->quar) {
		p = alloc->quar;
		alloc->quar = alloc_special_next(p);
		alloc->num_quar--;
		p->id = alloc_get_id(alloc);
		return p;
	}
	/* see if in global cache */
	if(alloc->super) {
		/* could maybe grab alloc_max/2 entries in one go,
		 * but really, isn't that just as fast as this code? */
		lock_quick_lock(&alloc->super->lock);
		/* p is set even when the super list is empty (NULL) */
		if((p = alloc->super->quar)) {
			alloc->super->quar = alloc_special_next(p);
			alloc->super->num_quar--;
		}
		lock_quick_unlock(&alloc->super->lock);
		if(p) {
			p->id = alloc_get_id(alloc);
			return p;
		}
	}
	/* allocate new */
	prealloc_setup(alloc);
	if(!(p = (alloc_special_type*)malloc(sizeof(alloc_special_type)))) {
		log_err("alloc_special_obtain: out of memory");
		return NULL;
	}
	alloc_setup_special(p);
	p->id = alloc_get_id(alloc);
	return p;
}
/* Add a new zone to the context's local zones; if the zone already
 * exists, only its type is updated. Returns UB_NOERROR, UB_SYNTAX on
 * bad name/type, UB_NOMEM on allocation failure, or the error from
 * ub_ctx_finalize. */
int ub_ctx_zone_add(struct ub_ctx* ctx, char *zone_name, char *zone_type)
{
	enum localzone_type t;
	struct local_zone* z;
	uint8_t* nm;
	int nmlabs;
	size_t nmlen;
	/* the context must be finalized before zones can be changed */
	int res = ub_ctx_finalize(ctx);
	if (res) return res;
	if(!local_zone_str2type(zone_type, &t)) {
		return UB_SYNTAX;
	}
	if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
		return UB_SYNTAX;
	}
	lock_quick_lock(&ctx->local_zones->lock);
	if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
		LDNS_RR_CLASS_IN))) {
		/* already present in tree */
		lock_rw_wrlock(&z->lock);
		z->type = t; /* update type anyway */
		lock_rw_unlock(&z->lock);
		lock_quick_unlock(&ctx->local_zones->lock);
		free(nm);
		return UB_NOERROR;
	}
	/* NOTE(review): nm is not freed below; presumably
	 * local_zones_add_zone takes ownership (also on failure)
	 * -- confirm against its contract */
	if(!local_zones_add_zone(ctx->local_zones, nm, nmlen, nmlabs,
		LDNS_RR_CLASS_IN, t)) {
		lock_quick_unlock(&ctx->local_zones->lock);
		return UB_NOMEM;
	}
	lock_quick_unlock(&ctx->local_zones->lock);
	return UB_NOERROR;
}
/* Remove zone */ int ub_ctx_zone_remove(struct ub_ctx* ctx, char *zone_name) { struct local_zone* z; uint8_t* nm; int nmlabs; size_t nmlen; int res = ub_ctx_finalize(ctx); if (res) return res; if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) { return UB_SYNTAX; } lock_quick_lock(&ctx->local_zones->lock); if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs, LDNS_RR_CLASS_IN))) { /* present in tree */ local_zones_del_zone(ctx->local_zones, z); } lock_quick_unlock(&ctx->local_zones->lock); free(nm); return UB_NOERROR; }
/** Return a special-type entry to the cache. The entry is cleaned and
 * put on the local quarantine list; when the local list is full and a
 * super exists, part of the list is pushed to the super instead.
 * @param alloc: the cache; must not be NULL.
 * @param mem: the entry to release; NULL is a no-op. */
void alloc_special_release(struct alloc_cache* alloc, alloc_special_type* mem)
{
	log_assert(alloc);
	if(!mem)
		return;
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock); /* superalloc needs locking */
	}
	alloc_special_clean(mem);
	if(alloc->super && alloc->num_quar >= ALLOC_SPECIAL_MAX) {
		/* push it to the super structure.
		 * Early return is safe: the lock above is only taken
		 * when alloc->super is NULL, not on this path. */
		pushintosuper(alloc, mem);
		return;
	}
	/* prepend to the local quarantine list */
	alloc_set_special_next(mem, alloc->quar);
	alloc->quar = mem;
	alloc->num_quar++;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
}
/** push mem and some more items to the super.
 * Moves mem plus the first ALLOC_SPECIAL_MAX/2 entries of the local
 * quarantine list to the super's quarantine list under one lock, so
 * the lock cost is amortized over many releases. */
static void pushintosuper(struct alloc_cache* alloc, alloc_special_type* mem)
{
	int i;
	alloc_special_type *p = alloc->quar;
	log_assert(p);
	log_assert(alloc && alloc->super &&
		alloc->num_quar >= ALLOC_SPECIAL_MAX);
	/* push ALLOC_SPECIAL_MAX/2 after mem */
	alloc_set_special_next(mem, alloc->quar);
	for(i=1; i<ALLOC_SPECIAL_MAX/2; i++) {
		p = alloc_special_next(p);
	}
	/* p now is the last of the batch that moves; the remainder
	 * of the old list stays as the local quarantine list */
	alloc->quar = alloc_special_next(p);
	alloc->num_quar -= ALLOC_SPECIAL_MAX/2;
	/* dump mem+list into the super quar list */
	lock_quick_lock(&alloc->super->lock);
	alloc_set_special_next(p, alloc->super->quar);
	alloc->super->quar = mem;
	/* +1 accounts for mem itself at the head of the batch */
	alloc->super->num_quar += ALLOC_SPECIAL_MAX/2 + 1;
	lock_quick_unlock(&alloc->super->lock);
	/* so 1 lock per mem+alloc/2 deletes */
}
/** test bin_find_entry function and bin_overflow_remove: exercises
 * lookup and removal on an overflow chain with empty, single-element
 * and multi-element lists, including hash-collision cases (k, k2 and
 * k4 hash equal but compare different). */
static void test_bin_find_entry(struct lruhash* table)
{
	testkey_t* k = newkey(12);
	testdata_t* d = newdata(128);
	testkey_t* k2 = newkey(12 + 1024);
	testkey_t* k3 = newkey(14);
	testkey_t* k4 = newkey(12 + 1024*2);
	hashvalue_t h = myhash(12);
	struct lruhash_bin bin;
	memset(&bin, 0, sizeof(bin));
	bin_init(&bin, 1);

	/* remove from empty list */
	bin_overflow_remove(&bin, &k->entry);
	/* find in empty list */
	unit_assert( bin_find_entry(table, &bin, h, k) == NULL );

	/* insert */
	lock_quick_lock(&bin.lock);
	bin.overflow_list = &k->entry;
	lock_quick_unlock(&bin.lock);
	/* find, hash not OK. */
	unit_assert( bin_find_entry(table, &bin, myhash(13), k) == NULL );
	/* find, hash OK, but cmp not */
	unit_assert( k->entry.hash == k2->entry.hash );
	unit_assert( bin_find_entry(table, &bin, h, k2) == NULL );
	/* find, hash OK, and cmp too */
	unit_assert( bin_find_entry(table, &bin, h, k) == &k->entry );

	/* remove the element */
	lock_quick_lock(&bin.lock);
	bin_overflow_remove(&bin, &k->entry);
	lock_quick_unlock(&bin.lock);
	unit_assert( bin_find_entry(table, &bin, h, k) == NULL );

	/* prepend two different elements; so the list is long */
	/* one has the same hash, but different cmp */
	lock_quick_lock(&bin.lock);
	unit_assert( k->entry.hash == k4->entry.hash );
	/* build the chain k3 -> k4 -> k by hand */
	k4->entry.overflow_next = &k->entry;
	k3->entry.overflow_next = &k4->entry;
	bin.overflow_list = &k3->entry;
	lock_quick_unlock(&bin.lock);

	/* find, hash not OK. */
	unit_assert( bin_find_entry(table, &bin, myhash(13), k) == NULL );
	/* find, hash OK, but cmp not */
	unit_assert( k->entry.hash == k2->entry.hash );
	unit_assert( bin_find_entry(table, &bin, h, k2) == NULL );
	/* find, hash OK, and cmp too */
	unit_assert( bin_find_entry(table, &bin, h, k) == &k->entry );

	/* remove middle element */
	unit_assert( bin_find_entry(table, &bin, k4->entry.hash, k4)
		== &k4->entry );
	lock_quick_lock(&bin.lock);
	bin_overflow_remove(&bin, &k4->entry);
	lock_quick_unlock(&bin.lock);
	unit_assert( bin_find_entry(table, &bin, k4->entry.hash, k4) == NULL);

	/* remove last element */
	lock_quick_lock(&bin.lock);
	bin_overflow_remove(&bin, &k->entry);
	lock_quick_unlock(&bin.lock);
	unit_assert( bin_find_entry(table, &bin, h, k) == NULL );

	lock_quick_destroy(&bin.lock);
	delkey(k);
	delkey(k2);
	delkey(k3);
	delkey(k4);
	deldata(d);
}