/** enter a data RR into auth data; a zone for it must exist
 * @param zones: the local zones collection (tree lock taken inside).
 * @param rr: RR in zone-file text format.
 * @return 0 on parse or insert failure, 1 on success.
 *	Exits fatally if no zone covers the RR owner name. */
static int
lz_enter_rr_str(struct local_zones* zones, const char* rr)
{
	uint8_t* rr_name;
	uint16_t rr_class;
	size_t len;
	int labs;
	struct local_zone* z;
	int r;
	if(!get_rr_nameclass(rr, &rr_name, &rr_class)) {
		log_err("bad rr %s", rr);
		return 0;
	}
	labs = dname_count_size_labels(rr_name, &len);
	lock_rw_rdlock(&zones->lock);
	z = local_zones_lookup(zones, rr_name, len, labs, rr_class);
	if(!z) {
		lock_rw_unlock(&zones->lock);
		/* caller guarantees a zone exists (e.g. via implicit zone
		 * setup); absence is an internal invariant violation */
		fatal_exit("internal error: no zone for rr %s", rr);
	}
	/* lock the zone before dropping the tree lock, so the zone
	 * cannot disappear from under us */
	lock_rw_wrlock(&z->lock);
	lock_rw_unlock(&zones->lock);
	free(rr_name);
	r = lz_enter_rr_into_zone(z, rr);
	lock_rw_unlock(&z->lock);
	return r;
}
/** Look up the forward zone that applies to qname/qclass.
 * Finds the closest tree entry at or below qname, then climbs to the
 * entry whose name is a suffix of qname (longest-match semantics).
 * @return the delegation point of the matching forward zone, or NULL
 *	when no forward zone covers qname in this class. */
struct delegpt*
forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
{
	/* lookup the forward zone in the tree */
	rbnode_t* res = NULL;
	struct iter_forward_zone *result;
	struct iter_forward_zone key;
	key.node.key = &key;	/* stack key; node points at itself */
	key.dclass = qclass;
	key.name = qname;
	key.namelabs = dname_count_size_labels(qname, &key.namelen);
	if(rbtree_find_less_equal(fwd->tree, &key, &res)) {
		/* exact match: qname is itself a forward zone name */
		result = (struct iter_forward_zone*)res;
	} else {
		/* smaller element (or no element) */
		int m;
		result = (struct iter_forward_zone*)res;
		if(!result || result->dclass != qclass)
			return NULL;
		/* count number of labels matched between qname and the
		 * smaller entry; shared labels bound the enclosing zone */
		(void)dname_lab_cmp(result->name, result->namelabs, key.name,
			key.namelabs, &m);
		while(result) { /* go up until qname is subdomain of stub */
			if(result->namelabs <= m)
				break;
			result = result->parent;
		}
	}
	if(result)
		return result->dp;
	return NULL;
}
/** Add one RR (zone-file text format) to the local zones, creating a
 * transparent zone for its owner name if none exists yet.
 * @return 0 on parse/allocation failure, 1 on success. */
int
local_zones_add_RR(struct local_zones* zones, const char* rr)
{
	uint8_t* rr_name;
	uint16_t rr_class;
	size_t len;
	int labs;
	struct local_zone* z;
	int r;
	if(!get_rr_nameclass(rr, &rr_name, &rr_class)) {
		return 0;
	}
	labs = dname_count_size_labels(rr_name, &len);
	/* could first try readlock then get writelock if zone does not exist,
	 * but we do not add enough RRs (from multiple threads) to optimize */
	lock_rw_wrlock(&zones->lock);
	z = local_zones_lookup(zones, rr_name, len, labs, rr_class);
	if(!z) {
		/* rr_name ownership passes to the new zone here;
		 * NOTE(review): presumably local_zones_add_zone also frees
		 * the name on failure -- confirm in its definition */
		z = local_zones_add_zone(zones, rr_name, len, labs,
			rr_class, local_zone_transparent);
		if(!z) {
			lock_rw_unlock(&zones->lock);
			return 0;
		}
	} else {
		/* zone already exists; our name copy is not needed */
		free(rr_name);
	}
	/* lock zone before dropping the tree lock, so it cannot be
	 * deleted from under us */
	lock_rw_wrlock(&z->lock);
	lock_rw_unlock(&zones->lock);
	r = lz_enter_rr_into_zone(z, rr);
	lock_rw_unlock(&z->lock);
	return r;
}
/** Set the name of a malloc-allocated delegation point.
 * The dname is copied; label count and length are recorded on dp.
 * @return 1 on success, 0 when the name copy could not be allocated. */
int
delegpt_set_name_mlc(struct delegpt* dp, uint8_t* name)
{
	log_assert(dp->dp_type_mlc);
	dp->namelabs = dname_count_size_labels(name, &dp->namelen);
	dp->name = memdup(name, dp->namelen);
	if(dp->name == NULL)
		return 0;
	return 1;
}
/** Add a nameserver (by name) to a malloc-allocated delegation point.
 * A name already on the list is accepted and reported as success.
 * @param lame: whether the server is marked lame.
 * @return 1 on success (or duplicate), 0 on out-of-memory. */
int
delegpt_add_ns_mlc(struct delegpt* dp, uint8_t* name, uint8_t lame)
{
	struct delegpt_ns* entry;
	size_t namelen;
	(void)dname_count_size_labels(name, &namelen);
	log_assert(dp->dp_type_mlc);
	/* slow check for duplicates to avoid counting failures when
	 * adding the same server as a dependency twice */
	if(delegpt_find_ns(dp, name, namelen))
		return 1;
	entry = (struct delegpt_ns*)malloc(sizeof(*entry));
	if(entry == NULL)
		return 0;
	entry->namelen = namelen;
	entry->name = memdup(name, namelen);
	if(entry->name == NULL) {
		free(entry);
		return 0;
	}
	/* push onto the front of the nameserver list */
	entry->next = dp->nslist;
	dp->nslist = entry;
	/* fresh server: nothing resolved or probed yet */
	entry->resolved = 0;
	entry->got4 = 0;
	entry->got6 = 0;
	entry->lame = (uint8_t)lame;
	entry->done_pside4 = 0;
	entry->done_pside6 = 0;
	return 1;
}
/** Remove an insecure point (negative trust anchor) for class c, name nm.
 * If the entry holds real anchor material (keys, autotrust state, or
 * DS/DNSKEY counts) it is left in place. */
void
anchors_delete_insecure(struct val_anchors* anchors, uint16_t c,
	uint8_t* nm)
{
	struct trust_anchor key;
	struct trust_anchor* ta;
	/* stack key for exact tree search */
	key.node.key = &key;
	key.name = nm;
	key.namelabs = dname_count_size_labels(nm, &key.namelen);
	key.dclass = c;
	lock_basic_lock(&anchors->lock);
	if(!(ta=(struct trust_anchor*)rbtree_search(anchors->tree, &key))) {
		lock_basic_unlock(&anchors->lock);
		/* nothing there */
		return;
	}
	/* lock it to drive away other threads that use it */
	lock_basic_lock(&ta->lock);
	/* see if its really an insecure point */
	if(ta->keylist || ta->autr || ta->numDS || ta->numDNSKEY) {
		lock_basic_unlock(&anchors->lock);
		lock_basic_unlock(&ta->lock);
		/* its not an insecure point, do not remove it */
		return;
	}
	/* remove from tree while still holding the collection lock */
	(void)rbtree_delete(anchors->tree, &ta->node);
	anchors_init_parents_locked(anchors);
	lock_basic_unlock(&anchors->lock);
	/* actual free of data; unlock first since delfunc destroys it */
	lock_basic_unlock(&ta->lock);
	anchors_delfunc(&ta->node, NULL);
}
/** Add a nameserver (by name) to a region-allocated delegation point.
 * A name already on the list is accepted and reported as success.
 * @return 1 on success (or duplicate), 0 on allocation failure. */
int
delegpt_add_ns(struct delegpt* dp, struct regional* region, uint8_t* name,
	uint8_t lame)
{
	struct delegpt_ns* ns;
	size_t len;
	(void)dname_count_size_labels(name, &len);
	log_assert(!dp->dp_type_mlc);
	/* slow check for duplicates to avoid counting failures when
	 * adding the same server as a dependency twice */
	if(delegpt_find_ns(dp, name, len))
		return 1;
	ns = (struct delegpt_ns*)regional_alloc(region,
		sizeof(struct delegpt_ns));
	if(!ns)
		return 0;
	/* NOTE(review): the node is linked into nslist before the name
	 * copy is checked; on name-allocation failure the list holds an
	 * entry with a NULL name while 0 is returned. Region memory
	 * cannot be unlinked cheaply -- presumably callers discard the
	 * whole delegpt on failure; confirm at call sites. */
	ns->next = dp->nslist;
	ns->namelen = len;
	dp->nslist = ns;
	ns->name = regional_alloc_init(region, name, ns->namelen);
	ns->resolved = 0;
	ns->got4 = 0;
	ns->got6 = 0;
	ns->lame = lame;
	ns->done_pside4 = 0;
	ns->done_pside6 = 0;
	return ns->name != 0;
}
/**
 * Find best signer name in this set of rrsigs.
 * @param rrset: which rrsigs to look through.
 * @param qinf: the query name that needs validation.
 * @param signer_name: the best signer_name. Updated if a better one is found.
 * @param signer_len: length of signer name.
 * @param matchcount: count of current best name (starts at 0 for no match).
 *	Updated if match is improved.
 */
static void
val_find_best_signer(struct ub_packed_rrset_key* rrset,
	struct query_info* qinf, uint8_t** signer_name, size_t* signer_len,
	int* matchcount)
{
	struct packed_rrset_data* d = (struct packed_rrset_data*)
		rrset->entry.data;
	uint8_t* sign;
	size_t i;
	int m;
	/* RRSIG records are stored after the data RRs:
	 * indexes [count, count+rrsig_count) */
	for(i=d->count; i<d->count+d->rrsig_count; i++) {
		/* skip rdlength (2) and the fixed RRSIG fields (18)
		 * to reach the signer name inside the signature rdata */
		sign = d->rr_data[i]+2+18;
		/* look at signatures that are valid (long enough),
		 * and have a signer name that is a superdomain of qname,
		 * and then check the number of labels in the shared topdomain
		 * improve the match if possible */
		if(d->rr_len[i] > 2+19 && /* rdata, sig + root label*/
			dname_subdomain_c(qinf->qname, sign)) {
			/* m = number of labels shared with qname */
			(void)dname_lab_cmp(qinf->qname,
				dname_count_labels(qinf->qname),
				sign, dname_count_labels(sign), &m);
			if(m > *matchcount) {
				*matchcount = m;
				*signer_name = sign;
				(void)dname_count_size_labels(*signer_name,
					signer_len);
			}
		}
	}
}
/** Set the name of a region-allocated delegation point.
 * The dname is copied into the region; label count and length are
 * recorded on dp.
 * @return 1 on success, 0 when the region allocation failed. */
int
delegpt_set_name(struct delegpt* dp, struct regional* region, uint8_t* name)
{
	log_assert(!dp->dp_type_mlc);
	dp->namelabs = dname_count_size_labels(name, &dp->namelen);
	dp->name = regional_alloc_init(region, name, dp->namelen);
	if(dp->name == NULL)
		return 0;
	return 1;
}
/** test dname_count_size_labels: table-driven checks of label count
 * and total wireformat size for a few fixed dnames */
static void
dname_test_count_size_labels(void)
{
	static const struct {
		const char* wire;	/* wireformat dname */
		int labs;		/* expected label count */
		size_t size;		/* expected total size */
	} cases[] = {
		{ "", 1, 1 },				/* root */
		{ "\003com", 2, 5 },
		{ "\003org", 2, 5 },
		{ "\007example\003com", 3, 13 },
		{ "\003bla\007example\003com", 4, 17 },
	};
	size_t i, sz;
	unit_show_func("util/data/dname.c", "dname_count_size_labels");
	for(i = 0; i < sizeof(cases)/sizeof(cases[0]); i++) {
		sz = 0;
		unit_assert(dname_count_size_labels(
			(uint8_t*)cases[i].wire, &sz) == cases[i].labs);
		unit_assert(sz == cases[i].size);
	}
}
/** enter data RR into auth zone
 * @param z: the local zone to add to (caller holds its lock).
 * @param rrstr: RR in zone-file text format.
 * @return 0 on parse/allocation error, 1 on success; duplicate RRs are
 *	accepted silently. */
static int
lz_enter_rr_into_zone(struct local_zone* z, const char* rrstr)
{
	uint8_t* nm;
	size_t nmlen;
	int nmlabs;
	struct local_data* node;
	struct local_rrset* rrset;
	struct packed_rrset_data* pd;
	uint16_t rrtype = 0, rrclass = 0;
	time_t ttl = 0;
	uint8_t rr[LDNS_RR_BUF_SIZE];	/* scratch wireformat buffer */
	uint8_t* rdata;
	size_t rdata_len;
	if(!get_rr_content(rrstr, &nm, &rrtype, &rrclass, &ttl, rr,
		sizeof(rr), &rdata, &rdata_len)) {
		log_err("bad local-data: %s", rrstr);
		return 0;
	}
	log_assert(z->dclass == rrclass);
	/* redirect zones answer from the apex only; data below the apex
	 * would never be served, so reject it up front */
	if(z->type == local_zone_redirect &&
		query_dname_compare(z->name, nm) != 0) {
		log_err("local-data in redirect zone must reside at top of zone"
			", not at %s", rrstr);
		free(nm);
		return 0;
	}
	nmlabs = dname_count_size_labels(nm, &nmlen);
	/* find or create the owner-name node; the node keeps its own
	 * copy of the name, so nm is freed on every path below */
	if(!lz_find_create_node(z, nm, nmlen, nmlabs, &node)) {
		free(nm);
		return 0;
	}
	log_assert(node);
	free(nm);
	rrset = local_data_find_type(node, rrtype);
	if(!rrset) {
		rrset = new_local_rrset(z->region, node, rrtype, rrclass);
		if(!rrset)
			return 0;
		/* rrsets at the zone apex get special handling */
		if(query_dname_compare(node->name, z->name) == 0) {
			if(rrtype == LDNS_RR_TYPE_NSEC)
				rrset->rrset->rk.flags =
					PACKED_RRSET_NSEC_AT_APEX;
			if(rrtype == LDNS_RR_TYPE_SOA)
				z->soa = rrset->rrset;
		}
	}
	pd = (struct packed_rrset_data*)rrset->rrset->entry.data;
	log_assert(rrset && pd);
	/* check for duplicate RR */
	if(rr_is_duplicate(pd, rdata, rdata_len)) {
		verbose(VERB_ALGO, "ignoring duplicate RR: %s", rrstr);
		return 1;
	}
	return insert_rr(z->region, pd, rdata, rdata_len, ttl);
}
/** Exact-match lookup of a forward zone by class and wireformat name.
 * @return the forward zone entry, or NULL when none exists. */
static struct iter_forward_zone*
fwd_zone_find(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
{
	struct iter_forward_zone k;
	k.node.key = &k;	/* stack key; node points at itself */
	k.name = nm;
	k.dclass = c;
	k.namelabs = dname_count_size_labels(nm, &k.namelen);
	return (struct iter_forward_zone*)rbtree_search(fwd->tree, &k);
}
/** Form wireformat from text format domain name.
 * @param str: text form domain name.
 * @param res: receives malloced wireformat name (caller frees).
 * @param len: receives length of the wireformat name.
 * @param labs: receives label count; 0 on failure.
 * @return 0 on parse failure (logged), 1 on success. */
int
parse_dname(const char* str, uint8_t** res, size_t* len, int* labs)
{
	*labs = 0;
	*res = sldns_str2wire_dname(str, len);
	if(*res == NULL) {
		log_err("cannot parse name %s", str);
		return 0;
	}
	*labs = dname_count_size_labels(*res, len);
	return 1;
}
/** compress domain names in rdata, return RETVAL_*
 * Walks the rdata fields per the RR type descriptor; dnames are emitted
 * through the compression tree, other fields are copied verbatim.
 * @param pkt: output packet, positioned at the rdlength slot.
 * @param rdata: uncompressed rdata, starting with the 2-byte rdlength.
 * @param todolen: total rdata length including those 2 length bytes.
 * @param region: allocator for compression tree nodes.
 * @param tree: compression tree of dnames already in the packet.
 * @param desc: wireformat descriptor for this RR type. */
static int
compress_rdata(sldns_buffer* pkt, uint8_t* rdata, size_t todolen,
	struct regional* region, struct compress_tree_node** tree,
	const sldns_rr_descriptor* desc)
{
	int labs, r, rdf = 0;
	size_t dname_len, len, pos = sldns_buffer_position(pkt);
	uint8_t count = desc->_dname_count;	/* dnames left to compress */
	sldns_buffer_skip(pkt, 2); /* rdata len fill in later */
	/* space for rdatalen checked for already */
	rdata += 2;
	todolen -= 2;
	/* walk descriptor fields only while dnames remain; after the
	 * last dname the rest is copied in one go below */
	while(todolen > 0 && count) {
		switch(desc->_wireformat[rdf]) {
		case LDNS_RDF_TYPE_DNAME:
			labs = dname_count_size_labels(rdata, &dname_len);
			if((r=compress_any_dname(rdata, pkt, labs, region,
				tree)) != RETVAL_OK)
				return r;
			rdata += dname_len;
			todolen -= dname_len;
			count--;
			len = 0;	/* dname already written to pkt */
			break;
		case LDNS_RDF_TYPE_STR:
			/* character-string: 1 length byte plus data */
			len = *rdata + 1;
			break;
		default:
			/* fixed-size field per rdf type */
			len = get_rdf_size(desc->_wireformat[rdf]);
		}
		if(len) {
			/* copy over */
			if(sldns_buffer_remaining(pkt) < len)
				return RETVAL_TRUNC;
			sldns_buffer_write(pkt, rdata, len);
			todolen -= len;
			rdata += len;
		}
		rdf++;
	}
	/* copy remainder */
	if(todolen > 0) {
		if(sldns_buffer_remaining(pkt) < todolen)
			return RETVAL_TRUNC;
		sldns_buffer_write(pkt, rdata, todolen);
	}
	/* set rdata len to the compressed size actually written */
	sldns_buffer_write_u16_at(pkt, pos, sldns_buffer_position(pkt)-pos-2);
	return RETVAL_OK;
}
/** Delete the stub hint for class c and wireformat name nm, if present.
 * Rebuilds the tree's parent pointers after removal. */
void
hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm)
{
	struct iter_hints_stub* stub;
	size_t nmlen;
	int nmlabs = dname_count_size_labels(nm, &nmlen);
	stub = (struct iter_hints_stub*)name_tree_find(&hints->tree,
		nm, nmlen, nmlabs, c);
	if(stub == NULL)
		return; /* no such stub, nothing to do */
	(void)rbtree_delete(&hints->tree, &stub->node);
	hints_stub_free(stub);
	name_tree_init_parents(&hints->tree);
}
/** insert a stub hole (if necessary) for stub name
 * A hole is an entry with a NULL delegation point that masks an
 * enclosing forward zone for this name.
 * @return 0 on allocation failure, 1 on success or when not needed. */
static int
fwd_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
{
	struct iter_forward_zone k;
	k.node.key = &k;
	k.dclass = c;
	k.name = nm;
	k.namelabs = dname_count_size_labels(k.name, &k.namelen);
	if(!need_hole_insert(fwd->tree, &k))
		return 1; /* no enclosing forward to mask */
	return forwards_insert_data(fwd, k.dclass, k.name,
		k.namelen, k.namelabs, NULL);
}
/** Create a malloc-allocated delegation point.
 * @param name: optional wireformat name to copy in; may be NULL.
 * @return the new delegpt, or NULL on allocation failure. */
struct delegpt*
delegpt_create_mlc(uint8_t* name)
{
	struct delegpt* dp;
	dp = (struct delegpt*)calloc(1, sizeof(*dp));
	if(dp == NULL)
		return NULL;
	dp->dp_type_mlc = 1;	/* marks malloc (not region) ownership */
	if(name == NULL)
		return dp;	/* nameless delegation point */
	dp->namelabs = dname_count_size_labels(name, &dp->namelen);
	dp->name = memdup(name, dp->namelen);
	if(dp->name != NULL)
		return dp;
	free(dp);
	return NULL;
}
/** Prove that a positive wildcard answer was legitimate: the claimed
 * generating wildcard's closest encloser is taken as given, and the
 * NSEC3s must show the "next closer" name of qname does not exist.
 * @param wc: the wildcard name with the leading *. label already removed.
 * @return secure when the next closer is covered, insecure on opt-out
 *	or excessive iteration counts, bogus otherwise. */
enum sec_status
nsec3_prove_wildcard(struct module_env* env, struct val_env* ve,
	struct ub_packed_rrset_key** list, size_t num,
	struct query_info* qinfo, struct key_entry_key* kkey, uint8_t* wc)
{
	rbtree_t ct;
	struct nsec3_filter flt;
	struct ce_response ce;
	uint8_t* nc;
	size_t nc_len;
	size_t wclen;
	(void)dname_count_size_labels(wc, &wclen);
	if(!list || num == 0 || !kkey || !key_entry_isgood(kkey))
		return sec_status_bogus; /* no valid NSEC3s, bogus */
	rbtree_init(&ct, &nsec3_hash_cmp); /* init names-to-hash cache */
	filter_init(&flt, list, num, qinfo); /* init RR iterator */
	if(!flt.zone)
		return sec_status_bogus; /* no RRs */
	if(nsec3_iteration_count_high(ve, &flt, kkey))
		return sec_status_insecure; /* iteration count too high */
	/* We know what the (purported) closest encloser is by just
	 * looking at the supposed generating wildcard.
	 * The *. has already been removed from the wc name. */
	memset(&ce, 0, sizeof(ce));
	ce.ce = wc;
	ce.ce_len = wclen;
	/* Now we still need to prove that the original data did not exist.
	 * Otherwise, we need to show that the next closer name is covered. */
	next_closer(qinfo->qname, qinfo->qname_len, ce.ce, &nc, &nc_len);
	if(!find_covering_nsec3(env, &flt, &ct, nc, nc_len,
		&ce.nc_rrset, &ce.nc_rr)) {
		verbose(VERB_ALGO, "proveWildcard: did not find a covering "
			"NSEC3 that covered the next closer name.");
		return sec_status_bogus;
	}
	/* an opt-out span cannot prove nonexistence, only insecurity */
	if(ce.nc_rrset && nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
		verbose(VERB_ALGO, "proveWildcard: NSEC3 optout");
		return sec_status_insecure;
	}
	return sec_status_secure;
}
/**
 * This routine adds a new RR to a trust anchor. The trust anchor may not
 * exist yet, and is created if not. The RR can be DS or DNSKEY.
 * This routine will also remove duplicates; storing them only once.
 * @param anchors: anchor storage.
 * @param name: name of trust anchor (wireformat)
 * @param type: type or RR
 * @param dclass: class of RR
 * @param rdata: rdata wireformat, starting with rdlength.
 *	If NULL, nothing is stored, but an entry is created.
 * @param rdata_len: length of rdata including rdlength.
 * @return: NULL on error, else the trust anchor.
 */
static struct trust_anchor*
anchor_store_new_key(struct val_anchors* anchors, uint8_t* name,
	uint16_t type, uint16_t dclass, uint8_t* rdata, size_t rdata_len)
{
	struct ta_key* k;
	struct trust_anchor* ta;
	int namelabs;
	size_t namelen;
	namelabs = dname_count_size_labels(name, &namelen);
	if(type != LDNS_RR_TYPE_DS && type != LDNS_RR_TYPE_DNSKEY) {
		log_err("Bad type for trust anchor");
		return 0;
	}
	/* lookup or create trustanchor */
	ta = anchor_find(anchors, name, namelabs, namelen, dclass);
	if(!ta) {
		ta = anchor_new_ta(anchors, name, namelabs, namelen, dclass);
		if(!ta)
			return NULL;
		/* newly created anchor: take its lock like anchor_find
		 * would have -- NOTE(review): presumably anchor_find
		 * returns the anchor locked; confirm in its definition */
		lock_basic_lock(&ta->lock);
	}
	if(!rdata) {
		/* entry-only request: created (or found) is enough */
		lock_basic_unlock(&ta->lock);
		return ta;
	}
	/* look for duplicates */
	if(anchor_find_key(ta, rdata, rdata_len, type)) {
		lock_basic_unlock(&ta->lock);
		return ta;
	}
	k = anchor_new_ta_key(anchors, rdata, rdata_len, type);
	if(!k) {
		lock_basic_unlock(&ta->lock);
		return NULL;
	}
	/* add new key; keep per-type counts in sync with the keylist */
	if(type == LDNS_RR_TYPE_DS)
		ta->numDS++;
	else	ta->numDNSKEY++;
	k->next = ta->keylist;
	ta->keylist = k;
	lock_basic_unlock(&ta->lock);
	return ta;
}
/** parse commandline argument domain name
 * @param ssl: output channel for error messages.
 * @param str: text form domain name to parse.
 * @param res: receives malloced wireformat name (caller frees).
 * @param len: receives wireformat length.
 * @param labs: receives label count.
 * @return 0 on failure (reported to ssl), 1 on success. */
static int
parse_arg_name(SSL* ssl, char* str, uint8_t** res, size_t* len, int* labs)
{
	ldns_rdf* dname;
	/* clear outputs so failure leaves a well-defined state */
	*res = NULL;
	*len = 0;
	*labs = 0;
	dname = ldns_dname_new_frm_str(str);
	if(dname == NULL) {
		ssl_printf(ssl, "error cannot parse name %s\n", str);
		return 0;
	}
	/* copy out of the ldns rdf so the caller owns plain memory */
	*res = memdup(ldns_rdf_data(dname), ldns_rdf_size(dname));
	ldns_rdf_deep_free(dname);
	if(*res == NULL) {
		ssl_printf(ssl, "error out of memory\n");
		return 0;
	}
	*labs = dname_count_size_labels(*res, len);
	return 1;
}
/** Look up a stub hint for qname/qclass and decide whether it should be
 * used (primed) instead of the cached delegation point.
 * @param cache_dp: delegation point found in the cache, or NULL when
 *	there is none (root prime situation).
 * @return the stub to prime, or NULL to keep using cache_dp. */
struct iter_hints_stub*
hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
	uint16_t qclass, struct delegpt* cache_dp)
{
	size_t len;
	int labs;
	struct iter_hints_stub *r;

	/* first lookup the stub */
	labs = dname_count_size_labels(qname, &len);
	r = (struct iter_hints_stub*)name_tree_lookup(&hints->tree, qname,
		len, labs, qclass);
	if(!r) return NULL;

	/* If there is no cache (root prime situation) */
	if(cache_dp == NULL) {
		if(r->dp->namelabs != 1)
			return r; /* no cache dp, use any non-root stub */
		return NULL;
	}

	/*
	 * If the stub is same as the delegation we got
	 * And has noprime set, we need to 'prime' to use this stub instead.
	 */
	if(r->noprime && query_dname_compare(cache_dp->name, r->dp->name)==0)
		return r; /* use this stub instead of cached dp */

	/*
	 * If our cached delegation point is above the hint, we need to prime.
	 */
	if(dname_strict_subdomain(r->dp->name, r->dp->namelabs,
		cache_dp->name, cache_dp->namelabs))
		return r; /* need to prime this stub */
	return NULL;
}
/** Add an insecure point (negative trust anchor) for class c, name nm.
 * Does nothing when an anchor or insecure point already exists there.
 * @return 1 on success or already-present, 0 on out of memory. */
int
anchors_add_insecure(struct val_anchors* anchors, uint16_t c, uint8_t* nm)
{
	struct trust_anchor key;
	key.node.key = &key;	/* stack key for tree search */
	key.name = nm;
	key.namelabs = dname_count_size_labels(nm, &key.namelen);
	key.dclass = c;
	lock_basic_lock(&anchors->lock);
	if(rbtree_search(anchors->tree, &key)) {
		lock_basic_unlock(&anchors->lock);
		/* nothing to do, already an anchor or insecure point */
		return 1;
	}
	if(!anchor_new_ta(anchors, nm, key.namelabs, key.namelen, c, 0)) {
		log_err("out of memory");
		lock_basic_unlock(&anchors->lock);
		return 0;
	}
	/* no other contents in new ta, because it is insecure point */
	anchors_init_parents_locked(anchors);
	lock_basic_unlock(&anchors->lock);
	return 1;
}
/** enter implicit transparent zone for local-data: without local-zone:
 * Walks all local-data items of one class that have no covering zone,
 * computes their smallest shared topdomain and adds it as a transparent
 * zone; recurses once per additional class encountered.
 * @return 0 on error, 1 on success. */
static int
lz_setup_implicit(struct local_zones* zones, struct config_file* cfg)
{
	/* walk over all items that have no parent zone and find
	 * the name that covers them all (could be the root) and
	 * add that as a transparent zone */
	struct config_strlist* p;
	int have_name = 0;		/* saw a first uncovered name */
	int have_other_classes = 0;	/* classes besides dclass seen */
	uint16_t dclass = 0;		/* class handled in this pass */
	uint8_t* nm = 0;		/* first uncovered name (owned) */
	size_t nmlen = 0;
	int nmlabs = 0;
	int match = 0; /* number of labels match count */

	init_parents(zones); /* to enable local_zones_lookup() */
	for(p = cfg->local_data; p; p = p->next) {
		uint8_t* rr_name;
		uint16_t rr_class;
		size_t len;
		int labs;
		if(!get_rr_nameclass(p->str, &rr_name, &rr_class)) {
			log_err("Bad local-data RR %s", p->str);
			return 0;
		}
		labs = dname_count_size_labels(rr_name, &len);
		lock_rw_rdlock(&zones->lock);
		if(!local_zones_lookup(zones, rr_name, len, labs, rr_class)) {
			if(!have_name) {
				/* first uncovered name; keep ownership */
				dclass = rr_class;
				nm = rr_name;
				nmlen = len;
				nmlabs = labs;
				match = labs;
				have_name = 1;
			} else {
				int m;
				if(rr_class != dclass) {
					/* process other classes later */
					free(rr_name);
					have_other_classes = 1;
					lock_rw_unlock(&zones->lock);
					continue;
				}
				/* find smallest shared topdomain */
				(void)dname_lab_cmp(nm, nmlabs,
					rr_name, labs, &m);
				free(rr_name);
				if(m < match)
					match = m;
			}
		} else free(rr_name);
		lock_rw_unlock(&zones->lock);
	}
	if(have_name) {
		uint8_t* n2;
		struct local_zone* z;
		/* allocate zone of smallest shared topdomain to contain em */
		n2 = nm;
		dname_remove_labels(&n2, &nmlen, nmlabs - match);
		n2 = memdup(n2, nmlen);
		free(nm);
		if(!n2) {
			log_err("out of memory");
			return 0;
		}
		log_nametypeclass(VERB_ALGO, "implicit transparent local-zone",
			n2, 0, dclass);
		/* NOTE(review): lz_enter_zone_dname presumably consumes n2
		 * (also on failure) and returns the zone locked -- the
		 * unlock below implies the latter; confirm in its
		 * definition */
		if(!(z=lz_enter_zone_dname(zones, n2, nmlen, match,
			local_zone_transparent, dclass))) {
			return 0;
		}
		lock_rw_unlock(&z->lock);
	}
	if(have_other_classes) {
		/* restart to setup other class */
		return lz_setup_implicit(zones, cfg);
	}
	return 1;
}
/**
 * Remove NSEC records between start and end points.
 * By walking the tree, the tree is sorted canonically.
 * @param neg: negative cache.
 * @param zone: the zone
 * @param el: element to start walking at.
 * @param nsec: the nsec record with the end point
 */
static void
wipeout(struct val_neg_cache* neg, struct val_neg_zone* zone,
	struct val_neg_data* el, struct ub_packed_rrset_key* nsec)
{
	struct packed_rrset_data* d = (struct packed_rrset_data*)nsec->
		entry.data;
	uint8_t* end;
	size_t end_len;
	int end_labs, m;
	rbnode_t* walk, *next;
	struct val_neg_data* cur;
	uint8_t buf[257];	/* base32 next-owner name for NSEC3 */
	/* get endpoint */
	if(!d || d->count == 0 || d->rr_len[0] < 2+1)
		return;
	if(ntohs(nsec->rk.type) == LDNS_RR_TYPE_NSEC) {
		/* NSEC: next-owner dname is first in the rdata */
		end = d->rr_data[0]+2;
		/* NOTE(review): dname_valid returns a length; looks like a
		 * malformed name would give end_len 0 yet processing
		 * continues -- confirm callers only pass validated NSECs */
		end_len = dname_valid(end, d->rr_len[0]-2);
		end_labs = dname_count_labels(end);
	} else {
		/* NSEC3 */
		if(!nsec3_get_nextowner_b32(nsec, 0, buf, sizeof(buf)))
			return;
		end = buf;
		end_labs = dname_count_size_labels(end, &end_len);
	}
	/* sanity check, both owner and end must be below the zone apex */
	if(!dname_subdomain_c(el->name, zone->name) ||
		!dname_subdomain_c(end, zone->name))
		return;
	/* detect end of zone NSEC ; wipe until the end of zone */
	if(query_dname_compare(end, zone->name) == 0) {
		end = NULL;
	}
	walk = rbtree_next(&el->node);
	while(walk && walk != RBTREE_NULL) {
		cur = (struct val_neg_data*)walk;
		/* sanity check: must be larger than start */
		if(dname_canon_lab_cmp(cur->name, cur->labs,
			el->name, el->labs, &m) <= 0) {
			/* r == 0 skip original record. */
			/* r < 0 too small! */
			walk = rbtree_next(walk);
			continue;
		}
		/* stop at endpoint, also data at empty nonterminals must be
		 * removed (no NSECs there) so everything between
		 * start and end */
		if(end && dname_canon_lab_cmp(cur->name, cur->labs,
			end, end_labs, &m) >= 0) {
			break;
		}
		/* this element has to be deleted, but we cannot do it
		 * now, because we are walking the tree still ... */
		/* get the next element: */
		next = rbtree_next(walk);
		/* now delete the original element, this may trigger
		 * rbtree rebalances, but really, the next element is
		 * the one we need.
		 * But it may trigger delete of other data and the
		 * entire zone.  However, if that happens, this is done
		 * by deleting the *parents* of the element for deletion,
		 * and maybe also the entire zone if it is empty.
		 * But parents are smaller in canonical compare, thus,
		 * if a larger element exists, then it is not a parent,
		 * it cannot get deleted, the zone cannot get empty.
		 * If the next==NULL, then zone can be empty. */
		if(cur->in_use)
			neg_delete_data(neg, cur);
		walk = next;
	}
}