/** find and add DS or NSEC to delegation msg */
static void
find_add_ds(struct module_env* env, struct regional* region,
	struct dns_msg* msg, struct delegpt* dp, time_t now)
{
	/* Try the DS rrset at the delegation point first. */
	struct ub_packed_rrset_key* proof = rrset_cache_lookup(
		env->rrset_cache, dp->name, dp->namelen, LDNS_RR_TYPE_DS,
		msg->qinfo.qclass, 0, now, 0);
	if(proof == NULL) {
		/* No DS cached; fall back to the NSEC at the delegation
		 * point.  NOTE: this won't work for alternate NSEC schemes
		 * (opt-in, NSEC3).  The PACKED_RRSET_NSEC_AT_APEX flag is
		 * not used: since this is a referral, we need the NSEC at
		 * the parent side of the zone cut, not the apex-side one. */
		proof = rrset_cache_lookup(env->rrset_cache, dp->name,
			dp->namelen, LDNS_RR_TYPE_NSEC, msg->qinfo.qclass,
			0, now, 0);
		if(proof != NULL && nsec_has_type(proof, LDNS_RR_TYPE_DS)) {
			/* this NSEC asserts a DS exists, so it is the wrong
			 * NSEC to serve as a denial; discard it */
			lock_rw_unlock(&proof->entry.lock);
			proof = NULL;
		}
	}
	if(proof == NULL)
		return;
	/* Append a copy to the authority section.  This is the second
	 * rrset in the message; on copy failure the counts stay as-is. */
	msg->rep->rrsets[msg->rep->rrset_count] =
		packed_rrset_copy_region(proof, region, now);
	if(msg->rep->rrsets[msg->rep->rrset_count] != NULL) {
		msg->rep->ns_numrrsets++;
		msg->rep->rrset_count++;
	}
	lock_rw_unlock(&proof->entry.lock);
}
/** Lookup cached parent-side A/AAAA glue for each delegation nameserver;
 * returns nonzero if new (but lame) address targets became available. */
int iter_lookup_parent_glue_from_cache(struct module_env* env,
	struct delegpt* dp, struct regional* region, struct query_info* qinfo)
{
	struct ub_packed_rrset_key* akey;
	struct delegpt_ns* ns;
	/* remember target count so additions can be detected afterwards */
	size_t num = delegpt_count_targets(dp);
	for(ns = dp->nslist; ns; ns = ns->next) {
		/* get cached parentside A */
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_A, qinfo->qclass,
			PACKED_RRSET_PARENT_SIDE, *env->now, 0);
		if(akey) {
			log_rrset_key(VERB_ALGO, "found parent-side", akey);
			ns->done_pside4 = 1;
			/* a negative-cache-element has no addresses it adds;
			 * addresses are added as lame (last arg 1) */
			if(!delegpt_add_rrset_A(dp, region, akey, 1))
				log_err("malloc failure in lookup_parent_glue");
			lock_rw_unlock(&akey->entry.lock);
		}
		/* get cached parentside AAAA */
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_AAAA, qinfo->qclass,
			PACKED_RRSET_PARENT_SIDE, *env->now, 0);
		if(akey) {
			log_rrset_key(VERB_ALGO, "found parent-side", akey);
			ns->done_pside6 = 1;
			/* a negative-cache-element has no addresses it adds */
			if(!delegpt_add_rrset_AAAA(dp, region, akey, 1))
				log_err("malloc failure in lookup_parent_glue");
			lock_rw_unlock(&akey->entry.lock);
		}
	}
	/* see if new (but lame) addresses have become available */
	return delegpt_count_targets(dp) != num;
}
/** find and add A and AAAA records for missing nameservers in delegpt */
int cache_fill_missing(struct module_env* env, uint16_t qclass,
	struct regional* region, struct delegpt* dp)
{
	struct delegpt_ns* ns;
	struct msgreply_entry* neg;
	struct ub_packed_rrset_key* akey;
	time_t now = *env->now;
	for(ns = dp->nslist; ns; ns = ns->next) {
		/* positive A rrset cached for this nameserver name? */
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
		if(akey) {
			/* addresses inherit the ns's lame status */
			if(!delegpt_add_rrset_A(dp, region, akey, ns->lame)) {
				/* out of memory; unlock before error return */
				lock_rw_unlock(&akey->entry.lock);
				return 0;
			}
			log_nametypeclass(VERB_ALGO, "found in cache",
				ns->name, LDNS_RR_TYPE_A, qclass);
			lock_rw_unlock(&akey->entry.lock);
		} else {
			/* no rrset; record a cached negative answer if any.
			 * BIT_CD on false because delegpt lookup does
			 * not use dns64 translation */
			neg = msg_cache_lookup(env, ns->name, ns->namelen,
				LDNS_RR_TYPE_A, qclass, 0, now, 0);
			if(neg) {
				delegpt_add_neg_msg(dp, neg);
				lock_rw_unlock(&neg->entry.lock);
			}
		}
		/* same lookup for the AAAA address rrset */
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
		if(akey) {
			if(!delegpt_add_rrset_AAAA(dp, region, akey,
				ns->lame)) {
				lock_rw_unlock(&akey->entry.lock);
				return 0;
			}
			log_nametypeclass(VERB_ALGO, "found in cache",
				ns->name, LDNS_RR_TYPE_AAAA, qclass);
			lock_rw_unlock(&akey->entry.lock);
		} else {
			/* BIT_CD on false because delegpt lookup does
			 * not use dns64 translation */
			neg = msg_cache_lookup(env, ns->name, ns->namelen,
				LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
			if(neg) {
				delegpt_add_neg_msg(dp, neg);
				lock_rw_unlock(&neg->entry.lock);
			}
		}
	}
	return 1;
}
/** find and add A and AAAA records for nameservers in delegpt */
static int find_add_addrs(struct module_env* env, uint16_t qclass,
	struct regional* region, struct delegpt* dp, time_t now,
	struct dns_msg** msg)
{
	struct delegpt_ns* ns;
	struct msgreply_entry* neg;
	struct ub_packed_rrset_key* akey;
	for(ns = dp->nslist; ns; ns = ns->next) {
		/* cached A rrset for this nameserver name? */
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
		if(akey) {
			if(!delegpt_add_rrset_A(dp, region, akey, 0)) {
				/* out of memory; unlock before error return */
				lock_rw_unlock(&akey->entry.lock);
				return 0;
			}
			/* optionally also put the addresses in the
			 * additional section of the message being built */
			if(msg)
				addr_to_additional(akey, region, *msg, now);
			lock_rw_unlock(&akey->entry.lock);
		} else {
			/* no rrset; note cached negative answer, if any.
			 * BIT_CD on false because delegpt lookup does
			 * not use dns64 translation */
			neg = msg_cache_lookup(env, ns->name, ns->namelen,
				LDNS_RR_TYPE_A, qclass, 0, now, 0);
			if(neg) {
				delegpt_add_neg_msg(dp, neg);
				lock_rw_unlock(&neg->entry.lock);
			}
		}
		/* same lookup for the AAAA rrset */
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
		if(akey) {
			if(!delegpt_add_rrset_AAAA(dp, region, akey, 0)) {
				lock_rw_unlock(&akey->entry.lock);
				return 0;
			}
			if(msg)
				addr_to_additional(akey, region, *msg, now);
			lock_rw_unlock(&akey->entry.lock);
		} else {
			/* BIT_CD on false because delegpt lookup does
			 * not use dns64 translation */
			neg = msg_cache_lookup(env, ns->name, ns->namelen,
				LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
			if(neg) {
				delegpt_add_neg_msg(dp, neg);
				lock_rw_unlock(&neg->entry.lock);
			}
		}
	}
	return 1;
}
/** Find a DS rrset for nm in the rrset cache and build an answer message
 * for the validator; otherwise try the negative cache for an NSEC/NSEC3
 * denial message.  Returns NULL when neither is available (or on alloc
 * failure). */
struct dns_msg* val_find_DS(struct module_env* env, uint8_t* nm, size_t nmlen,
	uint16_t c, struct regional* region)
{
	struct dns_msg* msg;
	struct query_info qinfo;
	struct ub_packed_rrset_key *rrset = rrset_cache_lookup(
		env->rrset_cache, nm, nmlen, LDNS_RR_TYPE_DS, c, 0,
		*env->now, 0);
	if(rrset) {
		/* DS rrset exists. Return it to the validator immediately*/
		struct ub_packed_rrset_key* copy = packed_rrset_copy_region(
			rrset, region, *env->now);
		/* unlock before any early return; copy is region-owned */
		lock_rw_unlock(&rrset->entry.lock);
		if(!copy)
			return NULL;
		msg = dns_msg_create(nm, nmlen, LDNS_RR_TYPE_DS, c, region,
			1);
		if(!msg)
			return NULL;
		msg->rep->rrsets[0] = copy;
		msg->rep->rrset_count++;
		msg->rep->an_numrrsets++;
		return msg;
	}
	/* lookup in rrset and negative cache for NSEC/NSEC3 */
	qinfo.qname = nm;
	qinfo.qname_len = nmlen;
	qinfo.qtype = LDNS_RR_TYPE_DS;
	qinfo.qclass = c;
	/* do not add SOA to reply message, it is going to be used internal */
	msg = val_neg_getmsg(env->neg_cache, &qinfo, region, env->rrset_cache,
		env->scratch_buffer, *env->now, 0);
	return msg;
}
/**
 * Add SOA record for external responses.
 * @param rrset_cache: to look into.
 * @param now: current time.
 * @param region: where to perform the allocation
 * @param msg: current msg with NSEC.
 * @param zone: val_neg_zone if we have one.
 * @return false on lookup or alloc failure.
 */
static int
add_soa(struct rrset_cache* rrset_cache, uint32_t now,
	struct regional* region, struct dns_msg* msg,
	struct val_neg_zone* zone)
{
	struct ub_packed_rrset_key* soa_rrset;
	uint8_t* zname;
	size_t znamelen;
	uint16_t zclass;
	int ok;
	if(zone != NULL) {
		/* zone given: its name/class identify the SOA owner */
		zname = zone->name;
		znamelen = zone->len;
		zclass = zone->dclass;
	} else {
		/* Assumes the signer is the zone SOA to add */
		zname = reply_nsec_signer(msg->rep, &znamelen, &zclass);
		if(zname == NULL)
			return 0;
	}
	soa_rrset = rrset_cache_lookup(rrset_cache, zname, znamelen,
		LDNS_RR_TYPE_SOA, zclass, PACKED_RRSET_SOA_NEG, now, 0);
	if(soa_rrset == NULL)
		return 0;
	/* append to authority section; unlock on both outcomes */
	ok = dns_msg_authadd(msg, region, soa_rrset, now);
	lock_rw_unlock(&soa_rrset->entry.lock);
	return ok?1:0;
}
/** find closest NS or DNAME and returns the rrset (locked) */
static struct ub_packed_rrset_key*
find_closest_of_type(struct module_env* env, uint8_t* qname, size_t qnamelen,
	uint16_t qclass, time_t now, uint16_t searchtype, int stripfront)
{
	struct ub_packed_rrset_key* found;
	size_t skip;
	if(stripfront) {
		/* strip the first label off so that DNAMEs have a strict
		 * subdomain match (the name itself is not searched) */
		skip = (size_t)(*qname) + 1;
		qname += skip;
		qnamelen -= skip;
	}
	/* walk up the name one label at a time until a hit or the root */
	while(qnamelen > 0) {
		found = rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
			searchtype, qclass, 0, now, 0);
		if(found != NULL)
			return found; /* still locked for the caller */
		skip = (size_t)(*qname) + 1;
		qname += skip;
		qnamelen -= skip;
	}
	return NULL;
}
/** Fill TYPE_ANY response with some data from cache */
static struct dns_msg* fill_any(struct module_env* env,
	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
	struct regional* region)
{
	time_t now = *env->now;
	struct dns_msg* msg = NULL;
	/* candidate RR types, zero-terminated sentinel at the end */
	uint16_t lookup[] = {LDNS_RR_TYPE_A, LDNS_RR_TYPE_AAAA,
		LDNS_RR_TYPE_MX, LDNS_RR_TYPE_SOA, LDNS_RR_TYPE_NS,
		LDNS_RR_TYPE_DNAME, 0};
	int i, num=6; /* number of RR types to look up */
	log_assert(lookup[num] == 0);
	for(i=0; i<num; i++) {
		/* look up this RR for inclusion in type ANY response */
		struct ub_packed_rrset_key* rrset = rrset_cache_lookup(
			env->rrset_cache, qname, qnamelen, lookup[i],
			qclass, 0, now, 0);
		struct packed_rrset_data *d;
		if(!rrset)
			continue;
		/* only if rrset from answer section; skip rrsets whose
		 * trust marks them as additional/authority-section data */
		d = (struct packed_rrset_data*)rrset->entry.data;
		if(d->trust == rrset_trust_add_noAA ||
			d->trust == rrset_trust_auth_noAA ||
			d->trust == rrset_trust_add_AA ||
			d->trust == rrset_trust_auth_AA) {
			lock_rw_unlock(&rrset->entry.lock);
			continue;
		}
		/* create msg if none; capacity is the remaining lookups */
		if(!msg) {
			msg = dns_msg_create(qname, qnamelen, qtype, qclass,
				region, (size_t)(num-i));
			if(!msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return NULL;
			}
		}
		/* add RRset to response */
		if(!dns_msg_ansadd(msg, region, rrset, now)) {
			lock_rw_unlock(&rrset->entry.lock);
			return NULL;
		}
		lock_rw_unlock(&rrset->entry.lock);
	}
	/* NULL when no suitable rrset was found at all */
	return msg;
}
/** load a msg rrset reference: parse one dump line, look the rrset up in
 * the cache and copy it into region.  Returns 0 on hard error; returns 1
 * with *go_on=0 when the reference is bad or no longer cached (skip msg). */
static int load_ref(SSL* ssl, sldns_buffer* buf, struct worker* worker,
	struct regional *region, struct ub_packed_rrset_key** rrset,
	int* go_on)
{
	char* s = (char*)sldns_buffer_begin(buf);
	struct query_info qinfo;
	unsigned int flags;
	struct ub_packed_rrset_key* k;
	/* read line */
	if(!ssl_read_buf(ssl, buf))
		return 0;
	if(strncmp(s, "BADREF", 6) == 0) {
		*go_on = 0; /* its bad, skip it and skip message */
		return 1;
	}
	/* parse name/type/class prefix; s advances past it */
	s = load_qinfo(s, &qinfo, region);
	if(!s) {
		return 0;
	}
	if(sscanf(s, " %u", &flags) != 1) {
		log_warn("error cannot parse flags: %s", s);
		return 0;
	}
	/* lookup in cache */
	k = rrset_cache_lookup(worker->env.rrset_cache, qinfo.qname,
		qinfo.qname_len, qinfo.qtype, qinfo.qclass, (uint32_t)flags,
		*worker->env.now, 0);
	if(!k) {
		/* not found or expired */
		*go_on = 0;
		return 1;
	}
	/* store in result; copy while holding the entry lock */
	*rrset = packed_rrset_copy_region(k, region, *worker->env.now);
	lock_rw_unlock(&k->entry.lock);
	return (*rrset != NULL);
}
int iter_lookup_parent_NS_from_cache(struct module_env* env, struct delegpt* dp, struct regional* region, struct query_info* qinfo) { struct ub_packed_rrset_key* akey; akey = rrset_cache_lookup(env->rrset_cache, dp->name, dp->namelen, LDNS_RR_TYPE_NS, qinfo->qclass, PACKED_RRSET_PARENT_SIDE, *env->now, 0); if(akey) { log_rrset_key(VERB_ALGO, "found parent-side NS in cache", akey); dp->has_parent_side_NS = 1; /* and mark the new names as lame */ if(!delegpt_rrset_add_ns(dp, region, akey, 1)) { lock_rw_unlock(&akey->entry.lock); return 0; } lock_rw_unlock(&akey->entry.lock); } return 1; }
/**
 * See if rrset exists in rrset cache.
 * If it does, the bit is checked, and if not expired, it is returned
 * allocated in region.
 * @param rrset_cache: rrset cache
 * @param qname: to lookup rrset name
 * @param qname_len: length of qname.
 * @param qtype: type of rrset to lookup, host order
 * @param qclass: class of rrset to lookup, host order
 * @param flags: flags for rrset to lookup
 * @param region: where to alloc result
 * @param checkbit: if true, a bit in the nsec typemap is checked for
 *	absence.
 * @param checktype: which bit to check
 * @param now: to check ttl against
 * @return rrset or NULL
 */
static struct ub_packed_rrset_key*
grab_nsec(struct rrset_cache* rrset_cache, uint8_t* qname, size_t qname_len,
	uint16_t qtype, uint16_t qclass, uint32_t flags,
	struct regional* region, int checkbit, uint16_t checktype,
	uint32_t now)
{
	struct ub_packed_rrset_key* r, *k = rrset_cache_lookup(rrset_cache,
		qname, qname_len, qtype, qclass, flags, now, 0);
	struct packed_rrset_data* d;
	if(!k)
		return NULL;
	d = (struct packed_rrset_data*)k->entry.data;
	/* reject expired entries */
	if(d->ttl < now) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* only secure or unchecked records that have signatures. */
	if( ! ( d->security == sec_status_secure ||
		(d->security == sec_status_unchecked &&
		d->rrsig_count > 0) ) ) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* check if checktype is absent: an NSEC with the bit set, or an
	 * NSEC3 that cannot prove absence of the type, is unusable */
	if(checkbit && (
		(qtype == LDNS_RR_TYPE_NSEC && nsec_has_type(k, checktype)) ||
		(qtype == LDNS_RR_TYPE_NSEC3 && !nsec3_no_type(k, checktype))
		)) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* looks OK! copy to region and return it */
	r = packed_rrset_copy_region(k, region, now);
	/* if it failed, we return the NULL */
	lock_rw_unlock(&k->entry.lock);
	return r;
}
/** Check the negative cache for an NSEC denial of a DLV record at qname.
 * Takes the negcache basic lock for the whole operation and the rrset's
 * rwlock while inspecting it; expired or insecure entries are pruned.
 * @return 1 if the denial is proven secure, 0 otherwise. */
int val_neg_dlvlookup(struct val_neg_cache* neg, uint8_t* qname, size_t len,
	uint16_t qclass, struct rrset_cache* rrset_cache, uint32_t now)
{
	/* lookup closest zone */
	struct val_neg_zone* zone;
	struct val_neg_data* data;
	int labs;
	struct ub_packed_rrset_key* nsec;
	struct packed_rrset_data* d;
	uint32_t flags;
	uint8_t* wc;
	struct query_info qinfo;
	if(!neg) return 0;
	log_nametypeclass(VERB_ALGO, "negcache dlvlookup", qname,
		LDNS_RR_TYPE_DLV, qclass);
	labs = dname_count_labels(qname);
	lock_basic_lock(&neg->lock);
	zone = neg_closest_zone_parent(neg, qname, len, labs, qclass);
	/* walk up to the nearest in-use ancestor zone */
	while(zone && !zone->in_use)
		zone = zone->parent;
	if(!zone) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	log_nametypeclass(VERB_ALGO, "negcache zone", zone->name, 0,
		zone->dclass);
	/* DLV is defined to use NSEC only */
	if(zone->nsec3_hash) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	/* lookup closest data record */
	(void)neg_closest_data(zone, qname, len, labs, &data);
	while(data && !data->in_use)
		data = data->parent;
	if(!data) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	log_nametypeclass(VERB_ALGO, "negcache rr", data->name,
		LDNS_RR_TYPE_NSEC, zone->dclass);
	/* lookup rrset in rrset cache; the apex NSEC is stored under a
	 * different flag than other NSECs */
	flags = 0;
	if(query_dname_compare(data->name, zone->name) == 0)
		flags = PACKED_RRSET_NSEC_AT_APEX;
	nsec = rrset_cache_lookup(rrset_cache, data->name, data->len,
		LDNS_RR_TYPE_NSEC, zone->dclass, flags, now, 0);
	/* check if secure and TTL ok */
	if(!nsec) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	d = (struct packed_rrset_data*)nsec->entry.data;
	if(!d || now > d->ttl) {
		lock_rw_unlock(&nsec->entry.lock);
		/* delete data record if expired */
		neg_delete_data(neg, data);
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	if(d->security != sec_status_secure) {
		lock_rw_unlock(&nsec->entry.lock);
		neg_delete_data(neg, data);
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	verbose(VERB_ALGO, "negcache got secure rrset");
	/* check NSEC security */
	/* check if NSEC proves no DLV type exists */
	/* check if NSEC proves NXDOMAIN for qname */
	/* NOTE(review): qinfo.qname_len is left unset here; the nsec
	 * checks below apparently do not read it — confirm. */
	qinfo.qname = qname;
	qinfo.qtype = LDNS_RR_TYPE_DLV;
	qinfo.qclass = qclass;
	if(!nsec_proves_nodata(nsec, &qinfo, &wc) &&
		!val_nsec_proves_name_error(nsec, qname)) {
		/* the NSEC is not a denial for the DLV */
		lock_rw_unlock(&nsec->entry.lock);
		lock_basic_unlock(&neg->lock);
		verbose(VERB_ALGO, "negcache not proven");
		return 0;
	}
	/* so the NSEC was a NODATA proof, or NXDOMAIN proof. */
	/* no need to check for wildcard NSEC; no wildcards in DLV repos */
	/* no need to lookup SOA record for client; no response message */
	lock_rw_unlock(&nsec->entry.lock);
	/* if OK touch the LRU for neg_data element */
	neg_lru_touch(neg, data);
	lock_basic_unlock(&neg->lock);
	verbose(VERB_ALGO, "negcache DLV denial proven");
	return 1;
}
/** Lookup an answer in the DNS caches.  Tries, in order: the message
 * cache, DNAME synthesis, a cached CNAME, direct DS/DNSKEY/DLV rrsets,
 * an ancestor NXDOMAIN (harden-below-nxdomain), and finally a partial
 * ANY response.  no_partial disables the DNAME/CNAME partial answers.
 * Returns a region-allocated msg or NULL on cache miss. */
struct dns_msg* dns_cache_lookup(struct module_env* env,
	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
	uint16_t flags, struct regional* region, struct regional* scratch,
	int no_partial)
{
	struct lruhash_entry* e;
	struct query_info k;
	hashvalue_type h;
	time_t now = *env->now;
	struct ub_packed_rrset_key* rrset;
	/* lookup first, this has both NXdomains and ANSWER responses */
	k.qname = qname;
	k.qname_len = qnamelen;
	k.qtype = qtype;
	k.qclass = qclass;
	k.local_alias = NULL;
	h = query_info_hash(&k, flags);
	e = slabhash_lookup(env->msg_cache, h, &k, 0);
	if(e) {
		struct msgreply_entry* key = (struct msgreply_entry*)e->key;
		struct reply_info* data = (struct reply_info*)e->data;
		struct dns_msg* msg = tomsg(env, &key->key, data, region,
			now, scratch);
		if(msg) {
			lock_rw_unlock(&e->lock);
			return msg;
		}
		/* could be msg==NULL; due to TTL or not all rrsets available */
		lock_rw_unlock(&e->lock);
	}
	/* see if a DNAME exists. Checked for first, to enforce that DNAMEs
	 * are more important, the CNAME is resynthesized and thus
	 * consistent with the DNAME */
	if(!no_partial &&
		(rrset=find_closest_of_type(env, qname, qnamelen, qclass,
		now, LDNS_RR_TYPE_DNAME, 1))) {
		/* synthesize a DNAME+CNAME message based on this */
		enum sec_status sec_status = sec_status_unchecked;
		struct dns_msg* msg = synth_dname_msg(rrset, region, now, &k,
			&sec_status);
		if(msg) {
			struct ub_packed_rrset_key* cname_rrset;
			lock_rw_unlock(&rrset->entry.lock);
			/* now, after unlocking the DNAME rrset lock,
			 * check the sec_status, and see if we need to look
			 * up the CNAME record associated before it can
			 * be used */
			/* normally, only secure DNAMEs allowed from cache*/
			if(sec_status == sec_status_secure)
				return msg;
			/* but if we have a CNAME cached with this name, then
			 * we have previously already allowed this name to
			 * pass.  The next cache lookup is going to fetch
			 * that CNAME itself, but it is better to have the
			 * (unsigned)DNAME + CNAME in that case */
			cname_rrset = rrset_cache_lookup(
				env->rrset_cache, qname, qnamelen,
				LDNS_RR_TYPE_CNAME, qclass, 0, now, 0);
			if(cname_rrset) {
				/* CNAME already synthesized by
				 * synth_dname_msg routine, so we can
				 * straight up return the msg */
				lock_rw_unlock(&cname_rrset->entry.lock);
				return msg;
			}
		} else {
			lock_rw_unlock(&rrset->entry.lock);
		}
	}
	/* see if we have CNAME for this domain,
	 * but not for DS records (which are part of the parent) */
	if(!no_partial && qtype != LDNS_RR_TYPE_DS &&
		(rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
		LDNS_RR_TYPE_CNAME, qclass, 0, now, 0))) {
		uint8_t* wc = NULL;
		size_t wl;
		/* if the rrset is not a wildcard expansion, with wcname */
		/* because, if we return that CNAME rrset on its own, it is
		 * missing the NSEC or NSEC3 proof */
		if(!(val_rrset_wildcard(rrset, &wc, &wl) && wc != NULL)) {
			struct dns_msg* msg = rrset_msg(rrset, region, now,
				&k);
			if(msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return msg;
			}
		}
		lock_rw_unlock(&rrset->entry.lock);
	}
	/* construct DS, DNSKEY, DLV messages from rrset cache. */
	if((qtype == LDNS_RR_TYPE_DS || qtype == LDNS_RR_TYPE_DNSKEY ||
		qtype == LDNS_RR_TYPE_DLV) &&
		(rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
		qtype, qclass, 0, now, 0))) {
		/* if the rrset is from the additional section, and the
		 * signatures have fallen off, then do not synthesize a msg
		 * instead, allow a full query for signed results to happen.
		 * Forego all rrset data from additional section, because
		 * some signatures may not be present and cause validation
		 * failure. */
		struct packed_rrset_data *d = (struct packed_rrset_data*)
			rrset->entry.data;
		if(d->trust != rrset_trust_add_noAA &&
			d->trust != rrset_trust_add_AA &&
			(qtype == LDNS_RR_TYPE_DS ||
				(d->trust != rrset_trust_auth_noAA
				&& d->trust != rrset_trust_auth_AA) )) {
			struct dns_msg* msg = rrset_msg(rrset, region, now,
				&k);
			if(msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return msg;
			}
		}
		lock_rw_unlock(&rrset->entry.lock);
	}
	/* stop downwards cache search on NXDOMAIN.
	 * Empty nonterminals are NOERROR, so an NXDOMAIN for foo
	 * means bla.foo also does not exist. The DNSSEC proofs are
	 * the same. We search upwards for NXDOMAINs. */
	if(env->cfg->harden_below_nxdomain)
	    while(!dname_is_root(k.qname)) {
		dname_remove_label(&k.qname, &k.qname_len);
		h = query_info_hash(&k, flags);
		e = slabhash_lookup(env->msg_cache, h, &k, 0);
		/* with qname-minimisation, the A-type query may hold the
		 * cached NXDOMAIN for the ancestor; retry with type A */
		if(!e && k.qtype != LDNS_RR_TYPE_A &&
			env->cfg->qname_minimisation) {
			k.qtype = LDNS_RR_TYPE_A;
			h = query_info_hash(&k, flags);
			e = slabhash_lookup(env->msg_cache, h, &k, 0);
		}
		if(e) {
			struct reply_info* data = (struct reply_info*)e->data;
			struct dns_msg* msg;
			if(FLAGS_GET_RCODE(data->flags) ==
				LDNS_RCODE_NXDOMAIN
				&& data->security == sec_status_secure
				&& (msg=tomsg(env, &k, data, region, now,
					scratch))){
				lock_rw_unlock(&e->lock);
				msg->qinfo.qname=qname;
				msg->qinfo.qname_len=qnamelen;
				/* check that DNSSEC really works out */
				msg->rep->security = sec_status_unchecked;
				return msg;
			}
			lock_rw_unlock(&e->lock);
		}
		/* restore the query type for the next iteration */
		k.qtype = qtype;
	    }
	/* fill common RR types for ANY response to avoid requery */
	if(qtype == LDNS_RR_TYPE_ANY) {
		return fill_any(env, qname, qnamelen, qtype, qclass, region);
	}
	return NULL;
}
/** Lookup an answer in the DNS caches (older variant without no_partial).
 * Tries, in order: the message cache, DNAME synthesis, a cached CNAME,
 * direct DS/DNSKEY/DLV rrsets, and an ancestor NXDOMAIN
 * (harden-below-nxdomain).  Returns region-allocated msg or NULL. */
struct dns_msg* dns_cache_lookup(struct module_env* env,
	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
	uint16_t flags, struct regional* region, struct regional* scratch)
{
	struct lruhash_entry* e;
	struct query_info k;
	hashvalue_t h;
	time_t now = *env->now;
	struct ub_packed_rrset_key* rrset;
	/* lookup first, this has both NXdomains and ANSWER responses */
	k.qname = qname;
	k.qname_len = qnamelen;
	k.qtype = qtype;
	k.qclass = qclass;
	h = query_info_hash(&k, flags);
	e = slabhash_lookup(env->msg_cache, h, &k, 0);
	if(e) {
		struct msgreply_entry* key = (struct msgreply_entry*)e->key;
		struct reply_info* data = (struct reply_info*)e->data;
		struct dns_msg* msg = tomsg(env, &key->key, data, region,
			now, scratch);
		if(msg) {
			lock_rw_unlock(&e->lock);
			return msg;
		}
		/* could be msg==NULL; due to TTL or not all rrsets available */
		lock_rw_unlock(&e->lock);
	}
	/* see if a DNAME exists. Checked for first, to enforce that DNAMEs
	 * are more important, the CNAME is resynthesized and thus
	 * consistent with the DNAME */
	if( (rrset=find_closest_of_type(env, qname, qnamelen, qclass, now,
		LDNS_RR_TYPE_DNAME, 1))) {
		/* synthesize a DNAME+CNAME message based on this */
		struct dns_msg* msg = synth_dname_msg(rrset, region, now,
			&k);
		if(msg) {
			lock_rw_unlock(&rrset->entry.lock);
			return msg;
		}
		lock_rw_unlock(&rrset->entry.lock);
	}
	/* see if we have CNAME for this domain,
	 * but not for DS records (which are part of the parent) */
	if( qtype != LDNS_RR_TYPE_DS &&
		(rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
		LDNS_RR_TYPE_CNAME, qclass, 0, now, 0))) {
		struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
		if(msg) {
			lock_rw_unlock(&rrset->entry.lock);
			return msg;
		}
		lock_rw_unlock(&rrset->entry.lock);
	}
	/* construct DS, DNSKEY, DLV messages from rrset cache. */
	if((qtype == LDNS_RR_TYPE_DS || qtype == LDNS_RR_TYPE_DNSKEY ||
		qtype == LDNS_RR_TYPE_DLV) &&
		(rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
		qtype, qclass, 0, now, 0))) {
		/* if the rrset is from the additional section, and the
		 * signatures have fallen off, then do not synthesize a msg
		 * instead, allow a full query for signed results to happen.
		 * Forego all rrset data from additional section, because
		 * some signatures may not be present and cause validation
		 * failure. */
		struct packed_rrset_data *d = (struct packed_rrset_data*)
			rrset->entry.data;
		if(d->trust != rrset_trust_add_noAA &&
			d->trust != rrset_trust_add_AA &&
			(qtype == LDNS_RR_TYPE_DS ||
				(d->trust != rrset_trust_auth_noAA
				&& d->trust != rrset_trust_auth_AA) )) {
			struct dns_msg* msg = rrset_msg(rrset, region, now,
				&k);
			if(msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return msg;
			}
		}
		lock_rw_unlock(&rrset->entry.lock);
	}
	/* stop downwards cache search on NXDOMAIN.
	 * Empty nonterminals are NOERROR, so an NXDOMAIN for foo
	 * means bla.foo also does not exist. The DNSSEC proofs are
	 * the same. We search upwards for NXDOMAINs. */
	if(env->cfg->harden_below_nxdomain)
	    while(!dname_is_root(k.qname)) {
		dname_remove_label(&k.qname, &k.qname_len);
		h = query_info_hash(&k, flags);
		e = slabhash_lookup(env->msg_cache, h, &k, 0);
		if(e) {
			struct reply_info* data = (struct reply_info*)e->data;
			struct dns_msg* msg;
			if(FLAGS_GET_RCODE(data->flags) ==
				LDNS_RCODE_NXDOMAIN
				&& data->security == sec_status_secure
				&& (msg=tomsg(env, &k, data, region, now,
					scratch))) {
				lock_rw_unlock(&e->lock);
				msg->qinfo.qname=qname;
				msg->qinfo.qname_len=qnamelen;
				/* check that DNSSEC really works out */
				msg->rep->security = sec_status_unchecked;
				return msg;
			}
			lock_rw_unlock(&e->lock);
		}
	    }
	return NULL;
}