/* Invalidate the message associated with query_info stored in message cache */
void
invalidateQueryInCache(struct module_qstate* qstate, struct query_info* qinfo)
{
	hashvalue_t h;
	struct lruhash_entry* e;
	struct reply_info *r;
	size_t i, j;

	h = query_info_hash(qinfo, qstate->query_flags);
	if ((e=slabhash_lookup(qstate->env->msg_cache, h, qinfo, 0))) {
		r = (struct reply_info*)(e->data);
		if (r) {
			r->ttl = 0;
			if(rrset_array_lock(r->ref, r->rrset_count,
				*qstate->env->now)) {
				for(i=0; i< r->rrset_count; i++) {
					struct packed_rrset_data* data =
						(struct packed_rrset_data*)
						r->ref[i].key->entry.data;
					if(i>0 && r->ref[i].key ==
						r->ref[i-1].key)
						continue;
					data->ttl = r->ttl;
					for(j=0; j<data->count + data->rrsig_count; j++)
						data->rr_ttl[j] = r->ttl;
				}
				rrset_array_unlock(r->ref, r->rrset_count);
			}
		}
		lock_rw_unlock(&e->lock);
	} else {
		log_info("invalidateQueryInCache: qinfo is not in cache");
	}
}
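/*
 * Usage sketch (not part of the original source): a minimal example of how a
 * module hook might invalidate the cached answer for the query it is
 * currently handling. The wrapper name is hypothetical; it only assumes the
 * usual qinfo member of struct module_qstate.
 */
static void
example_invalidate_current_query(struct module_qstate* qstate)
{
	/* forces the next lookup for this query to miss the message cache */
	invalidateQueryInCache(qstate, &qstate->qinfo);
}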
int
dns_cache_store(struct module_env* env, struct query_info* msgqinf,
	struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
	struct regional* region, uint32_t flags)
{
	struct reply_info* rep = NULL;
	/* alloc, malloc properly (not in region, like msg is) */
	rep = reply_info_copy(msgrep, env->alloc, NULL);
	if(!rep)
		return 0;
	/* ttl must be relative; i.e. 0..86400, not time(0)+86400.
	 * the env->now is added to message and RRsets in this routine. */
	/* the leeway is used to invalidate other rrsets earlier */
	if(is_referral) {
		/* store rrsets */
		struct rrset_ref ref;
		size_t i;
		for(i=0; i<rep->rrset_count; i++) {
			packed_rrset_ttl_add((struct packed_rrset_data*)
				rep->rrsets[i]->entry.data, *env->now);
			ref.key = rep->rrsets[i];
			ref.id = rep->rrsets[i]->id;
			/* ignore ret: it was in the cache, ref updated */
			/* no leeway for typeNS */
			(void)rrset_cache_update(env->rrset_cache, &ref,
				env->alloc, *env->now +
				((ntohs(ref.key->rk.type)==LDNS_RR_TYPE_NS
				&& !pside) ? 0:leeway));
		}
		free(rep);
		return 1;
	} else {
		/* store msg, and rrsets */
		struct query_info qinf;
		hashvalue_type h;

		qinf = *msgqinf;
		qinf.qname = memdup(msgqinf->qname, msgqinf->qname_len);
		if(!qinf.qname) {
			reply_info_parsedelete(rep, env->alloc);
			return 0;
		}
		/* fixup flags to be sensible for a reply based on the cache */
		/* this module means that RA is available. It is an answer QR.
		 * Not AA from cache. Not CD in cache (depends on client bit). */
		rep->flags |= (BIT_RA | BIT_QR);
		rep->flags &= ~(BIT_AA | BIT_CD);
		h = query_info_hash(&qinf, (uint16_t)flags);
		dns_cache_store_msg(env, &qinf, h, rep, leeway, pside, msgrep,
			flags, region);
		/* qname is used inside query_info_entrysetup, and set to
		 * NULL. If it has not been used, free it. free(0) is safe. */
		free(qinf.qname);
	}
	return 1;
}
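/*
 * Usage sketch (illustrative, assumptions flagged): storing a parsed response
 * held in a struct dns_msg. The wrapper name is hypothetical; is_referral=0,
 * leeway=0 and pside=0 are example values, not defaults mandated by the API.
 */
static int
example_store_response(struct module_qstate* qstate, struct dns_msg* response)
{
	/* is_referral=0: store the whole message plus its rrsets;
	 * leeway=0: no earlier invalidation of other rrsets;
	 * pside=0: records did not come from the parent side. */
	return dns_cache_store(qstate->env, &response->qinfo, response->rep,
		0, 0, 0, qstate->region, qstate->query_flags);
}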
/** flush something from rrset and msg caches */
static void
do_cache_remove(struct worker* worker, uint8_t* nm, size_t nmlen,
	uint16_t t, uint16_t c)
{
	hashvalue_t h;
	struct query_info k;
	rrset_cache_remove(worker->env.rrset_cache, nm, nmlen, t, c, 0);
	k.qname = nm;
	k.qname_len = nmlen;
	k.qtype = t;
	k.qclass = c;
	h = query_info_hash(&k);
	slabhash_remove(worker->env.msg_cache, h, &k);
}
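/*
 * Sketch (hypothetical helper): how a "flush this name" style control command
 * could drive do_cache_remove for the common rrset types of one owner name.
 * The caller is assumed to have already parsed nm/nmlen from the control
 * channel; the list of types is illustrative only.
 */
static void
example_flush_name(struct worker* worker, uint8_t* nm, size_t nmlen)
{
	do_cache_remove(worker, nm, nmlen, LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN);
	do_cache_remove(worker, nm, nmlen, LDNS_RR_TYPE_AAAA, LDNS_RR_CLASS_IN);
	do_cache_remove(worker, nm, nmlen, LDNS_RR_TYPE_NS, LDNS_RR_CLASS_IN);
	do_cache_remove(worker, nm, nmlen, LDNS_RR_TYPE_SOA, LDNS_RR_CLASS_IN);
}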
/** delete message from message cache */
static void
msg_cache_remove(struct module_env* env, uint8_t* qname, size_t qnamelen,
	uint16_t qtype, uint16_t qclass, uint16_t flags)
{
	struct query_info k;
	hashvalue_type h;

	k.qname = qname;
	k.qname_len = qnamelen;
	k.qtype = qtype;
	k.qclass = qclass;
	k.local_alias = NULL;
	h = query_info_hash(&k, flags);
	slabhash_remove(env->msg_cache, h, &k);
}
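/*
 * Sketch (hypothetical wrapper, same translation unit since msg_cache_remove
 * is static): dropping the cached message for the query in qstate. Passing
 * flags=0 is an assumption; callers normally pass the same flags that were
 * used when the message was hashed into the cache.
 */
static void
example_drop_cached_msg(struct module_qstate* qstate)
{
	msg_cache_remove(qstate->env, qstate->qinfo.qname,
		qstate->qinfo.qname_len, qstate->qinfo.qtype,
		qstate->qinfo.qclass, 0 /* assumed flags */);
}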
/** lookup message in message cache */
static struct msgreply_entry*
msg_cache_lookup(struct module_env* env, uint8_t* qname, size_t qnamelen,
	uint16_t qtype, uint16_t qclass, uint16_t flags, time_t now, int wr)
{
	struct lruhash_entry* e;
	struct query_info k;
	hashvalue_t h;

	k.qname = qname;
	k.qname_len = qnamelen;
	k.qtype = qtype;
	k.qclass = qclass;
	h = query_info_hash(&k, flags);
	e = slabhash_lookup(env->msg_cache, h, &k, wr);

	if(!e) return NULL;
	if( now > ((struct reply_info*)e->data)->ttl ) {
		lock_rw_unlock(&e->lock);
		return NULL;
	}
	return (struct msgreply_entry*)e->key;
}
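/*
 * Sketch (hypothetical wrapper): checking whether a non-expired message is
 * cached. msg_cache_lookup returns the entry with its lock still held
 * (a read lock here, wr=0), so the caller must unlock it; flags=0 is an
 * assumed value.
 */
static int
example_msg_is_cached(struct module_env* env, uint8_t* qname, size_t qnamelen,
	uint16_t qtype, uint16_t qclass)
{
	struct msgreply_entry* m = msg_cache_lookup(env, qname, qnamelen,
		qtype, qclass, 0 /* assumed flags */, *env->now, 0);
	if(!m)
		return 0;
	lock_rw_unlock(&m->entry.lock);
	return 1;
}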
struct dns_msg*
dns_cache_lookup(struct module_env* env,
	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
	uint16_t flags, struct regional* region, struct regional* scratch,
	int no_partial)
{
	struct lruhash_entry* e;
	struct query_info k;
	hashvalue_type h;
	time_t now = *env->now;
	struct ub_packed_rrset_key* rrset;

	/* lookup first, this has both NXdomains and ANSWER responses */
	k.qname = qname;
	k.qname_len = qnamelen;
	k.qtype = qtype;
	k.qclass = qclass;
	k.local_alias = NULL;
	h = query_info_hash(&k, flags);
	e = slabhash_lookup(env->msg_cache, h, &k, 0);
	if(e) {
		struct msgreply_entry* key = (struct msgreply_entry*)e->key;
		struct reply_info* data = (struct reply_info*)e->data;
		struct dns_msg* msg = tomsg(env, &key->key, data, region, now,
			scratch);
		if(msg) {
			lock_rw_unlock(&e->lock);
			return msg;
		}
		/* could be msg==NULL; due to TTL or not all rrsets available */
		lock_rw_unlock(&e->lock);
	}

	/* see if a DNAME exists. Checked for first, to enforce that DNAMEs
	 * are more important, the CNAME is resynthesized and thus
	 * consistent with the DNAME */
	if(!no_partial &&
		(rrset=find_closest_of_type(env, qname, qnamelen, qclass, now,
			LDNS_RR_TYPE_DNAME, 1))) {
		/* synthesize a DNAME+CNAME message based on this */
		enum sec_status sec_status = sec_status_unchecked;
		struct dns_msg* msg = synth_dname_msg(rrset, region, now, &k,
			&sec_status);
		if(msg) {
			struct ub_packed_rrset_key* cname_rrset;
			lock_rw_unlock(&rrset->entry.lock);
			/* now, after unlocking the DNAME rrset lock,
			 * check the sec_status, and see if we need to look
			 * up the CNAME record associated before it can
			 * be used */
			/* normally, only secure DNAMEs allowed from cache */
			if(sec_status == sec_status_secure)
				return msg;
			/* but if we have a CNAME cached with this name, then
			 * we have previously already allowed this name to
			 * pass. the next cache lookup is going to fetch that
			 * CNAME itself, but it is better to have the
			 * (unsigned) DNAME + CNAME in that case */
			cname_rrset = rrset_cache_lookup(
				env->rrset_cache, qname, qnamelen,
				LDNS_RR_TYPE_CNAME, qclass, 0, now, 0);
			if(cname_rrset) {
				/* CNAME already synthesized by
				 * synth_dname_msg routine, so we can
				 * straight up return the msg */
				lock_rw_unlock(&cname_rrset->entry.lock);
				return msg;
			}
		} else {
			lock_rw_unlock(&rrset->entry.lock);
		}
	}

	/* see if we have CNAME for this domain,
	 * but not for DS records (which are part of the parent) */
	if(!no_partial && qtype != LDNS_RR_TYPE_DS &&
		(rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
			LDNS_RR_TYPE_CNAME, qclass, 0, now, 0))) {
		uint8_t* wc = NULL;
		size_t wl;
		/* if the rrset is not a wildcard expansion, with wcname */
		/* because, if we return that CNAME rrset on its own, it is
		 * missing the NSEC or NSEC3 proof */
		if(!(val_rrset_wildcard(rrset, &wc, &wl) && wc != NULL)) {
			struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
			if(msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return msg;
			}
		}
		lock_rw_unlock(&rrset->entry.lock);
	}

	/* construct DS, DNSKEY, DLV messages from rrset cache. */
	if((qtype == LDNS_RR_TYPE_DS || qtype == LDNS_RR_TYPE_DNSKEY ||
		qtype == LDNS_RR_TYPE_DLV) &&
		(rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
			qtype, qclass, 0, now, 0))) {
		/* if the rrset is from the additional section, and the
		 * signatures have fallen off, then do not synthesize a msg;
		 * instead, allow a full query for signed results to happen.
		 * Forego all rrset data from the additional section, because
		 * some signatures may not be present and cause validation
		 * failure. */
		struct packed_rrset_data *d = (struct packed_rrset_data*)
			rrset->entry.data;
		if(d->trust != rrset_trust_add_noAA &&
			d->trust != rrset_trust_add_AA &&
			(qtype == LDNS_RR_TYPE_DS ||
				(d->trust != rrset_trust_auth_noAA
				&& d->trust != rrset_trust_auth_AA))) {
			struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
			if(msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return msg;
			}
		}
		lock_rw_unlock(&rrset->entry.lock);
	}

	/* stop downwards cache search on NXDOMAIN.
	 * Empty nonterminals are NOERROR, so an NXDOMAIN for foo
	 * means bla.foo also does not exist. The DNSSEC proofs are
	 * the same. We search upwards for NXDOMAINs. */
	if(env->cfg->harden_below_nxdomain)
		while(!dname_is_root(k.qname)) {
			dname_remove_label(&k.qname, &k.qname_len);
			h = query_info_hash(&k, flags);
			e = slabhash_lookup(env->msg_cache, h, &k, 0);
			if(!e && k.qtype != LDNS_RR_TYPE_A &&
				env->cfg->qname_minimisation) {
				k.qtype = LDNS_RR_TYPE_A;
				h = query_info_hash(&k, flags);
				e = slabhash_lookup(env->msg_cache, h, &k, 0);
			}
			if(e) {
				struct reply_info* data = (struct reply_info*)e->data;
				struct dns_msg* msg;
				if(FLAGS_GET_RCODE(data->flags) == LDNS_RCODE_NXDOMAIN
					&& data->security == sec_status_secure
					&& (msg=tomsg(env, &k, data, region, now, scratch))) {
					lock_rw_unlock(&e->lock);
					msg->qinfo.qname = qname;
					msg->qinfo.qname_len = qnamelen;
					/* check that DNSSEC really works out */
					msg->rep->security = sec_status_unchecked;
					return msg;
				}
				lock_rw_unlock(&e->lock);
			}
			k.qtype = qtype;
		}

	/* fill common RR types for ANY response to avoid requery */
	if(qtype == LDNS_RR_TYPE_ANY) {
		return fill_any(env, qname, qnamelen, qtype, qclass, region);
	}

	return NULL;
}
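/*
 * Usage sketch (hypothetical wrapper): answering a client query from cache
 * before falling back to recursion. A NULL return means a cache miss and the
 * caller should resolve; no_partial=0 permits DNAME/CNAME synthesis. Only
 * dns_cache_lookup itself is taken from the code above.
 */
static struct dns_msg*
example_answer_from_cache(struct module_qstate* qstate)
{
	return dns_cache_lookup(qstate->env, qstate->qinfo.qname,
		qstate->qinfo.qname_len, qstate->qinfo.qtype,
		qstate->qinfo.qclass, qstate->query_flags,
		qstate->region, qstate->env->scratch, 0 /* no_partial */);
}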
/* variant of dns_cache_lookup without the no_partial argument (and without
 * the qname-minimisation and ANY handling of the version above) */
struct dns_msg*
dns_cache_lookup(struct module_env* env,
	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
	uint16_t flags, struct regional* region, struct regional* scratch)
{
	struct lruhash_entry* e;
	struct query_info k;
	hashvalue_t h;
	time_t now = *env->now;
	struct ub_packed_rrset_key* rrset;

	/* lookup first, this has both NXdomains and ANSWER responses */
	k.qname = qname;
	k.qname_len = qnamelen;
	k.qtype = qtype;
	k.qclass = qclass;
	h = query_info_hash(&k, flags);
	e = slabhash_lookup(env->msg_cache, h, &k, 0);
	if(e) {
		struct msgreply_entry* key = (struct msgreply_entry*)e->key;
		struct reply_info* data = (struct reply_info*)e->data;
		struct dns_msg* msg = tomsg(env, &key->key, data, region, now,
			scratch);
		if(msg) {
			lock_rw_unlock(&e->lock);
			return msg;
		}
		/* could be msg==NULL; due to TTL or not all rrsets available */
		lock_rw_unlock(&e->lock);
	}

	/* see if a DNAME exists. Checked for first, to enforce that DNAMEs
	 * are more important, the CNAME is resynthesized and thus
	 * consistent with the DNAME */
	if((rrset=find_closest_of_type(env, qname, qnamelen, qclass, now,
		LDNS_RR_TYPE_DNAME, 1))) {
		/* synthesize a DNAME+CNAME message based on this */
		struct dns_msg* msg = synth_dname_msg(rrset, region, now, &k);
		if(msg) {
			lock_rw_unlock(&rrset->entry.lock);
			return msg;
		}
		lock_rw_unlock(&rrset->entry.lock);
	}

	/* see if we have CNAME for this domain,
	 * but not for DS records (which are part of the parent) */
	if(qtype != LDNS_RR_TYPE_DS &&
		(rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
			LDNS_RR_TYPE_CNAME, qclass, 0, now, 0))) {
		struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
		if(msg) {
			lock_rw_unlock(&rrset->entry.lock);
			return msg;
		}
		lock_rw_unlock(&rrset->entry.lock);
	}

	/* construct DS, DNSKEY, DLV messages from rrset cache. */
	if((qtype == LDNS_RR_TYPE_DS || qtype == LDNS_RR_TYPE_DNSKEY ||
		qtype == LDNS_RR_TYPE_DLV) &&
		(rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
			qtype, qclass, 0, now, 0))) {
		/* if the rrset is from the additional section, and the
		 * signatures have fallen off, then do not synthesize a msg;
		 * instead, allow a full query for signed results to happen.
		 * Forego all rrset data from the additional section, because
		 * some signatures may not be present and cause validation
		 * failure. */
		struct packed_rrset_data *d = (struct packed_rrset_data*)
			rrset->entry.data;
		if(d->trust != rrset_trust_add_noAA &&
			d->trust != rrset_trust_add_AA &&
			(qtype == LDNS_RR_TYPE_DS ||
				(d->trust != rrset_trust_auth_noAA
				&& d->trust != rrset_trust_auth_AA))) {
			struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
			if(msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return msg;
			}
		}
		lock_rw_unlock(&rrset->entry.lock);
	}

	/* stop downwards cache search on NXDOMAIN.
	 * Empty nonterminals are NOERROR, so an NXDOMAIN for foo
	 * means bla.foo also does not exist. The DNSSEC proofs are
	 * the same. We search upwards for NXDOMAINs. */
	if(env->cfg->harden_below_nxdomain)
		while(!dname_is_root(k.qname)) {
			dname_remove_label(&k.qname, &k.qname_len);
			h = query_info_hash(&k, flags);
			e = slabhash_lookup(env->msg_cache, h, &k, 0);
			if(e) {
				struct reply_info* data = (struct reply_info*)e->data;
				struct dns_msg* msg;
				if(FLAGS_GET_RCODE(data->flags) == LDNS_RCODE_NXDOMAIN
					&& data->security == sec_status_secure
					&& (msg=tomsg(env, &k, data, region, now, scratch))) {
					lock_rw_unlock(&e->lock);
					msg->qinfo.qname = qname;
					msg->qinfo.qname_len = qnamelen;
					/* check that DNSSEC really works out */
					msg->rep->security = sec_status_unchecked;
					return msg;
				}
				lock_rw_unlock(&e->lock);
			}
		}

	return NULL;
}