/** Allocate a fresh rrset key plus data structure inside the region.
 * Modelled after new_local_rrset() in the localzone.c module.
 * The owner name is initialized to the root (one zero byte); the caller
 * fills in real contents afterwards.
 * @param region: allocations come from here; freed with the region.
 * @param rrtype: RR type, host byte order.
 * @param rrclass: RR class, host byte order.
 * @return new rrset key or NULL on allocation failure (logged). */
static struct ub_packed_rrset_key* new_rrset(struct regional* region,
	uint16_t rrtype, uint16_t rrclass)
{
	struct ub_packed_rrset_key* key;
	struct packed_rrset_data* data;

	key = regional_alloc_zero(region, sizeof(*key));
	if(!key) {
		log_err("out of memory");
		return NULL;
	}
	data = regional_alloc_zero(region, sizeof(*data));
	if(!data) {
		log_err("out of memory");
		return NULL;
	}
	/* local data is primary, not validated */
	data->trust = rrset_trust_prim_noglue;
	data->security = sec_status_insecure;
	key->entry.key = key;
	key->entry.data = data;
	/* owner name starts as the root label; 1 zero byte */
	key->rk.dname = regional_alloc_zero(region, 1);
	if(!key->rk.dname) {
		log_err("out of memory");
		return NULL;
	}
	key->rk.dname_len = 1;
	key->rk.type = htons(rrtype);
	key->rk.rrset_class = htons(rrclass);
	return key;
}
/** Create a dns_msg in the region with room for a number of rrsets.
 * @param qname: query name, copied into the region.
 * @param qnamelen: length of qname in bytes.
 * @param qtype: query type (host byte order).
 * @param qclass: query class (host byte order).
 * @param region: where the message and its arrays are allocated.
 * @param capacity: number of rrset pointers to reserve.
 * @return the new message or NULL on failure (OOM or capacity too big). */
struct dns_msg* dns_msg_create(uint8_t* qname, size_t qnamelen,
	uint16_t qtype, uint16_t qclass, struct regional* region,
	size_t capacity)
{
	struct dns_msg* m;

	m = (struct dns_msg*)regional_alloc(region, sizeof(struct dns_msg));
	if(!m)
		return NULL;
	/* take a private copy of the query name */
	m->qinfo.qname = regional_alloc_init(region, qname, qnamelen);
	if(!m->qinfo.qname)
		return NULL;
	m->qinfo.qname_len = qnamelen;
	m->qinfo.qtype = qtype;
	m->qinfo.qclass = qclass;
	m->qinfo.local_alias = NULL;
	/* non-packed reply_info, because it needs to grow the array */
	m->rep = (struct reply_info*)regional_alloc_zero(region,
		sizeof(struct reply_info)-sizeof(struct rrset_ref));
	if(!m->rep)
		return NULL;
	if(capacity > RR_COUNT_MAX)
		return NULL; /* integer overflow protection */
	m->rep->flags = BIT_QR; /* with QR, no AA */
	m->rep->qdcount = 1;
	m->rep->rrsets = (struct ub_packed_rrset_key**)regional_alloc(
		region, capacity*sizeof(struct ub_packed_rrset_key*));
	if(!m->rep->rrsets)
		return NULL;
	return m;
}
/** Populate action info structure with the results of response-ip action * processing, iff as the result of response-ip processing we are actually * taking some action. Only action is set if action_only is true. * Returns true on success, false on failure. */ static int populate_action_info(struct respip_action_info* actinfo, enum respip_action action, const struct resp_addr* raddr, const struct ub_packed_rrset_key* ATTR_UNUSED(rrset), int ATTR_UNUSED(tag), const struct respip_set* ATTR_UNUSED(ipset), int ATTR_UNUSED(action_only), struct regional* region) { if(action == respip_none || !raddr) return 1; actinfo->action = action; /* for inform variants, make a copy of the matched address block for * later logging. We make a copy to proactively avoid disruption if * and when we allow a dynamic update to the respip tree. */ if(action == respip_inform || action == respip_inform_deny) { struct respip_addr_info* a = regional_alloc_zero(region, sizeof(*a)); if(!a) { log_err("out of memory"); return 0; } a->addr = raddr->node.addr; a->addrlen = raddr->node.addrlen; a->net = raddr->node.net; actinfo->addrinfo = a; } return 1; }
/** returns the node in the address tree for the specified netblock string; * non-existent node will be created if 'create' is true */ static struct resp_addr* respip_find_or_create(struct respip_set* set, const char* ipstr, int create) { struct resp_addr* node; struct sockaddr_storage addr; int net; socklen_t addrlen; if(!netblockstrtoaddr(ipstr, 0, &addr, &addrlen, &net)) { log_err("cannot parse netblock: '%s'", ipstr); return NULL; } node = (struct resp_addr*)addr_tree_find(&set->ip_tree, &addr, addrlen, net); if(!node && create) { node = regional_alloc_zero(set->region, sizeof(*node)); if(!node) { log_err("out of memory"); return NULL; } node->action = respip_none; if(!addr_tree_insert(&set->ip_tree, &node->node, &addr, addrlen, net)) { /* We know we didn't find it, so this should be * impossible. */ log_warn("unexpected: duplicate address: %s", ipstr); } } return node; }
/** find a node, create it if not and all its empty nonterminal parents.
 * @param z: the local zone; nodes are placed in z->data, memory in
 *	z->region.
 * @param nm: domain name (wireformat); the buffer is traversed upward by
 *	dname_remove_label during parent creation.
 * @param nmlen: length of nm.
 * @param nmlabs: number of labels in nm.
 * @param res: receives the node for the originally requested name (the
 *	recursion for parents runs first, then *res is overwritten with
 *	the child node below).
 * @return 1 on success, 0 on out of memory. */
static int lz_find_create_node(struct local_zone* z, uint8_t* nm,
	size_t nmlen, int nmlabs, struct local_data** res)
{
	struct local_data* ld = lz_find_node(z, nm, nmlen, nmlabs);
	if(!ld) {
		/* create a domain name to store rr. */
		ld = (struct local_data*)regional_alloc_zero(z->region,
			sizeof(*ld));
		if(!ld) {
			log_err("out of memory adding local data");
			return 0;
		}
		ld->node.key = ld;
		/* private copy of the name into the zone region */
		ld->name = regional_alloc_init(z->region, nm, nmlen);
		if(!ld->name) {
			log_err("out of memory");
			return 0;
		}
		ld->namelen = nmlen;
		ld->namelabs = nmlabs;
		if(!rbtree_insert(&z->data, &ld->node)) {
			/* cannot happen: lz_find_node missed above */
			log_assert(0); /* duplicate name */
		}
		/* see if empty nonterminals need to be created */
		if(nmlabs > z->namelabs) {
			/* strip one label and recurse toward zone apex */
			dname_remove_label(&nm, &nmlen);
			if(!lz_find_create_node(z, nm, nmlen, nmlabs-1,
				res))
				return 0;
		}
	}
	*res = ld;
	return 1;
}
/** Store a parent-side negative entry for the query in the rrset cache:
 * an rrset with one zero-length-rdata RR, built in env->scratch and then
 * handed to iter_store_parentside_rrset. The packed_rrset_data and its
 * rr_len/rr_data/rr_ttl arrays plus the 2-byte rdata are laid out in one
 * allocation; packed_rrset_ptr_fixup wires up the internal pointers. */
void iter_store_parentside_neg(struct module_env* env,
	struct query_info* qinfo, struct reply_info* rep)
{
	/* TTL: NS from referral in iq->deleg_msg,
	 * or first RR from iq->response,
	 * or servfail5secs if !iq->response */
	time_t ttl = NORR_TTL;
	struct ub_packed_rrset_key* neg;
	struct packed_rrset_data* newd;
	if(rep) {
		/* prefer the NS rrset's TTL, else the first rrset's */
		struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
		if(!rrset && rep->rrset_count != 0)
			rrset = rep->rrsets[0];
		if(rrset)
			ttl = ub_packed_rrset_ttl(rrset);
	}
	/* create empty rrset to store */
	neg = (struct ub_packed_rrset_key*)regional_alloc(env->scratch,
		sizeof(struct ub_packed_rrset_key));
	if(!neg) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	memset(&neg->entry, 0, sizeof(neg->entry));
	neg->entry.key = neg;
	neg->rk.type = htons(qinfo->qtype);
	neg->rk.rrset_class = htons(qinfo->qclass);
	neg->rk.flags = 0;
	neg->rk.dname = regional_alloc_init(env->scratch, qinfo->qname,
		qinfo->qname_len);
	if(!neg->rk.dname) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	neg->rk.dname_len = qinfo->qname_len;
	neg->entry.hash = rrset_key_hash(&neg->rk);
	/* one allocation for struct + rr_len[1] + rr_data[1] + rr_ttl[1]
	 * + the 2-byte rdata-length prefix of the single RR */
	newd = (struct packed_rrset_data*)regional_alloc_zero(env->scratch,
		sizeof(struct packed_rrset_data) + sizeof(size_t) +
		sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t));
	if(!newd) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	neg->entry.data = newd;
	newd->ttl = ttl;
	/* entry must have one RR, otherwise not valid in cache.
	 * put in one RR with empty rdata: those are ignored as nameserver */
	newd->count = 1;
	newd->rrsig_count = 0;
	newd->trust = rrset_trust_ans_noAA;
	/* rr_len array sits directly after the struct in the allocation */
	newd->rr_len = (size_t*)((uint8_t*)newd +
		sizeof(struct packed_rrset_data));
	newd->rr_len[0] = 0 /* zero len rdata */ + sizeof(uint16_t);
	/* fixes rr_data and rr_ttl pointers into the same allocation */
	packed_rrset_ptr_fixup(newd);
	newd->rr_ttl[0] = newd->ttl;
	sldns_write_uint16(newd->rr_data[0], 0 /* zero len rdata */);
	/* store it */
	log_rrset_key(VERB_ALGO, "store parent-side negative", neg);
	iter_store_parentside_rrset(env, neg);
}
/** load a msg entry from the text line in buf and store it in the cache.
 * Line format: "msg <qinfo> flags qdcount ttl security an ns ar" followed
 * by rrset references read via load_ref.
 * @param ssl: connection to read rrset references from.
 * @param buf: contains the msg line at its beginning.
 * @param worker: provides scratchpad region, env and cache.
 * @return 0 on failure (logged); 1 on success or deliberate skip when
 *	not all rrset references could be satisfied. */
static int load_msg(SSL* ssl, ldns_buffer* buf, struct worker* worker)
{
	struct regional* region = worker->scratchpad;
	struct query_info qinf;
	struct reply_info rep;
	char* s = (char*)ldns_buffer_begin(buf);
	unsigned int flags, qdcount, ttl, security, an, ns, ar;
	size_t i;
	int go_on = 1;
	regional_free_all(region);

	if(strncmp(s, "msg ", 4) != 0) {
		log_warn("error expected msg but got %s", s);
		return 0;
	}
	s += 4;
	s = load_qinfo(s, &qinf, buf, region);
	if(!s) {
		return 0;
	}

	/* read remainder of line */
	if(sscanf(s, " %u %u %u %u %u %u %u", &flags, &qdcount, &ttl,
		&security, &an, &ns, &ar) != 7) {
		log_warn("error cannot parse numbers: %s", s);
		return 0;
	}
	rep.flags = (uint16_t)flags;
	rep.qdcount = (uint16_t)qdcount;
	rep.ttl = (uint32_t)ttl;
	rep.prefetch_ttl = PREFETCH_TTL_CALC(rep.ttl);
	rep.security = (enum sec_status)security;
	rep.an_numrrsets = (size_t)an;
	rep.ns_numrrsets = (size_t)ns;
	rep.ar_numrrsets = (size_t)ar;
	rep.rrset_count = (size_t)an+(size_t)ns+(size_t)ar;
	rep.rrsets = (struct ub_packed_rrset_key**)regional_alloc_zero(
		region, sizeof(struct ub_packed_rrset_key*)*rep.rrset_count);
	/* BUGFIX: the allocation was previously used unchecked, which
	 * dereferences NULL in the loop below on out of memory */
	if(!rep.rrsets) {
		log_warn("error out of memory");
		return 0;
	}
	/* fill repinfo with references */
	for(i=0; i<rep.rrset_count; i++) {
		if(!load_ref(ssl, buf, worker, region, &rep.rrsets[i],
			&go_on)) {
			return 0;
		}
	}
	if(!go_on)
		return 1; /* skip this one, not all references satisfied */

	if(!dns_cache_store(&worker->env, &qinf, &rep, 0, 0, 0, NULL)) {
		log_warn("error out of memory");
		return 0;
	}
	return 1;
}
/** load an rrset entry from the ";rrset" text line and read its RRs.
 * Line format: ";rrset [nsec_apex] ttl rr_count rrsig_count trust security"
 * followed by rr_count RRs and rrsig_count RRSIGs read via load_rr.
 * @param ssl: connection to read RR lines from.
 * @param buf: contains the ;rrset line at its beginning.
 * @param worker: provides scratchpad region and env clock.
 * @return 0 on failure (logged); 1 on success or deliberate skip. */
static int load_rrset(SSL* ssl, sldns_buffer* buf, struct worker* worker)
{
	char* s = (char*)sldns_buffer_begin(buf);
	struct regional* region = worker->scratchpad;
	struct ub_packed_rrset_key* rk;
	struct packed_rrset_data* d;
	unsigned int rr_count, rrsig_count, trust, security;
	long long ttl;
	unsigned int i;
	int go_on = 1;
	regional_free_all(region);

	rk = (struct ub_packed_rrset_key*)regional_alloc_zero(region,
		sizeof(*rk));
	d = (struct packed_rrset_data*)regional_alloc_zero(region,
		sizeof(*d));
	if(!rk || !d) {
		log_warn("error out of memory");
		return 0;
	}

	if(strncmp(s, ";rrset", 6) != 0) {
		log_warn("error expected ';rrset' but got %s", s);
		return 0;
	}
	s += 6;
	if(strncmp(s, " nsec_apex", 10) == 0) {
		s += 10;
		rk->rk.flags |= PACKED_RRSET_NSEC_AT_APEX;
	}
	if(sscanf(s, " " ARG_LL "d %u %u %u %u", &ttl, &rr_count,
		&rrsig_count, &trust, &security) != 5) {
		log_warn("error bad rrset spec %s", s);
		return 0;
	}
	if(rr_count == 0 && rrsig_count == 0) {
		log_warn("bad rrset without contents");
		return 0;
	}
	if(rr_count > RR_COUNT_MAX || rrsig_count > RR_COUNT_MAX) {
		log_warn("bad rrset with too many rrs");
		return 0;
	}
	d->count = (size_t)rr_count;
	d->rrsig_count = (size_t)rrsig_count;
	d->security = (enum sec_status)security;
	d->trust = (enum rrset_trust)trust;
	/* stored ttl is relative; make it absolute against current time */
	d->ttl = (time_t)ttl + *worker->env.now;

	d->rr_len = regional_alloc_zero(region,
		sizeof(size_t)*(d->count+d->rrsig_count));
	d->rr_ttl = regional_alloc_zero(region,
		sizeof(time_t)*(d->count+d->rrsig_count));
	d->rr_data = regional_alloc_zero(region,
		sizeof(uint8_t*)*(d->count+d->rrsig_count));
	if(!d->rr_len || !d->rr_ttl || !d->rr_data) {
		log_warn("error out of memory");
		return 0;
	}

	/* read the rr's themselves */
	for(i=0; i<rr_count; i++) {
		if(!load_rr(ssl, buf, region, rk, d, i, 0,
			&go_on, *worker->env.now)) {
			log_warn("could not read rr %u", i);
			return 0;
		}
	}
	for(i=0; i<rrsig_count; i++) {
		if(!load_rr(ssl, buf, region, rk, d, i+rr_count, 1,
			&go_on, *worker->env.now)) {
			/* BUGFIX: the log message previously contained a
			 * stray embedded newline splitting the literal */
			log_warn("could not read rrsig %u", i);
			return 0;
		}
	}
	if(!go_on) {
		/* skip this entry */
		return 1;
	}
	return move_into_cache(rk, d, worker);
}
/** synthesize DNAME+CNAME response from cached DNAME item.
 * @param rrset: the cached DNAME rrset.
 * @param region: message and synthesized rrset are allocated here.
 * @param now: current time; cache entry must not be expired.
 * @param q: the query being answered; its qname is rewritten onto the
 *	DNAME target to form the CNAME target.
 * @param sec_status: receives the DNAME's security status so the caller
 *	can decide whether the cached item may be used.
 * @return message with DNAME (and synthesized CNAME, TTL 0), or NULL on
 *	expiry/allocation failure; on overlong synthesized names a
 *	DNAME-only message with YXDOMAIN rcode set in the flags. */
static struct dns_msg* synth_dname_msg(struct ub_packed_rrset_key* rrset,
	struct regional* region, time_t now, struct query_info* q,
	enum sec_status* sec_status)
{
	struct dns_msg* msg;
	struct ub_packed_rrset_key* ck;
	struct packed_rrset_data* newd, *d = (struct packed_rrset_data*)
		rrset->entry.data;
	uint8_t* newname, *dtarg = NULL;
	size_t newlen, dtarglen;
	if(now > d->ttl)
		return NULL; /* cache entry expired */
	/* only allow validated (with DNSSEC) DNAMEs used from cache
	 * for insecure DNAMEs, query again. */
	*sec_status = d->security;
	/* return sec status, so the status of the CNAME can be checked
	 * by the calling routine. */
	msg = gen_dns_msg(region, q, 2); /* DNAME + CNAME RRset */
	if(!msg)
		return NULL;
	msg->rep->flags = BIT_QR; /* reply, no AA, no error */
	msg->rep->authoritative = 0; /* reply stored in cache can't be
		authoritative */
	msg->rep->qdcount = 1;
	msg->rep->ttl = d->ttl - now;
	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
	msg->rep->security = sec_status_unchecked;
	msg->rep->an_numrrsets = 1;
	msg->rep->ns_numrrsets = 0;
	msg->rep->ar_numrrsets = 0;
	msg->rep->rrset_count = 1;
	msg->rep->rrsets[0] = packed_rrset_copy_region(rrset, region, now);
	if(!msg->rep->rrsets[0]) /* copy DNAME */
		return NULL;
	/* synth CNAME rrset */
	get_cname_target(rrset, &dtarg, &dtarglen);
	if(!dtarg)
		return NULL;
	/* new length = qname prefix (qname without DNAME owner suffix)
	 * plus the DNAME target */
	newlen = q->qname_len + dtarglen - rrset->rk.dname_len;
	if(newlen > LDNS_MAX_DOMAINLEN) {
		/* name too long: answer with DNAME only and the
		 * YXDOMAIN rcode set in the flags word */
		msg->rep->flags |= LDNS_RCODE_YXDOMAIN;
		return msg;
	}
	newname = (uint8_t*)regional_alloc(region, newlen);
	if(!newname)
		return NULL;
	/* new name is concatenation of qname front (without DNAME owner)
	 * and DNAME target name */
	memcpy(newname, q->qname, q->qname_len-rrset->rk.dname_len);
	memmove(newname+(q->qname_len-rrset->rk.dname_len), dtarg,
		dtarglen);
	/* create rest of CNAME rrset */
	ck = (struct ub_packed_rrset_key*)regional_alloc(region,
		sizeof(struct ub_packed_rrset_key));
	if(!ck)
		return NULL;
	memset(&ck->entry, 0, sizeof(ck->entry));
	msg->rep->rrsets[1] = ck;
	ck->entry.key = ck;
	ck->rk.type = htons(LDNS_RR_TYPE_CNAME);
	ck->rk.rrset_class = rrset->rk.rrset_class;
	ck->rk.flags = 0;
	ck->rk.dname = regional_alloc_init(region, q->qname, q->qname_len);
	if(!ck->rk.dname)
		return NULL;
	ck->rk.dname_len = q->qname_len;
	ck->entry.hash = rrset_key_hash(&ck->rk);
	/* one allocation holds the struct, rr_len[1], rr_data[1],
	 * rr_ttl[1] and the rdata (2-byte length prefix + newlen) */
	newd = (struct packed_rrset_data*)regional_alloc_zero(region,
		sizeof(struct packed_rrset_data) + sizeof(size_t) +
		sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t)
		+ newlen);
	if(!newd)
		return NULL;
	ck->entry.data = newd;
	newd->ttl = 0; /* 0 for synthesized CNAME TTL */
	newd->count = 1;
	newd->rrsig_count = 0;
	newd->trust = rrset_trust_ans_noAA;
	/* rr_len array follows directly after the struct */
	newd->rr_len = (size_t*)((uint8_t*)newd +
		sizeof(struct packed_rrset_data));
	newd->rr_len[0] = newlen + sizeof(uint16_t);
	/* fixes rr_data and rr_ttl pointers into the same allocation */
	packed_rrset_ptr_fixup(newd);
	newd->rr_ttl[0] = newd->ttl;
	msg->rep->ttl = newd->ttl;
	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(newd->ttl);
	/* write the rdata: uncompressed target name with length prefix */
	sldns_write_uint16(newd->rr_data[0], newlen);
	memmove(newd->rr_data[0] + sizeof(uint16_t), newname, newlen);
	msg->rep->an_numrrsets ++;
	msg->rep->rrset_count ++;
	return msg;
}
/** respip module operate function: on moddone, inspect the reply for
 * response-ip rewriting; may replace the reply, mark the query for drop,
 * or spawn a CNAME follow-up subquery. */
void respip_operate(struct module_qstate* qstate, enum module_ev event,
	int id, struct outbound_entry* outbound)
{
	struct respip_qstate* rq = (struct respip_qstate*)qstate->minfo[id];

	log_query_info(VERB_QUERY, "respip operate: query", &qstate->qinfo);
	(void)outbound;

	if(event == module_event_new || event == module_event_pass) {
		if(!rq) {
			/* first visit: allocate per-query module state */
			rq = regional_alloc_zero(qstate->region,
				sizeof(*rq));
			if(!rq)
				goto servfail;
			rq->state = RESPIP_INIT;
			qstate->minfo[id] = rq;
		}
		if(rq->state == RESPIP_SUBQUERY_FINISHED) {
			qstate->ext_state[id] = module_finished;
			return;
		}
		verbose(VERB_ALGO, "respip: pass to next module");
		qstate->ext_state[id] = module_wait_module;
	} else if(event == module_event_moddone) {
		/* If the reply may be subject to response-ip rewriting
		 * according to the query type, check the actions.  If a
		 * rewrite is necessary, we'll replace the reply in qstate
		 * with the new one. */
		enum module_ext_state next_state = module_finished;

		if((qstate->qinfo.qtype == LDNS_RR_TYPE_A ||
			qstate->qinfo.qtype == LDNS_RR_TYPE_AAAA ||
			qstate->qinfo.qtype == LDNS_RR_TYPE_ANY) &&
			qstate->return_msg && qstate->return_msg->rep) {
			struct respip_action_info actinfo = {respip_none,
				NULL};
			struct reply_info* new_rep = qstate->return_msg->rep;
			struct ub_packed_rrset_key* alias_rrset = NULL;

			if(!respip_rewrite_reply(&qstate->qinfo,
				qstate->client_info, qstate->return_msg->rep,
				&new_rep, &actinfo, &alias_rrset, 0,
				qstate->region)) {
				goto servfail;
			}
			if(actinfo.action != respip_none) {
				/* save action info for logging on a
				 * per-front-end-query basis */
				if(!(qstate->respip_action_info =
					regional_alloc_init(qstate->region,
						&actinfo, sizeof(actinfo))))
				{
					log_err("out of memory");
					goto servfail;
				}
			} else {
				qstate->respip_action_info = NULL;
			}
			if (new_rep == qstate->return_msg->rep &&
				(actinfo.action == respip_deny ||
				actinfo.action == respip_inform_deny)) {
				/* for deny-variant actions (unless
				 * response-ip data is applied), mark the
				 * query state so the response will be
				 * dropped for all clients. */
				qstate->is_drop = 1;
			} else if(alias_rrset) {
				/* reply contained a CNAME alias: chase it
				 * via a subquery before finishing */
				if(!generate_cname_request(qstate,
					alias_rrset))
					goto servfail;
				next_state = module_wait_subquery;
			}
			qstate->return_msg->rep = new_rep;
		}
		qstate->ext_state[id] = next_state;
	} else
		qstate->ext_state[id] = module_finished;
	return;

servfail:
	qstate->return_rcode = LDNS_RCODE_SERVFAIL;
	qstate->return_msg = NULL;
}
/** enter override into zone.
 * Parses the zone name, netblock and zone type, locates the local zone,
 * and inserts a local_zone_override node into the zone's override tree
 * (creating the tree on first use).
 * @param zones: local zones collection (read-locked during lookup).
 * @param zname: zone name string.
 * @param netblock: netblock string, e.g. "192.0.2.0/24".
 * @param type: local-zone type string.
 * @param rr_class: class of the zone.
 * @return 0 on parse/lookup/allocation failure (logged); 1 on success
 *	(a duplicate netblock is logged but still returns 1). */
static int lz_enter_override(struct local_zones* zones, char* zname,
	char* netblock, char* type, uint16_t rr_class)
{
	uint8_t dname[LDNS_MAX_DOMAINLEN+1];
	size_t dname_len = sizeof(dname);
	int dname_labs;
	struct sockaddr_storage addr;
	int net;
	socklen_t addrlen;
	struct local_zone* z;
	enum localzone_type t;
	struct local_zone_override* n;

	/* parse zone name */
	if(sldns_str2wire_dname_buf(zname, dname, &dname_len) != 0) {
		log_err("cannot parse zone name in local-zone-override: "
			"%s %s", zname, netblock);
		return 0;
	}
	dname_labs = dname_count_labels(dname);

	/* parse netblock */
	if(!netblockstrtoaddr(netblock, UNBOUND_DNS_PORT, &addr, &addrlen,
		&net)) {
		log_err("cannot parse netblock in local-zone-override: "
			"%s %s", zname, netblock);
		return 0;
	}

	/* parse zone type */
	if(!local_zone_str2type(type, &t)) {
		log_err("cannot parse type in local-zone-override: "
			"%s %s %s", zname, netblock, type);
		return 0;
	}

	/* find localzone entry */
	lock_rw_rdlock(&zones->lock);
	z = local_zones_find(zones, dname, dname_len, dname_labs, rr_class);
	if(!z) {
		lock_rw_unlock(&zones->lock);
		log_err("no local-zone for local-zone-override %s", zname);
		return 0;
	}
	lock_rw_wrlock(&z->lock);
	lock_rw_unlock(&zones->lock);

	/* create netblock addr_tree if not present yet */
	if(!z->override_tree) {
		z->override_tree = (struct rbtree_t*)regional_alloc_zero(
			z->region, sizeof(*z->override_tree));
		if(!z->override_tree) {
			lock_rw_unlock(&z->lock);
			log_err("out of memory");
			return 0;
		}
		addr_tree_init(z->override_tree);
	}
	/* add new elem to tree; the tree is guaranteed non-NULL here (the
	 * create path above returns 0 on failure), so the previously
	 * redundant re-check has been removed */
	n = (struct local_zone_override*)regional_alloc_zero(
		z->region, sizeof(*n));
	if(!n) {
		lock_rw_unlock(&z->lock);
		log_err("out of memory");
		return 0;
	}
	n->type = t;
	if(!addr_tree_insert(z->override_tree,
		(struct addr_tree_node*)n, &addr, addrlen, net)) {
		lock_rw_unlock(&z->lock);
		log_err("duplicate local-zone-override %s %s",
			zname, netblock);
		return 1;
	}
	lock_rw_unlock(&z->lock);
	return 1;
}
/** find local data tag string match for the given type in the list.
 * Each matching RR string is parsed and appended to the rrset r; the
 * rrset key and initial packed data are set up lazily on first match.
 * @param qinfo: the query; only RRs of qinfo->qtype match, and the rrset
 *	key borrows qinfo->qname.
 * @param list: list of RR strings (owner name implied, parsed as ".").
 * @param r: rrset to fill; r->rk.dname == NULL signals "not set up yet".
 * @param temp: region for all allocations (grown arrays, rdata copies).
 * @param zname: zone origin for relative names in the RR strings.
 * @param zlen: length of zname.
 * @return 1 if at least one RR was entered, 0 otherwise (no match or
 *	out of memory). */
static int find_tag_datas(struct query_info* qinfo,
	struct config_strlist* list, struct ub_packed_rrset_key* r,
	struct regional* temp, uint8_t* zname, size_t zlen)
{
	struct config_strlist* p;
	char buf[65536];
	uint8_t rr[LDNS_RR_BUF_SIZE];
	size_t len;
	int res;
	struct packed_rrset_data* d;
	for(p=list; p; p=p->next) {
		len = sizeof(rr);
		/* does this element match the type? */
		snprintf(buf, sizeof(buf), ". %s", p->str);
		res = sldns_str2wire_rr_buf(buf, rr, &len, NULL, 3600,
			zname, zlen, NULL, 0);
		if(res != 0)
			/* parse errors are already checked before, in
			 * acllist check_data, skip this for robustness */
			continue;
		if(len < 1 /* . */ + 8 /* typeclassttl*/ + 2 /*rdatalen*/)
			continue;
		if(sldns_wirerr_get_type(rr, len, 1) != qinfo->qtype)
			continue;
		/* do we have entries already? if not setup key */
		if(r->rk.dname == NULL) {
			r->entry.key = r;
			r->rk.dname = qinfo->qname;
			r->rk.dname_len = qinfo->qname_len;
			r->rk.type = htons(qinfo->qtype);
			r->rk.rrset_class = htons(qinfo->qclass);
			r->rk.flags = 0;
			/* first RR's arrays live inside this allocation */
			d = (struct packed_rrset_data*)regional_alloc_zero(
				temp, sizeof(struct packed_rrset_data)
				+ sizeof(size_t) + sizeof(uint8_t*) +
				sizeof(time_t));
			if(!d)
				return 0; /* out of memory */
			r->entry.data = d;
			d->ttl = sldns_wirerr_get_ttl(rr, len, 1);
			d->rr_len = (size_t*)((uint8_t*)d +
				sizeof(struct packed_rrset_data));
			d->rr_data = (uint8_t**)&(d->rr_len[1]);
			d->rr_ttl = (time_t*)&(d->rr_data[1]);
		}
		d = (struct packed_rrset_data*)r->entry.data;
		/* add entry to the data */
		if(d->count != 0) {
			size_t* oldlen = d->rr_len;
			uint8_t** olddata = d->rr_data;
			time_t* oldttl = d->rr_ttl;
			/* increase arrays for lookup */
			/* this is of course slow for very many records,
			 * but most redirects are expected with few records */
			d->rr_len = (size_t*)regional_alloc_zero(temp,
				(d->count+1)*sizeof(size_t));
			d->rr_data = (uint8_t**)regional_alloc_zero(temp,
				(d->count+1)*sizeof(uint8_t*));
			d->rr_ttl = (time_t*)regional_alloc_zero(temp,
				(d->count+1)*sizeof(time_t));
			if(!d->rr_len || !d->rr_data || !d->rr_ttl)
				return 0; /* out of memory */
			/* first one was allocated after struct d, but new
			 * ones get their own array increment alloc, so
			 * copy old content */
			memmove(d->rr_len, oldlen,
				d->count*sizeof(size_t));
			memmove(d->rr_data, olddata,
				d->count*sizeof(uint8_t*));
			memmove(d->rr_ttl, oldttl,
				d->count*sizeof(time_t));
		}
		d->rr_len[d->count] =
			sldns_wirerr_get_rdatalen(rr, len, 1)+2;
		d->rr_ttl[d->count] = sldns_wirerr_get_ttl(rr, len, 1);
		d->rr_data[d->count] = regional_alloc_init(temp,
			sldns_wirerr_get_rdatawl(rr, len, 1),
			d->rr_len[d->count]);
		/* BUGFIX: the out-of-memory check previously tested the
		 * wrong variable (if(!d->rr_data[d->count]) if(!d) ...);
		 * since d is non-NULL here the error was never reported
		 * and a NULL rdata pointer leaked into the rrset */
		if(!d->rr_data[d->count])
			return 0; /* out of memory */
		d->count++;
	}
	/* success iff the key was set up, i.e. something was entered */
	if(r->rk.dname)
		return 1;
	return 0;
}