/**
 * Copy the rrsets of 'from' into the (already allocated) rrset keys of
 * the 'dest' replyinfo.
 * @param dest: destination; its rrsets[] keys are filled in.
 * @param from: source replyinfo to copy rrset data from.
 * @param region: if nonNULL all copies are made in this region,
 *	otherwise malloc (memdup) is used.
 * @return 0 on allocation failure, 1 on success.
 */
static int repinfo_copy_rrsets(struct reply_info* dest,
	struct reply_info* from, struct regional* region)
{
	size_t i, s;
	struct packed_rrset_data* fd, *dd;
	struct ub_packed_rrset_key* fk, *dk;
	for(i=0; i<dest->rrset_count; i++) {
		fk = from->rrsets[i];
		dk = dest->rrsets[i];
		fd = (struct packed_rrset_data*)fk->entry.data;
		dk->entry.hash = fk->entry.hash;
		dk->rk = fk->rk;
		if(region) {
			/* region copy also keeps the source id */
			dk->id = fk->id;
			dk->rk.dname = (uint8_t*)regional_alloc_init(
				region, fk->rk.dname, fk->rk.dname_len);
		} else
			dk->rk.dname = (uint8_t*)memdup(fk->rk.dname,
				fk->rk.dname_len);
		if(!dk->rk.dname)
			return 0;
		s = packed_rrset_sizeof(fd);
		if(region)
			dd = (struct packed_rrset_data*)regional_alloc_init(
				region, fd, s);
		else	dd = (struct packed_rrset_data*)memdup(fd, s);
		if(!dd)
			return 0;
		/* the flat copy contains internal pointers into the old
		 * block; repoint them into the new copy */
		packed_rrset_ptr_fixup(dd);
		dk->entry.data = (void*)dd;
	}
	return 1;
}
/**
 * Load one RR (one text line read over ssl) into slot i of the rrset.
 * @param ssl: channel to read the line from.
 * @param buf: scratch buffer, receives the text line.
 * @param region: rdata and dname copies are allocated here.
 * @param rk: rrset key; type, class and dname are set from the first RR.
 * @param d: packed rrset data; ttl, len and rdata of slot i are filled.
 * @param i: index of the RR within the rrset arrays.
 * @param is_rrsig: if nonzero the parsed RR must be an RRSIG.
 * @param go_on: set to 0 when the "BADRR" sentinel line is read.
 * @param now: added to the parsed (relative) TTL to make it absolute.
 * @return 0 on error, 1 on success (also 1 for the BADRR sentinel).
 */
static int load_rr(SSL* ssl, sldns_buffer* buf, struct regional* region,
	struct ub_packed_rrset_key* rk, struct packed_rrset_data* d,
	unsigned int i, int is_rrsig, int* go_on, time_t now)
{
	uint8_t rr[LDNS_RR_BUF_SIZE];
	size_t rr_len = sizeof(rr), dname_len = 0;
	int status;

	/* read the line */
	if(!ssl_read_buf(ssl, buf))
		return 0;
	if(strncmp((char*)sldns_buffer_begin(buf), "BADRR\n", 6) == 0) {
		*go_on = 0;
		return 1;
	}
	status = sldns_str2wire_rr_buf((char*)sldns_buffer_begin(buf), rr,
		&rr_len, &dname_len, 3600, NULL, 0, NULL, 0);
	if(status != 0) {
		log_warn("error cannot parse rr: %s: %s",
			sldns_get_errorstr_parse(status),
			(char*)sldns_buffer_begin(buf));
		return 0;
	}
	if(is_rrsig && sldns_wirerr_get_type(rr, rr_len, dname_len)
		!= LDNS_RR_TYPE_RRSIG) {
		log_warn("error expected rrsig but got %s",
			(char*)sldns_buffer_begin(buf));
		return 0;
	}

	/* convert ldns rr into packed_rr */
	d->rr_ttl[i] = (time_t)sldns_wirerr_get_ttl(rr, rr_len, dname_len)
		+ now;
	sldns_buffer_clear(buf);
	/* stored rdata is the wire rdata prefixed by its 16bit length */
	d->rr_len[i] = sldns_wirerr_get_rdatalen(rr, rr_len, dname_len)+2;
	d->rr_data[i] = (uint8_t*)regional_alloc_init(region,
		sldns_wirerr_get_rdatawl(rr, rr_len, dname_len),
		d->rr_len[i]);
	if(!d->rr_data[i]) {
		log_warn("error out of memory");
		return 0;
	}

	/* if first entry, fill the key structure */
	if(i==0) {
		rk->rk.type = htons(sldns_wirerr_get_type(rr, rr_len,
			dname_len));
		rk->rk.rrset_class = htons(sldns_wirerr_get_class(rr,
			rr_len, dname_len));
		rk->rk.dname_len = dname_len;
		/* the owner dname is at the start of the wire format rr */
		rk->rk.dname = regional_alloc_init(region, rr, dname_len);
		if(!rk->rk.dname) {
			log_warn("error out of memory");
			return 0;
		}
	}
	return 1;
}
/**
 * Find a data node in the zone, creating it - and any empty nonterminal
 * parent nodes up to the zone apex - when it does not exist yet.
 * @param z: the local zone to search/modify.
 * @param nm: node domain name; modified (shortened) by the recursion.
 * @param nmlen: length of nm.
 * @param nmlabs: number of labels in nm.
 * @param res: receives the node for the original nm.
 * @return 0 on allocation failure, 1 on success.
 */
static int lz_find_create_node(struct local_zone* z, uint8_t* nm,
	size_t nmlen, int nmlabs, struct local_data** res)
{
	struct local_data* ld = lz_find_node(z, nm, nmlen, nmlabs);
	if(!ld) {
		/* create a domain name to store rr. */
		ld = (struct local_data*)regional_alloc_zero(z->region,
			sizeof(*ld));
		if(!ld) {
			log_err("out of memory adding local data");
			return 0;
		}
		ld->node.key = ld;
		ld->name = regional_alloc_init(z->region, nm, nmlen);
		if(!ld->name) {
			log_err("out of memory");
			return 0;
		}
		ld->namelen = nmlen;
		ld->namelabs = nmlabs;
		if(!rbtree_insert(&z->data, &ld->node)) {
			log_assert(0); /* duplicate name */
		}
		/* see if empty nonterminals need to be created */
		if(nmlabs > z->namelabs) {
			/* recurse on the parent (one label stripped);
			 * the recursion writes *res but it is overwritten
			 * below with the node created for this nm */
			dname_remove_label(&nm, &nmlen);
			if(!lz_find_create_node(z, nm, nmlen, nmlabs-1, res))
				return 0;
		}
	}
	*res = ld;
	return 1;
}
/**
 * Allocate a dns_msg in region with a copy of the query_info and room
 * for num rrset pointers.  The reply_info is allocated without the
 * trailing rrset_ref array (the "packed" part).
 * @param region: where to allocate.
 * @param q: query info; qname is deep-copied into the region.
 * @param num: number of rrset pointers to reserve; bounded by
 *	RR_COUNT_MAX so the size multiplication cannot overflow.
 * @return new message or NULL on allocation failure.
 */
static struct dns_msg* gen_dns_msg(struct regional* region,
	struct query_info* q, size_t num)
{
	struct dns_msg* msg = (struct dns_msg*)regional_alloc(region,
		sizeof(struct dns_msg));
	if(!msg)
		return NULL;
	memcpy(&msg->qinfo, q, sizeof(struct query_info));
	msg->qinfo.qname = regional_alloc_init(region, q->qname,
		q->qname_len);
	if(!msg->qinfo.qname)
		return NULL;
	/* allocate replyinfo struct and rrset key array separately */
	msg->rep = (struct reply_info*)regional_alloc(region,
		sizeof(struct reply_info) - sizeof(struct rrset_ref));
	if(!msg->rep)
		return NULL;
	if(num > RR_COUNT_MAX)
		return NULL; /* integer overflow protection */
	msg->rep->rrsets = (struct ub_packed_rrset_key**)
		regional_alloc(region,
			num * sizeof(struct ub_packed_rrset_key*));
	if(!msg->rep->rrsets)
		return NULL;
	return msg;
}
/**
 * Add a nameserver name to the delegation point (no address yet).
 * Duplicates are detected and silently accepted (returns success).
 * Fix: the result of the name copy was previously unchecked, so an
 * out-of-memory condition left a NULL ns->name in the list while
 * reporting success.  The name is now copied (and checked) before the
 * node is linked into the list.
 * @param dp: the delegation point.
 * @param region: allocations are made in this region.
 * @param name: domain name of the nameserver, in wire format.
 * @param lame: stored (as uint8_t) in the lame flag of the new entry.
 * @return 0 on allocation failure, 1 on success (or duplicate).
 */
int delegpt_add_ns(struct delegpt* dp, struct regional* region,
	uint8_t* name, int lame)
{
	struct delegpt_ns* ns;
	size_t len;
	(void)dname_count_size_labels(name, &len);
	/* slow check for duplicates to avoid counting failures when
	 * adding the same server as a dependency twice */
	if(delegpt_find_ns(dp, name, len))
		return 1;
	ns = (struct delegpt_ns*)regional_alloc(region,
		sizeof(struct delegpt_ns));
	if(!ns)
		return 0;
	/* copy the name before linking the node in, so that a failed
	 * allocation does not leave a half-initialized list entry */
	ns->namelen = len;
	ns->name = regional_alloc_init(region, name, ns->namelen);
	if(!ns->name)
		return 0;
	ns->next = dp->nslist;
	dp->nslist = ns;
	ns->resolved = 0;
	ns->got4 = 0;
	ns->got6 = 0;
	ns->lame = (uint8_t)lame;
	ns->done_pside4 = 0;
	ns->done_pside6 = 0;
	return 1;
}
/**
 * Insert RR into RRset data structure; Wastes a couple of bytes.
 * The new RR is placed in slot 0 and existing RRs shift up one slot.
 * The old arrays remain allocated in the region (the wasted bytes).
 * @param region: new, larger arrays and the rdata copy come from here.
 * @param pd: packed rrset data to grow; count is incremented.
 * @param rdata: wire rdata (with length prefix) to copy in.
 * @param rdata_len: length of rdata.
 * @param ttl: ttl for the new RR.
 * @return 0 on allocation failure, 1 on success.
 */
static int insert_rr(struct regional* region, struct packed_rrset_data* pd,
	uint8_t* rdata, size_t rdata_len, time_t ttl)
{
	size_t* oldlen = pd->rr_len;
	time_t* oldttl = pd->rr_ttl;
	uint8_t** olddata = pd->rr_data;

	/* add RR to rrset */
	pd->count++;
	pd->rr_len = regional_alloc(region, sizeof(*pd->rr_len)*pd->count);
	pd->rr_ttl = regional_alloc(region, sizeof(*pd->rr_ttl)*pd->count);
	pd->rr_data = regional_alloc(region, sizeof(*pd->rr_data)*pd->count);
	if(!pd->rr_len || !pd->rr_ttl || !pd->rr_data) {
		log_err("out of memory");
		return 0;
	}
	/* shift the old entries to slots 1..count-1 */
	if(pd->count > 1) {
		memcpy(pd->rr_len+1, oldlen,
			sizeof(*pd->rr_len)*(pd->count-1));
		memcpy(pd->rr_ttl+1, oldttl,
			sizeof(*pd->rr_ttl)*(pd->count-1));
		memcpy(pd->rr_data+1, olddata,
			sizeof(*pd->rr_data)*(pd->count-1));
	}
	pd->rr_len[0] = rdata_len;
	pd->rr_ttl[0] = ttl;
	pd->rr_data[0] = regional_alloc_init(region, rdata, rdata_len);
	if(!pd->rr_data[0]) {
		log_err("out of memory");
		return 0;
	}
	return 1;
}
/**
 * Create a dns_msg in region with the given question and room for
 * capacity rrset pointers.  Reply is initialized with QR set, qdcount 1.
 * @param qname: wire format query name, copied into region.
 * @param qnamelen: length of qname.
 * @param qtype: query type (host order).
 * @param qclass: query class (host order).
 * @param region: where to allocate.
 * @param capacity: rrset pointer slots to reserve; bounded by
 *	RR_COUNT_MAX to protect the multiplication from overflow.
 * @return new message or NULL on allocation failure.
 */
struct dns_msg* dns_msg_create(uint8_t* qname, size_t qnamelen,
	uint16_t qtype, uint16_t qclass, struct regional* region,
	size_t capacity)
{
	struct dns_msg* msg = (struct dns_msg*)regional_alloc(region,
		sizeof(struct dns_msg));
	if(!msg)
		return NULL;
	msg->qinfo.qname = regional_alloc_init(region, qname, qnamelen);
	if(!msg->qinfo.qname)
		return NULL;
	msg->qinfo.qname_len = qnamelen;
	msg->qinfo.qtype = qtype;
	msg->qinfo.qclass = qclass;
	msg->qinfo.local_alias = NULL;
	/* non-packed reply_info, because it needs to grow the array */
	msg->rep = (struct reply_info*)regional_alloc_zero(region,
		sizeof(struct reply_info)-sizeof(struct rrset_ref));
	if(!msg->rep)
		return NULL;
	if(capacity > RR_COUNT_MAX)
		return NULL; /* integer overflow protection */
	msg->rep->flags = BIT_QR; /* with QR, no AA */
	msg->rep->qdcount = 1;
	msg->rep->rrsets = (struct ub_packed_rrset_key**)
		regional_alloc(region,
			capacity*sizeof(struct ub_packed_rrset_key*));
	if(!msg->rep->rrsets)
		return NULL;
	return msg;
}
/**
 * Create new trust anchor object and insert it into the anchors tree
 * (under the anchors lock).
 * @param anchors: anchor storage; its region owns the allocations.
 * @param name: wire format owner name, copied into the region.
 * @param namelabs: number of labels in name.
 * @param namelen: length of name.
 * @param dclass: class of the trust anchor.
 * @return new trust anchor or NULL on allocation failure.
 */
static struct trust_anchor* anchor_new_ta(struct val_anchors* anchors,
	uint8_t* name, int namelabs, size_t namelen, uint16_t dclass)
{
#ifdef UNBOUND_DEBUG
	rbnode_t* r;
#endif
	struct trust_anchor* ta = (struct trust_anchor*)regional_alloc(
		anchors->region, sizeof(struct trust_anchor));
	if(!ta)
		return NULL;
	memset(ta, 0, sizeof(*ta));
	ta->node.key = ta;
	ta->name = regional_alloc_init(anchors->region, name, namelen);
	if(!ta->name)
		return NULL;
	ta->namelabs = namelabs;
	ta->namelen = namelen;
	ta->dclass = dclass;
	lock_basic_init(&ta->lock);
	lock_basic_lock(&anchors->lock);
#ifdef UNBOUND_DEBUG
	r =
#endif
	rbtree_insert(anchors->tree, &ta->node);
	lock_basic_unlock(&anchors->lock);
	/* the insert result is only captured in debug builds; NULL would
	 * mean a duplicate name in the tree */
	log_assert(r != NULL);
	return ta;
}
/**
 * Set the name of the delegation point: store a regional copy of the
 * wire name plus its label count and length.
 * @return 0 on allocation failure, 1 on success.
 */
int delegpt_set_name(struct delegpt* dp, struct regional* region,
	uint8_t* name)
{
	size_t namelen;
	int labs = dname_count_size_labels(name, &namelen);
	uint8_t* copy = regional_alloc_init(region, name, namelen);
	dp->namelabs = labs;
	dp->namelen = namelen;
	dp->name = copy;
	return copy != 0;
}
/**
 * Store a parent-side negative entry: an empty rrset (one RR with zero
 * length rdata) built in the scratch region and handed to the cache.
 * @param env: module environment; scratch region is used for the build.
 * @param qinfo: name/type/class that received the negative answer.
 * @param rep: reply that determines the TTL (may be NULL).
 */
void iter_store_parentside_neg(struct module_env* env,
	struct query_info* qinfo, struct reply_info* rep)
{
	/* TTL: NS from referral in iq->deleg_msg,
	 *	or first RR from iq->response,
	 *	or servfail5secs if !iq->response */
	time_t ttl = NORR_TTL;
	struct ub_packed_rrset_key* neg;
	struct packed_rrset_data* newd;
	if(rep) {
		struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
		if(!rrset && rep->rrset_count != 0)
			rrset = rep->rrsets[0];
		if(rrset)
			ttl = ub_packed_rrset_ttl(rrset);
	}
	/* create empty rrset to store */
	neg = (struct ub_packed_rrset_key*)regional_alloc(env->scratch,
		sizeof(struct ub_packed_rrset_key));
	if(!neg) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	memset(&neg->entry, 0, sizeof(neg->entry));
	neg->entry.key = neg;
	neg->rk.type = htons(qinfo->qtype);
	neg->rk.rrset_class = htons(qinfo->qclass);
	neg->rk.flags = 0;
	neg->rk.dname = regional_alloc_init(env->scratch, qinfo->qname,
		qinfo->qname_len);
	if(!neg->rk.dname) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	neg->rk.dname_len = qinfo->qname_len;
	neg->entry.hash = rrset_key_hash(&neg->rk);
	/* one allocation holds the struct plus arrays for exactly one RR:
	 * one rr_len, one rr_data pointer, one rr_ttl, and the 2 bytes
	 * of length-prefixed (empty) rdata */
	newd = (struct packed_rrset_data*)regional_alloc_zero(env->scratch,
		sizeof(struct packed_rrset_data) + sizeof(size_t) +
		sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t));
	if(!newd) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	neg->entry.data = newd;
	newd->ttl = ttl;
	/* entry must have one RR, otherwise not valid in cache.
	 * put in one RR with empty rdata: those are ignored as nameserver */
	newd->count = 1;
	newd->rrsig_count = 0;
	newd->trust = rrset_trust_ans_noAA;
	newd->rr_len = (size_t*)((uint8_t*)newd +
		sizeof(struct packed_rrset_data));
	newd->rr_len[0] = 0 /* zero len rdata */ + sizeof(uint16_t);
	packed_rrset_ptr_fixup(newd);
	newd->rr_ttl[0] = newd->ttl;
	sldns_write_uint16(newd->rr_data[0], 0 /* zero len rdata */);
	/* store it */
	log_rrset_key(VERB_ALGO, "store parent-side negative", neg);
	iter_store_parentside_rrset(env, neg);
}
/**
 * Create a new mesh state and its module qstate, allocated in a region
 * obtained from the env allocator.  On failure the region is released
 * again before returning NULL.
 * @param env: module environment (allocator, mesh, modules).
 * @param qinfo: query info; qname is copied into the region.
 * @param qflags: query flags; only RD and CD are kept.
 * @param prime: whether this is a priming query.
 * @return new mesh state or NULL on allocation failure.
 */
struct mesh_state* mesh_state_create(struct module_env* env,
	struct query_info* qinfo, uint16_t qflags, int prime)
{
	struct regional* region = alloc_reg_obtain(env->alloc);
	struct mesh_state* mstate;
	int i;
	if(!region)
		return NULL;
	mstate = (struct mesh_state*)regional_alloc(region,
		sizeof(struct mesh_state));
	if(!mstate) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	memset(mstate, 0, sizeof(*mstate));
	mstate->node = *RBTREE_NULL;
	mstate->run_node = *RBTREE_NULL;
	mstate->node.key = mstate;
	mstate->run_node.key = mstate;
	mstate->reply_list = NULL;
	mstate->list_select = mesh_no_list;
	mstate->replies_sent = 0;
	rbtree_init(&mstate->super_set, &mesh_state_ref_compare);
	rbtree_init(&mstate->sub_set, &mesh_state_ref_compare);
	mstate->num_activated = 0;
	/* init module qstate */
	mstate->s.qinfo.qtype = qinfo->qtype;
	mstate->s.qinfo.qclass = qinfo->qclass;
	mstate->s.qinfo.qname_len = qinfo->qname_len;
	mstate->s.qinfo.qname = regional_alloc_init(region, qinfo->qname,
		qinfo->qname_len);
	if(!mstate->s.qinfo.qname) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	/* remove all weird bits from qflags */
	mstate->s.query_flags = (qflags & (BIT_RD|BIT_CD));
	mstate->s.is_priming = prime;
	mstate->s.reply = NULL;
	mstate->s.region = region;
	mstate->s.curmod = 0;
	mstate->s.return_msg = 0;
	mstate->s.return_rcode = LDNS_RCODE_NOERROR;
	mstate->s.env = env;
	mstate->s.mesh_info = mstate;
	mstate->s.prefetch_leeway = 0;
	/* init modules */
	for(i=0; i<env->mesh->mods.num; i++) {
		mstate->s.minfo[i] = NULL;
		mstate->s.ext_state[i] = module_state_initial;
	}
	return mstate;
}
/** copy msg to worker pad */ static int copy_msg(struct regional* region, struct lruhash_entry* e, struct query_info** k, struct reply_info** d) { struct reply_info* rep = (struct reply_info*)e->data; *d = (struct reply_info*)regional_alloc_init(region, e->data, sizeof(struct reply_info) + sizeof(struct rrset_ref) * (rep->rrset_count-1) + sizeof(struct ub_packed_rrset_key*) * rep->rrset_count); if(!*d) return 0; (*d)->rrsets = (struct ub_packed_rrset_key**)(void *)( (uint8_t*)(&((*d)->ref[0])) + sizeof(struct rrset_ref) * rep->rrset_count); *k = (struct query_info*)regional_alloc_init(region, e->key, sizeof(struct query_info)); if(!*k) return 0; (*k)->qname = regional_alloc_init(region, (*k)->qname, (*k)->qname_len); return (*k)->qname != NULL; }
/**
 * Copy an rrset key and its packed data into region, converting the
 * absolute TTLs of the stored rrset to TTLs relative to 'now'.
 * @param key: source rrset key (with packed_rrset_data in entry.data).
 * @param region: where the copy is allocated.
 * @param now: current time, subtracted from every TTL (clamped at 0).
 * @return the copy, or NULL on allocation failure.
 */
struct ub_packed_rrset_key* packed_rrset_copy_region(
	struct ub_packed_rrset_key* key, struct regional* region,
	time_t now)
{
	struct ub_packed_rrset_key* ck = regional_alloc(region,
		sizeof(struct ub_packed_rrset_key));
	struct packed_rrset_data* d;
	struct packed_rrset_data* data = (struct packed_rrset_data*)
		key->entry.data;
	size_t dsize, i;
	if(!ck)
		return NULL;
	ck->id = key->id;
	memset(&ck->entry, 0, sizeof(ck->entry));
	ck->entry.hash = key->entry.hash;
	ck->entry.key = ck;
	ck->rk = key->rk;
	ck->rk.dname = regional_alloc_init(region, key->rk.dname,
		key->rk.dname_len);
	if(!ck->rk.dname)
		return NULL;
	dsize = packed_rrset_sizeof(data);
	/* flat copy; internal pointers fixed up below */
	d = (struct packed_rrset_data*)regional_alloc_init(region, data,
		dsize);
	if(!d)
		return NULL;
	ck->entry.data = d;
	packed_rrset_ptr_fixup(d);
	/* make TTLs relative - once per rrset */
	for(i=0; i<d->count + d->rrsig_count; i++) {
		if(d->rr_ttl[i] < now)
			d->rr_ttl[i] = 0;
		else	d->rr_ttl[i] -= now;
	}
	if(d->ttl < now)
		d->ttl = 0;
	else	d->ttl -= now;
	return ck;
}
/**
 * make a deep copy of 'key' in 'region'.
 * This is largely derived from packed_rrset_copy_region() and
 * packed_rrset_ptr_fixup(), but differs in the following points:
 *
 * - It doesn't assume all data in 'key' are in a contiguous memory region.
 *   Although that would be the case in most cases, 'key' can be passed from
 *   a lower-level module and it might not build the rrset to meet the
 *   assumption.  In fact, an rrset specified as response-ip-data or generated
 *   in local_data_find_tag_datas() breaks the assumption.  So it would be
 *   safer not to naively rely on the assumption.  On the other hand, this
 *   function ensures the copied rrset data are in a contiguous region so
 *   that it won't cause a disruption even if an upper layer module naively
 *   assumes the memory layout.
 * - It doesn't copy RRSIGs (if any) in 'key'.  The rrset will be used in
 *   a reply that was already faked, so it doesn't make much sense to provide
 *   partial sigs even if they are valid themselves.
 * - It doesn't adjust TTLs as it basically has to be a verbatim copy of 'key'
 *   just allocated in 'region' (the assumption is necessary TTL adjustment
 *   has been already done in 'key').
 *
 * This function returns the copied rrset key on success, and NULL on memory
 * allocation failure.
 */
struct ub_packed_rrset_key* copy_rrset(const struct ub_packed_rrset_key* key,
	struct regional* region)
{
	struct ub_packed_rrset_key* ck = regional_alloc(region,
		sizeof(struct ub_packed_rrset_key));
	struct packed_rrset_data* d;
	struct packed_rrset_data* data = key->entry.data;
	size_t dsize, i;
	uint8_t* nextrdata;

	/* derived from packed_rrset_copy_region(), but don't use
	 * packed_rrset_sizeof() and do exclude RRSIGs */
	if(!ck)
		return NULL;
	ck->id = key->id;
	memset(&ck->entry, 0, sizeof(ck->entry));
	ck->entry.hash = key->entry.hash;
	ck->entry.key = ck;
	ck->rk = key->rk;
	ck->rk.dname = regional_alloc_init(region, key->rk.dname,
		key->rk.dname_len);
	if(!ck->rk.dname)
		return NULL;

	/* one allocation: struct, the three per-RR arrays, then the
	 * concatenated rdata of the (non-RRSIG) RRs */
	dsize = sizeof(struct packed_rrset_data) + data->count *
		(sizeof(size_t)+sizeof(uint8_t*)+sizeof(time_t));
	for(i=0; i<data->count; i++)
		dsize += data->rr_len[i];
	d = regional_alloc(region, dsize);
	if(!d)
		return NULL;
	*d = *data;
	d->rrsig_count = 0;
	ck->entry.data = d;

	/* derived from packed_rrset_ptr_fixup() with copying the data */
	d->rr_len = (size_t*)((uint8_t*)d +
		sizeof(struct packed_rrset_data));
	d->rr_data = (uint8_t**)&(d->rr_len[d->count]);
	d->rr_ttl = (time_t*)&(d->rr_data[d->count]);
	nextrdata = (uint8_t*)&(d->rr_ttl[d->count]);
	for(i=0; i<d->count; i++) {
		d->rr_len[i] = data->rr_len[i];
		d->rr_ttl[i] = data->rr_ttl[i];
		d->rr_data[i] = nextrdata;
		memcpy(d->rr_data[i], data->rr_data[i], data->rr_len[i]);
		nextrdata += d->rr_len[i];
	}
	return ck;
}
/**
 * Assemble an rrset structure for the type
 * @param region: allocated in this region.
 * @param ta: trust anchor.
 * @param num: number of items to fetch from list.
 * @param type: fetch only items of this type.
 * @return rrset or NULL on error.
 */
static struct ub_packed_rrset_key* assemble_it(struct regional* region,
	struct trust_anchor* ta, size_t num, uint16_t type)
{
	struct ub_packed_rrset_key* pkey = (struct ub_packed_rrset_key*)
		regional_alloc(region, sizeof(*pkey));
	struct packed_rrset_data* pd;
	struct ta_key* tk;
	size_t i;
	if(!pkey)
		return NULL;
	memset(pkey, 0, sizeof(*pkey));
	pkey->rk.dname = regional_alloc_init(region, ta->name, ta->namelen);
	if(!pkey->rk.dname)
		return NULL;
	pkey->rk.dname_len = ta->namelen;
	pkey->rk.type = htons(type);
	pkey->rk.rrset_class = htons(ta->dclass);
	/* The rrset is build in an uncompressed way. This means it
	 * cannot be copied in the normal way. */
	pd = (struct packed_rrset_data*)regional_alloc(region, sizeof(*pd));
	if(!pd)
		return NULL;
	memset(pd, 0, sizeof(*pd));
	pd->count = num;
	pd->trust = rrset_trust_ultimate;
	/* arrays allocated separately, not behind the struct as in the
	 * packed (contiguous) layout */
	pd->rr_len = (size_t*)regional_alloc(region, num*sizeof(size_t));
	if(!pd->rr_len)
		return NULL;
	pd->rr_ttl = (uint32_t*)regional_alloc(region,
		num*sizeof(uint32_t));
	if(!pd->rr_ttl)
		return NULL;
	pd->rr_data = (uint8_t**)regional_alloc(region,
		num*sizeof(uint8_t*));
	if(!pd->rr_data)
		return NULL;
	/* fill in rrs */
	i=0;
	for(tk = ta->keylist; tk; tk = tk->next) {
		if(tk->type != type)
			continue;
		pd->rr_len[i] = tk->len;
		/* reuse data ptr to allocation in region */
		pd->rr_data[i] = tk->data;
		pd->rr_ttl[i] = 0;
		i++;
	}
	pkey->entry.data = (void*)pd;
	return pkey;
}
/**
 * Deep-copy a dns_msg into region: the message struct, the qname and
 * the reply info are all duplicated.
 * @return the copy, or NULL on allocation failure.
 */
struct dns_msg* dns_copy_msg(struct dns_msg* from, struct regional* region)
{
	struct dns_msg* copy;
	copy = (struct dns_msg*)regional_alloc(region, sizeof(*copy));
	if(!copy)
		return NULL;
	copy->qinfo = from->qinfo;
	copy->qinfo.qname = regional_alloc_init(region, from->qinfo.qname,
		from->qinfo.qname_len);
	if(!copy->qinfo.qname)
		return NULL;
	copy->rep = reply_info_copy(from->rep, NULL, region);
	if(!copy->rep)
		return NULL;
	return copy;
}
/**
 * Read qinfo from next three words.
 * The three words (name, class, type) are split off the front of str,
 * parsed as a question RR, and the wire format qname is copied into
 * region.
 * @param str: input line; modified (NUL placed after the third word).
 * @param qinfo: filled with qtype, qclass, qname and qname_len.
 * @param buf: scratch buffer for the wire format dname.
 * @param region: qname copy is allocated here.
 * @return pointer to the rest of the line after the three words,
 *	or NULL on parse or allocation error.
 */
static char* load_qinfo(char* str, struct query_info* qinfo,
	ldns_buffer* buf, struct regional* region)
{
	/* s is part of the buf */
	char* s = str;
	ldns_rr* rr;
	ldns_status status;
	/* skip three words */
	s = strchr(str, ' ');
	if(s) s = strchr(s+1, ' ');
	if(s) s = strchr(s+1, ' ');
	if(!s) {
		log_warn("error line too short, %s", str);
		return NULL;
	}
	s[0] = 0;
	s++;
	/* parse them */
	status = ldns_rr_new_question_frm_str(&rr, str, NULL, NULL);
	if(status != LDNS_STATUS_OK) {
		log_warn("error cannot parse: %s %s",
			ldns_get_errorstr_by_id(status), str);
		return NULL;
	}
	qinfo->qtype = ldns_rr_get_type(rr);
	qinfo->qclass = ldns_rr_get_class(rr);
	ldns_buffer_clear(buf);
	status = ldns_dname2buffer_wire(buf, ldns_rr_owner(rr));
	ldns_rr_free(rr);
	if(status != LDNS_STATUS_OK) {
		log_warn("error cannot dname2wire: %s",
			ldns_get_errorstr_by_id(status));
		return NULL;
	}
	ldns_buffer_flip(buf);
	qinfo->qname_len = ldns_buffer_limit(buf);
	qinfo->qname = (uint8_t*)regional_alloc_init(region,
		ldns_buffer_begin(buf), ldns_buffer_limit(buf));
	if(!qinfo->qname) {
		log_warn("error out of memory");
		return NULL;
	}
	return s;
}
/** create new trustanchor key, with a regional copy of the rdata */
static struct ta_key* anchor_new_ta_key(struct val_anchors* anchors,
	uint8_t* rdata, size_t rdata_len, uint16_t type)
{
	struct ta_key* key;
	key = (struct ta_key*)regional_alloc(anchors->region, sizeof(*key));
	if(!key)
		return NULL;
	memset(key, 0, sizeof(*key));
	/* the anchors' region owns the rdata copy */
	key->data = regional_alloc_init(anchors->region, rdata, rdata_len);
	if(!key->data)
		return NULL;
	key->len = rdata_len;
	key->type = type;
	return key;
}
/** perform b32 encoding of hash; result stored in c->b32 in region */
static int nsec3_calc_b32(struct regional* region, ldns_buffer* buf,
	struct nsec3_cached_hash* c)
{
	int len;
	ldns_buffer_clear(buf);
	/* encode into the scratch buffer first */
	len = ldns_b32_ntop_extended_hex(c->hash, c->hash_len,
		(char*)ldns_buffer_begin(buf), ldns_buffer_limit(buf));
	if(len < 1) {
		log_err("b32_ntop_extended_hex: error in encoding: %d", len);
		return 0;
	}
	c->b32_len = (size_t)len;
	/* keep a copy that lives as long as the region */
	c->b32 = regional_alloc_init(region, ldns_buffer_begin(buf),
		c->b32_len);
	return c->b32 != 0;
}
/** attach a taglist to the response-ip node for ipstr, creating the
 * node when needed; an existing taglist is replaced with a warning */
static int respip_tag_cfg(struct respip_set* set, const char* ipstr,
	const uint8_t* taglist, size_t taglen)
{
	struct resp_addr* node = respip_find_or_create(set, ipstr, 1);
	if(!node)
		return 0;
	if(node->taglist) {
		log_warn("duplicate response-address-tag for '%s', overridden.",
			ipstr);
	}
	node->taglist = regional_alloc_init(set->region, taglist, taglen);
	if(!node->taglist) {
		log_err("out of memory");
		return 0;
	}
	node->taglen = taglen;
	return 1;
}
/**
 * Test hash algo - NSEC3 hash it and compare result.
 * The test entry encodes the expected b32 hash as the owner of an AAAA
 * rrset in the reply; the NSEC3 rrset supplies the hash parameters.
 */
static void nsec3_hash_test_entry(struct entry* e, rbtree_type* ct,
	struct alloc_cache* alloc, struct regional* region,
	sldns_buffer* buf)
{
	struct query_info qinfo;
	struct reply_info* rep = NULL;
	struct ub_packed_rrset_key* answer, *nsec3;
	struct nsec3_cached_hash* hash = NULL;
	int ret;
	uint8_t* qname;

	if(vsig) {
		/* verbose mode: print the packet being verified */
		char* s = sldns_wire2str_pkt(e->reply_list->reply_pkt,
			e->reply_list->reply_len);
		printf("verifying NSEC3 hash:\n%s\n", s?s:"outofmemory");
		free(s);
	}
	entry_to_repinfo(e, alloc, region, buf, &qinfo, &rep);
	nsec3 = find_rrset_type(rep, LDNS_RR_TYPE_NSEC3);
	answer = find_rrset_type(rep, LDNS_RR_TYPE_AAAA);
	qname = regional_alloc_init(region, qinfo.qname, qinfo.qname_len);
	/* check test is OK */
	unit_assert(nsec3 && answer && qname);

	ret = nsec3_hash_name(ct, region, buf, nsec3, 0, qname,
		qinfo.qname_len, &hash);
	if(ret != 1) {
		printf("Bad nsec3_hash_name retcode %d\n", ret);
		unit_assert(ret == 1);
	}
	unit_assert(hash->dname && hash->hash && hash->hash_len &&
		hash->b32 && hash->b32_len);

	/* expected b32 is the first label of the answer's owner name:
	 * length byte, then the label octets */
	unit_assert(hash->b32_len == (size_t)answer->rk.dname[0]);
	/* does not do lowercasing. */
	unit_assert(memcmp(hash->b32, answer->rk.dname+1, hash->b32_len)
		== 0);

	reply_info_parsedelete(rep, alloc);
	query_info_clear(&qinfo);
}
/**
 * Attach a client reply entry to the mesh state's reply list.
 * The entry is only linked into the list once fully initialized.
 * @return 0 on allocation failure, 1 on success.
 */
int mesh_state_add_reply(struct mesh_state* s, struct edns_data* edns,
	struct comm_reply* rep, uint16_t qid, uint16_t qflags,
	uint8_t* qname)
{
	struct mesh_reply* entry = regional_alloc(s->s.region,
		sizeof(*entry));
	if(!entry)
		return 0;
	entry->qid = qid;
	entry->qflags = qflags;
	entry->edns = *edns;
	entry->query_reply = *rep;
	entry->start_time = *s->s.env->now_tv;
	entry->next = s->reply_list;
	/* copy the query name into the region */
	entry->qname = regional_alloc_init(s->s.region, qname,
		s->s.qinfo.qname_len);
	if(!entry->qname)
		return 0; /* not linked in; region reclaims the memory */
	s->reply_list = entry;
	return 1;
}
/**
 * Read qinfo from next three words.
 * The three words (name, class, type) are split off the front of str,
 * parsed as a question, and the wire qname is copied into region.
 * @param str: input line; modified (NUL placed after the third word).
 * @param qinfo: filled with qtype, qclass, qname, qname_len;
 *	local_alias is set to NULL.
 * @param region: qname copy is allocated here.
 * @return pointer to the rest of the line after the three words,
 *	or NULL on parse or allocation error.
 */
static char* load_qinfo(char* str, struct query_info* qinfo,
	struct regional* region)
{
	/* s is part of the buf */
	char* s = str;
	uint8_t rr[LDNS_RR_BUF_SIZE];
	size_t rr_len = sizeof(rr), dname_len = 0;
	int status;
	/* skip three words */
	s = strchr(str, ' ');
	if(s) s = strchr(s+1, ' ');
	if(s) s = strchr(s+1, ' ');
	if(!s) {
		log_warn("error line too short, %s", str);
		return NULL;
	}
	s[0] = 0;
	s++;
	/* parse them */
	status = sldns_str2wire_rr_question_buf(str, rr, &rr_len,
		&dname_len, NULL, 0, NULL, 0);
	if(status != 0) {
		log_warn("error cannot parse: %s %s",
			sldns_get_errorstr_parse(status), str);
		return NULL;
	}
	qinfo->qtype = sldns_wirerr_get_type(rr, rr_len, dname_len);
	qinfo->qclass = sldns_wirerr_get_class(rr, rr_len, dname_len);
	qinfo->qname_len = dname_len;
	/* the owner dname sits at the start of the wire question */
	qinfo->qname = (uint8_t*)regional_alloc_init(region, rr, dname_len);
	qinfo->local_alias = NULL;
	if(!qinfo->qname) {
		log_warn("error out of memory");
		return NULL;
	}
	return s;
}
/**
 * Load an RR (one text line read over ssl) into slot i of the rrset.
 * Fix: the parsed ldns_rr was leaked on the error path where an RRSIG
 * was expected but a different type was parsed; it is now freed there
 * like on every other error path after a successful parse.
 * @param ssl: channel to read the line from.
 * @param buf: scratch buffer for the line and wire conversions.
 * @param region: rdata and dname copies are allocated here.
 * @param rk: rrset key; type, class, dname set from the first RR.
 * @param d: packed rrset data; ttl, len, rdata of slot i are filled.
 * @param i: index of the RR within the rrset arrays.
 * @param is_rrsig: if nonzero the parsed RR must be an RRSIG.
 * @param go_on: set to 0 when the "BADRR" sentinel line is read.
 * @param now: added to the parsed (relative) TTL.
 * @return 0 on error, 1 on success (also 1 for the BADRR sentinel).
 */
static int load_rr(SSL* ssl, ldns_buffer* buf, struct regional* region,
	struct ub_packed_rrset_key* rk, struct packed_rrset_data* d,
	unsigned int i, int is_rrsig, int* go_on, uint32_t now)
{
	ldns_rr* rr;
	ldns_status status;

	/* read the line */
	if(!ssl_read_buf(ssl, buf))
		return 0;
	if(strncmp((char*)ldns_buffer_begin(buf), "BADRR\n", 6) == 0) {
		*go_on = 0;
		return 1;
	}
	status = ldns_rr_new_frm_str(&rr, (char*)ldns_buffer_begin(buf),
		LDNS_DEFAULT_TTL, NULL, NULL);
	if(status != LDNS_STATUS_OK) {
		log_warn("error cannot parse rr: %s: %s",
			ldns_get_errorstr_by_id(status),
			(char*)ldns_buffer_begin(buf));
		return 0;
	}
	if(is_rrsig && ldns_rr_get_type(rr) != LDNS_RR_TYPE_RRSIG) {
		log_warn("error expected rrsig but got %s",
			(char*)ldns_buffer_begin(buf));
		ldns_rr_free(rr); /* was leaked here before */
		return 0;
	}

	/* convert ldns rr into packed_rr */
	d->rr_ttl[i] = ldns_rr_ttl(rr) + now;
	ldns_buffer_clear(buf);
	/* reserve 2 bytes for the rdata length prefix, written below */
	ldns_buffer_skip(buf, 2);
	status = ldns_rr_rdata2buffer_wire(buf, rr);
	if(status != LDNS_STATUS_OK) {
		log_warn("error cannot rr2wire: %s",
			ldns_get_errorstr_by_id(status));
		ldns_rr_free(rr);
		return 0;
	}
	ldns_buffer_flip(buf);
	ldns_buffer_write_u16_at(buf, 0, ldns_buffer_limit(buf) - 2);

	d->rr_len[i] = ldns_buffer_limit(buf);
	d->rr_data[i] = (uint8_t*)regional_alloc_init(region,
		ldns_buffer_begin(buf), ldns_buffer_limit(buf));
	if(!d->rr_data[i]) {
		ldns_rr_free(rr);
		log_warn("error out of memory");
		return 0;
	}

	/* if first entry, fill the key structure */
	if(i==0) {
		rk->rk.type = htons(ldns_rr_get_type(rr));
		rk->rk.rrset_class = htons(ldns_rr_get_class(rr));
		ldns_buffer_clear(buf);
		status = ldns_dname2buffer_wire(buf, ldns_rr_owner(rr));
		if(status != LDNS_STATUS_OK) {
			log_warn("error cannot dname2buffer: %s",
				ldns_get_errorstr_by_id(status));
			ldns_rr_free(rr);
			return 0;
		}
		ldns_buffer_flip(buf);
		rk->rk.dname_len = ldns_buffer_limit(buf);
		rk->rk.dname = regional_alloc_init(region,
			ldns_buffer_begin(buf), ldns_buffer_limit(buf));
		if(!rk->rk.dname) {
			log_warn("error out of memory");
			ldns_rr_free(rr);
			return 0;
		}
	}
	ldns_rr_free(rr);
	return 1;
}
/**
 * Operate the response-ip module.  On new/pass events it sets up its
 * per-query state and passes control to the next module; on moddone it
 * checks the returned reply against the response-ip configuration and,
 * when a rule matches, rewrites the reply (or marks the query to be
 * dropped, or spawns a CNAME follow-up subquery).
 */
void respip_operate(struct module_qstate* qstate, enum module_ev event,
	int id, struct outbound_entry* outbound)
{
	struct respip_qstate* rq = (struct respip_qstate*)qstate->minfo[id];

	log_query_info(VERB_QUERY, "respip operate: query", &qstate->qinfo);
	(void)outbound;

	if(event == module_event_new || event == module_event_pass) {
		if(!rq) {
			/* first visit: allocate the per-query state */
			rq = regional_alloc_zero(qstate->region,
				sizeof(*rq));
			if(!rq)
				goto servfail;
			rq->state = RESPIP_INIT;
			qstate->minfo[id] = rq;
		}
		if(rq->state == RESPIP_SUBQUERY_FINISHED) {
			qstate->ext_state[id] = module_finished;
			return;
		}
		verbose(VERB_ALGO, "respip: pass to next module");
		qstate->ext_state[id] = module_wait_module;
	} else if(event == module_event_moddone) {
		/* If the reply may be subject to response-ip rewriting
		 * according to the query type, check the actions. If a
		 * rewrite is necessary, we'll replace the reply in qstate
		 * with the new one. */
		enum module_ext_state next_state = module_finished;

		if((qstate->qinfo.qtype == LDNS_RR_TYPE_A ||
			qstate->qinfo.qtype == LDNS_RR_TYPE_AAAA ||
			qstate->qinfo.qtype == LDNS_RR_TYPE_ANY) &&
			qstate->return_msg && qstate->return_msg->rep) {
			struct respip_action_info actinfo = {respip_none,
				NULL};
			struct reply_info* new_rep = qstate->return_msg->rep;
			struct ub_packed_rrset_key* alias_rrset = NULL;

			if(!respip_rewrite_reply(&qstate->qinfo,
				qstate->client_info, qstate->return_msg->rep,
				&new_rep, &actinfo, &alias_rrset, 0,
				qstate->region)) {
				goto servfail;
			}
			if(actinfo.action != respip_none) {
				/* save action info for logging on a
				 * per-front-end-query basis */
				if(!(qstate->respip_action_info =
					regional_alloc_init(qstate->region,
						&actinfo, sizeof(actinfo)))) {
					log_err("out of memory");
					goto servfail;
				}
			} else {
				qstate->respip_action_info = NULL;
			}
			if (new_rep == qstate->return_msg->rep &&
				(actinfo.action == respip_deny ||
				actinfo.action == respip_inform_deny)) {
				/* for deny-variant actions (unless
				 * response-ip data is applied), mark the
				 * query state so the response will be
				 * dropped for all clients. */
				qstate->is_drop = 1;
			} else if(alias_rrset) {
				if(!generate_cname_request(qstate,
					alias_rrset))
					goto servfail;
				next_state = module_wait_subquery;
			}
			qstate->return_msg->rep = new_rep;
		}
		qstate->ext_state[id] = next_state;
	} else
		qstate->ext_state[id] = module_finished;
	return;

servfail:
	qstate->return_rcode = LDNS_RCODE_SERVFAIL;
	qstate->return_msg = NULL;
}
/** duplicate a NUL-terminated string into the region */
char *
regional_strdup(struct regional *r, const char *string)
{
	/* include the terminating NUL byte in the copy */
	size_t len = strlen(string) + 1;
	return (char*)regional_alloc_init(r, string, len);
}
/**
 * Synthesize DNAME+CNAME response from cached DNAME item.
 * @param rrset: the cached DNAME rrset.
 * @param region: reply and synthesized rrsets are allocated here.
 * @param now: current time, for TTL expiry check and relative TTLs.
 * @param q: the query the response is synthesized for.
 * @param sec_status: receives the security status of the DNAME data.
 * @return the message, or NULL on expiry/allocation failure; on
 *	overlong synthesized names a message with YXDOMAIN is returned.
 */
static struct dns_msg* synth_dname_msg(struct ub_packed_rrset_key* rrset,
	struct regional* region, time_t now, struct query_info* q,
	enum sec_status* sec_status)
{
	struct dns_msg* msg;
	struct ub_packed_rrset_key* ck;
	struct packed_rrset_data* newd, *d = (struct packed_rrset_data*)
		rrset->entry.data;
	uint8_t* newname, *dtarg = NULL;
	size_t newlen, dtarglen;
	if(now > d->ttl)
		return NULL; /* cached DNAME has expired */
	/* only allow validated (with DNSSEC) DNAMEs used from cache
	 * for insecure DNAMEs, query again. */
	*sec_status = d->security;
	/* return sec status, so the status of the CNAME can be checked
	 * by the calling routine. */
	msg = gen_dns_msg(region, q, 2); /* DNAME + CNAME RRset */
	if(!msg)
		return NULL;
	msg->rep->flags = BIT_QR; /* reply, no AA, no error */
	msg->rep->authoritative = 0; /* reply stored in cache can't be
		authoritative */
	msg->rep->qdcount = 1;
	msg->rep->ttl = d->ttl - now;
	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
	msg->rep->security = sec_status_unchecked;
	msg->rep->an_numrrsets = 1;
	msg->rep->ns_numrrsets = 0;
	msg->rep->ar_numrrsets = 0;
	msg->rep->rrset_count = 1;
	msg->rep->rrsets[0] = packed_rrset_copy_region(rrset, region, now);
	if(!msg->rep->rrsets[0]) /* copy DNAME */
		return NULL;
	/* synth CNAME rrset */
	get_cname_target(rrset, &dtarg, &dtarglen);
	if(!dtarg)
		return NULL;
	/* new length: qname minus the DNAME owner, plus the target */
	newlen = q->qname_len + dtarglen - rrset->rk.dname_len;
	if(newlen > LDNS_MAX_DOMAINLEN) {
		msg->rep->flags |= LDNS_RCODE_YXDOMAIN;
		return msg;
	}
	newname = (uint8_t*)regional_alloc(region, newlen);
	if(!newname)
		return NULL;
	/* new name is concatenation of qname front (without DNAME owner)
	 * and DNAME target name */
	memcpy(newname, q->qname, q->qname_len-rrset->rk.dname_len);
	memmove(newname+(q->qname_len-rrset->rk.dname_len), dtarg,
		dtarglen);
	/* create rest of CNAME rrset */
	ck = (struct ub_packed_rrset_key*)regional_alloc(region,
		sizeof(struct ub_packed_rrset_key));
	if(!ck)
		return NULL;
	memset(&ck->entry, 0, sizeof(ck->entry));
	msg->rep->rrsets[1] = ck;
	ck->entry.key = ck;
	ck->rk.type = htons(LDNS_RR_TYPE_CNAME);
	ck->rk.rrset_class = rrset->rk.rrset_class;
	ck->rk.flags = 0;
	ck->rk.dname = regional_alloc_init(region, q->qname, q->qname_len);
	if(!ck->rk.dname)
		return NULL;
	ck->rk.dname_len = q->qname_len;
	ck->entry.hash = rrset_key_hash(&ck->rk);
	/* one allocation: struct, single-RR arrays, 2-byte rdata length
	 * prefix and the synthesized target name */
	newd = (struct packed_rrset_data*)regional_alloc_zero(region,
		sizeof(struct packed_rrset_data) + sizeof(size_t) +
		sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t)
		+ newlen);
	if(!newd)
		return NULL;
	ck->entry.data = newd;
	newd->ttl = 0; /* 0 for synthesized CNAME TTL */
	newd->count = 1;
	newd->rrsig_count = 0;
	newd->trust = rrset_trust_ans_noAA;
	newd->rr_len = (size_t*)((uint8_t*)newd +
		sizeof(struct packed_rrset_data));
	newd->rr_len[0] = newlen + sizeof(uint16_t);
	packed_rrset_ptr_fixup(newd);
	newd->rr_ttl[0] = newd->ttl;
	msg->rep->ttl = newd->ttl;
	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(newd->ttl);
	sldns_write_uint16(newd->rr_data[0], newlen);
	memmove(newd->rr_data[0] + sizeof(uint16_t), newname, newlen);
	msg->rep->an_numrrsets ++;
	msg->rep->rrset_count ++;
	return msg;
}
/** find local data tag string match for the given type in the list */ static int find_tag_datas(struct query_info* qinfo, struct config_strlist* list, struct ub_packed_rrset_key* r, struct regional* temp, uint8_t* zname, size_t zlen) { struct config_strlist* p; char buf[65536]; uint8_t rr[LDNS_RR_BUF_SIZE]; size_t len; int res; struct packed_rrset_data* d; for(p=list; p; p=p->next) { len = sizeof(rr); /* does this element match the type? */ snprintf(buf, sizeof(buf), ". %s", p->str); res = sldns_str2wire_rr_buf(buf, rr, &len, NULL, 3600, zname, zlen, NULL, 0); if(res != 0) /* parse errors are already checked before, in * acllist check_data, skip this for robustness */ continue; if(len < 1 /* . */ + 8 /* typeclassttl*/ + 2 /*rdatalen*/) continue; if(sldns_wirerr_get_type(rr, len, 1) != qinfo->qtype) continue; /* do we have entries already? if not setup key */ if(r->rk.dname == NULL) { r->entry.key = r; r->rk.dname = qinfo->qname; r->rk.dname_len = qinfo->qname_len; r->rk.type = htons(qinfo->qtype); r->rk.rrset_class = htons(qinfo->qclass); r->rk.flags = 0; d = (struct packed_rrset_data*)regional_alloc_zero( temp, sizeof(struct packed_rrset_data) + sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t)); if(!d) return 0; /* out of memory */ r->entry.data = d; d->ttl = sldns_wirerr_get_ttl(rr, len, 1); d->rr_len = (size_t*)((uint8_t*)d + sizeof(struct packed_rrset_data)); d->rr_data = (uint8_t**)&(d->rr_len[1]); d->rr_ttl = (time_t*)&(d->rr_data[1]); } d = (struct packed_rrset_data*)r->entry.data; /* add entry to the data */ if(d->count != 0) { size_t* oldlen = d->rr_len; uint8_t** olddata = d->rr_data; time_t* oldttl = d->rr_ttl; /* increase arrays for lookup */ /* this is of course slow for very many records, * but most redirects are expected with few records */ d->rr_len = (size_t*)regional_alloc_zero(temp, (d->count+1)*sizeof(size_t)); d->rr_data = (uint8_t**)regional_alloc_zero(temp, (d->count+1)*sizeof(uint8_t*)); d->rr_ttl = (time_t*)regional_alloc_zero(temp, 
(d->count+1)*sizeof(time_t)); if(!d->rr_len || !d->rr_data || !d->rr_ttl) return 0; /* out of memory */ /* first one was allocated after struct d, but new * ones get their own array increment alloc, so * copy old content */ memmove(d->rr_len, oldlen, d->count*sizeof(size_t)); memmove(d->rr_data, olddata, d->count*sizeof(uint8_t*)); memmove(d->rr_ttl, oldttl, d->count*sizeof(time_t)); } d->rr_len[d->count] = sldns_wirerr_get_rdatalen(rr, len, 1)+2; d->rr_ttl[d->count] = sldns_wirerr_get_ttl(rr, len, 1); d->rr_data[d->count] = regional_alloc_init(temp, sldns_wirerr_get_rdatawl(rr, len, 1), d->rr_len[d->count]); if(!d->rr_data[d->count]) if(!d) return 0; /* out of memory */ d->count++; } if(r->rk.dname) return 1; return 0; }