Example no. 1
/** copy rrsets from replyinfo to dest replyinfo */
static int
repinfo_copy_rrsets(struct reply_info* dest, struct reply_info* from, 
	struct regional* region)
{
	size_t i, s;
	struct packed_rrset_data* fd, *dd;
	struct ub_packed_rrset_key* fk, *dk;
	for(i=0; i<dest->rrset_count; i++) {
		fk = from->rrsets[i];
		dk = dest->rrsets[i];
		fd = (struct packed_rrset_data*)fk->entry.data;
		dk->entry.hash = fk->entry.hash;
		dk->rk = fk->rk;
		if(region) {
			dk->id = fk->id;
			dk->rk.dname = (uint8_t*)regional_alloc_init(region,
				fk->rk.dname, fk->rk.dname_len);
		} else	
			dk->rk.dname = (uint8_t*)memdup(fk->rk.dname, 
				fk->rk.dname_len);
		if(!dk->rk.dname)
			return 0;
		s = packed_rrset_sizeof(fd);
		if(region)
			dd = (struct packed_rrset_data*)regional_alloc_init(
				region, fd, s);
		else	dd = (struct packed_rrset_data*)memdup(fd, s);
		if(!dd) 
			return 0;
		packed_rrset_ptr_fixup(dd);
		dk->entry.data = (void*)dd;
	}
	return 1;
}
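Note the two allocation paths above: with a region, the dname and packed data copies come from regional_alloc_init and are freed together with the region; without one, they are malloced via memdup and must be freed by the owner of dest. In both cases packed_rrset_ptr_fixup repairs the rr_len, rr_data and rr_ttl pointers inside the flat-copied data block.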
Example no. 2
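/** copy an rrset into a cache-ready copy: the key is taken from the
 * alloc cache, dname and packed data are malloced, and the TTLs are
 * made absolute by adding 'now' */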
struct ub_packed_rrset_key* 
packed_rrset_copy_alloc(struct ub_packed_rrset_key* key, 
	struct alloc_cache* alloc, time_t now)
{
	struct packed_rrset_data* fd, *dd;
	struct ub_packed_rrset_key* dk = alloc_special_obtain(alloc);
	if(!dk) return NULL;
	fd = (struct packed_rrset_data*)key->entry.data;
	dk->entry.hash = key->entry.hash;
	dk->rk = key->rk;
	dk->rk.dname = (uint8_t*)memdup(key->rk.dname, key->rk.dname_len);
	if(!dk->rk.dname) {
		alloc_special_release(alloc, dk);
		return NULL;
	}
	dd = (struct packed_rrset_data*)memdup(fd, packed_rrset_sizeof(fd));
	if(!dd) {
		free(dk->rk.dname);
		alloc_special_release(alloc, dk);
		return NULL;
	}
	packed_rrset_ptr_fixup(dd);
	dk->entry.data = (void*)dd;
	packed_rrset_ttl_add(dd, now);
	return dk;
}
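Unlike the region copy in Example no. 5, which makes TTLs relative for use in a reply, this malloced copy is headed for the cache, so packed_rrset_ttl_add turns the relative TTLs into absolute expiry times by adding 'now'.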
Example no. 3
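/** store a parent-side negative entry for qinfo: an rrset with a single
 * zero-length RR, built in the scratch region and handed to
 * iter_store_parentside_rrset with the derived TTL */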
void iter_store_parentside_neg(struct module_env* env, 
        struct query_info* qinfo, struct reply_info* rep)
{
	/* TTL: NS from referral in iq->deleg_msg,
	 *      or first RR from iq->response,
	 *      or servfail5secs if !iq->response */ 
	time_t ttl = NORR_TTL;
	struct ub_packed_rrset_key* neg;
	struct packed_rrset_data* newd;
	if(rep) {
		struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
		if(!rrset && rep->rrset_count != 0) rrset = rep->rrsets[0];
		if(rrset) ttl = ub_packed_rrset_ttl(rrset);
	}
	/* create empty rrset to store */
	neg = (struct ub_packed_rrset_key*)regional_alloc(env->scratch,
	                sizeof(struct ub_packed_rrset_key));
	if(!neg) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	memset(&neg->entry, 0, sizeof(neg->entry));
	neg->entry.key = neg;
	neg->rk.type = htons(qinfo->qtype);
	neg->rk.rrset_class = htons(qinfo->qclass);
	neg->rk.flags = 0;
	neg->rk.dname = regional_alloc_init(env->scratch, qinfo->qname, 
		qinfo->qname_len);
	if(!neg->rk.dname) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	neg->rk.dname_len = qinfo->qname_len;
	neg->entry.hash = rrset_key_hash(&neg->rk);
	newd = (struct packed_rrset_data*)regional_alloc_zero(env->scratch, 
		sizeof(struct packed_rrset_data) + sizeof(size_t) +
		sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t));
	if(!newd) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	neg->entry.data = newd;
	newd->ttl = ttl;
	/* entry must have one RR, otherwise not valid in cache.
	 * put in one RR with empty rdata: those are ignored as nameserver */
	newd->count = 1;
	newd->rrsig_count = 0;
	newd->trust = rrset_trust_ans_noAA;
	newd->rr_len = (size_t*)((uint8_t*)newd +
		sizeof(struct packed_rrset_data));
	newd->rr_len[0] = 0 /* zero len rdata */ + sizeof(uint16_t);
	packed_rrset_ptr_fixup(newd);
	newd->rr_ttl[0] = newd->ttl;
	sldns_write_uint16(newd->rr_data[0], 0 /* zero len rdata */);
	/* store it */
	log_rrset_key(VERB_ALGO, "store parent-side negative", neg);
	iter_store_parentside_rrset(env, neg);
}
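The regional_alloc_zero size is exactly the packed layout for one RR: the packed_rrset_data header, one size_t rr_len, one uint8_t* rr_data pointer, one time_t rr_ttl and a two-byte rdata length field. rr_len[0] must be filled in by hand before packed_rrset_ptr_fixup runs, because the fixup uses the stored lengths to place the rr_data pointers; afterwards rr_data[0] points at the trailing uint16_t, which sldns_write_uint16 sets to zero for the empty rdata.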
Example no. 4
/** move entry into cache */
static int
move_into_cache(struct ub_packed_rrset_key* k, 
	struct packed_rrset_data* d, struct worker* worker)
{
	struct ub_packed_rrset_key* ak;
	struct packed_rrset_data* ad;
	size_t s, i, num = d->count + d->rrsig_count;
	struct rrset_ref ref;
	uint8_t* p;

	ak = alloc_special_obtain(&worker->alloc);
	if(!ak) {
		log_warn("error out of memory");
		return 0;
	}
	ak->entry.data = NULL;
	ak->rk = k->rk;
	ak->entry.hash = rrset_key_hash(&k->rk);
	ak->rk.dname = (uint8_t*)memdup(k->rk.dname, k->rk.dname_len);
	if(!ak->rk.dname) {
		log_warn("error out of memory");
		ub_packed_rrset_parsedelete(ak, &worker->alloc);
		return 0;
	}
	s = sizeof(*ad) + (sizeof(size_t) + sizeof(uint8_t*) + 
		sizeof(time_t))* num;
	for(i=0; i<num; i++)
		s += d->rr_len[i];
	ad = (struct packed_rrset_data*)malloc(s);
	if(!ad) {
		log_warn("error out of memory");
		ub_packed_rrset_parsedelete(ak, &worker->alloc);
		return 0;
	}
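	/* flat-copy the packed layout into the single allocation:
	 * header, then the rr_len[], rr_data[], rr_ttl[] arrays,
	 * then the rdata bytes; the pointers are fixed up below */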
	p = (uint8_t*)ad;
	memmove(p, d, sizeof(*ad));
	p += sizeof(*ad);
	memmove(p, &d->rr_len[0], sizeof(size_t)*num);
	p += sizeof(size_t)*num;
	memmove(p, &d->rr_data[0], sizeof(uint8_t*)*num);
	p += sizeof(uint8_t*)*num;
	memmove(p, &d->rr_ttl[0], sizeof(time_t)*num);
	p += sizeof(time_t)*num;
	for(i=0; i<num; i++) {
		memmove(p, d->rr_data[i], d->rr_len[i]);
		p += d->rr_len[i];
	}
	packed_rrset_ptr_fixup(ad);

	ak->entry.data = ad;

	ref.key = ak;
	ref.id = ak->id;
	(void)rrset_cache_update(worker->env.rrset_cache, &ref,
		&worker->alloc, *worker->env.now);
	return 1;
}
Example no. 5
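/** copy an rrset key and its packed data into the given region; the
 * TTLs in the copy are made relative to 'now' */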
struct ub_packed_rrset_key*
packed_rrset_copy_region(struct ub_packed_rrset_key* key, 
	struct regional* region, time_t now)
{
	struct ub_packed_rrset_key* ck = regional_alloc(region, 
		sizeof(struct ub_packed_rrset_key));
	struct packed_rrset_data* d;
	struct packed_rrset_data* data = (struct packed_rrset_data*)
		key->entry.data;
	size_t dsize, i;
	if(!ck)
		return NULL;
	ck->id = key->id;
	memset(&ck->entry, 0, sizeof(ck->entry));
	ck->entry.hash = key->entry.hash;
	ck->entry.key = ck;
	ck->rk = key->rk;
	ck->rk.dname = regional_alloc_init(region, key->rk.dname, 
		key->rk.dname_len);
	if(!ck->rk.dname)
		return NULL;
	dsize = packed_rrset_sizeof(data);
	d = (struct packed_rrset_data*)regional_alloc_init(region, data, dsize);
	if(!d)
		return NULL;
	ck->entry.data = d;
	packed_rrset_ptr_fixup(d);
	/* make TTLs relative - once per rrset */
	for(i=0; i<d->count + d->rrsig_count; i++) {
		if(d->rr_ttl[i] < now)
			d->rr_ttl[i] = 0;
		else	d->rr_ttl[i] -= now;
	}
	if(d->ttl < now)
		d->ttl = 0;
	else	d->ttl -= now;
	return ck;
}
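As a usage illustration, a minimal caller sketch follows. It is not taken from the Unbound source: the helper name use_region_copy and the 'cached' input are hypothetical, and it assumes the declarations from util/regional.h, util/data/packed_rrset.h and util/log.h.

static void
use_region_copy(struct ub_packed_rrset_key* cached, time_t now)
{
	/* throwaway region: everything copied into it is freed at once */
	struct regional* region = regional_create();
	struct ub_packed_rrset_key* copy;
	if(!region)
		return;
	copy = packed_rrset_copy_region(cached, region, now);
	if(copy) /* TTLs in the copy are now relative to 'now' */
		log_rrset_key(VERB_ALGO, "region copy of rrset", copy);
	regional_destroy(region); /* frees the copy together with the region */
}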
Example no. 6
/** synthesize DNAME+CNAME response from cached DNAME item */
static struct dns_msg*
synth_dname_msg(struct ub_packed_rrset_key* rrset, struct regional* region, 
	time_t now, struct query_info* q, enum sec_status* sec_status)
{
	struct dns_msg* msg;
	struct ub_packed_rrset_key* ck;
	struct packed_rrset_data* newd, *d = (struct packed_rrset_data*)
		rrset->entry.data;
	uint8_t* newname, *dtarg = NULL;
	size_t newlen, dtarglen;
	if(now > d->ttl)
		return NULL;
	/* only allow validated (with DNSSEC) DNAMEs used from cache 
	 * for insecure DNAMEs, query again. */
	*sec_status = d->security;
	/* return sec status, so the status of the CNAME can be checked
	 * by the calling routine. */
	msg = gen_dns_msg(region, q, 2); /* DNAME + CNAME RRset */
	if(!msg)
		return NULL;
	msg->rep->flags = BIT_QR; /* reply, no AA, no error */
	msg->rep->authoritative = 0; /* reply stored in cache can't be authoritative */
	msg->rep->qdcount = 1;
	msg->rep->ttl = d->ttl - now;
	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
	msg->rep->security = sec_status_unchecked;
	msg->rep->an_numrrsets = 1;
	msg->rep->ns_numrrsets = 0;
	msg->rep->ar_numrrsets = 0;
	msg->rep->rrset_count = 1;
	msg->rep->rrsets[0] = packed_rrset_copy_region(rrset, region, now);
	if(!msg->rep->rrsets[0]) /* copy DNAME */
		return NULL;
	/* synth CNAME rrset */
	get_cname_target(rrset, &dtarg, &dtarglen);
	if(!dtarg)
		return NULL;
	newlen = q->qname_len + dtarglen - rrset->rk.dname_len;
	if(newlen > LDNS_MAX_DOMAINLEN) {
		msg->rep->flags |= LDNS_RCODE_YXDOMAIN;
		return msg;
	}
	newname = (uint8_t*)regional_alloc(region, newlen);
	if(!newname)
		return NULL;
	/* new name is concatenation of qname front (without DNAME owner)
	 * and DNAME target name */
	memcpy(newname, q->qname, q->qname_len-rrset->rk.dname_len);
	memmove(newname+(q->qname_len-rrset->rk.dname_len), dtarg, dtarglen);
	/* create rest of CNAME rrset */
	ck = (struct ub_packed_rrset_key*)regional_alloc(region, 
		sizeof(struct ub_packed_rrset_key));
	if(!ck)
		return NULL;
	memset(&ck->entry, 0, sizeof(ck->entry));
	msg->rep->rrsets[1] = ck;
	ck->entry.key = ck;
	ck->rk.type = htons(LDNS_RR_TYPE_CNAME);
	ck->rk.rrset_class = rrset->rk.rrset_class;
	ck->rk.flags = 0;
	ck->rk.dname = regional_alloc_init(region, q->qname, q->qname_len);
	if(!ck->rk.dname)
		return NULL;
	ck->rk.dname_len = q->qname_len;
	ck->entry.hash = rrset_key_hash(&ck->rk);
	newd = (struct packed_rrset_data*)regional_alloc_zero(region,
		sizeof(struct packed_rrset_data) + sizeof(size_t) + 
		sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t) 
		+ newlen);
	if(!newd)
		return NULL;
	ck->entry.data = newd;
	newd->ttl = 0; /* 0 for synthesized CNAME TTL */
	newd->count = 1;
	newd->rrsig_count = 0;
	newd->trust = rrset_trust_ans_noAA;
	newd->rr_len = (size_t*)((uint8_t*)newd + 
		sizeof(struct packed_rrset_data));
	newd->rr_len[0] = newlen + sizeof(uint16_t);
	packed_rrset_ptr_fixup(newd);
	newd->rr_ttl[0] = newd->ttl;
	msg->rep->ttl = newd->ttl;
	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(newd->ttl);
	sldns_write_uint16(newd->rr_data[0], newlen);
	memmove(newd->rr_data[0] + sizeof(uint16_t), newname, newlen);
	msg->rep->an_numrrsets ++;
	msg->rep->rrset_count ++;
	return msg;
}
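The synthesized CNAME owner is the query name with the DNAME owner suffix replaced by the DNAME target, hence newlen = qname_len - dname_owner_len + dtarglen. If that exceeds LDNS_MAX_DOMAINLEN, the DNAME-only message is returned with the YXDOMAIN rcode set in the flags, the defined response for DNAME expansion overflow.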