Example No. 1
/** insert RR into RRset data structure; Wastes a couple of bytes */
static int
insert_rr(struct regional* region, struct packed_rrset_data* pd,
	uint8_t* rdata, size_t rdata_len, time_t ttl)
{
	size_t* oldlen = pd->rr_len;
	time_t* oldttl = pd->rr_ttl;
	uint8_t** olddata = pd->rr_data;

	/* add RR to rrset */
	pd->count++;
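	/* grow the arrays by allocating new ones in the region; the old
	 * arrays are not freed (a region is only freed as a whole), which
	 * is the small waste the function comment refers to */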
	pd->rr_len = regional_alloc(region, sizeof(*pd->rr_len)*pd->count);
	pd->rr_ttl = regional_alloc(region, sizeof(*pd->rr_ttl)*pd->count);
	pd->rr_data = regional_alloc(region, sizeof(*pd->rr_data)*pd->count);
	if(!pd->rr_len || !pd->rr_ttl || !pd->rr_data) {
		log_err("out of memory");
		return 0;
	}
	if(pd->count > 1) {
		memcpy(pd->rr_len+1, oldlen, 
			sizeof(*pd->rr_len)*(pd->count-1));
		memcpy(pd->rr_ttl+1, oldttl, 
			sizeof(*pd->rr_ttl)*(pd->count-1));
		memcpy(pd->rr_data+1, olddata, 
			sizeof(*pd->rr_data)*(pd->count-1));
	}
	pd->rr_len[0] = rdata_len;
	pd->rr_ttl[0] = ttl;
	pd->rr_data[0] = regional_alloc_init(region, rdata, rdata_len);
	if(!pd->rr_data[0]) {
		log_err("out of memory");
		return 0;
	}
	return 1;
}
Example No. 2
/** allocate dns_msg from query_info and reply_info */
static struct dns_msg*
gen_dns_msg(struct regional* region, struct query_info* q, size_t num)
{
	struct dns_msg* msg = (struct dns_msg*)regional_alloc(region, 
		sizeof(struct dns_msg));
	if(!msg)
		return NULL;
	memcpy(&msg->qinfo, q, sizeof(struct query_info));
	msg->qinfo.qname = regional_alloc_init(region, q->qname, q->qname_len);
	if(!msg->qinfo.qname)
		return NULL;
	/* allocate replyinfo struct and rrset key array separately */
	msg->rep = (struct reply_info*)regional_alloc(region,
		sizeof(struct reply_info) - sizeof(struct rrset_ref));
	if(!msg->rep)
		return NULL;
	if(num > RR_COUNT_MAX)
		return NULL; /* integer overflow protection */
	msg->rep->rrsets = (struct ub_packed_rrset_key**)
		regional_alloc(region,
		num * sizeof(struct ub_packed_rrset_key*));
	if(!msg->rep->rrsets)
		return NULL;
	return msg;
}
Example No. 3
int mesh_state_attachment(struct mesh_state* super, struct mesh_state* sub)
{
#ifdef UNBOUND_DEBUG
	struct rbnode_t* n;
#endif
	struct mesh_state_ref* subref; /* points to sub, inserted in super */
	struct mesh_state_ref* superref; /* points to super, inserted in sub */
	if( !(subref = regional_alloc(super->s.region,
		sizeof(struct mesh_state_ref))) ||
		!(superref = regional_alloc(sub->s.region,
		sizeof(struct mesh_state_ref))) ) {
		log_err("mesh_state_attachment: out of memory");
		return 0;
	}
	superref->node.key = superref;
	superref->s = super;
	subref->node.key = subref;
	subref->s = sub;
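	/* the rbtree_insert() return value is captured only in debug
	 * builds; log_assert() compiles to nothing unless UNBOUND_DEBUG
	 * is defined, so 'n' would otherwise be unused */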
#ifdef UNBOUND_DEBUG
	n =
#endif
	rbtree_insert(&sub->super_set, &superref->node);
	log_assert(n != NULL);
#ifdef UNBOUND_DEBUG
	n =
#endif
	rbtree_insert(&super->sub_set, &subref->node);
	log_assert(n != NULL);
	return 1;
}
Example No. 4
struct dns_msg*
dns_msg_create(uint8_t* qname, size_t qnamelen, uint16_t qtype, 
	uint16_t qclass, struct regional* region, size_t capacity)
{
	struct dns_msg* msg = (struct dns_msg*)regional_alloc(region,
		sizeof(struct dns_msg));
	if(!msg)
		return NULL;
	msg->qinfo.qname = regional_alloc_init(region, qname, qnamelen);
	if(!msg->qinfo.qname)
		return NULL;
	msg->qinfo.qname_len = qnamelen;
	msg->qinfo.qtype = qtype;
	msg->qinfo.qclass = qclass;
	msg->qinfo.local_alias = NULL;
	/* non-packed reply_info, because it needs to grow the array */
	msg->rep = (struct reply_info*)regional_alloc_zero(region, 
		sizeof(struct reply_info)-sizeof(struct rrset_ref));
	if(!msg->rep)
		return NULL;
	if(capacity > RR_COUNT_MAX)
		return NULL; /* integer overflow protection */
	msg->rep->flags = BIT_QR; /* with QR, no AA */
	msg->rep->qdcount = 1;
	msg->rep->rrsets = (struct ub_packed_rrset_key**)
		regional_alloc(region, 
		capacity*sizeof(struct ub_packed_rrset_key*));
	if(!msg->rep->rrsets)
		return NULL;
	return msg;
}
Example No. 5
/** synthesize a CNAME rrset */
static struct rrset_parse*
synth_cname_rrset(uint8_t** sname, size_t* snamelen, uint8_t* alias,
                  size_t aliaslen, struct regional* region, struct msg_parse* msg,
                  struct rrset_parse* rrset, struct rrset_parse* prev,
                  struct rrset_parse* nx, ldns_buffer* pkt)
{
    struct rrset_parse* cn = (struct rrset_parse*)regional_alloc(region,
                             sizeof(struct rrset_parse));
    if(!cn)
        return NULL;
    memset(cn, 0, sizeof(*cn));
    cn->rr_first = (struct rr_parse*)regional_alloc(region,
                   sizeof(struct rr_parse));
    if(!cn->rr_first)
        return NULL;
    cn->rr_last = cn->rr_first;
    /* CNAME from sname to alias */
    cn->dname = (uint8_t*)regional_alloc(region, *snamelen);
    if(!cn->dname)
        return NULL;
    dname_pkt_copy(pkt, cn->dname, *sname);
    cn->dname_len = *snamelen;
    cn->type = LDNS_RR_TYPE_CNAME;
    cn->section = rrset->section;
    cn->rrset_class = rrset->rrset_class;
    cn->rr_count = 1;
    cn->size = sizeof(uint16_t) + aliaslen;
    cn->hash=pkt_hash_rrset(pkt, cn->dname, cn->type, cn->rrset_class, 0);
    /* allocate TTL + rdatalen + uncompressed dname */
    memset(cn->rr_first, 0, sizeof(struct rr_parse));
    cn->rr_first->outside_packet = 1;
    cn->rr_first->ttl_data = (uint8_t*)regional_alloc(region,
                             sizeof(uint32_t)+sizeof(uint16_t)+aliaslen);
    if(!cn->rr_first->ttl_data)
        return NULL;
    ldns_write_uint32(cn->rr_first->ttl_data, 0); /* TTL = 0 */
    ldns_write_uint16(cn->rr_first->ttl_data+4, aliaslen);
    memmove(cn->rr_first->ttl_data+6, alias, aliaslen);
    cn->rr_first->size = sizeof(uint16_t)+aliaslen;

    /* link it in */
    cn->rrset_all_next = nx;
    if(prev)
        prev->rrset_all_next = cn;
    else	msg->rrset_first = cn;
    if(nx == NULL)
        msg->rrset_last = cn;
    msg->rrset_count ++;
    msg->an_rrsets++;
    /* it is not inserted in the msg hashtable. */

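    /* continue name chasing at the alias: the uncompressed target name
     * sits right after the 4-byte TTL and 2-byte rdlength written above */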
    *sname = cn->rr_first->ttl_data + sizeof(uint32_t)+sizeof(uint16_t);
    *snamelen = aliaslen;
    return cn;
}
Example No. 6
/* Create response according to the ldns packet content */
int createResponse(struct module_qstate* qstate, sldns_buffer* pkt)
{
    struct msg_parse* prs;
    struct edns_data edns;
    
    /* parse message */
    prs = (struct msg_parse*) regional_alloc(qstate->env->scratch, sizeof(struct msg_parse));
    if (!prs) {
	log_err("storeResponse: out of memory on incoming message");
	return 0;
    }

    memset(prs, 0, sizeof(*prs));
    memset(&edns, 0, sizeof(edns));

    sldns_buffer_set_position(pkt, 0);
    if (parse_packet(pkt, prs, qstate->env->scratch) != LDNS_RCODE_NOERROR) {
	verbose(VERB_ALGO, "storeResponse: parse error on reply packet");
	return 0;
    }
    /* edns is not examined, but removed from message to help cache */
    if(parse_extract_edns(prs, &edns) != LDNS_RCODE_NOERROR)
	return 0;

    /* remove the CD bit we asked for, in case we handle validation ourselves */
    prs->flags &= ~BIT_CD;

    /* allocate response dns_msg in region */
    qstate->return_msg = (struct dns_msg*)regional_alloc(qstate->region, sizeof(struct dns_msg));
    if (!qstate->return_msg)
       return 0;

    memset(qstate->return_msg, 0, sizeof(*qstate->return_msg));
    if(!parse_create_msg(pkt, prs, NULL, &(qstate->return_msg)->qinfo, &(qstate->return_msg)->rep, qstate->region)) {
	log_err("storeResponse: malloc failure: allocating incoming dns_msg");
	return 0;
    }
    
    /* Make sure that the RA flag is set (since the presence of 
     * this module means that recursion is available) */
    /* qstate->return_msg->rep->flags |= BIT_RA; */

    /* Clear the AA flag */
    /* FIXME: does this action go here or in some other module? */
    /*qstate->return_msg->rep->flags &= ~BIT_AA; */

    /* make sure QR flag is on */
    /*qstate->return_msg->rep->flags |= BIT_QR; */

    if(verbosity >= VERB_ALGO)
	log_dns_msg("storeResponse: packet:", &qstate->return_msg->qinfo, qstate->return_msg->rep);

    return 1;
}
Example No. 7
/**
 * Create canonical form of rrset in the scratch buffer.
 * @param region: temporary region.
 * @param buf: the buffer to use.
 * @param k: the rrset to insert.
 * @param sig: RRSIG rdata to include.
 * @param siglen: RRSIG rdata length, excluding the signature field but
 * 	including the signer name.
 * @param sortree: if it points to NULL, a new sorted rrset tree is built
 * 	and stored there; otherwise the existing tree is reused.
 * @return false on alloc error.
 */
static int
rrset_canonical(struct regional* region, sldns_buffer* buf, 
	struct ub_packed_rrset_key* k, uint8_t* sig, size_t siglen,
	struct rbtree_t** sortree)
{
	struct packed_rrset_data* d = (struct packed_rrset_data*)k->entry.data;
	uint8_t* can_owner = NULL;
	size_t can_owner_len = 0;
	struct canon_rr* walk;
	struct canon_rr* rrs;

	if(!*sortree) {
		*sortree = (struct rbtree_t*)regional_alloc(region, 
			sizeof(rbtree_t));
		if(!*sortree)
			return 0;
		if(d->count > RR_COUNT_MAX)
			return 0; /* integer overflow protection */
		rrs = regional_alloc(region, sizeof(struct canon_rr)*d->count);
		if(!rrs) {
			*sortree = NULL;
			return 0;
		}
		rbtree_init(*sortree, &canonical_tree_compare);
		canonical_sort(k, d, *sortree, rrs);
	}

	sldns_buffer_clear(buf);
	sldns_buffer_write(buf, sig, siglen);
	/* canonicalize signer name */
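	/* the signer name begins 18 octets into the RRSIG rdata written
	 * above: type covered (2), algorithm (1), labels (1), original TTL
	 * (4), expiration (4), inception (4) and key tag (2) */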
	query_dname_tolower(sldns_buffer_begin(buf)+18); 
	RBTREE_FOR(walk, struct canon_rr*, (*sortree)) {
		/* see if there is enough space left in the buffer */
		if(sldns_buffer_remaining(buf) < can_owner_len + 2 + 2 + 4
			+ d->rr_len[walk->rr_idx]) {
			log_err("verify: failed to canonicalize, "
				"rrset too big");
			return 0;
		}
		/* determine canonical owner name */
		if(can_owner)
			sldns_buffer_write(buf, can_owner, can_owner_len);
		else	insert_can_owner(buf, k, sig, &can_owner, 
				&can_owner_len);
		sldns_buffer_write(buf, &k->rk.type, 2);
		sldns_buffer_write(buf, &k->rk.rrset_class, 2);
		sldns_buffer_write(buf, sig+4, 4);
		sldns_buffer_write(buf, d->rr_data[walk->rr_idx], 
			d->rr_len[walk->rr_idx]);
		canonicalize_rdata(buf, k, d->rr_len[walk->rr_idx]);
	}
	sldns_buffer_flip(buf);
	return 1;
}
Example No. 8
/**
 * make a deep copy of 'key' in 'region'.
 * This is largely derived from packed_rrset_copy_region() and
 * packed_rrset_ptr_fixup(), but differs in the following points:
 *
 * - It doesn't assume all data in 'key' are in a contiguous memory region.
 *   Although that is usually the case, 'key' can be passed from
 *   a lower-level module and it might not build the rrset to meet the
 *   assumption.  In fact, an rrset specified as response-ip-data or generated
 *   in local_data_find_tag_datas() breaks the assumption.  So it would be
 *   safer not to naively rely on the assumption.  On the other hand, this
 *   function ensures the copied rrset data are in a contiguous region so
 *   that it won't cause a disruption even if an upper layer module naively
 *   assumes the memory layout.
 * - It doesn't copy RRSIGs (if any) in 'key'.  The rrset will be used in
 *   a reply that was already faked, so it doesn't make much sense to provide
 *   partial sigs even if they are valid themselves.
 * - It doesn't adjust TTLs as it basically has to be a verbatim copy of 'key'
 *   just allocated in 'region' (the assumption is necessary TTL adjustment
 *   has been already done in 'key').
 *
 * This function returns the copied rrset key on success, and NULL on memory
 * allocation failure.
 */
struct ub_packed_rrset_key*
copy_rrset(const struct ub_packed_rrset_key* key, struct regional* region)
{
	struct ub_packed_rrset_key* ck = regional_alloc(region,
		sizeof(struct ub_packed_rrset_key));
	struct packed_rrset_data* d;
	struct packed_rrset_data* data = key->entry.data;
	size_t dsize, i;
	uint8_t* nextrdata;

	/* derived from packed_rrset_copy_region(), but don't use
	 * packed_rrset_sizeof() and do exclude RRSIGs */
	if(!ck)
		return NULL;
	ck->id = key->id;
	memset(&ck->entry, 0, sizeof(ck->entry));
	ck->entry.hash = key->entry.hash;
	ck->entry.key = ck;
	ck->rk = key->rk;
	ck->rk.dname = regional_alloc_init(region, key->rk.dname,
		key->rk.dname_len);
	if(!ck->rk.dname)
		return NULL;

	dsize = sizeof(struct packed_rrset_data) + data->count *
		(sizeof(size_t)+sizeof(uint8_t*)+sizeof(time_t));
	for(i=0; i<data->count; i++)
		dsize += data->rr_len[i];
	d = regional_alloc(region, dsize);
	if(!d)
		return NULL;
	*d = *data;
	d->rrsig_count = 0;
	ck->entry.data = d;

	/* derived from packed_rrset_ptr_fixup() with copying the data */
	d->rr_len = (size_t*)((uint8_t*)d + sizeof(struct packed_rrset_data));
	d->rr_data = (uint8_t**)&(d->rr_len[d->count]);
	d->rr_ttl = (time_t*)&(d->rr_data[d->count]);
	nextrdata = (uint8_t*)&(d->rr_ttl[d->count]);
	for(i=0; i<d->count; i++) {
		d->rr_len[i] = data->rr_len[i];
		d->rr_ttl[i] = data->rr_ttl[i];
		d->rr_data[i] = nextrdata;
		memcpy(d->rr_data[i], data->rr_data[i], data->rr_len[i]);
		nextrdata += d->rr_len[i];
	}

	return ck;
}
Example No. 9
/** 
 * Assemble an rrset structure for the type 
 * @param region: allocated in this region.
 * @param ta: trust anchor.
 * @param num: number of items to fetch from list.
 * @param type: fetch only items of this type.
 * @return rrset or NULL on error.
 */
static struct ub_packed_rrset_key*
assemble_it(struct regional* region, struct trust_anchor* ta, size_t num, 
	uint16_t type)
{
	struct ub_packed_rrset_key* pkey = (struct ub_packed_rrset_key*)
		regional_alloc(region, sizeof(*pkey));
	struct packed_rrset_data* pd;
	struct ta_key* tk;
	size_t i;
	if(!pkey)
		return NULL;
	memset(pkey, 0, sizeof(*pkey));
	pkey->rk.dname = regional_alloc_init(region, ta->name, ta->namelen);
	if(!pkey->rk.dname)
		return NULL;
	
	pkey->rk.dname_len = ta->namelen;
	pkey->rk.type = htons(type);
	pkey->rk.rrset_class = htons(ta->dclass);
	/* The rrset is built in an uncompressed way. This means it
	 * cannot be copied in the normal way. */
	pd = (struct packed_rrset_data*)regional_alloc(region, sizeof(*pd));
	if(!pd)
		return NULL;
	memset(pd, 0, sizeof(*pd));
	pd->count = num;
	pd->trust = rrset_trust_ultimate;
	pd->rr_len = (size_t*)regional_alloc(region, num*sizeof(size_t));
	if(!pd->rr_len)
		return NULL;
	pd->rr_ttl = (uint32_t*)regional_alloc(region, num*sizeof(uint32_t));
	if(!pd->rr_ttl)
		return NULL;
	pd->rr_data = (uint8_t**)regional_alloc(region, num*sizeof(uint8_t*));
	if(!pd->rr_data)
		return NULL;
	/* fill in rrs */
	i=0;
	for(tk = ta->keylist; tk; tk = tk->next) {
		if(tk->type != type)
			continue;
		pd->rr_len[i] = tk->len;
		/* reuse data ptr to allocation in region */
		pd->rr_data[i] = tk->data;
		pd->rr_ttl[i] = 0;
		i++;
	}
	pkey->entry.data = (void*)pd;
	return pkey;
}
Example No. 10
/** create new trust anchor object */
static struct trust_anchor*
anchor_new_ta(struct val_anchors* anchors, uint8_t* name, int namelabs,
	size_t namelen, uint16_t dclass)
{
#ifdef UNBOUND_DEBUG
	rbnode_t* r;
#endif
	struct trust_anchor* ta = (struct trust_anchor*)regional_alloc(
		anchors->region, sizeof(struct trust_anchor));
	if(!ta)
		return NULL;
	memset(ta, 0, sizeof(*ta));
	ta->node.key = ta;
	ta->name = regional_alloc_init(anchors->region, name, namelen);
	if(!ta->name)
		return NULL;
	ta->namelabs = namelabs;
	ta->namelen = namelen;
	ta->dclass = dclass;
	lock_basic_init(&ta->lock);
	lock_basic_lock(&anchors->lock);
#ifdef UNBOUND_DEBUG
	r =
#endif
	rbtree_insert(anchors->tree, &ta->node);
	lock_basic_unlock(&anchors->lock);
	log_assert(r != NULL);
	return ta;
}
Example No. 11
int 
delegpt_add_ns(struct delegpt* dp, struct regional* region, uint8_t* name,
	int lame)
{
	struct delegpt_ns* ns;
	size_t len;
	(void)dname_count_size_labels(name, &len);
	/* slow check for duplicates to avoid counting failures when
	 * adding the same server as a dependency twice */
	if(delegpt_find_ns(dp, name, len))
		return 1;
	ns = (struct delegpt_ns*)regional_alloc(region,
		sizeof(struct delegpt_ns));
	if(!ns)
		return 0;
	ns->next = dp->nslist;
	ns->namelen = len;
	dp->nslist = ns;
	ns->name = regional_alloc_init(region, name, ns->namelen);
	if(!ns->name) {
		dp->nslist = ns->next; /* unlink the half-initialized entry */
		return 0;
	}
	ns->resolved = 0;
	ns->got4 = 0;
	ns->got6 = 0;
	ns->lame = (uint8_t)lame;
	ns->done_pside4 = 0;
	ns->done_pside6 = 0;
	return 1;
}
Example No. 12
/** create rrset return 0 on failure */
static int
parse_create_rrset(sldns_buffer* pkt, struct rrset_parse* pset,
	struct packed_rrset_data** data, struct regional* region)
{
	/* allocate */
	size_t s;
	if(pset->rr_count > RR_COUNT_MAX || pset->rrsig_count > RR_COUNT_MAX ||
		pset->size > RR_COUNT_MAX)
		return 0; /* protect against integer overflow */
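	/* single block: packed_rrset_data header, then per-RR length,
	 * data-pointer and TTL arrays, then the (decompressed) rdata bytes */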
	s = sizeof(struct packed_rrset_data) + 
		(pset->rr_count + pset->rrsig_count) * 
		(sizeof(size_t)+sizeof(uint8_t*)+sizeof(time_t)) + 
		pset->size;
	if(region)
		*data = regional_alloc(region, s);
	else	*data = malloc(s);
	if(!*data)
		return 0;
	/* copy & decompress */
	if(!parse_rr_copy(pkt, pset, *data)) {
		if(!region) free(*data);
		return 0;
	}
	return 1;
}
Example No. 13
/** constructor for replyinfo */
static struct reply_info*
construct_reply_info_base(struct regional* region, uint16_t flags, size_t qd,
	uint32_t ttl, uint32_t prettl, size_t an, size_t ns, size_t ar, 
	size_t total, enum sec_status sec)
{
	struct reply_info* rep;
	/* rrset_count-1 because the first ref is part of the struct. */
	size_t s = sizeof(struct reply_info) - sizeof(struct rrset_ref) +
		sizeof(struct ub_packed_rrset_key*) * total;
	if(region)
		rep = (struct reply_info*)regional_alloc(region, s);
	else	rep = (struct reply_info*)malloc(s + 
			sizeof(struct rrset_ref) * (total));
	if(!rep) 
		return NULL;
	rep->flags = flags;
	rep->qdcount = qd;
	rep->ttl = ttl;
	rep->prefetch_ttl = prettl;
	rep->an_numrrsets = an;
	rep->ns_numrrsets = ns;
	rep->ar_numrrsets = ar;
	rep->rrset_count = total;
	rep->security = sec;
	rep->authoritative = 0;
	/* array starts after the refs */
	if(region)
		rep->rrsets = (struct ub_packed_rrset_key**)&(rep->ref[0]);
	else	rep->rrsets = (struct ub_packed_rrset_key**)&(rep->ref[total]);
	/* zero the arrays to assist cleanup in case of malloc failure */
	memset( rep->rrsets, 0, sizeof(struct ub_packed_rrset_key*) * total);
	if(!region)
		memset( &rep->ref[0], 0, sizeof(struct rrset_ref) * total);
	return rep;
}
Example No. 14
int
parse_copy_decompress_rrset(sldns_buffer* pkt, struct msg_parse* msg,
	struct rrset_parse *pset, struct regional* region, 
	struct ub_packed_rrset_key* pk)
{
	struct packed_rrset_data* data;
	pk->rk.flags = pset->flags;
	pk->rk.dname_len = pset->dname_len;
	if(region)
		pk->rk.dname = (uint8_t*)regional_alloc(
			region, pset->dname_len);
	else	pk->rk.dname = 
			(uint8_t*)malloc(pset->dname_len);
	if(!pk->rk.dname)
		return 0;
	/** copy & decompress dname */
	dname_pkt_copy(pkt, pk->rk.dname, pset->dname);
	/** copy over type and class */
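	/* pset->type is in host order and needs htons(); the parse code
	 * keeps pset->rrset_class in network order, so it is copied as-is */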
	pk->rk.type = htons(pset->type);
	pk->rk.rrset_class = pset->rrset_class;
	/** read data part. */
	if(!parse_create_rrset(pkt, pset, &data, region))
		return 0;
	pk->entry.data = (void*)data;
	pk->entry.key = (void*)pk;
	pk->entry.hash = pset->hash;
	data->trust = get_rrset_trust(msg, pset);
	return 1;
}
Example No. 15
int 
delegpt_add_addr(struct delegpt* dp, struct regional* region, 
	struct sockaddr_storage* addr, socklen_t addrlen, int bogus, 
	int lame, int nodup)
{
	struct delegpt_addr* a;
	if(nodup) {
		if((a = delegpt_find_addr(dp, addr, addrlen))) {
			if(bogus)
				a->bogus = bogus;
			if(!lame)
				a->lame = 0;
			return 1;
		}
	}

	a = (struct delegpt_addr*)regional_alloc(region,
		sizeof(struct delegpt_addr));
	if(!a)
		return 0;
	a->next_target = dp->target_list;
	dp->target_list = a;
	a->next_result = 0;
	a->next_usable = dp->usable_list;
	dp->usable_list = a;
	memcpy(&a->addr, addr, addrlen);
	a->addrlen = addrlen;
	a->attempts = 0;
	a->bogus = bogus;
	a->lame = lame;
	return 1;
}
Example No. 16
int 
delegpt_add_addr(struct delegpt* dp, struct regional* region, 
	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t bogus, 
	uint8_t lame)
{
	struct delegpt_addr* a;
	log_assert(!dp->dp_type_mlc);
	/* check for duplicates */
	if((a = delegpt_find_addr(dp, addr, addrlen))) {
		if(bogus)
			a->bogus = bogus;
		if(!lame)
			a->lame = 0;
		return 1;
	}

	a = (struct delegpt_addr*)regional_alloc(region,
		sizeof(struct delegpt_addr));
	if(!a)
		return 0;
	a->next_target = dp->target_list;
	dp->target_list = a;
	a->next_result = 0;
	a->next_usable = dp->usable_list;
	dp->usable_list = a;
	memcpy(&a->addr, addr, addrlen);
	a->addrlen = addrlen;
	a->attempts = 0;
	a->bogus = bogus;
	a->lame = lame;
	a->dnsseclame = 0;
	return 1;
}
Example No. 17
int reply_info_parse(sldns_buffer* pkt, struct alloc_cache* alloc,
        struct query_info* qinf, struct reply_info** rep, 
	struct regional* region, struct edns_data* edns)
{
	/* use scratch pad region-allocator during parsing. */
	struct msg_parse* msg;
	int ret;
	
	qinf->qname = NULL;
	*rep = NULL;
	if(!(msg = regional_alloc(region, sizeof(*msg)))) {
		return LDNS_RCODE_SERVFAIL;
	}
	memset(msg, 0, sizeof(*msg));
	
	sldns_buffer_set_position(pkt, 0);
	if((ret = parse_packet(pkt, msg, region)) != 0) {
		return ret;
	}
	if((ret = parse_extract_edns(msg, edns)) != 0)
		return ret;

	/* parse OK, allocate return structures */
	/* this also performs dname decompression */
	if(!parse_create_msg(pkt, msg, alloc, qinf, rep, NULL)) {
		query_info_clear(qinf);
		reply_info_parsedelete(*rep, alloc);
		*rep = NULL;
		return LDNS_RCODE_SERVFAIL;
	}
	return 0;
}
Example No. 18
/** Add an RR (taken from the packet) to the rrset; the RR is skipped in some cases */
static int
add_rr_to_rrset(struct rrset_parse* rrset, sldns_buffer* pkt, 
	struct msg_parse* msg, struct regional* region, 
	sldns_pkt_section section, uint16_t type)
{
	struct rr_parse* rr;
	/* check section of rrset. */
	if(rrset->section != section && type != LDNS_RR_TYPE_RRSIG &&
		rrset->type != LDNS_RR_TYPE_RRSIG) {
		/* silently drop it - we drop the last part, since
		 * trust in rr data depends on the section it is in. 
		 * the less trustworthy part is discarded. 
		 * also the last part is more likely to be incomplete.
		 * RFC 2181: must put RRset only once in response. */
		/*
		verbose(VERB_QUERY, "Packet contains rrset data in "
			"multiple sections, dropped last part.");
		log_buf(VERB_QUERY, "packet was", pkt);
		*/
		/* forwards */
		if(!skip_ttl_rdata(pkt))
			return LDNS_RCODE_FORMERR;
		return 0;
	} 

	if( (msg->qtype == LDNS_RR_TYPE_RRSIG ||
	     msg->qtype == LDNS_RR_TYPE_ANY) 
	    && sig_is_double(pkt, rrset, sldns_buffer_current(pkt))) {
		if(!skip_ttl_rdata(pkt))
			return LDNS_RCODE_FORMERR;
		return 0;
	}
	
	/* create rr */
	if(!(rr = (struct rr_parse*)regional_alloc(region, sizeof(*rr))))
		return LDNS_RCODE_SERVFAIL;
	rr->outside_packet = 0;
	rr->ttl_data = sldns_buffer_current(pkt);
	rr->next = 0;
	if(type == LDNS_RR_TYPE_RRSIG && rrset->type != LDNS_RR_TYPE_RRSIG) {
		if(rrset->rrsig_last) 
			rrset->rrsig_last->next = rr;
		else	rrset->rrsig_first = rr;
		rrset->rrsig_last = rr;
		rrset->rrsig_count++;
	} else {
		if(rrset->rr_last)
			rrset->rr_last->next = rr;
		else	rrset->rr_first = rr;
		rrset->rr_last = rr;
		rrset->rr_count++;
	}

	/* calc decompressed size */
	if(!calc_size(pkt, type, rr))
		return LDNS_RCODE_FORMERR;
	rrset->size += rr->size;

	return 0;
}
Example No. 19
/**
 * Allocate new rrset in region, fill with data.
 */
static struct rrset_parse* 
new_rrset(struct msg_parse* msg, uint8_t* dname, size_t dnamelen, 
	uint16_t type, uint16_t dclass, hashvalue_t hash, 
	uint32_t rrset_flags, sldns_pkt_section section, 
	struct regional* region)
{
	struct rrset_parse* p = regional_alloc(region, sizeof(*p));
	if(!p) return NULL;
	p->rrset_bucket_next = msg->hashtable[hash & (PARSE_TABLE_SIZE-1)];
	msg->hashtable[hash & (PARSE_TABLE_SIZE-1)] = p;
	p->rrset_all_next = 0;
	if(msg->rrset_last)
		msg->rrset_last->rrset_all_next = p;
	else 	msg->rrset_first = p;
	msg->rrset_last = p;
	p->hash = hash;
	p->section = section;
	p->dname = dname;
	p->dname_len = dnamelen;
	p->type = type;
	p->rrset_class = dclass;
	p->flags = rrset_flags;
	p->rr_count = 0;
	p->size = 0;
	p->rr_first = 0;
	p->rr_last = 0;
	p->rrsig_count = 0;
	p->rrsig_first = 0;
	p->rrsig_last = 0;
	return p;
}
Example No. 20
void *
regional_alloc_zero(struct regional *r, size_t size)
{
	void *s = regional_alloc(r, size);
	if(!s) return NULL;
	memset(s, 0, size);
	return s;
}
Example No. 21
void *
regional_alloc_init(struct regional* r, const void *init, size_t size)
{
	void *s = regional_alloc(r, size);
	if(!s) return NULL;
	memcpy(s, init, size);
	return s;
}
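For orientation, here is a minimal sketch (not taken from the examples above) of how these helpers are normally driven; it assumes the regional_create(), regional_free_all() and regional_destroy() functions declared alongside them in Unbound's util/regional.h:

#include "util/regional.h"

/* sketch of a typical lifecycle: create a region, make a few allocations,
 * then recycle or destroy the region; nothing is freed individually */
static void
region_lifecycle_sketch(void)
{
	struct regional* region = regional_create();
	char* s;
	uint8_t* z;
	if(!region)
		return; /* out of memory */
	s = (char*)regional_alloc_init(region, "example", sizeof("example"));
	z = (uint8_t*)regional_alloc_zero(region, 64);
	if(!s || !z) {
		regional_destroy(region);
		return;
	}
	/* ... use s and z while the region lives ... */
	regional_free_all(region);	/* reclaim all allocations, keep region */
	regional_destroy(region);	/* release the region itself */
}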
Example No. 22
void iter_store_parentside_neg(struct module_env* env, 
        struct query_info* qinfo, struct reply_info* rep)
{
	/* TTL: NS from referral in iq->deleg_msg,
	 *      or first RR from iq->response,
	 *      or servfail5secs if !iq->response */ 
	time_t ttl = NORR_TTL;
	struct ub_packed_rrset_key* neg;
	struct packed_rrset_data* newd;
	if(rep) {
		struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
		if(!rrset && rep->rrset_count != 0) rrset = rep->rrsets[0];
		if(rrset) ttl = ub_packed_rrset_ttl(rrset);
	}
	/* create empty rrset to store */
	neg = (struct ub_packed_rrset_key*)regional_alloc(env->scratch,
	                sizeof(struct ub_packed_rrset_key));
	if(!neg) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	memset(&neg->entry, 0, sizeof(neg->entry));
	neg->entry.key = neg;
	neg->rk.type = htons(qinfo->qtype);
	neg->rk.rrset_class = htons(qinfo->qclass);
	neg->rk.flags = 0;
	neg->rk.dname = regional_alloc_init(env->scratch, qinfo->qname, 
		qinfo->qname_len);
	if(!neg->rk.dname) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	neg->rk.dname_len = qinfo->qname_len;
	neg->entry.hash = rrset_key_hash(&neg->rk);
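	/* one allocation: the packed_rrset_data header plus room for a
	 * single rr_len, rr_data pointer, rr_ttl and a 2-byte rdlength */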
	newd = (struct packed_rrset_data*)regional_alloc_zero(env->scratch, 
		sizeof(struct packed_rrset_data) + sizeof(size_t) +
		sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t));
	if(!newd) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	neg->entry.data = newd;
	newd->ttl = ttl;
	/* entry must have one RR, otherwise not valid in cache.
	 * put in one RR with empty rdata: those are ignored as nameserver */
	newd->count = 1;
	newd->rrsig_count = 0;
	newd->trust = rrset_trust_ans_noAA;
	newd->rr_len = (size_t*)((uint8_t*)newd +
		sizeof(struct packed_rrset_data));
	newd->rr_len[0] = 0 /* zero len rdata */ + sizeof(uint16_t);
	packed_rrset_ptr_fixup(newd);
	newd->rr_ttl[0] = newd->ttl;
	sldns_write_uint16(newd->rr_data[0], 0 /* zero len rdata */);
	/* store it */
	log_rrset_key(VERB_ALGO, "store parent-side negative", neg);
	iter_store_parentside_rrset(env, neg);
}
Example No. 23
struct delegpt* 
delegpt_create(struct regional* region)
{
	struct delegpt* dp=(struct delegpt*)regional_alloc(
		region, sizeof(*dp));
	if(!dp)
		return NULL;
	memset(dp, 0, sizeof(*dp));
	return dp;
}
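As a usage illustration only, the sketch below combines the call signatures shown in Example No. 11, Example No. 16 and Example No. 23; it is not from the Unbound sources, and real callers also set the delegation point's name through functions not shown here:

/* sketch: assemble a delegation point in a region from one NS name and
 * one glue address, using the functions listed in the examples above */
static struct delegpt*
sketch_build_delegpt(struct regional* region, uint8_t* ns_name,
	struct sockaddr_storage* addr, socklen_t addrlen)
{
	struct delegpt* dp = delegpt_create(region);
	if(!dp)
		return NULL;
	if(!delegpt_add_ns(dp, region, ns_name, 0 /* not lame */))
		return NULL;
	if(!delegpt_add_addr(dp, region, addr, addrlen,
		0 /* not bogus */, 0 /* not lame */))
		return NULL;
	return dp;
}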
Example No. 24
struct mesh_state* 
mesh_state_create(struct module_env* env, struct query_info* qinfo, 
	uint16_t qflags, int prime)
{
	struct regional* region = alloc_reg_obtain(env->alloc);
	struct mesh_state* mstate;
	int i;
	if(!region)
		return NULL;
	mstate = (struct mesh_state*)regional_alloc(region, 
		sizeof(struct mesh_state));
	if(!mstate) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	memset(mstate, 0, sizeof(*mstate));
	mstate->node = *RBTREE_NULL;
	mstate->run_node = *RBTREE_NULL;
	mstate->node.key = mstate;
	mstate->run_node.key = mstate;
	mstate->reply_list = NULL;
	mstate->list_select = mesh_no_list;
	mstate->replies_sent = 0;
	rbtree_init(&mstate->super_set, &mesh_state_ref_compare);
	rbtree_init(&mstate->sub_set, &mesh_state_ref_compare);
	mstate->num_activated = 0;
	/* init module qstate */
	mstate->s.qinfo.qtype = qinfo->qtype;
	mstate->s.qinfo.qclass = qinfo->qclass;
	mstate->s.qinfo.qname_len = qinfo->qname_len;
	mstate->s.qinfo.qname = regional_alloc_init(region, qinfo->qname,
		qinfo->qname_len);
	if(!mstate->s.qinfo.qname) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	/* remove all weird bits from qflags */
	mstate->s.query_flags = (qflags & (BIT_RD|BIT_CD));
	mstate->s.is_priming = prime;
	mstate->s.reply = NULL;
	mstate->s.region = region;
	mstate->s.curmod = 0;
	mstate->s.return_msg = 0;
	mstate->s.return_rcode = LDNS_RCODE_NOERROR;
	mstate->s.env = env;
	mstate->s.mesh_info = mstate;
	mstate->s.prefetch_leeway = 0;
	/* init modules */
	for(i=0; i<env->mesh->mods.num; i++) {
		mstate->s.minfo[i] = NULL;
		mstate->s.ext_state[i] = module_state_initial;
	}
	return mstate;
}
Example No. 25
/** new query for cachedb */
static int
cachedb_new(struct module_qstate* qstate, int id)
{
	struct cachedb_qstate* iq = (struct cachedb_qstate*)regional_alloc(
		qstate->region, sizeof(struct cachedb_qstate));
	qstate->minfo[id] = iq;
	if(!iq) 
		return 0;
	memset(iq, 0, sizeof(*iq));
	/* initialise it */
	/* TODO */

	return 1;
}
Example No. 26
struct dns_msg* 
dns_copy_msg(struct dns_msg* from, struct regional* region)
{
	struct dns_msg* m = (struct dns_msg*)regional_alloc(region,
		sizeof(struct dns_msg));
	if(!m)
		return NULL;
	m->qinfo = from->qinfo;
	if(!(m->qinfo.qname = regional_alloc_init(region, from->qinfo.qname,
		from->qinfo.qname_len)))
		return NULL;
	if(!(m->rep = reply_info_copy(from->rep, NULL, region)))
		return NULL;
	return m;
}
Example No. 27
struct dns_msg* 
dns_alloc_msg(sldns_buffer* pkt, struct msg_parse* msg, 
	struct regional* region)
{
	struct dns_msg* m = (struct dns_msg*)regional_alloc(region,
		sizeof(struct dns_msg));
	if(!m)
		return NULL;
	memset(m, 0, sizeof(*m));
	if(!parse_create_msg(pkt, msg, NULL, &m->qinfo, &m->rep, region)) {
		log_err("malloc failure: allocating incoming dns_msg");
		return NULL;
	}
	return m;
}
Example No. 28
/**
 * Create node for domain name compression tree.
 * @param dname: pointer to uncompressed dname (stored in tree).
 * @param labs: number of labels in dname.
 * @param offset: offset into packet for dname.
 * @param region: how to allocate memory for new node.
 * @return new node or 0 on malloc failure.
 */
static struct compress_tree_node*
compress_tree_newnode(uint8_t* dname, int labs, size_t offset, 
	struct regional* region)
{
	struct compress_tree_node* n = (struct compress_tree_node*)
		regional_alloc(region, sizeof(struct compress_tree_node));
	if(!n) return 0;
	n->left = 0;
	n->right = 0;
	n->parent = 0;
	n->dname = dname;
	n->labs = labs;
	n->offset = offset;
	return n;
}
Example No. 29
void sock_list_insert(struct sock_list** list, struct sockaddr_storage* addr,
	socklen_t len, struct regional* region)
{
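	/* allocate the list node with only as many address bytes as this
	 * address actually needs */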
	struct sock_list* add = (struct sock_list*)regional_alloc(region,
		sizeof(*add) - sizeof(add->addr) + (size_t)len);
	if(!add) {
		log_err("out of memory in socketlist insert");
		return;
	}
	log_assert(list);
	add->next = *list;
	add->len = len;
	*list = add;
	if(len) memmove(&add->addr, addr, len);
}
Example No. 30
/** insert new address into acl_list structure */
static int
acl_list_insert(struct acl_list* acl, struct sockaddr_storage* addr, 
	socklen_t addrlen, int net, enum acl_access control, 
	int complain_duplicates)
{
	struct acl_addr* node = regional_alloc(acl->region,
		sizeof(struct acl_addr));
	if(!node)
		return 0;
	node->control = control;
	if(!addr_tree_insert(&acl->tree, &node->node, addr, addrlen, net)) {
		if(complain_duplicates)
			verbose(VERB_QUERY, "duplicate acl address ignored.");
	}
	return 1;
}