/** test adding a random element */
static void
testlookup(struct lruhash* table, testdata_t* ref[])
{
	int num = random() % HASHTESTMAX;
	testkey_t* key = newkey(num);
	struct lruhash_entry* en = lruhash_lookup(table, myhash(num), key, 0);
	testdata_t* data = en? (testdata_t*)en->data : NULL;
	if(en) {
		unit_assert(en->key);
		unit_assert(en->data);
	}
	if(0) log_info("lookup %d got %d, expect %d", num, en? data->data :-1,
		ref[num]? ref[num]->data : -1);
	unit_assert( data == ref[num] );
	if(en) { lock_rw_unlock(&en->lock); }
	delkey(key);
}
Example #2
void 
infra_update_tcp_works(struct infra_cache* infra,
        struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* nm,
	size_t nmlen)
{
	struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen,
		nm, nmlen, 1);
	struct infra_data* data;
	if(!e)
		return; /* doesn't exist */
	data = (struct infra_data*)e->data;
	if(data->rtt.rto >= RTT_MAX_TIMEOUT)
		/* do not disqualify this server altogether, it is better
		 * than nothing */
		data->rtt.rto = RTT_MAX_TIMEOUT-1000;
	lock_rw_unlock(&e->lock);
}
Example #3
/** find and add A and AAAA records for missing nameservers in delegpt */
int
cache_fill_missing(struct module_env* env, uint16_t qclass, 
	struct regional* region, struct delegpt* dp)
{
	struct delegpt_ns* ns;
	struct msgreply_entry* neg;
	struct ub_packed_rrset_key* akey;
	time_t now = *env->now;
	for(ns = dp->nslist; ns; ns = ns->next) {
		akey = rrset_cache_lookup(env->rrset_cache, ns->name, 
			ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
		if(akey) {
			if(!delegpt_add_rrset_A(dp, region, akey, ns->lame)) {
				lock_rw_unlock(&akey->entry.lock);
				return 0;
			}
			log_nametypeclass(VERB_ALGO, "found in cache",
				ns->name, LDNS_RR_TYPE_A, qclass);
			lock_rw_unlock(&akey->entry.lock);
		} else {
			/* BIT_CD on false because delegpt lookup does
			 * not use dns64 translation */
			neg = msg_cache_lookup(env, ns->name, ns->namelen,
				LDNS_RR_TYPE_A, qclass, 0, now, 0);
			if(neg) {
				delegpt_add_neg_msg(dp, neg);
				lock_rw_unlock(&neg->entry.lock);
			}
		}
		akey = rrset_cache_lookup(env->rrset_cache, ns->name, 
			ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
		if(akey) {
			if(!delegpt_add_rrset_AAAA(dp, region, akey, ns->lame)) {
				lock_rw_unlock(&akey->entry.lock);
				return 0;
			}
			log_nametypeclass(VERB_ALGO, "found in cache",
				ns->name, LDNS_RR_TYPE_AAAA, qclass);
			lock_rw_unlock(&akey->entry.lock);
		} else {
			/* BIT_CD on false because delegpt lookup does
			 * not use dns64 translation */
			neg = msg_cache_lookup(env, ns->name, ns->namelen,
				LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
			if(neg) {
				delegpt_add_neg_msg(dp, neg);
				lock_rw_unlock(&neg->entry.lock);
			}
		}
	}
	return 1;
}
Example #4
/** find and add A and AAAA records for nameservers in delegpt */
static int
find_add_addrs(struct module_env* env, uint16_t qclass, 
	struct regional* region, struct delegpt* dp, time_t now, 
	struct dns_msg** msg)
{
	struct delegpt_ns* ns;
	struct msgreply_entry* neg;
	struct ub_packed_rrset_key* akey;
	for(ns = dp->nslist; ns; ns = ns->next) {
		akey = rrset_cache_lookup(env->rrset_cache, ns->name, 
			ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
		if(akey) {
			if(!delegpt_add_rrset_A(dp, region, akey, 0)) {
				lock_rw_unlock(&akey->entry.lock);
				return 0;
			}
			if(msg)
				addr_to_additional(akey, region, *msg, now);
			lock_rw_unlock(&akey->entry.lock);
		} else {
			/* BIT_CD on false because delegpt lookup does
			 * not use dns64 translation */
			neg = msg_cache_lookup(env, ns->name, ns->namelen,
				LDNS_RR_TYPE_A, qclass, 0, now, 0);
			if(neg) {
				delegpt_add_neg_msg(dp, neg);
				lock_rw_unlock(&neg->entry.lock);
			}
		}
		akey = rrset_cache_lookup(env->rrset_cache, ns->name, 
			ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
		if(akey) {
			if(!delegpt_add_rrset_AAAA(dp, region, akey, 0)) {
				lock_rw_unlock(&akey->entry.lock);
				return 0;
			}
			if(msg)
				addr_to_additional(akey, region, *msg, now);
			lock_rw_unlock(&akey->entry.lock);
		} else {
			/* BIT_CD on false because delegpt lookup does
			 * not use dns64 translation */
			neg = msg_cache_lookup(env, ns->name, ns->namelen,
				LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
			if(neg) {
				delegpt_add_neg_msg(dp, neg);
				lock_rw_unlock(&neg->entry.lock);
			}
		}
	}
	return 1;
}
Example #5
/** load a msg rrset reference */
static int
load_ref(SSL* ssl, sldns_buffer* buf, struct worker* worker, 
	struct regional *region, struct ub_packed_rrset_key** rrset, 
	int* go_on)
{
	char* s = (char*)sldns_buffer_begin(buf);
	struct query_info qinfo;
	unsigned int flags;
	struct ub_packed_rrset_key* k;

	/* read line */
	if(!ssl_read_buf(ssl, buf))
		return 0;
	if(strncmp(s, "BADREF", 6) == 0) {
		*go_on = 0; /* its bad, skip it and skip message */
		return 1;
	}

	s = load_qinfo(s, &qinfo, region);
	if(!s) {
		return 0;
	}
	if(sscanf(s, " %u", &flags) != 1) {
		log_warn("error cannot parse flags: %s", s);
		return 0;
	}

	/* lookup in cache */
	k = rrset_cache_lookup(worker->env.rrset_cache, qinfo.qname,
		qinfo.qname_len, qinfo.qtype, qinfo.qclass,
		(uint32_t)flags, *worker->env.now, 0);
	if(!k) {
		/* not found or expired */
		*go_on = 0;
		return 1;
	}

	/* store in result */
	*rrset = packed_rrset_copy_region(k, region, *worker->env.now);
	lock_rw_unlock(&k->entry.lock);

	return (*rrset != NULL);
}
Example #6
int infra_ratelimit_exceeded(struct infra_cache* infra, uint8_t* name,
	size_t namelen, time_t timenow)
{
	struct lruhash_entry* entry;
	int lim, max;
	if(!infra_dp_ratelimit)
		return 0; /* not enabled */

	/* find ratelimit */
	lim = infra_find_ratelimit(infra, name, namelen);

	/* find current rate */
	entry = infra_find_ratedata(infra, name, namelen, 0);
	if(!entry)
		return 0; /* not cached */
	max = infra_rate_max(entry->data, timenow);
	lock_rw_unlock(&entry->lock);

	return (max >= lim);
}
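A minimal sketch of how the boolean result above might be consumed, assuming the surrounding Unbound headers are in scope; the helper name and its role in query processing are illustrative assumptions, only the infra_ratelimit_exceeded() call mirrors the code above.
/* Hypothetical gate: drop or delay a query to the delegation named by
 * dpname when its current rate exceeds the configured limit. */
static int
may_query_delegation(struct infra_cache* infra, uint8_t* dpname,
	size_t dpnamelen, time_t now)
{
	if(infra_ratelimit_exceeded(infra, dpname, dpnamelen, now))
		return 0; /* over the queries-per-second limit, back off */
	return 1; /* below the limit, or ratelimiting disabled */
}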
Example #7
/** test adding a random element (unlimited range) */
static void
testlookup_unlim(struct lruhash* table, testdata_t** ref)
{
	int num = random() % (HASHTESTMAX*10);
	testkey_t* key = newkey(num);
	struct lruhash_entry* en = lruhash_lookup(table, myhash(num), key, 0);
	testdata_t* data = en? (testdata_t*)en->data : NULL;
	if(en) {
		unit_assert(en->key);
		unit_assert(en->data);
	}
	if(0 && ref) log_info("lookup unlim %d got %d, expect %d", num, en ? 
		data->data :-1, ref[num] ? ref[num]->data : -1);
	if(data && ref) {
		/* its okay for !data, it fell off the lru */
		unit_assert( data == ref[num] );
	}
	if(en) { lock_rw_unlock(&en->lock); }
	delkey(key);
}
Example #8
/** store rrsets in the rrset cache. 
 * @param env: module environment with caches.
 * @param rep: contains list of rrsets to store.
 * @param now: current time.
 * @param leeway: during prefetch how much leeway to update TTLs.
 * 	This makes rrsets (other than type NS) timeout sooner so they get
 * 	updated with a new full TTL.
 * 	Type NS does not get this, because it must not be refreshed from the
 * 	child domain, but keep counting down properly.
 * @param pside: if from parentside discovered NS, so that its NS is okay
 * 	in a prefetch situation to be updated (without becoming sticky).
 * @param qrep: update rrsets here if cache is better
 * @param region: for qrep allocs.
 */
static void
store_rrsets(struct module_env* env, struct reply_info* rep, time_t now,
	time_t leeway, int pside, struct reply_info* qrep,
	struct regional* region)
{
        size_t i;
        /* see if rrset already exists in cache, if not insert it. */
        for(i=0; i<rep->rrset_count; i++) {
                rep->ref[i].key = rep->rrsets[i];
                rep->ref[i].id = rep->rrsets[i]->id;
		/* update ref if it was in the cache */ 
		switch(rrset_cache_update(env->rrset_cache, &rep->ref[i],
                        env->alloc, now + ((ntohs(rep->ref[i].key->rk.type)==
			LDNS_RR_TYPE_NS && !pside)?0:leeway))) {
		case 0: /* ref unchanged, item inserted */
			break;
		case 2: /* ref updated, cache is superior */
			if(region) {
				struct ub_packed_rrset_key* ck;
				lock_rw_rdlock(&rep->ref[i].key->entry.lock);
				/* if deleted rrset, do not copy it */
				if(rep->ref[i].key->id == 0)
					ck = NULL;
				else 	ck = packed_rrset_copy_region(
					rep->ref[i].key, region, now);
				lock_rw_unlock(&rep->ref[i].key->entry.lock);
				if(ck) {
					/* use cached copy if memory allows */
					qrep->rrsets[i] = ck;
				}
			}
			/* no break: also copy key item */
			/* the line below is matched by gcc regex and silences
			 * the fallthrough warning */
			/* fallthrough */
		case 1: /* ref updated, item inserted */
                        rep->rrsets[i] = rep->ref[i].key;
		}
        }
}
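As a hedged restatement of the leeway arithmetic in the rrset_cache_update() call above (this helper is not part of Unbound; it only spells out the inline expression):
static time_t
store_time_with_leeway(time_t now, uint16_t type_host_order, int pside,
	time_t leeway)
{
	/* a child-side NS rrset keeps its full TTL and keeps counting down */
	if(type_host_order == LDNS_RR_TYPE_NS && !pside)
		return now;
	/* everything else is aged by the leeway so a prefetch refreshes it */
	return now + leeway;
}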
Example #9
File: dns.c Project: coyizumi/cs111
/** lookup message in message cache */
static struct msgreply_entry*
msg_cache_lookup(struct module_env* env, uint8_t* qname, size_t qnamelen,
                 uint16_t qtype, uint16_t qclass, uint16_t flags, time_t now, int wr)
{
    struct lruhash_entry* e;
    struct query_info k;
    hashvalue_t h;

    k.qname = qname;
    k.qname_len = qnamelen;
    k.qtype = qtype;
    k.qclass = qclass;
    h = query_info_hash(&k, flags);
    e = slabhash_lookup(env->msg_cache, h, &k, wr);

    if(!e) return NULL;
    if( now > ((struct reply_info*)e->data)->ttl ) {
        lock_rw_unlock(&e->lock);
        return NULL;
    }
    return (struct msgreply_entry*)e->key;
}
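A hedged caller sketch for the lookup above: the entry comes back locked, so the caller must release e->entry.lock once it is done with the reply data. The wrapper name is an assumption for illustration; only the msg_cache_lookup() call and the unlock follow the code shown.
static int
msg_in_cache(struct module_env* env, uint8_t* qname, size_t qnamelen,
	uint16_t qtype, uint16_t qclass, time_t now)
{
	struct msgreply_entry* e = msg_cache_lookup(env, qname, qnamelen,
		qtype, qclass, 0, now, 0 /* read lock */);
	if(!e)
		return 0; /* absent or past its TTL */
	/* ... inspect (struct reply_info*)e->entry.data here ... */
	lock_rw_unlock(&e->entry.lock);
	return 1;
}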
Example #10
/** Add a new zone */
static void
do_zone_add(SSL* ssl, struct worker* worker, char* arg)
{
	uint8_t* nm;
	int nmlabs;
	size_t nmlen;
	char* arg2;
	enum localzone_type t;
	struct local_zone* z;
	if(!find_arg2(ssl, arg, &arg2))
		return;
	if(!parse_arg_name(ssl, arg, &nm, &nmlen, &nmlabs))
		return;
	if(!local_zone_str2type(arg2, &t)) {
		ssl_printf(ssl, "error not a zone type. %s\n", arg2);
		free(nm);
		return;
	}
	lock_quick_lock(&worker->daemon->local_zones->lock);
	if((z=local_zones_find(worker->daemon->local_zones, nm, nmlen, 
		nmlabs, LDNS_RR_CLASS_IN))) {
		/* already present in tree */
		lock_rw_wrlock(&z->lock);
		z->type = t; /* update type anyway */
		lock_rw_unlock(&z->lock);
		free(nm);
		lock_quick_unlock(&worker->daemon->local_zones->lock);
		send_ok(ssl);
		return;
	}
	if(!local_zones_add_zone(worker->daemon->local_zones, nm, nmlen, 
		nmlabs, LDNS_RR_CLASS_IN, t)) {
		lock_quick_unlock(&worker->daemon->local_zones->lock);
		ssl_printf(ssl, "error out of memory\n");
		return;
	}
	lock_quick_unlock(&worker->daemon->local_zones->lock);
	send_ok(ssl);
}
Example #11
/* Add a new zone */
int ub_ctx_zone_add(struct ub_ctx* ctx, char *zone_name, char *zone_type)
{
    enum localzone_type t;
    struct local_zone* z;
    uint8_t* nm;
    int nmlabs;
    size_t nmlen;

    int res = ub_ctx_finalize(ctx);
    if (res) return res;

    if(!local_zone_str2type(zone_type, &t)) {
        return UB_SYNTAX;
    }

    if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
        return UB_SYNTAX;
    }

    lock_quick_lock(&ctx->local_zones->lock);
    if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
                           LDNS_RR_CLASS_IN))) {
        /* already present in tree */
        lock_rw_wrlock(&z->lock);
        z->type = t; /* update type anyway */
        lock_rw_unlock(&z->lock);
        lock_quick_unlock(&ctx->local_zones->lock);
        free(nm);
        return UB_NOERROR;
    }
    if(!local_zones_add_zone(ctx->local_zones, nm, nmlen, nmlabs,
                             LDNS_RR_CLASS_IN, t)) {
        lock_quick_unlock(&ctx->local_zones->lock);
        return UB_NOMEM;
    }
    lock_quick_unlock(&ctx->local_zones->lock);
    return UB_NOERROR;
}
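A minimal, self-contained usage sketch for the public libunbound call above (assumes <unbound.h> is installed and the program is linked against libunbound; the zone name and type are arbitrary examples):
#include <stdio.h>
#include <unbound.h>

int main(void)
{
	struct ub_ctx* ctx = ub_ctx_create();
	int err;
	if(!ctx) {
		fprintf(stderr, "could not create unbound context\n");
		return 1;
	}
	/* add a static local zone; returns 0 (UB_NOERROR) on success */
	err = ub_ctx_zone_add(ctx, "example.org.", "static");
	if(err)
		fprintf(stderr, "zone add failed: %s\n", ub_strerror(err));
	ub_ctx_delete(ctx);
	return err ? 1 : 0;
}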
Example #12
int 
infra_set_lame(struct infra_cache* infra, struct sockaddr_storage* addr,
	socklen_t addrlen, uint8_t* nm, size_t nmlen, time_t timenow,
	int dnsseclame, int reclame, uint16_t qtype)
{
	struct infra_data* data;
	struct lruhash_entry* e;
	int needtoinsert = 0;
	e = infra_lookup_nottl(infra, addr, addrlen, nm, nmlen, 1);
	if(!e) {
		/* insert it */
		if(!(e = new_entry(infra, addr, addrlen, nm, nmlen, timenow))) {
			log_err("set_lame: malloc failure");
			return 0;
		}
		needtoinsert = 1;
	} else if( ((struct infra_data*)e->data)->ttl < timenow) {
		/* expired, reuse existing entry */
		data_entry_init(infra, e, timenow);
	}
	/* got an entry, now set the zone lame */
	data = (struct infra_data*)e->data;
	/* merge data (if any) */
	if(dnsseclame)
		data->isdnsseclame = 1;
	if(reclame)
		data->rec_lame = 1;
	if(!dnsseclame && !reclame && qtype == LDNS_RR_TYPE_A)
		data->lame_type_A = 1;
	if(!dnsseclame  && !reclame && qtype != LDNS_RR_TYPE_A)
		data->lame_other = 1;
	/* done */
	if(needtoinsert)
		slabhash_insert(infra->hosts, e->hash, e, e->data, NULL);
	else 	{ lock_rw_unlock(&e->lock); }
	return 1;
}
Example #13
/** iterate over the kiddies of the given name and set their parent ptr */
static void
set_kiddo_parents(struct local_zone* z, struct local_zone* match, 
	struct local_zone* newp)
{
	/* both zones and z are locked already */
	/* in the sorted rbtree, the kiddies of z are located after z */
	/* z must be present in the tree */
	struct local_zone* p = z;
	p = (struct local_zone*)rbtree_next(&p->node);
	while(p!=(struct local_zone*)RBTREE_NULL &&
		p->dclass == z->dclass && dname_strict_subdomain(p->name,
		p->namelabs, z->name, z->namelabs)) {
		/* update parent ptr */
		/* only when matches with existing parent pointer, so that
		 * deeper child structures are not touched, i.e.
		 * update of x, and a.x, b.x, f.b.x, g.b.x, c.x, y
		 * gets to update a.x, b.x and c.x */
		lock_rw_wrlock(&p->lock);
		if(p->parent == match)
			p->parent = newp;
		lock_rw_unlock(&p->lock);
		p = (struct local_zone*)rbtree_next(&p->node);
	}
}
Example #14
/* Remove zone */
int ub_ctx_zone_remove(struct ub_ctx* ctx, const char *zone_name)
{   
	struct local_zone* z;
	uint8_t* nm;
	int nmlabs;
	size_t nmlen;

	int res = ub_ctx_finalize(ctx);
	if (res) return res;

	if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
		return UB_SYNTAX;
	}

	lock_rw_wrlock(&ctx->local_zones->lock);
	if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs, 
		LDNS_RR_CLASS_IN))) {
		/* present in tree */
		local_zones_del_zone(ctx->local_zones, z);
	}
	lock_rw_unlock(&ctx->local_zones->lock);
	free(nm);
	return UB_NOERROR;
}
Example #15
long long infra_get_host_rto(struct infra_cache* infra,
        struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* nm,
	size_t nmlen, struct rtt_info* rtt, int* delay, time_t timenow,
	int* tA, int* tAAAA, int* tother)
{
	struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen,
		nm, nmlen, 0);
	struct infra_data* data;
	long long ttl = -2;
	if(!e) return -1;
	data = (struct infra_data*)e->data;
	if(data->ttl >= timenow) {
		ttl = (long long)(data->ttl - timenow);
		memmove(rtt, &data->rtt, sizeof(*rtt));
		if(timenow < data->probedelay)
			*delay = (int)(data->probedelay - timenow);
		else	*delay = 0;
	}
	*tA = (int)data->timeout_A;
	*tAAAA = (int)data->timeout_AAAA;
	*tother = (int)data->timeout_other;
	lock_rw_unlock(&e->lock);
	return ttl;
}
Example #16
struct dns_msg* 
dns_cache_lookup(struct module_env* env,
	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
	uint16_t flags, struct regional* region, struct regional* scratch,
	int no_partial)
{
	struct lruhash_entry* e;
	struct query_info k;
	hashvalue_type h;
	time_t now = *env->now;
	struct ub_packed_rrset_key* rrset;

	/* lookup first, this has both NXdomains and ANSWER responses */
	k.qname = qname;
	k.qname_len = qnamelen;
	k.qtype = qtype;
	k.qclass = qclass;
	k.local_alias = NULL;
	h = query_info_hash(&k, flags);
	e = slabhash_lookup(env->msg_cache, h, &k, 0);
	if(e) {
		struct msgreply_entry* key = (struct msgreply_entry*)e->key;
		struct reply_info* data = (struct reply_info*)e->data;
		struct dns_msg* msg = tomsg(env, &key->key, data, region, now, 
			scratch);
		if(msg) {
			lock_rw_unlock(&e->lock);
			return msg;
		}
		/* could be msg==NULL; due to TTL or not all rrsets available */
		lock_rw_unlock(&e->lock);
	}

	/* see if a DNAME exists. Checked for first, to enforce that DNAMEs
	 * are more important, the CNAME is resynthesized and thus 
	 * consistent with the DNAME */
	if(!no_partial &&
		(rrset=find_closest_of_type(env, qname, qnamelen, qclass, now,
		LDNS_RR_TYPE_DNAME, 1))) {
		/* synthesize a DNAME+CNAME message based on this */
		enum sec_status sec_status = sec_status_unchecked;
		struct dns_msg* msg = synth_dname_msg(rrset, region, now, &k,
			&sec_status);
		if(msg) {
			struct ub_packed_rrset_key* cname_rrset;
			lock_rw_unlock(&rrset->entry.lock);
			/* now, after unlocking the DNAME rrset lock,
			 * check the sec_status, and see if we need to look
			 * up the CNAME record associated before it can
			 * be used */
			/* normally, only secure DNAMEs allowed from cache*/
			if(sec_status == sec_status_secure)
				return msg;
			/* but if we have a CNAME cached with this name, then we
			 * have previously already allowed this name to pass.
			 * the next cache lookup is going to fetch that CNAME itself,
			 * but it is better to have the (unsigned)DNAME + CNAME in
			 * that case */
			cname_rrset = rrset_cache_lookup(
				env->rrset_cache, qname, qnamelen,
				LDNS_RR_TYPE_CNAME, qclass, 0, now, 0);
			if(cname_rrset) {
				/* CNAME already synthesized by
				 * synth_dname_msg routine, so we can
				 * straight up return the msg */
				lock_rw_unlock(&cname_rrset->entry.lock);
				return msg;
			}
		} else {
			lock_rw_unlock(&rrset->entry.lock);
		}
	}

	/* see if we have CNAME for this domain,
	 * but not for DS records (which are part of the parent) */
	if(!no_partial && qtype != LDNS_RR_TYPE_DS &&
	   (rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen, 
		LDNS_RR_TYPE_CNAME, qclass, 0, now, 0))) {
		uint8_t* wc = NULL;
		size_t wl;
		/* if the rrset is not a wildcard expansion, with wcname */
		/* because, if we return that CNAME rrset on its own, it is
		 * missing the NSEC or NSEC3 proof */
		if(!(val_rrset_wildcard(rrset, &wc, &wl) && wc != NULL)) {
			struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
			if(msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return msg;
			}
		}
		lock_rw_unlock(&rrset->entry.lock);
	}

	/* construct DS, DNSKEY, DLV messages from rrset cache. */
	if((qtype == LDNS_RR_TYPE_DS || qtype == LDNS_RR_TYPE_DNSKEY ||
		qtype == LDNS_RR_TYPE_DLV) &&
		(rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen, 
		qtype, qclass, 0, now, 0))) {
		/* if the rrset is from the additional section, and the
		 * signatures have fallen off, then do not synthesize a msg
		 * instead, allow a full query for signed results to happen.
		 * Forego all rrset data from additional section, because
		 * some signatures may not be present and cause validation
		 * failure.
		 */
		struct packed_rrset_data *d = (struct packed_rrset_data*)
			rrset->entry.data;
		if(d->trust != rrset_trust_add_noAA && 
			d->trust != rrset_trust_add_AA && 
			(qtype == LDNS_RR_TYPE_DS || 
				(d->trust != rrset_trust_auth_noAA 
				&& d->trust != rrset_trust_auth_AA) )) {
			struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
			if(msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return msg;
			}
		}
		lock_rw_unlock(&rrset->entry.lock);
	}

	/* stop downwards cache search on NXDOMAIN.
	 * Empty nonterminals are NOERROR, so an NXDOMAIN for foo
	 * means bla.foo also does not exist.  The DNSSEC proofs are
	 * the same.  We search upwards for NXDOMAINs. */
	if(env->cfg->harden_below_nxdomain)
	    while(!dname_is_root(k.qname)) {
		dname_remove_label(&k.qname, &k.qname_len);
		h = query_info_hash(&k, flags);
		e = slabhash_lookup(env->msg_cache, h, &k, 0);
		if(!e && k.qtype != LDNS_RR_TYPE_A &&
			env->cfg->qname_minimisation) {
			k.qtype = LDNS_RR_TYPE_A;
			h = query_info_hash(&k, flags);
			e = slabhash_lookup(env->msg_cache, h, &k, 0);
		}
		if(e) {
			struct reply_info* data = (struct reply_info*)e->data;
			struct dns_msg* msg;
			if(FLAGS_GET_RCODE(data->flags) == LDNS_RCODE_NXDOMAIN
			  && data->security == sec_status_secure
			  && (msg=tomsg(env, &k, data, region, now, scratch))){
				lock_rw_unlock(&e->lock);
				msg->qinfo.qname=qname;
				msg->qinfo.qname_len=qnamelen;
				/* check that DNSSEC really works out */
				msg->rep->security = sec_status_unchecked;
				return msg;
			}
			lock_rw_unlock(&e->lock);
		}
		k.qtype = qtype;
	    }

	/* fill common RR types for ANY response to avoid requery */
	if(qtype == LDNS_RR_TYPE_ANY) {
		return fill_any(env, qname, qnamelen, qtype, qclass, region);
	}

	return NULL;
}
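A hedged sketch of a cache-only caller for the function above; the wrapper name and the query_info access pattern are assumptions, only the dns_cache_lookup() call itself follows the signature shown.
static struct dns_msg*
answer_from_cache_only(struct module_env* env, struct query_info* qinfo,
	uint16_t flags, struct regional* region, struct regional* scratch)
{
	/* no_partial=0: allow synthesized DNAME/CNAME partial answers */
	struct dns_msg* msg = dns_cache_lookup(env, qinfo->qname,
		qinfo->qname_len, qinfo->qtype, qinfo->qclass, flags,
		region, scratch, 0);
	/* NULL means a cache miss; a real caller would continue with normal
	 * iteration.  The returned message is allocated in `region` and no
	 * cache locks are held at this point. */
	return msg;
}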
Example #17
/** enter override into zone */
static int
lz_enter_override(struct local_zones* zones, char* zname, char* netblock,
	char* type, uint16_t rr_class)
{
	uint8_t dname[LDNS_MAX_DOMAINLEN+1];
	size_t dname_len = sizeof(dname);
	int dname_labs;
	struct sockaddr_storage addr;
	int net;
	socklen_t addrlen;
	struct local_zone* z;
	enum localzone_type t;

	/* parse zone name */
	if(sldns_str2wire_dname_buf(zname, dname, &dname_len) != 0) {
		log_err("cannot parse zone name in local-zone-override: %s %s",
			zname, netblock);
		return 0;
	}
	dname_labs = dname_count_labels(dname);

	/* parse netblock */
	if(!netblockstrtoaddr(netblock, UNBOUND_DNS_PORT, &addr, &addrlen,
		&net)) {
		log_err("cannot parse netblock in local-zone-override: %s %s",
			zname, netblock);
		return 0;
	}

	/* parse zone type */
	if(!local_zone_str2type(type, &t)) {
		log_err("cannot parse type in local-zone-override: %s %s %s",
			zname, netblock, type);
		return 0;
	}

	/* find localzone entry */
	lock_rw_rdlock(&zones->lock);
	z = local_zones_find(zones, dname, dname_len, dname_labs, rr_class);
	if(!z) {
		lock_rw_unlock(&zones->lock);
		log_err("no local-zone for local-zone-override %s", zname);
		return 0;
	}
	lock_rw_wrlock(&z->lock);
	lock_rw_unlock(&zones->lock);

	/* create netblock addr_tree if not present yet */
	if(!z->override_tree) {
		z->override_tree = (struct rbtree_t*)regional_alloc_zero(
			z->region, sizeof(*z->override_tree));
		if(!z->override_tree) {
			lock_rw_unlock(&z->lock);
			log_err("out of memory");
			return 0;
		}
		addr_tree_init(z->override_tree);
	}
	/* add new elem to tree */
	if(z->override_tree) {
		struct local_zone_override* n;
		n = (struct local_zone_override*)regional_alloc_zero(
			z->region, sizeof(*n));
		if(!n) {
			lock_rw_unlock(&z->lock);
			log_err("out of memory");
			return 0;
		}
		n->type = t;
		if(!addr_tree_insert(z->override_tree,
			(struct addr_tree_node*)n, &addr, addrlen, net)) {
			lock_rw_unlock(&z->lock);
			log_err("duplicate local-zone-override %s %s",
				zname, netblock);
			return 1;
		}
	}

	lock_rw_unlock(&z->lock);
	return 1;
}
Example #18
/** enter default zones */
static int
lz_enter_defaults(struct local_zones* zones, struct config_file* cfg)
{
	struct local_zone* z;
	const char** zstr;

	/* this list of zones is from RFC 6303 and RFC 7686 */

	/* block localhost level zones first, then onion and later the LAN zones */

	/* localhost. zone */
	if(!lz_exists(zones, "localhost.") &&
		!lz_nodefault(cfg, "localhost.")) {
		if(!(z=lz_enter_zone(zones, "localhost.", "static", 
			LDNS_RR_CLASS_IN)) ||
		   !lz_enter_rr_into_zone(z,
			"localhost. 10800 IN NS localhost.") ||
		   !lz_enter_rr_into_zone(z,
			"localhost. 10800 IN SOA localhost. nobody.invalid. "
			"1 3600 1200 604800 10800") ||
		   !lz_enter_rr_into_zone(z,
			"localhost. 10800 IN A 127.0.0.1") ||
		   !lz_enter_rr_into_zone(z,
			"localhost. 10800 IN AAAA ::1")) {
			log_err("out of memory adding default zone");
			if(z) { lock_rw_unlock(&z->lock); }
			return 0;
		}
		lock_rw_unlock(&z->lock);
	}
	/* reverse ip4 zone */
	if(!lz_exists(zones, "127.in-addr.arpa.") &&
		!lz_nodefault(cfg, "127.in-addr.arpa.")) {
		if(!(z=lz_enter_zone(zones, "127.in-addr.arpa.", "static", 
			LDNS_RR_CLASS_IN)) ||
		   !lz_enter_rr_into_zone(z,
			"127.in-addr.arpa. 10800 IN NS localhost.") ||
		   !lz_enter_rr_into_zone(z,
			"127.in-addr.arpa. 10800 IN SOA localhost. "
			"nobody.invalid. 1 3600 1200 604800 10800") ||
		   !lz_enter_rr_into_zone(z,
			"1.0.0.127.in-addr.arpa. 10800 IN PTR localhost.")) {
			log_err("out of memory adding default zone");
			if(z) { lock_rw_unlock(&z->lock); }
			return 0;
		}
		lock_rw_unlock(&z->lock);
	}
	/* reverse ip6 zone */
	if(!lz_exists(zones, "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.") &&
		!lz_nodefault(cfg, "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.")) {
		if(!(z=lz_enter_zone(zones, "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.", "static", 
			LDNS_RR_CLASS_IN)) ||
		   !lz_enter_rr_into_zone(z,
			"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa. 10800 IN NS localhost.") ||
		   !lz_enter_rr_into_zone(z,
			"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa. 10800 IN SOA localhost. "
			"nobody.invalid. 1 3600 1200 604800 10800") ||
		   !lz_enter_rr_into_zone(z,
			"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa. 10800 IN PTR localhost.")) {
			log_err("out of memory adding default zone");
			if(z) { lock_rw_unlock(&z->lock); }
			return 0;
		}
		lock_rw_unlock(&z->lock);
	}
	/* onion. zone (RFC 7686) */
	if(!lz_exists(zones, "onion.") &&
		!lz_nodefault(cfg, "onion.")) {
		if(!(z=lz_enter_zone(zones, "onion.", "static", 
			LDNS_RR_CLASS_IN)) ||
		   !lz_enter_rr_into_zone(z,
			"onion. 10800 IN NS localhost.") ||
		   !lz_enter_rr_into_zone(z,
			"onion. 10800 IN SOA localhost. nobody.invalid. "
			"1 3600 1200 604800 10800")) {
			log_err("out of memory adding default zone");
			if(z) { lock_rw_unlock(&z->lock); }
			return 0;
		}
		lock_rw_unlock(&z->lock);
	}

	/* block AS112 zones, unless asked not to */
	if(!cfg->unblock_lan_zones) {
		for(zstr = as112_zones; *zstr; zstr++) {
			if(!add_as112_default(zones, cfg, *zstr)) {
				log_err("out of memory adding default zone");
				return 0;
			}
		}
	}
	return 1;
}
Example #19
/** enter implicit transparent zone for local-data: without local-zone: */
static int
lz_setup_implicit(struct local_zones* zones, struct config_file* cfg)
{
	/* walk over all items that have no parent zone and find
	 * the name that covers them all (could be the root) and
	 * add that as a transparent zone */
	struct config_strlist* p;
	int have_name = 0;
	int have_other_classes = 0;
	uint16_t dclass = 0;
	uint8_t* nm = 0;
	size_t nmlen = 0;
	int nmlabs = 0;
	int match = 0; /* number of labels match count */

	init_parents(zones); /* to enable local_zones_lookup() */
	for(p = cfg->local_data; p; p = p->next) {
		uint8_t* rr_name;
		uint16_t rr_class;
		size_t len;
		int labs;
		if(!get_rr_nameclass(p->str, &rr_name, &rr_class)) {
			log_err("Bad local-data RR %s", p->str);
			return 0;
		}
		labs = dname_count_size_labels(rr_name, &len);
		lock_rw_rdlock(&zones->lock);
		if(!local_zones_lookup(zones, rr_name, len, labs, rr_class)) {
			if(!have_name) {
				dclass = rr_class;
				nm = rr_name;
				nmlen = len;
				nmlabs = labs;
				match = labs;
				have_name = 1;
			} else {
				int m;
				if(rr_class != dclass) {
					/* process other classes later */
					free(rr_name);
					have_other_classes = 1;
					lock_rw_unlock(&zones->lock);
					continue;
				}
				/* find smallest shared topdomain */
				(void)dname_lab_cmp(nm, nmlabs, 
					rr_name, labs, &m);
				free(rr_name);
				if(m < match)
					match = m;
			}
		} else free(rr_name);
		lock_rw_unlock(&zones->lock);
	}
	if(have_name) {
		uint8_t* n2;
		struct local_zone* z;
		/* allocate zone of smallest shared topdomain to contain em */
		n2 = nm;
		dname_remove_labels(&n2, &nmlen, nmlabs - match);
		n2 = memdup(n2, nmlen);
		free(nm);
		if(!n2) {
			log_err("out of memory");
			return 0;
		}
		log_nametypeclass(VERB_ALGO, "implicit transparent local-zone", 
			n2, 0, dclass);
		if(!(z=lz_enter_zone_dname(zones, n2, nmlen, match, 
			local_zone_transparent, dclass))) {
			return 0;
		}
		lock_rw_unlock(&z->lock);
	}
	if(have_other_classes) { 
		/* restart to setup other class */
		return lz_setup_implicit(zones, cfg);
	}
	return 1;
}
Example #20
int
respip_rewrite_reply(const struct query_info* qinfo,
	const struct respip_client_info* cinfo, const struct reply_info* rep,
	struct reply_info** new_repp, struct respip_action_info* actinfo,
	struct ub_packed_rrset_key** alias_rrset, int search_only,
	struct regional* region)
{
	const uint8_t* ctaglist;
	size_t ctaglen;
	const uint8_t* tag_actions;
	size_t tag_actions_size;
	struct config_strlist** tag_datas;
	size_t tag_datas_size;
	struct view* view = NULL;
	struct respip_set* ipset = NULL;
	size_t rrset_id = 0;
	enum respip_action action = respip_none;
	int tag = -1;
	const struct resp_addr* raddr = NULL;
	int ret = 1;
	struct ub_packed_rrset_key* redirect_rrset = NULL;

	if(!cinfo)
		goto done;
	ctaglist = cinfo->taglist;
	ctaglen = cinfo->taglen;
	tag_actions = cinfo->tag_actions;
	tag_actions_size = cinfo->tag_actions_size;
	tag_datas = cinfo->tag_datas;
	tag_datas_size = cinfo->tag_datas_size;
	view = cinfo->view;
	ipset = cinfo->respip_set;

	/** Try to use response-ip config from the view first; use
	  * global response-ip config if we don't have the view or we don't
	  * have the matching per-view config (and the view allows the use
	  * of global data in this case).
	  * Note that we lock the view even if we only use view members that
	  * currently don't change after creation.  This is for safety for
	  * future possible changes as the view documentation seems to expect
	  * any of its member can change in the view's lifetime.
	  * Note also that we assume 'view' is valid in this function, which
	  * should be safe (see unbound bug #1191) */
	if(view) {
		lock_rw_rdlock(&view->lock);
		if(view->respip_set) {
			if((raddr = respip_addr_lookup(rep,
				&view->respip_set->ip_tree, &rrset_id))) {
				/** for per-view respip directives the action
				 * can only be direct (i.e. not tag-based) */
				action = raddr->action;
			}
		}
		if(!raddr && !view->isfirst)
			goto done;
	}
	if(!raddr && ipset && (raddr = respip_addr_lookup(rep, &ipset->ip_tree,
		&rrset_id))) {
		action = (enum respip_action)local_data_find_tag_action(
			raddr->taglist, raddr->taglen, ctaglist, ctaglen,
			tag_actions, tag_actions_size,
			(enum localzone_type)raddr->action, &tag,
			ipset->tagname, ipset->num_tags);
	}
	if(raddr && !search_only) {
		int result = 0;

		/* first, see if we have response-ip or tag action for the
		 * action except for 'always' variants. */
		if(action != respip_always_refuse
			&& action != respip_always_transparent
			&& action != respip_always_nxdomain
			&& (result = respip_data_answer(raddr, action,
			qinfo->qtype, rep, rrset_id, new_repp, tag, tag_datas,
			tag_datas_size, ipset->tagname, ipset->num_tags,
			&redirect_rrset, region)) < 0) {
			ret = 0;
			goto done;
		}

		/* if no action data applied, take action specific to the
		 * action without data. */
		if(!result && !respip_nodata_answer(qinfo->qtype, action, rep,
			rrset_id, new_repp, region)) {
			ret = 0;
			goto done;
		}
	}
  done:
	if(view) {
		lock_rw_unlock(&view->lock);
	}
	if(ret) {
		/* If we're redirecting the original answer to a
		 * CNAME, record the CNAME rrset so the caller can take
		 * the appropriate action.  Note that we don't check the
		 * action type; it should normally be 'redirect', but it
		 * can be of other type when a data-dependent tag action
		 * uses redirect response-ip data.
		 */
		if(redirect_rrset &&
			redirect_rrset->rk.type == ntohs(LDNS_RR_TYPE_CNAME) &&
			qinfo->qtype != LDNS_RR_TYPE_ANY)
			*alias_rrset = redirect_rrset;
		/* on success, populate respip result structure */
		ret = populate_action_info(actinfo, action, raddr,
			redirect_rrset, tag, ipset, search_only, region);
	}
	return ret;
}
Example #21
int
infra_get_lame_rtt(struct infra_cache* infra,
        struct sockaddr_storage* addr, socklen_t addrlen,
        uint8_t* name, size_t namelen, uint16_t qtype, 
	int* lame, int* dnsseclame, int* reclame, int* rtt, time_t timenow)
{
	struct infra_data* host;
	struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen,
		name, namelen, 0);
	if(!e) 
		return 0;
	host = (struct infra_data*)e->data;
	*rtt = rtt_unclamped(&host->rtt);
	if(host->rtt.rto >= PROBE_MAXRTO && timenow < host->probedelay
		&& rtt_notimeout(&host->rtt)*4 <= host->rtt.rto) {
		/* single probe for this domain, and we are not probing */
		/* unless the query type allows a probe to happen */
		if(qtype == LDNS_RR_TYPE_A) {
			if(host->timeout_A >= TIMEOUT_COUNT_MAX)
				*rtt = USEFUL_SERVER_TOP_TIMEOUT;
			else	*rtt = USEFUL_SERVER_TOP_TIMEOUT-1000;
		} else if(qtype == LDNS_RR_TYPE_AAAA) {
			if(host->timeout_AAAA >= TIMEOUT_COUNT_MAX)
				*rtt = USEFUL_SERVER_TOP_TIMEOUT;
			else	*rtt = USEFUL_SERVER_TOP_TIMEOUT-1000;
		} else {
			if(host->timeout_other >= TIMEOUT_COUNT_MAX)
				*rtt = USEFUL_SERVER_TOP_TIMEOUT;
			else	*rtt = USEFUL_SERVER_TOP_TIMEOUT-1000;
		}
	}
	if(timenow > host->ttl) {
		/* expired entry */
		/* see if this can be a re-probe of an unresponsive server */
		/* minus 1000 because that is outside of the RTTBAND, so
		 * blacklisted servers stay blacklisted if this is chosen */
		if(host->rtt.rto >= USEFUL_SERVER_TOP_TIMEOUT) {
			lock_rw_unlock(&e->lock);
			*rtt = USEFUL_SERVER_TOP_TIMEOUT-1000;
			*lame = 0;
			*dnsseclame = 0;
			*reclame = 0;
			return 1;
		}
		lock_rw_unlock(&e->lock);
		return 0;
	}
	/* check lameness first */
	if(host->lame_type_A && qtype == LDNS_RR_TYPE_A) {
		lock_rw_unlock(&e->lock);
		*lame = 1;
		*dnsseclame = 0;
		*reclame = 0;
		return 1;
	} else if(host->lame_other && qtype != LDNS_RR_TYPE_A) {
		lock_rw_unlock(&e->lock);
		*lame = 1;
		*dnsseclame = 0;
		*reclame = 0;
		return 1;
	} else if(host->isdnsseclame) {
		lock_rw_unlock(&e->lock);
		*lame = 0;
		*dnsseclame = 1;
		*reclame = 0;
		return 1;
	} else if(host->rec_lame) {
		lock_rw_unlock(&e->lock);
		*lame = 0;
		*dnsseclame = 0;
		*reclame = 1;
		return 1;
	}
	/* no lameness for this type of query */
	lock_rw_unlock(&e->lock);
	*lame = 0;
	*dnsseclame = 0;
	*reclame = 0;
	return 1;
}
Example #22
File: dns.c Project: coyizumi/cs111
struct dns_msg*
dns_cache_lookup(struct module_env* env,
                 uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
                 uint16_t flags, struct regional* region, struct regional* scratch)
{
    struct lruhash_entry* e;
    struct query_info k;
    hashvalue_t h;
    time_t now = *env->now;
    struct ub_packed_rrset_key* rrset;

    /* lookup first, this has both NXdomains and ANSWER responses */
    k.qname = qname;
    k.qname_len = qnamelen;
    k.qtype = qtype;
    k.qclass = qclass;
    h = query_info_hash(&k, flags);
    e = slabhash_lookup(env->msg_cache, h, &k, 0);
    if(e) {
        struct msgreply_entry* key = (struct msgreply_entry*)e->key;
        struct reply_info* data = (struct reply_info*)e->data;
        struct dns_msg* msg = tomsg(env, &key->key, data, region, now,
                                    scratch);
        if(msg) {
            lock_rw_unlock(&e->lock);
            return msg;
        }
        /* could be msg==NULL; due to TTL or not all rrsets available */
        lock_rw_unlock(&e->lock);
    }

    /* see if a DNAME exists. Checked for first, to enforce that DNAMEs
     * are more important, the CNAME is resynthesized and thus
     * consistent with the DNAME */
    if( (rrset=find_closest_of_type(env, qname, qnamelen, qclass, now,
                                    LDNS_RR_TYPE_DNAME, 1))) {
        /* synthesize a DNAME+CNAME message based on this */
        struct dns_msg* msg = synth_dname_msg(rrset, region, now, &k);
        if(msg) {
            lock_rw_unlock(&rrset->entry.lock);
            return msg;
        }
        lock_rw_unlock(&rrset->entry.lock);
    }

    /* see if we have CNAME for this domain,
     * but not for DS records (which are part of the parent) */
    if( qtype != LDNS_RR_TYPE_DS &&
            (rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
                                      LDNS_RR_TYPE_CNAME, qclass, 0, now, 0))) {
        struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
        if(msg) {
            lock_rw_unlock(&rrset->entry.lock);
            return msg;
        }
        lock_rw_unlock(&rrset->entry.lock);
    }

    /* construct DS, DNSKEY, DLV messages from rrset cache. */
    if((qtype == LDNS_RR_TYPE_DS || qtype == LDNS_RR_TYPE_DNSKEY ||
            qtype == LDNS_RR_TYPE_DLV) &&
            (rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
                                      qtype, qclass, 0, now, 0))) {
        /* if the rrset is from the additional section, and the
         * signatures have fallen off, then do not synthesize a msg
         * instead, allow a full query for signed results to happen.
         * Forego all rrset data from additional section, because
         * some signatures may not be present and cause validation
         * failure.
         */
        struct packed_rrset_data *d = (struct packed_rrset_data*)
                                      rrset->entry.data;
        if(d->trust != rrset_trust_add_noAA &&
                d->trust != rrset_trust_add_AA &&
                (qtype == LDNS_RR_TYPE_DS ||
                 (d->trust != rrset_trust_auth_noAA
                  && d->trust != rrset_trust_auth_AA) )) {
            struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
            if(msg) {
                lock_rw_unlock(&rrset->entry.lock);
                return msg;
            }
        }
        lock_rw_unlock(&rrset->entry.lock);
    }

    /* stop downwards cache search on NXDOMAIN.
     * Empty nonterminals are NOERROR, so an NXDOMAIN for foo
     * means bla.foo also does not exist.  The DNSSEC proofs are
     * the same.  We search upwards for NXDOMAINs. */
    if(env->cfg->harden_below_nxdomain)
        while(!dname_is_root(k.qname)) {
            dname_remove_label(&k.qname, &k.qname_len);
            h = query_info_hash(&k, flags);
            e = slabhash_lookup(env->msg_cache, h, &k, 0);
            if(e) {
                struct reply_info* data = (struct reply_info*)e->data;
                struct dns_msg* msg;
                if(FLAGS_GET_RCODE(data->flags) == LDNS_RCODE_NXDOMAIN
                        && data->security == sec_status_secure
                        && (msg=tomsg(env, &k, data, region, now, scratch))) {
                    lock_rw_unlock(&e->lock);
                    msg->qinfo.qname=qname;
                    msg->qinfo.qname_len=qnamelen;
                    /* check that DNSSEC really works out */
                    msg->rep->security = sec_status_unchecked;
                    return msg;
                }
                lock_rw_unlock(&e->lock);
            }
        }

    return NULL;
}
Example #23
void local_zones_print(struct local_zones* zones)
{
	struct local_zone* z;
	lock_rw_rdlock(&zones->lock);
	log_info("number of auth zones %u", (unsigned)zones->ztree.count);
	RBTREE_FOR(z, struct local_zone*, &zones->ztree) {
		lock_rw_rdlock(&z->lock);
		switch(z->type) {
		case local_zone_deny:
			log_nametypeclass(0, "deny zone", 
				z->name, 0, z->dclass);
			break;
		case local_zone_refuse:
			log_nametypeclass(0, "refuse zone", 
				z->name, 0, z->dclass);
			break;
		case local_zone_redirect:
			log_nametypeclass(0, "redirect zone", 
				z->name, 0, z->dclass);
			break;
		case local_zone_transparent:
			log_nametypeclass(0, "transparent zone", 
				z->name, 0, z->dclass);
			break;
		case local_zone_typetransparent:
			log_nametypeclass(0, "typetransparent zone", 
				z->name, 0, z->dclass);
			break;
		case local_zone_static:
			log_nametypeclass(0, "static zone", 
				z->name, 0, z->dclass);
			break;
		case local_zone_inform:
			log_nametypeclass(0, "inform zone", 
				z->name, 0, z->dclass);
			break;
		case local_zone_inform_deny:
			log_nametypeclass(0, "inform_deny zone", 
				z->name, 0, z->dclass);
			break;
		case local_zone_always_transparent:
			log_nametypeclass(0, "always_transparent zone", 
				z->name, 0, z->dclass);
			break;
		case local_zone_always_refuse:
			log_nametypeclass(0, "always_refuse zone", 
				z->name, 0, z->dclass);
			break;
		case local_zone_always_nxdomain:
			log_nametypeclass(0, "always_nxdomain zone", 
				z->name, 0, z->dclass);
			break;
		default:
			log_nametypeclass(0, "badtyped zone", 
				z->name, 0, z->dclass);
			break;
		}
		local_zone_out(z);
		lock_rw_unlock(&z->lock);
	}
	lock_rw_unlock(&zones->lock);
}
Example #24
int 
infra_host(struct infra_cache* infra, struct sockaddr_storage* addr,
        socklen_t addrlen, uint8_t* nm, size_t nmlen, time_t timenow,
	int* edns_vs, uint8_t* edns_lame_known, int* to)
{
	struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen,
		nm, nmlen, 0);
	struct infra_data* data;
	int wr = 0;
	if(e && ((struct infra_data*)e->data)->ttl < timenow) {
		/* it expired, try to reuse existing entry */
		int old = ((struct infra_data*)e->data)->rtt.rto;
		uint8_t tA = ((struct infra_data*)e->data)->timeout_A;
		uint8_t tAAAA = ((struct infra_data*)e->data)->timeout_AAAA;
		uint8_t tother = ((struct infra_data*)e->data)->timeout_other;
		lock_rw_unlock(&e->lock);
		e = infra_lookup_nottl(infra, addr, addrlen, nm, nmlen, 1);
		if(e) {
			/* if its still there we have a writelock, init */
			/* re-initialise */
			/* do not touch lameness, it may be valid still */
			data_entry_init(infra, e, timenow);
			wr = 1;
			/* TOP_TIMEOUT remains on reuse */
			if(old >= USEFUL_SERVER_TOP_TIMEOUT) {
				((struct infra_data*)e->data)->rtt.rto
					= USEFUL_SERVER_TOP_TIMEOUT;
				((struct infra_data*)e->data)->timeout_A = tA;
				((struct infra_data*)e->data)->timeout_AAAA = tAAAA;
				((struct infra_data*)e->data)->timeout_other = tother;
			}
		}
	}
	if(!e) {
		/* insert new entry */
		if(!(e = new_entry(infra, addr, addrlen, nm, nmlen, timenow)))
			return 0;
		data = (struct infra_data*)e->data;
		*edns_vs = data->edns_version;
		*edns_lame_known = data->edns_lame_known;
		*to = rtt_timeout(&data->rtt);
		slabhash_insert(infra->hosts, e->hash, e, data, NULL);
		return 1;
	}
	/* use existing entry */
	data = (struct infra_data*)e->data;
	*edns_vs = data->edns_version;
	*edns_lame_known = data->edns_lame_known;
	*to = rtt_timeout(&data->rtt);
	if(*to >= PROBE_MAXRTO && rtt_notimeout(&data->rtt)*4 <= *to) {
		/* delay other queries, this is the probe query */
		if(!wr) {
			lock_rw_unlock(&e->lock);
			e = infra_lookup_nottl(infra, addr,addrlen,nm,nmlen, 1);
			if(!e) { /* flushed from cache real fast, no use to
				allocate just for the probedelay */
				return 1;
			}
			data = (struct infra_data*)e->data;
		}
		/* add 999 to round up the timeout value from msec to sec,
		 * then add a whole second so it is certain that this probe
		 * has timed out before the next is allowed */
		data->probedelay = timenow + ((*to)+1999)/1000;
	}
	lock_rw_unlock(&e->lock);
	return 1;
}
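A hedged restatement of the probe-delay rounding used near the end of the function above (illustrative helper only, not part of Unbound):
static time_t
probe_delay_until(time_t now, int timeout_msec)
{
	/* +999 rounds the millisecond timeout up to whole seconds; the
	 * extra +1000 guarantees this probe has timed out before the next
	 * probe is allowed */
	return now + (timeout_msec + 1999) / 1000;
}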
Example #25
int val_neg_dlvlookup(struct val_neg_cache* neg, uint8_t* qname, size_t len,
        uint16_t qclass, struct rrset_cache* rrset_cache, uint32_t now)
{
	/* lookup closest zone */
	struct val_neg_zone* zone;
	struct val_neg_data* data;
	int labs;
	struct ub_packed_rrset_key* nsec;
	struct packed_rrset_data* d;
	uint32_t flags;
	uint8_t* wc;
	struct query_info qinfo;
	if(!neg) return 0;

	log_nametypeclass(VERB_ALGO, "negcache dlvlookup", qname, 
		LDNS_RR_TYPE_DLV, qclass);
	
	labs = dname_count_labels(qname);
	lock_basic_lock(&neg->lock);
	zone = neg_closest_zone_parent(neg, qname, len, labs, qclass);
	while(zone && !zone->in_use)
		zone = zone->parent;
	if(!zone) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	log_nametypeclass(VERB_ALGO, "negcache zone", zone->name, 0, 
		zone->dclass);

	/* DLV is defined to use NSEC only */
	if(zone->nsec3_hash) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}

	/* lookup closest data record */
	(void)neg_closest_data(zone, qname, len, labs, &data);
	while(data && !data->in_use)
		data = data->parent;
	if(!data) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	log_nametypeclass(VERB_ALGO, "negcache rr", data->name, 
		LDNS_RR_TYPE_NSEC, zone->dclass);

	/* lookup rrset in rrset cache */
	flags = 0;
	if(query_dname_compare(data->name, zone->name) == 0)
		flags = PACKED_RRSET_NSEC_AT_APEX;
	nsec = rrset_cache_lookup(rrset_cache, data->name, data->len,
		LDNS_RR_TYPE_NSEC, zone->dclass, flags, now, 0);

	/* check if secure and TTL ok */
	if(!nsec) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	d = (struct packed_rrset_data*)nsec->entry.data;
	if(!d || now > d->ttl) {
		lock_rw_unlock(&nsec->entry.lock);
		/* delete data record if expired */
		neg_delete_data(neg, data);
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	if(d->security != sec_status_secure) {
		lock_rw_unlock(&nsec->entry.lock);
		neg_delete_data(neg, data);
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	verbose(VERB_ALGO, "negcache got secure rrset");

	/* check NSEC security */
	/* check if NSEC proves no DLV type exists */
	/* check if NSEC proves NXDOMAIN for qname */
	qinfo.qname = qname;
	qinfo.qtype = LDNS_RR_TYPE_DLV;
	qinfo.qclass = qclass;
	if(!nsec_proves_nodata(nsec, &qinfo, &wc) &&
		!val_nsec_proves_name_error(nsec, qname)) {
		/* the NSEC is not a denial for the DLV */
		lock_rw_unlock(&nsec->entry.lock);
		lock_basic_unlock(&neg->lock);
		verbose(VERB_ALGO, "negcache not proven");
		return 0;
	}
	/* so the NSEC was a NODATA proof, or NXDOMAIN proof. */

	/* no need to check for wildcard NSEC; no wildcards in DLV repos */
	/* no need to lookup SOA record for client; no response message */

	lock_rw_unlock(&nsec->entry.lock);
	/* if OK touch the LRU for neg_data element */
	neg_lru_touch(neg, data);
	lock_basic_unlock(&neg->lock);
	verbose(VERB_ALGO, "negcache DLV denial proven");
	return 1;
}
Example #26
/** Fill TYPE_ANY response with some data from cache */
static struct dns_msg*
fill_any(struct module_env* env,
	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
	struct regional* region)
{
	time_t now = *env->now;
	struct dns_msg* msg = NULL;
	uint16_t lookup[] = {LDNS_RR_TYPE_A, LDNS_RR_TYPE_AAAA,
		LDNS_RR_TYPE_MX, LDNS_RR_TYPE_SOA, LDNS_RR_TYPE_NS,
		LDNS_RR_TYPE_DNAME, 0};
	int i, num=6; /* number of RR types to look up */
	log_assert(lookup[num] == 0);

	if(env->cfg->deny_any) {
		/* return empty message */
		msg = dns_msg_create(qname, qnamelen, qtype, qclass,
			region, 0);
		if(!msg) {
			return NULL;
		}
		/* set NOTIMPL for RFC 8482 */
		msg->rep->flags |= LDNS_RCODE_NOTIMPL;
		msg->rep->security = sec_status_indeterminate;
		return msg;
	}

	for(i=0; i<num; i++) {
		/* look up this RR for inclusion in type ANY response */
		struct ub_packed_rrset_key* rrset = rrset_cache_lookup(
			env->rrset_cache, qname, qnamelen, lookup[i],
			qclass, 0, now, 0);
		struct packed_rrset_data *d;
		if(!rrset)
			continue;

		/* only if rrset from answer section */
		d = (struct packed_rrset_data*)rrset->entry.data;
		if(d->trust == rrset_trust_add_noAA ||
			d->trust == rrset_trust_auth_noAA ||
			d->trust == rrset_trust_add_AA ||
			d->trust == rrset_trust_auth_AA) {
			lock_rw_unlock(&rrset->entry.lock);
			continue;
		}

		/* create msg if none */
		if(!msg) {
			msg = dns_msg_create(qname, qnamelen, qtype, qclass,
				region, (size_t)(num-i));
			if(!msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return NULL;
			}
		}

		/* add RRset to response */
		if(!dns_msg_ansadd(msg, region, rrset, now)) {
			lock_rw_unlock(&rrset->entry.lock);
			return NULL;
		}
		lock_rw_unlock(&rrset->entry.lock);
	}
	return msg;
}