/**
 * Store potential poison in the cache (only if hardening is disabled).
 * The rrset is stored in the cache but removed from the message, so
 * that it is used for infrastructure purposes but not returned to the
 * client.
 * @param pkt: packet
 * @param msg: message parsed
 * @param env: environment with cache
 * @param rrset: to store.
 */
static void
store_rrset(ldns_buffer* pkt, struct msg_parse* msg, struct module_env* env,
            struct rrset_parse* rrset)
{
    struct ub_packed_rrset_key* k;
    struct packed_rrset_data* d;
    struct rrset_ref ref;
    time_t now = *env->now;

    k = alloc_special_obtain(env->alloc);
    if(!k)
        return;
    k->entry.data = NULL;
    if(!parse_copy_decompress_rrset(pkt, msg, rrset, NULL, k)) {
        alloc_special_release(env->alloc, k);
        return;
    }
    d = (struct packed_rrset_data*)k->entry.data;
    packed_rrset_ttl_add(d, now);
    ref.key = k;
    ref.id = k->id;
    /* ignore ret: if it was in the cache, ref updated */
    (void)rrset_cache_update(env->rrset_cache, &ref,
                             env->alloc, now);
}
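Before the insert, packed_rrset_ttl_add shifts the parsed TTLs from relative (seconds, as on the wire) to absolute (expiry timestamps). A minimal sketch of that conversion, using a reduced stand-in type (the real packed_rrset_data carries more fields, including an rrsig_count that is added to count):

#include <stddef.h>
#include <time.h>

/* Reduced stand-in for the TTL-bearing fields of packed_rrset_data. */
struct ttl_fields {
	time_t ttl;	/* rrset-wide TTL */
	size_t count;	/* number of RRs (plus RRSIGs in the real struct) */
	time_t* rr_ttl;	/* per-RR TTLs */
};

/* Shift relative TTLs to absolute expiry times, so cache lookups can
 * compare them directly against the current *env->now. */
static void
ttl_fields_add(struct ttl_fields* d, time_t now)
{
	size_t i;
	d->ttl += now;
	for(i = 0; i < d->count; i++)
		d->rr_ttl[i] += now;
}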
Example #2
/** move entry into cache */
static int
move_into_cache(struct ub_packed_rrset_key* k, 
	struct packed_rrset_data* d, struct worker* worker)
{
	struct ub_packed_rrset_key* ak;
	struct packed_rrset_data* ad;
	size_t s, i, num = d->count + d->rrsig_count;
	struct rrset_ref ref;
	uint8_t* p;

	ak = alloc_special_obtain(&worker->alloc);
	if(!ak) {
		log_warn("error out of memory");
		return 0;
	}
	ak->entry.data = NULL;
	ak->rk = k->rk;
	ak->entry.hash = rrset_key_hash(&k->rk);
	ak->rk.dname = (uint8_t*)memdup(k->rk.dname, k->rk.dname_len);
	if(!ak->rk.dname) {
		log_warn("error out of memory");
		ub_packed_rrset_parsedelete(ak, &worker->alloc);
		return 0;
	}
	s = sizeof(*ad) + (sizeof(size_t) + sizeof(uint8_t*) + 
		sizeof(time_t))* num;
	for(i=0; i<num; i++)
		s += d->rr_len[i];
	ad = (struct packed_rrset_data*)malloc(s);
	if(!ad) {
		log_warn("error out of memory");
		ub_packed_rrset_parsedelete(ak, &worker->alloc);
		return 0;
	}
	p = (uint8_t*)ad;
	memmove(p, d, sizeof(*ad));
	p += sizeof(*ad);
	memmove(p, &d->rr_len[0], sizeof(size_t)*num);
	p += sizeof(size_t)*num;
	memmove(p, &d->rr_data[0], sizeof(uint8_t*)*num);
	p += sizeof(uint8_t*)*num;
	memmove(p, &d->rr_ttl[0], sizeof(time_t)*num);
	p += sizeof(time_t)*num;
	for(i=0; i<num; i++) {
		memmove(p, d->rr_data[i], d->rr_len[i]);
		p += d->rr_len[i];
	}
	packed_rrset_ptr_fixup(ad);

	ak->entry.data = ad;

	ref.key = ak;
	ref.id = ak->id;
	(void)rrset_cache_update(worker->env.rrset_cache, &ref,
		&worker->alloc, *worker->env.now);
	return 1;
}
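The memmove sequence above lays the header, rr_len[], rr_data[], rr_ttl[] and the RR wire data out in one contiguous allocation, and packed_rrset_ptr_fixup then points the interior array pointers back into that block. A self-contained sketch of that fixup pattern, assuming the same layout (the struct below is a reduced stand-in for packed_rrset_data):

#include <stdint.h>
#include <stddef.h>
#include <time.h>

/* Reduced stand-in: header fields followed, in the same allocation, by
 * rr_len[num], rr_data[num], rr_ttl[num], then the RR wire bytes. */
struct contig_rrset {
	size_t count, rrsig_count;
	size_t* rr_len;
	uint8_t** rr_data;
	time_t* rr_ttl;
};

/* Recompute the interior pointers after the struct was copied into a
 * fresh contiguous block, mirroring what packed_rrset_ptr_fixup does. */
static void
contig_ptr_fixup(struct contig_rrset* d)
{
	size_t i, num = d->count + d->rrsig_count;
	uint8_t* p = (uint8_t*)d + sizeof(*d);
	d->rr_len = (size_t*)p;		p += sizeof(size_t) * num;
	d->rr_data = (uint8_t**)p;	p += sizeof(uint8_t*) * num;
	d->rr_ttl = (time_t*)p;		p += sizeof(time_t) * num;
	for(i = 0; i < num; i++) {	/* wire data follows the arrays */
		d->rr_data[i] = p;
		p += d->rr_len[i];
	}
}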
Example #3
int
dns_cache_store(struct module_env* env, struct query_info* msgqinf,
	struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
	struct regional* region, uint32_t flags)
{
	struct reply_info* rep = NULL;
	/* allocate with malloc proper (not in the region, like the msg is) */
	rep = reply_info_copy(msgrep, env->alloc, NULL);
	if(!rep)
		return 0;
	/* TTL must be relative, i.e. 0..86400, not time(0)+86400;
	 * env->now is added to the message and RRsets in this routine. */
	/* the leeway is used to invalidate other rrsets earlier */

	if(is_referral) {
		/* store rrsets */
		struct rrset_ref ref;
		size_t i;
		for(i=0; i<rep->rrset_count; i++) {
			packed_rrset_ttl_add((struct packed_rrset_data*)
				rep->rrsets[i]->entry.data, *env->now);
			ref.key = rep->rrsets[i];
			ref.id = rep->rrsets[i]->id;
			/* ignore ret: if it was in the cache, ref updated */
			/* no leeway for type NS */
			(void)rrset_cache_update(env->rrset_cache, &ref, 
				env->alloc, *env->now + 
				((ntohs(ref.key->rk.type)==LDNS_RR_TYPE_NS
				 && !pside) ? 0:leeway));
		}
		free(rep);
		return 1;
	} else {
		/* store msg, and rrsets */
		struct query_info qinf;
		hashvalue_type h;

		qinf = *msgqinf;
		qinf.qname = memdup(msgqinf->qname, msgqinf->qname_len);
		if(!qinf.qname) {
			reply_info_parsedelete(rep, env->alloc);
			return 0;
		}
		/* fixup flags to be sensible for a reply based on the cache */
		/* this module means that RA is available. It is an answer QR. 
		 * Not AA from cache. Not CD in cache (depends on client bit). */
		rep->flags |= (BIT_RA | BIT_QR);
		rep->flags &= ~(BIT_AA | BIT_CD);
		h = query_info_hash(&qinf, (uint16_t)flags);
		dns_cache_store_msg(env, &qinf, h, rep, leeway, pside, msgrep,
			flags, region);
		/* qname is used inside query_info_entrysetup, and set to 
		 * NULL. If it has not been used, free it. free(0) is safe. */
		free(qinf.qname);
	}
	return 1;
}
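The only subtle line in the referral branch is the timestamp handed to rrset_cache_update: inflating now by leeway makes cached entries that would expire within leeway seconds count as already stale, so the fresh copy wins; a child-side NS is exempt so the delegation TTL keeps counting down. That computation, isolated as a small sketch (rk.type is in network byte order, hence the ntohs):

#include <stdint.h>
#include <time.h>
#include <arpa/inet.h>	/* ntohs */

#define RR_TYPE_NS 2	/* numeric value of LDNS_RR_TYPE_NS */

/* Effective "now" passed to the cache update. Adding leeway makes
 * entries expiring within that window look stale, so they get replaced;
 * a child-side NS keeps its full TTL and is never given leeway. */
static time_t
update_time(uint16_t rk_type_net, int pside, time_t now, time_t leeway)
{
	if(ntohs(rk_type_net) == RR_TYPE_NS && !pside)
		return now;	/* no leeway for type NS */
	return now + leeway;
}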
Example #4
void 
iter_store_parentside_rrset(struct module_env* env, 
	struct ub_packed_rrset_key* rrset)
{
	struct rrset_ref ref;
	rrset = packed_rrset_copy_alloc(rrset, env->alloc, *env->now);
	if(!rrset) {
		log_err("malloc failure in store_parentside_rrset");
		return;
	}
	rrset->rk.flags |= PACKED_RRSET_PARENT_SIDE;
	rrset->entry.hash = rrset_key_hash(&rrset->rk);
	ref.key = rrset;
	ref.id = rrset->id;
	/* ignore ret: if it was in the cache, ref updated */
	(void)rrset_cache_update(env->rrset_cache, &ref, env->alloc, *env->now);
}
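Setting PACKED_RRSET_PARENT_SIDE before recomputing entry.hash gives the parent-side copy its own cache slot, separate from any authoritative child-side rrset of the same name and type. A toy illustration of that effect, with FNV-1a standing in for the real rrset_key_hash (which, as assumed here, mixes the flags word into the hash along with the name and type):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PARENT_SIDE 0x01	/* stand-in for PACKED_RRSET_PARENT_SIDE */

/* FNV-1a, a stand-in for the real hash over the rrset key material. */
static uint32_t
mix(const uint8_t* p, size_t n, uint32_t h)
{
	size_t i;
	for(i = 0; i < n; i++) { h ^= p[i]; h *= 16777619u; }
	return h;
}

static uint32_t
key_hash(const uint8_t* dname, size_t dlen, uint16_t type, uint32_t flags)
{
	uint32_t h = 2166136261u;
	h = mix((const uint8_t*)&type, sizeof(type), h);
	h = mix((const uint8_t*)&flags, sizeof(flags), h);
	return mix(dname, dlen, h);
}

int
main(void)
{
	const uint8_t name[] = "\007example\003com";	/* wire format */
	/* same name and type, but the flag selects a different slot */
	printf("child  %u\n", key_hash(name, sizeof(name), 2, 0));
	printf("parent %u\n", key_hash(name, sizeof(name), 2, PARENT_SIDE));
	return 0;
}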
Example #5
/** store rrsets in the rrset cache. 
 * @param env: module environment with caches.
 * @param rep: contains list of rrsets to store.
 * @param now: current time.
 * @param leeway: during prefetch, how much leeway to apply when
 * 	updating TTLs. This makes rrsets (other than type NS) time out
 * 	sooner, so they get updated with a new full TTL.
 * 	Type NS does not get this, because it must not be refreshed from
 * 	the child domain, but must keep counting down properly.
 * @param pside: if the data comes from a parent-side discovered NS, so
 * 	that the NS rrset may be updated in a prefetch situation
 * 	(without becoming sticky).
 * @param qrep: update rrsets here if cache is better
 * @param region: for qrep allocs.
 */
static void
store_rrsets(struct module_env* env, struct reply_info* rep, time_t now,
	time_t leeway, int pside, struct reply_info* qrep,
	struct regional* region)
{
	size_t i;
	/* see if rrset already exists in cache, if not insert it. */
	for(i=0; i<rep->rrset_count; i++) {
		rep->ref[i].key = rep->rrsets[i];
		rep->ref[i].id = rep->rrsets[i]->id;
		/* update ref if it was in the cache */
		switch(rrset_cache_update(env->rrset_cache, &rep->ref[i],
			env->alloc, now + ((ntohs(rep->ref[i].key->rk.type)==
			LDNS_RR_TYPE_NS && !pside)?0:leeway))) {
		case 0: /* ref unchanged, item inserted */
			break;
		case 2: /* ref updated, cache is superior */
			if(region) {
				struct ub_packed_rrset_key* ck;
				lock_rw_rdlock(&rep->ref[i].key->entry.lock);
				/* if deleted rrset, do not copy it */
				if(rep->ref[i].key->id == 0)
					ck = NULL;
				else	ck = packed_rrset_copy_region(
					rep->ref[i].key, region, now);
				lock_rw_unlock(&rep->ref[i].key->entry.lock);
				if(ck) {
					/* use cached copy if memory allows */
					qrep->rrsets[i] = ck;
				}
			}
			/* no break: also copy key item */
			/* the line below is matched by gcc regex and silences
			 * the fallthrough warning */
			/* fallthrough */
		case 1: /* ref updated, item inserted */
			rep->rrsets[i] = rep->ref[i].key;
		}
	}
}
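For reference, the three return values of rrset_cache_update that the switch above consumes, restated as a sketch (the enum names are hypothetical; the real function returns plain ints):

/* Hypothetical names for the int results of rrset_cache_update. */
enum rrset_update_result {
	REF_UNCHANGED_INSERTED = 0,	/* our item went in; ref untouched */
	REF_UPDATED_INSERTED = 1,	/* equal entry existed; ref now
					 * points at the cached key */
	REF_UPDATED_SUPERIOR = 2	/* cached entry is superior; caller
					 * may copy it out (qrep fixup) */
};

On results 1 and 2 the caller re-points rep->rrsets[i] at the cached key, so later references in the message use the live cache entry.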