Example #1
int 
parse_create_msg(sldns_buffer* pkt, struct msg_parse* msg,
	struct alloc_cache* alloc, struct query_info* qinf, 
	struct reply_info** rep, struct regional* region)
{
	log_assert(pkt && msg);
	if(!parse_create_qinfo(pkt, msg, qinf, region))
		return 0;
	if(!parse_create_repinfo(msg, rep, region))
		return 0;
	if(!repinfo_alloc_rrset_keys(*rep, alloc, region))
		return 0;
	if(!parse_copy_decompress(pkt, msg, *rep, region))
		return 0;
	return 1;
}
Example #2
/**
 * Fork and init the other threads. Main thread returns for special handling.
 * @param daemon: the daemon with other threads to fork.
 */
static void
daemon_start_others(struct daemon* daemon)
{
	int i;
	log_assert(daemon);
	verbose(VERB_ALGO, "start threads");
	/* skip i=0, is this thread */
	for(i=1; i<daemon->num; i++) {
		ub_thread_create(&daemon->workers[i]->thr_id,
			thread_start, daemon->workers[i]);
#ifdef THREADS_DISABLED
		/* close pipe end of child */
		tube_close_read(daemon->workers[i]->cmd);
#endif /* no threads */
	}
}
Example #3
/** stop checklocks */
void checklock_stop(void)
{
	if(key_created) {
		int i;
		key_deleted = 1;
		if(check_locking_order)
			fclose(thread_infos[0]->order_info);
		free(thread_infos[0]);
		thread_infos[0] = NULL;
		for(i = 0; i < THRDEBUG_MAX_THREADS; i++)
			log_assert(thread_infos[i] == NULL);
			/* should have been cleaned up. */
		LOCKRET(pthread_key_delete(thr_debug_key));
		key_created = 0;
	}
}
Example #4
/** calloc with stats */
void *unbound_stat_calloc(size_t nmemb, size_t size)
{
	size_t s;
	void* res;
	if(nmemb != 0 && INT_MAX/nmemb < size)
		return NULL; /* integer overflow check */
	s = (nmemb*size==0)?(size_t)1:nmemb*size;
	log_assert(s <= SIZE_MAX-16);
	res = calloc(1, s+16);
	if(!res) return NULL;
	log_info("stat %p=calloc(%u, %u)", res+16, (unsigned)nmemb, (unsigned)size);
	unbound_mem_alloc += s;
	memcpy(res, &s, sizeof(s));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
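A minimal sketch, not taken from the source above, of the matching release path implied by the 16-byte prefix: the requested size sits at offset 0 of the prefix, so a stat-tracking free can step back, read it, and account for the freed bytes. The names example_stat_free and unbound_mem_freed are invented for illustration.

#include <stdlib.h>
#include <string.h>

static unsigned long long unbound_mem_freed; /* hypothetical counter */

void example_stat_free(void *ptr)
{
	size_t s;
	char *base;
	if(!ptr) return;
	base = (char*)ptr - 16;       /* step back over the 16-byte prefix */
	memcpy(&s, base, sizeof(s));  /* the size was stored at offset 0 */
	unbound_mem_freed += s;       /* account for the released bytes */
	free(base);                   /* free the original calloc block */
}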
Example #5
void state_snoozed_t::enter(event_t *e)
{
  abstract_state_t::enter(e) ;
  log_assert(e->to_be_snoozed > 0) ;

  // compute next trigger time and jump back to queue
  if(e->flags & EventFlags::Aligned_Snooze)
    e->ticker = ticker_align(e->last_triggered, e->to_be_snoozed, machine->transition_started());
  else
    e->ticker = machine->transition_started() + e->to_be_snoozed ;

  e->flags |= EventFlags::Snoozing ;
  e->to_be_snoozed = 0 ; // doesn't need it anymore
  machine->state_scheduler->go_to(e) ;
  machine->invoke_process_transition_queue() ;
}
Example #6
int 
reply_info_answer_encode(struct query_info* qinf, struct reply_info* rep, 
	uint16_t id, uint16_t qflags, sldns_buffer* pkt, time_t timenow,
	int cached, struct regional* region, uint16_t udpsize, 
	struct edns_data* edns, int dnssec, int secure)
{
	uint16_t flags;
	int attach_edns = 1;

	if(!cached || rep->authoritative) {
		/* original flags, copy RD and CD bits from query. */
		flags = rep->flags | (qflags & (BIT_RD|BIT_CD)); 
	} else {
		/* remove AA bit, copy RD and CD bits from query. */
		flags = (rep->flags & ~BIT_AA) | (qflags & (BIT_RD|BIT_CD)); 
	}
	if(secure && (dnssec || (qflags&BIT_AD)))
		flags |= BIT_AD;
	/* restore AA bit if we have a local alias and the response can be
	 * authoritative.  Also clear AD bit if set as the local data is the
	 * primary answer. */
	if(qinf->local_alias &&
		(FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NOERROR ||
		FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NXDOMAIN)) {
		flags |= BIT_AA;
		flags &= ~BIT_AD;
	}
	log_assert(flags & BIT_QR); /* QR bit must be on in our replies */
	if(udpsize < LDNS_HEADER_SIZE)
		return 0;
	if(udpsize < LDNS_HEADER_SIZE + calc_edns_field_size(edns)) {
		/* packet too small to contain edns, omit it. */
		attach_edns = 0;
	} else {
		/* reserve space for edns record */
		udpsize -= calc_edns_field_size(edns);
	}

	if(!reply_info_encode(qinf, rep, id, flags, pkt, timenow, region,
		udpsize, dnssec)) {
		log_err("reply encode: out of memory");
		return 0;
	}
	if(attach_edns)
		attach_edns_record(pkt, edns);
	return 1;
}
Example #7
void
dt_msg_send_outside_query(dt_env_t *env,
			  struct sockaddr_storage *rsock,
			  enum comm_point_type cptype,
			  uint8_t *zone, size_t zone_len,
			  sldns_buffer *qmsg)
{
	dt_msg_t dm;
	struct timeval qtime;
	uint16_t qflags;

	gettimeofday(&qtime, NULL);
	qflags = sldns_buffer_read_u16_at(qmsg, 2);

	/* type */
	if (qflags & BIT_RD) {
		if (!env->log_forwarder_query_messages)
			return;
		dt_msg_init(env, &dm, DNSTAP__MESSAGE__TYPE__FORWARDER_QUERY);
	} else {
		if (!env->log_resolver_query_messages)
			return;
		dt_msg_init(env, &dm, DNSTAP__MESSAGE__TYPE__RESOLVER_QUERY);
	}

	/* query_zone */
	dm.m.query_zone.data = zone;
	dm.m.query_zone.len = zone_len;
	dm.m.has_query_zone = 1;

	/* query_time_sec, query_time_nsec */
	dt_fill_timeval(&qtime,
			&dm.m.query_time_sec, &dm.m.has_query_time_sec,
			&dm.m.query_time_nsec, &dm.m.has_query_time_nsec);

	/* query_message */
	dt_fill_buffer(qmsg, &dm.m.query_message, &dm.m.has_query_message);

	/* socket_family, socket_protocol, response_address, response_port */
	log_assert(cptype == comm_udp || cptype == comm_tcp);
	dt_msg_fill_net(&dm, rsock, cptype,
			&dm.m.response_address, &dm.m.has_response_address,
			&dm.m.response_port, &dm.m.has_response_port);

	if (dt_pack(&dm.d, &dm.buf, &dm.len_buf))
		dt_send(env, dm.buf, dm.len_buf);
}
Example #8
void sysfs_export(struct sysfs_def *def, int gpio)
{
	static char path[PATH_MAX];

	// ensure the GPIO is not already exported (soft_pwm crashes if reopened?)
	snprintf(path, PATH_MAX, "/sys/class/%s/%d", def->class, gpio);
	path[PATH_MAX-1] = '\0';
	log_assert(!directory_exists(path));

	snprintf(path, PATH_MAX, "/sys/class/%s/export", def->class);
	path[PATH_MAX-1] = '\0';

	static char buffer[15];
	sprintf(buffer, "%d", gpio);

	write_value(path, buffer);
}
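For context, a self-contained sketch, using only the standard library rather than the project's write_value helper, of what the export step amounts to: write the GPIO number into the class's export file. The function name demo_export_gpio and the hard-coded gpio class path are assumptions for illustration.

#include <stdio.h>

static int demo_export_gpio(int gpio)
{
	FILE *f = fopen("/sys/class/gpio/export", "w");
	if(!f)
		return -1;              /* no sysfs here, or not enough permission */
	fprintf(f, "%d", gpio);         /* the kernel parses the decimal number */
	return fclose(f) == 0 ? 0 : -1;
}

int main(void)
{
	return demo_export_gpio(18) == 0 ? 0 : 1;
}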
Example #9
static inline struct content_obj * content_copy(struct content_obj * orig)
{
    if (!orig) {
        return NULL;
    }

    struct content_obj * copy = malloc(sizeof(struct content_obj));
    log_assert(g_log, copy != NULL, "CS: failed to allocate content");
    copy->publisher = orig->publisher;
    copy->name = content_name_create(orig->name->full_name);
    copy->timestamp = orig->timestamp;
    copy->size = orig->size;
    copy->data = malloc(sizeof(uint8_t) * copy->size);
    memcpy(copy->data, orig->data, copy->size);

    return copy;
}
Example #10
void *unbound_stat_calloc_lite(size_t nmemb, size_t size, const char* file,
        int line, const char* func)
{
	size_t req;
	void* res;
	if(nmemb != 0 && INT_MAX/nmemb < size)
		return NULL; /* integer overflow check */
	req = nmemb * size;
	log_assert(req <= SIZE_MAX-(lite_pad*2+sizeof(size_t)));
	res = malloc(req+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &req, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0, req);
	memmove(res+lite_pad+req+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}
Example #11
const char *
wmo_err_ident(xbuf *buf)
{
	static char identbuf[128];
	char *cp = identbuf;
	xbuf clone[1];
	int conv;

	(void) memset(identbuf, 0, sizeof(identbuf));

	clone_xbuf(buf, clone, 0);
	
#if 0
	conv = sprintf(cp, "%8u ******************",
			 (unsigned) clone->cnt);
#else
	conv = sprintf(cp, "%8u",
			 (unsigned) clone->cnt);
#endif
	cp += conv;

	/* skip SOH CR CR NL */
	if(skipline(clone, 4) < 0)
		return identbuf;

	{
		wmo_start_t start;
		if( get_wmo_start(clone, &start) == NULL )
			return identbuf;
		conv = sprintf(cp, "         %03d", start.seqno);
		cp += conv;
	}

	{
		wmo_header_t hdr;
		dtime time;
		hdr.time = &time;
		if( get_wmo_header(clone, &hdr) == NULL )
			return identbuf;
		conv = sprintf(cp, "  %s", s_wmo_header(&hdr));
		cp += conv;
		log_assert(cp < &identbuf[sizeof(identbuf)]);
	}
	
	return identbuf;
}
Example #12
int iter_msg_from_zone(struct dns_msg* msg, struct delegpt* dp,
        enum response_type type, uint16_t dclass)
{
	if(!msg || !dp || !msg->rep || !dp->name)
		return 0;
	/* SOA RRset - always from reply zone */
	if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_SOA, dclass) ||
	   reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_SOA, dclass))
		return 1;
	if(type == RESPONSE_TYPE_REFERRAL) {
		size_t i;
		/* if it adds a single label, i.e. we expect .com,
		 * and referral to example.com. NS ... , then origin zone
		 * is .com. For a referral to sub.example.com. NS ... then
		 * we do not know, since example.com. may be in between. */
		for(i=0; i<msg->rep->an_numrrsets+msg->rep->ns_numrrsets; 
			i++) {
			struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
			if(ntohs(s->rk.type) == LDNS_RR_TYPE_NS &&
				ntohs(s->rk.rrset_class) == dclass) {
				int l = dname_count_labels(s->rk.dname);
				if(l == dp->namelabs + 1 &&
					dname_strict_subdomain(s->rk.dname,
					l, dp->name, dp->namelabs))
					return 1;
			}
		}
		return 0;
	}
	log_assert(type==RESPONSE_TYPE_ANSWER || type==RESPONSE_TYPE_CNAME);
	/* not a referral, and not lame delegation (upwards), so, 
	 * any NS rrset must be from the zone itself */
	if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_NS, dclass) ||
	   reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_NS, dclass))
		return 1;
	/* a DNSKEY set is expected at the zone apex as well */
	/* this is for 'minimal responses' for DNSKEYs */
	if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_DNSKEY, dclass))
		return 1;
	return 0;
}
Example #13
int
nsec3_hash_name(rbtree_type* table, struct regional* region, sldns_buffer* buf,
	struct ub_packed_rrset_key* nsec3, int rr, uint8_t* dname, 
	size_t dname_len, struct nsec3_cached_hash** hash)
{
	struct nsec3_cached_hash* c;
	struct nsec3_cached_hash looki;
#ifdef UNBOUND_DEBUG
	rbnode_type* n;
#endif
	int r;
	looki.node.key = &looki;
	looki.nsec3 = nsec3;
	looki.rr = rr;
	looki.dname = dname;
	looki.dname_len = dname_len;
	/* lookup first in cache */
	c = (struct nsec3_cached_hash*)rbtree_search(table, &looki);
	if(c) {
		*hash = c;
		return 1;
	}
	/* create a new entry */
	c = (struct nsec3_cached_hash*)regional_alloc(region, sizeof(*c));
	if(!c) return 0;
	c->node.key = c;
	c->nsec3 = nsec3;
	c->rr = rr;
	c->dname = dname;
	c->dname_len = dname_len;
	r = nsec3_calc_hash(region, buf, c);
	if(r != 1)
		return r;
	r = nsec3_calc_b32(region, buf, c);
	if(r != 1)
		return r;
#ifdef UNBOUND_DEBUG
	n =
#else
	(void)
#endif
	rbtree_insert(table, &c->node);
	log_assert(n); /* cannot be duplicate, just did lookup */
	*hash = c;
	return 1;
}
Example #14
/** alloc struct, init lock empty */
void 
checklock_init(enum check_lock_type type, struct checked_lock** lock,
        const char* func, const char* file, int line)
{
	struct checked_lock* e = (struct checked_lock*)calloc(1, 
		sizeof(struct checked_lock));
	struct thr_check *thr = (struct thr_check*)pthread_getspecific(
		thr_debug_key);
	if(!e)
		fatal_exit("%s %s %d: out of memory", func, file, line);
	if(!thr) {
		/* this is called when log_init() calls lock_init()
		 * functions, and the test check code has not yet
		 * been initialised.  But luckily, the checklock_start()
		 * routine can be called multiple times without ill effect.
		 */
		checklock_start();
		thr = (struct thr_check*)pthread_getspecific(thr_debug_key);
	}
	if(!thr)
		fatal_exit("%s %s %d: lock_init no thread info", func, file,
			line);
	*lock = e;
	e->type = type;
	e->create_func = func;
	e->create_file = file;
	e->create_line = line;
	e->create_thread = thr->num;
	e->create_instance = thr->locks_created++;
	ordercheck_lockcreate(thr, e);
	LOCKRET(pthread_mutex_init(&e->lock, NULL));
	switch(e->type) {
		case check_lock_mutex:
			LOCKRET(pthread_mutex_init(&e->u.mutex, NULL));
			break;
		case check_lock_spinlock:
			LOCKRET(pthread_spin_init(&e->u.spinlock, PTHREAD_PROCESS_PRIVATE));
			break;
		case check_lock_rwlock:
			LOCKRET(pthread_rwlock_init(&e->u.rwlock, NULL));
			break;
		default:
			log_assert(0);
	}
}
Example #15
/** read root hints list */
static int 
read_root_hints_list(struct iter_hints* hints, struct config_file* cfg)
{
	struct config_strlist* p;
	for(p = cfg->root_hints; p; p = p->next) {
		log_assert(p->str);
		if(p->str && p->str[0]) {
			char* f = p->str;
			if(cfg->chrootdir && cfg->chrootdir[0] &&
				strncmp(p->str, cfg->chrootdir, 
				strlen(cfg->chrootdir)) == 0)
				f += strlen(cfg->chrootdir);
			if(!read_root_hints(hints, f))
				return 0;
		}
	}
	return 1;
}
Example #16
static int test_item7(void)
{
    int rc = 0;
    char tkey[100];
    char tval[100];
    char val[100];
    int i = 0;

    for (i = 0; i < size; i++) {
        sprintf(tkey, "KEY-%d", i);
        sprintf(tval, "VALUE-%d", i);
        if (i == rank) {
            if (PMI_SUCCESS != (rc = PMI_KVS_Put(jobid, tkey, tval))) {
                log_fatal("PMI_KVS_Put [%s=%s] %d\n", tkey, tval, rc);
                return rc;
            }
        }
    }

    if (PMI_SUCCESS != (rc = PMI_KVS_Commit(jobid))) {
        log_fatal("PMI_KVS_Commit %d\n", rc);
        return rc;
    }

    if (PMI_SUCCESS != (rc = PMI_Barrier())) {
        log_fatal("PMI_Barrier %d\n", rc);
        return rc;
    }

    for (i = 0; i < size; i++) {
        sprintf(tkey, "KEY-%d", i);
        sprintf(tval, "VALUE-%d", i);
        if (PMI_SUCCESS != (rc = PMI_KVS_Get(jobid, tkey, val, sizeof(val)))) {
            log_fatal("PMI_KVS_Get [%s=?] %d\n", tkey, rc);
            return rc;
        }

        log_info("tkey=%s tval=%s val=%s\n", tkey, tval, val);

        log_assert(!strcmp(tval, val), "value does not meet expectation");
    }

    return rc;
}
Example #17
/**
 * Store domain name and ancestors into compression tree.
 * @param dname: pointer to uncompressed dname (stored in tree).
 * @param labs: number of labels in dname.
 * @param offset: offset into packet for dname.
 * @param region: how to allocate memory for new node.
 * @param closest: match from previous lookup, used to compress dname.
 *	may be NULL if no previous match.
 *	if the tree has an ancestor of dname already, this must be it.
 * @param insertpt: where to insert the dname in tree. 
 * @return: 0 on memory error.
 */
static int
compress_tree_store(uint8_t* dname, int labs, size_t offset, 
	struct regional* region, struct compress_tree_node* closest, 
	struct compress_tree_node** insertpt)
{
	uint8_t lablen;
	struct compress_tree_node* newnode;
	struct compress_tree_node* prevnode = NULL;
	int uplabs = labs-1; /* does not store root in tree */
	if(closest) uplabs = labs - closest->labs;
	log_assert(uplabs >= 0);
	/* algorithm builds up a vine of dname-labels to hang into tree */
	while(uplabs--) {
		if(offset > PTR_MAX_OFFSET) {
			/* insertion failed, drop vine */
			return 1; /* compression pointer no longer useful */
		}
		if(!(newnode = compress_tree_newnode(dname, labs, offset, 
			region))) {
			/* insertion failed, drop vine */
			return 0;
		}

		if(prevnode) {
			/* chain nodes together, last one has one label more,
			 * so is larger than newnode, thus goes right. */
			newnode->right = prevnode;
			prevnode->parent = newnode;
		}

		/* next label */
		lablen = *dname++;
		dname += lablen;
		offset += lablen+1;
		prevnode = newnode;
		labs--;
	}
	/* if we have a vine, hang the vine into the tree */
	if(prevnode) {
		*insertpt = prevnode;
		prevnode->parent = closest;
	}
	return 1;
}
Example #18
/**
 * Hash the query name, type, class and dbaccess-secret into lookup buffer.
 * @param qstate: query state with query info
 * 	and env->cfg with secret.
 * @param buf: returned buffer with hash to lookup
 * @param len: length of the buffer.
 */
static void
calc_hash(struct module_qstate* qstate, char* buf, size_t len)
{
	uint8_t clear[1024];
	size_t clen = 0;
	uint8_t hash[CACHEDB_HASHSIZE/8];
	const char* hex = "0123456789ABCDEF";
	const char* secret = qstate->env->cfg->cachedb_secret ?
		qstate->env->cfg->cachedb_secret : "default";
	size_t i;

	/* copy the hash info into the clear buffer */
	if(clen + qstate->qinfo.qname_len < sizeof(clear)) {
		memmove(clear+clen, qstate->qinfo.qname,
			qstate->qinfo.qname_len);
		clen += qstate->qinfo.qname_len;
	}
	if(clen + 4 < sizeof(clear)) {
		uint16_t t = htons(qstate->qinfo.qtype);
		uint16_t c = htons(qstate->qinfo.qclass);
		memmove(clear+clen, &t, 2);
		memmove(clear+clen+2, &c, 2);
		clen += 4;
	}
	if(secret && secret[0] && clen + strlen(secret) < sizeof(clear)) {
		memmove(clear+clen, secret, strlen(secret));
		clen += strlen(secret);
	}
	
	/* hash the buffer */
	secalgo_hash_sha256(clear, clen, hash);
	memset(clear, 0, clen);

	/* hex encode output for portability (some online dbs need
	 * no nulls, no control characters, and so on) */
	log_assert(len >= sizeof(hash)*2 + 1);
	(void)len;
	for(i=0; i<sizeof(hash); i++) {
		buf[i*2] = hex[(hash[i]&0xf0)>>4];
		buf[i*2+1] = hex[hash[i]&0x0f];
	}
	buf[sizeof(hash)*2] = 0;
}
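A standalone illustration, with invented names, of the sizing rule that the log_assert above enforces: a hash of N bytes needs at least 2*N+1 characters of output space, two hex digits per byte plus the terminating zero.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void demo_hex_encode(const uint8_t* hash, size_t hashlen,
	char* buf, size_t len)
{
	const char* hex = "0123456789ABCDEF";
	size_t i;
	if(len < hashlen*2 + 1)
		return;                         /* caller buffer too small */
	for(i=0; i<hashlen; i++) {
		buf[i*2] = hex[(hash[i]&0xf0)>>4];
		buf[i*2+1] = hex[hash[i]&0x0f];
	}
	buf[hashlen*2] = 0;
}

int main(void)
{
	uint8_t h[4] = { 0xde, 0xad, 0xbe, 0xef };
	char out[4*2 + 1];
	demo_hex_encode(h, sizeof(h), out, sizeof(out));
	printf("%s\n", out);                    /* prints DEADBEEF */
	return 0;
}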
Example #19
/**
 * Stop the other threads.
 * @param daemon: the daemon with other threads.
 */
static void
daemon_stop_others(struct daemon* daemon)
{
	int i;
	log_assert(daemon);
	verbose(VERB_ALGO, "stop threads");
	/* skip i=0, is this thread */
	/* use i=0 buffer for sending cmds; because we are #0 */
	for(i=1; i<daemon->num; i++) {
		worker_send_cmd(daemon->workers[i], worker_cmd_quit);
	}
	/* wait for them to quit */
	for(i=1; i<daemon->num; i++) {
		/* join it to make sure it's dead */
		verbose(VERB_ALGO, "join %d", i);
		ub_thread_join(daemon->workers[i]->thr_id);
		verbose(VERB_ALGO, "join success %d", i);
	}
}
Example #20
/**
 * Calculate space needed for the data and all its parents
 * @param rep: NSEC entries.
 * @return size.
 */
static size_t calc_data_need(struct reply_info* rep)
{
	uint8_t* d;
	size_t i, len, res = 0;

	for(i=rep->an_numrrsets; i<rep->an_numrrsets+rep->ns_numrrsets; i++) {
		if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC) {
			d = rep->rrsets[i]->rk.dname;
			len = rep->rrsets[i]->rk.dname_len;
			res = sizeof(struct val_neg_data) + len;
			while(!dname_is_root(d)) {
				log_assert(len > 1); /* not root label */
				dname_remove_label(&d, &len);
				res += sizeof(struct val_neg_data) + len;
			}
		}
	}
	return res;
}
Example #21
void 
qinfo_query_encode(sldns_buffer* pkt, struct query_info* qinfo)
{
	uint16_t flags = 0; /* QUERY, NOERROR */
	const uint8_t* qname = qinfo->local_alias ?
		qinfo->local_alias->rrset->rk.dname : qinfo->qname;
	size_t qname_len = qinfo->local_alias ?
		qinfo->local_alias->rrset->rk.dname_len : qinfo->qname_len;
	sldns_buffer_clear(pkt);
	log_assert(sldns_buffer_remaining(pkt) >= 12+255+4/*max query*/);
	sldns_buffer_skip(pkt, 2); /* id done later */
	sldns_buffer_write_u16(pkt, flags);
	sldns_buffer_write_u16(pkt, 1); /* query count */
	sldns_buffer_write(pkt, "\000\000\000\000\000\000", 6); /* counts */
	sldns_buffer_write(pkt, qname, qname_len);
	sldns_buffer_write_u16(pkt, qinfo->qtype);
	sldns_buffer_write_u16(pkt, qinfo->qclass);
	sldns_buffer_flip(pkt);
}
Example #22
/**
 * Parse query section. 
 * @param pkt: packet, position at call must be at start of query section.
 *	at end position is after query section.
 * @param msg: store results here.
 * @return: 0 if OK, or rcode on error.
 */
static int
parse_query_section(sldns_buffer* pkt, struct msg_parse* msg)
{
	if(msg->qdcount == 0)
		return 0;
	if(msg->qdcount > 1)
		return LDNS_RCODE_FORMERR;
	log_assert(msg->qdcount == 1);
	if(sldns_buffer_remaining(pkt) <= 0)
		return LDNS_RCODE_FORMERR;
	msg->qname = sldns_buffer_current(pkt);
	if((msg->qname_len = pkt_dname_len(pkt)) == 0)
		return LDNS_RCODE_FORMERR;
	if(sldns_buffer_remaining(pkt) < sizeof(uint16_t)*2)
		return LDNS_RCODE_FORMERR;
	msg->qtype = sldns_buffer_read_u16(pkt);
	msg->qclass = sldns_buffer_read_u16(pkt);
	return 0;
}
Example #23
int delegpt_add_target_mlc(struct delegpt* dp, uint8_t* name, size_t namelen,
	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t bogus,
	uint8_t lame)
{
	struct delegpt_ns* ns = delegpt_find_ns(dp, name, namelen);
	log_assert(dp->dp_type_mlc);
	if(!ns) {
		/* ignore it */
		return 1;
	}
	if(!lame) {
		if(addr_is_ip6(addr, addrlen))
			ns->got6 = 1;
		else	ns->got4 = 1;
		if(ns->got4 && ns->got6)
			ns->resolved = 1;
	}
	return delegpt_add_addr_mlc(dp, addr, addrlen, bogus, lame);
}
Example #24
/** Do the name error proof */
static enum sec_status
nsec3_do_prove_nameerror(struct module_env* env, struct nsec3_filter* flt, 
	rbtree_t* ct, struct query_info* qinfo)
{
	struct ce_response ce;
	uint8_t* wc;
	size_t wclen;
	struct ub_packed_rrset_key* wc_rrset;
	int wc_rr;
	enum sec_status sec;

	/* First locate and prove the closest encloser to qname. We will 
	 * use the variant that fails if the closest encloser turns out 
	 * to be qname. */
	sec = nsec3_prove_closest_encloser(env, flt, ct, qinfo, 1, &ce);
	if(sec != sec_status_secure) {
		if(sec == sec_status_bogus)
			verbose(VERB_ALGO, "nsec3 nameerror proof: failed "
				"to prove a closest encloser");
		else 	verbose(VERB_ALGO, "nsec3 nameerror proof: closest "
				"nsec3 is an insecure delegation");
		return sec;
	}
	log_nametypeclass(VERB_ALGO, "nsec3 nameerror: proven ce=", ce.ce,0,0);

	/* At this point, we know that qname does not exist. Now we need 
	 * to prove that the wildcard does not exist. */
	log_assert(ce.ce);
	wc = nsec3_ce_wildcard(env->scratch, ce.ce, ce.ce_len, &wclen);
	if(!wc || !find_covering_nsec3(env, flt, ct, wc, wclen, 
		&wc_rrset, &wc_rr)) {
		verbose(VERB_ALGO, "nsec3 nameerror proof: could not prove "
			"that the applicable wildcard did not exist.");
		return sec_status_bogus;
	}

	if(ce.nc_rrset && nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
		verbose(VERB_ALGO, "nsec3 nameerror proof: nc has optout");
		return sec_status_insecure;
	}
	return sec_status_secure;
}
Example #25
/** set stub server addresses */
static int 
read_stubs_addr(struct config_stub* s, struct delegpt* dp)
{
	struct config_strlist* p;
	struct sockaddr_storage addr;
	socklen_t addrlen;
	for(p = s->addrs; p; p = p->next) {
		log_assert(p->str);
		if(!extstrtoaddr(p->str, &addr, &addrlen)) {
			log_err("cannot parse stub %s ip address: '%s'", 
				s->name, p->str);
			return 0;
		}
		if(!delegpt_add_addr_mlc(dp, &addr, addrlen, 0, 0)) {
			log_err("out of memory");
			return 0;
		}
	}
	return 1;
}
Example #26
int mesh_state_add_cb(struct mesh_state* s, struct edns_data* edns,
        ldns_buffer* buf, mesh_cb_func_t cb, void* cb_arg,
	uint16_t qid, uint16_t qflags)
{
	struct mesh_cb* r = regional_alloc(s->s.region, 
		sizeof(struct mesh_cb));
	if(!r)
		return 0;
	r->buf = buf;
	log_assert(fptr_whitelist_mesh_cb(cb)); /* early failure if missing */
	r->cb = cb;
	r->cb_arg = cb_arg;
	r->edns = *edns;
	r->qid = qid;
	r->qflags = qflags;
	r->next = s->cb_list;
	s->cb_list = r;
	return 1;
}
Example #27
void state_queued_t::filter_closed(abstract_filter_state_t *f_st)
{
  log_assert(!f_st->is_open) ;
  machine_t::pause_t x(machine) ;
  typedef set<event_pair>::iterator iterator ;
  bool event_found = false ;
  log_debug("event_found=%d", event_found) ;
  for(iterator it=queue.begin(); it!=queue.end(); ++it)
  {
    if(! f_st->filter(it->second))
      continue ;
    event_found = true ;
    log_debug("event_found=%d", event_found) ;
    log_debug("event [%u] found in state '%s', requesting staet '%s'", it->second->cookie.value(), name(), f_st->name()) ;
    machine->request_state(it->second, f_st) ;
  }
  log_debug("event_found=%d", event_found) ;
  if(event_found)
    machine->process_transition_queue() ;
}
Example #28
/*
 * Start the next search on this handle right at the beginning
 */
int
keyring_search_reset (KEYRING_HANDLE hd)
{
    log_assert (hd);

    iobuf_close (hd->current.iobuf);
    hd->current.iobuf = NULL;
    hd->current.eof = 0;
    hd->current.error = 0;

    hd->found.kr = NULL;
    hd->found.offset = 0;

    if (hd->current.kr)
      iobuf_ioctl (NULL, IOBUF_IOCTL_INVALIDATE_CACHE, 0,
                   (char*)hd->current.kr->fname);
    hd->current.kr = NULL;

    return 0;
}
Example #29
/* takes a hex string and puts it into the buffer */
void hex_to_buf(sldns_buffer* pkt, const char* hex)
{
	const char* p = hex;
	int val;
	sldns_buffer_clear(pkt);
	while(*p) {
		skip_whites(&p);
		if(sldns_buffer_position(pkt) == sldns_buffer_limit(pkt))
			fatal_exit("hex_to_buf: buffer too small");
		if(!isalnum((unsigned char)*p))
			break;
		val = sldns_hexdigit_to_int(*p++) << 4;
		skip_whites(&p);
		log_assert(*p && isalnum((unsigned char)*p));
		val |= sldns_hexdigit_to_int(*p++);
		sldns_buffer_write_u8(pkt, (uint8_t)val);
		skip_whites(&p);
	}
	sldns_buffer_flip(pkt);
}
/** Synthesize CNAME from DNAME, false if too long */
static int
synth_cname(uint8_t* qname, size_t qnamelen, struct rrset_parse* dname_rrset,
            uint8_t* alias, size_t* aliaslen, ldns_buffer* pkt)
{
    /* we already know that sname is a strict subdomain of DNAME owner */
    uint8_t* dtarg = NULL;
    size_t dtarglen;
    if(!parse_get_cname_target(dname_rrset, &dtarg, &dtarglen))
        return 0;
    log_assert(qnamelen > dname_rrset->dname_len);
    /* DNAME from com. to net. with qname example.com. -> example.net. */
    /* so: \3com\0 to \3net\0 and qname \7example\3com\0 */
    *aliaslen = qnamelen + dtarglen - dname_rrset->dname_len;
    if(*aliaslen > LDNS_MAX_DOMAINLEN)
        return 0; /* should have been RCODE YXDOMAIN */
    /* decompress dnames into buffer, we know it fits */
    dname_pkt_copy(pkt, alias, qname);
    dname_pkt_copy(pkt, alias+(qnamelen-dname_rrset->dname_len), dtarg);
    return 1;
}