/**
 * Update DNS configuration for zone.
 *
 */
static int
dnsconfig_zone(engine_type* engine, zone_type* zone)
{
    int numdns = 0;
    ods_log_assert(engine);
    ods_log_assert(engine->xfrhandler);
    ods_log_assert(engine->xfrhandler->netio);
    ods_log_assert(zone);
    ods_log_assert(zone->adinbound);
    ods_log_assert(zone->adoutbound);
    ods_log_assert(zone->name);

    if (zone->adinbound->type == ADAPTER_DNS) {
        /* zone transfer handler */
        if (!zone->xfrd) {
            ods_log_debug("[%s] add transfer handler for zone %s",
                engine_str, zone->name);
            zone->xfrd = xfrd_create((void*) engine->xfrhandler,
                (void*) zone);
            ods_log_assert(zone->xfrd);
            netio_add_handler(engine->xfrhandler->netio,
                &zone->xfrd->handler);
        } else if (!zone->xfrd->serial_disk_acquired) {
            xfrd_set_timer_now(zone->xfrd);
        }
        numdns++;
    } else if (zone->xfrd) {
        netio_remove_handler(engine->xfrhandler->netio,
            &zone->xfrd->handler);
        xfrd_cleanup(zone->xfrd);
        zone->xfrd = NULL;
    }
    if (zone->adoutbound->type == ADAPTER_DNS) {
        /* notify handler */
        if (!zone->notify) {
            ods_log_debug("[%s] add notify handler for zone %s",
                engine_str, zone->name);
            zone->notify = notify_create((void*) engine->xfrhandler,
                (void*) zone);
            ods_log_assert(zone->notify);
            netio_add_handler(engine->xfrhandler->netio,
                &zone->notify->handler);
        }
        numdns++;
    } else if (zone->notify) {
        netio_remove_handler(engine->xfrhandler->netio,
            &zone->notify->handler);
        notify_cleanup(zone->notify);
        zone->notify = NULL;
    }
    return numdns;
}
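
/*
 * A minimal standalone sketch (not OpenDNSSEC code) of the
 * reconcile-on-demand pattern dnsconfig_zone() uses above: the handler
 * object is created only while the adapter type calls for it, torn down
 * as soon as it does not, and the caller gets back a count of DNS
 * adapters in use. All types and names here are illustrative stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

enum adapter_type { ADAPTER_FILE, ADAPTER_DNS };
typedef struct handler { int fd; } handler_t;

static int
reconcile_dns_handler(enum adapter_type type, handler_t** slot)
{
    if (type == ADAPTER_DNS) {
        if (!*slot) {                    /* create on demand, exactly once */
            *slot = malloc(sizeof(handler_t));
            if (!*slot) abort();
            (*slot)->fd = -1;
        }
        return 1;                        /* one DNS adapter in use */
    }
    if (*slot) {                         /* adapter switched away: tear down */
        free(*slot);
        *slot = NULL;
    }
    return 0;
}

int
main(void)
{
    handler_t* xfrd = NULL;
    printf("dns adapters: %d\n", reconcile_dns_handler(ADAPTER_DNS, &xfrd));
    printf("dns adapters: %d\n", reconcile_dns_handler(ADAPTER_FILE, &xfrd));
    return 0;
}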
Example #2
/**
 * Start zone transfer handler.
 *
 */
void
xfrhandler_start(xfrhandler_type* xfrhandler)
{
    ods_log_assert(xfrhandler);
    ods_log_assert(xfrhandler->engine);
    ods_log_debug("[%s] start", xfrh_str);
    /* setup */
    xfrhandler->start_time = time_now();
    /* handlers */
    netio_add_handler(xfrhandler->netio, &xfrhandler->dnshandler);
    /* service */
    while (xfrhandler->need_to_exit == 0) {
        /* dispatch may block for a longer period, so the cached current time is stale */
        xfrhandler->got_time = 0;
        ods_log_deeebug("[%s] netio dispatch", xfrh_str);
        if (netio_dispatch(xfrhandler->netio, NULL, NULL) == -1) {
            if (errno != EINTR) {
                ods_log_error("[%s] unable to dispatch netio: %s", xfrh_str,
                    strerror(errno));
            }
        }
    }
    /* shutdown */
    ods_log_debug("[%s] shutdown", xfrh_str);
}
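
/*
 * A standalone sketch of the service-loop shape above, with poll()
 * standing in for netio_dispatch(): EINTR is expected whenever a signal
 * lands, so it must not be logged as an error and must not break the
 * loop; the exit flag is simply re-checked. Not OpenDNSSEC code.
 */
#include <errno.h>
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t need_to_exit = 0;

static void
on_term(int sig)
{
    (void) sig;
    need_to_exit = 1;
}

int
main(void)
{
    struct pollfd pfd = { 0, POLLIN, 0 };    /* watch stdin, for example */
    signal(SIGTERM, on_term);
    while (need_to_exit == 0) {
        if (poll(&pfd, 1, 1000) == -1) {
            if (errno != EINTR)              /* real failure: log it */
                fprintf(stderr, "poll: %s\n", strerror(errno));
            continue;                        /* EINTR: re-check exit flag */
        }
        /* ... dispatch ready handlers here ... */
        break;                               /* sketch: run one iteration */
    }
    return 0;
}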
Example #3
void
xfrd_init(int socket, struct nsd* nsd)
{
	region_type* region;

	assert(xfrd == 0);
	/* set up signal handling */
	nsd->server_kind = NSD_SERVER_BOTH;

	region = region_create(xalloc, free);
	xfrd = (xfrd_state_t*)region_alloc(region, sizeof(xfrd_state_t));
	memset(xfrd, 0, sizeof(xfrd_state_t));
	xfrd->region = region;
	xfrd->xfrd_start_time = time(0);
	xfrd->netio = netio_create(xfrd->region);
	xfrd->nsd = nsd;
	xfrd->packet = buffer_create(xfrd->region, QIOBUFSZ);
	xfrd->udp_waiting_first = NULL;
	xfrd->udp_waiting_last = NULL;
	xfrd->udp_use_num = 0;
	xfrd->ipc_pass = buffer_create(xfrd->region, QIOBUFSZ);
	xfrd->parent_soa_info_pass = 0;

	/* set up the handler structs now, since this involves allocations */
	xfrd->reload_handler.fd = -1;
	xfrd->reload_handler.timeout = NULL;
	xfrd->reload_handler.user_data = xfrd;
	xfrd->reload_handler.event_types = NETIO_EVENT_TIMEOUT;
	xfrd->reload_handler.event_handler = xfrd_handle_reload;
	xfrd->reload_timeout.tv_sec = 0;
	xfrd->reload_cmd_last_sent = xfrd->xfrd_start_time;
	xfrd->can_send_reload = 1;

	xfrd->ipc_send_blocked = 0;
	xfrd->ipc_handler.fd = socket;
	xfrd->ipc_handler.timeout = NULL;
	xfrd->ipc_handler.user_data = xfrd;
	xfrd->ipc_handler.event_types = NETIO_EVENT_READ;
	xfrd->ipc_handler.event_handler = xfrd_handle_ipc;
	xfrd->ipc_conn = xfrd_tcp_create(xfrd->region);
	/* not reading using ipc_conn yet */
	xfrd->ipc_conn->is_reading = 0;
	xfrd->ipc_conn->fd = xfrd->ipc_handler.fd;
	xfrd->ipc_conn_write = xfrd_tcp_create(xfrd->region);
	xfrd->ipc_conn_write->fd = xfrd->ipc_handler.fd;
	xfrd->need_to_send_reload = 0;
	xfrd->sending_zone_state = 0;
	xfrd->dirty_zones = stack_create(xfrd->region,
			nsd_options_num_zones(nsd->options));

	xfrd->notify_waiting_first = NULL;
	xfrd->notify_waiting_last = NULL;
	xfrd->notify_udp_num = 0;

	xfrd->tcp_set = xfrd_tcp_set_create(xfrd->region);
	xfrd->tcp_set->tcp_timeout = nsd->tcp_timeout;
	srandom((unsigned long) getpid() * (unsigned long) time(NULL));

	DEBUG(DEBUG_XFRD,1, (LOG_INFO, "xfrd pre-startup"));
	diff_snip_garbage(nsd->db, nsd->options);
	xfrd_init_zones();
	xfrd_free_namedb();
	xfrd_read_state(xfrd);
	xfrd_send_expy_all_zones();

	/* add these after the zone handlers so they come before them in the list */
	netio_add_handler(xfrd->netio, &xfrd->reload_handler);
	netio_add_handler(xfrd->netio, &xfrd->ipc_handler);

	DEBUG(DEBUG_XFRD,1, (LOG_INFO, "xfrd startup"));
	xfrd_main();
}
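
/*
 * A standalone sketch of the region (arena) allocation idiom behind
 * xfrd_init(): everything hangs off xfrd->region, so teardown is a
 * single region_destroy() instead of dozens of free() calls. This toy
 * arena is illustrative only, not NSD's region implementation.
 */
#include <stddef.h>
#include <stdlib.h>

typedef union chunk {
    union chunk* next;
    max_align_t pad;                /* keep payloads suitably aligned */
} chunk_t;

typedef struct region { chunk_t* head; } region_t;

static void*
region_alloc_sketch(region_t* r, size_t size)
{
    chunk_t* c = malloc(sizeof(chunk_t) + size);
    if (!c) abort();
    c->next = r->head;              /* thread every chunk onto one list */
    r->head = c;
    return c + 1;                   /* payload starts after the header */
}

static void
region_destroy_sketch(region_t* r)
{
    while (r->head) {               /* one pass frees every allocation */
        chunk_t* next = r->head->next;
        free(r->head);
        r->head = next;
    }
}

int
main(void)
{
    region_t r = { NULL };
    (void) region_alloc_sketch(&r, 128);
    (void) region_alloc_sketch(&r, 64);
    region_destroy_sketch(&r);
    return 0;
}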
Example #4
static void
xfrd_init_zones()
{
	zone_type *dbzone;
	zone_options_t *zone_opt;
	xfrd_zone_t *xzone;
	const dname_type* dname;

	assert(xfrd->zones == 0);
	assert(xfrd->nsd->db != 0);

	xfrd->zones = rbtree_create(xfrd->region,
		(int (*)(const void *, const void *)) dname_compare);
	xfrd->notify_zones = rbtree_create(xfrd->region,
		(int (*)(const void *, const void *)) dname_compare);

	RBTREE_FOR(zone_opt, zone_options_t*, xfrd->nsd->options->zone_options)
	{
		DEBUG(DEBUG_XFRD,1, (LOG_INFO, "Zone %s\n", zone_opt->name));
		dname = dname_parse(xfrd->region, zone_opt->name);
		if(!dname) {
			log_msg(LOG_ERR, "xfrd: Could not parse zone name %s.", zone_opt->name);
			continue;
		}

		dbzone = domain_find_zone(domain_table_find(xfrd->nsd->db->domains, dname));
		if(dbzone && dname_compare(dname, domain_dname(dbzone->apex)) != 0)
			dbzone = 0; /* we found a parent zone */
		DEBUG(DEBUG_XFRD,1, (LOG_INFO, "xfrd: adding %s zone %s\n",
			dbzone?"filled":"empty", zone_opt->name));

		init_notify_send(xfrd->notify_zones, xfrd->netio,
			xfrd->region, dname, zone_opt, dbzone);
		if(!zone_is_slave(zone_opt)) {
			DEBUG(DEBUG_XFRD,1, (LOG_INFO, "xfrd: zone %s, master zone has no outgoing xfr requests", zone_opt->name));
			continue;
		}

		xzone = (xfrd_zone_t*)region_alloc(xfrd->region, sizeof(xfrd_zone_t));
		memset(xzone, 0, sizeof(xfrd_zone_t));
		xzone->apex = dname;
		xzone->apex_str = zone_opt->name;
		xzone->state = xfrd_zone_refreshing;
		xzone->dirty = 0;
		xzone->zone_options = zone_opt;
		/* first retry will use first master */
		xzone->master = 0;
		xzone->master_num = 0;
		xzone->next_master = 0;
		xzone->fresh_xfr_timeout = XFRD_TRANSFER_TIMEOUT_START;

		xzone->soa_nsd_acquired = 0;
		xzone->soa_disk_acquired = 0;
		xzone->soa_notified_acquired = 0;
		/* [0]=1, [1]=0; "." domain name */
		xzone->soa_nsd.prim_ns[0] = 1;
		xzone->soa_nsd.email[0] = 1;
		xzone->soa_disk.prim_ns[0] = 1;
		xzone->soa_disk.email[0] = 1;
		xzone->soa_notified.prim_ns[0] = 1;
		xzone->soa_notified.email[0] = 1;

		xzone->zone_handler.fd = -1;
		xzone->zone_handler.timeout = 0;
		xzone->zone_handler.user_data = xzone;
		xzone->zone_handler.event_types =
			NETIO_EVENT_READ|NETIO_EVENT_TIMEOUT;
		xzone->zone_handler.event_handler = xfrd_handle_zone;
		netio_add_handler(xfrd->netio, &xzone->zone_handler);
		xzone->tcp_conn = -1;
		xzone->tcp_waiting = 0;
		xzone->udp_waiting = 0;

		tsig_create_record_custom(&xzone->tsig, xfrd->region, 0, 0, 4);

		if(dbzone && dbzone->soa_rrset && dbzone->soa_rrset->rrs) {
			xzone->soa_nsd_acquired = xfrd_time();
			xzone->soa_disk_acquired = xfrd_time();
			/* we only use the first SOA in the rrset */
			xfrd_copy_soa(&xzone->soa_nsd, dbzone->soa_rrset->rrs);
			xfrd_copy_soa(&xzone->soa_disk, dbzone->soa_rrset->rrs);
		}
		/* set refreshing anyway, we have data but it may be old */
		xfrd_set_refresh_now(xzone);

		xzone->node.key = dname;
		rbtree_insert(xfrd->zones, (rbnode_t*)xzone);
	}
	DEBUG(DEBUG_XFRD,1, (LOG_INFO, "xfrd: started server %d secondary zones", (int)xfrd->zones->count));
}
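
/*
 * A standalone sketch of the intrusive-node trick used by
 * xfrd_init_zones() above: rbtree_insert() takes an rbnode_t*, and the
 * cast (rbnode_t*)xzone is valid only because the node is the first
 * member of xfrd_zone_t, so a pointer to the struct is also a pointer
 * to its embedded tree node. Stand-in types below, not NSD's rbtree.
 */
#include <stdio.h>

typedef struct node { const void* key; } node_t;

typedef struct zone {
    node_t node;                    /* MUST stay the first member */
    const char* apex_str;
} zone_t;

static void
insert_sketch(node_t* n)
{
    printf("insert key=%s\n", (const char*) n->key);
}

int
main(void)
{
    zone_t z;
    z.apex_str = "example.com.";
    z.node.key = z.apex_str;        /* key points into the object itself */
    insert_sketch((node_t*) &z);    /* legal: node is at offset 0 */
    return 0;
}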
Example #5
/*
 * Serve DNS requests.
 */
void
server_child(struct nsd *nsd)
{
	size_t i;
	region_type *server_region = region_create(xalloc, free);
	netio_type *netio = netio_create(server_region);
	netio_handler_type *tcp_accept_handlers;
	query_type *udp_query;
	sig_atomic_t mode;

	assert(nsd->server_kind != NSD_SERVER_MAIN);
	DEBUG(DEBUG_IPC, 2, (LOG_INFO, "child process started"));

	if (!(nsd->server_kind & NSD_SERVER_TCP)) {
		close_all_sockets(nsd->tcp, nsd->ifs);
	}
	if (!(nsd->server_kind & NSD_SERVER_UDP)) {
		close_all_sockets(nsd->udp, nsd->ifs);
	}

	if (nsd->this_child && nsd->this_child->parent_fd != -1) {
		netio_handler_type *handler;

		handler = (netio_handler_type *) region_alloc(
			server_region, sizeof(netio_handler_type));
		handler->fd = nsd->this_child->parent_fd;
		handler->timeout = NULL;
		handler->user_data = (struct ipc_handler_conn_data*)region_alloc(
			server_region, sizeof(struct ipc_handler_conn_data));
		((struct ipc_handler_conn_data*)handler->user_data)->nsd = nsd;
		((struct ipc_handler_conn_data*)handler->user_data)->conn =
			xfrd_tcp_create(server_region);
		handler->event_types = NETIO_EVENT_READ;
		handler->event_handler = child_handle_parent_command;
		netio_add_handler(netio, handler);
	}

	if (nsd->server_kind & NSD_SERVER_UDP) {
		udp_query = query_create(server_region,
			compressed_dname_offsets, compression_table_size);

		for (i = 0; i < nsd->ifs; ++i) {
			struct udp_handler_data *data;
			netio_handler_type *handler;

			data = (struct udp_handler_data *) region_alloc(
				server_region,
				sizeof(struct udp_handler_data));
			data->query = udp_query;
			data->nsd = nsd;
			data->socket = &nsd->udp[i];

			handler = (netio_handler_type *) region_alloc(
				server_region, sizeof(netio_handler_type));
			handler->fd = nsd->udp[i].s;
			handler->timeout = NULL;
			handler->user_data = data;
			handler->event_types = NETIO_EVENT_READ;
			handler->event_handler = handle_udp;
			netio_add_handler(netio, handler);
		}
	}

	/*
	 * Keep track of all the TCP accept handlers so we can enable
	 * and disable them based on the current number of active TCP
	 * connections.
	 */
	tcp_accept_handlers = (netio_handler_type *) region_alloc(
		server_region, nsd->ifs * sizeof(netio_handler_type));
	if (nsd->server_kind & NSD_SERVER_TCP) {
		for (i = 0; i < nsd->ifs; ++i) {
			struct tcp_accept_handler_data *data;
			netio_handler_type *handler;

			data = (struct tcp_accept_handler_data *) region_alloc(
				server_region,
				sizeof(struct tcp_accept_handler_data));
			data->nsd = nsd;
			data->socket = &nsd->tcp[i];
			data->tcp_accept_handler_count = nsd->ifs;
			data->tcp_accept_handlers = tcp_accept_handlers;

			handler = &tcp_accept_handlers[i];
			handler->fd = nsd->tcp[i].s;
			handler->timeout = NULL;
			handler->user_data = data;
			handler->event_types = NETIO_EVENT_READ | NETIO_EVENT_ACCEPT;
			handler->event_handler = handle_tcp_accept;
			netio_add_handler(netio, handler);
		}
	}

	/* The main loop... */
	while ((mode = nsd->mode) != NSD_QUIT) {
		if(mode == NSD_RUN) nsd->mode = mode = server_signal_mode(nsd);

		/* Do we need to do the statistics... */
		if (mode == NSD_STATS) {
#ifdef BIND8_STATS
			/* Dump the statistics */
			bind8_stats(nsd);
#else /* !BIND8_STATS */
			log_msg(LOG_NOTICE, "Statistics support not enabled at compile time.");
#endif /* BIND8_STATS */

			nsd->mode = NSD_RUN;
		}
		else if (mode == NSD_REAP_CHILDREN) {
			/* got signal, notify parent. parent reaps terminated children. */
			if (nsd->this_child->parent_fd != -1) {
				sig_atomic_t parent_notify = NSD_REAP_CHILDREN;
				if (write(nsd->this_child->parent_fd,
				    &parent_notify,
				    sizeof(parent_notify)) == -1)
				{
					log_msg(LOG_ERR, "problems sending command from %d to parent: %s",
						(int) nsd->this_child->pid, strerror(errno));
				}
			} else /* no parent, so reap 'em */
				while (waitpid(0, NULL, WNOHANG) > 0) ;
			nsd->mode = NSD_RUN;
		}
		else if(mode == NSD_RUN) {
			/* Wait for a query... */
			if (netio_dispatch(netio, NULL, NULL) == -1) {
				if (errno != EINTR) {
					log_msg(LOG_ERR, "netio_dispatch failed: %s", strerror(errno));
					break;
				}
			}
		} else if(mode == NSD_QUIT) {
			/* ignore here, quit */
		} else {
			log_msg(LOG_ERR, "mode bad value %d, back to service.",
				mode);
			nsd->mode = NSD_RUN;
		}
	}

#ifdef	BIND8_STATS
	bind8_stats(nsd);
#endif /* BIND8_STATS */

	namedb_fd_close(nsd->db);
	region_destroy(server_region);
	server_shutdown(nsd);
}
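
/*
 * A standalone sketch of the parent-notify idiom in server_child()
 * above: the child writes a one-word command down a pipe and the
 * parent's event loop reads it back. Both ends live in one process
 * here purely for demonstration; the command value is a stand-in for
 * NSD_REAP_CHILDREN.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    int fds[2];
    sig_atomic_t cmd = 1, got = 0;  /* 1 stands in for a mode constant */
    if (pipe(fds) == -1) {
        perror("pipe");
        return 1;
    }
    if (write(fds[1], &cmd, sizeof(cmd)) == -1)
        fprintf(stderr, "problems sending command: %s\n", strerror(errno));
    if (read(fds[0], &got, sizeof(got)) == (ssize_t) sizeof(got))
        printf("parent received command %d\n", (int) got);
    close(fds[0]);
    close(fds[1]);
    return 0;
}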
Example #6
/*
 * Handle an incoming TCP connection.  The connection is accepted and
 * a new TCP reader event handler is added to NETIO.  The TCP handler
 * is responsible for cleanup when the connection is closed.
 */
static void
handle_tcp_accept(netio_type *netio,
		  netio_handler_type *handler,
		  netio_event_types_type event_types)
{
	struct tcp_accept_handler_data *data
		= (struct tcp_accept_handler_data *) handler->user_data;
	int s;
	struct tcp_handler_data *tcp_data;
	region_type *tcp_region;
	netio_handler_type *tcp_handler;
#ifdef INET6
	struct sockaddr_storage addr;
#else
	struct sockaddr_in addr;
#endif
	socklen_t addrlen;

	if (!(event_types & NETIO_EVENT_READ)) {
		return;
	}

	if (data->nsd->current_tcp_count >= data->nsd->maximum_tcp_count) {
		return;
	}

	/* Accept it... */
	addrlen = sizeof(addr);
	s = accept(handler->fd, (struct sockaddr *) &addr, &addrlen);
	if (s == -1) {
		/**
		 * EMFILE and ENFILE indicate that the limit on open
		 * file descriptors has been reached: pause accept().
		 * EINTR means a signal interrupted the call. The others
		 * are various OS ways of saying that the client has
		 * closed the connection.
		 */
		if (errno == EMFILE || errno == ENFILE) {
			if (!slowaccept) {
				slowaccept_timeout.tv_sec = NETIO_SLOW_ACCEPT_TIMEOUT;
				slowaccept_timeout.tv_nsec = 0L;
				timespec_add(&slowaccept_timeout, netio_current_time(netio));
				slowaccept = 1;
				/* We don't want to spam the logs here */
			}
		} else if (errno != EINTR
			&& errno != EWOULDBLOCK
#ifdef ECONNABORTED
			&& errno != ECONNABORTED
#endif /* ECONNABORTED */
#ifdef EPROTO
			&& errno != EPROTO
#endif /* EPROTO */
			) {
			log_msg(LOG_ERR, "accept failed: %s", strerror(errno));
		}
		return;
	}

	if (fcntl(s, F_SETFL, O_NONBLOCK) == -1) {
		log_msg(LOG_ERR, "fcntl failed: %s", strerror(errno));
		close(s);
		return;
	}

	/*
	 * This region is deallocated when the TCP connection is
	 * closed by the TCP handler.
	 */
	tcp_region = region_create(xalloc, free);
	tcp_data = (struct tcp_handler_data *) region_alloc(
		tcp_region, sizeof(struct tcp_handler_data));
	tcp_data->region = tcp_region;
	tcp_data->query = query_create(tcp_region, compressed_dname_offsets,
		compression_table_size);
	tcp_data->nsd = data->nsd;
	tcp_data->query_count = 0;

	tcp_data->tcp_accept_handler_count = data->tcp_accept_handler_count;
	tcp_data->tcp_accept_handlers = data->tcp_accept_handlers;

	tcp_data->query_state = QUERY_PROCESSED;
	tcp_data->bytes_transmitted = 0;
	memcpy(&tcp_data->query->addr, &addr, addrlen);
	tcp_data->query->addrlen = addrlen;

	tcp_handler = (netio_handler_type *) region_alloc(
		tcp_region, sizeof(netio_handler_type));
	tcp_handler->fd = s;
	tcp_handler->timeout = (struct timespec *) region_alloc(
		tcp_region, sizeof(struct timespec));
	tcp_handler->timeout->tv_sec = data->nsd->tcp_timeout;
	tcp_handler->timeout->tv_nsec = 0L;
	timespec_add(tcp_handler->timeout, netio_current_time(netio));

	tcp_handler->user_data = tcp_data;
	tcp_handler->event_types = NETIO_EVENT_READ | NETIO_EVENT_TIMEOUT;
	tcp_handler->event_handler = handle_tcp_reading;

	netio_add_handler(netio, tcp_handler);

	/*
	 * Keep track of the total number of TCP handlers installed so
	 * we can stop accepting connections when the maximum number
	 * of simultaneous TCP connections is reached.
	 */
	++data->nsd->current_tcp_count;
	if (data->nsd->current_tcp_count == data->nsd->maximum_tcp_count) {
		configure_handler_event_types(data->tcp_accept_handler_count,
					      data->tcp_accept_handlers,
					      NETIO_EVENT_NONE);
	}
}
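
/*
 * A standalone sketch of the accept() error triage in
 * handle_tcp_accept() above: EMFILE/ENFILE (out of file descriptors)
 * switch on a timed accept pause, EINTR and transient client aborts
 * are retried silently, and only genuinely unexpected errors get
 * logged. Illustrative only.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int slowaccept = 0;

static void
triage_accept_error(int err)
{
    if (err == EMFILE || err == ENFILE) {
        slowaccept = 1;             /* fd limit hit: pause accepting */
    } else if (err != EINTR && err != EWOULDBLOCK
#ifdef ECONNABORTED
        && err != ECONNABORTED
#endif
        ) {
        fprintf(stderr, "accept failed: %s\n", strerror(err));
    }                               /* else: transient, just retry */
}

int
main(void)
{
    triage_accept_error(EMFILE);
    printf("slowaccept=%d\n", slowaccept);
    return 0;
}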
/**
 * Start dns handler.
 *
 */
void
dnshandler_start(dnshandler_type* dnshandler)
{
    size_t i = 0;
    engine_type* engine = NULL;
    netio_handler_type* tcp_accept_handlers = NULL;

    ods_log_assert(dnshandler);
    ods_log_assert(dnshandler->engine);
    engine = dnshandler->engine;
    ods_log_debug("[%s] start", dnsh_str);
    /* udp */
    for (i=0; i < dnshandler->interfaces->count; i++) {
        struct udp_data* data = NULL;
        netio_handler_type* handler = NULL;
        data = (struct udp_data*) allocator_alloc(dnshandler->allocator,
            sizeof(struct udp_data));
        if (!data) {
            ods_log_error("[%s] unable to start: allocator_alloc() "
                "failed", dnsh_str);
            dnshandler->thread_id = 0;
            engine->need_to_exit = 1;
            break;
        }
        data->query = dnshandler->query;
        data->engine = dnshandler->engine;
        data->socket = &dnshandler->socklist->udp[i];
        handler = (netio_handler_type*) allocator_alloc(
            dnshandler->allocator, sizeof(netio_handler_type));
        if (!handler) {
            ods_log_error("[%s] unable to start: allocator_alloc() "
                "failed", dnsh_str);
            allocator_deallocate(dnshandler->allocator, (void*)data);
            dnshandler->thread_id = 0;
            engine->need_to_exit = 1;
            break;
        }
        handler->fd = dnshandler->socklist->udp[i].s;
        handler->timeout = NULL;
        handler->user_data = data;
        handler->event_types = NETIO_EVENT_READ;
        handler->event_handler = sock_handle_udp;
        ods_log_debug("[%s] add udp network handler fd %u", dnsh_str,
            (unsigned) handler->fd);
        netio_add_handler(dnshandler->netio, handler);
    }
    /* tcp */
    tcp_accept_handlers = (netio_handler_type*) allocator_alloc(
        dnshandler->allocator,
        dnshandler->interfaces->count * sizeof(netio_handler_type));
    for (i=0; i < dnshandler->interfaces->count; i++) {
        struct tcp_accept_data* data = NULL;
        netio_handler_type* handler = NULL;
        data = (struct tcp_accept_data*) allocator_alloc(
            dnshandler->allocator, sizeof(struct tcp_accept_data));
        if (!data) {
            ods_log_error("[%s] unable to start: allocator_alloc() "
                "failed", dnsh_str);
            dnshandler->thread_id = 0;
            engine->need_to_exit = 1;
            return;
        }
        data->engine = dnshandler->engine;
        data->socket = &dnshandler->socklist->tcp[i];
        data->tcp_accept_handler_count = dnshandler->interfaces->count;
        data->tcp_accept_handlers = tcp_accept_handlers;
        handler = &tcp_accept_handlers[i];
        handler->fd = dnshandler->socklist->tcp[i].s;
        handler->timeout = NULL;
        handler->user_data = data;
        handler->event_types = NETIO_EVENT_READ;
        handler->event_handler = sock_handle_tcp_accept;
        ods_log_debug("[%s] add tcp network handler fd %u", dnsh_str,
            (unsigned) handler->fd);
        netio_add_handler(dnshandler->netio, handler);
    }
    /* service */
    while (dnshandler->need_to_exit == 0) {
        ods_log_deeebug("[%s] netio dispatch", dnsh_str);
        if (netio_dispatch(dnshandler->netio, NULL, NULL) == -1) {
            if (errno != EINTR) {
                ods_log_error("[%s] unable to dispatch netio: %s", dnsh_str,
                    strerror(errno));
                break;
            }
        }
    }
    /* shutdown */
    ods_log_debug("[%s] shutdown", dnsh_str);
    for (i=0; i < dnshandler->interfaces->count; i++) {
        if (dnshandler->socklist->udp[i].s != -1) {
            close(dnshandler->socklist->udp[i].s);
            freeaddrinfo((void*)dnshandler->socklist->udp[i].addr);
        }
        if (dnshandler->socklist->tcp[i].s != -1) {
            close(dnshandler->socklist->tcp[i].s);
            freeaddrinfo((void*)dnshandler->socklist->tcp[i].addr);
        }
    }
    return;
}
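
/*
 * A standalone sketch of the shared accept-handler array used in both
 * DNS handlers above: every accept handler keeps a pointer to the whole
 * array, so any one of them can switch all of them off when the
 * connection limit is reached (and back on when it drops). This mirrors
 * the role of configure_handler_event_types(), with stand-in types.
 */
#include <stdio.h>

enum { EV_NONE = 0, EV_READ = 1 };
typedef struct handler { int event_types; } handler_t;

static void
set_all_event_types(handler_t* hs, size_t n, int types)
{
    size_t i;
    for (i = 0; i < n; i++)
        hs[i].event_types = types;
}

int
main(void)
{
    handler_t accept_handlers[3] = { {EV_READ}, {EV_READ}, {EV_READ} };
    set_all_event_types(accept_handlers, 3, EV_NONE); /* max conns hit */
    printf("first handler events: %d\n", accept_handlers[0].event_types);
    set_all_event_types(accept_handlers, 3, EV_READ); /* below max again */
    return 0;
}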