/*
 * Grant firewall access to a peer and (re)arm its timers.
 *
 * If the peer is not yet accepted, run the configured "PermitCmd"; on
 * success mark the peer PEER_ACCEPT, on failure return -1 without touching
 * the timers.  Then arm the login-expiry and idle-check timestamps from
 * the "LoginTimeout" / "IdleTimeout" configuration values (0 disables).
 *
 * Returns 0 on success, -1 if the permit command failed.
 */
int peer_permit(GHashTable *conf, peer *p)
{
    unsigned int login_timeout;
    unsigned int idle_timeout;

    g_assert(p != NULL);

    if (p->status != PEER_ACCEPT) {
        if (fw_perform_exec("PermitCmd", conf, p, TRUE) == 0) {
            p->status = PEER_ACCEPT;
        } else {
            return -1;
        }
    }

    /* Fetch each timeout once; the original re-read IdleTimeout twice and
     * hid an assignment inside the if-condition. */
    login_timeout = conf_int(conf, "LoginTimeout");
    if (login_timeout != 0) {
        p->expire = time(NULL) + login_timeout;
    }

    idle_timeout = conf_int(conf, "IdleTimeout");
    if (idle_timeout != 0) {
        p->idle_check = time(NULL) + idle_timeout;
    }

    return 0;
}
void settings_load_fields(void *st, const field *fields, int nfields) { for(int i=0;i < nfields;++i) { const field *f = &fields[i]; switch(f->type) { case TYPE_INT: *fieldint(st, f->offset) = conf_int(f->name); break; case TYPE_FLOAT: *fieldfloat(st, f->offset) = conf_float(f->name); break; case TYPE_BOOL: *fieldbool(st, f->offset) = conf_bool(f->name); break; case TYPE_STRING: { // make a copy of the string char **s = fieldstr(st, f->offset); if(*s) { free(*s); } const char *s2 = conf_string(f->name); *s = malloc(strlen(s2)+1); strcpy(*s, s2); } break; } } }
/*!
 * \brief Return the SOA expire interval padded with a grace period.
 *
 * The pad is two TCP reply timeouts, so that a zone configured with a
 * very short expiry cannot expire before the network timeout itself
 * has had a chance to fire.
 */
static uint32_t soa_graceful_expire(conf_t *conf, const knot_rdataset_t *soa)
{
	conf_val_t *reply_timeout = &conf->cache.srv_tcp_reply_timeout;
	uint32_t grace = 2 * conf_int(reply_timeout);
	return knot_soa_expire(soa) + grace;
}
/*!
 * \brief Poll the TCP fd set once and dispatch all ready events.
 *
 * Waits up to TCP_SWEEP_INTERVAL seconds, records the poll timestamp,
 * computes whether new client accepts should be throttled (either an
 * explicit throttle window is active, or the per-thread client limit
 * derived from srv_max_tcp_clients has been reached), then walks the
 * fd set handling errors, accepts (master sockets) and serves (client
 * sockets).
 *
 * \return remaining event count from poll() after processing.
 */
static int tcp_wait_for_events(tcp_context_t *tcp)
{
	/* Wait for events. */
	fdset_t *set = &tcp->set;
	int nfds = poll(set->pfd, set->n, TCP_SWEEP_INTERVAL * 1000);

	/* Mark the time of last poll call. */
	time_now(&tcp->last_poll_time);
	bool is_throttled = (tcp->last_poll_time.tv_sec < tcp->throttle_end.tv_sec);
	if (!is_throttled) {
		/* Configuration limit, infer maximal pool size. */
		rcu_read_lock();
		conf_val_t *val = &conf()->cache.srv_max_tcp_clients;
		unsigned max_per_set = MAX(conf_int(val) / conf_tcp_threads(conf()), 1);
		rcu_read_unlock();
		/* Subtract master sockets check limits. */
		is_throttled = (set->n - tcp->client_threshold) >= max_per_set;
	}

	/* Process events. */
	unsigned i = 0;
	while (nfds > 0 && i < set->n) {
		bool should_close = false;
		int fd = set->pfd[i].fd;
		if (set->pfd[i].revents & (POLLERR|POLLHUP|POLLNVAL)) {
			/* Only client sockets (index >= client_threshold) are
			 * closed on error; master listening sockets are kept. */
			should_close = (i >= tcp->client_threshold);
			--nfds;
		} else if (set->pfd[i].revents & (POLLIN)) {
			/* Master sockets */
			if (i < tcp->client_threshold) {
				if (!is_throttled && tcp_event_accept(tcp, i) == KNOT_EBUSY) {
					/* Accept queue busy: open a throttle
					 * window before trying again. */
					time_now(&tcp->throttle_end);
					tcp->throttle_end.tv_sec += tcp_throttle();
				}
			/* Client sockets */
			} else {
				if (tcp_event_serve(tcp, i) != KNOT_EOK) {
					should_close = true;
				}
			}
			--nfds;
		}
		/* Evaluate: fdset_remove() compacts the set, so 'i' is only
		 * advanced when the current slot is kept. */
		if (should_close) {
			fdset_remove(set, i);
			close(fd);
		} else {
			++i;
		}
	}

	return nfds;
}
/*!
 * \brief Number of UDP worker threads from configuration (txn variant).
 *
 * Falls back to an automatically derived optimal size when the
 * udp-workers option is unset (YP_NIL).
 */
size_t conf_udp_threads_txn(
	conf_t *conf,
	knot_db_txn_t *txn)
{
	conf_val_t workers_val = conf_get_txn(conf, txn, C_SRV, C_UDP_WORKERS);
	int64_t configured = conf_int(&workers_val);
	if (configured == YP_NIL) {
		return dt_optimal_size();
	}
	return configured;
}
/*!
 * \brief Number of background worker threads from configuration (txn variant).
 *
 * When the option is unset (YP_NIL), the default is the optimal thread
 * count capped at CONF_XFERS.
 */
size_t conf_bg_threads_txn(
	conf_t *conf,
	knot_db_txn_t *txn)
{
	conf_val_t workers_val = conf_get_txn(conf, txn, C_SRV, C_BG_WORKERS);
	int64_t configured = conf_int(&workers_val);
	if (configured == YP_NIL) {
		return MIN(dt_optimal_size(), CONF_XFERS);
	}
	return configured;
}
/*!
 * \brief Number of TCP worker threads from configuration (txn variant).
 *
 * When the option is unset (YP_NIL), the default is twice the UDP worker
 * count, but at least CONF_XFERS.
 */
size_t conf_tcp_threads_txn(
	conf_t *conf,
	knot_db_txn_t *txn)
{
	conf_val_t workers_val = conf_get_txn(conf, txn, C_SRV, C_TCP_WORKERS);
	int64_t configured = conf_int(&workers_val);
	if (configured == YP_NIL) {
		return MAX(conf_udp_threads_txn(conf, txn) * 2, CONF_XFERS);
	}
	return configured;
}
/*!
 * \brief Forward an unsatisfied query to the configured proxy remote.
 *
 * If the query was already answered (KNOT_STATE_DONE) the state is passed
 * through.  Otherwise a capture-layer requestor forwards the original
 * query to proxy->remote (UDP unless the client connection is TCP) and the
 * captured response is written into \a pkt.  On any forwarding failure the
 * response code is set to SERVFAIL.
 *
 * Fix: the original contained a mis-encoded '&param' ("¶m", a garbled
 * HTML paragraph entity), which does not compile.
 */
static int dnsproxy_fwd(int state, knot_pkt_t *pkt, struct query_data *qdata,
                        void *ctx)
{
	if (pkt == NULL || qdata == NULL || ctx == NULL) {
		return KNOT_STATE_FAIL;
	}

	/* If not already satisfied. */
	if (state == KNOT_STATE_DONE) {
		return state;
	}

	struct dnsproxy *proxy = ctx;

	/* Create a forwarding request. */
	struct knot_requestor re;
	knot_requestor_init(&re, qdata->mm);
	struct capture_param param;
	param.sink = pkt;
	int ret = knot_requestor_overlay(&re, LAYER_CAPTURE, &param);
	if (ret != KNOT_EOK) {
		return KNOT_STATE_FAIL;
	}

	/* Match the client's transport: UDP clients are proxied over UDP. */
	bool is_tcp = net_is_connected(qdata->param->socket);
	struct knot_request *req;
	req = knot_request_make(re.mm, (const struct sockaddr *)&proxy->remote,
	                        NULL, qdata->query, is_tcp ? 0 : KNOT_RQ_UDP);
	if (req == NULL) {
		return state; /* Ignore, not enough memory. */
	}

	/* Forward request. */
	ret = knot_requestor_enqueue(&re, req);
	if (ret == KNOT_EOK) {
		conf_val_t val = conf_get(conf(), C_SRV, C_TCP_HSHAKE_TIMEOUT);
		struct timeval tv = { conf_int(&val), 0 };
		ret = knot_requestor_exec(&re, &tv);
	} else {
		knot_request_free(re.mm, req);
	}

	knot_requestor_clear(&re);

	/* Check result. */
	if (ret != KNOT_EOK) {
		qdata->rcode = KNOT_RCODE_SERVFAIL;
		return KNOT_STATE_FAIL; /* Forwarding failed, SERVFAIL. */
	}

	return KNOT_STATE_DONE;
}
/*!
 * \brief Serve one ready TCP client socket.
 *
 * Handles the exchange, flushes the per-query memory pool, and on success
 * pushes the socket's idle-timeout watchdog forward.
 */
static int tcp_event_serve(tcp_context_t *tcp, unsigned i)
{
	const int client_fd = tcp->set.pfd[i].fd;
	int ret = tcp_handle(tcp, client_fd, &tcp->iov[0], &tcp->iov[1]);

	/* Flush per-query memory. */
	mp_flush(tcp->overlay.mm->ctx);

	if (ret != KNOT_EOK) {
		return ret;
	}

	/* Successful exchange: refresh the socket activity timer. */
	rcu_read_lock();
	conf_val_t *idle = &conf()->cache.srv_tcp_idle_timeout;
	fdset_set_watchdog(&tcp->set, i, conf_int(idle));
	rcu_read_unlock();

	return ret;
}
/*!
 * \brief Flush zone journal to the zone file and plan the next flush.
 *
 * A positive zonefile-sync interval schedules the next periodic flush
 * first; an empty zone is then a successful no-op.
 */
int event_flush(conf_t *conf, zone_t *zone)
{
	assert(zone);

	/* Reschedule the periodic flush when an interval is configured. */
	conf_val_t sync_val = conf_zone_get(conf, C_ZONEFILE_SYNC, zone->name);
	int64_t interval = conf_int(&sync_val);
	if (interval > 0) {
		zone_events_schedule(zone, ZONE_EVENT_FLUSH, interval);
	}

	/* Nothing to write out for an empty zone. */
	if (zone_contents_is_empty(zone->contents)) {
		return KNOT_EOK;
	}

	return zone_flush_journal(conf, zone);
}
/*! \brief Process query using requestor. */
static int zone_query_request(knot_pkt_t *query, const conf_remote_t *remote,
                              struct process_answer_param *param, knot_mm_t *mm)
{
	/* Create requestor instance. */
	struct knot_requestor re;
	int ret = knot_requestor_init(&re, mm);
	if (ret != KNOT_EOK) {
		return ret;
	}

	/* Attach the answer-processing layer. */
	ret = knot_requestor_overlay(&re, KNOT_STATE_ANSWER, param);
	if (ret != KNOT_EOK) {
		knot_requestor_clear(&re);
		return ret;
	}

	/* Build the request towards the remote, optionally bound to a
	 * source ("via") address. */
	const struct sockaddr *dst_addr = (const struct sockaddr *)&remote->addr;
	const struct sockaddr *src_addr = (const struct sockaddr *)&remote->via;
	struct knot_request *req = knot_request_make(re.mm, dst_addr, src_addr,
	                                             query, 0);
	if (req == NULL) {
		knot_requestor_clear(&re);
		return KNOT_ENOMEM;
	}

	/* Enqueue, then execute with the configured TCP reply timeout. */
	ret = knot_requestor_enqueue(&re, req);
	if (ret == KNOT_EOK) {
		conf_val_t *tmo = &param->conf->cache.srv_tcp_reply_timeout;
		int timeout_ms = conf_int(tmo) * 1000;
		ret = knot_requestor_exec(&re, timeout_ms);
	} else {
		knot_request_free(req, re.mm);
	}

	/* Cleanup. */
	knot_requestor_clear(&re);

	return ret;
}
/*!
 * \brief Accept an incoming TCP connection and arm a receive timeout.
 *
 * The SO_RCVTIMEO option (where available) is set to the configured TCP
 * idle timeout so that a silent client cannot hold the socket forever.
 *
 * \return new client descriptor, or a negative error from net_accept().
 */
int tcp_accept(int fd)
{
	int incoming = net_accept(fd, NULL);
	if (incoming < 0) {
		return incoming;
	}

#ifdef SO_RCVTIMEO
	struct timeval tv;
	rcu_read_lock();
	conf_val_t *idle = &conf()->cache.srv_tcp_idle_timeout;
	tv.tv_sec = conf_int(idle);
	rcu_read_unlock();
	tv.tv_usec = 0;
	if (setsockopt(incoming, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0) {
		log_warning("TCP, failed to set up watchdog timer"
		            ", fd %d", incoming);
	}
#endif

	return incoming;
}
/*! \brief Set EDNS section. */
static int prepare_edns(conf_t *conf, zone_t *zone, knot_pkt_t *pkt)
{
	/* Check if an extra EDNS option is configured for this zone. */
	conf_val_t opt_val = conf_zone_get(conf, C_REQUEST_EDNS_OPTION, zone->name);
	size_t opt_len;
	const uint8_t *opt_data = conf_data(&opt_val, &opt_len);
	if (opt_data == NULL) {
		return KNOT_EOK;
	}

	/* Build the OPT RR sized to the configured UDP payload limit. */
	knot_rrset_t opt_rr;
	conf_val_t *payload = &conf->cache.srv_max_udp_payload;
	int ret = knot_edns_init(&opt_rr, conf_int(payload), 0,
	                         KNOT_EDNS_VERSION, &pkt->mm);
	if (ret != KNOT_EOK) {
		return ret;
	}

	/* Config blob layout: u64 option code followed by the option data. */
	ret = knot_edns_add_option(&opt_rr, wire_read_u64(opt_data),
	                           yp_bin_len(opt_data + sizeof(uint64_t)),
	                           yp_bin(opt_data + sizeof(uint64_t)),
	                           &pkt->mm);
	if (ret != KNOT_EOK) {
		knot_rrset_clear(&opt_rr, &pkt->mm);
		return ret;
	}

	knot_pkt_begin(pkt, KNOT_ADDITIONAL);

	ret = knot_pkt_put(pkt, KNOT_COMPR_HINT_NONE, &opt_rr, KNOT_PF_FREE);
	if (ret != KNOT_EOK) {
		knot_rrset_clear(&opt_rr, &pkt->mm);
		return ret;
	}

	return KNOT_EOK;
}
/*!
 * \brief Accept a pending client on a master socket and register it.
 *
 * The new client is added to the fd set with a handshake-timeout
 * watchdog; on fdset failure the socket is closed and the (negative,
 * errno-carrying) code is returned.
 */
static int tcp_event_accept(tcp_context_t *tcp, unsigned i)
{
	/* Accept client. */
	int master_fd = tcp->set.pfd[i].fd;
	int client = tcp_accept(master_fd);
	if (client < 0) {
		return client;
	}

	/* Assign to fdset. */
	int next_id = fdset_add(&tcp->set, client, POLLIN, NULL);
	if (next_id < 0) {
		close(client);
		return next_id; /* Contains errno. */
	}

	/* Start the handshake watchdog for the new connection. */
	rcu_read_lock();
	conf_val_t *hshake = &conf()->cache.srv_tcp_hshake_timeout;
	fdset_set_watchdog(&tcp->set, next_id, conf_int(hshake));
	rcu_read_unlock();

	return KNOT_EOK;
}
/*!
 * \brief Load zone contents from the zone file, apply journal, and
 *        schedule follow-up events.
 *
 * On success the freshly built contents replace the current ones (old
 * contents are freed after an RCU grace period), and refresh/notify/
 * resign/flush events are planned as configured.  On failure a slave
 * zone falls back to scheduling a bootstrap transfer.
 *
 * \return KNOT_EOK or an error code from the failing step.
 */
int event_load(conf_t *conf, zone_t *zone)
{
	assert(zone);

	/* Take zone file mtime and load it. */
	char *filename = conf_zonefile(conf, zone->name);
	time_t mtime = zonefile_mtime(filename);
	free(filename);
	uint32_t dnssec_refresh = time(NULL);

	zone_contents_t *contents = NULL;
	int ret = zone_load_contents(conf, zone->name, &contents);
	if (ret != KNOT_EOK) {
		goto fail;
	}

	/* Store zonefile serial and apply changes from the journal. */
	zone->zonefile_serial = zone_contents_serial(contents);
	ret = zone_load_journal(conf, zone, contents);
	if (ret != KNOT_EOK) {
		goto fail;
	}

	/* Post load actions - calculate delta, sign with DNSSEC... */
	/*! \todo issue #242 dnssec signing should occur in the special event */
	ret = zone_load_post(conf, zone, contents, &dnssec_refresh);
	if (ret != KNOT_EOK) {
		if (ret == KNOT_ESPACE) {
			log_zone_error(zone->name, "journal size is too small "
			               "to fit the changes");
		} else {
			log_zone_error(zone->name, "failed to store changes into "
			               "journal (%s)", knot_strerror(ret));
		}
		goto fail;
	}

	/* Check zone contents consistency. */
	ret = zone_load_check(conf, contents);
	if (ret != KNOT_EOK) {
		goto fail;
	}

	/* Everything went alright, switch the contents. */
	zone->zonefile_mtime = mtime;
	zone_contents_t *old = zone_switch_contents(zone, contents);
	zone->flags &= ~ZONE_EXPIRED;
	/* NOTE(review): called before the NULL check below — assumes
	 * zone_contents_serial() tolerates NULL contents; confirm. */
	uint32_t old_serial = zone_contents_serial(old);
	if (old != NULL) {
		/* Wait out readers of the old contents before freeing. */
		synchronize_rcu();
		zone_contents_deep_free(&old);
	}

	/* Schedule notify and refresh after load. */
	if (zone_is_slave(conf, zone)) {
		zone_events_schedule(zone, ZONE_EVENT_REFRESH, ZONE_EVENT_NOW);
	}
	if (!zone_contents_is_empty(contents)) {
		zone_events_schedule(zone, ZONE_EVENT_NOTIFY, ZONE_EVENT_NOW);
		zone->bootstrap_retry = ZONE_EVENT_NOW;
	}

	/* Schedule zone resign. */
	conf_val_t val = conf_zone_get(conf, C_DNSSEC_SIGNING, zone->name);
	if (conf_bool(&val)) {
		schedule_dnssec(zone, dnssec_refresh);
	}

	/* Periodic execution. */
	val = conf_zone_get(conf, C_ZONEFILE_SYNC, zone->name);
	int64_t sync_timeout = conf_int(&val);
	if (sync_timeout >= 0) {
		zone_events_schedule(zone, ZONE_EVENT_FLUSH, sync_timeout);
	}

	uint32_t current_serial = zone_contents_serial(zone->contents);
	log_zone_info(zone->name, "loaded, serial %u -> %u",
	              old_serial, current_serial);

	return KNOT_EOK;

fail:
	zone_contents_deep_free(&contents);

	/* Try to bootstrap the zone if local error. */
	if (zone_is_slave(conf, zone) && !zone_events_is_scheduled(zone, ZONE_EVENT_XFER)) {
		zone_events_schedule(zone, ZONE_EVENT_XFER, ZONE_EVENT_NOW);
	}

	return ret;
}
/*!
 * \brief (Re)sign the zone with DNSSEC and schedule dependent events.
 *
 * Builds a signing changeset (dropping existing signatures when
 * ZONE_FORCE_RESIGN is set), applies it, stores it in the journal, and
 * switches to the new contents.  Afterwards the next resign is scheduled
 * from the computed refresh time; if anything changed, a NOTIFY is planned
 * and — with zonefile-sync == 0 — an immediate flush.
 *
 * Fix: the original declared a new 'int ret' for apply_changeset(),
 * shadowing the outer 'ret'; a failed changeset application therefore
 * still returned KNOT_EOK from the function.
 */
int event_dnssec(conf_t *conf, zone_t *zone)
{
	assert(zone);

	changeset_t ch;
	int ret = changeset_init(&ch, zone->name);
	if (ret != KNOT_EOK) {
		goto done;
	}

	uint32_t refresh_at = time(NULL);
	int sign_flags = 0;

	if (zone->flags & ZONE_FORCE_RESIGN) {
		log_zone_info(zone->name, "DNSSEC, dropping previous "
		              "signatures, resigning zone");
		zone->flags &= ~ZONE_FORCE_RESIGN;
		sign_flags = ZONE_SIGN_DROP_SIGNATURES;
	} else {
		log_zone_info(zone->name, "DNSSEC, signing zone");
		sign_flags = 0;
	}

	ret = knot_dnssec_zone_sign(zone->contents, &ch, sign_flags, &refresh_at);
	if (ret != KNOT_EOK) {
		goto done;
	}

	bool zone_changed = !changeset_empty(&ch);
	if (zone_changed) {
		/* Apply change.  NOTE: assign to the outer 'ret' so a failure
		 * here propagates to the caller (bugfix, see header). */
		apply_ctx_t a_ctx = { { 0 } };
		apply_init_ctx(&a_ctx);

		zone_contents_t *new_contents = NULL;
		ret = apply_changeset(&a_ctx, zone, &ch, &new_contents);
		if (ret != KNOT_EOK) {
			log_zone_error(zone->name, "DNSSEC, failed to sign zone (%s)",
			               knot_strerror(ret));
			goto done;
		}

		/* Write change to journal. */
		ret = zone_change_store(conf, zone, &ch);
		if (ret != KNOT_EOK) {
			log_zone_error(zone->name, "DNSSEC, failed to sign zone (%s)",
			               knot_strerror(ret));
			update_rollback(&a_ctx);
			update_free_zone(&new_contents);
			goto done;
		}

		/* Switch zone contents. */
		zone_contents_t *old_contents = zone_switch_contents(zone, new_contents);
		zone->flags &= ~ZONE_EXPIRED;
		synchronize_rcu();
		update_free_zone(&old_contents);

		update_cleanup(&a_ctx);
	}

	// Schedule dependent events.
	schedule_dnssec(zone, refresh_at);
	if (zone_changed) {
		zone_events_schedule(zone, ZONE_EVENT_NOTIFY, ZONE_EVENT_NOW);
		conf_val_t val = conf_zone_get(conf, C_ZONEFILE_SYNC, zone->name);
		if (conf_int(&val) == 0) {
			zone_events_schedule(zone, ZONE_EVENT_FLUSH, ZONE_EVENT_NOW);
		}
	}

done:
	changeset_clear(&ch);
	return ret;
}
/*!
 * \brief TCP event handler function.
 *
 * Receives one DNS query on \a fd (bounded by the configured TCP reply
 * timeout), runs it through the query-processing overlay, and sends back
 * every produced response over the same connection.
 *
 * Fix: the original contained a mis-encoded '&param' ("¶m", a garbled
 * HTML paragraph entity), which does not compile.
 *
 * \return KNOT_EOK, KNOT_ECONNREFUSED on receive/send failure, or an
 *         overlay setup error.
 */
static int tcp_handle(tcp_context_t *tcp, int fd,
                      struct iovec *rx, struct iovec *tx)
{
	/* Create query processing parameter. */
	struct sockaddr_storage ss;
	memset(&ss, 0, sizeof(struct sockaddr_storage));
	struct process_query_param param = {0};
	param.socket = fd;
	param.remote = &ss;
	param.server = tcp->server;
	param.thread_id = tcp->thread_id;
	rx->iov_len = KNOT_WIRE_MAX_PKTSIZE;
	tx->iov_len = KNOT_WIRE_MAX_PKTSIZE;

	/* Receive peer name (failure tolerated: address only used in logs). */
	socklen_t addrlen = sizeof(struct sockaddr_storage);
	if (getpeername(fd, (struct sockaddr *)&ss, &addrlen) < 0) {
		;
	}

	/* Timeout. */
	rcu_read_lock();
	conf_val_t *val = &conf()->cache.srv_tcp_reply_timeout;
	int timeout = conf_int(val) * 1000;
	rcu_read_unlock();

	/* Receive data. */
	int ret = net_dns_tcp_recv(fd, rx->iov_base, rx->iov_len, timeout);
	if (ret <= 0) {
		if (ret == KNOT_EAGAIN) {
			char addr_str[SOCKADDR_STRLEN] = {0};
			sockaddr_tostr(addr_str, sizeof(addr_str), &ss);
			log_warning("TCP, connection timed out, address '%s'",
			            addr_str);
		}
		return KNOT_ECONNREFUSED;
	} else {
		rx->iov_len = ret;
	}

	knot_mm_t *mm = tcp->overlay.mm;

	/* Initialize processing overlay. */
	ret = knot_overlay_init(&tcp->overlay, mm);
	if (ret != KNOT_EOK) {
		return ret;
	}
	ret = knot_overlay_add(&tcp->overlay, NS_PROC_QUERY, &param);
	if (ret != KNOT_EOK) {
		return ret;
	}

	/* Create packets. */
	knot_pkt_t *ans = knot_pkt_new(tx->iov_base, tx->iov_len, mm);
	knot_pkt_t *query = knot_pkt_new(rx->iov_base, rx->iov_len, mm);

	/* Input packet. */
	(void) knot_pkt_parse(query, 0);
	int state = knot_overlay_consume(&tcp->overlay, query);

	/* Resolve until NOOP or finished. */
	ret = KNOT_EOK;
	while (state & (KNOT_STATE_PRODUCE|KNOT_STATE_FAIL)) {
		state = knot_overlay_produce(&tcp->overlay, ans);

		/* Send, if response generation passed and wasn't ignored. */
		if (ans->size > 0 && !(state & (KNOT_STATE_FAIL|KNOT_STATE_NOOP))) {
			if (net_dns_tcp_send(fd, ans->wire, ans->size, timeout) != ans->size) {
				ret = KNOT_ECONNREFUSED;
				break;
			}
		}
	}

	/* Reset after processing. */
	knot_overlay_finish(&tcp->overlay);
	knot_overlay_deinit(&tcp->overlay);

	/* Cleanup. */
	knot_pkt_free(&query);
	knot_pkt_free(&ans);

	return ret;
}
/******* peer.c routines **********/

/*! \brief Push a peer's expiry "LoginTimeout" seconds into the future. */
void peer_extend_timeout(GHashTable *conf, peer *p)
{
	time_t now = time(NULL);
	p->expire = now + conf_int(conf, "LoginTimeout");
}
/*!
 * \brief Perform an incoming zone transfer (AXFR/IXFR) on a slave zone.
 *
 * Chooses AXFR for bootstrap (empty contents) or forced AXFR, IXFR
 * otherwise, and runs the transfer against the configured masters.  On
 * failure the next attempt is scheduled (bootstrap backoff, or SOA retry
 * plus an expire timer); on success refresh/notify/flush events are
 * rescheduled and any pending expire is cancelled.
 *
 * \return KNOT_EOK (transfer failures are handled by rescheduling),
 *         or KNOT_EOK immediately for non-slave zones.
 */
int event_xfer(conf_t *conf, zone_t *zone)
{
	assert(zone);

	/* Ignore if not slave zone. */
	if (!zone_is_slave(conf, zone)) {
		return KNOT_EOK;
	}

	struct transfer_data data = { 0 };
	const char *err_str = "";

	/* Determine transfer type. */
	bool is_bootstrap = zone_contents_is_empty(zone->contents);
	if (is_bootstrap || zone->flags & ZONE_FORCE_AXFR) {
		data.pkt_type = KNOT_QUERY_AXFR;
		err_str = "AXFR, incoming";
	} else {
		data.pkt_type = KNOT_QUERY_IXFR;
		err_str = "IXFR, incoming";
	}

	/* Execute zone transfer. */
	int ret = zone_master_try(conf, zone, try_xfer, &data, err_str);
	zone_clear_preferred_master(zone);
	if (ret != KNOT_EOK) {
		log_zone_error(zone->name, "%s, failed (%s)", err_str,
		               knot_strerror(ret));
		if (is_bootstrap) {
			/* Exponential-style backoff for bootstrap retries
			 * (delay computed by bootstrap_next). */
			zone->bootstrap_retry = bootstrap_next(zone->bootstrap_retry);
			zone_events_schedule(zone, ZONE_EVENT_XFER, zone->bootstrap_retry);
		} else {
			/* Retry per the SOA retry timer and start counting
			 * towards zone expiration. */
			const knot_rdataset_t *soa = zone_soa(zone);
			zone_events_schedule(zone, ZONE_EVENT_XFER, knot_soa_retry(soa));
			start_expire_timer(conf, zone, soa);
		}

		return KNOT_EOK;
	}

	assert(!zone_contents_is_empty(zone->contents));
	const knot_rdataset_t *soa = zone_soa(zone);

	/* Reschedule events. */
	zone_events_schedule(zone, ZONE_EVENT_REFRESH, knot_soa_refresh(soa));
	zone_events_schedule(zone, ZONE_EVENT_NOTIFY, ZONE_EVENT_NOW);
	zone_events_cancel(zone, ZONE_EVENT_EXPIRE);
	conf_val_t val = conf_zone_get(conf, C_ZONEFILE_SYNC, zone->name);
	int64_t sync_timeout = conf_int(&val);
	if (sync_timeout == 0) {
		/* zonefile-sync 0: flush immediately after every transfer. */
		zone_events_schedule(zone, ZONE_EVENT_FLUSH, ZONE_EVENT_NOW);
	} else if (sync_timeout > 0 &&
	           !zone_events_is_scheduled(zone, ZONE_EVENT_FLUSH)) {
		zone_events_schedule(zone, ZONE_EVENT_FLUSH, sync_timeout);
	}

	/* Transfer cleanup. */
	zone->bootstrap_retry = ZONE_EVENT_NOW;
	zone->flags &= ~ZONE_FORCE_AXFR;

	/* Trim extra heap. */
	if (!is_bootstrap) {
		mem_trim();
	}

	return KNOT_EOK;
}