/**
 * IXFR.
 *
 * Parse an incoming IXFR request held in q->buffer. On success the
 * client's SOA serial (taken from the authority section, if present)
 * is stored in q->serial and QUERY_PROCESSED is returned; a malformed
 * packet yields QUERY_DISCARDED.
 */
static query_state
query_process_ixfr(query_type* q)
{
    uint16_t count = 0;
    ods_log_assert(q);
    ods_log_assert(q->buffer);
    /* caller guarantees exactly one question record */
    ods_log_assert(buffer_pkt_qdcount(q->buffer) == 1);
    /* skip header and question section */
    buffer_skip(q->buffer, BUFFER_PKT_HEADER_SIZE);
    if (!buffer_skip_rr(q->buffer, 1)) {
        ods_log_error("[%s] dropped packet: zone %s received bad ixfr "
            "request (bad question section)", query_str, q->zone->name);
        return QUERY_DISCARDED;
    }
    /* answer section is empty */
    ods_log_assert(buffer_pkt_ancount(q->buffer) == 0);
    /* examine auth section */
    q->startpos = buffer_position(q->buffer);
    count = buffer_pkt_nscount(q->buffer);
    if (count) {
        /* authority section should carry the client's current SOA */
        if (!buffer_skip_dname(q->buffer) ||
            !query_parse_soa(q->buffer, &(q->serial))) {
            ods_log_error("[%s] dropped packet: zone %s received bad ixfr "
                "request (bad soa in auth section)", query_str,
                q->zone->name);
            return QUERY_DISCARDED;
        }
        ods_log_debug("[%s] found ixfr request zone %s serial=%u",
            query_str, q->zone->name, q->serial);
        return QUERY_PROCESSED;
    }
    ods_log_debug("[%s] ixfr request zone %s has no auth section",
        query_str, q->zone->name);
    /* no serial known: serial 0 effectively requests a full transfer */
    q->serial = 0;
    return QUERY_PROCESSED;
}
/**
 * Publish the NSEC3 parameters as indicated by the signer configuration.
 *
 * Creates the NSEC3PARAM RR on first use (cached in the signconf) and
 * adds it to the zone apex. If the record already exists in the zone,
 * the cached pointer is redirected to the existing RR.
 *
 * \param[in] zone zone to publish the NSEC3PARAM record for
 * \return ODS_STATUS_OK on success, an error status otherwise
 */
ods_status
zone_publish_nsec3param(zone_type* zone)
{
    rrset_type* rrset = NULL;
    rr_type* n3prr = NULL;
    ldns_rr* rr = NULL;
    ods_status status = ODS_STATUS_OK;

    if (!zone || !zone->name || !zone->db || !zone->signconf) {
        return ODS_STATUS_ASSERT_ERR;
    }
    if (!zone->signconf->nsec3params) {
        /* NSEC: nothing to publish */
        ods_log_assert(zone->signconf->nsec_type == LDNS_RR_TYPE_NSEC);
        return ODS_STATUS_OK;
    }
    if (!zone->signconf->nsec3params->rr) {
        rr = ldns_rr_new_frm_type(LDNS_RR_TYPE_NSEC3PARAMS);
        if (!rr) {
            /* bug fix: report the actual failure (malloc) instead of the
             * stale OK value still held in 'status' */
            ods_log_error("[%s] unable to publish nsec3params for zone %s: "
                "error creating rr (%s)", zone_str, zone->name,
                ods_status2str(ODS_STATUS_MALLOC_ERR));
            return ODS_STATUS_MALLOC_ERR;
        }
        ldns_rr_set_class(rr, zone->klass);
        ldns_rr_set_ttl(rr, 0);
        ldns_rr_set_owner(rr, ldns_rdf_clone(zone->apex));
        ldns_nsec3_add_param_rdfs(rr,
            zone->signconf->nsec3params->algorithm, 0,
            zone->signconf->nsec3params->iterations,
            zone->signconf->nsec3params->salt_len,
            zone->signconf->nsec3params->salt_data);
        /**
         * Always set bit 7 of the flags to zero,
         * according to rfc5155 section 11
         */
        ldns_set_bit(ldns_rdf_data(ldns_rr_rdf(rr, 1)), 7, 0);
        zone->signconf->nsec3params->rr = rr;
    }
    ods_log_assert(zone->signconf->nsec3params->rr);
    status = zone_add_rr(zone, zone->signconf->nsec3params->rr, 0);
    if (status == ODS_STATUS_UNCHANGED) {
        /* rr already exists, adjust pointer */
        rrset = zone_lookup_rrset(zone, zone->apex,
            LDNS_RR_TYPE_NSEC3PARAMS);
        ods_log_assert(rrset);
        n3prr = rrset_lookup_rr(rrset, zone->signconf->nsec3params->rr);
        ods_log_assert(n3prr);
        if (n3prr->rr != zone->signconf->nsec3params->rr) {
            /* our cached copy is a duplicate of the one in the zone */
            ldns_rr_free(zone->signconf->nsec3params->rr);
        }
        zone->signconf->nsec3params->rr = n3prr->rr;
        status = ODS_STATUS_OK;
    } else if (status != ODS_STATUS_OK) {
        ods_log_error("[%s] unable to publish nsec3params for zone %s: "
            "error adding nsec3params (%s)", zone_str, zone->name,
            ods_status2str(status));
    }
    return status;
}
/**
 * Queue RRset for signing.
 *
 * Pushes rrset onto the signing fifo queue, sleeping in short
 * intervals while the queue is full, and increments the worker's
 * appointed-jobs counter on success. Returns without queueing when the
 * worker is asked to exit.
 */
static void
worker_queue_rrset(worker_type* worker, fifoq_type* q, rrset_type* rrset)
{
    ods_status status = ODS_STATUS_UNCHANGED;
    int tries = 0;
    ods_log_assert(worker);
    ods_log_assert(q);
    ods_log_assert(rrset);
    lock_basic_lock(&q->q_lock);
    /* fifoq_push returns ODS_STATUS_UNCHANGED while the queue is full */
    status = fifoq_push(q, (void*) rrset, worker, &tries);
    while (status == ODS_STATUS_UNCHANGED) {
        tries++;
        if (worker->need_to_exit) {
            /* shutting down: drop the rrset, do not count it as appointed */
            lock_basic_unlock(&q->q_lock);
            return;
        }
        /**
         * Apparently the queue is full. Lets take a small break to not hog CPU.
         * The worker will release the signq lock while sleeping and will
         * automatically grab the lock when the queue is nonfull.
         * Queue is nonfull at 10% of the queue size.
         */
        lock_basic_sleep(&q->q_nonfull, &q->q_lock, 5);
        status = fifoq_push(q, (void*) rrset, worker, &tries);
    }
    lock_basic_unlock(&q->q_lock);
    ods_log_assert(status == ODS_STATUS_OK);
    /* bookkeeping: one more job handed off for signing */
    lock_basic_lock(&worker->worker_lock);
    worker->jobs_appointed += 1;
    lock_basic_unlock(&worker->worker_lock);
    return;
}
/**
 * Load zone signconf.
 *
 * Loads the signer configuration for the zone and, on success, swaps
 * it in. If the denial-of-existence method changed (NSEC <-> NSEC3 or
 * new NSEC3 parameters), all existing denial records are invalidated
 * first. Returns the status of zone_load_signconf(); only statuses
 * other than OK/UNCHANGED are logged as errors.
 */
ods_status
tools_signconf(zone_type* zone)
{
    signconf_type* loaded = NULL;
    ods_status result;

    ods_log_assert(zone);
    ods_log_assert(zone->name);
    result = zone_load_signconf(zone, &loaded);
    if (result != ODS_STATUS_OK) {
        /* UNCHANGED is a normal outcome, anything else is an error */
        if (result != ODS_STATUS_UNCHANGED) {
            ods_log_error("[%s] unable to load signconf for zone %s: %s",
                tools_str, zone->name, ods_status2str(result));
        }
        return result;
    }
    ods_log_assert(loaded);
    /* Denial of Existence Rollover? */
    if (signconf_compare_denial(zone->signconf, loaded) == TASK_NSECIFY) {
        /**
         * Or NSEC -> NSEC3, or NSEC3 -> NSEC, or NSEC3 params changed.
         * All NSEC(3)s become invalid.
         */
        namedb_wipe_denial(zone->db);
        namedb_cleanup_denials(zone->db);
        namedb_init_denials(zone->db);
    }
    /* all ok, switch signer configuration */
    signconf_cleanup(zone->signconf);
    ods_log_debug("[%s] zone %s switch to new signconf", tools_str,
        zone->name);
    zone->signconf = loaded;
    signconf_log(zone->signconf, zone->name);
    zone->default_ttl = (uint32_t) duration2time(zone->signconf->soa_min);
    return result;
}
/** * Compare tasks. * */ int task_compare(const void* a, const void* b) { task_type* x = (task_type*)a; task_type* y = (task_type*)b; zone_type* zx = NULL; zone_type* zy = NULL; ods_log_assert(x); ods_log_assert(y); zx = (zone_type*) x->zone; zy = (zone_type*) y->zone; if (!ldns_dname_compare((const void*) zx->apex, (const void*) zy->apex)) { /* if dname is the same, consider the same task */ return 0; } /* order task on time, what to do, dname */ if (x->when != y->when) { return (int) x->when - y->when; } if (x->what != y->what) { return (int) x->what - y->what; } /* this is unfair, it prioritizes zones that are first in canonical line */ return ldns_dname_compare((const void*) zx->apex, (const void*) zy->apex); }
/**
 * Set udp socket to non-blocking and bind.
 *
 * \return ODS_STATUS_OK on success, ODS_STATUS_SOCK_FCNTL_NONBLOCK or
 *         ODS_STATUS_SOCK_BIND when fcntl() or bind() fails.
 */
static ods_status
sock_fcntl_and_bind(sock_type* sock, const char* node, const char* port,
    const char* stype, const char* fam)
{
    ods_log_assert(sock);
    ods_log_assert(port);
    ods_log_assert(stype);
    ods_log_assert(fam);
    if (fcntl(sock->s, F_SETFL, O_NONBLOCK) == -1) {
        ods_log_error("[%s] unable to set %s/%s socket '%s:%s' to "
            "non-blocking: fcntl() failed (%s)", sock_str, stype, fam,
            node?node:"localhost", port, strerror(errno));
        return ODS_STATUS_SOCK_FCNTL_NONBLOCK;
    }
    /* bug fix: dropped a stray strerror(errno) argument that had no
     * matching conversion in the format string (errno is meaningless
     * on this success path anyway) */
    ods_log_debug("[%s] bind %s/%s socket '%s:%s'", sock_str, stype, fam,
        node?node:"localhost", port);
    if (bind(sock->s, (struct sockaddr *) sock->addr->ai_addr,
        sock->addr->ai_addrlen) != 0) {
        ods_log_error("[%s] unable to bind %s/%s socket '%s:%s': bind() "
            "failed (%s)", sock_str, stype, fam, node?node:"localhost",
            port, strerror(errno));
        return ODS_STATUS_SOCK_BIND;
    }
    return ODS_STATUS_OK;
}
/** * Create listening socket. * */ static ods_status socket_listen(sock_type* sock, struct addrinfo hints, int socktype, const char* node, const char* port, unsigned* ip6_support) { ods_status status = ODS_STATUS_OK; int r = 0; ods_log_assert(sock); ods_log_assert(port); *ip6_support = 1; hints.ai_socktype = socktype; /* getaddrinfo */ if ((r = getaddrinfo(node, port, &hints, &sock->addr)) != 0 || !sock->addr) { ods_log_error("[%s] unable to parse address '%s:%s': getaddrinfo() " "failed (%s %s)", sock_str, node?node:"localhost", port, gai_strerror(r), #ifdef EAI_SYSTEM r==EAI_SYSTEM?(char*)strerror(errno):""); #else ""); #endif if (hints.ai_family == AF_INET6 && r==EAFNOSUPPORT) { *ip6_support = 0; } return ODS_STATUS_SOCK_GETADDRINFO; }
/**
 * Make sure that no appointed jobs have failed.
 *
 * Under the worker lock, verify that every RRset signing job appointed
 * for this task completed without failure and that the worker was not
 * asked to exit. Uses a single lock/unlock pair with a result variable
 * instead of per-branch unlocks.
 *
 * \return ODS_STATUS_OK when all jobs succeeded, ODS_STATUS_ERR otherwise
 */
static ods_status
worker_check_jobs(worker_type* worker, task_type* task)
{
    ods_status result = ODS_STATUS_OK;
    ods_log_assert(worker);
    ods_log_assert(task);
    lock_basic_lock(&worker->worker_lock);
    if (worker->jobs_failed) {
        ods_log_error("[%s[%i]] sign zone %s failed: %u RRsets failed",
            worker2str(worker->type), worker->thread_num,
            task_who2str(task), worker->jobs_failed);
        result = ODS_STATUS_ERR;
    } else if (worker->jobs_completed != worker->jobs_appointed) {
        ods_log_error("[%s[%i]] sign zone %s failed: processed %u of %u "
            "RRsets", worker2str(worker->type), worker->thread_num,
            task_who2str(task), worker->jobs_completed,
            worker->jobs_appointed);
        result = ODS_STATUS_ERR;
    } else if (worker->need_to_exit) {
        ods_log_debug("[%s[%i]] sign zone %s failed: worker needs to exit",
            worker2str(worker->type), worker->thread_num,
            task_who2str(task));
        result = ODS_STATUS_ERR;
    } else {
        ods_log_debug("[%s[%i]] sign zone %s ok: %u of %u RRsets "
            "succeeded", worker2str(worker->type), worker->thread_num,
            task_who2str(task), worker->jobs_completed,
            worker->jobs_appointed);
        ods_log_assert(worker->jobs_appointed == worker->jobs_completed);
    }
    lock_basic_unlock(&worker->worker_lock);
    return result;
}
/**
 * Check if query does not overflow.
 *
 * \return nonzero when the current write position in the response
 *         buffer has passed the usable limit (maximum length minus
 *         space reserved for e.g. trailing records)
 */
static int
query_overflow(query_type* q)
{
    ods_log_assert(q);
    ods_log_assert(q->buffer);
    return (buffer_position(q->buffer) > (q->maxlen - q->reserved_space))
        ? 1 : 0;
}
/**
 * Stop drudger (signing) worker threads: flag them to exit, wake any
 * sleepers on the sign queue, then join each thread.
 */
static void
engine_stop_drudgers(engine_type* engine)
{
#if HAVE_DRUDGERS
    size_t i = 0;
#endif
    ods_log_assert(engine);
    ods_log_assert(engine->config);
    ods_log_debug("[%s] stop drudgers", engine_str);
#if HAVE_DRUDGERS
    /* tell them to exit and wake up sleepyheads */
    for (i=0; i < (size_t) engine->config->num_signer_threads; i++) {
        engine->drudgers[i]->need_to_exit = 1;
    }
    worker_notify_all(&engine->signq->q_lock, &engine->signq->q_threshold);
    /* head count */
    for (i=0; i < (size_t) engine->config->num_signer_threads; i++) {
        /* bug fix: %i expects int but i is size_t — cast to avoid a
         * printf-style argument type mismatch */
        ods_log_debug("[%s] join drudger %i", engine_str, (int)(i+1));
        ods_thread_join(engine->drudgers[i]->thread_id);
        engine->drudgers[i]->engine = NULL;
    }
#endif
    return;
}
/**
 * Encode response.
 *
 * Write all RRsets of the response into the query buffer, grouped per
 * packet section in section order, then update the packet's section
 * counters and set the QR and AA flags.
 */
static void
response_encode(query_type* q, response_type* r)
{
    uint16_t counts[LDNS_SECTION_ANY];
    ldns_pkt_section sec;
    size_t idx;
    ods_log_assert(q);
    ods_log_assert(r);
    for (sec = LDNS_SECTION_ANSWER; sec < LDNS_SECTION_ANY; sec++) {
        counts[sec] = 0;
    }
    for (sec = LDNS_SECTION_ANSWER; sec < LDNS_SECTION_ANY; sec++) {
        for (idx = 0; idx < r->rrset_count; idx++) {
            if (r->sections[idx] != sec) {
                continue;
            }
            counts[sec] += response_encode_rrset(q, r->rrsets[idx], sec);
        }
    }
    buffer_pkt_set_ancount(q->buffer, counts[LDNS_SECTION_ANSWER]);
    buffer_pkt_set_nscount(q->buffer, counts[LDNS_SECTION_AUTHORITY]);
    buffer_pkt_set_arcount(q->buffer, counts[LDNS_SECTION_ADDITIONAL]);
    buffer_pkt_set_qr(q->buffer);
    buffer_pkt_set_aa(q->buffer);
}
/**
 * Self pipe trick (see Unix Network Programming).
 *
 * Connect to the command handler's unix domain socket and write a
 * single byte so a blocked accept()/read() wakes up (used during
 * shutdown).
 *
 * \return 0 on success, 1 on failure
 */
static int
self_pipe_trick(engine_type* engine)
{
    int sockfd, ret;
    struct sockaddr_un servaddr;
    const char* servsock_filename = ODS_SE_SOCKFILE;

    ods_log_assert(engine);
    ods_log_assert(engine->cmdhandler);
    sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (sockfd < 0) {
        ods_log_error("[%s] unable to connect to command handler: "
            "socket() failed (%s)", engine_str, strerror(errno));
        return 1;
    }
    /* memset instead of bzero: bzero was removed from POSIX.1-2008 */
    memset(&servaddr, 0, sizeof(servaddr));
    servaddr.sun_family = AF_UNIX;
    /* sizeof-1 plus the zeroed struct guarantees NUL termination */
    strncpy(servaddr.sun_path, servsock_filename,
        sizeof(servaddr.sun_path) - 1);
    ret = connect(sockfd, (const struct sockaddr*) &servaddr,
        sizeof(servaddr));
    if (ret != 0) {
        ods_log_error("[%s] unable to connect to command handler: "
            "connect() failed (%s)", engine_str, strerror(errno));
        close(sockfd);
        return 1;
    }
    /* self-pipe trick */
    ods_writen(sockfd, "", 1);
    close(sockfd);
    return 0;
}
/**
 * Drop privileges.
 *
 * Gives up privileges according to the configured username, group and
 * chroot directory, recording the resulting uid/gid on the engine.
 *
 * \return the status of privdrop()
 */
static ods_status
engine_privdrop(engine_type* engine)
{
    uid_t uid = -1;
    gid_t gid = -1;
    ods_status res = ODS_STATUS_OK;
    const char* user = NULL;
    const char* grp = NULL;

    ods_log_assert(engine);
    ods_log_assert(engine->config);
    ods_log_debug("[%s] drop privileges", engine_str);
    user = engine->config->username;
    grp = engine->config->group;
    if (user && grp) {
        ods_log_verbose("[%s] drop privileges to user %s, group %s",
            engine_str, user, grp);
    } else if (user) {
        ods_log_verbose("[%s] drop privileges to user %s", engine_str,
            user);
    } else if (grp) {
        ods_log_verbose("[%s] drop privileges to group %s", engine_str,
            grp);
    }
    if (engine->config->chroot) {
        ods_log_verbose("[%s] chroot to %s", engine_str,
            engine->config->chroot);
    }
    res = privdrop(user, grp, engine->config->chroot, &uid, &gid);
    engine->uid = uid;
    engine->gid = gid;
    privclose(user, grp);
    return res;
}
/**
 * Get time.
 *
 * \return the current time as tracked by the owning xfrhandler
 */
static time_t
notify_time(notify_type* notify)
{
    xfrhandler_type* xfh = NULL;
    ods_log_assert(notify);
    ods_log_assert(notify->xfrhandler);
    xfh = (xfrhandler_type*) notify->xfrhandler;
    return xfrhandler_time(xfh);
}
/**
 * Start zone transfer handler.
 *
 * Registers the dns handler with netio and runs the dispatch loop
 * until the handler is asked to exit. EINTR from netio_dispatch() is
 * expected (signals) and not logged.
 */
void
xfrhandler_start(xfrhandler_type* xfrhandler)
{
    ods_log_assert(xfrhandler);
    ods_log_assert(xfrhandler->engine);
    ods_log_debug("[%s] start", xfrh_str);
    /* setup */
    xfrhandler->start_time = time_now();
    /* handlers */
    netio_add_handler(xfrhandler->netio, &xfrhandler->dnshandler);
    /* service */
    while (!xfrhandler->need_to_exit) {
        /* dispatch may block for a longer period, so current is gone */
        xfrhandler->got_time = 0;
        ods_log_deeebug("[%s] netio dispatch", xfrh_str);
        if (netio_dispatch(xfrhandler->netio, NULL, NULL) != -1) {
            continue;
        }
        if (errno != EINTR) {
            ods_log_error("[%s] unable to dispatch netio: %s", xfrh_str,
                strerror(errno));
        }
    }
    /* shutdown */
    ods_log_debug("[%s] shutdown", xfrh_str);
}
/**
 * Find TSIG RR.
 *
 * Scan the additional section of the packet in q->buffer for a TSIG
 * record (possibly preceded by an EDNS OPT record) and parse it into
 * q->tsig_rr. The buffer position is restored before returning.
 * Returns 1 on success (including "no TSIG present"), 0 on malformed
 * packets or unexpected extra additional records.
 */
static int
query_find_tsig(query_type* q)
{
    size_t saved_pos = 0;
    size_t rrcount = 0;
    size_t i = 0;
    ods_log_assert(q);
    ods_log_assert(q->tsig_rr);
    ods_log_assert(q->buffer);
    if (buffer_pkt_arcount(q->buffer) == 0) {
        /* no additional records at all: trivially no tsig */
        q->tsig_rr->status = TSIG_NOT_PRESENT;
        return 1;
    }
    saved_pos = buffer_position(q->buffer);
    rrcount = buffer_pkt_qdcount(q->buffer) + buffer_pkt_ancount(q->buffer)
        + buffer_pkt_nscount(q->buffer);
    buffer_set_position(q->buffer, BUFFER_PKT_HEADER_SIZE);
    /* skip question, answer and authority records to reach the
     * additional section (question records have a different format,
     * hence the flag argument) */
    for (i=0; i < rrcount; i++) {
        if (!buffer_skip_rr(q->buffer, i < buffer_pkt_qdcount(q->buffer))) {
            buffer_set_position(q->buffer, saved_pos);
            return 0;
        }
    }
    rrcount = buffer_pkt_arcount(q->buffer);
    ods_log_assert(rrcount != 0);
    if (!tsig_rr_parse(q->tsig_rr, q->buffer)) {
        ods_log_debug("[%s] got bad tsig", query_str);
        return 0;
    }
    if (q->tsig_rr->status != TSIG_NOT_PRESENT) {
        --rrcount;
    }
    if (rrcount) {
        /* maybe an edns (OPT) record precedes the tsig */
        if (edns_rr_parse(q->edns_rr, q->buffer)) {
            --rrcount;
        }
    }
    if (rrcount && q->tsig_rr->status == TSIG_NOT_PRESENT) {
        /* see if tsig is after the edns record */
        if (!tsig_rr_parse(q->tsig_rr, q->buffer)) {
            ods_log_debug("[%s] got bad tsig", query_str);
            return 0;
        }
        if (q->tsig_rr->status != TSIG_NOT_PRESENT) {
            --rrcount;
        }
    }
    if (rrcount > 0) {
        /* anything left over in the additional section is unexpected */
        ods_log_debug("[%s] too many additional rrs", query_str);
        return 0;
    }
    buffer_set_position(q->buffer, saved_pos);
    return 1;
}
/**
 * QUERY.
 *
 * Handle an incoming query: sanity-check the packet, verify the zone
 * has a dns output adapter and that the client passes its ACL, then
 * dispatch to ixfr/axfr/regular query handling based on qtype.
 */
static query_state
query_process_query(query_type* q, ldns_rr_type qtype, engine_type* engine)
{
    dnsout_type* dnsout = NULL;
    if (!q || !q->zone) {
        return QUERY_DISCARDED;
    }
    ods_log_assert(q->zone->name);
    ods_log_debug("[%s] incoming query qtype=%s for zone %s", query_str,
        rrset_type2str(qtype), q->zone->name);
    /* sanity checks */
    if (buffer_pkt_qdcount(q->buffer) != 1 || buffer_pkt_tc(q->buffer)) {
        buffer_pkt_set_flags(q->buffer, 0);
        return query_formerr(q);
    }
    /* only ixfr requests may carry authority records (the client SOA) */
    if (buffer_pkt_ancount(q->buffer) != 0 ||
        (qtype != LDNS_RR_TYPE_IXFR && buffer_pkt_nscount(q->buffer) != 0)) {
        buffer_pkt_set_flags(q->buffer, 0);
        return query_formerr(q);
    }
    /* acl */
    if (!q->zone->adoutbound || q->zone->adoutbound->type != ADAPTER_DNS) {
        ods_log_error("[%s] zone %s is not configured to have output dns "
            "adapter", query_str, q->zone->name);
        return query_refused(q);
    }
    ods_log_assert(q->zone->adoutbound->config);
    dnsout = (dnsout_type*) q->zone->adoutbound->config;
    /* acl also in use for soa and other queries */
    if (!acl_find(dnsout->provide_xfr, &q->addr, q->tsig_rr)) {
        return query_refused(q);
    }
    /* ixfr? */
    if (qtype == LDNS_RR_TYPE_IXFR) {
        if (query_process_ixfr(q) != QUERY_PROCESSED) {
            buffer_pkt_set_flags(q->buffer, 0);
            return query_formerr(q);
        }
        query_prepare(q);
        ods_log_assert(q->zone->name);
        ods_log_debug("[%s] incoming ixfr request serial=%u for zone %s",
            query_str, q->serial, q->zone->name);
        return ixfr(q, engine);
    }
    query_prepare(q);
    /* axfr? */
    if (qtype == LDNS_RR_TYPE_AXFR) {
        ods_log_assert(q->zone->name);
        ods_log_debug("[%s] incoming axfr request for zone %s", query_str,
            q->zone->name);
        return axfr(q, engine);
    }
    /* (soa) query */
    return query_response(q, qtype);
}
/**
 * Update serial.
 *
 * Compute the next SOA serial via namedb_update_serial() and install
 * it in the apex SOA record: the record is cloned, its serial rdata
 * replaced, and the clone added back to the rrset with a diff
 * registered. A no-op (returning OK) when the serial was already
 * updated this pass.
 */
ods_status
zone_update_serial(zone_type* zone)
{
    ods_status status = ODS_STATUS_OK;
    rrset_type* rrset = NULL;
    rr_type* soa = NULL;
    ldns_rr* rr = NULL;
    ldns_rdf* soa_rdata = NULL;
    ods_log_assert(zone);
    ods_log_assert(zone->apex);
    ods_log_assert(zone->name);
    ods_log_assert(zone->db);
    ods_log_assert(zone->signconf);
    if (zone->db->serial_updated) {
        /* already done, unmark and return ok */
        ods_log_debug("[%s] zone %s soa serial already up to date",
            zone_str, zone->name);
        zone->db->serial_updated = 0;
        return ODS_STATUS_OK;
    }
    rrset = zone_lookup_rrset(zone, zone->apex, LDNS_RR_TYPE_SOA);
    ods_log_assert(rrset);
    ods_log_assert(rrset->rrs);
    ods_log_assert(rrset->rrs[0].rr);
    /* work on a clone so the original record is untouched on failure */
    rr = ldns_rr_clone(rrset->rrs[0].rr);
    if (!rr) {
        ods_log_error("[%s] unable to update zone %s soa serial: failed to "
            "clone soa rr", zone_str, zone->name);
        return ODS_STATUS_ERR;
    }
    status = namedb_update_serial(zone->db, zone->signconf->soa_serial,
        zone->db->inbserial);
    if (status != ODS_STATUS_OK) {
        ods_log_error("[%s] unable to update zone %s soa serial: %s",
            zone_str, zone->name, ods_status2str(status));
        ldns_rr_free(rr);
        return status;
    }
    ods_log_verbose("[%s] zone %s set soa serial to %u", zone_str,
        zone->name, zone->db->intserial);
    /* ldns_rr_set_rdf returns the rdf that was swapped out (or NULL) */
    soa_rdata = ldns_rr_set_rdf(rr,
        ldns_native2rdf_int32(LDNS_RDF_TYPE_INT32, zone->db->intserial),
        SE_SOA_RDATA_SERIAL);
    if (soa_rdata) {
        /* free the old serial rdf */
        ldns_rdf_deep_free(soa_rdata);
        soa_rdata = NULL;
    } else {
        ods_log_error("[%s] unable to update zone %s soa serial: failed to "
            "replace soa serial rdata", zone_str, zone->name);
        ldns_rr_free(rr);
        return ODS_STATUS_ERR;
    }
    soa = rrset_add_rr(rrset, rr);
    ods_log_assert(soa);
    rrset_diff(rrset, 0, 0);
    zone->db->serial_updated = 0;
    return ODS_STATUS_OK;
}
/**
 * Create the drudger (signing) worker objects, one per configured
 * signer thread. CHECKALLOC guards the array allocation.
 */
static void
engine_create_drudgers(engine_type* engine)
{
    size_t idx;
    size_t count;
    ods_log_assert(engine);
    ods_log_assert(engine->config);
    count = (size_t) engine->config->num_signer_threads;
    CHECKALLOC(engine->drudgers =
        (worker_type**) malloc(count * sizeof(worker_type*)));
    for (idx = 0; idx < count; idx++) {
        engine->drudgers[idx] = worker_create(idx, WORKER_DRUDGER);
    }
}
/** * Parse elements from the configuration file. * */ const char* parse_conf_string(const char* cfgfile, const char* expr, int required) { xmlDocPtr doc = NULL; xmlXPathContextPtr xpathCtx = NULL; xmlXPathObjectPtr xpathObj = NULL; xmlChar *xexpr = NULL; const char* string = NULL; ods_log_assert(expr); ods_log_assert(cfgfile); /* Load XML document */ doc = xmlParseFile(cfgfile); if (doc == NULL) { return NULL; } /* Create xpath evaluation context */ xpathCtx = xmlXPathNewContext(doc); if (xpathCtx == NULL) { ods_log_error("[%s] unable to create new XPath context for cfgile " "%s expr %s", parser_str, cfgfile, (char*) expr); xmlFreeDoc(doc); return NULL; } /* Get string */ xexpr = (unsigned char*) expr; xpathObj = xmlXPathEvalExpression(xexpr, xpathCtx); if (xpathObj == NULL || xpathObj->nodesetval == NULL || xpathObj->nodesetval->nodeNr <= 0) { if (required) { ods_log_error("[%s] unable to evaluate required element %s in " "cfgfile %s", parser_str, (char*) xexpr, cfgfile); } xmlXPathFreeContext(xpathCtx); if (xpathObj) { xmlXPathFreeObject(xpathObj); } xmlFreeDoc(doc); return NULL; } if (xpathObj->nodesetval != NULL && xpathObj->nodesetval->nodeNr > 0) { string = (const char*) xmlXPathCastToString(xpathObj); xmlXPathFreeContext(xpathCtx); xmlXPathFreeObject(xpathObj); xmlFreeDoc(doc); return string; } xmlXPathFreeContext(xpathCtx); xmlXPathFreeObject(xpathObj); xmlFreeDoc(doc); return NULL; }
/**
 * Wake up all workers.
 */
void
engine_wakeup_workers(engine_type* engine)
{
    size_t idx;
    ods_log_assert(engine);
    ods_log_assert(engine->config);
    ods_log_debug("[%s] wake up workers", engine_str);
    /* wake up sleepyheads */
    for (idx = 0; idx < (size_t) engine->config->num_worker_threads; idx++) {
        worker_wakeup(engine->workers[idx]);
    }
}
/**
 * Push an interface to the listener.
 *
 * Grows the interface array by one, copies the old entries, and fills
 * in the new interface from address/family/port. For non-empty
 * addresses the textual address is converted to binary form.
 *
 * \return pointer to the new interface, or NULL on a bad ip address
 *         (exits on allocation failure)
 */
interface_type*
listener_push(listener_type* listener, char* address, int family, char* port)
{
    interface_type* ifs_old = NULL;
    interface_type* ife = NULL;
    ods_log_assert(listener);
    ods_log_assert(address);
    ifs_old = listener->interfaces;
    listener->interfaces = (interface_type*) allocator_alloc(
        listener->allocator, (listener->count + 1) * sizeof(interface_type));
    if (!listener->interfaces) {
        ods_log_error("[%s] unable to add interface: allocator_alloc() failed",
            listener_str);
        exit(1);
    }
    if (ifs_old) {
        memcpy(listener->interfaces, ifs_old,
            (listener->count) * sizeof(interface_type));
    }
    allocator_deallocate(listener->allocator, (void*) ifs_old);
    listener->count++;
    /* alias the new slot instead of repeating the long index expression */
    ife = &listener->interfaces[listener->count - 1];
    ife->address = allocator_strdup(listener->allocator, address);
    ife->family = family;
    if (port) {
        ife->port = allocator_strdup(listener->allocator, port);
    } else {
        ife->port = NULL;
    }
    memset(&ife->addr, 0, sizeof(union acl_addr_storage));
    /* bug fix: both error logs below were missing the listener_str
     * argument, so '%s' read a nonexistent vararg (undefined behavior) */
    if (ife->family == AF_INET6 && strlen(ife->address) > 0) {
        if (inet_pton(ife->family, ife->address, &ife->addr.addr6) != 1) {
            ods_log_error("[%s] bad ip address '%s'", listener_str,
                ife->address);
            return NULL;
        }
    } else if (ife->family == AF_INET && strlen(ife->address) > 0) {
        if (inet_pton(ife->family, ife->address, &ife->addr.addr) != 1) {
            ods_log_error("[%s] bad ip address '%s'", listener_str,
                ife->address);
            return NULL;
        }
    }
    return ife;
}
/**
 * Process DNSKEY.
 *
 * Force the configured dnskey ttl from the signer configuration onto
 * the given DNSKEY RR.
 */
static void
adapi_process_dnskey(zone_type* zone, ldns_rr* rr)
{
    uint32_t dnskey_ttl;
    ods_log_assert(rr);
    ods_log_assert(zone);
    ods_log_assert(zone->name);
    ods_log_assert(zone->signconf);
    dnskey_ttl = (uint32_t) duration2time(zone->signconf->dnskey_ttl);
    ods_log_verbose("[%s] zone %s set dnskey ttl to %u", adapi_str,
        zone->name, dnskey_ttl);
    ldns_rr_set_ttl(rr, dnskey_ttl);
}
/**
 * Add RR to query.
 *
 * Append rr in wire format (owner, type, class, ttl, rdlength, rdata)
 * to the response buffer. If the rr does not fit, the buffer position
 * is rolled back to where this rr started and 0 is returned so the
 * caller can set the TC bit; returns 1 when the rr was written.
 */
int
query_add_rr(query_type* q, ldns_rr* rr)
{
    size_t i = 0;
    size_t tc_mark = 0;
    size_t rdlength_pos = 0;
    uint16_t rdlength = 0;
    ods_log_assert(q);
    ods_log_assert(q->buffer);
    ods_log_assert(rr);
    /* set truncation mark, in case rr does not fit */
    tc_mark = buffer_position(q->buffer);
    /* owner type class ttl */
    if (!buffer_available(q->buffer, ldns_rdf_size(ldns_rr_owner(rr)))) {
        goto query_add_rr_tc;
    }
    buffer_write_rdf(q->buffer, ldns_rr_owner(rr));
    if (!buffer_available(q->buffer, sizeof(uint16_t) + sizeof(uint16_t) +
        sizeof(uint32_t) + sizeof(rdlength))) {
        goto query_add_rr_tc;
    }
    buffer_write_u16(q->buffer, (uint16_t) ldns_rr_get_type(rr));
    buffer_write_u16(q->buffer, (uint16_t) ldns_rr_get_class(rr));
    buffer_write_u32(q->buffer, (uint32_t) ldns_rr_ttl(rr));
    /* skip rdlength: its value is only known after the rdata is written */
    rdlength_pos = buffer_position(q->buffer);
    buffer_skip(q->buffer, sizeof(rdlength));
    /* write rdata */
    for (i=0; i < ldns_rr_rd_count(rr); i++) {
        if (!buffer_available(q->buffer, ldns_rdf_size(ldns_rr_rdf(rr, i)))) {
            goto query_add_rr_tc;
        }
        buffer_write_rdf(q->buffer, ldns_rr_rdf(rr, i));
    }
    if (!query_overflow(q)) {
        /* write rdlength back at the position skipped earlier */
        rdlength = buffer_position(q->buffer) - rdlength_pos -
            sizeof(rdlength);
        buffer_write_u16_at(q->buffer, rdlength_pos, rdlength);
        /* position updated by buffer_write() */
        return 1;
    }
query_add_rr_tc:
    /* rr did not fit: roll back to the truncation mark */
    buffer_set_position(q->buffer, tc_mark);
    ods_log_assert(!query_overflow(q));
    return 0;
}
/**
 * Start drudger (signing) worker threads: reset each worker's exit
 * flag, attach it to the engine, and spawn its thread.
 */
void
engine_start_drudgers(engine_type* engine)
{
    size_t idx;
    ods_log_assert(engine);
    ods_log_assert(engine->config);
    ods_log_debug("[%s] start drudgers", engine_str);
    for (idx = 0; idx < (size_t) engine->config->num_signer_threads; idx++) {
        worker_type* drudger = engine->drudgers[idx];
        drudger->need_to_exit = 0;
        drudger->engine = (void*) engine;
        ods_thread_create(&drudger->thread_id, worker_thread_start, drudger);
    }
}
/**
 * Set tcp socket to reusable.
 *
 * Best effort: a setsockopt() failure is logged but not treated as
 * fatal.
 */
static void
sock_tcp_reuseaddr(sock_type* sock, const char* node, const char* port,
    int on, const char* fam)
{
    ods_log_assert(sock);
    ods_log_assert(port);
    ods_log_assert(fam);
    if (setsockopt(sock->s, SOL_SOCKET, SO_REUSEADDR, &on,
        sizeof(on)) >= 0) {
        return;
    }
    ods_log_error("[%s] unable to set tcp/%s socket '%s:%s' to "
        "reuse-addr: setsockopt() failed (%s)", sock_str, fam,
        node?node:"localhost", port, strerror(errno));
}
/** * Compare two zones. * */ static int zone_compare(const void* a, const void* b) { zone_type* x = (zone_type*)a; zone_type* y = (zone_type*)b; ods_log_assert(x); ods_log_assert(y); if (x->klass != y->klass) { if (x->klass < y->klass) { return -1; } return 1; } return ldns_dname_compare(x->apex, y->apex); }
/**
 * Listen on tcp socket.
 *
 * \return ODS_STATUS_OK on success, ODS_STATUS_SOCK_LISTEN when
 *         listen() fails.
 */
static ods_status
sock_tcp_listen(sock_type* sock, const char* node, const char* port,
    const char* fam)
{
    ods_log_assert(sock);
    ods_log_assert(port);
    ods_log_assert(fam);
    if (listen(sock->s, SOCK_TCP_BACKLOG) != -1) {
        return ODS_STATUS_OK;
    }
    ods_log_error("[%s] unable to listen on tcp/%s socket '%s:%s': "
        "listen() failed (%s)", sock_str, fam, node?node:"localhost",
        port, strerror(errno));
    return ODS_STATUS_SOCK_LISTEN;
}
/**
 * Create the drudger (signing) worker objects: allocate the drudger
 * array from the engine allocator and create one worker per configured
 * signer thread.
 */
static void
engine_create_drudgers(engine_type* engine)
{
    size_t i = 0;
    ods_log_assert(engine);
    ods_log_assert(engine->config);
    ods_log_assert(engine->allocator);
    /* NOTE(review): the allocator_alloc result is not checked; if it can
     * return NULL the loop below dereferences it — confirm that
     * allocator_alloc aborts on failure. */
    engine->drudgers = (worker_type**) allocator_alloc(engine->allocator,
        ((size_t)engine->config->num_signer_threads) * sizeof(worker_type*));
    for (i=0; i < (size_t) engine->config->num_signer_threads; i++) {
        engine->drudgers[i] = worker_create(engine->allocator, i,
            WORKER_DRUDGER);
    }
    return;
}
/**
 * Handle the 'flush' command: schedule every queued task to run
 * immediately and report back to the client.
 *
 * \return 0 (command handled)
 */
static int
run_flush(int sockfd, cmdhandler_ctx_type* context, const char *cmd)
{
    engine_type* engine = getglobalcontext(context);
    (void)cmd;
    ods_log_assert(engine);
    ods_log_assert(engine->taskq);
    ods_log_debug("[%s] flush tasks command", module_str);
    schedule_flush(engine->taskq);
    client_printf(sockfd, "All tasks scheduled immediately.\n");
    ods_log_verbose("[cmdhandler] all tasks scheduled immediately");
    return 0;
}