/*!
 * \brief Read the value of \a key1_name from the default template within
 *        an open configuration DB transaction.
 *
 * \param conf       Configuration context.
 * \param txn        Open DB transaction to read through.
 * \param key1_name  Item name to look up (must not be NULL).
 *
 * \return conf_val_t with val.code set to KNOT_EOK/KNOT_ENOENT/error.
 */
conf_val_t conf_default_get_txn(
	conf_t *conf,
	knot_db_txn_t *txn,
	const yp_name_t *key1_name)
{
	conf_val_t val = { NULL };

	if (key1_name == NULL) {
		val.code = KNOT_EINVAL;
		CONF_LOG(LOG_DEBUG, "conf_default_get (%s)",
		         knot_strerror(val.code));
		return val;
	}

	// Look up key1 under the default template id.
	// NOTE(review): CONF_DEFAULT_ID appears to be length-prefixed
	// (first byte = length, data follows) — confirm against its definition.
	conf_db_get(conf, txn, C_TPL, key1_name, CONF_DEFAULT_ID + 1,
	            CONF_DEFAULT_ID[0], &val);
	switch (val.code) {
	default:
		// Unexpected error: log it; EOK/ENOENT are normal outcomes.
		CONF_LOG(LOG_ERR, "failed to read default '%s/%s' (%s)",
		         C_TPL + 1, key1_name + 1, knot_strerror(val.code));
		// FALLTHROUGH
	case KNOT_EOK:
	case KNOT_ENOENT:
		break;
	}

	return val;
}
/*!
 * \brief Check whether the identifier held in \a id exists in section
 *        \a key0_name, within an open configuration DB transaction.
 *
 * \param conf       Configuration context.
 * \param txn        Open DB transaction to read through.
 * \param key0_name  Section name (must not be NULL).
 * \param id         Identifier value; its code must be KNOT_EOK.
 *
 * \return true if the identifier exists, false otherwise (or on error).
 */
bool conf_id_exists_txn(
	conf_t *conf,
	knot_db_txn_t *txn,
	const yp_name_t *key0_name,
	conf_val_t *id)
{
	if (key0_name == NULL || id == NULL || id->code != KNOT_EOK) {
		CONF_LOG(LOG_DEBUG, "conf_id_exists (%s)",
		         knot_strerror(KNOT_EINVAL));
		return false;
	}

	// Materialize id->data / id->len from the stored value.
	conf_val(id);

	int ret = conf_db_get(conf, txn, key0_name, NULL, id->data, id->len,
	                      NULL);
	switch (ret) {
	case KNOT_EOK:
		return true;
	default:
		// Unexpected failure: log; KNOT_YP_EINVAL_ID is a normal miss.
		CONF_LOG(LOG_ERR, "failed to check '%s' for identifier (%s)",
		         key0_name + 1, knot_strerror(ret));
		// FALLTHROUGH
	case KNOT_YP_EINVAL_ID:
		return false;
	}
}
/*!
 * \brief Read the value of '\a key0_name/\a key1_name' within an open
 *        configuration DB transaction.
 *
 * \param conf       Configuration context.
 * \param txn        Open DB transaction to read through.
 * \param key0_name  Section name (must not be NULL).
 * \param key1_name  Item name (must not be NULL).
 *
 * \return conf_val_t with val.code set to KNOT_EOK/KNOT_ENOENT/error.
 */
conf_val_t conf_get_txn(
	conf_t *conf,
	knot_db_txn_t *txn,
	const yp_name_t *key0_name,
	const yp_name_t *key1_name)
{
	conf_val_t val = { NULL };

	// Both key names are mandatory.
	if (key0_name == NULL || key1_name == NULL) {
		val.code = KNOT_EINVAL;
		CONF_LOG(LOG_DEBUG, "conf_get (%s)", knot_strerror(val.code));
		return val;
	}

	conf_db_get(conf, txn, key0_name, key1_name, NULL, 0, &val);

	// EOK and ENOENT are expected outcomes; anything else is an error.
	if (val.code != KNOT_EOK && val.code != KNOT_ENOENT) {
		CONF_LOG(LOG_ERR, "failed to read '%s/%s' (%s)",
		         key0_name + 1, key1_name + 1,
		         knot_strerror(val.code));
	}

	return val;
}
/*!
 * \brief Check whether a raw identifier exists in section \a key0_name,
 *        within an open configuration DB transaction.
 *
 * \param conf       Configuration context.
 * \param txn        Open DB transaction to read through.
 * \param key0_name  Section name (must not be NULL).
 * \param id         Raw identifier bytes (must not be NULL).
 * \param id_len     Identifier length in bytes.
 *
 * \return true if the identifier exists, false otherwise (or on error).
 */
bool conf_rawid_exists_txn(
	conf_t *conf,
	knot_db_txn_t *txn,
	const yp_name_t *key0_name,
	const uint8_t *id,
	size_t id_len)
{
	if (key0_name == NULL || id == NULL) {
		CONF_LOG(LOG_DEBUG, "conf_rawid_exists (%s)",
		         knot_strerror(KNOT_EINVAL));
		return false;
	}

	int ret = conf_db_get(conf, txn, key0_name, NULL, id, id_len, NULL);
	switch (ret) {
	case KNOT_EOK:
		return true;
	default:
		// Unexpected failure: log; ENOENT/EINVAL_ID are normal misses.
		CONF_LOG(LOG_ERR, "failed to check '%s' for identifier (%s)",
		         key0_name + 1, knot_strerror(ret));
		// FALLTHROUGH
	case KNOT_ENOENT:
	case KNOT_YP_EINVAL_ID:
		return false;
	}
}
/*!
 * \brief Read the value of '\a key0_name[\a id]/\a key1_name' within an
 *        open configuration DB transaction.
 *
 * \param conf       Configuration context.
 * \param txn        Open DB transaction to read through.
 * \param key0_name  Section name (must not be NULL).
 * \param key1_name  Item name (must not be NULL).
 * \param id         Section identifier; its code must be KNOT_EOK.
 *
 * \return conf_val_t with val.code set to KNOT_EOK/KNOT_ENOENT/error.
 */
conf_val_t conf_id_get_txn(
	conf_t *conf,
	knot_db_txn_t *txn,
	const yp_name_t *key0_name,
	const yp_name_t *key1_name,
	conf_val_t *id)
{
	conf_val_t val = { NULL };

	if (key0_name == NULL || key1_name == NULL || id == NULL ||
	    id->code != KNOT_EOK) {
		val.code = KNOT_EINVAL;
		CONF_LOG(LOG_DEBUG, "conf_id_get (%s)",
		         knot_strerror(val.code));
		return val;
	}

	// Materialize id->data / id->len from the stored value.
	conf_val(id);

	conf_db_get(conf, txn, key0_name, key1_name, id->data, id->len, &val);
	switch (val.code) {
	default:
		// Unexpected error: log; EOK/ENOENT are normal outcomes.
		CONF_LOG(LOG_ERR, "failed to read '%s/%s' with identifier (%s)",
		         key0_name + 1, key1_name + 1,
		         knot_strerror(val.code));
		// FALLTHROUGH
	case KNOT_EOK:
	case KNOT_ENOENT:
		return val;
	}
}
/*!
 * \brief Read a per-zone configuration value with template fallback,
 *        within an open configuration DB transaction.
 *
 * Resolution order:
 *  1. explicit 'zone[dname]/key1' value,
 *  2. the template referenced by 'zone[dname]/template',
 *  3. the default template.
 *
 * \param conf       Configuration context.
 * \param txn        Open DB transaction to read through.
 * \param key1_name  Item name (must not be NULL).
 * \param dname      Zone name (must not be NULL).
 *
 * \return conf_val_t with val.code set to KNOT_EOK/KNOT_ENOENT/error.
 */
conf_val_t conf_zone_get_txn(
	conf_t *conf,
	knot_db_txn_t *txn,
	const yp_name_t *key1_name,
	const knot_dname_t *dname)
{
	conf_val_t val = { NULL };

	if (key1_name == NULL || dname == NULL) {
		val.code = KNOT_EINVAL;
		CONF_LOG(LOG_DEBUG, "conf_zone_get (%s)",
		         knot_strerror(val.code));
		return val;
	}

	int dname_size = knot_dname_size(dname);

	// Try to get explicit value.
	conf_db_get(conf, txn, C_ZONE, key1_name, dname, dname_size, &val);
	switch (val.code) {
	case KNOT_EOK:
		return val;
	default:
		CONF_LOG_ZONE(LOG_ERR, dname, "failed to read '%s/%s' (%s)",
		              C_ZONE + 1, key1_name + 1,
		              knot_strerror(val.code));
		// FALLTHROUGH
	case KNOT_ENOENT:
		break;
	}

	// Check if a template is available.
	conf_db_get(conf, txn, C_ZONE, C_TPL, dname, dname_size, &val);
	switch (val.code) {
	case KNOT_EOK:
		// Use the specified template.
		conf_val(&val);
		conf_db_get(conf, txn, C_TPL, key1_name, val.data, val.len,
		            &val);
		break;
	default:
		CONF_LOG_ZONE(LOG_ERR, dname, "failed to read '%s/%s' (%s)",
		              C_ZONE + 1, C_TPL + 1, knot_strerror(val.code));
		// FALLTHROUGH
	case KNOT_ENOENT:
		// Use the default template.
		conf_db_get(conf, txn, C_TPL, key1_name, CONF_DEFAULT_ID + 1,
		            CONF_DEFAULT_ID[0], &val);
	}

	// Final result check: only unexpected codes are logged.
	switch (val.code) {
	default:
		CONF_LOG_ZONE(LOG_ERR, dname, "failed to read '%s/%s' (%s)",
		              C_TPL + 1, key1_name + 1,
		              knot_strerror(val.code));
		// FALLTHROUGH
	case KNOT_EOK:
	case KNOT_ENOENT:
		break;
	}

	return val;
}
/*!
 * \brief Process one step of an outgoing AXFR (state-machine callback).
 *
 * Initializes transfer state on the first call, then fills \a pkt with as
 * many records as fit; returns KNOT_NS_PROC_FULL until the whole zone has
 * been written, then KNOT_NS_PROC_DONE.
 *
 * \param pkt    Response packet being built.
 * \param qdata  Query processing context; transfer state lives in qdata->ext.
 *
 * \return KNOT_NS_PROC_FULL, KNOT_NS_PROC_DONE or KNOT_NS_PROC_FAIL.
 */
int axfr_query_process(knot_pkt_t *pkt, struct query_data *qdata)
{
	if (pkt == NULL || qdata == NULL) {
		return KNOT_NS_PROC_FAIL;
	}

	int ret = KNOT_EOK;
	struct timeval now = {0};

	/* If AXFR is disabled, respond with NOTIMPL. */
	if (qdata->param->proc_flags & NS_QUERY_NO_AXFR) {
		qdata->rcode = KNOT_RCODE_NOTIMPL;
		return KNOT_NS_PROC_FAIL;
	}

	/* Initialize on first call. */
	if (qdata->ext == NULL) {
		ret = axfr_query_init(qdata);
		if (ret != KNOT_EOK) {
			AXFROUT_LOG(LOG_ERR, "failed to start (%s)",
			            knot_strerror(ret));
			return KNOT_NS_PROC_FAIL;
		} else {
			AXFROUT_LOG(LOG_INFO, "started, serial %u",
			            zone_contents_serial(qdata->zone->contents));
		}
	}

	/* Reserve space for TSIG. */
	knot_pkt_reserve(pkt, knot_tsig_wire_maxsize(qdata->sign.tsig_key));

	/* Answer current packet (or continue). */
	struct axfr_proc *axfr = (struct axfr_proc *)qdata->ext;
	ret = xfr_process_list(pkt, &axfr_process_node_tree, qdata);
	switch (ret) {
	case KNOT_ESPACE: /* Couldn't write more, send packet and continue. */
		return KNOT_NS_PROC_FULL; /* Check for more. */
	case KNOT_EOK:    /* Last response. */
		gettimeofday(&now, NULL);
		AXFROUT_LOG(LOG_INFO,
		            "finished, %.02f seconds, %u messages, %u bytes",
		            time_diff(&axfr->proc.tstamp, &now) / 1000.0,
		            axfr->proc.npkts, axfr->proc.nbytes);
		/* Fix: dropped the unreachable 'break' that followed this
		 * return in the original. */
		return KNOT_NS_PROC_DONE;
	default:          /* Generic error. */
		AXFROUT_LOG(LOG_ERR, "failed (%s)", knot_strerror(ret));
		return KNOT_NS_PROC_FAIL;
	}
}
/*!
 * \brief Zone event handler: SOA refresh for slave zones.
 *
 * Masters are ignored. An empty slave zone is retransferred immediately.
 * On refresh failure, a retry is scheduled using the SOA retry interval and
 * the expire timer is (re)started; on success the refresh timer is reset.
 *
 * \param conf  Configuration context.
 * \param zone  Zone to refresh (must not be NULL).
 *
 * \return KNOT_EOK (failures are handled by rescheduling, not propagated).
 */
int event_refresh(conf_t *conf, zone_t *zone)
{
	assert(zone);

	/* Ignore if not slave zone. */
	if (!zone_is_slave(conf, zone)) {
		return KNOT_EOK;
	}

	if (zone_contents_is_empty(zone->contents)) {
		/* No contents, schedule retransfer now. */
		zone_events_schedule(zone, ZONE_EVENT_XFER, ZONE_EVENT_NOW);
		return KNOT_EOK;
	}

	int ret = zone_master_try(conf, zone, try_refresh, NULL, "refresh");
	const knot_rdataset_t *soa = zone_soa(zone);
	if (ret != KNOT_EOK) {
		log_zone_error(zone->name, "refresh, failed (%s)",
		               knot_strerror(ret));
		/* Schedule next retry. */
		zone_events_schedule(zone, ZONE_EVENT_REFRESH,
		                     knot_soa_retry(soa));
		start_expire_timer(conf, zone, soa);
	} else {
		/* SOA query answered, reschedule refresh timer. */
		zone_events_schedule(zone, ZONE_EVENT_REFRESH,
		                     knot_soa_refresh(soa));
	}

	return KNOT_EOK;
}
/*!
 * \brief Zone event handler: send NOTIFY to all configured remotes.
 *
 * For each configured notify target, the addresses of the remote are tried
 * in order until one NOTIFY query succeeds.
 *
 * Fixes: restored '&notify' (source was mojibake-corrupted to '¬ify')
 * and made the address loop index size_t to match conf_val_count()'s
 * return type (avoids signed/unsigned comparison).
 *
 * \param conf  Configuration context.
 * \param zone  Zone to notify about (must not be NULL).
 *
 * \return KNOT_EOK (per-remote failures are only logged).
 */
int event_notify(conf_t *conf, zone_t *zone)
{
	assert(zone);

	/* Check zone contents. */
	if (zone_contents_is_empty(zone->contents)) {
		return KNOT_EOK;
	}

	/* Walk through configured remotes and send messages. */
	conf_val_t notify = conf_zone_get(conf, C_NOTIFY, zone->name);
	while (notify.code == KNOT_EOK) {
		conf_val_t addr = conf_id_get(conf, C_RMT, C_ADDR, &notify);
		size_t addr_count = conf_val_count(&addr);

		for (size_t i = 0; i < addr_count; i++) {
			conf_remote_t slave = conf_remote(conf, &notify, i);
			int ret = zone_query_execute(conf, zone,
			                             KNOT_QUERY_NOTIFY, &slave);
			if (ret == KNOT_EOK) {
				ZONE_QUERY_LOG(LOG_INFO, zone, &slave,
				               "NOTIFY, outgoing", "serial %u",
				               zone_contents_serial(zone->contents));
				/* One successful address per remote suffices. */
				break;
			} else {
				ZONE_QUERY_LOG(LOG_WARNING, zone, &slave,
				               "NOTIFY, outgoing", "failed (%s)",
				               knot_strerror(ret));
			}
		}

		conf_val_next(&notify);
	}

	return KNOT_EOK;
}
/*!
 * \brief Parse NSEC3 parameters and fill structure with NSEC3 parameters.
 *
 * Fix: restored '&params' — the source text was mojibake-corrupted to
 * '¶ms' (HTML-entity damage), which does not compile.
 *
 * \param params      Output structure; salt is heap-allocated on success
 *                    and owned by the caller.
 * \param salt        Hex-encoded salt string ("" means empty salt).
 * \param algorithm   NSEC3 hash algorithm number as a string.
 * \param iterations  Iteration count as a string.
 *
 * \return true on success, false on any parse error (message on stderr).
 */
static bool parse_nsec3_params(knot_nsec3_params_t *params, const char *salt,
                               const char *algorithm, const char *iterations)
{
	int result;

	result = knot_str2uint8t(algorithm, &params->algorithm);
	if (result != KNOT_EOK) {
		fprintf(stderr, "Invalid algorithm number.\n");
		return false;
	}

	result = knot_str2uint16t(iterations, &params->iterations);
	if (result != KNOT_EOK) {
		fprintf(stderr, "Invalid iteration count: %s\n",
		        knot_strerror(result));
		return false;
	}

	size_t salt_length = 0;
	uint8_t *salt_data = NULL;
	if (salt[0] != '\0') {
		result = hex_decode(salt, &salt_data, &salt_length);
		if (result != KNOT_EOK) {
			fprintf(stderr, "Invalid salt: %s\n",
			        knot_strerror(result));
			return false;
		}
	}

	/* Salt length is stored in a uint8_t field. */
	if (salt_length > UINT8_MAX) {
		fprintf(stderr, "Invalid salt: Maximal length is %d bytes.\n",
		        UINT8_MAX);
		free(salt_data);
		return false;
	}

	params->salt = salt_data;
	params->salt_length = (uint8_t)salt_length;

	return true;
}
/*!
 * \brief Master-callback: issue a normal (SOA) query against one master.
 *
 * KNOT_LAYER_ERROR results are not logged here; everything else that is
 * not KNOT_EOK produces a warning.
 *
 * \return Result of zone_query_execute().
 */
static int try_refresh(conf_t *conf, zone_t *zone, const conf_remote_t *master,
                       void *ctx)
{
	assert(zone);
	assert(master);

	int result = zone_query_execute(conf, zone, KNOT_QUERY_NORMAL, master);
	if (result == KNOT_EOK || result == KNOT_LAYER_ERROR) {
		return result;
	}

	ZONE_QUERY_LOG(LOG_WARNING, zone, master, "refresh, outgoing",
	               "failed (%s)", knot_strerror(result));

	return result;
}
/*!
 * \brief Compute the changeset describing the difference between two zones.
 *
 * Thin wrapper over zone_contents_diff() that adds debug logging.
 *
 * \return KNOT_EOK on success, otherwise the error from zone_contents_diff().
 */
int zone_contents_create_diff(const zone_contents_t *z1,
                              const zone_contents_t *z2,
                              changeset_t *changeset)
{
	int result = zone_contents_diff(z1, z2, changeset);
	if (result == KNOT_EOK) {
		dbg_zonediff("Changesets created successfully!\n");
		return KNOT_EOK;
	}

	dbg_zonediff("zone_diff: create_changesets: "
	             "Could not diff zones. "
	             "Reason: %s.\n", knot_strerror(result));
	return result;
}
/*!
 * \brief Insert a node into the zone's NSEC3 tree, creating the tree on
 *        first use, and parent the node under the zone apex.
 *
 * \param zone  Zone contents (must not be NULL).
 * \param node  Node to insert (must not be NULL).
 *
 * \return KNOT_EOK, KNOT_EINVAL, KNOT_ENOMEM or a check/insert error.
 */
static int zone_contents_add_nsec3_node(zone_contents_t *zone, zone_node_t *node)
{
	if (zone == NULL || node == NULL) {
		return KNOT_EINVAL;
	}

	int ret = 0;
	// Validate the node belongs to this zone before inserting.
	if ((ret = zone_contents_check_node(zone, node)) != 0) {
		dbg_zone("Failed node check: %s\n", knot_strerror(ret));
		return ret;
	}

	/* Create NSEC3 tree if not exists. */
	if (zone->nsec3_nodes == NULL) {
		zone->nsec3_nodes = zone_tree_create();
		if (zone->nsec3_nodes == NULL) {
			return KNOT_ENOMEM;
		}
	}

	// Insert; the return code below answers the original "how to know
	// if this is successful??" question.
	ret = zone_tree_insert(zone->nsec3_nodes, node);
	if (ret != KNOT_EOK) {
		dbg_zone("Failed to insert node into NSEC3 tree: %s.\n",
		         knot_strerror(ret));
		return ret;
	}

	// No intermediate parents are created for NSEC3 nodes: the only
	// parent is the zone apex, so set it directly.
	node_set_parent(node, zone->apex);

	// NSEC3 nodes cannot be wildcard children, so nothing more to do.

	return KNOT_EOK;
}
/*!
 * \brief Advance a configuration iterator to the next item.
 *
 * The result code is stored in iter->code; KNOT_EOK and KNOT_EOF are the
 * two expected outcomes, anything else is logged as an error.
 */
void conf_iter_next(
	conf_t *conf,
	conf_iter_t *iter)
{
	(void)conf_db_iter_next(conf, iter);

	if (iter->code != KNOT_EOK && iter->code != KNOT_EOF) {
		CONF_LOG(LOG_ERR, "failed to read next item (%s)",
		         knot_strerror(iter->code));
	}
}
/*!
 * \brief Add every RRSet owned by \a node to \a changeset (as additions).
 *
 * \return KNOT_EOK, or the first error from changeset_add_rrset().
 */
static int knot_zone_diff_add_node(const zone_node_t *node,
                                   changeset_t *changeset)
{
	/* Copy all RRSets of the node into the changeset, stopping at the
	 * first failure. */
	for (unsigned idx = 0; idx < node->rrset_count; idx++) {
		knot_rrset_t rr = node_rrset_at(node, idx);
		int result = changeset_add_rrset(changeset, &rr);
		if (result != KNOT_EOK) {
			dbg_zonediff("zone_diff: add_node: Cannot add RRSet (%s).\n",
			             knot_strerror(result));
			return result;
		}
	}

	return KNOT_EOK;
}
/*!< \todo possibly not needed! */
/*!
 * \brief Tree-walk callback: add nodes missing from the other zone to the
 *        changeset.
 *
 * \param node_ptr  Pointer to the current tree node (must not be NULL).
 * \param data      struct zone_diff_param with the comparison tree and
 *                  the target changeset.
 *
 * \return KNOT_EOK, KNOT_EINVAL, or an error from knot_zone_diff_add_node().
 */
static int knot_zone_diff_add_new_nodes(zone_node_t **node_ptr, void *data)
{
	if (node_ptr == NULL || *node_ptr == NULL || data == NULL) {
		dbg_zonediff("zone_diff: add_new_nodes: NULL arguments.\n");
		return KNOT_EINVAL;
	}

	zone_node_t *node = *node_ptr;
	struct zone_diff_param *param = (struct zone_diff_param *)data;
	if (param->changeset == NULL) {
		dbg_zonediff("zone_diff: add_new_nodes: NULL arguments.\n");
		return KNOT_EINVAL;
	}

	/*
	 * If a node is not present in the second zone, it is a new node
	 * and has to be added to changeset. Differences on the RRSet level
	 * are already handled.
	 */
	const knot_dname_t *node_owner = node->owner;
	/*
	 * Node should definitely have an owner, otherwise it would not be in
	 * the tree.
	 */
	assert(node_owner);

	// Look the owner up in the other zone's tree.
	zone_node_t *new_node = NULL;
	zone_tree_get(param->nodes, node_owner, &new_node);

	int ret = KNOT_EOK;
	if (!new_node) {
		assert(node);
		ret = knot_zone_diff_add_node(node, param->changeset);
		if (ret != KNOT_EOK) {
			dbg_zonediff("zone_diff: add_new_nodes: Cannot add "
			             "node: %p to changeset. Reason: %s.\n",
			             node->owner, knot_strerror(ret));
		}
	}

	return ret;
}
/*!
 * \brief Get the identifier of the item the iterator currently points at.
 *
 * \return conf_val_t with blob/blob_len filled and item taken from the
 *         iterator; val.code carries the DB result.
 */
conf_val_t conf_iter_id(
	conf_t *conf,
	conf_iter_t *iter)
{
	conf_val_t val = { NULL };

	val.code = conf_db_iter_id(conf, iter, &val.blob, &val.blob_len);
	if (val.code != KNOT_EOK) {
		CONF_LOG(LOG_ERR, "failed to read identifier (%s)",
		         knot_strerror(val.code));
	}

	// The item reference is taken from the iterator in every case,
	// matching the original fallthrough behavior.
	val.item = iter->item;

	return val;
}
/*!
 * \brief Add every RRSet owned by \a node to \a changeset (as removals).
 *
 * \return KNOT_EOK, or the first error from changeset_rem_rrset().
 */
static int knot_zone_diff_remove_node(changeset_t *changeset,
                                      const zone_node_t *node)
{
	/* Queue the removal of all RRSets of the node, stopping at the
	 * first failure. */
	for (unsigned idx = 0; idx < node->rrset_count; idx++) {
		knot_rrset_t rr = node_rrset_at(node, idx);
		int result = changeset_rem_rrset(changeset, &rr);
		if (result != KNOT_EOK) {
			dbg_zonediff("zone_diff: remove_node: Failed to "
			             "remove rrset. Error: %s\n",
			             knot_strerror(result));
			return result;
		}
	}

	return KNOT_EOK;
}
/*!
 * \brief Begin iteration over section \a key0_name within an open
 *        configuration DB transaction.
 *
 * Fix: corrected the log-message typo "thgrough" -> "through".
 *
 * \param conf       Configuration context.
 * \param txn        Open DB transaction to read through.
 * \param key0_name  Section name.
 *
 * \return Iterator; iter.code carries KNOT_EOK/KNOT_ENOENT/error.
 */
conf_iter_t conf_iter_txn(
	conf_t *conf,
	knot_db_txn_t *txn,
	const yp_name_t *key0_name)
{
	conf_iter_t iter = { NULL };

	(void)conf_db_iter_begin(conf, txn, key0_name, &iter);
	switch (iter.code) {
	default:
		CONF_LOG(LOG_ERR, "failed to iterate through '%s' (%s)",
		         key0_name + 1, knot_strerror(iter.code));
		// FALLTHROUGH
	case KNOT_EOK:
	case KNOT_ENOENT:
		return iter;
	}
}
/*!
 * \brief Build a NOTIFY response for \a request into \a buffer.
 *
 * \param request  Incoming NOTIFY query to respond to.
 * \param buffer   Output wire buffer.
 * \param size     In: buffer capacity; out: written response size.
 *
 * \return KNOT_EOK, KNOT_ENOMEM, KNOT_EINVAL, KNOT_ESPACE or a wire error.
 */
int notify_create_response(knot_packet_t *request, uint8_t *buffer,
                           size_t *size)
{
	knot_packet_t *response = knot_packet_new(KNOT_PACKET_PREALLOC_QUERY);
	CHECK_ALLOC_LOG(response, KNOT_ENOMEM);

	/* Set maximum packet size. */
	int rc = knot_packet_set_max_size(response, *size);
	if (rc == KNOT_EOK) {
		rc = knot_response_init_from_query(response, request, 1);
	}

	/* Aggregated result check. */
	if (rc != KNOT_EOK) {
		dbg_notify("%s: failed to init response packet: %s",
		           "notify_create_response", knot_strerror(rc));
		knot_packet_free(&response);
		// NOTE(review): the specific error 'rc' is flattened to
		// KNOT_EINVAL here — confirm callers rely on that.
		return KNOT_EINVAL;
	}

	// TODO: copy the SOA in Answer section

	// Serialize the response; 'wire' points into the packet, no free
	// needed beyond knot_packet_free().
	uint8_t *wire = NULL;
	size_t wire_size = 0;
	rc = knot_packet_to_wire(response, &wire, &wire_size);
	if (rc != KNOT_EOK) {
		knot_packet_free(&response);
		return rc;
	}

	if (wire_size > *size) {
		knot_packet_free(&response);
		return KNOT_ESPACE;
	}

	memcpy(buffer, wire, wire_size);
	*size = wire_size;

	knot_packet_dump(response);
	knot_packet_free(&response);

	return KNOT_EOK;
}
/*!
 * \brief Append an existing TSIG RR to the end of a message wire.
 *
 * Writes \a tsig_rr after the current end of \a msg, extends *msg_len and
 * increments the wire ARCOUNT by one.
 *
 * Fix: initialize 'ret' directly from the call instead of the dead
 * 'int ret = KNOT_ERROR;' placeholder that was immediately overwritten.
 *
 * \param msg          Message wire buffer.
 * \param msg_len      In/out: current message length.
 * \param msg_max_len  Total capacity of \a msg.
 * \param tsig_rr      TSIG RR to append.
 *
 * \return KNOT_EOK or an error from knot_rrset_to_wire().
 */
int knot_tsig_append(uint8_t *msg, size_t *msg_len, size_t msg_max_len,
                     const knot_rrset_t *tsig_rr)
{
	size_t tsig_wire_len = 0;
	uint16_t rr_count = 0;

	/* Write RRSet to wire */
	int ret = knot_rrset_to_wire(tsig_rr, msg + *msg_len, &tsig_wire_len,
	                             msg_max_len - *msg_len, &rr_count, NULL);
	if (ret != KNOT_EOK) {
		dbg_tsig("TSIG: rrset_to_wire = %s\n", knot_strerror(ret));
		return ret;
	}

	*msg_len += tsig_wire_len;

	knot_wire_set_arcount(msg, knot_wire_get_arcount(msg) + 1);

	return KNOT_EOK;
}
/*!
 * \brief CLI handler: parse an ADD command's arguments and insert the
 *        resulting entry into the cache database.
 *
 * Expects argv[0..5] = owner name, [1..3] rdata fields, [2] numeric value
 * (passed through atoi), [4] threat code, [5] syslog IP.
 *
 * \return 0 on success, a parse error code, or an LMDB error code.
 */
static int rosedb_add(struct cache *cache, MDB_txn *txn, int argc, char *argv[])
{
	printf("ADD %s\t%s\t%s\t%s\t%s\t%s\n",
	       argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);

	// Build the lookup key from the owner name, lower-cased.
	// NOTE(review): the knot_dname_from_str() result is not checked —
	// confirm argv[0] is validated upstream.
	knot_dname_t key[KNOT_DNAME_MAXLEN] = { '\0' };
	knot_dname_from_str(key, argv[0], sizeof(key));
	knot_dname_to_lower(key);

	struct entry entry;
	int ret = parse_rdata(&entry, argv[0], argv[1], argv[3],
	                      atoi(argv[2]), cache->pool);
	// These fields reference argv directly (no copy made here).
	entry.threat_code = argv[4];
	entry.syslog_ip = argv[5];
	if (ret != 0) {
		fprintf(stderr, "PARSE: %s\n", knot_strerror(ret));
		return ret;
	}

	ret = cache_insert(txn, cache->dbi, key, &entry);
	if (ret != 0) {
		// cache_insert reports LMDB codes, hence mdb_strerror here.
		fprintf(stderr, "%s\n", mdb_strerror(ret));
	}

	return ret;
}
/*!
 * \brief Zone event handler: (re)sign the zone with DNSSEC.
 *
 * Signs the zone into a changeset, applies it, stores it in the journal,
 * switches contents, and schedules the follow-up events (resign, NOTIFY,
 * optional immediate flush).
 *
 * Fix: the original declared a shadowing 'int ret' for apply_changeset(),
 * so on apply failure the 'done:' label returned the outer ret (KNOT_EOK)
 * and the error was silently swallowed. The result is now assigned to the
 * outer 'ret'.
 *
 * \param conf  Configuration context.
 * \param zone  Zone to sign (must not be NULL).
 *
 * \return KNOT_EOK or the first error encountered.
 */
int event_dnssec(conf_t *conf, zone_t *zone)
{
	assert(zone);

	changeset_t ch;
	int ret = changeset_init(&ch, zone->name);
	if (ret != KNOT_EOK) {
		goto done;
	}

	uint32_t refresh_at = time(NULL);
	int sign_flags = 0;

	if (zone->flags & ZONE_FORCE_RESIGN) {
		log_zone_info(zone->name, "DNSSEC, dropping previous "
		              "signatures, resigning zone");
		zone->flags &= ~ZONE_FORCE_RESIGN;
		sign_flags = ZONE_SIGN_DROP_SIGNATURES;
	} else {
		log_zone_info(zone->name, "DNSSEC, signing zone");
		sign_flags = 0;
	}

	ret = knot_dnssec_zone_sign(zone->contents, &ch, sign_flags, &refresh_at);
	if (ret != KNOT_EOK) {
		goto done;
	}

	bool zone_changed = !changeset_empty(&ch);
	if (zone_changed) {
		/* Apply change. */
		apply_ctx_t a_ctx = { { 0 } };
		apply_init_ctx(&a_ctx);

		zone_contents_t *new_contents = NULL;
		/* Assign to the outer 'ret' (bug fix: no shadowing local). */
		ret = apply_changeset(&a_ctx, zone, &ch, &new_contents);
		if (ret != KNOT_EOK) {
			log_zone_error(zone->name, "DNSSEC, failed to sign zone (%s)",
			               knot_strerror(ret));
			goto done;
		}

		/* Write change to journal. */
		ret = zone_change_store(conf, zone, &ch);
		if (ret != KNOT_EOK) {
			log_zone_error(zone->name, "DNSSEC, failed to sign zone (%s)",
			               knot_strerror(ret));
			update_rollback(&a_ctx);
			update_free_zone(&new_contents);
			goto done;
		}

		/* Switch zone contents. */
		zone_contents_t *old_contents = zone_switch_contents(zone, new_contents);
		zone->flags &= ~ZONE_EXPIRED;
		synchronize_rcu();
		update_free_zone(&old_contents);

		update_cleanup(&a_ctx);
	}

	// Schedule dependent events.
	schedule_dnssec(zone, refresh_at);
	if (zone_changed) {
		zone_events_schedule(zone, ZONE_EVENT_NOTIFY, ZONE_EVENT_NOW);
		conf_val_t val = conf_zone_get(conf, C_ZONEFILE_SYNC, zone->name);
		if (conf_int(&val) == 0) {
			zone_events_schedule(zone, ZONE_EVENT_FLUSH, ZONE_EVENT_NOW);
		}
	}

done:
	changeset_clear(&ch);
	return ret;
}
/*!
 * \brief Sign a DNS message with TSIG and append the TSIG RR to the wire.
 *
 * Builds a temporary TSIG RR, computes the MAC over the message (and the
 * request MAC, if any), appends the RR to \a msg, bumps ARCOUNT, and
 * returns the computed digest to the caller.
 *
 * \param msg                  Message wire (modified in place).
 * \param msg_len              In/out: current message length.
 * \param msg_max_len          Capacity of \a msg.
 * \param request_mac          MAC of the request being answered (may be NULL).
 * \param request_mac_len      Length of \a request_mac.
 * \param digest               Out: computed MAC.
 * \param digest_len           Out: length of the computed MAC.
 * \param key                  TSIG key to sign with.
 * \param tsig_rcode           TSIG-level RCODE (KNOT_RCODE_BADTIME handled
 *                             specially per RFC 2845).
 * \param request_time_signed  Client's time-signed value (used for BADTIME).
 *
 * \return KNOT_EOK, KNOT_EINVAL, KNOT_ENOMEM or a signing/wire error.
 */
int knot_tsig_sign(uint8_t *msg, size_t *msg_len, size_t msg_max_len,
                   const uint8_t *request_mac, size_t request_mac_len,
                   uint8_t *digest, size_t *digest_len,
                   const knot_tsig_key_t *key, uint16_t tsig_rcode,
                   uint64_t request_time_signed)
{
	if (!msg || !msg_len || !key || digest == NULL || digest_len == NULL) {
		return KNOT_EINVAL;
	}

	knot_rrset_t *tmp_tsig = knot_rrset_new(key->name, KNOT_RRTYPE_TSIG,
	                                        KNOT_CLASS_ANY, NULL);
	if (!tmp_tsig) {
		dbg_tsig("TSIG: tmp_tsig = NULL\n");
		return KNOT_ENOMEM;
	}

	/* Create rdata for TSIG RR. Only BADTIME is carried in the rdata
	 * error field; other rcodes use 0 here. */
	uint16_t rdata_rcode = 0;
	if (tsig_rcode == KNOT_RCODE_BADTIME)
		rdata_rcode = tsig_rcode;
	tsig_create_rdata(tmp_tsig, tsig_alg_to_dname(key->algorithm),
	                  knot_tsig_digest_length(key->algorithm), rdata_rcode);

	/* Distinguish BADTIME response. */
	if (tsig_rcode == KNOT_RCODE_BADTIME) {
		/* Set client's time signed into the time signed field. */
		tsig_rdata_set_time_signed(tmp_tsig, request_time_signed);

		/* Store current time into Other data (6-byte u48). */
		uint8_t time_signed[6];
		time_t curr_time = time(NULL);
		uint64_t time64 = curr_time;
		knot_wire_write_u48(time_signed, time64);

		tsig_rdata_set_other_data(tmp_tsig, 6, time_signed);
	} else {
		tsig_rdata_store_current_time(tmp_tsig);

		/* Set other len to zero (no Other data). */
		tsig_rdata_set_other_data(tmp_tsig, 0, 0);
	}

	tsig_rdata_set_fudge(tmp_tsig, KNOT_TSIG_FUDGE_DEFAULT);

	/* Set original ID from the message header. */
	tsig_rdata_set_orig_id(tmp_tsig, knot_wire_get_id(msg));

	uint8_t digest_tmp[KNOT_TSIG_MAX_DIGEST_SIZE];
	size_t digest_tmp_len = 0;

	int ret = KNOT_ERROR;
	ret = knot_tsig_create_sign_wire(msg, *msg_len, /*msg_max_len,*/
	                                 request_mac, request_mac_len,
	                                 digest_tmp, &digest_tmp_len,
	                                 tmp_tsig, key);
	if (ret != KNOT_EOK) {
		dbg_tsig("TSIG: could not create wire or sign wire: %s\n",
		         knot_strerror(ret));
		knot_rrset_free(&tmp_tsig, NULL);
		return ret;
	}

	/* Set the digest. */
	size_t tsig_wire_len = 0;

	dbg_tsig("TSIG: msg_len=%zu, msg_max_len=%zu, tsig_max_len=%zu\n",
	         *msg_len, msg_max_len, tsig_wire_len);

	uint16_t rr_count = 0;
	tsig_rdata_set_mac(tmp_tsig, digest_tmp_len, digest_tmp);

	/* Write the completed TSIG RRSet to the end of the message wire. */
	ret = knot_rrset_to_wire(tmp_tsig, msg + *msg_len,
	                         &tsig_wire_len, msg_max_len - *msg_len,
	                         &rr_count, NULL);
	if (ret != KNOT_EOK) {
		dbg_tsig("TSIG: rrset_to_wire = %s\n", knot_strerror(ret));
		*digest_len = 0;
		knot_rrset_free(&tmp_tsig, NULL);
		return ret;
	}

	knot_rrset_free(&tmp_tsig, NULL);

	dbg_tsig("TSIG: written TSIG RR (wire len %zu)\n", tsig_wire_len);
	*msg_len += tsig_wire_len;

	uint16_t arcount = knot_wire_get_arcount(msg);
	knot_wire_set_arcount(msg, ++arcount);

	// everything went ok, save the digest to the output parameter
	memcpy(digest, digest_tmp, digest_tmp_len);
	*digest_len = digest_tmp_len;

	return KNOT_EOK;
}
/*!
 * \brief Zone event handler: incoming zone transfer for slave zones.
 *
 * Chooses AXFR for bootstrap or forced-AXFR zones, IXFR otherwise; tries
 * the configured masters, and on success (re)schedules refresh/notify/flush
 * events. On failure the transfer is retried (bootstrap backoff or SOA
 * retry interval).
 *
 * \param conf  Configuration context.
 * \param zone  Zone to transfer (must not be NULL).
 *
 * \return KNOT_EOK (failures are handled by rescheduling, not propagated).
 */
int event_xfer(conf_t *conf, zone_t *zone)
{
	assert(zone);

	/* Ignore if not slave zone. */
	if (!zone_is_slave(conf, zone)) {
		return KNOT_EOK;
	}

	struct transfer_data data = { 0 };
	const char *err_str = "";

	/* Determine transfer type: full transfer when the zone is empty
	 * (bootstrap) or a full transfer was forced, incremental otherwise. */
	bool is_bootstrap = zone_contents_is_empty(zone->contents);
	if (is_bootstrap || zone->flags & ZONE_FORCE_AXFR) {
		data.pkt_type = KNOT_QUERY_AXFR;
		err_str = "AXFR, incoming";
	} else {
		data.pkt_type = KNOT_QUERY_IXFR;
		err_str = "IXFR, incoming";
	}

	/* Execute zone transfer. */
	int ret = zone_master_try(conf, zone, try_xfer, &data, err_str);
	zone_clear_preferred_master(zone);
	if (ret != KNOT_EOK) {
		log_zone_error(zone->name, "%s, failed (%s)", err_str,
		               knot_strerror(ret));
		if (is_bootstrap) {
			// Exponential-style backoff for bootstrap retries.
			zone->bootstrap_retry = bootstrap_next(zone->bootstrap_retry);
			zone_events_schedule(zone, ZONE_EVENT_XFER,
			                     zone->bootstrap_retry);
		} else {
			const knot_rdataset_t *soa = zone_soa(zone);
			zone_events_schedule(zone, ZONE_EVENT_XFER,
			                     knot_soa_retry(soa));
			start_expire_timer(conf, zone, soa);
		}

		return KNOT_EOK;
	}

	assert(!zone_contents_is_empty(zone->contents));
	const knot_rdataset_t *soa = zone_soa(zone);

	/* Reschedule events after a successful transfer. */
	zone_events_schedule(zone, ZONE_EVENT_REFRESH, knot_soa_refresh(soa));
	zone_events_schedule(zone, ZONE_EVENT_NOTIFY, ZONE_EVENT_NOW);
	zone_events_cancel(zone, ZONE_EVENT_EXPIRE);

	// zonefile-sync: 0 = flush now, >0 = flush after timeout, <0 = never.
	conf_val_t val = conf_zone_get(conf, C_ZONEFILE_SYNC, zone->name);
	int64_t sync_timeout = conf_int(&val);
	if (sync_timeout == 0) {
		zone_events_schedule(zone, ZONE_EVENT_FLUSH, ZONE_EVENT_NOW);
	} else if (sync_timeout > 0 &&
	           !zone_events_is_scheduled(zone, ZONE_EVENT_FLUSH)) {
		zone_events_schedule(zone, ZONE_EVENT_FLUSH, sync_timeout);
	}

	/* Transfer cleanup. */
	zone->bootstrap_retry = ZONE_EVENT_NOW;
	zone->flags &= ~ZONE_FORCE_AXFR;

	/* Trim extra heap. */
	if (!is_bootstrap) {
		mem_trim();
	}

	return KNOT_EOK;
}
/*!
 * \brief Verify the TSIG MAC of a received message.
 *
 * Checks time-signed, algorithm and key, recomputes the digest over the
 * wire (plus the request MAC, if any) and compares it with the MAC stored
 * in the TSIG RR.
 *
 * \param tsig_rr           Received TSIG RR (NULL => KNOT_TSIG_EBADKEY).
 * \param wire              Message wire to verify (TSIG already stripped
 *                          when \a use_times is set).
 * \param size              Wire length.
 * \param request_mac       MAC of the corresponding request (may be NULL).
 * \param request_mac_len   Length of \a request_mac.
 * \param tsig_key          Expected key.
 * \param prev_time_signed  Previous time-signed value for continuation checks.
 * \param use_times         Non-zero for multi-packet (XFR) verification.
 *
 * \return KNOT_EOK, KNOT_TSIG_EBADKEY/EBADSIG/EBADTIME or another error.
 */
static int knot_tsig_check_digest(const knot_rrset_t *tsig_rr,
                                  const uint8_t *wire, size_t size,
                                  const uint8_t *request_mac,
                                  size_t request_mac_len,
                                  const knot_tsig_key_t *tsig_key,
                                  uint64_t prev_time_signed,
                                  int use_times)
{
	if (!wire || !tsig_key) {
		return KNOT_EINVAL;
	}

	/* No TSIG record means verification failure. */
	if (tsig_rr == NULL) {
		return KNOT_TSIG_EBADKEY;
	}

	/* Check time signed. */
	int ret = knot_tsig_check_time_signed(tsig_rr, prev_time_signed);
	if (ret != KNOT_EOK) {
		return ret;
	}

	dbg_tsig_verb("TSIG: time checked.\n");

	/* Check that libknot knows the algorithm. */
	ret = knot_tsig_check_algorithm(tsig_rr);
	if (ret != KNOT_EOK) {
		return ret;
	}

	dbg_tsig_verb("TSIG: algorithm checked.\n");

	/* Check that key is valid, ie. the same as given in args. */
	ret = knot_tsig_check_key(tsig_rr, tsig_key);
	if (ret != KNOT_EOK) {
		return ret;
	}

	dbg_tsig_verb("TSIG: key validity checked.\n");

	// Work on a private copy of the wire so the signing helpers can
	// modify it without touching the caller's buffer.
	uint8_t *wire_to_sign = malloc(sizeof(uint8_t) * size);
	if (!wire_to_sign) {
		ERR_ALLOC_FAILED;
		return KNOT_ENOMEM;
	}

	memset(wire_to_sign, 0, sizeof(uint8_t) * size);
	memcpy(wire_to_sign, wire, size);

	uint8_t digest_tmp[KNOT_TSIG_MAX_DIGEST_SIZE];
	size_t digest_tmp_len = 0;
	assert(tsig_rr->rrs.rr_count > 0);

	if (use_times) {
		/* Wire is not a single packet, TSIG RRs must be stripped
		 * already. */
		ret = knot_tsig_create_sign_wire_next(wire_to_sign, size,
		                                      request_mac,
		                                      request_mac_len,
		                                      digest_tmp,
		                                      &digest_tmp_len,
		                                      tsig_rr, tsig_key);
	} else {
		ret = knot_tsig_create_sign_wire(wire_to_sign, size,
		                                 request_mac, request_mac_len,
		                                 digest_tmp, &digest_tmp_len,
		                                 tsig_rr, tsig_key);
	}

	assert(tsig_rr->rrs.rr_count > 0);
	free(wire_to_sign);

	if (ret != KNOT_EOK) {
		dbg_tsig("Failed to create wire format for checking: %s.\n",
		         knot_strerror(ret));
		return ret;
	}

	dbg_tsig_verb("TSIG: digest calculated\n");

	/* Compare MAC from TSIG RR RDATA with just computed digest. */
	/*!< \todo move to function. */
	const knot_dname_t *alg_name = tsig_rdata_alg_name(tsig_rr);
	knot_tsig_algorithm_t alg = tsig_alg_from_name(alg_name);

	/*! \todo [TSIG] TRUNCATION */
	uint16_t mac_length = tsig_rdata_mac_length(tsig_rr);
	const uint8_t *tsig_mac = tsig_rdata_mac(tsig_rr);

	// A MAC whose length differs from the algorithm's digest length
	// cannot match.
	if (mac_length != knot_tsig_digest_length(alg)) {
		dbg_tsig("TSIG: calculated digest length and given length do "
		         "not match!\n");
		return KNOT_TSIG_EBADSIG;
	}

	dbg_tsig_verb("TSIG: calc digest :\n");
	dbg_tsig_hex_verb((char *)digest_tmp, digest_tmp_len);

	dbg_tsig_verb("TSIG: given digest:\n");
	dbg_tsig_hex_verb((char *)tsig_mac, mac_length);

	// NOTE(review): memcmp is not constant-time; consider a
	// constant-time comparison for MAC checks.
	if (memcmp(tsig_mac, digest_tmp, mac_length) != 0) {
		return KNOT_TSIG_EBADSIG;
	}

	return KNOT_EOK;
}
/*!
 * \brief Zone event handler: load zone contents from the zonefile.
 *
 * Loads the zonefile, replays the journal on top, runs post-load actions
 * (diff/DNSSEC), validates, switches the live contents and schedules the
 * follow-up events. On failure, a bootstrap transfer is scheduled for
 * slave zones.
 *
 * \param conf  Configuration context.
 * \param zone  Zone to load (must not be NULL).
 *
 * \return KNOT_EOK or the first error encountered.
 */
int event_load(conf_t *conf, zone_t *zone)
{
	assert(zone);

	/* Take zone file mtime and load it. */
	char *filename = conf_zonefile(conf, zone->name);
	time_t mtime = zonefile_mtime(filename);
	free(filename);
	uint32_t dnssec_refresh = time(NULL);

	zone_contents_t *contents = NULL;
	int ret = zone_load_contents(conf, zone->name, &contents);
	if (ret != KNOT_EOK) {
		goto fail;
	}

	/* Store zonefile serial and apply changes from the journal. */
	zone->zonefile_serial = zone_contents_serial(contents);
	ret = zone_load_journal(conf, zone, contents);
	if (ret != KNOT_EOK) {
		goto fail;
	}

	/* Post load actions - calculate delta, sign with DNSSEC... */
	/*! \todo issue #242 dnssec signing should occur in the special event */
	ret = zone_load_post(conf, zone, contents, &dnssec_refresh);
	if (ret != KNOT_EOK) {
		if (ret == KNOT_ESPACE) {
			log_zone_error(zone->name, "journal size is too small "
			               "to fit the changes");
		} else {
			log_zone_error(zone->name, "failed to store changes into "
			               "journal (%s)", knot_strerror(ret));
		}
		goto fail;
	}

	/* Check zone contents consistency. */
	ret = zone_load_check(conf, contents);
	if (ret != KNOT_EOK) {
		goto fail;
	}

	/* Everything went alright, switch the contents. */
	zone->zonefile_mtime = mtime;
	zone_contents_t *old = zone_switch_contents(zone, contents);
	zone->flags &= ~ZONE_EXPIRED;
	// NOTE(review): called before the NULL check below — presumably
	// zone_contents_serial() tolerates NULL; confirm.
	uint32_t old_serial = zone_contents_serial(old);
	if (old != NULL) {
		// Wait for readers of the old contents before freeing.
		synchronize_rcu();
		zone_contents_deep_free(&old);
	}

	/* Schedule notify and refresh after load. */
	if (zone_is_slave(conf, zone)) {
		zone_events_schedule(zone, ZONE_EVENT_REFRESH, ZONE_EVENT_NOW);
	}
	if (!zone_contents_is_empty(contents)) {
		zone_events_schedule(zone, ZONE_EVENT_NOTIFY, ZONE_EVENT_NOW);
		zone->bootstrap_retry = ZONE_EVENT_NOW;
	}

	/* Schedule zone resign. */
	conf_val_t val = conf_zone_get(conf, C_DNSSEC_SIGNING, zone->name);
	if (conf_bool(&val)) {
		schedule_dnssec(zone, dnssec_refresh);
	}

	/* Periodic execution: non-negative sync timeout schedules a flush. */
	val = conf_zone_get(conf, C_ZONEFILE_SYNC, zone->name);
	int64_t sync_timeout = conf_int(&val);
	if (sync_timeout >= 0) {
		zone_events_schedule(zone, ZONE_EVENT_FLUSH, sync_timeout);
	}

	uint32_t current_serial = zone_contents_serial(zone->contents);
	log_zone_info(zone->name, "loaded, serial %u -> %u",
	              old_serial, current_serial);

	return KNOT_EOK;

fail:
	zone_contents_deep_free(&contents);

	/* Try to bootstrap the zone if local error. */
	if (zone_is_slave(conf, zone) &&
	    !zone_events_is_scheduled(zone, ZONE_EVENT_XFER)) {
		zone_events_schedule(zone, ZONE_EVENT_XFER, ZONE_EVENT_NOW);
	}

	return ret;
}
/*!
 * \brief Create a zone event query, send it, wait for the response and process it.
 *
 * \note Everything in this function is executed synchronously, returns when
 *       the query processing is either complete or an error occurs.
 *
 * Fix: restored '&param' in the tsig_* and zone_query_request() calls —
 * the source text was mojibake-corrupted to '¶m' (HTML-entity damage),
 * which does not compile.
 */
static int zone_query_execute(conf_t *conf, zone_t *zone, uint16_t pkt_type,
                              const conf_remote_t *remote)
{
	/* Create a memory pool for this task. */
	knot_mm_t mm;
	mm_ctx_mempool(&mm, MM_DEFAULT_BLKSIZE);

	/* Create a query message. */
	knot_pkt_t *query = zone_query(zone, pkt_type, &mm);
	if (query == NULL) {
		mp_delete(mm.ctx);
		return KNOT_ENOMEM;
	}

	/* Set EDNS section. */
	int ret = prepare_edns(conf, zone, query);
	if (ret != KNOT_EOK) {
		knot_pkt_free(&query);
		mp_delete(mm.ctx);
		return ret;
	}

	/* Answer processing parameters. */
	struct process_answer_param param = {
		.zone = zone,
		.conf = conf,
		.query = query,
		.remote = &remote->addr
	};

	/* Only sign when the remote has a key configured. */
	const knot_tsig_key_t *key = remote->key.name != NULL ?
	                             &remote->key : NULL;
	tsig_init(&param.tsig_ctx, key);

	ret = tsig_sign_packet(&param.tsig_ctx, query);
	if (ret != KNOT_EOK) {
		tsig_cleanup(&param.tsig_ctx);
		knot_pkt_free(&query);
		mp_delete(mm.ctx);
		return ret;
	}

	/* Process the query. */
	ret = zone_query_request(query, remote, &param, &mm);

	/* Cleanup. */
	tsig_cleanup(&param.tsig_ctx);
	knot_pkt_free(&query);
	mp_delete(mm.ctx);

	return ret;
}

/* @note Module specific, expects some variables set. */
#define ZONE_XFER_LOG(severity, pkt_type, msg, ...) \
	if (pkt_type == KNOT_QUERY_AXFR) { \
		ZONE_QUERY_LOG(severity, zone, master, "AXFR, incoming", msg, ##__VA_ARGS__); \
	} else { \
		ZONE_QUERY_LOG(severity, zone, master, "IXFR, incoming", msg, ##__VA_ARGS__); \
	}

/*! \brief Execute zone transfer request; an IXFR failure falls back to AXFR. */
static int zone_query_transfer(conf_t *conf, zone_t *zone,
                               const conf_remote_t *master, uint16_t pkt_type)
{
	assert(zone);
	assert(master);

	int ret = zone_query_execute(conf, zone, pkt_type, master);
	if (ret != KNOT_EOK) {
		/* IXFR failed, revert to AXFR. */
		if (pkt_type == KNOT_QUERY_IXFR) {
			ZONE_XFER_LOG(LOG_NOTICE, pkt_type, "fallback to AXFR");
			return zone_query_transfer(conf, zone, master,
			                           KNOT_QUERY_AXFR);
		}

		/* Log connection errors. */
		ZONE_XFER_LOG(LOG_WARNING, pkt_type, "failed (%s)",
		              knot_strerror(ret));
	}

	return ret;
}
/*!
 * \brief Process one received AXFR answer packet into the zone being built.
 *
 * Feeds every record of the answer section to the zone creator. A SOA record
 * encountered once the apex already holds a SOA marks the end of the transfer
 * (the trailing SOA of an AXFR stream).
 *
 * \param pkt   Received answer packet (must not be NULL).
 * \param proc  Transfer state; proc->contents holds the partial zone.
 *
 * \return KNOT_NS_PROC_DONE when the closing SOA was seen,
 *         KNOT_NS_PROC_MORE when more packets are expected,
 *         KNOT_NS_PROC_FAIL on a zone-creation error.
 */
static int axfr_answer_packet(knot_pkt_t *pkt, struct xfr_proc *proc)
{
	assert(pkt != NULL);
	assert(proc != NULL);

	/* Update counters. */
	proc->npkts += 1;
	proc->nbytes += pkt->size;

	/* Init zone creator. */
	zcreator_t zc = {.z = proc->contents, .master = false, .ret = KNOT_EOK };

	const knot_pktsection_t *answer = knot_pkt_section(pkt, KNOT_ANSWER);
	for (uint16_t i = 0; i < answer->count; ++i) {
		const knot_rrset_t *rr = &answer->rr[i];
		if (rr->type == KNOT_RRTYPE_SOA &&
		    node_rrtype_exists(zc.z->apex, KNOT_RRTYPE_SOA)) {
			/* Second SOA at the apex: the transfer is complete. */
			return KNOT_NS_PROC_DONE;
		} else {
			int ret = zcreator_step(&zc, rr);
			if (ret != KNOT_EOK) {
				return KNOT_NS_PROC_FAIL;
			}
		}
	}

	return KNOT_NS_PROC_MORE;
}

/*!
 * \brief Entry point for processing an incoming AXFR response.
 *
 * Validates the RCODE, lazily initializes transfer state on the first packet,
 * then feeds the packet to axfr_answer_packet(). On the final packet the zone
 * is finalized and published.
 *
 * \param pkt    Received answer packet.
 * \param adata  Answer-processing context; adata->ext carries xfr_proc state.
 *
 * \return KNOT_NS_PROC_* state code.
 */
int axfr_answer_process(knot_pkt_t *pkt, struct answer_data *adata)
{
	if (pkt == NULL || adata == NULL) {
		return KNOT_NS_PROC_FAIL;
	}

	/* Check RCODE; anything but NOERROR aborts the transfer.
	 * NOTE(review): an unknown rcode (lut == NULL) fails silently with no
	 * log line — presumably acceptable, but verify against logging policy. */
	uint8_t rcode = knot_wire_get_rcode(pkt->wire);
	if (rcode != KNOT_RCODE_NOERROR) {
		lookup_table_t *lut = lookup_by_id(knot_rcode_names, rcode);
		if (lut != NULL) {
			AXFRIN_LOG(LOG_ERR, "server responded with %s", lut->name);
		}
		return KNOT_NS_PROC_FAIL;
	}

	/* Initialize processing with first packet (adata->ext unset).
	 * NOTE(review): NS_NEED_TSIG_SIGNED is a project macro — it appears to
	 * bail out of this function on TSIG failure; confirm in its definition.
	 * The second argument (0 vs 100) presumably controls how many unsigned
	 * packets are tolerated between signed ones — verify. */
	if (adata->ext == NULL) {
		NS_NEED_TSIG_SIGNED(&adata->param->tsig_ctx, 0);
		AXFRIN_LOG(LOG_INFO, "starting");
		int ret = axfr_answer_init(adata);
		if (ret != KNOT_EOK) {
			AXFRIN_LOG(LOG_ERR, "failed (%s)", knot_strerror(ret));
			return KNOT_NS_PROC_FAIL;
		}
	} else {
		NS_NEED_TSIG_SIGNED(&adata->param->tsig_ctx, 100);
	}

	/* Process answer packet. */
	int ret = axfr_answer_packet(pkt, (struct xfr_proc *)adata->ext);
	if (ret == KNOT_NS_PROC_DONE) {
		NS_NEED_TSIG_SIGNED(&adata->param->tsig_ctx, 0);

		/* This was the last packet, finalize zone and publish it. */
		int fret = axfr_answer_finalize(adata);
		if (fret != KNOT_EOK) {
			ret = KNOT_NS_PROC_FAIL;
		}
	}

	return ret;
}
/*!
 * \brief Open a journal file and load/validate its on-disk metadata.
 *
 * In lazy mode only a shell structure is allocated and the file is not
 * touched. Otherwise the file is opened, write-locked, and its header
 * (magic, CRC, node limits, queue pointers, node table) is read and checked;
 * an old-version or CRC-mismatched journal is recreated from scratch, and a
 * crash-inconsistent node queue triggers metadata recovery.
 *
 * \param fn       Journal file path (NULL returns NULL).
 * \param fslimit  File size limit; 0 means unlimited (FSLIMIT_INF).
 * \param mode     Open mode flags (JOURNAL_LAZY supported).
 * \param bflags   Default block flags stored in the structure.
 *
 * \return Allocated journal handle, or NULL on any failure.
 *         Caller owns the handle (refs is initialized to 1).
 */
journal_t* journal_open(const char *fn, size_t fslimit, int mode, uint16_t bflags)
{
	/*! \todo Memory mapping may be faster than stdio? (issue #964) */
	if (fn == NULL) {
		return NULL;
	}

	/* Check for lazy mode: allocate the handle only, defer file access. */
	if (mode & JOURNAL_LAZY) {
		dbg_journal("journal: opening journal %s lazily\n", fn);
		journal_t *j = malloc(sizeof(journal_t));
		if (j != NULL) {
			memset(j, 0, sizeof(journal_t));
			j->fd = -1;
			/* NOTE(review): strdup result is unchecked — j->path may
			 * be NULL on OOM; confirm downstream users tolerate it. */
			j->path = strdup(fn);
			j->fslimit = fslimit;
			j->bflags = bflags;
			j->refs = 1;
		}
		return j;
	}

	/* Open journal file for r/w (returns error if not exists). */
	int fd = open(fn, O_RDWR);
	if (fd < 0) {
		if (errno == ENOENT) {
			/* Missing file: create a fresh journal and retry. */
			if(journal_create(fn, JOURNAL_NCOUNT) == KNOT_EOK) {
				return journal_open(fn, fslimit, mode, bflags);
			}
		}
		return NULL;
	}

	/* File lock (POSIX advisory write lock over the whole file). */
	struct flock fl;
	memset(&fl, 0, sizeof(struct flock));
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;
	fl.l_pid = getpid();

	/* Attempt to lock. */
	dbg_journal_verb("journal: locking journal %s\n", fn);
	int ret = fcntl(fd, F_SETLK, &fl);

	/* Lock busy: report the holder and block until it is released. */
	if (ret < 0) {
		struct flock efl;
		memcpy(&efl, &fl, sizeof(struct flock));
		fcntl(fd, F_GETLK, &efl);
		log_server_warning("Journal file '%s' is locked by process "
		                   "PID=%d, waiting for process to "
		                   "release lock.\n",
		                   fn, efl.l_pid);
		ret = fcntl(fd, F_SETLKW, &fl);
	}
	/* From here on, fl describes an unlock request: every error path below
	 * calls fcntl(fd, F_SETLK, &fl) to release the lock before closing. */
	fl.l_type = F_UNLCK;
	dbg_journal("journal: locked journal %s (returned %d)\n", fn, ret);

	/* Read magic bytes. */
	dbg_journal("journal: reading magic bytes\n");
	const char magic_req[MAGIC_LENGTH] = JOURNAL_MAGIC;
	char magic[MAGIC_LENGTH];
	if (!sfread(magic, MAGIC_LENGTH, fd)) {
		dbg_journal_detail("journal: cannot read magic bytes\n");
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		return NULL;
	}
	if (memcmp(magic, magic_req, MAGIC_LENGTH) != 0) {
		/* Version mismatch: discard and recreate the journal. */
		log_server_warning("Journal file '%s' version is too old, "
		                   "it will be flushed.\n", fn);
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		if (journal_create(fn, JOURNAL_NCOUNT) == KNOT_EOK) {
			return journal_open(fn, fslimit, mode, bflags);
		}
		return NULL;
	}
	crc_t crc = 0;
	if (!sfread(&crc, sizeof(crc_t), fd)) {
		dbg_journal_detail("journal: cannot read CRC\n");
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		return NULL;
	}

	/* Recalculate CRC over the remainder of the file. */
	char buf[4096];
	ssize_t rb = 0;
	crc_t crc_calc = crc_init();
	while((rb = read(fd, buf, sizeof(buf))) > 0) {
		crc_calc = crc_update(crc_calc, (const unsigned char *)buf, rb);
	}

	/* Compare stored vs. computed CRC. */
	if (crc == crc_calc) {
		/* Rewind to just past the magic + CRC header. */
		if (lseek(fd, MAGIC_LENGTH + sizeof(crc_t), SEEK_SET) < 0) {
			fcntl(fd, F_SETLK, &fl);
			close(fd);
			return NULL;
		}
	} else {
		/* Corrupted content: discard and recreate the journal. */
		log_server_warning("Journal file '%s' CRC error, "
		                   "it will be flushed.\n", fn);
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		if (journal_create(fn, JOURNAL_NCOUNT) == KNOT_EOK) {
			return journal_open(fn, fslimit, mode, bflags);
		}
		return NULL;
	}

	/* Read maximum number of entries. */
	uint16_t max_nodes = 512;
	if (!sfread(&max_nodes, sizeof(uint16_t), fd)) {
		dbg_journal_detail("journal: cannot read max_nodes\n");
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		return NULL;
	}

	/* Check max_nodes, but this is ridiculous. */
	if (max_nodes == 0) {
		dbg_journal_detail("journal: max_nodes is invalid\n");
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		return NULL;
	}

	/* Allocate journal structure (header plus the trailing node table). */
	const size_t node_len = sizeof(journal_node_t);
	journal_t *j = malloc(sizeof(journal_t) + max_nodes * node_len);
	if (j == NULL) {
		dbg_journal_detail("journal: cannot allocate journal\n");
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		return NULL;
	}
	memset(j, 0, sizeof(journal_t) + max_nodes * node_len);
	j->qhead = j->qtail = 0;
	j->fd = fd;
	j->max_nodes = max_nodes;
	j->bflags = bflags;
	j->refs = 1;

	/* Load node queue state. */
	if (!sfread(&j->qhead, sizeof(uint16_t), fd)) {
		dbg_journal_detail("journal: cannot read qhead\n");
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		free(j);
		return NULL;
	}

	/* Load queue tail. */
	if (!sfread(&j->qtail, sizeof(uint16_t), fd)) {
		dbg_journal_detail("journal: cannot read qtail\n");
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		free(j);
		return NULL;
	}

	/* Check head + tail against the node table bounds. */
	if (j->qtail > max_nodes || j->qhead > max_nodes) {
		dbg_journal_detail("journal: queue pointers corrupted\n");
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		free(j);
		return NULL;
	}

	/* Load empty segment descriptor. */
	if (!sfread(&j->free, node_len, fd)) {
		dbg_journal_detail("journal: cannot read free segment ptr\n");
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		free(j);
		return NULL;
	}

	/* Read journal descriptors table. */
	if (!sfread(&j->nodes, max_nodes * node_len, fd)) {
		dbg_journal_detail("journal: cannot read node table\n");
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		free(j);
		return NULL;
	}

	/* Get journal file size. */
	struct stat st;
	if (stat(fn, &st) < 0) {
		dbg_journal_detail("journal: cannot get journal fsize\n");
		fcntl(fd, F_SETLK, &fl);
		close(fd);
		free(j);
		return NULL;
	}

	/* Set file size; 0 request maps to the "unlimited" sentinel. */
	j->fsize = st.st_size;
	if (fslimit == 0) {
		j->fslimit = FSLIMIT_INF;
	} else {
		j->fslimit = (size_t)fslimit;
	}

	dbg_journal("journal: opened journal size=%u, queue=<%u, %u>, fd=%d\n",
	            max_nodes, j->qhead, j->qtail, j->fd);

	/* Check node queue: after a clean shutdown the nodes adjacent to the
	 * queue boundaries are expected to be free; otherwise attempt recovery.
	 * NOTE(review): "<= JOURNAL_FREE" presumably means flags at or below
	 * the FREE state — confirm against the JOURNAL_* flag ordering. */
	unsigned qtail_free = (jnode_flags(j, j->qtail) <= JOURNAL_FREE);
	unsigned qhead_free = j->max_nodes - 1;
	/* Left of qhead must be free (wraps to the last node when qhead == 0). */
	if (j->qhead > 0) {
		qhead_free = (j->qhead - 1);
	}
	qhead_free = (jnode_flags(j, qhead_free) <= JOURNAL_FREE);
	if ((j->qhead != j->qtail) && (!qtail_free || !qhead_free)) {
		log_server_warning("Recovering journal '%s' metadata "
		                   "after crash.\n",
		                   fn);
		ret = journal_recover(j);
		if (ret != KNOT_EOK) {
			log_server_error("Journal file '%s' is unrecoverable, "
			                 "metadata corrupted - %s\n",
			                 fn, knot_strerror(ret));
			fcntl(fd, F_SETLK, &fl);
			close(fd);
			free(j);
			return NULL;
		}
	}

	/* Save file lock (restore lock type so the held lock can be released
	 * later via j->fl). */
	fl.l_type = F_WRLCK;
	memcpy(&j->fl, &fl, sizeof(struct flock));

	return j;
}