/**
 * Opens (creating if needed) the log file at fullpath in append mode,
 * applies the requested ownership, and installs a buffered output
 * stream into the channel data.
 *
 * On any failure sd->fd is set to -1 and the error code is returned;
 * on success sd->fd and sd->os are both valid and SUCCESS is returned.
 */
static ya_result logger_channel_file_append(const char *fullpath, uid_t uid, gid_t gid, u16 mode, file_data* sd)
{
    output_stream file_os;
    output_stream buffered_os;
    ya_result ret;

    /* open (or create) the target file for appending */
    if(FAIL(ret = file_output_stream_open_ex(fullpath, O_CREAT|O_APPEND|O_RDWR, mode, &file_os)))
    {
        sd->fd = -1;
        return ret;
    }

    /*
     * Change ownership of the file here.
     */

    int fd = fd_output_stream_get_filedescriptor(&file_os);

    if(fchown(fd, uid, gid) < 0)
    {
        ret = ERRNO_ERROR;
        output_stream_close(&file_os);
        sd->fd = -1;
        return ret;
    }

    sd->fd = fd;

    /* wrap the raw file stream into a buffered one */
    if(FAIL(ret = buffer_output_stream_init(&file_os, &buffered_os, FILE_CHANNEL_BUFFER_SIZE)))
    {
        output_stream_close(&file_os);
        sd->fd = -1;
        return ret;
    }

    /* publish the buffered stream in the channel data */
    sd->os.data = buffered_os.data;
    sd->os.vtbl = buffered_os.vtbl;

    return SUCCESS;
}
/**
 * Closes a file logger channel: flushes and closes the buffered stream,
 * releases the stored file name and the channel private data, and
 * clears the channel so it can no longer be used.
 */
static void logger_channel_file_close(logger_channel* chan)
{
    file_data* fdata = (file_data*)chan->data;

    /* push out any pending output before tearing the stream down */
    output_stream_flush(&fdata->os);
    output_stream_close(&fdata->os);

    free(fdata->file_name);

    /* detach the vtbl first: the channel is unusable from this point */
    chan->vtbl = NULL;

    fdata->os.data = NULL;
    fdata->os.vtbl = NULL;

    free(chan->data);
    chan->data = NULL;
}
/**
 * Closes an xfr input stream: reports a TSIG context that was left
 * uncleared, releases both ends of the internal pipe and the first
 * SOA record, then frees the private data and voids the stream.
 */
static void xfr_input_stream_close(input_stream *is)
{
    xfr_input_stream_data *xd = (xfr_input_stream_data*)is->data;

    if(xd->need_cleanup_tsig)
    {
        /* the reader should have cleared this already: report it */
        log_err("TSIG has not been cleared");

        xd->need_cleanup_tsig = FALSE;
    }

    output_stream_close(&xd->pipe_stream_output);
    input_stream_close(&xd->pipe_stream_input);
    free(xd->first_soa_record);

#ifdef DEBUG
    /* poison the structure to catch use-after-free early */
    memset(xd, 0xfe, sizeof(xfr_input_stream_data));
#endif

    ZFREE(xd, xfr_input_stream_data); // used to be leaked ?

    input_stream_set_void(is);
}
/**
 * Appends an IXFR wire stream to the '.ix' journal file.
 *
 * The stream must start with the SOA matching the zone's current last
 * serial (the "delete" SOA of the first page).  Pages alternate between
 * delete mode and add mode at each SOA.  If anything goes wrong while
 * writing, the file is truncated back to the last fully-valid page.
 *
 * @param jh            the journal (actually a journal_ix)
 * @param ixfr_wire_is  wire-format IXFR record stream
 *
 * @return TYPE_IXFR on success, an error code otherwise
 *
 * Fixes versus the previous revision:
 * - the journal_ix_ensure_opened() failure path returned while still
 *   holding the write lock and leaking the resource record
 * - the ftruncate() result is no longer silently ignored
 */
static ya_result journal_ix_append_ixfr_stream(journal *jh, input_stream *ixfr_wire_is)
{
    journal_ix *jix = (journal_ix*)jh;

    journal_ix_writelock(jix);

    /*
     * Move at the end of the file
     * Check that the wire starts with the last soa/serial
     * Append the wire
     * update the last serial
     */

    // read the record

    ya_result return_value;
    dns_resource_record rr;
    dns_resource_record_init(&rr);

    if((return_value = dns_resource_record_read(&rr, ixfr_wire_is)) <= 0)
    {
        /* FAIL or EOF */
        dns_resource_record_clear(&rr);
        journal_ix_writeunlock(jix);
        log_err("journal: ix: unable to read record: %r", return_value);
        return return_value;
    }

    /*
     * The first record is an SOA and our starting point (to be deleted)
     */

#ifdef DEBUG
    rdata_desc rdatadesc = {rr.tctr.qtype, rr.rdata_size, rr.rdata};
    log_debug("journal: ix: DEL %{dnsname} %{typerdatadesc}", rr.name, &rdatadesc);
#endif

    if(rr.tctr.qtype != TYPE_SOA)
    {
        u16 rtype = rr.tctr.qtype;
        dns_resource_record_clear(&rr);
        journal_ix_writeunlock(jix);
        log_err("journal: ix: expected SOA record but got %{dnstype} instead", &rtype);
        return ZDB_JOURNAL_SOA_RECORD_EXPECTED;
    }

    /*
     * check the journal file exists/is defined
     * do it now if not
     * proceed
     */

    if(((jix->first_serial == 0) && (jix->last_serial == 0)) || (jix->fd == -1))
    {
        /* the file does not exists yet */

        if(FAIL(return_value = rr_soa_get_serial(rr.rdata, rr.rdata_size, &jix->first_serial)))
        {
            dns_resource_record_clear(&rr);
            journal_ix_writeunlock(jix);
            log_err("journal: ix: unable to read record: %r", return_value);
            return return_value;
        }

        int fd = open_create_ex(jix->journal_name, O_RDWR|O_CREAT, 0644);

        if(fd < 0)
        {
            return_value = ERRNO_ERROR;
            dns_resource_record_clear(&rr);
            journal_ix_writeunlock(jix);
            log_err("journal: ix: unable to open journal file '%s': %r", jix->journal_name, return_value);
            return return_value;
        }

        log_info("journal: ix: journal file created '%s'", jix->journal_name);

        jix->fd = fd;
    }

    if(FAIL(return_value = journal_ix_ensure_opened(jix)))
    {
        /* FIX: previously returned while still holding the write lock
         * and without clearing the resource record */
        dns_resource_record_clear(&rr);
        journal_ix_writeunlock(jix);
        log_err("journal: ix: unable to open journal file '%s': %r", jix->journal_name, return_value);
        return return_value;
    }

    /* valid_* track the last known-good state so an error can roll back */
    u64 valid_offset = lseek(jix->fd, 0, SEEK_END);
    u64 current_offset = valid_offset;

    u32 valid_serial = jix->last_serial;
    u32 potential_serial = valid_serial;

    s64 valid_page_offset = jix->last_page_offset;
    s64 potential_page_offset = current_offset;

#ifdef DEBUG
    log_debug("journal: ix: ready to append to journal after serial %08x (%d) at offset %lld", valid_serial, valid_serial, valid_offset);
#endif

    u8 mode = 0; /* 0: del, 1: add */

    output_stream fos;
    output_stream bos;
    fd_output_stream_attach(&fos, jix->fd);
    buffer_output_stream_init(&bos, &fos, 512);

    for(;;)
    {
        /* write the first */

        if(FAIL(return_value = dns_resource_record_write(&rr, &bos)))
        {
            /* this is VERY bad */
            log_err("journal: ix: error writing a record to the journal: %r", return_value);
            break;
        }

        /* update the current offset */

        current_offset += return_value;

        if((return_value = dns_resource_record_read(&rr, ixfr_wire_is)) <= 0) /* no bytes read OR error, there is no macro for this */
        {
            /* error or end of stream */

            if(return_value == 0) /* end of stream */
            {
                if(mode != 0) /* on add mode so everything should be fine */
                {
                    valid_offset = current_offset;
                    valid_serial = potential_serial;
                    valid_page_offset = potential_page_offset;
                }
                else /* but on delete mode instead of add mode */
                {
                    log_err("journal: ix: ixfr stream unexpected eof");
                    return_value = UNEXPECTED_EOF; /* we have an error */
                }
            }

            break;
        }

        if(rr.tctr.qtype == TYPE_SOA)
        {
            mode ^= 1;

#ifdef DEBUG
            rdata_desc rdatadesc = {rr.tctr.qtype, rr.rdata_size, rr.rdata};
            log_debug("journal: ix: %s %{dnsname} %{typerdatadesc}", (mode!=0)?"add":"del", rr.name, &rdatadesc);
#endif
            if(mode == 0)
            {
                /*
                 * new SOA to delete
                 *
                 * it's a new "page" (delete -> add)
                 *
                 * the offset before we write this record is the highest valid one in the file
                 * so the error correcting truncation will be made at that offset
                 */

                valid_offset = current_offset;

                /*
                 * the serial number that has been added with the previous page
                 */

                valid_serial = potential_serial;

                /*
                 * the offset of the previous page
                 */

                valid_page_offset = potential_page_offset;

                /*
                 * the new page starts here : update
                 */

                potential_page_offset = current_offset;
            }
            else
            {
                /*
                 * new SOA add
                 *
                 * this is the second half of the page, we know what serial it is about
                 */

                if(FAIL(return_value = rr_soa_get_serial(rr.rdata, rr.rdata_size, &potential_serial)))
                {
                    break;
                }
            }
        }
#ifdef DEBUG
        else
        {
            rdata_desc rdatadesc = {rr.tctr.qtype, rr.rdata_size, rr.rdata};
            log_debug("journal: ix: %s %{dnsname} %{typerdatadesc}", (mode!=0)?"add":"del", rr.name, &rdatadesc);
        }
#endif
    }

    if(FAIL(return_value))
    {
        /*
         * The journal is only valid up to valid_offset with serial ...
         */

        log_err("journal: ix: rewinding journal up to last valid point (%lld)", valid_offset);

        if(ftruncate(jix->fd, valid_offset) < 0)
        {
            /* FIX: the truncation result used to be silently ignored */
            log_err("journal: ix: failed to truncate journal: %r", ERRNO_ERROR);
        }
    }

#ifdef DEBUG
    log_debug("journal: ix: page offset got from %d to %d", jix->last_page_offset, valid_page_offset);
    log_debug("journal: ix: serial got from %d to %d", jix->last_serial, valid_serial);
#endif

    jix->last_page_offset = valid_page_offset;
    jix->last_serial = valid_serial;

    /*
     * rename the file
     */

    if(ISOK(return_value))
    {
        char new_name[PATH_MAX];
        memcpy(new_name, jix->journal_name, jix->journal_name_len);
        snformat(&new_name[jix->journal_name_len - FIRST_FROM_END], 8 + 1 + 8 + 1 + IX_EXT_STRLEN + 1, "%08x-%08x." IX_EXT , jix->first_serial, jix->last_serial);
        if(rename(jix->journal_name, new_name) >= 0)
        {
            memcpy(jix->journal_name, new_name, jix->journal_name_len);
        }
    }

    /* */

#ifdef DEBUG
    log_debug("journal: ix: fd=%i from=%08x to=%08x soa@%lld file=%s", jix->fd, jix->first_serial, jix->last_serial, jix->last_page_offset, (jix->journal_name!=NULL)?jix->journal_name:"NONE-YET");
#endif

    output_stream_flush(&bos);
    fd_output_stream_detach(buffer_output_stream_get_filtered(&bos));
    output_stream_close(&bos);

    dns_resource_record_clear(&rr);

    journal_ix_writeunlock(jix);

    if(ISOK(return_value))
    {
#ifdef DEBUG
        log_debug("journal: ix: page added (fd=%i from=%08x to=%08x soa@%lld file=%s): %r", jix->fd, jix->first_serial, jix->last_serial, jix->last_page_offset, (jix->journal_name!=NULL)?jix->journal_name:"NONE-YET", return_value);
#endif
        return TYPE_IXFR; /* that's what the caller expects to handle the new journal pages */
    }
    else
    {
        log_err("journal: ix: failed to add page");
        return return_value;
    }
}
/**
 * Dumps a memory region to the logger, line_size bytes per line, as an
 * optional address column, an optional hex column and an optional text
 * column.
 *
 * @param hndl          target logger handle (ignored if NULL or level unusable)
 * @param level         log level
 * @param data_pointer_ start of the region to dump
 * @param size_         number of bytes to dump
 * @param line_size     bytes per output line
 * @param hex           print the hexadecimal column
 * @param text          print the printable-text column
 * @param address       print the address column
 *
 * Fixes versus the previous revision:
 * - the padding of an incomplete last hex line used (i & 3) == 0 while
 *   the hex loop emits its group separator at (i & 3) == 3, so the text
 *   column was misaligned whenever the final line was short
 * - hex & text replaced by the logical hex && text
 */
void log_memdump_ex(logger_handle* hndl, u32 level, const void* data_pointer_, size_t size_, size_t line_size, bool hex, bool text, bool address)
{
    if((hndl == NULL) || (level >= MSG_LEVEL_COUNT) || (hndl->channels[level].offset < 0))
    {
        return;
    }

    output_stream os;
    char buffer[4096];
    bytearray_output_stream_init((u8*)buffer, sizeof (buffer), &os);

    u8* data_pointer = (u8*)data_pointer_;
    s32 size = size_;

    int dump_size;
    int i;

    do
    {
        dump_size = MIN(line_size, size);

        u8* data;

        if(address)
        {
            osformat(&os, "%p ", data_pointer);
        }

        if(hex)
        {
            data = data_pointer;
            for(i = 0; i < dump_size; i++)
            {
                osformat(&os, "%02x", *data++);
                if((i & 3) == 3)
                {
                    output_stream_write_u8(&os, (u8)' ');
                }
            }

            /* pad an incomplete line: two spaces per missing byte plus the
             * group separator the hex loop emits after every 4th byte,
             * so the text column below stays aligned */
            for(; i < line_size; i++)
            {
                osprint(&os, "  ");
                if((i & 3) == 3) // FIX: was (i & 3) == 0, which misaligned short lines
                {
                    osprint(&os, " ");
                }
            }
        }

        if(hex && text) // FIX: logical and on booleans, not bitwise
        {
            output_stream_write(&os, (u8*)" | ", 3);
        }

        if(text)
        {
            data = data_pointer;
            for(i = 0; i < dump_size; i++)
            {
                char c = *data++;
                if(c < ' ')
                {
                    /* non-printable (and, for signed char, bytes >= 0x80) */
                    c = '.';
                }
                else if(c == '%')
                {
                    /* double the '%' — presumably the message goes through a
                     * formatter again downstream; TODO confirm */
                    output_stream_write_u8(&os, '%');
                }

                output_stream_write_u8(&os, (u8)c);
            }
        }

        data_pointer += dump_size;
        size -= dump_size;

        if(size != 0)
        {
            /* emit the completed line and start a fresh one */
            output_stream_write_u8(&os, 0);
            logger_handle_msg(hndl, level, "%s", bytearray_output_stream_buffer(&os));
            bytearray_output_stream_reset(&os);
        }
    }
    while(size > 0);

    //if(size_ > line_size)
    if(bytearray_output_stream_size(&os) > 0)
    {
        /* flush the last (possibly only) line */
        output_stream_write_u8(&os, 0);
        logger_handle_msg(hndl, level, "%s", bytearray_output_stream_buffer(&os));
    }

    output_stream_close(&os);
}
/**
 * Reopens the channel's log file (typically after log rotation): opens a
 * new stream on the same path, chowns it, then swaps the new descriptor
 * into the existing buffered stream and closes the old one.  On failure
 * the channel keeps writing to the original descriptor.
 *
 * @param chan the file logger channel
 *
 * @return an error code on failure; otherwise the result of the reopen
 */
static ya_result logger_channel_file_reopen(logger_channel* chan)
{
    ya_result return_code;
    file_data* sd = (file_data*)chan->data;

    /* push pending output to the current file before switching */
    output_stream_flush(&sd->os);

    /* open a new file stream */

    output_stream errlog_os;

    if(FAIL(return_code = file_output_stream_open_ex(sd->file_name, O_CREAT|O_APPEND|O_RDWR, sd->mode, &errlog_os)))
    {
        /* could not reopen: report it through the channel itself and keep the old stream */
        logger_channel_file_flush(chan);
        logger_channel_file_msg(chan, LOG_NOTICE, "unable to reopen '%s': %r, resuming on original", sd->file_name, return_code);
        logger_channel_file_flush(chan);

        return return_code;
    }

    /* change ownership of the file */

    int fd = fd_output_stream_get_filedescriptor(&errlog_os);

    if(fchown(fd, sd->uid, sd->gid) < 0)
    {
        /* chown failed: drop the new stream and keep logging to the old one */
        return_code = ERRNO_ERROR;

        output_stream_close(&errlog_os);

        logger_channel_file_flush(chan);
        logger_channel_file_msg(chan, LOG_NOTICE, "unable to fchown '%s': %r, resuming on original", sd->file_name, return_code);
        logger_channel_file_flush(chan);

        return return_code;
    }

    logger_channel_file_flush(chan);
    logger_channel_file_msg(chan, LOG_NOTICE, "reopening '%s'", sd->file_name);
    logger_channel_file_flush(chan);

    /* the buffered stream's underlying fd stream */
    output_stream* fos = buffer_output_stream_get_filtered(&sd->os);

    /* exchange the file descriptors */
    /* NOTE(review): the argument order here ((fd, stream)) is the opposite of the
     * fd_output_stream_attach(&fos, jix->fd) call elsewhere in this file —
     * confirm the actual prototype; one of the two call sites looks wrong. */

    fd_output_stream_attach(fd, fos);               /* new fd into the live buffered stream */
    fd_output_stream_attach(sd->fd, &errlog_os);    /* old fd into the temporary stream ... */
    sd->fd = fd;

    output_stream_close(&errlog_os);                /* ... so closing it closes the old fd */

    logger_channel_file_flush(chan);
    logger_channel_file_msg(chan, LOG_NOTICE, "reopened '%s'", sd->file_name);
    logger_channel_file_flush(chan);

    return return_code;
}
/**
 * Replays the zone's journal on top of the in-memory zone.
 *
 * Reads the journal from the zone's current serial, gathers records page
 * by page (an IXFR page runs from one "delete" SOA to the next), commits
 * each page, and finally verifies the zone reached the journal's last
 * serial.  Handles NSEC3 chain link updates if the replay switched the
 * zone to NSEC3.
 *
 * @param zone the zone to replay the journal of (locked/unlocked inside)
 *
 * @return an error code, or a success code on completion
 *
 * Fixes versus the previous revision:
 * - the third argument of zdb_icmtl_replay_commit had been mangled into
 *   the '¤' character by a broken HTML-entity decode of "&current_serial"
 *   (would not compile)
 * - error message typo "failed to committed" corrected
 */
ya_result zdb_icmtl_replay(zdb_zone *zone)
{
    ya_result return_value;
    u32 serial;

    zdb_zone_double_lock(zone, ZDB_ZONE_MUTEX_SIMPLEREADER, ZDB_ZONE_MUTEX_LOAD);

    return_value = zdb_zone_getserial(zone, &serial); // zone is locked

    if(FAIL(return_value))
    {
        zdb_zone_double_unlock(zone, ZDB_ZONE_MUTEX_SIMPLEREADER, ZDB_ZONE_MUTEX_LOAD);
        log_err("journal: %{dnsname}: error reading serial for zone: %r", zone->origin, return_value);
        return return_value;
    }

    input_stream is;

#if ICMTL_DUMP_JOURNAL_RECORDS
    log_debug("journal: zdb_icmtl_replay(%{dnsname})", zone->origin);
    logger_flush();
#endif

    u32 first_serial;
    u32 last_serial;

    if(FAIL(return_value = zdb_zone_journal_get_serial_range(zone, &first_serial, &last_serial)))
    {
        zdb_zone_double_unlock(zone, ZDB_ZONE_MUTEX_SIMPLEREADER, ZDB_ZONE_MUTEX_LOAD);

        if(return_value == ZDB_ERROR_ICMTL_NOTFOUND)
        {
            /* no journal at all: nothing to replay */
            return_value = SUCCESS;
        }
        else
        {
            log_err("journal: %{dnsname}: error opening journal for zone: %r", zone->origin, return_value);
        }

        return return_value;
    }

    log_debug("journal: %{dnsname}: zone serial is %i, journal covers serials from %i to %i", zone->origin, serial, first_serial, last_serial);

    if(last_serial == serial)
    {
        zdb_zone_double_unlock(zone, ZDB_ZONE_MUTEX_SIMPLEREADER, ZDB_ZONE_MUTEX_LOAD);
        log_debug("journal: %{dnsname}: nothing to read from the journal", zone->origin);
        return 0;
    }

    if(serial_lt(serial, first_serial))
    {
        zdb_zone_double_unlock(zone, ZDB_ZONE_MUTEX_SIMPLEREADER, ZDB_ZONE_MUTEX_LOAD);
        log_warn("journal: %{dnsname}: first serial from the journal is after the zone", zone->origin);
        // should invalidate the journal
        zdb_zone_journal_delete(zone);
        return 0;
    }

    if(serial_gt(serial, last_serial))
    {
        zdb_zone_double_unlock(zone, ZDB_ZONE_MUTEX_SIMPLEREADER, ZDB_ZONE_MUTEX_LOAD);
        log_warn("journal: %{dnsname}: last serial from the journal is before the zone", zone->origin);
        // should invalidate the journal
        zdb_zone_journal_delete(zone);
        return 0;
    }

    if(FAIL(return_value = zdb_zone_journal_get_ixfr_stream_at_serial(zone, serial, &is, NULL)))
    {
        zdb_zone_double_unlock(zone, ZDB_ZONE_MUTEX_SIMPLEREADER, ZDB_ZONE_MUTEX_LOAD);
        log_err("journal: %{dnsname}: error reading journal from serial %d: %r",zone->origin, serial, return_value);
        return return_value;
    }

    log_info("journal: %{dnsname}: replaying from serial %u",zone->origin, serial);

    buffer_input_stream_init(&is, &is, ZDB_ICMTL_REPLAY_BUFFER_SIZE);

    u16 shutdown_test_countdown = ZDB_ICMTL_REPLAY_SHUTDOWN_POLL_PERIOD;

    u32 current_serial = serial;

    /*
     * Read all records from [ SOA ... SOA ... [ SOA in memory
     */

    output_stream baos;
    input_stream bais;
    dns_resource_record rr;

    int baos_rr_count = 0;
    int baos_soa_count = 0;

    bool was_nsec3 = zdb_zone_is_nsec3(zone);

    bytearray_output_stream_init_ex(&baos, NULL, ZDB_ICMTL_REPLAY_BUFFER_SIZE, BYTEARRAY_DYNAMIC);
    dns_resource_record_init(&rr);

    // 0: gather, 1: commit, 2: commit & stop

    for(int replay_state = ZDB_ICMTL_REPLAY_GATHER; replay_state != ZDB_ICMTL_REPLAY_COMMIT_AND_STOP;)
    {
        // ensure it's not supposed to shutdown (every few iterations)

        if(--shutdown_test_countdown <= 0)
        {
            if(dnscore_shuttingdown())
            {
                return_value = STOPPED_BY_APPLICATION_SHUTDOWN;
                break;
            }

            shutdown_test_countdown = ZDB_ICMTL_REPLAY_SHUTDOWN_POLL_PERIOD;
        }

        // read the next record

        if((return_value = dns_resource_record_read(&rr, &is)) <= 0)
        {
            if(ISOK(return_value))
            {
                log_info("journal: %{dnsname}: reached the end of the journal file", zone->origin);
                replay_state = ZDB_ICMTL_REPLAY_COMMIT_AND_STOP;
            }
            else
            {
                log_err("journal: broken journal: %r", return_value);
                logger_flush(); // broken journal (flush is slow, but this is bad, so : keep it)
                replay_state = ZDB_ICMTL_REPLAY_STOP;
            }
        }
        else // first record must be an SOA (or it's wrong)
        if(baos_rr_count == 0) // first record ?
        {
            if(rr.tctr.qtype != TYPE_SOA) // must be SOA
            {
                // expected an SOA
                return_value = ERROR;
                break;
            }

            ++baos_soa_count; // 0 -> 1
            // this is not mandatory but clearer to read
        }
        else // the page ends with an SOA or end of stream
        if(rr.tctr.qtype == TYPE_SOA)
        {
            if(baos_soa_count == 2)
            {
                // this record is the start of the next stream, keep it for the next iteration
                replay_state = ZDB_ICMTL_REPLAY_COMMIT;
            }

            ++baos_soa_count;
        }

        ++baos_rr_count;

        if((replay_state & ZDB_ICMTL_REPLAY_COMMIT) != 0)
        {
            log_info("journal: %{dnsname}: committing changes", zone->origin);
            u64 ts_start = timeus();

            zdb_zone_exchange_locks(zone, ZDB_ZONE_MUTEX_SIMPLEREADER, ZDB_ZONE_MUTEX_LOAD);

            bytearray_input_stream_init_const(&bais, bytearray_output_stream_buffer(&baos), bytearray_output_stream_size(&baos));
            /* FIX: "&current_serial" had been mangled into '¤t_serial' by a broken entity decode */
            return_value = zdb_icmtl_replay_commit(zone, &bais, &current_serial);
            zdb_zone_exchange_locks(zone, ZDB_ZONE_MUTEX_LOAD, ZDB_ZONE_MUTEX_SIMPLEREADER);
            input_stream_close(&bais);

            u64 ts_stop = timeus();
            if(ts_stop < ts_start) // time change
            {
                ts_stop = ts_start;
            }

            u64 ts_delta = ts_stop - ts_start;

            if(ISOK(return_value))
            {
                if(ts_delta < 1000)
                {
                    log_info("journal: %{dnsname}: committed changes (%lluus)", zone->origin, ts_delta);
                }
                else if(ts_delta < 1000000)
                {
                    double ts_delta_s = ts_delta;
                    ts_delta_s /= 1000.0;
                    log_info("journal: %{dnsname}: committed changes (%5.2fms)", zone->origin, ts_delta_s);
                }
                else
                {
                    double ts_delta_s = ts_delta;
                    ts_delta_s /= 1000000.0;
                    log_info("journal: %{dnsname}: committed changes (%5.2fs)", zone->origin, ts_delta_s);
                }
            }
            else
            {
                log_err("journal: %{dnsname}: failed to commit changes", zone->origin);
                break;
            }

            // the current page has been processed

            if(replay_state == ZDB_ICMTL_REPLAY_COMMIT_AND_STOP)
            {
                // no more page to read
                break;
            }

            // reset the state for the next page
            // note: the next written record will be the last read SOA

            baos_rr_count = 1;
            baos_soa_count = 1;
            replay_state = ZDB_ICMTL_REPLAY_GATHER;
            bytearray_output_stream_reset(&baos);

        } // end if replay_state is ZDB_ICMTL_REPLAY_COMMIT (mask)

        dns_resource_record_write(&rr, &baos);
    }

    input_stream_close(&is);
    output_stream_close(&baos);
    dns_resource_record_clear(&rr);

    // cleanup destroyed nsec3 chains

    if(ISOK(return_value))
    {
        bool is_nsec3 = zdb_zone_is_nsec3(zone);

        if(is_nsec3 && !was_nsec3)
        {
            // the chain has just been created, but is probably missing internal links
            log_debug("journal: %{dnsname}: zone switched to NSEC3 by reading the journal: updating links", zone->origin);

            zdb_zone_exchange_locks(zone, ZDB_ZONE_MUTEX_SIMPLEREADER, ZDB_ZONE_MUTEX_LOAD);
            nsec3_zone_update_chain0_links(zone);
            zdb_zone_exchange_locks(zone, ZDB_ZONE_MUTEX_LOAD, ZDB_ZONE_MUTEX_SIMPLEREADER);

            log_debug("journal: %{dnsname}: zone switched to NSEC3 by reading the journal: links updated", zone->origin);
        }

        if(FAIL(return_value = zdb_zone_getserial(zone, &serial))) // zone is locked
        {
            zdb_zone_double_unlock(zone, ZDB_ZONE_MUTEX_SIMPLEREADER, ZDB_ZONE_MUTEX_LOAD);
            log_err("journal: %{dnsname}: error reading confirmation serial for zone: %r",zone->origin, return_value);
            return return_value;
        }

        if(serial != last_serial)
        {
            log_warn("journal: %{dnsname}: expected serial to be %i but it is %i instead",zone->origin, last_serial, serial);
        }

#if 0 // ICMTL_DUMP_JOURNAL_RECORDS
        if(is_nsec)
        {
            nsec_logdump_tree(zone);
            logger_flush();
        }
#endif
    }

    zdb_zone_double_unlock(zone, ZDB_ZONE_MUTEX_SIMPLEREADER, ZDB_ZONE_MUTEX_LOAD);

    log_info("journal: %{dnsname}: done", zone->origin);

    return return_value;
}
/*
 * generation_information
 *
 * Computes and reports per-generation statistics for each subpopulation
 * and for the whole population; updates the best-of-run records; rewrites
 * the .bst file and appends to the .his file; finally invokes the
 * application's end-of-evaluation callback.
 *
 * returns: the callback's verdict (1 = user termination criterion met),
 * possibly overridden by the global termination_override flag.
 *
 * NOTE(review): the static 'fd' caches atoi(get_parameter("output.digits"))
 * on the first call — it is a digit count for fitness printing, not a
 * file descriptor, despite the name.
 */
int generation_information(int gen, multipop *mpop, int stt_interval, int bestn)
{
    int i, j;
    int newbest;
    static int fd = -1;    /* decimal digits for fitness values (cached) */
    popstats *gen_stats;
    int ret = 0;
    FILE *bout, *hout;

    /* number of decimal digits to use when printing fitness values. */
    if (fd == -1)
        fd = atoi(get_parameter("output.digits"));

    /* allocate stats records for the current generation. */
    gen_stats = (popstats *) MALLOC((mpop->size + 1) * sizeof(popstats));
    for (i = 0; i < mpop->size + 1; ++i)
    {
        gen_stats[i].bestn = bestn;
        gen_stats[i].size = -1;
    }

    oprintf( OUT_GEN, 90, "=== GENERATION %d ===\n", gen);
    oprintf( OUT_PRG, 90, "=== GENERATION %d ===\n", gen);

    /* for each subpopulation... */
    for (i = 0; i < mpop->size; ++i)
    {
        /* calculate stats for subpopulation.  slot 0 of gen_stats is the
           whole-population aggregate; subpop i lives in slot i+1. */
        calculate_pop_stats(gen_stats + i + 1, mpop->pop[i], gen, i);

        /* accumulate that into stats for whole popluation... */
        accumulate_pop_stats(gen_stats, gen_stats + i + 1);

        /* ...and stats for this subpopulation over the whole run. */
        accumulate_pop_stats(run_stats + i + 1, gen_stats + i + 1);

        /* if only one subpop, don't print out the subpop stuff. */
        if (mpop->size == 1)
            continue;

        /** print much stuff to .gen, .prg, and .stt files. */

        /* per-subpopulation node/depth stats -> .gen file */
        if (test_detail_level(90))
        {
            oprintf( OUT_GEN, 90, " subpopulation %d:\n", i + 1);
            oprintf( OUT_GEN, 90, " generation:\n");
            oprintf( OUT_GEN, 90, " mean: nodes: %.3lf (%d-%d); depth: %.3lf (%d-%d)\n",
                     (double) gen_stats[i + 1].totalnodes / gen_stats[i + 1].size,
                     gen_stats[i + 1].minnodes, gen_stats[i + 1].maxnodes,
                     (double) gen_stats[i + 1].totaldepth / gen_stats[i + 1].size,
                     gen_stats[i + 1].mindepth, gen_stats[i + 1].maxdepth);
            oprintf( OUT_GEN, 90, " best: nodes: %d; depth: %d\n",
                     gen_stats[i + 1].bestnodes, gen_stats[i + 1].bestdepth);
            oprintf( OUT_GEN, 90, " worst: nodes: %d; depth: %d\n",
                     gen_stats[i + 1].worstnodes, gen_stats[i + 1].worstdepth);
            oprintf( OUT_GEN, 90, " run: (%d trees)\n", run_stats[i + 1].size);
            oprintf( OUT_GEN, 90, " mean: nodes: %.3lf (%d-%d); depth: %.3lf (%d-%d)\n",
                     (double) run_stats[i + 1].totalnodes / run_stats[i + 1].size,
                     run_stats[i + 1].minnodes, run_stats[i + 1].maxnodes,
                     (double) run_stats[i + 1].totaldepth / run_stats[i + 1].size,
                     run_stats[i + 1].mindepth, run_stats[i + 1].maxdepth);
            oprintf( OUT_GEN, 90, " best: nodes: %d; depth: %d\n",
                     run_stats[i + 1].bestnodes, run_stats[i + 1].bestdepth);
            oprintf( OUT_GEN, 90, " worst: nodes: %d; depth: %d\n",
                     run_stats[i + 1].worstnodes, run_stats[i + 1].worstdepth);
        }

        /* per-subpopulation hits/fitness stats -> .prg file */
        if (test_detail_level(90))
        {
            oprintf( OUT_PRG, 90, " subpopulation %d:\n", i + 1);
            oprintf( OUT_PRG, 90, " generation stats:\n");
            oprintf( OUT_PRG, 90, " mean: hits: %.3lf (%d-%d); standardized fitness: %.*lf\n",
                     (double) gen_stats[i + 1].totalhits / gen_stats[i + 1].size,
                     gen_stats[i + 1].minhits, gen_stats[i + 1].maxhits,
                     fd, (double) gen_stats[i + 1].totalfit / gen_stats[i + 1].size);
            oprintf( OUT_PRG, 90, " best: hits: %d; standardized fitness: %.*lf\n",
                     gen_stats[i + 1].besthits, fd, (double) gen_stats[i + 1].bestfit);
            oprintf( OUT_PRG, 90, " worst: hits: %d; standardized fitness: %.*lf\n",
                     gen_stats[i + 1].worsthits, fd, (double) gen_stats[i + 1].worstfit);
            oprintf( OUT_PRG, 90, " run stats: (%d trees)\n",
                     run_stats[i + 1].size);
            oprintf( OUT_PRG, 90, " mean: hits: %.3lf (%d-%d); standardized fitness: %.*lf\n",
                     (double) run_stats[i + 1].totalhits / run_stats[i + 1].size,
                     run_stats[i + 1].minhits, run_stats[i + 1].maxhits,
                     fd, (double) run_stats[i + 1].totalfit / run_stats[i + 1].size);
            oprintf( OUT_PRG, 90, " best: hits: %d; standardized fitness: %.*lf; generation: %d\n",
                     run_stats[i + 1].besthits, fd, (double) run_stats[i + 1].bestfit,
                     run_stats[i + 1].bestgen);
            oprintf( OUT_PRG, 90, " worst: hits: %d; standardized fitness: %.*lf; generation: %d\n",
                     run_stats[i + 1].worsthits, fd, (double) run_stats[i + 1].worstfit,
                     run_stats[i + 1].worstgen);
        }

        /* per-subpopulation one-line record -> .stt file, every
           stt_interval generations.
           NOTE(review): unlike the whole-population block further down,
           this one is not guarded by test_detail_level(50) — confirm
           whether that is intentional. */
        if (gen % stt_interval == 0)
        {
            oprintf( OUT_STT, 50, "%d %d ", gen, i + 1);
            oprintf( OUT_STT, 50, "%.*lf %.*lf %.*lf ",
                     fd, gen_stats[i + 1].totalfit / gen_stats[i + 1].size,
                     fd, gen_stats[i + 1].bestfit, fd, gen_stats[i + 1].worstfit);
            oprintf( OUT_STT, 50, "%.3lf %.3lf %d %d %d %d ",
                     (double) gen_stats[i + 1].totalnodes / gen_stats[i + 1].size,
                     (double) gen_stats[i + 1].totaldepth / gen_stats[i + 1].size,
                     gen_stats[i + 1].bestnodes, gen_stats[i + 1].bestdepth,
                     gen_stats[i + 1].worstnodes, gen_stats[i + 1].worstdepth);
            oprintf( OUT_STT, 50, "%.*lf %.*lf %.*lf ",
                     fd, run_stats[i + 1].totalfit / run_stats[i + 1].size,
                     fd, run_stats[i + 1].bestfit, fd, run_stats[i + 1].worstfit);
            oprintf( OUT_STT, 50, "%.3lf %.3lf %d %d %d %d ",
                     (double) run_stats[i + 1].totalnodes / run_stats[i + 1].size,
                     (double) run_stats[i + 1].totaldepth / run_stats[i + 1].size,
                     run_stats[i + 1].bestnodes, run_stats[i + 1].bestdepth,
                     run_stats[i + 1].worstnodes, run_stats[i + 1].worstdepth);
            oprintf( OUT_STT, 50, "\n");
        }
    }

    /* merge stats for current generation into overall run stats. */
    newbest = accumulate_pop_stats(run_stats, gen_stats);

    /** more printing. **/

    /* whole-population node/depth stats -> .gen file */
    if (test_detail_level(90))
    {
        oprintf( OUT_GEN, 90, " total population:\n");
        oprintf( OUT_GEN, 90, " generation:\n");
        oprintf( OUT_GEN, 90, " mean: nodes: %.3lf (%d-%d); depth: %.3lf (%d-%d)\n",
                 (double) gen_stats[0].totalnodes / gen_stats[0].size,
                 gen_stats[0].minnodes, gen_stats[0].maxnodes,
                 (double) gen_stats[0].totaldepth / gen_stats[0].size,
                 gen_stats[0].mindepth, gen_stats[0].maxdepth);
        oprintf( OUT_GEN, 90, " best: nodes: %d; depth: %d\n",
                 gen_stats[0].bestnodes, gen_stats[0].bestdepth);
        oprintf( OUT_GEN, 90, " worst: nodes: %d; depth: %d\n",
                 gen_stats[0].worstnodes, gen_stats[0].worstdepth);
        oprintf( OUT_GEN, 90, " run: (%d trees)\n", run_stats[0].size);
        oprintf( OUT_GEN, 90, " mean: nodes: %.3lf (%d-%d); depth: %.3lf (%d-%d)\n",
                 (double) run_stats[0].totalnodes / run_stats[0].size,
                 run_stats[0].minnodes, run_stats[0].maxnodes,
                 (double) run_stats[0].totaldepth / run_stats[0].size,
                 run_stats[0].mindepth, run_stats[0].maxdepth);
        oprintf( OUT_GEN, 90, " best: nodes: %d; depth: %d\n",
                 run_stats[0].bestnodes, run_stats[0].bestdepth);
        oprintf( OUT_GEN, 90, " worst: nodes: %d; depth: %d\n",
                 run_stats[0].worstnodes, run_stats[0].worstdepth);
    }

    /* whole-population hits/fitness stats -> .prg file */
    if (test_detail_level(90))
    {
        oprintf( OUT_PRG, 90, " total population:\n");
        oprintf( OUT_PRG, 90, " generation stats:\n");
        oprintf( OUT_PRG, 90, " mean: hits: %.3lf (%d-%d); standardized fitness: %.*lf\n",
                 (double) gen_stats[0].totalhits / gen_stats[0].size,
                 gen_stats[0].minhits, gen_stats[0].maxhits,
                 fd, (double) gen_stats[0].totalfit / gen_stats[0].size);
        oprintf( OUT_PRG, 90, " best: hits: %d; standardized fitness: %.*lf\n",
                 gen_stats[0].besthits, fd, (double) gen_stats[0].bestfit);
        oprintf( OUT_PRG, 90, " worst: hits: %d; standardized fitness: %.*lf\n",
                 gen_stats[0].worsthits, fd, (double) gen_stats[0].worstfit);
        oprintf( OUT_PRG, 90, " run stats: (%d trees)\n", run_stats[0].size);
        oprintf( OUT_PRG, 90, " mean: hits: %.3lf (%d-%d); standardized fitness: %.*lf\n",
                 (double) run_stats[0].totalhits / run_stats[0].size,
                 run_stats[0].minhits, run_stats[0].maxhits,
                 fd, (double) run_stats[0].totalfit / run_stats[0].size);
        oprintf( OUT_PRG, 90, " best: hits: %d; standardized fitness: %.*lf; generation: %d\n",
                 run_stats[0].besthits, fd, (double) run_stats[0].bestfit,
                 run_stats[0].bestgen);
        oprintf( OUT_PRG, 90, " worst: hits: %d; standardized fitness: %.*lf; generation: %d\n",
                 run_stats[0].worsthits, fd, (double) run_stats[0].worstfit,
                 run_stats[0].worstgen);
    }

    /* whole-population one-line record -> .stt file */
    if (gen % stt_interval == 0)
    {
        if (test_detail_level(50))
        {
            oprintf( OUT_STT, 50, "%d 0 ", gen);
            oprintf( OUT_STT, 50, "%.*lf %.*lf %.*lf ",
                     fd, gen_stats[0].totalfit / gen_stats[0].size,
                     fd, gen_stats[0].bestfit, fd, gen_stats[0].worstfit);
            oprintf( OUT_STT, 50, "%.3lf %.3lf %d %d %d %d ",
                     (double) gen_stats[0].totalnodes / gen_stats[0].size,
                     (double) gen_stats[0].totaldepth / gen_stats[0].size,
                     gen_stats[0].bestnodes, gen_stats[0].bestdepth,
                     gen_stats[0].worstnodes, gen_stats[0].worstdepth);
            oprintf( OUT_STT, 50, "%.*lf %.*lf %.*lf ",
                     fd, run_stats[0].totalfit / run_stats[0].size,
                     fd, run_stats[0].bestfit, fd, run_stats[0].worstfit);
            oprintf( OUT_STT, 50, "%.3lf %.3lf %d %d %d %d ",
                     (double) run_stats[0].totalnodes / run_stats[0].size,
                     (double) run_stats[0].totaldepth / run_stats[0].size,
                     run_stats[0].bestnodes, run_stats[0].bestdepth,
                     run_stats[0].worstnodes, run_stats[0].worstdepth);
            oprintf( OUT_STT, 50, "\n");
        }
    }

    /* rewrite the .bst file, and append to the .his file. */
    output_stream_open( OUT_BST);

    oprintf( OUT_BST, 10, "=== BEST-OF-RUN ===\n");
    oprintf( OUT_BST, 10, " generation: %d\n", run_stats[0].bestgen);
    if (mpop->size > 1)
        oprintf( OUT_BST, 10, " subpopulation: %d\n", run_stats[0].bestpop + 1);
    oprintf( OUT_BST, 10, " nodes: %d\n", run_stats[0].bestnodes);
    oprintf( OUT_BST, 10, " depth: %d\n", run_stats[0].bestdepth);
    oprintf( OUT_BST, 10, " hits: %d\n", run_stats[0].besthits);

    oprintf( OUT_HIS, 10, "=== BEST-OF-RUN ===\n");
    oprintf( OUT_HIS, 10, " current generation: %d\n", gen);
    oprintf( OUT_HIS, 10, " generation: %d\n", run_stats[0].bestgen);
    if (mpop->size > 1)
        oprintf( OUT_HIS, 10, " subpopulation: %d\n", run_stats[0].bestpop + 1);
    oprintf( OUT_HIS, 10, " nodes: %d\n", run_stats[0].bestnodes);
    oprintf( OUT_HIS, 10, " depth: %d\n", run_stats[0].bestdepth);
    oprintf( OUT_HIS, 10, " hits: %d\n", run_stats[0].besthits);

    /* retrieve the (FILE *) for the .bst and .his files, so that the
       trees can be printed to them. */
    bout = output_filehandle( OUT_BST);
    hout = output_filehandle( OUT_HIS);

    if (run_stats[0].bestn == 1)
    {
        oprintf( OUT_BST, 20, "TOP INDIVIDUAL:\n\n");
        oprintf( OUT_HIS, 20, "TOP INDIVIDUAL:\n\n");
    }
    else
    {
        oprintf( OUT_BST, 20, "TOP %d INDIVIDUALS (in order):\n\n", run_stats[0].bestn);
        oprintf( OUT_HIS, 20, "TOP %d INDIVIDUALS (in order):\n\n", run_stats[0].bestn);
    }

    /* dump each of the bestn individuals to both files */
    for (i = 0; i < run_stats[0].bestn; ++i)
    {
        oprintf( OUT_BST, 20, "\n\n-- #%d --\n", i + 1);
        oprintf( OUT_BST, 20, " hits: %d\n", run_stats[0].best[i]->ind->hits);
        oprintf( OUT_BST, 20, " raw fitness: %.*lf\n", fd,
                 run_stats[0].best[i]->ind->r_fitness);
        oprintf( OUT_BST, 20, " standardized fitness: %.*lf\n", fd,
                 run_stats[0].best[i]->ind->s_fitness);
        oprintf( OUT_BST, 20, " adjusted fitness: %.*lf\n", fd,
                 run_stats[0].best[i]->ind->a_fitness);

        oprintf( OUT_HIS, 20, "\n\n-- #%d --\n", i + 1);
        oprintf( OUT_HIS, 20, " hits: %d\n", run_stats[0].best[i]->ind->hits);
        oprintf( OUT_HIS, 20, " raw fitness: %.*lf\n", fd,
                 run_stats[0].best[i]->ind->r_fitness);
        oprintf( OUT_HIS, 20, " standardized fitness: %.*lf\n", fd,
                 run_stats[0].best[i]->ind->s_fitness);
        oprintf( OUT_HIS, 20, " adjusted fitness: %.*lf\n", fd,
                 run_stats[0].best[i]->ind->a_fitness);

        /* print the tree to both files here. */
        if (test_detail_level(20))
        {
            pretty_print_individual(run_stats[0].best[i]->ind, bout);
            pretty_print_individual(run_stats[0].best[i]->ind, hout);
        }
    }

    /* call the end-of-evaluation callback. returns 1 if user termination
       criterion is met, 0 otherwise. */
    ret = app_end_of_evaluation(gen, mpop, newbest, gen_stats, run_stats);

    /* close the .bst file. */
    output_stream_close( OUT_BST);

    /* free stats structures for current generation (drop the refcounts
       taken on the saved best individuals first). */
    for (i = 0; i < mpop->size + 1; ++i)
    {
        for (j = 0; j < gen_stats[i].bestn; ++j)
            --gen_stats[i].best[j]->refcount;
        FREE(gen_stats[i].best);
    }
    FREE(gen_stats);

    /* deallocate saved individuals that are no longer needed. */
    saved_individual_gc();

    /* return value the application callback gave us. */
    if(termination_override==1) {termination_override=0; return 1;}
    return ret;
}
/**
 * Global dnscore shutdown: stops the scheduler, thread pool and logger,
 * flushes and closes the standard output/error terminal streams.
 * Idempotent: a second (possibly concurrent) call returns immediately.
 */
void dnscore_finalize()
{
    /*
     * No need to "finalize" format, dnsformat and rfc
     */

    if(dnscore_finalizing)
    {
        /* OOPS : ALREADY BUSY SHUTTING DOWN */

        /* DO NOT USE LOGGER HERE ! */

        return;
    }

    dnscore_finalizing = TRUE;

    dnscore_shutdown();

#ifndef NDEBUG
    log_debug("exit: destroying the thread pool");
#endif

    logger_flush();

    thread_pool_destroy();

#ifdef DEBUG
    /* NOTE(review): "%hd" used for getpid() — pid_t is usually wider than
     * a short; confirm against the custom formatter's semantics. */
    log_debug("exit: bye (pid=%hd)", getpid());

    logger_flush();
#endif

    scheduler_finalize();

    /* the logger must be fully stopped before the terminal streams go away */
    logger_flush();

    logger_finalize();  /** @note does a logger_stop */

    logger_handle_finalize();

#ifndef NDEBUG
    /*
     * It may not be required right now, but in case the stdstream are filtered/buffered
     * this will flush them.
     */

    /* NOTE(review): tsig_finalize(), error_unregister_all(), rfc_finalize()
     * and format_class_finalize() are only compiled when NDEBUG is not
     * defined — confirm they are intentionally skipped in release builds. */
#if HAS_TSIG_SUPPORT
    tsig_finalize();
#endif

    stdstream_flush_both_terms();

    error_unregister_all();
    rfc_finalize();
    format_class_finalize();

#endif

#ifndef NDEBUG
#if ZDB_DEBUG_MALLOC != 0
    debug_stat(TRUE);
#endif
#endif

    stdstream_flush_both_terms();

    output_stream_close(&__termerr__);
    output_stream_close(&__termout__);
}