/*
 * Top-level reactor handler for the listening socket.
 *
 * Responsibilities: give every accepted connection its own handler,
 * shut down after two consecutive quiesce periods when a timeout is
 * configured, announce readiness for test scripts on reactor start,
 * and print the statistics report when the reactor finishes.
 */
void listener_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
    global_context_t *ctx = global_context(h);

    /* Track consecutive quiesce events; any other event breaks the run. */
    if (type == PN_REACTOR_QUIESCED)
        ctx->quiesce_count++;
    else
        ctx->quiesce_count = 0;

    if (type == PN_CONNECTION_INIT) {
        /* New incoming connection on listener socket. Give each a
           separate handler. */
        pn_connection_t *incoming = pn_event_connection(event);
        pn_handler_t *per_conn = connection_handler(ctx);
        pn_handshaker_t *shaker = pn_handshaker();
        pn_handler_add(per_conn, shaker);
        pn_decref(shaker);                 /* handler chain now owns it */
        pn_record_t *attachments = pn_connection_attachments(incoming);
        pn_record_set_handler(attachments, per_conn);
        pn_decref(per_conn);               /* connection record now owns it */
    } else if (type == PN_REACTOR_QUIESCED) {
        /* Two quiesces in a row means we have been idle for a timeout
           period. */
        if (ctx->opts->timeout != -1 && ctx->quiesce_count > 1)
            global_shutdown(ctx);
    } else if (type == PN_REACTOR_INIT) {
        pn_reactor_t *reactor = pn_event_reactor(event);
        start_listener(ctx, reactor);
        /* hack to let test scripts know when the receivers are ready (so
           that the senders may be started) */
        if (ctx->opts->ready_text) {
            fprintf(stdout, "%s\n", ctx->opts->ready_text);
            fflush(stdout);
        }
        if (ctx->opts->timeout != -1)
            pn_reactor_set_timeout(reactor, ctx->opts->timeout);
    } else if (type == PN_REACTOR_FINAL) {
        /* Make sure the timer was started even if nothing arrived, then
           emit the final report. */
        if (ctx->received == 0)
            statistics_start(ctx->stats);
        statistics_report(ctx->stats, ctx->sent, ctx->received);
    }
}
/*
 * Reactor handler for the listener socket (reporting variant).
 *
 * Unlike the plain variant, this one has no PN_REACTOR_QUIESCED case
 * (no idle-timeout shutdown) and, on PN_REACTOR_FINAL, closes the
 * report file and prints a summary line instead of calling
 * statistics_report().
 */
void listener_dispatch ( pn_handler_t *h, pn_event_t * event, pn_event_type_t type )
{
    global_context_t * gc = global_context ( h );

    /* Count consecutive quiesce events. NOTE(review): the count is
       maintained but never read here — there is no PN_REACTOR_QUIESCED
       case below; kept for parity with the other variant. */
    if ( type == PN_REACTOR_QUIESCED )
        gc->quiesce_count++;
    else
        gc->quiesce_count = 0;

    switch (type) {
    case PN_CONNECTION_INIT: {
        pn_connection_t * connection = pn_event_connection ( event );
        // New incoming connection on listener socket. Give each a separate handler.
        pn_handler_t *ch = connection_handler(gc);
        pn_handshaker_t *handshaker = pn_handshaker();
        pn_handler_add(ch, handshaker);
        pn_decref(handshaker);      /* handler chain now owns the handshaker */
        pn_record_t *record = pn_connection_attachments(connection);
        pn_record_set_handler(record, ch);
        pn_decref(ch);              /* connection record now owns the handler */
    } break;

    case PN_REACTOR_INIT: {
        /* Reactor is starting: open the listening socket. */
        pn_reactor_t *reactor = pn_event_reactor(event);
        start_listener(gc, reactor);
    } break;

    case PN_REACTOR_FINAL: {
        /* Reactor is shutting down: flush reporting state. */
        if (gc->received == 0) statistics_start(gc->stats);
        //statistics_report(gc->stats, gc->sent, gc->received);
        /* NOTE(review): assumes report_fp was opened earlier — fclose(NULL)
           is undefined; confirm against the initialization path. */
        fclose ( gc->report_fp );
        /* NOTE(review): %d assumes gc->received is an int-compatible
           counter — confirm its declared type. */
        if ( gc->received > 0 )
            fprintf ( stderr, "reactor-recv received %d messages.\n", gc->received );
    } break;

    default:
        break;
    }
}
/*
 * Messenger-based receiver main.
 *
 * Parses command-line options, configures an optional TLS identity on a
 * pn_messenger, subscribes to the requested addresses, then loops
 * receiving messages until opts.msg_count have arrived (forever when
 * msg_count is 0). Each received message may be replied to (back to its
 * reply-to address) and/or forwarded round-robin to the configured
 * forwarding targets. On completion it flushes pending sends, stops the
 * messenger, and prints throughput statistics.
 */
int main(int argc, char** argv)
{
    Options_t opts;
    Statistics_t stats;
    uint64_t sent = 0;
    uint64_t received = 0;
    int forwarding_index = 0;      /* round-robin cursor into forwarding_targets */
    int rc;
    pn_message_t *message;
    pn_messenger_t *messenger;

    parse_options( argc, argv, &opts );

    const int forward = opts.forwarding_targets.count != 0;

    message = pn_message();
    messenger = pn_messenger( opts.name );

    /* load the various command line options if they're set */
    if (opts.certificate) {
        rc = pn_messenger_set_certificate(messenger, opts.certificate);
        check_messenger(messenger);
        check( rc == 0, "Failed to set certificate" );
    }
    if (opts.privatekey) {
        rc = pn_messenger_set_private_key(messenger, opts.privatekey);
        check_messenger(messenger);
        check( rc == 0, "Failed to set private key" );
    }
    if (opts.password) {
        rc = pn_messenger_set_password(messenger, opts.password);
        check_messenger(messenger);
        check( rc == 0, "Failed to set password" );
    }
    if (opts.ca_db) {
        rc = pn_messenger_set_trusted_certificates(messenger, opts.ca_db);
        check_messenger(messenger);
        check( rc == 0, "Failed to set trusted CA database" );
    }
    if (opts.incoming_window) {
        // RAFI: seems to cause receiver to hang:
        pn_messenger_set_incoming_window( messenger, opts.incoming_window );
    }
    pn_messenger_set_timeout( messenger, opts.timeout );

    pn_messenger_start(messenger);
    check_messenger(messenger);

    /* Subscribe to every requested source address. */
    int i;
    for (i = 0; i < opts.subscriptions.count; i++) {
        pn_messenger_subscribe(messenger, opts.subscriptions.addresses[i]);
        check_messenger(messenger);
        LOG("Subscribing to '%s'\n", opts.subscriptions.addresses[i]);
    }

    // hack to let test scripts know when the receivers are ready (so
    // that the senders may be started)
    if (opts.ready_text) {
        fprintf(stdout, "%s\n", opts.ready_text);
        fflush(stdout);
    }

    /* Main receive loop; msg_count == 0 means "run forever". */
    while (!opts.msg_count || received < opts.msg_count) {
        LOG("Calling pn_messenger_recv(%d)\n", opts.recv_count);
        rc = pn_messenger_recv(messenger, opts.recv_count);
        check_messenger(messenger);
        /* With a zero timeout a PN_TIMEOUT return is the expected
           "nothing available yet" result, not an error. */
        check(rc == 0 || (opts.timeout == 0 && rc == PN_TIMEOUT),
              "pn_messenger_recv() failed");

        // start the timer only after receiving the first msg
        if (received == 0) statistics_start( &stats );

        LOG("Messages on incoming queue: %d\n", pn_messenger_incoming(messenger));
        while (pn_messenger_incoming(messenger)) {
            pn_messenger_get(messenger, message);
            check_messenger(messenger);
            received++;
            // TODO: header decoding?
            // uint64_t id = pn_message_get_correlation_id( message ).u.as_ulong;
            statistics_msg_received( &stats, message );

            /* Optionally echo the message back to its reply-to address. */
            if (opts.reply) {
                const char *reply_addr = pn_message_get_reply_to( message );
                if (reply_addr) {
                    LOG("Replying to: %s\n", reply_addr );
                    pn_message_set_address( message, reply_addr );
                    pn_message_set_creation_time( message, msgr_now() );
                    pn_messenger_put(messenger, message);
                    sent++;
                }
            }

            /* Optionally forward to the next target (round-robin). */
            if (forward) {
                const char *forward_addr =
                    opts.forwarding_targets.addresses[forwarding_index];
                forwarding_index = NEXT_ADDRESS(opts.forwarding_targets, forwarding_index);
                LOG("Forwarding to: %s\n", forward_addr );
                pn_message_set_address( message, forward_addr );
                pn_message_set_reply_to( message, NULL );  // else points to origin sender
                pn_message_set_creation_time( message, msgr_now() );
                pn_messenger_put(messenger, message);
                sent++;
            }
        }
        /* NOTE(review): %llu with uint64_t assumes unsigned long long ==
           uint64_t on this platform — PRIu64 would be strictly portable. */
        LOG("Messages received=%llu sent=%llu\n", received, sent);
    }

    // this will flush any pending sends
    if (pn_messenger_outgoing(messenger) > 0) {
        LOG("Calling pn_messenger_send()\n");
        rc = pn_messenger_send(messenger, -1);
        check_messenger(messenger);
        check(rc == 0, "pn_messenger_send() failed");
    }

    rc = pn_messenger_stop(messenger);
    check(rc == 0, "pn_messenger_stop() failed");
    check_messenger(messenger);

    statistics_report( &stats, sent, received );

    pn_messenger_free(messenger);
    pn_message_free(message);
    addresses_free( &opts.subscriptions );
    addresses_free( &opts.forwarding_targets );
    return 0;
}
/*
 * Per-connection event handler for the receiver (reply-capable variant).
 *
 * Tracks the single incoming link on each connection, decodes arriving
 * messages into the shared global buffer, optionally echoes each message
 * back on a dedicated reply link (credit for the incoming link is then
 * mirrored from the reply link's credit in PN_LINK_FLOW), and triggers
 * global shutdown once the requested message count has been received.
 */
void connection_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
    connection_context_t *cc = connection_context(h);
    bool replying = cc->global->opts->reply;

    switch (type) {
    case PN_LINK_REMOTE_OPEN: {
        pn_link_t *link = pn_event_link(event);
        if (pn_link_is_receiver(link)) {
            check(cc->recv_link == NULL, "Multiple incoming links on one connection");
            cc->recv_link = link;
            pn_connection_t *conn = pn_event_connection(event);
            pn_list_add(cc->global->active_connections, conn);
            if (cc->global->shutting_down) {
                pn_connection_close(conn);
                break;
            }
            if (replying) {
                /* Set up a reply link and defer granting credit to the
                   incoming link until PN_LINK_FLOW. The link belongs to the
                   event's connection, so reuse the outer conn (the original
                   redeclared a shadowing conn here). */
                pn_session_t *ssn = pn_session(conn);
                pn_session_open(ssn);
                char name[100]; // prefer a multiplatform uuid generator
                /* snprintf: bounded even if connection_id is unexpectedly wide */
                snprintf(name, sizeof(name), "reply_sender_%d", cc->connection_id);
                cc->reply_link = pn_sender(ssn, name);
                pn_link_open(cc->reply_link);
            } else {
                /* Non-reply case: flow controller keeps credit topped up. */
                pn_flowcontroller_t *fc = pn_flowcontroller(1024);
                pn_handler_add(h, fc);
                pn_decref(fc);
            }
        }
    } break;

    case PN_LINK_FLOW: {
        if (replying) {
            pn_link_t *reply_link = pn_event_link(event);
            // pn_flowcontroller handles the non-reply case
            check(reply_link == cc->reply_link, "internal error");
            // Grant the sender as much credit as just given to us for replies
            int delta = pn_link_credit(reply_link) - pn_link_credit(cc->recv_link);
            if (delta > 0)
                pn_link_flow(cc->recv_link, delta);
        }
    } break;

    case PN_DELIVERY: {
        pn_link_t *recv_link = pn_event_link(event);
        pn_delivery_t *dlv = pn_event_delivery(event);
        if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
            /* Start the clock on the first complete message. */
            if (cc->global->received == 0)
                statistics_start(cc->global->stats);

            /* Read the complete encoded message into the shared buffer. */
            size_t encoded_size = pn_delivery_pending(dlv);
            cc->global->encoded_data = ensure_buffer(cc->global->encoded_data,
                                                     encoded_size,
                                                     &cc->global->encoded_data_size);
            check(cc->global->encoded_data, "decoding buffer realloc failure");
            ssize_t n = pn_link_recv(recv_link, cc->global->encoded_data, encoded_size);
            check(n == (ssize_t) encoded_size, "message data read fail");

            pn_message_t *msg = cc->global->message;
            int err = pn_message_decode(msg, cc->global->encoded_data, n);
            check(err == 0, "message decode error");
            cc->global->received++;
            pn_delivery_settle(dlv);
            statistics_msg_received(cc->global->stats, msg);

            if (replying) {
                const char *reply_addr = pn_message_get_reply_to(msg);
                if (reply_addr) {
                    pn_link_t *rl = cc->reply_link;
                    check(pn_link_credit(rl) > 0,
                          "message received without corresponding reply credit");
                    LOG("Replying to: %s\n", reply_addr );
                    pn_message_set_address(msg, reply_addr);
                    pn_message_set_creation_time(msg, msgr_now());

                    /* Delivery tag: raw bytes of the send counter. Using a
                       real uint64_t object (instead of storing through a
                       char[8] cast to uint64_t*) avoids the original's
                       misaligned / strict-aliasing store. */
                    uint64_t tag = cc->global->sent;
                    pn_delivery_t *reply_dlv =
                        pn_delivery(rl, pn_dtag((const char *) &tag, sizeof(tag)));

                    size_t size = cc->global->encoded_data_size;
                    int enc_err = pn_message_encode(msg, cc->global->encoded_data, &size);
                    check(enc_err == 0, "message encoding error");
                    pn_link_send(rl, cc->global->encoded_data, size);
                    pn_delivery_settle(reply_dlv);   /* pre-settled reply */
                    cc->global->sent++;
                }
            }
        }
        /* Done? Tear everything down. */
        if (cc->global->received >= cc->global->opts->msg_count) {
            global_shutdown(cc->global);
        }
    } break;

    case PN_CONNECTION_UNBOUND: {
        pn_connection_t *conn = pn_event_connection(event);
        pn_list_remove(cc->global->active_connections, conn);
        pn_connection_release(conn);
    } break;

    default:
        break;
    }
}
/*
 * Messenger-based sender main.
 *
 * Builds one reusable message with an opts.msg_size zero-filled binary
 * body and reply-to "~" (dynamic reply address), then sends msg_count
 * copies round-robin across the target addresses, stamping each with a
 * ULONG correlation id and a creation timestamp. When get_replies is
 * set, it waits for a reply per sent message via process_replies();
 * otherwise it flushes in batches of send_batch. Finishes by stopping
 * the messenger and printing throughput statistics.
 */
int main(int argc, char** argv)
{
    Options_t opts;
    Statistics_t stats;
    uint64_t sent = 0;
    uint64_t received = 0;
    int target_index = 0;          /* round-robin cursor into opts.targets */
    int rc;
    pn_message_t *message = 0;
    pn_message_t *reply_message = 0;
    pn_messenger_t *messenger = 0;

    parse_options( argc, argv, &opts );

    messenger = pn_messenger( opts.name );

    /* Optional TLS / security configuration. */
    if (opts.certificate) {
        rc = pn_messenger_set_certificate(messenger, opts.certificate);
        check( rc == 0, "Failed to set certificate" );
    }
    if (opts.privatekey) {
        rc = pn_messenger_set_private_key(messenger, opts.privatekey);
        check( rc == 0, "Failed to set private key" );
    }
    if (opts.password) {
        rc = pn_messenger_set_password(messenger, opts.password);
        /* password copy owned by opts; messenger keeps its own */
        free(opts.password);
        check( rc == 0, "Failed to set password" );
    }
    if (opts.ca_db) {
        rc = pn_messenger_set_trusted_certificates(messenger, opts.ca_db);
        check( rc == 0, "Failed to set trusted CA database" );
    }
    if (opts.outgoing_window) {
        pn_messenger_set_outgoing_window( messenger, opts.outgoing_window );
    }
    pn_messenger_set_timeout( messenger, opts.timeout );

    pn_messenger_start(messenger);

    /* Build the single message reused for every send. */
    message = pn_message();
    check(message, "failed to allocate a message");
    pn_message_set_reply_to(message, "~");
    pn_data_t *body = pn_message_body(message);
    char *data = (char *)calloc(1, opts.msg_size);   /* zero-filled payload */
    pn_data_put_binary(body, pn_bytes(opts.msg_size, data));
    free(data);     /* pn_data_put_binary copied the bytes */
    pn_atom_t id;
    id.type = PN_ULONG;

#if 0
    // TODO: how do we effectively benchmark header processing overhead???
    pn_data_t *props = pn_message_properties(message);
    pn_data_put_map(props);
    pn_data_enter(props);
    //
    //pn_data_put_string(props, pn_bytes(6, "string"));
    //pn_data_put_string(props, pn_bytes(10, "this is awkward"));
    //
    //pn_data_put_string(props, pn_bytes(4, "long"));
    pn_data_put_long(props, 12345);
    //
    //pn_data_put_string(props, pn_bytes(9, "timestamp"));
    pn_data_put_timestamp(props, (pn_timestamp_t) 54321);
    pn_data_exit(props);
#endif

    const int get_replies = opts.get_replies;
    if (get_replies) {
        // disable the timeout so that pn_messenger_recv() won't block
        reply_message = pn_message();
        check(reply_message, "failed to allocate a message");
    }

    statistics_start( &stats );

    /* Main send loop; msg_count == 0 means "send forever". */
    while (!opts.msg_count || (sent < opts.msg_count)) {
        // setup the message to send
        pn_message_set_address(message, opts.targets.addresses[target_index]);
        target_index = NEXT_ADDRESS(opts.targets, target_index);
        id.u.as_ulong = sent;
        pn_message_set_correlation_id( message, id );
        pn_message_set_creation_time( message, msgr_now() );

        pn_messenger_put(messenger, message);
        sent++;

        /* Batch flush once enough messages are queued outbound. */
        if (opts.send_batch && (pn_messenger_outgoing(messenger) >= (int)opts.send_batch)) {
            if (get_replies) {
                while (received < sent) {
                    // this will also transmit any pending sent messages
                    received += process_replies( messenger, reply_message,
                                                 &stats, opts.recv_count );
                }
            } else {
                LOG("Calling pn_messenger_send()\n");
                rc = pn_messenger_send(messenger, -1);
                check((rc == 0 || rc == PN_TIMEOUT), "pn_messenger_send() failed");
            }
        }
        check_messenger(messenger);
    }

    /* NOTE(review): %llu with uint64_t assumes unsigned long long ==
       uint64_t on this platform — PRIu64 would be strictly portable. */
    LOG("Messages received=%llu sent=%llu\n", received, sent);

    if (get_replies) {
        // wait for the last of the replies
        while (received < sent) {
            int count = process_replies( messenger, reply_message,
                                         &stats, opts.recv_count );
            check( count > 0 || (opts.timeout == 0),
                   "Error: timed out waiting for reply messages\n");
            received += count;
            LOG("Messages received=%llu sent=%llu\n", received, sent);
        }
    } else if (pn_messenger_outgoing(messenger) > 0) {
        /* Flush anything still queued outbound. */
        LOG("Calling pn_messenger_send()\n");
        rc = pn_messenger_send(messenger, -1);
        check(rc == 0, "pn_messenger_send() failed");
    }

    rc = pn_messenger_stop(messenger);
    check(rc == 0, "pn_messenger_stop() failed");
    check_messenger(messenger);

    statistics_report( &stats, sent, received );

    pn_messenger_free(messenger);
    pn_message_free(message);
    if (reply_message) pn_message_free( reply_message );
    addresses_free( &opts.targets );
    return 0;
}
/*
 * Per-connection event handler (resource-reporting variant).
 *
 * Decodes each complete incoming delivery, optionally accumulates
 * end-to-end latency from an in-message timestamp, and every
 * report_frequency messages writes a tab-separated report line
 * (cpu / rss / throughput, plus latency when timestamping) to
 * report_fp. Shuts everything down once msg_count messages arrived.
 */
void connection_dispatch ( pn_handler_t *h, pn_event_t *event, pn_event_type_t type )
{
    connection_context_t *cc = connection_context(h);

    switch ( type ) {
    case PN_LINK_REMOTE_OPEN: {
        pn_link_t *link = pn_event_link(event);
        if (pn_link_is_receiver(link)) {
            check(cc->recv_link == NULL, "Multiple incomming links on one connection");
            cc->recv_link = link;
            pn_connection_t *conn = pn_event_connection(event);
            pn_list_add(cc->global->active_connections, conn);
            if (cc->global->shutting_down) {
                pn_connection_close(conn);
                break;
            }
            /* Flow controller keeps receive credit topped up. */
            pn_flowcontroller_t *fc = pn_flowcontroller(1024);
            pn_handler_add(h, fc);
            pn_decref(fc);
        }
    } break;

    case PN_DELIVERY: {
        pn_link_t *recv_link = pn_event_link(event);
        pn_delivery_t *dlv = pn_event_delivery(event);
        if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
            /* First message: start the stats clock. */
            if (cc->global->received == 0) statistics_start(cc->global->stats);

            /* Read the complete encoded message into the shared buffer. */
            size_t encoded_size = pn_delivery_pending(dlv);
            cc->global->encoded_data = ensure_buffer(cc->global->encoded_data,
                                                     encoded_size,
                                                     &cc->global->encoded_data_size);
            check(cc->global->encoded_data, "decoding buffer realloc failure");

            /* If this was the first message received, initialize our reporting. */
            if ( ! cc->global->received )
                rr_init ( & cc->global->resource_reporter );

            ssize_t n = pn_link_recv(recv_link, cc->global->encoded_data, encoded_size);
            check(n == (ssize_t) encoded_size, "message data read fail");
            //fprintf ( stderr, "MDEBUG encoded_size == %d\n", encoded_size );

            pn_message_t *msg = cc->global->message;
            int err = pn_message_decode ( msg, cc->global->encoded_data, n );
            check ( err == 0, "message decode error" );

            /* Latency accounting: accumulate (now - sender timestamp).
               A missing timestamp is fatal when timestamping is on. */
            if ( cc->global->opts->timestamping ) {
                double message_timestamp;
                if ( get_message_timestamp ( msg, & message_timestamp ) ) {
                    double now = now_timestamp ( );
                    cc->global->total_latency += (now - message_timestamp);
                } else {
                    /* NOTE(review): %d assumes received is int-compatible —
                       confirm its declared type. */
                    fprintf ( stderr, "receiver: no timestamp at msg count %d.\n",
                              cc->global->received );
                    exit ( 1 );
                }
            }

            cc->global->received++;

            /* Emit one report line every report_frequency messages. */
            if ( ! ( cc->global->received % cc->global->opts->report_frequency ) ) {
                /* Function-static: header is printed once per process, not
                   once per connection. */
                static bool first_time = true;
                double cpu_percentage;
                int rss;
                double sslr = rr_seconds_since_last_report ( & cc->global->resource_reporter );
                rr_report ( & cc->global->resource_reporter, & cpu_percentage, & rss );
                /* Throughput is msgs per second over the last interval. */
                double throughput = (double)(cc->global->opts->report_frequency) / sslr;

                if ( first_time ) {
                    /* Column header; layout depends on which optional
                       columns are enabled. */
                    if ( cc->global->opts->timestamping ) {
                        if ( cc->global->opts->print_message_size )
                            fprintf(cc->global->report_fp, "msg_size\trecv_msgs\tcpu\trss\tthroughput\tlatency\n");
                        else
                            fprintf(cc->global->report_fp, "recv_msgs\tcpu\trss\tthroughput\tlatency\n");
                    } else {
                        if ( cc->global->opts->print_message_size )
                            fprintf(cc->global->report_fp, "msg_size\trecv_msgs\tcpu\trss\tthroughput\n");
                        else
                            fprintf(cc->global->report_fp, "recv_msgs\tcpu\trss\tthroughput\n");
                    }
                    first_time = false;
                }

                if ( cc->global->opts->timestamping ) {
                    /* Mean latency over the interval, converted to msec,
                       then reset the accumulator for the next interval. */
                    double average_latency = cc->global->total_latency /
                                             cc->global->opts->report_frequency;
                    average_latency *= 1000.0; // in msec.
                    cc->global->total_latency = 0;
                    fprintf ( cc->global->report_fp,
                              "%d\t%lf\t%d\t%lf\t%lf\n",
                              cc->global->received,
                              cpu_percentage,
                              rss,
                              throughput,
                              average_latency );
                } else {
                    // was:
                    // "recv_msgs: %10d cpu: %5.1lf rss: %6d throughput: %8.0lf\n"
                    if ( cc->global->opts->print_message_size ) {
                        fprintf ( cc->global->report_fp,
                                  "%d\t%d\t%lf\t%d\t%lf\n",
                                  cc->global->opts->message_size,
                                  cc->global->received,
                                  cpu_percentage,
                                  rss,
                                  throughput );
                    } else {
                        fprintf ( cc->global->report_fp,
                                  "%d\t%lf\t%d\t%lf\n",
                                  cc->global->received,
                                  cpu_percentage,
                                  rss,
                                  throughput );
                    }
                }
            }

            pn_delivery_settle(dlv); // move this up
            statistics_msg_received(cc->global->stats, msg);
        }

        /* Done? Tear everything down. */
        if (cc->global->received >= cc->global->opts->msg_count) {
            global_shutdown(cc->global);
        }
    } break;

    case PN_CONNECTION_UNBOUND: {
        pn_connection_t *conn = pn_event_connection(event);
        pn_list_remove(cc->global->active_connections, conn);
        pn_connection_release(conn);
    } break;

    default:
        break;
    }
}
/*
 * Reactor event handler for the sending connection.
 *
 * On connection init it opens a connection/session and a sender link
 * (optionally addressed from the URL path). On credit (PN_LINK_FLOW) it
 * encodes and sends pre-settled messages until credit or msg_count runs
 * out, closing the connection afterwards unless replies are expected.
 * A reply-receiving link gets its own dedicated handler. The final
 * statistics report is printed when the local close is issued.
 */
void sender_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
    sender_context_t *sc = sender_context(h);

    switch (type) {
    case PN_CONNECTION_INIT: {
        /* Open connection, one session, and one sender link named "sender". */
        pn_connection_t *conn = pn_event_connection(event);
        pn_connection_set_container(conn, pn_string_get(sc->container_id));
        pn_connection_set_hostname(conn, pn_string_get(sc->hostname));
        pn_connection_open(conn);
        pn_session_t *ssn = pn_session(conn);
        pn_session_open(ssn);
        pn_link_t *snd = pn_sender(ssn, "sender");
        const char *path = pn_url_get_path(sc->send_url);
        if (path && *path) {   /* *path: non-empty check without strlen() */
            pn_terminus_set_address(pn_link_target(snd), path);
            pn_terminus_set_address(pn_link_source(snd), path);
        }
        pn_link_open(snd);
    } break;

    case PN_LINK_FLOW: {
        /* Credit granted: send until credit or the message budget runs out. */
        pn_link_t *snd = pn_event_link(event);
        while (pn_link_credit(snd) > 0 && sc->sent < sc->opts->msg_count) {
            if (sc->sent == 0)
                statistics_start(sc->stats);

            /* Delivery tag: raw bytes of the send counter. Using a real
               uint64_t object (instead of storing through a char[8] cast
               to uint64_t*) avoids the original's misaligned /
               strict-aliasing store. */
            uint64_t tag = sc->sent;
            pn_delivery_t *dlv = pn_delivery(snd, pn_dtag((const char *) &tag, sizeof(tag)));

            // setup the message to send
            pn_message_t *msg = sc->message;
            pn_message_set_address(msg, sc->opts->targets.addresses[0]);
            sc->id.u.as_ulong = sc->sent;
            pn_message_set_correlation_id(msg, sc->id);
            pn_message_set_creation_time(msg, msgr_now());

            size_t size = sc->encoded_data_size;
            int err = pn_message_encode(msg, sc->encoded_data, &size);
            check(err == 0, "message encoding error");
            pn_link_send(snd, sc->encoded_data, size);
            pn_delivery_settle(dlv);   /* pre-settled: no ack tracking */
            sc->sent++;
        }
        /* All sends done and no replies expected: close link + connection. */
        if (sc->sent == sc->opts->msg_count && !sc->opts->get_replies) {
            pn_link_close(snd);
            pn_connection_t *conn = pn_event_connection(event);
            pn_connection_close(conn);
        }
    } break;

    case PN_LINK_INIT: {
        pn_link_t *link = pn_event_link(event);
        if (pn_link_is_receiver(link)) {
            // Response messages link. Could manage credit and deliveries in this handler but
            // a dedicated handler also works.
            pn_handler_t *replyto = replyto_handler(sc);
            pn_flowcontroller_t *fc = pn_flowcontroller(1024);
            pn_handler_add(replyto, fc);
            pn_decref(fc);
            pn_handshaker_t *handshaker = pn_handshaker();
            pn_handler_add(replyto, handshaker);
            pn_decref(handshaker);
            pn_record_t *record = pn_link_attachments(link);
            pn_record_set_handler(record, replyto);
            pn_decref(replyto);
        }
    } break;

    case PN_CONNECTION_LOCAL_CLOSE: {
        /* We initiated close: emit the final throughput report. */
        statistics_report(sc->stats, sc->sent, sc->received);
    } break;

    default:
        break;
    }
}
/* main -- dnsproxy main function */ int main(int argc, char *argv[]) { int ch; struct passwd *pw = NULL; struct sockaddr_in addr; struct event evq, eva; const char *config = "/etc/dnsproxy.conf"; int daemonize = 0; /* Process commandline arguments */ while ((ch = getopt(argc, argv, "c:dhV")) != -1) { switch (ch) { case 'c': config = optarg; break; case 'd': daemonize = 1; break; case 'V': fprintf(stderr, PACKAGE_STRING "\n"); exit(0); /* FALLTHROUGH */ case 'h': default: fprintf(stderr, "usage: dnsproxy [-c file] [-dhV]\n" \ "\t-c file Read configuration from file\n" \ "\t-d Detach and run as a daemon\n" \ "\t-h This help text\n" \ "\t-V Show version information\n"); exit(1); } } /* Parse configuration and check required parameters */ if (!parse(config)) fatal("unable to parse configuration"); if (!authoritative || !recursive) fatal("No authoritative or recursive server defined"); if (!listenat) listenat = strdup("0.0.0.0"); /* Create and bind query socket */ if ((sock_query = socket(AF_INET, SOCK_DGRAM, 0)) == -1) fatal("unable to create socket: %s", strerror(errno)); memset(&addr, 0, sizeof(struct sockaddr_in)); addr.sin_addr.s_addr = inet_addr(listenat); addr.sin_port = htons(port); addr.sin_family = AF_INET; if (bind(sock_query, (struct sockaddr *)&addr, sizeof(addr)) != 0) fatal("unable to bind socket: %s", strerror(errno)); /* Create and bind answer socket */ if ((sock_answer = socket(AF_INET, SOCK_DGRAM, 0)) == -1) fatal("unable to create socket: %s", strerror(errno)); memset(&addr, 0, sizeof(struct sockaddr_in)); addr.sin_family = AF_INET; if (bind(sock_answer, (struct sockaddr *)&addr, sizeof(addr)) != 0) fatal("unable to bind socket: %s", strerror(errno)); /* Fill sockaddr_in structs for both servers */ memset(&authoritative_addr, 0, sizeof(struct sockaddr_in)); authoritative_addr.sin_addr.s_addr = inet_addr(authoritative); authoritative_addr.sin_port = htons(authoritative_port); authoritative_addr.sin_family = AF_INET; memset(&recursive_addr, 0, 
sizeof(struct sockaddr_in)); recursive_addr.sin_addr.s_addr = inet_addr(recursive); recursive_addr.sin_port = htons(recursive_port); recursive_addr.sin_family = AF_INET; /* Daemonize if requested and switch to syslog */ if (daemonize) { if (daemon(0, 0) == -1) fatal("unable to daemonize"); log_syslog("dnsproxy"); } /* Find less privileged user */ if (user) { pw = getpwnam(user); if (!pw) fatal("unable to find user %s", user); } /* Do a chroot if requested */ if (chrootdir) { if (chdir(chrootdir) || chroot(chrootdir)) fatal("unable to chroot to %s", chrootdir); chdir("/"); } /* Drop privileges */ if (user) { if (setgroups(1, &pw->pw_gid) < 0) fatal("setgroups: %s", strerror(errno)); #if defined(HAVE_SETRESGID) if (setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) < 0) fatal("setresgid: %s", strerror(errno)); #elif defined(HAVE_SETREGID) if (setregid(pw->pw_gid, pw->pw_gid) < 0) fatal("setregid: %s", strerror(errno)); #else if (setegid(pw->pw_gid) < 0) fatal("setegid: %s", strerror(errno)); if (setgid(pw->pw_gid) < 0) fatal("setgid: %s", strerror(errno)); #endif #if defined(HAVE_SETRESUID) if (setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid) < 0) fatal("setresuid: %s", strerror(errno)); #elif defined(HAVE_SETREUID) if (setreuid(pw->pw_uid, pw->pw_uid) < 0) fatal("setreuid: %s", strerror(errno)); #else if (seteuid(pw->pw_uid) < 0) fatal("seteuid: %s", strerror(errno)); if (setuid(pw->pw_uid) < 0) fatal("setuid: %s", strerror(errno)); #endif } /* Init event handling */ event_init(); event_set(&evq, sock_query, EV_READ, do_query, &evq); event_add(&evq, NULL); event_set(&eva, sock_answer, EV_READ, do_answer, &eva); event_add(&eva, NULL); /* Zero counters and start statistics timer */ statistics_start(); /* Take care of signals */ if (signal(SIGINT, signal_handler) == SIG_ERR) fatal("unable to mask signal SIGINT: %s", strerror(errno)); if (signal(SIGTERM, signal_handler) == SIG_ERR) fatal("unable to mask signal SIGTERM: %s", strerror(errno)); if (signal(SIGHUP, SIG_IGN) == 
SIG_ERR) fatal("unable to mask signal SIGHUP: %s", strerror(errno)); event_sigcb = signal_event; /* Start libevent main loop */ event_dispatch(); return 0; }