/*
 * Reactor event handler for the listening socket.
 *
 * Gives every accepted connection its own handler chain (with a
 * handshaker attached), prints the "ready" marker for test scripts,
 * and shuts the process down after the reactor has been idle for two
 * consecutive timeout periods.
 */
void listener_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
    global_context_t *gc = global_context(h);

    /* Count consecutive quiesce events; any other event breaks the run. */
    gc->quiesce_count = (type == PN_REACTOR_QUIESCED) ? gc->quiesce_count + 1 : 0;

    switch (type) {
    case PN_CONNECTION_INIT: {
        /* New incoming connection on listener socket.  Give each a
           separate handler with a handshaker in front. */
        pn_connection_t *connection = pn_event_connection(event);
        pn_handler_t *ch = connection_handler(gc);
        pn_handshaker_t *handshaker = pn_handshaker();
        pn_handler_add(ch, handshaker);
        pn_decref(handshaker);
        pn_record_t *record = pn_connection_attachments(connection);
        pn_record_set_handler(record, ch);
        pn_decref(ch);
        break;
    }
    case PN_REACTOR_QUIESCED:
        /* Two quiesce in a row means we have been idle for a timeout period. */
        if (gc->opts->timeout != -1 && gc->quiesce_count > 1)
            global_shutdown(gc);
        break;
    case PN_REACTOR_INIT: {
        pn_reactor_t *reactor = pn_event_reactor(event);
        start_listener(gc, reactor);
        /* Hack to let test scripts know when the receivers are ready (so
           that the senders may be started). */
        if (gc->opts->ready_text) {
            fprintf(stdout, "%s\n", gc->opts->ready_text);
            fflush(stdout);
        }
        if (gc->opts->timeout != -1)
            pn_reactor_set_timeout(reactor, gc->opts->timeout);
        break;
    }
    case PN_REACTOR_FINAL:
        /* If nothing ever arrived, start the clock now so the report
           shows a zero-length run rather than garbage. */
        if (gc->received == 0)
            statistics_start(gc->stats);
        statistics_report(gc->stats, gc->sent, gc->received);
        break;
    default:
        break;
    }
}
/*
 * Tear down all subsystems, one call per subsystem, in the established
 * shutdown order.  Logging is stopped last so the earlier shutdown
 * routines can still emit log messages.
 */
static void _shutdown_subsystems(void)
{
    fserve_shutdown();
    xslt_shutdown();
    refbuf_shutdown();
    slave_shutdown();
    auth_shutdown();
    yp_shutdown();
    stats_shutdown();
    global_shutdown();
    connection_shutdown();
    config_shutdown();
    resolver_shutdown();
    sock_shutdown();
    thread_shutdown();

    /* Now that these are done, we can stop the loggers. */
    _stop_logging();
    log_shutdown();
    xmlCleanupParser();
}
/*
 * Shut down the server subsystems, then run the library-level cleanups
 * (curl when built with HAVE_CURL, xslt, threads, globals).
 */
void shutdown_subsystems(void)
{
    connection_shutdown();
    slave_shutdown();
    fserve_shutdown();
    stats_shutdown();
    stop_logging();
    config_shutdown();
    refbuf_shutdown();
    resolver_shutdown();
    sock_shutdown();

    DEBUG0 ("library cleanups");
#ifdef HAVE_CURL
    curl_global_cleanup();
#endif

    /* Now that these are done, we can stop the loggers. */
    log_shutdown();
    xslt_shutdown();
    thread_shutdown();
    global_shutdown();
}
/*
 * Per-connection reactor event handler (request/reply-capable variant).
 *
 * In receive-only mode (opts->reply false) a pn_flowcontroller keeps the
 * incoming link supplied with credit.  In reply mode a sender link is
 * opened on a new session, and credit is granted to the incoming link
 * only as fast as the peer grants us reply credit, so every received
 * message can be echoed back to its reply-to address.
 */
void connection_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
    connection_context_t *cc = connection_context(h);
    bool replying = cc->global->opts->reply;

    switch (type) {
    case PN_LINK_REMOTE_OPEN: {
        pn_link_t *link = pn_event_link(event);
        if (pn_link_is_receiver(link)) {
            check(cc->recv_link == NULL, "Multiple incomming links on one connection");
            cc->recv_link = link;
            pn_connection_t *conn = pn_event_connection(event);
            pn_list_add(cc->global->active_connections, conn);
            if (cc->global->shutting_down) {
                pn_connection_close(conn);
                break;
            }
            if (replying) {
                /* Set up a reply link and defer granting credit to the
                   incoming link (see PN_LINK_FLOW).  Named link_conn to
                   avoid shadowing the 'conn' local above. */
                pn_connection_t *link_conn = pn_session_connection(pn_link_session(link));
                pn_session_t *ssn = pn_session(link_conn);
                pn_session_open(ssn);
                char name[100];  /* prefer a multiplatform uuid generator */
                /* snprintf: bounded, unlike the previous sprintf. */
                snprintf(name, sizeof name, "reply_sender_%d", cc->connection_id);
                cc->reply_link = pn_sender(ssn, name);
                pn_link_open(cc->reply_link);
            } else {
                pn_flowcontroller_t *fc = pn_flowcontroller(1024);
                pn_handler_add(h, fc);
                pn_decref(fc);
            }
        }
    } break;

    case PN_LINK_FLOW: {
        if (replying) {
            pn_link_t *reply_link = pn_event_link(event);
            /* pn_flowcontroller handles the non-reply case */
            check(reply_link == cc->reply_link, "internal error");
            /* Grant the sender as much credit as just given to us for replies. */
            int delta = pn_link_credit(reply_link) - pn_link_credit(cc->recv_link);
            if (delta > 0)
                pn_link_flow(cc->recv_link, delta);
        }
    } break;

    case PN_DELIVERY: {
        pn_link_t *recv_link = pn_event_link(event);
        pn_delivery_t *dlv = pn_event_delivery(event);
        if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
            /* First message: start the statistics clock. */
            if (cc->global->received == 0)
                statistics_start(cc->global->stats);

            /* Read the complete encoded message into the shared buffer. */
            size_t encoded_size = pn_delivery_pending(dlv);
            cc->global->encoded_data = ensure_buffer(cc->global->encoded_data,
                                                     encoded_size,
                                                     &cc->global->encoded_data_size);
            check(cc->global->encoded_data, "decoding buffer realloc failure");
            ssize_t n = pn_link_recv(recv_link, cc->global->encoded_data, encoded_size);
            check(n == (ssize_t) encoded_size, "message data read fail");

            pn_message_t *msg = cc->global->message;
            int err = pn_message_decode(msg, cc->global->encoded_data, n);
            check(err == 0, "message decode error");
            cc->global->received++;
            pn_delivery_settle(dlv);
            statistics_msg_received(cc->global->stats, msg);

            if (replying) {
                const char *reply_addr = pn_message_get_reply_to(msg);
                if (reply_addr) {
                    pn_link_t *rl = cc->reply_link;
                    /* Credit pacing in PN_LINK_FLOW guarantees reply credit. */
                    check(pn_link_credit(rl) > 0, "message received without corresponding reply credit");
                    LOG("Replying to: %s\n", reply_addr );
                    pn_message_set_address(msg, reply_addr);
                    pn_message_set_creation_time(msg, msgr_now());

                    /* Unique delivery tag from the send counter, serialized
                       byte-by-byte.  The previous code stored a uint64_t
                       through a cast of the char array, which risks a
                       misaligned store (undefined behavior). */
                    char tag[8];
                    uint64_t tag_id = cc->global->sent;
                    for (size_t i = 0; i < sizeof tag; i++)
                        tag[i] = (char)(tag_id >> (8 * i));
                    pn_delivery_t *reply_dlv = pn_delivery(rl, pn_dtag(tag, 8));

                    size_t size = cc->global->encoded_data_size;
                    int enc_err = pn_message_encode(msg, cc->global->encoded_data, &size);
                    check(enc_err == 0, "message encoding error");
                    pn_link_send(rl, cc->global->encoded_data, size);
                    pn_delivery_settle(reply_dlv);
                    cc->global->sent++;
                }
            }
        }
        /* Stop once the requested number of messages has arrived. */
        if (cc->global->received >= cc->global->opts->msg_count) {
            global_shutdown(cc->global);
        }
    } break;

    case PN_CONNECTION_UNBOUND: {
        /* Connection fully closed: drop it from the active list and
           release our reference. */
        pn_connection_t *conn = pn_event_connection(event);
        pn_list_remove(cc->global->active_connections, conn);
        pn_connection_release(conn);
    } break;

    default:
        break;
    }
}
/*
 * Per-connection reactor event handler (benchmark/reporting variant).
 *
 * Accepts exactly one incoming receiver link per connection and keeps it
 * supplied with credit via a pn_flowcontroller.  For every complete
 * delivery it decodes the message, optionally accumulates end-to-end
 * latency (when opts->timestamping is set), and every
 * opts->report_frequency messages writes a tab-separated report line
 * (message count, cpu%, rss, throughput[, latency]) to
 * cc->global->report_fp.  Calls global_shutdown() once opts->msg_count
 * messages have been received.
 */
void connection_dispatch ( pn_handler_t *h, pn_event_t *event, pn_event_type_t type )
{
    connection_context_t *cc = connection_context(h);

    switch ( type ) {

    case PN_LINK_REMOTE_OPEN: {
        pn_link_t *link = pn_event_link(event);
        if (pn_link_is_receiver(link)) {
            /* Only one incoming link is supported per connection. */
            check(cc->recv_link == NULL, "Multiple incomming links on one connection");
            cc->recv_link = link;
            pn_connection_t *conn = pn_event_connection(event);
            pn_list_add(cc->global->active_connections, conn);
            if (cc->global->shutting_down) {
                /* Refuse new work while draining: close immediately. */
                pn_connection_close(conn);
                break;
            }
            /* Delegate credit management to a flow controller (window 1024). */
            pn_flowcontroller_t *fc = pn_flowcontroller(1024);
            pn_handler_add(h, fc);
            pn_decref(fc);
        }
    } break;

    case PN_DELIVERY: {
        pn_link_t *recv_link = pn_event_link(event);
        pn_delivery_t *dlv = pn_event_delivery(event);
        /* Only act on fully-arrived deliveries on a receiver link. */
        if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
            /* First message: start the statistics clock. */
            if (cc->global->received == 0)
                statistics_start(cc->global->stats);

            /* Grow the shared decode buffer to fit this message. */
            size_t encoded_size = pn_delivery_pending(dlv);
            cc->global->encoded_data = ensure_buffer(cc->global->encoded_data,
                                                     encoded_size,
                                                     &cc->global->encoded_data_size);
            check(cc->global->encoded_data, "decoding buffer realloc failure");

            /* If this was the first message received, initialize our
               resource-usage reporting as well. */
            if ( ! cc->global->received )
                rr_init ( & cc->global->resource_reporter );

            ssize_t n = pn_link_recv(recv_link, cc->global->encoded_data, encoded_size);
            check(n == (ssize_t) encoded_size, "message data read fail");
            //fprintf ( stderr, "MDEBUG encoded_size == %d\n", encoded_size );

            pn_message_t *msg = cc->global->message;
            int err = pn_message_decode ( msg, cc->global->encoded_data, n );
            check ( err == 0, "message decode error" );

            /* Latency accumulation: the sender is expected to have put a
               timestamp annotation in the message. */
            if ( cc->global->opts->timestamping ) {
                double message_timestamp;
                if ( get_message_timestamp ( msg, & message_timestamp ) ) {
                    double now = now_timestamp ( );
                    cc->global->total_latency += (now - message_timestamp);
                }
                else {
                    /* Timestamping was requested but this message has no
                       timestamp: treated as a fatal error. */
                    fprintf ( stderr, "receiver: no timestamp at msg count %d.\n", cc->global->received );
                    exit ( 1 );
                }
            }

            cc->global->received++;

            /*---------------------------------------
              Do a report every report_frequency messages.
              NOTE(review): assumes opts->report_frequency > 0; a zero
              value makes this modulo undefined behavior — confirm the
              option is validated at startup.
            ---------------------------------------*/
            if ( ! ( cc->global->received % cc->global->opts->report_frequency ) ) {
                static bool first_time = true;  /* header row printed once */
                double cpu_percentage;
                int rss;
                /* Throughput is measured over the interval since the last
                   report; read the interval BEFORE rr_report resets it. */
                double sslr = rr_seconds_since_last_report ( & cc->global->resource_reporter );
                rr_report ( & cc->global->resource_reporter, & cpu_percentage, & rss );
                double throughput = (double)(cc->global->opts->report_frequency) / sslr;

                /* Emit the column-header line before the first data row;
                   columns depend on the timestamping/message-size options. */
                if ( first_time ) {
                    if ( cc->global->opts->timestamping ) {
                        if ( cc->global->opts->print_message_size )
                            fprintf(cc->global->report_fp, "msg_size\trecv_msgs\tcpu\trss\tthroughput\tlatency\n");
                        else
                            fprintf(cc->global->report_fp, "recv_msgs\tcpu\trss\tthroughput\tlatency\n");
                    }
                    else {
                        if ( cc->global->opts->print_message_size )
                            fprintf(cc->global->report_fp, "msg_size\trecv_msgs\tcpu\trss\tthroughput\n");
                        else
                            fprintf(cc->global->report_fp, "recv_msgs\tcpu\trss\tthroughput\n");
                    }
                    first_time = false;
                }

                if ( cc->global->opts->timestamping ) {
                    /* Average latency over this reporting window, converted
                       to msec; the accumulator is reset for the next window. */
                    double average_latency = cc->global->total_latency / cc->global->opts->report_frequency;
                    average_latency *= 1000.0;  // in msec.
                    cc->global->total_latency = 0;
                    fprintf ( cc->global->report_fp,
                              "%d\t%lf\t%d\t%lf\t%lf\n",
                              cc->global->received,
                              cpu_percentage,
                              rss,
                              throughput,
                              average_latency );
                }
                else {
                    // was:
                    // "recv_msgs: %10d cpu: %5.1lf rss: %6d throughput: %8.0lf\n"
                    if ( cc->global->opts->print_message_size ) {
                        fprintf ( cc->global->report_fp,
                                  "%d\t%d\t%lf\t%d\t%lf\n",
                                  cc->global->opts->message_size,
                                  cc->global->received,
                                  cpu_percentage,
                                  rss,
                                  throughput );
                    }
                    else {
                        fprintf ( cc->global->report_fp,
                                  "%d\t%lf\t%d\t%lf\n",
                                  cc->global->received,
                                  cpu_percentage,
                                  rss,
                                  throughput );
                    }
                }
            }

            pn_delivery_settle(dlv);  // TODO: original note suggested moving this up
            statistics_msg_received(cc->global->stats, msg);
        }

        /* Stop once the requested number of messages has arrived. */
        if (cc->global->received >= cc->global->opts->msg_count) {
            global_shutdown(cc->global);
        }
    } break;

    case PN_CONNECTION_UNBOUND: {
        /* Connection fully closed: drop it from the active list and
           release our reference. */
        pn_connection_t *conn = pn_event_connection(event);
        pn_list_remove(cc->global->active_connections, conn);
        pn_connection_release(conn);
    } break;

    default:
        break;
    }
}