/* replyto_dispatch: handle deliveries arriving on the reply-to receiver link.
 * Decode each reply, record statistics, and close once all replies are in. */
void replyto_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
  sender_context_t *sc = replyto_sender_context(h);
  switch (type) {
  case PN_DELIVERY: {
    check(sc->opts->get_replies, "Unexpected reply message");
    pn_link_t *recv_link = pn_event_link(event);
    pn_delivery_t *dlv = pn_event_delivery(event);
    if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
      size_t encoded_size = pn_delivery_pending(dlv);
      check(encoded_size <= sc->encoded_data_size, "decoding buffer too small");
      ssize_t n = pn_link_recv(recv_link, sc->encoded_data, encoded_size);
      check(n == (ssize_t)encoded_size, "read fail on reply link");
      pn_message_t *msg = sc->reply_message;
      int err = pn_message_decode(msg, sc->encoded_data, n);
      check(err == 0, "message decode error");
      statistics_msg_received(sc->stats, msg);
      sc->received++;
      pn_delivery_settle(dlv);
    }
    if (sc->received == sc->opts->msg_count) {
      pn_link_close(recv_link);
      pn_connection_t *conn = pn_event_connection(event);
      pn_connection_close(conn);
    }
  } break;
  default:
    break;
  }
}
static pn_event_type_t message_stream_handler(test_handler_t *th, pn_event_t *e)
{
  struct message_stream_context *ctx = (struct message_stream_context*)th->context;
  switch (pn_event_type(e)) {
  case PN_CONNECTION_BOUND:
    pn_transport_set_max_frame(pn_event_transport(e), FRAME);
    return PN_EVENT_NONE;

  case PN_SESSION_INIT:
    pn_session_set_incoming_capacity(pn_event_session(e), FRAME); /* Single frame incoming */
    pn_session_set_outgoing_window(pn_event_session(e), 1);       /* Single frame outgoing */
    return PN_EVENT_NONE;

  case PN_LINK_REMOTE_OPEN:
    common_handler(th, e);
    if (pn_link_is_receiver(pn_event_link(e))) {
      pn_link_flow(pn_event_link(e), 1);
    } else {
      ctx->sender = pn_event_link(e);
    }
    return PN_EVENT_NONE;

  case PN_LINK_FLOW:            /* Start a delivery */
    if (pn_link_is_sender(pn_event_link(e)) && !ctx->dlv) {
      ctx->dlv = pn_delivery(pn_event_link(e), pn_dtag("x", 1));
    }
    return PN_LINK_FLOW;

  case PN_CONNECTION_WAKE: {    /* Send a chunk */
    ssize_t remains = ctx->size - ctx->sent;
    ssize_t n = (CHUNK < remains) ? CHUNK : remains;
    TEST_CHECK(th->t, n == pn_link_send(ctx->sender, ctx->send_buf.start + ctx->sent, n));
    ctx->sent += n;
    if (ctx->sent == ctx->size) {
      TEST_CHECK(th->t, pn_link_advance(ctx->sender));
    }
    return PN_CONNECTION_WAKE;
  }

  case PN_DELIVERY: {           /* Receive a delivery - smaller than a chunk? */
    pn_delivery_t *dlv = pn_event_delivery(e);
    if (pn_delivery_readable(dlv)) {
      ssize_t n = pn_delivery_pending(dlv);
      rwbytes_ensure(&ctx->recv_buf, ctx->received + n);
      TEST_ASSERT(n == pn_link_recv(pn_event_link(e), ctx->recv_buf.start + ctx->received, n));
      ctx->received += n;
    }
    ctx->complete = !pn_delivery_partial(dlv);
    return PN_DELIVERY;
  }

  default:
    return common_handler(th, e);
  }
}
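/* The PN_DELIVERY case above grows ctx->recv_buf with rwbytes_ensure() before
 * each pn_link_recv().  That helper lives in the test harness and is not shown
 * here; the sketch below illustrates the idea, assuming recv_buf is a
 * pn_rwbytes_t (fields `size` and `start`) and that growth is realloc-based.
 * Both the growth policy and the void return type are assumptions.
 */
#include <stdlib.h>        /* realloc */
#include <proton/types.h>  /* pn_rwbytes_t */

static void rwbytes_ensure(pn_rwbytes_t *buf, size_t size)
{
  if (size > buf->size) {
    buf->start = (char *)realloc(buf->start, size);  /* keeps already-received bytes */
    buf->size = size;
  }
}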
void messaging_adapter::on_delivery(proton_event &pe) {
    pn_event_t *cevent = pe.pn_event();
    pn_link_t *lnk = pn_event_link(cevent);
    pn_delivery_t *dlv = pn_event_delivery(cevent);
    link_context& lctx = link_context::get(lnk);

    if (pn_link_is_receiver(lnk)) {
        delivery d(make_wrapper<delivery>(dlv));
        if (!pn_delivery_partial(dlv) && pn_delivery_readable(dlv)) {
            // generate on_message
            pn_connection_t *pnc = pn_session_connection(pn_link_session(lnk));
            connection_context& ctx = connection_context::get(pnc);
            // Reusable per-connection message.
            // Avoid expensive heap malloc/free overhead.
            // See PROTON-998
            class message &msg(ctx.event_message);
            msg.decode(d);
            if (pn_link_state(lnk) & PN_LOCAL_CLOSED) {
                if (lctx.auto_accept)
                    d.release();
            } else {
                delegate_.on_message(d, msg);
                if (lctx.auto_accept && !d.settled())
                    d.accept();
                if (lctx.draining && !pn_link_credit(lnk)) {
                    lctx.draining = false;
                    receiver r(make_wrapper<receiver>(lnk));
                    delegate_.on_receiver_drain_finish(r);
                }
            }
        } else if (pn_delivery_updated(dlv) && d.settled()) {
            delegate_.on_delivery_settle(d);
        }
        if (lctx.draining && pn_link_credit(lnk) == 0) {
            lctx.draining = false;
            pn_link_set_drain(lnk, false);
            receiver r(make_wrapper<receiver>(lnk));
            delegate_.on_receiver_drain_finish(r);
            if (lctx.pending_credit) {
                pn_link_flow(lnk, lctx.pending_credit);
                lctx.pending_credit = 0;
            }
        }
        credit_topup(lnk);
    } else {
        // sender
        tracker t(make_wrapper<tracker>(dlv));
        if (pn_delivery_updated(dlv)) {
            uint64_t rstate = pn_delivery_remote_state(dlv);
            if (rstate == PN_ACCEPTED) {
                delegate_.on_tracker_accept(t);
            } else if (rstate == PN_REJECTED) {
                delegate_.on_tracker_reject(t);
            } else if (rstate == PN_RELEASED || rstate == PN_MODIFIED) {
                delegate_.on_tracker_release(t);
            }
            if (t.settled()) {
                delegate_.on_tracker_settle(t);
            }
            if (lctx.auto_settle)
                t.settle();
        }
    }
}
/* connection_dispatch: per-connection event handler. Receives messages,
 * optionally echoes each one back on a reply link, and manages credit,
 * statistics, and shutdown. */
void connection_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
  connection_context_t *cc = connection_context(h);
  bool replying = cc->global->opts->reply;
  switch (type) {
  case PN_LINK_REMOTE_OPEN: {
    pn_link_t *link = pn_event_link(event);
    if (pn_link_is_receiver(link)) {
      check(cc->recv_link == NULL, "Multiple incoming links on one connection");
      cc->recv_link = link;
      pn_connection_t *conn = pn_event_connection(event);
      pn_list_add(cc->global->active_connections, conn);
      if (cc->global->shutting_down) {
        pn_connection_close(conn);
        break;
      }
      if (replying) {
        // Set up a reply link and defer granting credit to the incoming link
        pn_connection_t *conn = pn_session_connection(pn_link_session(link));
        pn_session_t *ssn = pn_session(conn);
        pn_session_open(ssn);
        char name[100]; // prefer a multiplatform uuid generator
        sprintf(name, "reply_sender_%d", cc->connection_id);
        cc->reply_link = pn_sender(ssn, name);
        pn_link_open(cc->reply_link);
      } else {
        pn_flowcontroller_t *fc = pn_flowcontroller(1024);
        pn_handler_add(h, fc);
        pn_decref(fc);
      }
    }
  } break;

  case PN_LINK_FLOW: {
    if (replying) {
      pn_link_t *reply_link = pn_event_link(event);
      // pn_flowcontroller handles the non-reply case
      check(reply_link == cc->reply_link, "internal error");
      // Grant the sender as much credit as was just given to us for replies
      int delta = pn_link_credit(reply_link) - pn_link_credit(cc->recv_link);
      if (delta > 0)
        pn_link_flow(cc->recv_link, delta);
    }
  } break;

  case PN_DELIVERY: {
    pn_link_t *recv_link = pn_event_link(event);
    pn_delivery_t *dlv = pn_event_delivery(event);
    if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
      if (cc->global->received == 0)
        statistics_start(cc->global->stats);

      size_t encoded_size = pn_delivery_pending(dlv);
      cc->global->encoded_data = ensure_buffer(cc->global->encoded_data, encoded_size,
                                               &cc->global->encoded_data_size);
      check(cc->global->encoded_data, "decoding buffer realloc failure");
      ssize_t n = pn_link_recv(recv_link, cc->global->encoded_data, encoded_size);
      check(n == (ssize_t) encoded_size, "message data read fail");
      pn_message_t *msg = cc->global->message;
      int err = pn_message_decode(msg, cc->global->encoded_data, n);
      check(err == 0, "message decode error");
      cc->global->received++;
      pn_delivery_settle(dlv);
      statistics_msg_received(cc->global->stats, msg);

      if (replying) {
        const char *reply_addr = pn_message_get_reply_to(msg);
        if (reply_addr) {
          pn_link_t *rl = cc->reply_link;
          check(pn_link_credit(rl) > 0, "message received without corresponding reply credit");
          LOG("Replying to: %s\n", reply_addr);
          pn_message_set_address(msg, reply_addr);
          pn_message_set_creation_time(msg, msgr_now());
          char tag[8];
          void *ptr = &tag;
          *((uint64_t *) ptr) = cc->global->sent;
          pn_delivery_t *dlv = pn_delivery(rl, pn_dtag(tag, 8));
          size_t size = cc->global->encoded_data_size;
          int err = pn_message_encode(msg, cc->global->encoded_data, &size);
          check(err == 0, "message encoding error");
          pn_link_send(rl, cc->global->encoded_data, size);
          pn_delivery_settle(dlv);
          cc->global->sent++;
        }
      }
    }
    if (cc->global->received >= cc->global->opts->msg_count) {
      global_shutdown(cc->global);
    }
  } break;

  case PN_CONNECTION_UNBOUND: {
    pn_connection_t *conn = pn_event_connection(event);
    pn_list_remove(cc->global->active_connections, conn);
    pn_connection_release(conn);
  } break;

  default:
    break;
  }
}
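/* Both receive paths call ensure_buffer() to grow the shared decode buffer on
 * demand before pn_link_recv().  Its definition is elsewhere in the example;
 * the sketch below is one implementation consistent with the calls and with
 * the "decoding buffer realloc failure" check above.  The realloc-based growth
 * policy is an assumption.
 */
#include <stdlib.h>  /* realloc */

static char *ensure_buffer(char *buf, size_t needed, size_t *actual)
{
  if (needed > *actual) {
    char *grown = (char *)realloc(buf, needed);
    if (!grown) return NULL;   /* caller treats NULL as a realloc failure */
    buf = grown;
    *actual = needed;
  }
  return buf;
}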
/* connection_dispatch: receiving-side handler that decodes each message,
 * optionally accumulates end-to-end latency from embedded timestamps, and
 * prints periodic throughput and resource-usage reports. */
void connection_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
  connection_context_t *cc = connection_context(h);
  switch (type) {
  case PN_LINK_REMOTE_OPEN: {
    pn_link_t *link = pn_event_link(event);
    if (pn_link_is_receiver(link)) {
      check(cc->recv_link == NULL, "Multiple incoming links on one connection");
      cc->recv_link = link;
      pn_connection_t *conn = pn_event_connection(event);
      pn_list_add(cc->global->active_connections, conn);
      if (cc->global->shutting_down) {
        pn_connection_close(conn);
        break;
      }
      pn_flowcontroller_t *fc = pn_flowcontroller(1024);
      pn_handler_add(h, fc);
      pn_decref(fc);
    }
  } break;

  case PN_DELIVERY: {
    pn_link_t *recv_link = pn_event_link(event);
    pn_delivery_t *dlv = pn_event_delivery(event);
    if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
      if (cc->global->received == 0)
        statistics_start(cc->global->stats);

      size_t encoded_size = pn_delivery_pending(dlv);
      cc->global->encoded_data = ensure_buffer(cc->global->encoded_data, encoded_size,
                                               &cc->global->encoded_data_size);
      check(cc->global->encoded_data, "decoding buffer realloc failure");

      /* If this was the first message received, initialize our reporting. */
      if (!cc->global->received)
        rr_init(&cc->global->resource_reporter);

      ssize_t n = pn_link_recv(recv_link, cc->global->encoded_data, encoded_size);
      check(n == (ssize_t) encoded_size, "message data read fail");
      //fprintf(stderr, "MDEBUG encoded_size == %d\n", encoded_size);
      pn_message_t *msg = cc->global->message;
      int err = pn_message_decode(msg, cc->global->encoded_data, n);
      check(err == 0, "message decode error");

      /* Latency annotation: accumulate (now - send timestamp) for this message. */
      if (cc->global->opts->timestamping) {
        double message_timestamp;
        if (get_message_timestamp(msg, &message_timestamp)) {
          double now = now_timestamp();
          cc->global->total_latency += (now - message_timestamp);
        } else {
          fprintf(stderr, "receiver: no timestamp at msg count %d.\n", cc->global->received);
          exit(1);
        }
      }

      cc->global->received++;

      /* Do a report every report_frequency messages. */
      if (!(cc->global->received % cc->global->opts->report_frequency)) {
        static bool first_time = true;
        double cpu_percentage;
        int rss;
        double sslr = rr_seconds_since_last_report(&cc->global->resource_reporter);
        rr_report(&cc->global->resource_reporter, &cpu_percentage, &rss);
        double throughput = (double)(cc->global->opts->report_frequency) / sslr;

        if (first_time) {
          /* Print the column headers once. */
          if (cc->global->opts->timestamping) {
            if (cc->global->opts->print_message_size)
              fprintf(cc->global->report_fp, "msg_size\trecv_msgs\tcpu\trss\tthroughput\tlatency\n");
            else
              fprintf(cc->global->report_fp, "recv_msgs\tcpu\trss\tthroughput\tlatency\n");
          } else {
            if (cc->global->opts->print_message_size)
              fprintf(cc->global->report_fp, "msg_size\trecv_msgs\tcpu\trss\tthroughput\n");
            else
              fprintf(cc->global->report_fp, "recv_msgs\tcpu\trss\tthroughput\n");
          }
          first_time = false;
        }

        if (cc->global->opts->timestamping) {
          double average_latency = cc->global->total_latency / cc->global->opts->report_frequency;
          average_latency *= 1000.0;       // in msec.
          cc->global->total_latency = 0;
          /* Include msg_size when requested, matching the header row. */
          if (cc->global->opts->print_message_size)
            fprintf(cc->global->report_fp, "%d\t%d\t%lf\t%d\t%lf\t%lf\n",
                    cc->global->opts->message_size, cc->global->received,
                    cpu_percentage, rss, throughput, average_latency);
          else
            fprintf(cc->global->report_fp, "%d\t%lf\t%d\t%lf\t%lf\n",
                    cc->global->received, cpu_percentage, rss, throughput, average_latency);
        } else {
          // was: "recv_msgs: %10d cpu: %5.1lf rss: %6d throughput: %8.0lf\n"
          if (cc->global->opts->print_message_size) {
            fprintf(cc->global->report_fp, "%d\t%d\t%lf\t%d\t%lf\n",
                    cc->global->opts->message_size, cc->global->received,
                    cpu_percentage, rss, throughput);
          } else {
            fprintf(cc->global->report_fp, "%d\t%lf\t%d\t%lf\n",
                    cc->global->received, cpu_percentage, rss, throughput);
          }
        }
      }

      pn_delivery_settle(dlv); // move this up
      statistics_msg_received(cc->global->stats, msg);
    }
    if (cc->global->received >= cc->global->opts->msg_count) {
      global_shutdown(cc->global);
    }
  } break;

  case PN_CONNECTION_UNBOUND: {
    pn_connection_t *conn = pn_event_connection(event);
    pn_list_remove(cc->global->active_connections, conn);
    pn_connection_release(conn);
  } break;

  default:
    break;
  }
}
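/* The timestamping branch above uses two helpers defined elsewhere in this
 * benchmark: now_timestamp() and get_message_timestamp().  The sketch below
 * shows one plausible implementation, assuming the sender stamps the AMQP
 * creation-time property (as the reply path in the earlier handler does with
 * pn_message_set_creation_time).  Both the use of gettimeofday and the choice
 * of creation-time as the timestamp carrier are assumptions.
 */
#include <stdbool.h>
#include <sys/time.h>         /* gettimeofday */
#include <proton/message.h>   /* pn_message_get_creation_time */

static double now_timestamp(void)
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + tv.tv_usec / 1000000.0;             /* seconds since the epoch */
}

static bool get_message_timestamp(pn_message_t *msg, double *timestamp)
{
  pn_timestamp_t t = pn_message_get_creation_time(msg);  /* milliseconds since the epoch; 0 if unset */
  if (t == 0) return false;
  *timestamp = t / 1000.0;                                /* convert to seconds */
  return true;
}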