/*
 * Event dispatcher for the reply-to (response) receiver link.
 * Decodes each complete incoming reply into sc->reply_message, counts it,
 * and closes the link/connection once the expected number of replies
 * (opts->msg_count) has arrived.
 */
void replyto_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
    sender_context_t *sc = replyto_sender_context(h);
    switch (type) {
    case PN_DELIVERY: {
        // Replies are only expected when the get_replies option was set.
        check(sc->opts->get_replies, "Unexpected reply message");
        pn_link_t *recv_link = pn_event_link(event);
        pn_delivery_t *dlv = pn_event_delivery(event);
        // Only act once the whole message has arrived (not a partial frame).
        if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
            // Read the complete encoded reply into the pre-allocated buffer.
            size_t encoded_size = pn_delivery_pending(dlv);
            check(encoded_size <= sc->encoded_data_size, "decoding buffer too small");
            ssize_t n = pn_link_recv(recv_link, sc->encoded_data, encoded_size);
            check(n == (ssize_t)encoded_size, "read fail on reply link");
            pn_message_t *msg = sc->reply_message;
            int err = pn_message_decode(msg, sc->encoded_data, n);
            check(err == 0, "message decode error");
            statistics_msg_received(sc->stats, msg);
            sc->received++;
            pn_delivery_settle(dlv);
        }
        // All expected replies received: shut down the link and connection.
        if (sc->received == sc->opts->msg_count) {
            pn_link_close(recv_link);
            pn_connection_t *conn = pn_event_connection(event);
            pn_connection_close(conn);
        }
    } break;
    default:
        break;
    }
}
/*
 * Core callback: push a disposition/settlement change from the router core
 * out to the proton delivery that is linked to the core delivery 'dlv'.
 */
static void CORE_delivery_update(void *context, qdr_delivery_t *dlv, uint64_t disp, bool settled)
{
    pn_delivery_t *pdlv = (pn_delivery_t*) qdr_delivery_get_context(dlv);
    if (!pdlv)
        return;   // no proton delivery attached; nothing to propagate

    //
    // Propagate a changed disposition to the proton delivery before any
    // settlement below.  MODIFIED additionally carries the failed flag.
    //
    if (disp != pn_delivery_remote_state(pdlv)) {
        if (disp == PN_MODIFIED)
            pn_disposition_set_failed(pn_delivery_local(pdlv), true);
        pn_delivery_update(pdlv, disp);
    }

    //
    // On settlement, break the cross-links between the two delivery records,
    // settle the proton delivery, and drop the core delivery reference.
    //
    if (settled) {
        qdr_delivery_set_context(dlv, 0);
        pn_delivery_set_context(pdlv, 0);
        pn_delivery_settle(pdlv);
        qdr_delivery_decref(dlv);
    }
}
// Capture the remote (peer-side) state of the outgoing delivery.  A non-zero
// state causes the outgoing half to be settled and marked as such.
uint64_t BufferedTransfer::updated()
{
    const uint64_t remote_state = pn_delivery_remote_state(out.handle);
    disposition = remote_state;
    if (remote_state != 0) {
        pn_delivery_settle(out.handle);
        out.settled = true;
    }
    return remote_state;
}
// Once the outgoing half has been settled, mirror the recorded disposition
// onto the incoming delivery and settle it exactly once.  Returns true when
// both halves of the transfer are settled.
bool BufferedTransfer::settle()
{
    const bool propagate_upstream = out.settled && !in.settled;
    if (propagate_upstream) {
        pn_delivery_update(in.handle, disposition);
        pn_delivery_settle(in.handle);
        in.settled = true;
    }
    return out.settled && in.settled;
}
/*
 * Apply a status/settlement update to one tracked delivery (or, with
 * PN_CUMULATIVE, to every tracked delivery up to and including 'id').
 *
 * status  - the terminal status to apply when 'match' is false
 * flags   - PN_CUMULATIVE selects the range [store->lwm, id]
 * settle  - also settle the proton delivery and stop tracking the entry
 * match   - mirror the remote state instead of applying 'status'
 * Returns 0 (also when 'id' is not tracked, in which case nothing happens).
 */
int pni_store_update(pni_store_t *store, pn_sequence_t id, pn_status_t status, int flags, bool settle, bool match)
{
  assert(store);

  // Ignore updates for sequence ids the store is not tracking.
  if (!pni_store_tracking(store, id)) {
    return 0;
  }

  // Cumulative updates start at the low water mark; otherwise only 'id'.
  size_t start;
  if (PN_CUMULATIVE & flags) {
    start = store->lwm;
  } else {
    start = id;
  }

  for (pn_sequence_t i = start; i <= id; i++) {
    pni_entry_t *e = pni_store_entry(store, i);
    if (e) {
      pn_delivery_t *d = e->delivery;
      if (d) {
        // Only set a local disposition if none has been set yet.
        if (!pn_delivery_local_state(d)) {
          if (match) {
            // Echo whatever the remote peer reported.
            pn_delivery_update(d, pn_delivery_remote_state(d));
          } else {
            switch (status) {
            case PN_STATUS_ACCEPTED:
              pn_delivery_update(d, PN_ACCEPTED);
              break;
            case PN_STATUS_REJECTED:
              pn_delivery_update(d, PN_REJECTED);
              break;
            default:
              // other statuses leave the local state untouched
              break;
            }
          }
          pni_entry_updated(e);
        }
      }
      if (settle) {
        if (d) {
          pn_delivery_settle(d);
        }
        // Stop tracking this entry regardless of whether a delivery existed.
        pn_hash_del(store->tracked, e->id);
      }
    }
  }

  // Advance the low water mark past ids that are no longer tracked.
  while (store->hwm - store->lwm > 0 && !pn_hash_get(store->tracked, store->lwm)) {
    store->lwm++;
  }

  return 0;
}
/**
 * Delivery Disposition Handler
 *
 * For sender-side deliveries that carry a message context, relay the remote
 * disposition and settlement to the corresponding upstream (incoming)
 * delivery.  Deliveries without a tracked message are simply settled.
 */
static void router_disp_handler(void* context, dx_link_t *link, pn_delivery_t *delivery)
{
    pn_link_t *pn_link = pn_delivery_link(delivery);

    if (pn_link_is_sender(pn_link)) {
        pn_disposition_t disp = pn_delivery_remote_state(delivery);
        dx_message_t *msg = pn_delivery_get_context(delivery);
        pn_delivery_t *activate = 0;

        if (msg) {
            assert(delivery == dx_message_out_delivery(msg));

            // A non-zero remote disposition is relayed to the upstream delivery.
            if (disp != 0) {
                activate = dx_message_in_delivery(msg);
                pn_delivery_update(activate, disp);
                // TODO - handling of the data accompanying RECEIVED/MODIFIED
            }

            if (pn_delivery_settled(delivery)) {
                //
                // Downstream delivery has been settled.  Propagate the settlement
                // upstream.
                //
                activate = dx_message_in_delivery(msg);
                pn_delivery_settle(activate);
                pn_delivery_settle(delivery);
                dx_free_message(msg);
            }

            if (activate) {
                //
                // Activate the upstream/incoming link so that the settlement will
                // get pushed out.
                //
                dx_link_t *act_link = (dx_link_t*) pn_link_get_context(pn_delivery_link(activate));
                dx_link_activate(act_link);
            }

            return;
        }
    }

    // Not a tracked sender-side delivery: settle it locally.
    pn_delivery_settle(delivery);
}
/*
 * Finalizer for a tracker entry.  An entry that still holds a proton
 * delivery must settle it and sever the link before the entry is reclaimed.
 */
void pni_entry_finalize(void *object)
{
    pni_entry_t *entry = (pni_entry_t *) object;
    assert(entry->free);

    pn_delivery_t *attached = entry->delivery;
    if (attached != NULL) {
        pn_delivery_settle(attached);
        pni_entry_set_delivery(entry, NULL);
    }
}
// Send one message on this sender link and return a tracker for it.
// The delivery tag is the next value of the per-link counter; in
// pre-settled mode the delivery is settled immediately after sending.
tracker sender::send(const message &message) {
    // Encode the message body first; an empty encoding would be a bug.
    std::vector<char> buf;
    message.encode(buf);
    assert(!buf.empty());

    // Create a delivery tagged with the next counter value.
    uint64_t id = ++tag_counter;
    pn_delivery_t *dlv = pn_delivery(pn_object(),
                                     pn_dtag(reinterpret_cast<const char*>(&id), sizeof(id)));

    pn_link_send(pn_object(), &buf[0], buf.size());
    pn_link_advance(pn_object());

    // Pre-settled links settle on the sender side right away.
    if (pn_link_snd_settle_mode(pn_object()) == PN_SND_SETTLED)
        pn_delivery_settle(dlv);

    // Out of credit means the link can no longer be draining.
    if (!pn_link_credit(pn_object()))
        link_context::get(pn_object()).draining = false;

    return make_wrapper<tracker>(dlv);
}
/*
 * Core callback: send the message carried by core delivery 'dlv' out on the
 * proton link behind 'link'.  When neither end has pre-settled, the proton
 * and core deliveries are cross-linked so later disposition/settlement
 * updates can be relayed; otherwise the proton delivery is settled here.
 */
static void CORE_link_deliver(void *context, qdr_link_t *link, qdr_delivery_t *dlv, bool settled)
{
    qd_router_t *router = (qd_router_t*) context;
    qd_link_t *qlink = (qd_link_t*) qdr_link_get_context(link);
    if (!qlink)
        return;   // link already detached from its proton counterpart

    pn_link_t *plink = qd_link_pn(qlink);
    if (!plink)
        return;

    // Create the outgoing proton delivery using the core delivery's tag.
    const char *tag;
    int tag_length;
    qdr_delivery_tag(dlv, &tag, &tag_length);
    pn_delivery(plink, pn_dtag(tag, tag_length));
    pn_delivery_t *pdlv = pn_link_current(plink);

    //
    // If the remote send settle mode is set to 'settled', we should settle the delivery on behalf of the receiver.
    //
    bool remote_snd_settled = qd_link_remote_snd_settle_mode(qlink) == PN_SND_SETTLED;

    if (!settled && !remote_snd_settled) {
        // Cross-link the two delivery records; the core keeps a reference.
        pn_delivery_set_context(pdlv, dlv);
        qdr_delivery_set_context(dlv, pdlv);
        qdr_delivery_incref(dlv);
    }

    qd_message_send(qdr_delivery_message(dlv), qlink, qdr_link_strip_annotations_out(link));

    if (!settled && remote_snd_settled)
        // Tell the core that the delivery has been accepted and settled, since we are settling on behalf of the receiver
        qdr_delivery_update_disposition(router->router_core, dlv, PN_ACCEPTED, true, false);

    if (settled || remote_snd_settled)
        pn_delivery_settle(pdlv);

    pn_link_advance(plink);
}
static void do_receive(pn_delivery_t *pnd) { pn_link_t *pn_link = pn_delivery_link(pnd); qd_link_t *link = (qd_link_t*) pn_link_get_context(pn_link); if (link) { qd_node_t *node = link->node; if (node) { node->ntype->rx_handler(node->context, link, pnd); return; } } // // Reject the delivery if we couldn't find a node to handle it // pn_link_advance(pn_link); pn_link_flow(pn_link, 1); pn_delivery_update(pnd, PN_REJECTED); pn_delivery_settle(pnd); }
/**
 * Delivery Disposition Handler
 *
 * Relay a proton delivery's remote disposition/settlement into the router
 * core.  On settlement the PN<->QDR linkage is removed and the core
 * reference is handed over rather than decref'd here.
 */
static void AMQP_disposition_handler(void* context, qd_link_t *link, pn_delivery_t *pnd)
{
    qd_router_t *router = (qd_router_t*) context;
    qdr_delivery_t *delivery = (qdr_delivery_t*) pn_delivery_get_context(pnd);
    bool give_reference = false;

    //
    // It's important to not do any processing without a qdr_delivery.  When pre-settled
    // multi-frame deliveries arrive, it's possible for the settlement to register before
    // the whole message arrives.  Such premature settlement indications must be ignored.
    //
    if (!delivery)
        return;

    //
    // If the delivery is settled, remove the linkage between the PN and QDR deliveries.
    //
    if (pn_delivery_settled(pnd)) {
        pn_delivery_set_context(pnd, 0);
        qdr_delivery_set_context(delivery, 0);

        //
        // Don't decref the delivery here.  Rather, we will _give_ the reference to the core.
        //
        give_reference = true;
    }

    //
    // Update the disposition of the delivery
    //
    qdr_delivery_update_disposition(router->router_core, delivery,
                                    pn_delivery_remote_state(pnd), pn_delivery_settled(pnd),
                                    give_reference);

    //
    // If settled, close out the delivery
    //
    if (pn_delivery_settled(pnd))
        pn_delivery_settle(pnd);
}
/**
 * Outbound Delivery Handler
 *
 * Pop the next queued message from the router link's output FIFO (under the
 * router lock) and send it on the proton link.  Pre-settled messages are
 * settled and freed immediately; otherwise the delivery is linked to the
 * message for later disposition handling.
 */
static void router_tx_handler(void* context, dx_link_t *link, pn_delivery_t *delivery)
{
    dx_router_t *router = (dx_router_t*) context;
    pn_link_t *pn_link = pn_delivery_link(delivery);
    dx_router_link_t *rlink = (dx_router_link_t*) dx_link_get_context(link);
    dx_message_t *msg;
    size_t size;

    // The FIFO is shared; consult and modify it only under the router lock.
    sys_mutex_lock(router->lock);
    msg = DEQ_HEAD(rlink->out_fifo);
    if (!msg) {
        // TODO - Recind the delivery
        sys_mutex_unlock(router->lock);
        return;
    }

    DEQ_REMOVE_HEAD(rlink->out_fifo);
    size = (DEQ_SIZE(rlink->out_fifo));
    sys_mutex_unlock(router->lock);

    // Send outside the lock.
    dx_message_send(msg, pn_link);

    //
    // If there is no incoming delivery, it was pre-settled.  In this case,
    // we must pre-settle the outgoing delivery as well.
    //
    if (dx_message_in_delivery(msg)) {
        pn_delivery_set_context(delivery, (void*) msg);
        dx_message_set_out_delivery(msg, delivery);
    } else {
        pn_delivery_settle(delivery);
        dx_free_message(msg);
    }

    pn_link_advance(pn_link);
    // Advertise the remaining backlog to the peer.
    pn_link_offered(pn_link, size);
}
/*
 * Connection event dispatcher for the receiving side (with optional reply
 * mode).  Accepts one incoming receiver link per connection, optionally
 * creates a reply sender link, decodes each complete delivery, and (in
 * reply mode) echoes the message back to its reply-to address.
 *
 * Fixes vs. the previous revision:
 *  - bounded snprintf instead of sprintf when building the reply link name
 *  - inner locals renamed to stop shadowing 'conn', 'dlv' and 'err'
 */
void connection_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
    connection_context_t *cc = connection_context(h);
    bool replying = cc->global->opts->reply;

    switch (type) {
    case PN_LINK_REMOTE_OPEN: {
        pn_link_t *link = pn_event_link(event);
        if (pn_link_is_receiver(link)) {
            check(cc->recv_link == NULL, "Multiple incomming links on one connection");
            cc->recv_link = link;
            pn_connection_t *conn = pn_event_connection(event);
            pn_list_add(cc->global->active_connections, conn);
            if (cc->global->shutting_down) {
                pn_connection_close(conn);
                break;
            }
            if (replying) {
                // Set up a reply link and defer granting credit to the incoming link
                pn_connection_t *reply_conn = pn_session_connection(pn_link_session(link));
                pn_session_t *ssn = pn_session(reply_conn);
                pn_session_open(ssn);
                char name[100];  // prefer a multiplatform uuid generator
                // Bounded write: cannot overflow 'name' regardless of the id value.
                snprintf(name, sizeof(name), "reply_sender_%d", cc->connection_id);
                cc->reply_link = pn_sender(ssn, name);
                pn_link_open(cc->reply_link);
            } else {
                pn_flowcontroller_t *fc = pn_flowcontroller(1024);
                pn_handler_add(h, fc);
                pn_decref(fc);
            }
        }
    } break;

    case PN_LINK_FLOW: {
        if (replying) {
            pn_link_t *reply_link = pn_event_link(event);
            // pn_flowcontroller handles the non-reply case
            check(reply_link == cc->reply_link, "internal error");
            // Grant the sender as much credit as just given to us for replies
            int delta = pn_link_credit(reply_link) - pn_link_credit(cc->recv_link);
            if (delta > 0)
                pn_link_flow(cc->recv_link, delta);
        }
    } break;

    case PN_DELIVERY: {
        pn_link_t *recv_link = pn_event_link(event);
        pn_delivery_t *dlv = pn_event_delivery(event);
        // Process only complete messages on the receiver link.
        if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
            if (cc->global->received == 0)
                statistics_start(cc->global->stats);

            // Grow the shared decode buffer as needed, then read the message.
            size_t encoded_size = pn_delivery_pending(dlv);
            cc->global->encoded_data = ensure_buffer(cc->global->encoded_data,
                                                     encoded_size,
                                                     &cc->global->encoded_data_size);
            check(cc->global->encoded_data, "decoding buffer realloc failure");
            ssize_t n = pn_link_recv(recv_link, cc->global->encoded_data, encoded_size);
            check(n == (ssize_t) encoded_size, "message data read fail");

            pn_message_t *msg = cc->global->message;
            int err = pn_message_decode(msg, cc->global->encoded_data, n);
            check(err == 0, "message decode error");
            cc->global->received++;
            pn_delivery_settle(dlv);
            statistics_msg_received(cc->global->stats, msg);

            if (replying) {
                const char *reply_addr = pn_message_get_reply_to(msg);
                if (reply_addr) {
                    pn_link_t *rl = cc->reply_link;
                    check(pn_link_credit(rl) > 0, "message received without corresponding reply credit");
                    LOG("Replying to: %s\n", reply_addr );
                    pn_message_set_address(msg, reply_addr);
                    pn_message_set_creation_time(msg, msgr_now());
                    // Build an 8-byte tag from the sent-count.
                    char tag[8];
                    void *ptr = &tag;
                    *((uint64_t *) ptr) = cc->global->sent;
                    pn_delivery_t *reply_dlv = pn_delivery(rl, pn_dtag(tag, 8));
                    size_t size = cc->global->encoded_data_size;
                    int enc_err = pn_message_encode(msg, cc->global->encoded_data, &size);
                    check(enc_err == 0, "message encoding error");
                    pn_link_send(rl, cc->global->encoded_data, size);
                    pn_delivery_settle(reply_dlv);
                    cc->global->sent++;
                }
            }
        }
        if (cc->global->received >= cc->global->opts->msg_count) {
            global_shutdown(cc->global);
        }
    } break;

    case PN_CONNECTION_UNBOUND: {
        pn_connection_t *conn = pn_event_connection(event);
        pn_list_remove(cc->global->active_connections, conn);
        pn_connection_release(conn);
    } break;

    default:
        break;
    }
}
/*
 * Process each event emitted by the protocol engine.
 *
 * The interesting path is PN_DELIVERY: the remote state of the in-flight
 * delivery is mapped to an rsyslog result code and handed back to the main
 * thread via the shared IPC structure (mutex + condition variable).
 */
static void dispatcher(pn_handler_t *handler, pn_event_t *event, pn_event_type_t type)
{
    protocolState_t *ps = PROTOCOL_STATE(handler);
    const configSettings_t *cfg = ps->config;
    //DBGPRINTF("omamqp1: Event received: %s\n", pn_event_type_name(type));

    switch (type) {
    case PN_LINK_REMOTE_OPEN:
        DBGPRINTF("omamqp1: Message bus opened link.\n");
        break;

    case PN_DELIVERY:
        // has the message been delivered to the message bus?
        if (ps->delivery) {
            assert(ps->delivery == pn_event_delivery(event));
            if (pn_delivery_updated(ps->delivery)) {
                rsRetVal result = RS_RET_IDLE;
                uint64_t rs = pn_delivery_remote_state(ps->delivery);
                switch (rs) {
                case PN_ACCEPTED:
                    DBGPRINTF("omamqp1: Message ACCEPTED by message bus\n");
                    result = RS_RET_OK;
                    break;
                case PN_REJECTED:
                    dbgprintf("omamqp1: message bus rejected log message: invalid message - dropping\n");
                    // message bus considers this a 'bad message'. Cannot be redelivered.
                    // Likely a configuration error. Drop the message by returning OK
                    result = RS_RET_OK;
                    break;
                case PN_RELEASED:
                case PN_MODIFIED:
                    // the message bus cannot accept the message.  This may be temporary - retry
                    // up to maxRetries before dropping
                    if (++ps->retries >= cfg->maxRetries) {
                        dbgprintf("omamqp1: message bus failed to accept message - dropping\n");
                        result = RS_RET_OK;
                    } else {
                        dbgprintf("omamqp1: message bus cannot accept message, retrying\n");
                        result = RS_RET_SUSPENDED;
                    }
                    break;
                case PN_RECEIVED:
                    // not finished yet, wait for next delivery update
                    break;
                default:
                    // no other terminal states defined, so ignore anything else
                    dbgprintf("omamqp1: unknown delivery state=0x%lX, assuming message accepted\n",
                              (unsigned long) pn_delivery_remote_state(ps->delivery));
                    result = RS_RET_OK;
                    break;
                }

                if (result != RS_RET_IDLE) {
                    // the command is complete: publish the result to the
                    // waiting thread under the IPC lock, then settle.
                    threadIPC_t *ipc = ps->ipc;
                    pthread_mutex_lock(&ipc->lock);
                    assert(ipc->command == COMMAND_SEND);
                    ipc->result = result;
                    ipc->command = COMMAND_DONE;
                    pthread_cond_signal(&ipc->condition);
                    pthread_mutex_unlock(&ipc->lock);
                    pn_delivery_settle(ps->delivery);
                    ps->delivery = NULL;
                    // a successful hand-off resets the retry counter
                    if (result == RS_RET_OK) {
                        ps->retries = 0;
                    }
                }
            }
        }
        break;

    case PN_CONNECTION_BOUND:
        if (!cfg->bDisableSASL) {
            // force use of SASL, even allowing PLAIN authentication
            pn_sasl_t *sasl = pn_sasl(pn_event_transport(event));
#if PN_VERSION_MAJOR == 0 && PN_VERSION_MINOR >= 10
            pn_sasl_set_allow_insecure_mechs(sasl, true);
#else
            // proton version <= 0.9 only supports PLAIN authentication
            const char *user = cfg->username
                ? (char *)cfg->username
                : pn_url_get_username(cfg->url);
            if (user) {
                pn_sasl_plain(sasl, user, (cfg->password
                                           ? (char *) cfg->password
                                           : pn_url_get_password(cfg->url)));
            }
#endif
        }
        if (cfg->idleTimeout) {
            // configured as seconds, set as milliseconds
            pn_transport_set_idle_timeout(pn_event_transport(event),
                                          cfg->idleTimeout * 1000);
        }
        break;

    case PN_CONNECTION_UNBOUND:
        DBGPRINTF("omamqp1: cleaning up connection resources\n");
        pn_connection_release(pn_event_connection(event));
        ps->conn = NULL;
        ps->sender = NULL;
        ps->delivery = NULL;
        break;

    case PN_TRANSPORT_ERROR: {
        // TODO: if auth failure, does it make sense to retry???
        pn_transport_t *tport = pn_event_transport(event);
        pn_condition_t *cond = pn_transport_condition(tport);
        if (pn_condition_is_set(cond)) {
            _log_error("transport failure", cond);
        }
        dbgprintf("omamqp1: network transport failed, reconnecting...\n");
        // the protocol thread will attempt to reconnect if it is not
        // being shut down
    } break;

    default:
        break;
    }
}
/**
 * Inbound Delivery Handler
 *
 * Receives a complete message from the proton delivery, performs optional
 * user-id proxy policy checks, annotates the message, and hands it to the
 * router core via the appropriate deliver call (link-routed, anonymous, or
 * addressed).  Invalid or unroutable messages are rejected and settled.
 */
static void AMQP_rx_handler(void* context, qd_link_t *link, pn_delivery_t *pnd)
{
    qd_router_t *router = (qd_router_t*) context;
    pn_link_t *pn_link = qd_link_pn(link);
    qdr_link_t *rlink = (qdr_link_t*) qd_link_get_context(link);
    qdr_delivery_t *delivery = 0;
    qd_message_t *msg;

    //
    // Receive the message into a local representation.  If the returned message
    // pointer is NULL, we have not yet received a complete message.
    //
    // Note:  In the link-routing case, consider cutting the message through.  There's
    //        no reason to wait for the whole message to be received before starting to
    //        send it.
    //
    msg = qd_message_receive(pnd);
    if (!msg)
        return;

    //
    // Consume the delivery.
    //
    pn_link_advance(pn_link);

    //
    // If there's no router link, free the message and finish.  It's likely that the link
    // is closing.
    //
    if (!rlink) {
        qd_message_free(msg);
        return;
    }

    //
    // Handle the link-routed case
    //
    if (qdr_link_is_routed(rlink)) {
        pn_delivery_tag_t dtag = pn_delivery_tag(pnd);
        delivery = qdr_link_deliver_to_routed_link(rlink, msg, pn_delivery_settled(pnd),
                                                   (uint8_t*) dtag.start, dtag.size);
        if (delivery) {
            if (pn_delivery_settled(pnd))
                pn_delivery_settle(pnd);
            else {
                // Cross-link for later disposition/settlement relay.
                pn_delivery_set_context(pnd, delivery);
                qdr_delivery_set_context(delivery, pnd);
                qdr_delivery_incref(delivery);
            }
        }
        return;
    }

    //
    // Determine if the incoming link is anonymous.  If the link is addressed,
    // there are some optimizations we can take advantage of.
    //
    bool anonymous_link = qdr_link_is_anonymous(rlink);

    //
    // Determine if the user of this connection is allowed to proxy the
    // user_id of messages.  A message user_id is proxied when the
    // property value differs from the authenticated user name of the connection.
    // If the user is not allowed to proxy the user_id then the message user_id
    // must be blank or it must be equal to the connection user name.
    //
    bool check_user = false;
    qd_connection_t *conn = qd_link_connection(link);
    if (conn->policy_settings)
        check_user = !conn->policy_settings->allowUserIdProxy;

    //
    // Validate the content of the delivery as an AMQP message.  This is done partially, only
    // to validate that we can find the fields we need to route the message.
    //
    // If the link is anonymous, we must validate through the message properties to find the
    // 'to' field.  If the link is not anonymous, we don't need the 'to' field as we will be
    // using the address from the link target.
    //
    qd_message_depth_t validation_depth = (anonymous_link || check_user)
        ? QD_DEPTH_PROPERTIES
        : QD_DEPTH_MESSAGE_ANNOTATIONS;
    bool valid_message = qd_message_check(msg, validation_depth);

    if (valid_message) {
        if (check_user) {
            // This connection must not allow proxied user_id
            qd_iterator_t *userid_iter = qd_message_field_iterator(msg, QD_FIELD_USER_ID);
            if (userid_iter) {
                // The user_id property has been specified
                if (qd_iterator_remaining(userid_iter) > 0) {
                    // user_id property in message is not blank
                    if (!qd_iterator_equal(userid_iter, (const unsigned char *)conn->user_id)) {
                        // This message is rejected: attempted user proxy is disallowed
                        qd_log(router->log_source, QD_LOG_DEBUG,
                               "Message rejected due to user_id proxy violation. User:%s",
                               conn->user_id);
                        pn_link_flow(pn_link, 1);
                        pn_delivery_update(pnd, PN_REJECTED);
                        pn_delivery_settle(pnd);
                        qd_message_free(msg);
                        qd_iterator_free(userid_iter);
                        return;
                    }
                }
                qd_iterator_free(userid_iter);
            }
        }

        qd_parsed_field_t *in_ma = qd_message_message_annotations(msg);
        qd_bitmask_t *link_exclusions;
        bool strip = qdr_link_strip_annotations_in(rlink);
        qd_iterator_t *ingress_iter = router_annotate_message(router, in_ma, msg,
                                                              &link_exclusions, strip);

        if (anonymous_link) {
            qd_iterator_t *addr_iter = 0;
            int phase = 0;

            //
            // If the message has delivery annotations, get the to-override field from the annotations.
            //
            if (in_ma) {
                qd_parsed_field_t *ma_to = qd_parse_value_by_key(in_ma, QD_MA_TO);
                if (ma_to) {
                    addr_iter = qd_iterator_dup(qd_parse_raw(ma_to));
                    phase = qd_message_get_phase_annotation(msg);
                }
            }

            //
            // Still no destination address?  Use the TO field from the message properties.
            //
            if (!addr_iter)
                addr_iter = qd_message_field_iterator(msg, QD_FIELD_TO);

            if (addr_iter) {
                qd_iterator_reset_view(addr_iter, ITER_VIEW_ADDRESS_HASH);
                if (phase > 0)
                    qd_iterator_annotate_phase(addr_iter, '0' + (char) phase);
                delivery = qdr_link_deliver_to(rlink, msg, ingress_iter, addr_iter,
                                               pn_delivery_settled(pnd), link_exclusions);
            }
        } else {
            //
            // Addressed link: derive the to-override from the link terminus.
            //
            const char *term_addr = pn_terminus_get_address(qd_link_remote_target(link));
            if (!term_addr)
                term_addr = pn_terminus_get_address(qd_link_source(link));

            if (term_addr) {
                qd_composed_field_t *to_override = qd_compose_subfield(0);
                qd_compose_insert_string(to_override, term_addr);
                qd_message_set_to_override_annotation(msg, to_override);
                int phase = qdr_link_phase(rlink);
                if (phase != 0)
                    qd_message_set_phase_annotation(msg, phase);
            }
            delivery = qdr_link_deliver(rlink, msg, ingress_iter,
                                        pn_delivery_settled(pnd), link_exclusions);
        }

        if (delivery) {
            if (pn_delivery_settled(pnd))
                pn_delivery_settle(pnd);
            else {
                // Cross-link for later disposition/settlement relay.
                pn_delivery_set_context(pnd, delivery);
                qdr_delivery_set_context(delivery, pnd);
                qdr_delivery_incref(delivery);
            }
        } else {
            //
            // The message is now and will always be unroutable because there is no address.
            //
            pn_link_flow(pn_link, 1);
            pn_delivery_update(pnd, PN_REJECTED);
            pn_delivery_settle(pnd);
            qd_message_free(msg);
        }

        //
        // Rules for delivering messages:
        //
        // For addressed (non-anonymous) links:
        //   to-override must be set (done in the core?)
        //   uses qdr_link_deliver to hand over to the core
        //
        // For anonymous links:
        //   If there's a to-override in the annotations, use that address
        //   Or, use the 'to' field in the message properties
        //
    } else {
        //
        // Message is invalid.  Reject the message and don't involve the router core.
        //
        pn_link_flow(pn_link, 1);
        pn_delivery_update(pnd, PN_REJECTED);
        pn_delivery_settle(pnd);
        qd_message_free(msg);
    }
}
/*
 * Connection event dispatcher (reporting receiver variant).
 * Accepts one incoming receiver link per connection, decodes each complete
 * message, optionally computes end-to-end latency from an embedded
 * timestamp, and emits a tab-separated progress report every
 * opts->report_frequency messages.
 */
void connection_dispatch ( pn_handler_t *h, pn_event_t *event, pn_event_type_t type )
{
  connection_context_t *cc = connection_context(h);

  switch ( type )
  {
    case PN_LINK_REMOTE_OPEN:
    {
      pn_link_t *link = pn_event_link(event);
      if (pn_link_is_receiver(link))
      {
        check(cc->recv_link == NULL, "Multiple incomming links on one connection");
        cc->recv_link = link;
        pn_connection_t *conn = pn_event_connection(event);
        pn_list_add(cc->global->active_connections, conn);
        if (cc->global->shutting_down)
        {
          pn_connection_close(conn);
          break;
        }
        // Let a flowcontroller manage credit on the incoming link.
        pn_flowcontroller_t *fc = pn_flowcontroller(1024);
        pn_handler_add(h, fc);
        pn_decref(fc);
      }
    }
    break;

    case PN_DELIVERY:
    {
      pn_link_t *recv_link = pn_event_link(event);
      pn_delivery_t *dlv = pn_event_delivery(event);
      // Process only complete messages on the receiver link.
      if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv))
      {
        if (cc->global->received == 0)
          statistics_start(cc->global->stats);

        size_t encoded_size = pn_delivery_pending(dlv);
        cc->global->encoded_data = ensure_buffer(cc->global->encoded_data,
                                                 encoded_size,
                                                 &cc->global->encoded_data_size);
        check(cc->global->encoded_data, "decoding buffer realloc failure");

        /* If this was the first message received, initialize our reporting. */
        if ( ! cc->global->received )
          rr_init ( & cc->global->resource_reporter );

        ssize_t n = pn_link_recv(recv_link, cc->global->encoded_data, encoded_size);
        check(n == (ssize_t) encoded_size, "message data read fail");
        //fprintf ( stderr, "MDEBUG encoded_size == %d\n", encoded_size );
        pn_message_t *msg = cc->global->message;
        int err = pn_message_decode ( msg, cc->global->encoded_data, n );
        check ( err == 0, "message decode error" );

        /* MICK -- annotate! ================================ */
        // Latency accounting: requires a sender-side timestamp in the message.
        if ( cc->global->opts->timestamping )
        {
          double message_timestamp;
          if ( get_message_timestamp ( msg, & message_timestamp ) )
          {
            double now = now_timestamp ( );
            cc->global->total_latency += (now - message_timestamp);
          }
          else
          {
            fprintf ( stderr, "receiver: no timestamp at msg count %d.\n", cc->global->received );
            exit ( 1 );
          }
        }
        /* MICK -- end annotate! ============================= */

        cc->global->received++;

        /*---------------------------------------
          Do a report
        ---------------------------------------*/
        if ( ! ( cc->global->received % cc->global->opts->report_frequency ) )
        {
          static bool first_time = true;
          double cpu_percentage;
          int rss;
          double sslr = rr_seconds_since_last_report ( & cc->global->resource_reporter );
          rr_report ( & cc->global->resource_reporter, & cpu_percentage, & rss );
          double throughput = (double)(cc->global->opts->report_frequency) / sslr;

          // Emit the TSV header row once, shaped by the active options.
          if ( first_time )
          {
            if ( cc->global->opts->timestamping )
            {
              if ( cc->global->opts->print_message_size )
                fprintf(cc->global->report_fp, "msg_size\trecv_msgs\tcpu\trss\tthroughput\tlatency\n");
              else
                fprintf(cc->global->report_fp, "recv_msgs\tcpu\trss\tthroughput\tlatency\n");
            }
            else
            {
              if ( cc->global->opts->print_message_size )
                fprintf(cc->global->report_fp, "msg_size\trecv_msgs\tcpu\trss\tthroughput\n");
              else
                fprintf(cc->global->report_fp, "recv_msgs\tcpu\trss\tthroughput\n");
            }
            first_time = false;
          }

          if ( cc->global->opts->timestamping )
          {
            double average_latency = cc->global->total_latency / cc->global->opts->report_frequency;
            average_latency *= 1000.0; // in msec.
            cc->global->total_latency = 0;
            fprintf ( cc->global->report_fp,
                      "%d\t%lf\t%d\t%lf\t%lf\n",
                      cc->global->received,
                      cpu_percentage,
                      rss,
                      throughput,
                      average_latency );
          }
          else
          {
            // was:
            // "recv_msgs: %10d cpu: %5.1lf rss: %6d throughput: %8.0lf\n"
            if ( cc->global->opts->print_message_size )
            {
              fprintf ( cc->global->report_fp,
                        "%d\t%d\t%lf\t%d\t%lf\n",
                        cc->global->opts->message_size,
                        cc->global->received,
                        cpu_percentage,
                        rss,
                        throughput );
            }
            else
            {
              fprintf ( cc->global->report_fp,
                        "%d\t%lf\t%d\t%lf\n",
                        cc->global->received,
                        cpu_percentage,
                        rss,
                        throughput );
            }
          }
        }

        pn_delivery_settle(dlv); // move this up
        statistics_msg_received(cc->global->stats, msg);
      }

      if (cc->global->received >= cc->global->opts->msg_count)
      {
        global_shutdown(cc->global);
      }
    }
    break;

    case PN_CONNECTION_UNBOUND:
    {
      pn_connection_t *conn = pn_event_connection(event);
      pn_list_remove(cc->global->active_connections, conn);
      pn_connection_release(conn);
    }
    break;

    default:
      break;
  }
}
// Locally settle the underlying proton delivery wrapped by this transfer.
void transfer::settle()
{
    pn_delivery_settle(pn_object());
}
/**
 * Inbound Delivery Handler
 *
 * Receives a complete message, validates it through the properties section,
 * and routes it to the outgoing FIFO of the link registered for its 'to'
 * address.  Messages with an unknown address are released; invalid messages
 * are rejected.
 */
static void router_rx_handler(void* context, dx_link_t *link, pn_delivery_t *delivery)
{
    dx_router_t *router = (dx_router_t*) context;
    pn_link_t *pn_link = pn_delivery_link(delivery);
    dx_message_t *msg;
    int valid_message = 0;

    //
    // Receive the message into a local representation.  If the returned message
    // pointer is NULL, we have not yet received a complete message.
    //
    sys_mutex_lock(router->lock);
    msg = dx_message_receive(delivery);
    sys_mutex_unlock(router->lock);
    if (!msg)
        return;

    //
    // Validate the message through the Properties section
    //
    valid_message = dx_message_check(msg, DX_DEPTH_PROPERTIES);

    // Consume the delivery and replenish one unit of credit.
    pn_link_advance(pn_link);
    pn_link_flow(pn_link, 1);

    if (valid_message) {
        dx_field_iterator_t *iter = dx_message_field_iterator(msg, DX_FIELD_TO);
        dx_router_link_t *rlink;
        if (iter) {
            dx_field_iterator_reset(iter, ITER_VIEW_NO_HOST);
            // The routing table and per-link FIFOs are shared state.
            sys_mutex_lock(router->lock);
            int result = hash_retrieve(router->out_hash, iter, (void*) &rlink);
            dx_field_iterator_free(iter);
            if (result == 0) {
                //
                // To field is valid and contains a known destination.  Enqueue on
                // the output fifo for the next-hop-to-destination.
                //
                pn_link_t* pn_outlink = dx_link_pn(rlink->link);
                DEQ_INSERT_TAIL(rlink->out_fifo, msg);
                pn_link_offered(pn_outlink, DEQ_SIZE(rlink->out_fifo));
                dx_link_activate(rlink->link);
            } else {
                //
                // To field contains an unknown address.  Release the message.
                //
                pn_delivery_update(delivery, PN_RELEASED);
                pn_delivery_settle(delivery);
            }
            sys_mutex_unlock(router->lock);
        }
    } else {
        //
        // Message is invalid.  Reject the message.
        //
        pn_delivery_update(delivery, PN_REJECTED);
        pn_delivery_settle(delivery);
        pn_delivery_set_context(delivery, 0);
        dx_free_message(msg);
    }
}
// Locally settle the proton delivery held by this Delivery object.
void SenderContext::Delivery::settle()
{
    pn_delivery_settle(token);
}
/*
 * Event dispatcher for the sending side.  Opens the connection/session/link,
 * sends pre-settled messages as credit arrives, attaches a dedicated handler
 * to any receiver (reply) link, and reports statistics on local close.
 */
void sender_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
    sender_context_t *sc = sender_context(h);
    switch (type) {
    case PN_CONNECTION_INIT: {
        // Open connection, session, and the sender link in one pass.
        pn_connection_t *conn = pn_event_connection(event);
        pn_connection_set_container(conn, pn_string_get(sc->container_id));
        pn_connection_set_hostname(conn, pn_string_get(sc->hostname));
        pn_connection_open(conn);
        pn_session_t *ssn = pn_session(conn);
        pn_session_open(ssn);
        pn_link_t *snd = pn_sender(ssn, "sender");
        const char *path = pn_url_get_path(sc->send_url);
        if (path && strlen(path)) {
            pn_terminus_set_address(pn_link_target(snd), path);
            pn_terminus_set_address(pn_link_source(snd), path);
        }
        pn_link_open(snd);
    } break;

    case PN_LINK_FLOW: {
        // Send as many messages as current credit allows, up to msg_count.
        pn_link_t *snd = pn_event_link(event);
        while (pn_link_credit(snd) > 0 && sc->sent < sc->opts->msg_count) {
            if (sc->sent == 0)
                statistics_start(sc->stats);
            // Build an 8-byte tag from the sent-count.
            char tag[8];
            void *ptr = &tag;
            *((uint64_t *) ptr) = sc->sent;
            pn_delivery_t *dlv = pn_delivery(snd, pn_dtag(tag, 8));
            // setup the message to send
            pn_message_t *msg = sc->message;
            pn_message_set_address(msg, sc->opts->targets.addresses[0]);
            sc->id.u.as_ulong = sc->sent;
            pn_message_set_correlation_id(msg, sc->id);
            pn_message_set_creation_time(msg, msgr_now());
            size_t size = sc->encoded_data_size;
            int err = pn_message_encode(msg, sc->encoded_data, &size);
            check(err == 0, "message encoding error");
            pn_link_send(snd, sc->encoded_data, size);
            // Pre-settled: settle immediately after sending.
            pn_delivery_settle(dlv);
            sc->sent++;
        }
        // If no replies are expected, close once everything has been sent.
        if (sc->sent == sc->opts->msg_count && !sc->opts->get_replies) {
            pn_link_close(snd);
            pn_connection_t *conn = pn_event_connection(event);
            pn_connection_close(conn);
        }
    } break;

    case PN_LINK_INIT: {
        pn_link_t *link = pn_event_link(event);
        if (pn_link_is_receiver(link)) {
            // Response messages link.  Could manage credit and deliveries in this handler but
            // a dedicated handler also works.
            pn_handler_t *replyto = replyto_handler(sc);
            pn_flowcontroller_t *fc = pn_flowcontroller(1024);
            pn_handler_add(replyto, fc);
            pn_decref(fc);
            pn_handshaker_t *handshaker = pn_handshaker();
            pn_handler_add(replyto, handshaker);
            pn_decref(handshaker);
            // Attach the composite handler to this link only.
            pn_record_t *record = pn_link_attachments(link);
            pn_record_set_handler(record, replyto);
            pn_decref(replyto);
        }
    } break;

    case PN_CONNECTION_LOCAL_CLOSE: {
        statistics_report(sc->stats, sc->sent, sc->received);
    } break;

    default:
        break;
    }
}
/* Process each event posted by the reactor.
 *
 * Sends data->count messages; all but the last are pre-settled, so only the
 * final message's delivery receives a remote-state update, which drives the
 * clean shutdown of link, session, and connection.
 */
static void event_handler(pn_handler_t *handler, pn_event_t *event, pn_event_type_t type)
{
    app_data_t *data = GET_APP_DATA(handler);

    switch (type) {
    case PN_CONNECTION_INIT: {
        // Create and open all the endpoints needed to send a message
        //
        pn_connection_t *conn;
        pn_session_t *ssn;
        pn_link_t *sender;

        conn = pn_event_connection(event);
        pn_connection_open(conn);
        ssn = pn_session(conn);
        pn_session_open(ssn);
        sender = pn_sender(ssn, "MySender");
        // we do not wait for ack until the last message
        pn_link_set_snd_settle_mode(sender, PN_SND_MIXED);
        if (!data->anon) {
            pn_terminus_set_address(pn_link_target(sender), data->target);
        }
        pn_link_open(sender);
    } break;

    case PN_LINK_FLOW: {
        // the remote has given us some credit, now we can send messages
        //
        static long tag = 0;  // a simple tag generator
        pn_delivery_t *delivery;
        pn_link_t *sender = pn_event_link(event);
        int credit = pn_link_credit(sender);
        while (credit > 0 && data->count > 0) {
            --credit;
            --data->count;
            ++tag;
            delivery = pn_delivery(sender, pn_dtag((const char *)&tag, sizeof(tag)));
            pn_link_send(sender, data->msg_data, data->msg_len);
            pn_link_advance(sender);
            if (data->count > 0) {
                // send pre-settled until the last one, then wait for an ack on
                // the last sent message.  This allows the sender to send
                // messages as fast as possible and then exit when the consumer
                // has dealt with the last one.
                //
                pn_delivery_settle(delivery);
            }
        }
    } break;

    case PN_DELIVERY: {
        // Since the example sends all messages but the last pre-settled
        // (pre-acked), only the last message's delivery will get updated with
        // the remote state (acked/nacked).
        //
        pn_delivery_t *dlv = pn_event_delivery(event);
        if (pn_delivery_updated(dlv) && pn_delivery_remote_state(dlv)) {
            uint64_t rs = pn_delivery_remote_state(dlv);
            int done = 1;
            switch (rs) {
            case PN_RECEIVED:
                // This is not a terminal state - it is informational, and the
                // peer is still processing the message.
                done = 0;
                break;
            case PN_ACCEPTED:
                pn_delivery_settle(dlv);
                if (!quiet)
                    fprintf(stdout, "Send complete!\n");
                break;
            case PN_REJECTED:
            case PN_RELEASED:
            case PN_MODIFIED:
                pn_delivery_settle(dlv);
                fprintf(stderr, "Message not accepted - code:%lu\n", (unsigned long)rs);
                break;
            default:
                // ??? no other terminal states defined, so ignore anything else
                pn_delivery_settle(dlv);
                fprintf(stderr, "Unknown delivery failure - code=%lu\n", (unsigned long)rs);
                break;
            }
            if (done) {
                // initiate clean shutdown of the endpoints
                pn_link_t *link = pn_delivery_link(dlv);
                pn_session_t *ssn = pn_link_session(link);
                pn_link_close(link);
                pn_session_close(ssn);
                pn_connection_close(pn_session_connection(ssn));
            }
        }
    } break;

    case PN_TRANSPORT_ERROR: {
        // The connection to the peer failed.
        //
        pn_transport_t *tport = pn_event_transport(event);
        pn_condition_t *cond = pn_transport_condition(tport);
        fprintf(stderr, "Network transport failed!\n");
        if (pn_condition_is_set(cond)) {
            const char *name = pn_condition_get_name(cond);
            const char *desc = pn_condition_get_description(cond);
            fprintf(stderr, " Error: %s Description: %s\n",
                    (name) ? name : "<error name not provided>",
                    (desc) ? desc : "<no description provided>");
        }
        // pn_reactor_process() will exit with a false return value, stopping
        // the main loop.
    } break;

    default:
        break;
    }
}
int main ( int argc, char ** argv ) { char info[1000]; int expected = (argc > 1) ? atoi(argv[1]) : 100000; int received = 0; int size = 32; int msg_size = 50; bool done = false; int initial_credit = 500, new_credit = 250, low_credit_limit = 250; char const * host = "0.0.0.0"; char const * port = "5672"; bool sasl_done = false; pn_driver_t * driver; pn_listener_t * listener; pn_connector_t * connector; pn_connection_t * connection; pn_session_t * session; pn_link_t * link; pn_delivery_t * delivery; char * message_data = (char *) malloc ( MY_BUF_SIZE ); int message_data_capacity = MY_BUF_SIZE; fprintf ( stderr, "drecv expecting %d messages.\n", expected ); driver = pn_driver ( ); if ( ! pn_listener(driver, host, port, 0) ) { fprintf ( stderr, "listener creation failed.\n" ); exit ( 1 ); } while ( ! done) { pn_driver_wait ( driver, -1 ); if ( (listener = pn_driver_listener(driver)) ) pn_listener_accept( listener ); if ( (connector = pn_driver_connector(driver)) ) { pn_connector_process ( connector ); if ( ! sasl_done ) if( ! (sasl_done = get_sasl_over_with(connector) )) continue; connection = pn_connector_connection ( connector ); /*========================================================= Open everything that is ready on the other side but not here. 
=========================================================*/ pn_state_t hes_ready_im_not = PN_LOCAL_UNINIT | PN_REMOTE_ACTIVE; if (pn_connection_state(connection) == hes_ready_im_not) pn_connection_open( connection); for ( session = pn_session_head(connection, hes_ready_im_not); session; session = pn_session_next(session, hes_ready_im_not) ) pn_session_open(session); for ( link = pn_link_head(connection, hes_ready_im_not); link; link = pn_link_next(link, hes_ready_im_not) ) { pn_terminus_copy(pn_link_source(link), pn_link_remote_source(link)); pn_terminus_copy(pn_link_target(link), pn_link_remote_target(link)); pn_link_open ( link ); if ( pn_link_is_receiver(link) ) pn_link_flow ( link, initial_credit ); } /*========================================================== Get all available deliveries. ==========================================================*/ for ( delivery = pn_work_head ( connection ); delivery; delivery = pn_work_next ( delivery ) ) { if ( pn_delivery_readable(delivery) ) { link = pn_delivery_link ( delivery ); while ( PN_EOS != pn_link_recv(link, message_data, MY_BUF_SIZE) ) ; pn_link_advance ( link ); pn_delivery_update ( delivery, PN_ACCEPTED ); pn_delivery_settle ( delivery ); if ( ++ received >= expected ) { sprintf ( info, "received %d messages", received ); print_timestamp ( stderr, info ); done = true; } // a progress report for long tests. if ( ! (received % 5000000) ) fprintf ( stderr, "received: %d\n", received ); if ( pn_link_credit(link) <= low_credit_limit ) pn_link_flow ( link, new_credit ); } else { // TODO // Why am I getting writables? // And what to do with them? } } /*=============================================================== Shut down everything that the other side has closed. 
===============================================================*/ pn_state_t active_here_closed_there = PN_LOCAL_ACTIVE | PN_REMOTE_CLOSED; if ( pn_connection_state(connection) == active_here_closed_there ) pn_connection_close ( connection ); for ( session = pn_session_head(connection, active_here_closed_there); session; session = pn_session_next(session, active_here_closed_there) ) pn_session_close ( session ); for ( link = pn_link_head(connection, active_here_closed_there); link; link = pn_link_next(link, active_here_closed_there) ) pn_link_close ( link ); if ( pn_connector_closed(connector) ) { pn_connection_free ( pn_connector_connection(connector) ); pn_connector_free ( connector ); done = true; } else pn_connector_process(connector); } } pn_driver_free(driver); return 0; }