// Release the proton objects referenced by a qd_link and free the wrapper.
// Safe to call with NULL.
void qd_link_free(qd_link_t *link)
{
    if (!link)
        return;

    // Drop our references on the underlying proton session and link
    // (the two decrefs are independent of each other).
    if (link->pn_sess)
        pn_decref(link->pn_sess);
    if (link->pn_link)
        pn_decref(link->pn_link);
    link->pn_sess = 0;
    link->pn_link = 0;

    free_qd_link_t(link);
}
// Reactor event handler for the listening socket.
//
// Tracks consecutive PN_REACTOR_QUIESCED events so that two quiesces in a
// row (i.e. a full timeout period with no activity) trigger shutdown.
void listener_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
  global_context_t *gc = global_context(h);
  // Count back-to-back quiesce events; any other event resets the streak.
  if (type == PN_REACTOR_QUIESCED)
    gc->quiesce_count++;
  else
    gc->quiesce_count = 0;
  switch (type) {
  case PN_CONNECTION_INIT: {
    pn_connection_t *connection = pn_event_connection(event);
    // New incoming connection on listener socket. Give each a separate handler.
    pn_handler_t *ch = connection_handler(gc);
    // Add a handshaker to the chain; the chain holds its own reference,
    // so release the local one.
    pn_handshaker_t *handshaker = pn_handshaker();
    pn_handler_add(ch, handshaker);
    pn_decref(handshaker);
    // Attach the per-connection handler via the connection's record,
    // which keeps a reference of its own.
    pn_record_t *record = pn_connection_attachments(connection);
    pn_record_set_handler(record, ch);
    pn_decref(ch);
  } break;
  case PN_REACTOR_QUIESCED: {
    // Two quiesce in a row means we have been idle for a timeout period
    if (gc->opts->timeout != -1 && gc->quiesce_count > 1)
      global_shutdown(gc);
  } break;
  case PN_REACTOR_INIT: {
    pn_reactor_t *reactor = pn_event_reactor(event);
    start_listener(gc, reactor);
    // hack to let test scripts know when the receivers are ready (so
    // that the senders may be started)
    if (gc->opts->ready_text) {
      fprintf(stdout, "%s\n", gc->opts->ready_text);
      fflush(stdout);
    }
    if (gc->opts->timeout != -1)
      pn_reactor_set_timeout(pn_event_reactor(event), gc->opts->timeout);
  } break;
  case PN_REACTOR_FINAL: {
    // Make sure the stats have a start time even if nothing was received,
    // then emit the final report.
    if (gc->received == 0)
      statistics_start(gc->stats);
    statistics_report(gc->stats, gc->sent, gc->received);
  } break;
  default:
    break;
  }
}
// Exercise pn_list_t reference counting with PN_OBJECT elements: adding an
// object increfs it, deleting a range or freeing the list decrefs it.
// 'capacity' is the initial capacity hint for the list (growth is automatic).
static void test_list_refcount(size_t capacity)
{
  void *one = pn_class_new(PN_OBJECT, 0);
  void *two = pn_class_new(PN_OBJECT, 0);
  void *three = pn_class_new(PN_OBJECT, 0);
  void *four = pn_class_new(PN_OBJECT, 0);
  // FIX: honor the 'capacity' parameter (it was previously ignored and a
  // hard-coded 0 was passed instead).
  pn_list_t *list = pn_list(PN_OBJECT, capacity);
  assert(!pn_list_add(list, one));
  assert(!pn_list_add(list, two));
  assert(!pn_list_add(list, three));
  assert(!pn_list_add(list, four));
  assert(pn_list_get(list, 0) == one);
  assert(pn_list_get(list, 1) == two);
  assert(pn_list_get(list, 2) == three);
  assert(pn_list_get(list, 3) == four);
  assert(pn_list_size(list) == 4);
  // Each element is held by the caller and by the list.
  assert(pn_refcount(one) == 2);
  assert(pn_refcount(two) == 2);
  assert(pn_refcount(three) == 2);
  assert(pn_refcount(four) == 2);
  // Deleting a range releases the list's references on the removed items.
  pn_list_del(list, 1, 2);
  assert(pn_list_size(list) == 2);
  assert(pn_refcount(one) == 2);
  assert(pn_refcount(two) == 1);
  assert(pn_refcount(three) == 1);
  assert(pn_refcount(four) == 2);
  assert(pn_list_get(list, 0) == one);
  assert(pn_list_get(list, 1) == four);
  // An object may appear in the list more than once; each slot is a ref.
  assert(!pn_list_add(list, one));
  assert(pn_list_size(list) == 3);
  assert(pn_refcount(one) == 3);
  // Freeing the list drops all of its references.
  pn_decref(list);
  assert(pn_refcount(one) == 1);
  assert(pn_refcount(two) == 1);
  assert(pn_refcount(three) == 1);
  assert(pn_refcount(four) == 1);
  pn_decref(one);
  pn_decref(two);
  pn_decref(three);
  pn_decref(four);
}
// Reactor event handler for the listening socket (reporting variant: the
// final statistics line goes to stderr and the report file is closed here).
void listener_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
  global_context_t *gc = global_context(h);
  // Track consecutive quiesce events.  NOTE(review): no PN_REACTOR_QUIESCED
  // case appears in the switch below, so quiesce_count looks unused here —
  // presumably consumed elsewhere or vestigial; confirm before removing.
  if (type == PN_REACTOR_QUIESCED)
    gc->quiesce_count++;
  else
    gc->quiesce_count = 0;
  switch (type) {
  case PN_CONNECTION_INIT: {
    pn_connection_t *connection = pn_event_connection(event);
    // New incoming connection on listener socket. Give each a separate handler.
    pn_handler_t *ch = connection_handler(gc);
    // Handshaker automates open/close handshakes; the chain keeps its own
    // reference, so drop ours.
    pn_handshaker_t *handshaker = pn_handshaker();
    pn_handler_add(ch, handshaker);
    pn_decref(handshaker);
    pn_record_t *record = pn_connection_attachments(connection);
    pn_record_set_handler(record, ch);
    pn_decref(ch);
  } break;
  case PN_REACTOR_INIT: {
    pn_reactor_t *reactor = pn_event_reactor(event);
    start_listener(gc, reactor);
  } break;
  case PN_REACTOR_FINAL: {
    // Ensure the stats have a start time even if nothing arrived.
    if (gc->received == 0)
      statistics_start(gc->stats);
    //statistics_report(gc->stats, gc->sent, gc->received);
    fclose(gc->report_fp);
    if (gc->received > 0)
      fprintf(stderr, "reactor-recv received %d messages.\n", gc->received);
  } break;
  default:
    break;
  }
}
/* Test waking up a connection that is idle */
static void test_connection_wake(test_t *t)
{
  test_proactor_t tps[] = { test_proactor(t, open_wake_handler),
                            test_proactor(t, listen_handler) };
  pn_proactor_t *client = tps[0].proactor;
  pn_listener_t *l = test_listen(&tps[1], "");
  pn_connection_t *c = pn_connection();
  pn_incref(c); /* Keep a reference for wake() after free */
  pn_proactor_connect2(client, c, NULL, listener_info(l).connect);
  TEST_ETYPE_EQUAL(t, PN_CONNECTION_REMOTE_OPEN, TEST_PROACTORS_RUN(tps));
  TEST_CHECK(t, pn_proactor_get(client) == NULL); /* Should be idle */
  /* Waking an idle connection must deliver exactly one PN_CONNECTION_WAKE. */
  pn_connection_wake(c);
  TEST_ETYPE_EQUAL(t, PN_CONNECTION_WAKE, TEST_PROACTORS_RUN(tps));
  /* open_wake_handler closes on wake: expect transport close on each side. */
  TEST_ETYPE_EQUAL(t, PN_TRANSPORT_CLOSED, TEST_PROACTORS_RUN(tps));
  TEST_ETYPE_EQUAL(t, PN_TRANSPORT_CLOSED, TEST_PROACTORS_RUN(tps)); /* Both ends */
  /* The pn_connection_t is still valid so wake is legal but a no-op */
  TEST_ETYPE_EQUAL(t, PN_PROACTOR_INACTIVE, TEST_PROACTORS_RUN(tps));
  TEST_ETYPE_EQUAL(t, PN_EVENT_NONE, TEST_PROACTORS_GET(tps)); /* No more wake */
  /* Verify we don't get a wake after close even if they happen together */
  pn_connection_t *c2 = pn_connection();
  pn_proactor_connect2(client, c2, NULL, listener_info(l).connect);
  TEST_ETYPE_EQUAL(t, PN_CONNECTION_REMOTE_OPEN, TEST_PROACTORS_RUN(tps));
  pn_connection_wake(c2);
  pn_proactor_disconnect(client, NULL);
  pn_connection_wake(c2);
  /* Only drive the client proactor here; the wake must have been dropped. */
  TEST_ETYPE_EQUAL(t, PN_TRANSPORT_CLOSED, test_proactors_run(&tps[0], 1));
  TEST_ETYPE_EQUAL(t, PN_PROACTOR_INACTIVE, test_proactors_run(&tps[0], 1));
  TEST_ETYPE_EQUAL(t, PN_EVENT_NONE, test_proactors_get(&tps[0], 1)); /* No late wake */
  TEST_PROACTORS_DESTROY(tps);
  /* The pn_connection_t is still valid so wake is legal but a no-op */
  pn_connection_wake(c);
  pn_decref(c);
}
// Send a command to the protocol thread and // wait for the command to complete static rsRetVal _issue_command(threadIPC_t *ipc, pn_reactor_t *reactor, commands_t command, pn_message_t *message) { DEFiRet; DBGPRINTF("omamqp1: Sending command %d to protocol thread\n", command); pthread_mutex_lock(&ipc->lock); if (message) { assert(ipc->message == NULL); ipc->message = message; } assert(ipc->command == COMMAND_DONE); ipc->command = command; pn_reactor_wakeup(reactor); while (ipc->command != COMMAND_DONE) { pthread_cond_wait(&ipc->condition, &ipc->lock); } iRet = ipc->result; if (ipc->message) { pn_decref(ipc->message); ipc->message = NULL; } pthread_mutex_unlock(&ipc->lock); DBGPRINTF("omamqp1: Command %d completed, status=%d\n", command, iRet); RETiRet; }
// Refresh the selector's view of one selectable: its descriptor slot, the
// interest set (read/write/error/expired) and its deadline.
void pn_selector_update(pn_selector_t *selector, pn_selectable_t *selectable)
{
  // A selectable's fd may switch from PN_INVALID_SOCKET to a working socket
  // between update calls.  If a selectable without a valid socket has a
  // deadline, we need a dummy iocpdesc_t to participate in the deadlines list.
  int idx = pni_selectable_get_index(selectable);
  assert(idx >= 0);
  pn_timestamp_t deadline = pn_selectable_get_deadline(selectable);
  pn_socket_t sock = pn_selectable_get_fd(selectable);
  iocpdesc_t *iocpd = (iocpdesc_t *) pn_list_get(selector->iocp_descriptors, idx);

  if (!iocpd && deadline && sock == PN_INVALID_SOCKET) {
    // No socket yet but a deadline exists: install a deadline-only descriptor.
    iocpd = pni_deadline_desc(selector->iocp);
    assert(iocpd);
    pn_list_set(selector->iocp_descriptors, idx, iocpd);
    pn_decref(iocpd);  // life is solely tied to iocp_descriptors list
    iocpd->selector = selector;
    iocpd->selectable = selectable;
  }
  else if (iocpd && iocpd->deadline_desc && sock != PN_INVALID_SOCKET) {
    // Switching to a real socket. Stop using a deadline descriptor.
    deadlines_update(iocpd, 0);
    // decref descriptor in list and pick up a real iocpd below
    pn_list_set(selector->iocp_descriptors, idx, NULL);
    iocpd = NULL;
  }

  // The selectables socket may be set long after it has been added
  if (!iocpd && sock != PN_INVALID_SOCKET) {
    iocpd = pni_iocpdesc_map_get(selector->iocp, sock);
    if (!iocpd) {
      // Socket created outside proton. Hook it up to iocp.
      iocpd = pni_iocpdesc_create(selector->iocp, sock, true);
      assert(iocpd);
      if (iocpd)
        pni_iocpdesc_start(iocpd);
    }
    if (iocpd) {
      pn_list_set(selector->iocp_descriptors, idx, iocpd);
      iocpd->selector = selector;
      iocpd->selectable = selectable;
    }
  }

  if (iocpd) {
    assert(sock == iocpd->socket || iocpd->closing);
    int interests = PN_ERROR; // Always
    if (pn_selectable_is_reading(selectable)) {
      interests |= PN_READABLE;
    }
    if (pn_selectable_is_writing(selectable)) {
      interests |= PN_WRITABLE;
    }
    if (deadline) {
      interests |= PN_EXPIRED;
    }
    interests_update(iocpd, interests);
    deadlines_update(iocpd, deadline);
  }
}
// Per-connection handler teardown: release the reference on the shared
// listener handler that connection_context_init() took with pn_incref().
void connection_cleanup(pn_handler_t *h)
{
    connection_context_t *context = connection_context(h);
    pn_decref(context->global->listener_handler);
}
// Append a pattern -> substitution rule to the transform.
// The rules list takes its own reference to the rule, so the creation
// reference is dropped before returning.
void pn_transform_rule(pn_transform_t *transform, const char *pattern,
                       const char *substitution)
{
    assert(transform);
    pn_rule_t *new_rule = pn_rule(pattern, substitution);
    pn_list_add(transform->rules, new_rule);
    pn_decref(new_rule);
}
// If the context is not present, create it with value x. template <class T> static T& ref(id id_) { T* ctx = context::ptr<T>(id_); if (!ctx) { ctx = create<T>(); pn_record_def(id_.first, id_.second, pn_class()); pn_record_set(id_.first, id_.second, ctx); pn_decref(ctx); } return *ctx; }
// Exercise pn_hash_t with PN_OBJECT values: NULL values, duplicate values
// under many keys, lookup, size tracking and deletion.
static void test_hash(void)
{
  void *one = pn_class_new(PN_OBJECT, 0);
  void *two = pn_class_new(PN_OBJECT, 0);
  void *three = pn_class_new(PN_OBJECT, 0);

  // Small initial bucket count (4) forces growth; 0.75 load factor.
  pn_hash_t *hash = pn_hash(PN_OBJECT, 4, 0.75);
  pn_hash_put(hash, 0, NULL);  // a key may map to NULL
  pn_hash_put(hash, 1, one);
  pn_hash_put(hash, 2, two);
  pn_hash_put(hash, 3, three);
  pn_hash_put(hash, 4, one);
  pn_hash_put(hash, 5, two);
  pn_hash_put(hash, 6, three);
  pn_hash_put(hash, 7, one);
  pn_hash_put(hash, 8, two);
  pn_hash_put(hash, 9, three);
  pn_hash_put(hash, 10, one);
  pn_hash_put(hash, 11, two);
  pn_hash_put(hash, 12, three);
  pn_hash_put(hash, 18, one);

  assert(pn_hash_get(hash, 2) == two);
  assert(pn_hash_get(hash, 5) == two);
  assert(pn_hash_get(hash, 18) == one);
  assert(pn_hash_get(hash, 0) == NULL);

  assert(pn_hash_size(hash) == 14);

  // Deletion removes the mapping and shrinks the size.
  pn_hash_del(hash, 5);
  assert(pn_hash_get(hash, 5) == NULL);
  assert(pn_hash_size(hash) == 13);
  pn_hash_del(hash, 18);
  assert(pn_hash_get(hash, 18) == NULL);
  assert(pn_hash_size(hash) == 12);

  pn_decref(hash);

  pn_decref(one);
  pn_decref(two);
  pn_decref(three);
}
// Create a connection owned by the reactor, wired to the given handler.
//
// The connection is attached to the reactor's collector and added to the
// reactor's children list; after the local reference is dropped the list
// holds the connection, so the returned pointer is a borrowed reference.
pn_connection_t *pn_reactor_connection(pn_reactor_t *reactor,
                                       pn_handler_t *handler)
{
  assert(reactor);
  pn_connection_t *connection = pn_connection();
  pn_record_t *record = pn_connection_attachments(connection);
  pn_record_set_handler(record, handler);
  pn_connection_collect(connection, pn_reactor_collector(reactor));
  pn_list_add(pn_reactor_children(reactor), connection);
  pni_record_init_reactor(record, reactor);
  pn_decref(connection);
  return connection;
}
// On a connection-open event, bind a freshly created transport to the
// connection — but only while the remote end is still uninitialized.
void pni_handle_open(pn_reactor_t *reactor, pn_event_t *event)
{
  assert(reactor);
  assert(event);
  pn_connection_t *connection = pn_event_connection(event);
  if (pn_connection_state(connection) & PN_REMOTE_UNINIT) {
    // The bind ties the transport's lifetime to the connection, so the
    // local creation reference can be released immediately.
    pn_transport_t *transport = pn_transport();
    pn_transport_bind(transport, connection);
    pn_decref(transport);
  }
}
// Object-system finalizer for pn_event_t.  If the owning pool is still
// alive (someone besides this event holds it), the event is scrubbed and
// recycled into the pool instead of being destroyed outright.
static void pn_event_finalize(pn_event_t *event)
{
  // decref before adding to the free list
  if (event->clazz && event->context) {
    pn_class_decref(event->clazz, event->context);
  }

  pn_list_t *pool = event->pool;

  // refcount > 1 means the collector (or another owner) still holds the
  // pool, so recycling is worthwhile; otherwise let everything die.
  if (pool && pn_refcount(pool) > 1) {
    // Reset all fields so the recycled event carries no stale state.
    event->pool = NULL;
    event->type = PN_EVENT_NONE;
    event->clazz = NULL;
    event->context = NULL;
    event->next = NULL;
    pn_record_clear(event->attachments);
    pn_list_add(pool, event);
  } else {
    pn_decref(event->attachments);
  }

  // Drop the event's reference on the pool (pn_decref(NULL) is a no-op).
  pn_decref(pool);
}
// Build a map of n key/value object pairs, then verify that iterating the
// map visits every pair exactly once (pairs are crossed off a side list).
static void test_map_iteration(int n)
{
  // 'pairs' stores key, value, key, value, ... and owns the objects.
  pn_list_t *pairs = pn_list(PN_OBJECT, 2*n);
  for (int i = 0; i < n; i++) {
    void *key = pn_class_new(PN_OBJECT, 0);
    void *value = pn_class_new(PN_OBJECT, 0);
    pn_list_add(pairs, key);
    pn_list_add(pairs, value);
    // The list now holds the only references we need.
    pn_decref(key);
    pn_decref(value);
  }

  pn_map_t *map = pn_map(PN_OBJECT, PN_OBJECT, 0, 0.75);
  assert(pn_map_head(map) == 0);  // empty map has no head entry

  for (int i = 0; i < n; i++) {
    pn_map_put(map, pn_list_get(pairs, 2*i), pn_list_get(pairs, 2*i + 1));
  }

  // Each visited entry must match a remaining pair; delete the pair so a
  // duplicate visit (or a miss) would leave the list non-empty.
  for (pn_handle_t entry = pn_map_head(map); entry; entry = pn_map_next(map, entry)) {
    void *key = pn_map_key(map, entry);
    void *value = pn_map_value(map, entry);
    ssize_t idx = pn_list_index(pairs, key);
    assert(idx >= 0);
    assert(pn_list_get(pairs, idx) == key);
    assert(pn_list_get(pairs, idx + 1) == value);
    pn_list_del(pairs, idx, 2);
  }

  assert(pn_list_size(pairs) == 0);

  pn_decref(map);
  pn_decref(pairs);
}
// Record the peer's host/port on the connection as a pn_url_t attachment.
// The attachments record keeps the only reference to the URL.
void pni_reactor_set_connection_peer_address(pn_connection_t *connection,
                                             const char *host,
                                             const char *port)
{
    pn_record_t *attachments = pn_connection_attachments(connection);
    // Define the record key on first use.
    if (!pn_record_has(attachments, PNI_CONN_PEER_ADDRESS)) {
        pn_record_def(attachments, PNI_CONN_PEER_ADDRESS, PN_OBJECT);
    }
    pn_url_t *address = pn_url();
    pn_url_set_host(address, host);
    pn_url_set_port(address, port);
    pn_record_set(attachments, PNI_CONN_PEER_ADDRESS, address);
    pn_decref(address);
}
// Unlink a store entry from its stream and store lists, release its byte
// buffer, and drop the reference that kept it alive.  NULL is ignored.
void pni_entry_free(pni_entry_t *entry)
{
  if (!entry) return;
  pni_stream_t *stream = entry->stream;
  pni_store_t *store = stream->store;
  // Remove from both intrusive linked lists (stream-local and store-wide).
  LL_REMOVE(stream, stream, entry);
  LL_REMOVE(store, store, entry);
  entry->free = true;
  pn_buffer_free(entry->bytes);
  entry->bytes = NULL;
  // May destroy the entry if this was the last reference.
  pn_decref(entry);
  store->size--;
}
// Create and bind a transport for the outgoing connection, applying the
// address's credentials and the connector's options.
void connector::connect() {
    connection_.host(address_.host_port());
    // Wrap the raw transport; binding ties its lifetime to the connection,
    // so the creation reference is released after bind.
    pn_transport_t *pnt = pn_transport();
    transport t(pnt);
    // Only set credentials actually present in the address.
    if (!address_.username().empty())
        connection_.user(address_.username());
    if (!address_.password().empty())
        connection_.password(address_.password());
    t.bind(connection_);
    pn_decref(pnt);
    // Apply options to the new transport.
    options_.apply(connection_);
    transport_configured_ = true;
}
// Remove the event at the head of the collector's queue, releasing the
// collector's reference to it.  Returns false if the queue was empty.
bool pn_collector_pop(pn_collector_t *collector)
{
  pn_event_t *head = collector->head;
  if (!head) {
    return false;
  }

  collector->head = head->next;
  if (!collector->head) {
    // Queue drained: the tail must not dangle.
    collector->tail = NULL;
  }

  pn_decref(head);
  return true;
}
// Store the connection's target host/port as a pn_url_t attachment on the
// connection; the attachments record keeps the only reference.
void pn_reactor_set_connection_host(pn_reactor_t *reactor,
                                    pn_connection_t *connection,
                                    const char *host,
                                    const char *port)
{
    (void)reactor;  // ignored
    pn_record_t *attachments = pn_connection_attachments(connection);
    // Define the record key on first use.
    if (!pn_record_has(attachments, PNI_CONN_URL)) {
        pn_record_def(attachments, PNI_CONN_URL, PN_OBJECT);
    }
    pn_url_t *target = pn_url();
    pn_url_set_host(target, host);
    pn_url_set_port(target, port);
    pn_record_set(attachments, PNI_CONN_URL, target);
    pn_decref(target);
}
// Create and bind a transport for the outgoing connection, applying
// credentials, options, and a default virtual host taken from the address.
void connector::connect() {
    // Binding ties the transport's lifetime to the connection, so the
    // creation reference is released after bind.
    pn_transport_t *pnt = pn_transport();
    transport t(make_wrapper(pnt));
    // Only set credentials actually present in the address.
    if (!address_.user().empty())
        connection_.user(address_.user());
    if (!address_.password().empty())
        connection_.password(address_.password());
    pn_transport_bind(pnt, unwrap(connection_));
    pn_decref(pnt);
    // Apply options to the new transport.
    options_.apply(connection_);
    // if virtual-host not set, use host from address as default
    if (!options_.is_virtual_host_set())
        pn_connection_set_hostname(unwrap(connection_), address_.host().c_str());
    transport_configured_ = true;
}
// Basic pn_list_t sanity test with PN_WEAKREF elements (no refcounting):
// add, indexed get, size, and range deletion.
// 'capacity' is the initial capacity hint for the list.
static void test_list(size_t capacity)
{
  // FIX: honor the 'capacity' parameter (it was previously ignored and a
  // hard-coded 0 was passed instead).
  pn_list_t *list = pn_list(PN_WEAKREF, capacity);
  assert(pn_list_size(list) == 0);
  assert(!pn_list_add(list, (void *) 0));
  assert(!pn_list_add(list, (void *) 1));
  assert(!pn_list_add(list, (void *) 2));
  assert(!pn_list_add(list, (void *) 3));
  assert(pn_list_get(list, 0) == (void *) 0);
  assert(pn_list_get(list, 1) == (void *) 1);
  assert(pn_list_get(list, 2) == (void *) 2);
  assert(pn_list_get(list, 3) == (void *) 3);
  assert(pn_list_size(list) == 4);
  // Delete two elements starting at index 1; the tail shifts down.
  pn_list_del(list, 1, 2);
  assert(pn_list_size(list) == 2);
  assert(pn_list_get(list, 0) == (void *) 0);
  assert(pn_list_get(list, 1) == (void *) 3);
  pn_decref(list);
}
// Verify that pn_incref/pn_decref move the refcount up and down by exactly
// one, starting and ending at 1.
static void test_refcounting(int refs)
{
  void *obj = pn_class_new(PN_OBJECT, 0);
  assert(pn_refcount(obj) == 1);

  // Take 'refs' additional references, checking the count each time.
  int taken = 0;
  while (taken < refs) {
    pn_incref(obj);
    taken++;
    assert(pn_refcount(obj) == taken + 1);
  }
  assert(pn_refcount(obj) == refs + 1);

  // Release them again, checking the count winds back down.
  int released = 0;
  while (released < refs) {
    pn_decref(obj);
    released++;
    assert(pn_refcount(obj) == refs + 1 - released);
  }
  assert(pn_refcount(obj) == 1);

  // Release the last reference.
  pn_free(obj);
}
// Object-system finalizer for pn_collector_t: flush any queued events,
// then drop the collector's reference on the event free-pool.
static void pn_collector_finalize(pn_collector_t *collector)
{
  pn_collector_drain(collector);
  pn_decref(collector->pool);
}
// Per-connection event handler for the receiver.  Handles incoming link
// setup (optionally creating a reply link), reply-credit management,
// message receipt/decoding (and echoing when replying), and teardown.
void connection_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
  connection_context_t *cc = connection_context(h);
  bool replying = cc->global->opts->reply;
  switch (type) {
  case PN_LINK_REMOTE_OPEN: {
    pn_link_t *link = pn_event_link(event);
    if (pn_link_is_receiver(link)) {
      check(cc->recv_link == NULL, "Multiple incomming links on one connection");
      cc->recv_link = link;
      pn_connection_t *conn = pn_event_connection(event);
      pn_list_add(cc->global->active_connections, conn);
      if (cc->global->shutting_down) {
        // Refuse new work while shutting down.
        pn_connection_close(conn);
        break;
      }
      if (replying) {
        // Set up a reply link and defer granting credit to the incoming link
        pn_connection_t *conn = pn_session_connection(pn_link_session(link));
        pn_session_t *ssn = pn_session(conn);
        pn_session_open(ssn);
        char name[100]; // prefer a multiplatform uuid generator
        sprintf(name, "reply_sender_%d", cc->connection_id);
        cc->reply_link = pn_sender(ssn, name);
        pn_link_open(cc->reply_link);
      } else {
        // Not replying: let a flowcontroller keep receive credit topped up.
        pn_flowcontroller_t *fc = pn_flowcontroller(1024);
        pn_handler_add(h, fc);
        pn_decref(fc);
      }
    }
  } break;
  case PN_LINK_FLOW: {
    if (replying) {
      pn_link_t *reply_link = pn_event_link(event);
      // pn_flowcontroller handles the non-reply case
      check(reply_link == cc->reply_link, "internal error");
      // Grant the sender as much credit as just given to us for replies
      int delta = pn_link_credit(reply_link) - pn_link_credit(cc->recv_link);
      if (delta > 0)
        pn_link_flow(cc->recv_link, delta);
    }
  } break;
  case PN_DELIVERY: {
    pn_link_t *recv_link = pn_event_link(event);
    pn_delivery_t *dlv = pn_event_delivery(event);
    // Only act once the full message body has arrived.
    if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
      // First message starts the statistics clock.
      if (cc->global->received == 0) statistics_start(cc->global->stats);
      // Grow the shared decode buffer as needed and read the message.
      size_t encoded_size = pn_delivery_pending(dlv);
      cc->global->encoded_data = ensure_buffer(cc->global->encoded_data,
                                               encoded_size,
                                               &cc->global->encoded_data_size);
      check(cc->global->encoded_data, "decoding buffer realloc failure");
      ssize_t n = pn_link_recv(recv_link, cc->global->encoded_data, encoded_size);
      check(n == (ssize_t) encoded_size, "message data read fail");
      pn_message_t *msg = cc->global->message;
      int err = pn_message_decode(msg, cc->global->encoded_data, n);
      check(err == 0, "message decode error");
      cc->global->received++;
      pn_delivery_settle(dlv);
      statistics_msg_received(cc->global->stats, msg);
      if (replying) {
        const char *reply_addr = pn_message_get_reply_to(msg);
        if (reply_addr) {
          pn_link_t *rl = cc->reply_link;
          check(pn_link_credit(rl) > 0, "message received without corresponding reply credit");
          LOG("Replying to: %s\n", reply_addr );
          // Echo the message back to its reply-to address.
          pn_message_set_address(msg, reply_addr);
          pn_message_set_creation_time(msg, msgr_now());
          char tag[8];
          void *ptr = &tag;  // go via void* when stamping the 64-bit tag
          *((uint64_t *) ptr) = cc->global->sent;
          pn_delivery_t *dlv = pn_delivery(rl, pn_dtag(tag, 8));
          size_t size = cc->global->encoded_data_size;
          int err = pn_message_encode(msg, cc->global->encoded_data, &size);
          check(err == 0, "message encoding error");
          pn_link_send(rl, cc->global->encoded_data, size);
          pn_delivery_settle(dlv);
          cc->global->sent++;
        }
      }
    }
    // Shut down once the configured message count has been reached.
    if (cc->global->received >= cc->global->opts->msg_count) {
      global_shutdown(cc->global);
    }
  } break;
  case PN_CONNECTION_UNBOUND: {
    // Connection fully closed: forget it and release reactor resources.
    pn_connection_t *conn = pn_event_connection(event);
    pn_list_remove(cc->global->active_connections, conn);
    pn_connection_release(conn);
  } break;
  default:
    break;
  }
}
// Exercise pn_map_t with PN_OBJECT keys and values: insertion, lookup via
// an equal-but-distinct key ('dup'), value replacement, deletion, and the
// refcounts the map is expected to hold at each step.
static void test_map(void)
{
  void *one = pn_class_new(PN_OBJECT, 0);
  void *two = pn_class_new(PN_OBJECT, 0);
  void *three = pn_class_new(PN_OBJECT, 0);

  pn_map_t *map = pn_map(PN_OBJECT, PN_OBJECT, 4, 0.75);
  assert(pn_map_size(map) == 0);

  pn_string_t *key = pn_string("key");
  pn_string_t *dup = pn_string("key");   // equal to 'key' but a distinct object
  pn_string_t *key1 = pn_string("key1");
  pn_string_t *key2 = pn_string("key2");

  assert(!pn_map_put(map, key, one));
  assert(pn_map_size(map) == 1);
  assert(!pn_map_put(map, key1, two));
  assert(pn_map_size(map) == 2);
  assert(!pn_map_put(map, key2, three));
  assert(pn_map_size(map) == 3);

  // Lookup and replacement work by key equality, not identity.
  assert(pn_map_get(map, dup) == one);
  assert(!pn_map_put(map, dup, one));
  assert(pn_map_size(map) == 3);
  assert(!pn_map_put(map, dup, two));
  assert(pn_map_size(map) == 3);
  assert(pn_map_get(map, dup) == two);

  // The map holds the original 'key' object, not 'dup'; values 'two' is
  // held twice (two mappings) plus the caller's reference.
  assert(pn_refcount(key) == 2);
  assert(pn_refcount(dup) == 1);
  assert(pn_refcount(key1) == 2);
  assert(pn_refcount(key2) == 2);
  assert(pn_refcount(one) == 1);
  assert(pn_refcount(two) == 3);
  assert(pn_refcount(three) == 2);

  // Deleting a mapping releases the map's references on key and value.
  pn_map_del(map, key1);
  assert(pn_map_size(map) == 2);
  assert(pn_refcount(key) == 2);
  assert(pn_refcount(dup) == 1);
  assert(pn_refcount(key1) == 1);
  assert(pn_refcount(key2) == 2);
  assert(pn_refcount(one) == 1);
  assert(pn_refcount(two) == 2);
  assert(pn_refcount(three) == 2);

  pn_decref(one);
  pn_decref(two);
  pn_decref(three);
  pn_decref(key);
  pn_decref(dup);
  pn_decref(key1);
  pn_decref(key2);
  pn_decref(map);
}
// Insert the descriptor into its iocp's socket -> iocpdesc map.  The hash
// takes its own reference; after dropping ours, the map's reference must
// be the only one remaining.
void pni_iocpdesc_map_push(iocpdesc_t *iocpd)
{
  pn_hash_put(iocpd->iocp->iocpdesc_map, iocpd->socket, iocpd);
  pn_decref(iocpd);
  assert(pn_refcount(iocpd) == 1);  // map is now the sole owner
}
// Per-connection event handler for the receiver (reporting variant).
// Receives and decodes messages, tracks latency when timestamping is
// enabled, and periodically writes throughput/CPU/RSS rows to report_fp.
void connection_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
  connection_context_t *cc = connection_context(h);
  switch (type) {
  case PN_LINK_REMOTE_OPEN: {
    pn_link_t *link = pn_event_link(event);
    if (pn_link_is_receiver(link)) {
      check(cc->recv_link == NULL, "Multiple incomming links on one connection");
      cc->recv_link = link;
      pn_connection_t *conn = pn_event_connection(event);
      pn_list_add(cc->global->active_connections, conn);
      if (cc->global->shutting_down) {
        // Refuse new work while shutting down.
        pn_connection_close(conn);
        break;
      }
      // Let a flowcontroller keep receive credit topped up; the handler
      // chain holds its own reference.
      pn_flowcontroller_t *fc = pn_flowcontroller(1024);
      pn_handler_add(h, fc);
      pn_decref(fc);
    }
  } break;
  case PN_DELIVERY: {
    pn_link_t *recv_link = pn_event_link(event);
    pn_delivery_t *dlv = pn_event_delivery(event);
    // Only act once the full message body has arrived.
    if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
      if (cc->global->received == 0) statistics_start(cc->global->stats);
      // Grow the shared decode buffer as needed.
      size_t encoded_size = pn_delivery_pending(dlv);
      cc->global->encoded_data = ensure_buffer(cc->global->encoded_data,
                                               encoded_size,
                                               &cc->global->encoded_data_size);
      check(cc->global->encoded_data, "decoding buffer realloc failure");
      /* If this was the first message received, initialize our reporting. */
      if ( ! cc->global->received )
        rr_init ( & cc->global->resource_reporter );
      ssize_t n = pn_link_recv(recv_link, cc->global->encoded_data, encoded_size);
      check(n == (ssize_t) encoded_size, "message data read fail");
      pn_message_t *msg = cc->global->message;
      int err = pn_message_decode ( msg, cc->global->encoded_data, n );
      check ( err == 0, "message decode error" );
      /* Latency tracking: accumulate (now - sender timestamp) per message;
         a missing timestamp is fatal when timestamping is enabled. */
      if ( cc->global->opts->timestamping ) {
        double message_timestamp;
        if ( get_message_timestamp ( msg, & message_timestamp ) ) {
          double now = now_timestamp ( );
          cc->global->total_latency += (now - message_timestamp);
        }
        else {
          fprintf ( stderr, "receiver: no timestamp at msg count %d.\n", cc->global->received );
          exit ( 1 );
        }
      }
      cc->global->received++;
      /*---------------------------------------
        Do a report every report_frequency messages.
      ---------------------------------------*/
      if ( ! ( cc->global->received % cc->global->opts->report_frequency ) ) {
        static bool first_time = true;  // print the column header only once
        double cpu_percentage;
        int rss;
        double sslr = rr_seconds_since_last_report ( & cc->global->resource_reporter );
        rr_report ( & cc->global->resource_reporter, & cpu_percentage, & rss );
        double throughput = (double)(cc->global->opts->report_frequency) / sslr;
        if ( first_time ) {
          // Tab-separated header; column set depends on the options.
          if ( cc->global->opts->timestamping ) {
            if ( cc->global->opts->print_message_size )
              fprintf(cc->global->report_fp, "msg_size\trecv_msgs\tcpu\trss\tthroughput\tlatency\n");
            else
              fprintf(cc->global->report_fp, "recv_msgs\tcpu\trss\tthroughput\tlatency\n");
          }
          else {
            if ( cc->global->opts->print_message_size )
              fprintf(cc->global->report_fp, "msg_size\trecv_msgs\tcpu\trss\tthroughput\n");
            else
              fprintf(cc->global->report_fp, "recv_msgs\tcpu\trss\tthroughput\n");
          }
          first_time = false;
        }
        if ( cc->global->opts->timestamping ) {
          // Average latency over this reporting window, converted to msec,
          // then reset the accumulator for the next window.
          double average_latency = cc->global->total_latency / cc->global->opts->report_frequency;
          average_latency *= 1000.0; // in msec.
          cc->global->total_latency = 0;
          fprintf ( cc->global->report_fp,
                    "%d\t%lf\t%d\t%lf\t%lf\n",
                    cc->global->received,
                    cpu_percentage,
                    rss,
                    throughput,
                    average_latency );
        }
        else {
          // was:
          // "recv_msgs: %10d cpu: %5.1lf rss: %6d throughput: %8.0lf\n"
          if ( cc->global->opts->print_message_size ) {
            fprintf ( cc->global->report_fp,
                      "%d\t%d\t%lf\t%d\t%lf\n",
                      cc->global->opts->message_size,
                      cc->global->received,
                      cpu_percentage,
                      rss,
                      throughput );
          }
          else {
            fprintf ( cc->global->report_fp,
                      "%d\t%lf\t%d\t%lf\n",
                      cc->global->received,
                      cpu_percentage,
                      rss,
                      throughput );
          }
        }
      }
      pn_delivery_settle(dlv); // NOTE(review): original comment says "move this up"
      statistics_msg_received(cc->global->stats, msg);
    }
    // Shut down once the configured message count has been reached.
    if (cc->global->received >= cc->global->opts->msg_count) {
      global_shutdown(cc->global);
    }
  } break;
  case PN_CONNECTION_UNBOUND: {
    // Connection fully closed: forget it and release reactor resources.
    pn_connection_t *conn = pn_event_connection(event);
    pn_list_remove(cc->global->active_connections, conn);
    pn_connection_release(conn);
  } break;
  default:
    break;
  }
}
// Destroy a collector: release any queued/held events, then drop the
// caller's reference (which may finalize the collector).
void pn_collector_free(pn_collector_t *collector)
{
  assert(collector);
  pn_collector_release(collector);
  pn_decref(collector);
}
// Event handler for the sending side: opens the connection/session/link,
// sends pre-settled messages as credit arrives, wires up a dedicated
// handler for the reply link, and reports statistics on close.
void sender_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
  sender_context_t *sc = sender_context(h);
  switch (type) {
  case PN_CONNECTION_INIT: {
    // Bring up the full connection -> session -> sender-link stack.
    pn_connection_t *conn = pn_event_connection(event);
    pn_connection_set_container(conn, pn_string_get(sc->container_id));
    pn_connection_set_hostname(conn, pn_string_get(sc->hostname));
    pn_connection_open(conn);
    pn_session_t *ssn = pn_session(conn);
    pn_session_open(ssn);
    pn_link_t *snd = pn_sender(ssn, "sender");
    // Use the URL path (if any) as both target and source address.
    const char *path = pn_url_get_path(sc->send_url);
    if (path && strlen(path)) {
      pn_terminus_set_address(pn_link_target(snd), path);
      pn_terminus_set_address(pn_link_source(snd), path);
    }
    pn_link_open(snd);
  } break;
  case PN_LINK_FLOW: {
    // Send as many messages as available credit allows, up to msg_count.
    pn_link_t *snd = pn_event_link(event);
    while (pn_link_credit(snd) > 0 && sc->sent < sc->opts->msg_count) {
      if (sc->sent == 0) statistics_start(sc->stats);
      // Tag deliveries with the send sequence number.
      char tag[8];
      void *ptr = &tag;
      *((uint64_t *) ptr) = sc->sent;
      pn_delivery_t *dlv = pn_delivery(snd, pn_dtag(tag, 8));
      // setup the message to send
      pn_message_t *msg = sc->message;
      pn_message_set_address(msg, sc->opts->targets.addresses[0]);
      sc->id.u.as_ulong = sc->sent;
      pn_message_set_correlation_id(msg, sc->id);
      pn_message_set_creation_time(msg, msgr_now());
      size_t size = sc->encoded_data_size;
      int err = pn_message_encode(msg, sc->encoded_data, &size);
      check(err == 0, "message encoding error");
      pn_link_send(snd, sc->encoded_data, size);
      // Pre-settled (fire and forget) delivery.
      pn_delivery_settle(dlv);
      sc->sent++;
    }
    // All sent and no replies expected: close link and connection.
    if (sc->sent == sc->opts->msg_count && !sc->opts->get_replies) {
      pn_link_close(snd);
      pn_connection_t *conn = pn_event_connection(event);
      pn_connection_close(conn);
    }
  } break;
  case PN_LINK_INIT: {
    pn_link_t *link = pn_event_link(event);
    if (pn_link_is_receiver(link)) {
      // Response messages link. Could manage credit and deliveries in this
      // handler but a dedicated handler also works.
      pn_handler_t *replyto = replyto_handler(sc);
      pn_flowcontroller_t *fc = pn_flowcontroller(1024);
      pn_handler_add(replyto, fc);
      pn_decref(fc);
      pn_handshaker_t *handshaker = pn_handshaker();
      pn_handler_add(replyto, handshaker);
      pn_decref(handshaker);
      // The link's record holds the reply handler; drop local references.
      pn_record_t *record = pn_link_attachments(link);
      pn_record_set_handler(record, replyto);
      pn_decref(replyto);
    }
  } break;
  case PN_CONNECTION_LOCAL_CLOSE: {
    statistics_report(sc->stats, sc->sent, sc->received);
  } break;
  default:
    break;
  }
}