// Allocate a link endpoint on a brand-new session over the given connection.
// The session's incoming capacity is fixed at 1MB, the link direction selects
// sender vs. receiver, and the session is opened before returning.  The
// caller owns the returned qd_link_t.
qd_link_t *qd_link(qd_node_t *node, qd_connection_t *conn, qd_direction_t dir, const char* name)
{
    qd_link_t *link = new_qd_link_t();

    pn_session_t *sess = pn_session(qd_connection_pn(conn));
    pn_session_set_incoming_capacity(sess, 1000000);
    link->pn_sess = sess;

    link->pn_link = (dir == QD_OUTGOING) ? pn_sender(sess, name)
                                         : pn_receiver(sess, name);

    link->direction              = dir;
    link->context                = node->context;
    link->node                   = node;
    link->drain_mode             = pn_link_get_drain(link->pn_link);
    link->remote_snd_settle_mode = pn_link_remote_snd_settle_mode(link->pn_link);
    link->close_sess_with_link   = true;

    //
    // Keep the borrowed references
    //
    pn_incref(link->pn_link);
    pn_incref(sess);

    pn_link_set_context(link->pn_link, link);
    pn_session_open(sess);

    return link;
}
// Trace every connection event to stderr; when the remote endpoint state
// changes, open a fresh session on the event's connection.  (The conn
// parameter is currently unused.)
void process_connection(ldp_connection_t *conn, pn_event_t *event)
{
    const pn_event_type_t type = pn_event_type(event);
    fprintf(stderr, "connection event %s\n", pn_event_type_name(type));

    if (type == PN_CONNECTION_REMOTE_STATE) {
        pn_session_t *ssn = pn_session(pn_event_connection(event));
        pn_session_open(ssn);
    }
}
// Lazily create and open this connection's default session, caching the raw
// pn_session_t in the connection context.  A raw pointer is cached rather
// than a proton::session because holding an owning wrapper here as well as
// in the connection would create circular ownership.
session connection::default_session() {
    connection_context& ctx = connection_context::get(pn_object());
    if (ctx.default_session == 0) {
        ctx.default_session = pn_session(pn_object());
        pn_session_open(ctx.default_session);
    }
    return make_wrapper(ctx.default_session);
}
/* Test sending/receiving a message in chunks */
static void test_message_stream(test_t *t) {
  /* Two proactors sharing one handler: tps[0] is the client, tps[1] listens. */
  test_proactor_t tps[] ={ test_proactor(t, message_stream_handler), test_proactor(t, message_stream_handler) };
  pn_proactor_t *client = tps[0].proactor;
  pn_listener_t *l = test_listen(&tps[1], "");
  struct message_stream_context ctx = { 0 };
  tps[0].handler.context = &ctx;
  tps[1].handler.context = &ctx;

  /* Encode a large (not very) message to send in chunks */
  char *body = (char*)malloc(BODY);
  /* NOTE(review): malloc result is unchecked before memset — assumes a
   * BODY-sized allocation always succeeds in the test environment. */
  memset(body, 'x', BODY);
  pn_message_t *m = pn_message();
  /* pn_data_put_binary copies the bytes, so body can be freed immediately. */
  pn_data_put_binary(pn_message_body(m), pn_bytes(BODY, body));
  free(body);
  ctx.size = message_encode(m, &ctx.send_buf);
  pn_message_free(m);

  /* Open connection -> session -> sender link, then wait for credit. */
  pn_connection_t *c = pn_connection();
  pn_proactor_connect2(client, c, NULL, listener_info(l).connect);
  pn_session_t *ssn = pn_session(c);
  pn_session_open(ssn);
  pn_link_t *snd = pn_sender(ssn, "x");
  pn_link_open(snd);
  TEST_PROACTORS_RUN_UNTIL(tps, PN_LINK_FLOW);

  /* Send and receive the message in chunks */
  do {
    pn_connection_wake(c);          /* Initiate send/receive of one chunk */
    do {                            /* May be multiple receives for one send */
      TEST_PROACTORS_RUN_UNTIL(tps, PN_DELIVERY);
    } while (ctx.received < ctx.sent);
  } while (!ctx.complete);

  /* Everything sent must have arrived, byte-for-byte. */
  TEST_CHECK(t, ctx.received == ctx.size);
  TEST_CHECK(t, ctx.sent == ctx.size);
  TEST_CHECK(t, !memcmp(ctx.send_buf.start, ctx.recv_buf.start, ctx.size));

  free(ctx.send_buf.start);
  free(ctx.recv_buf.start);
  TEST_PROACTORS_DESTROY(tps);
}
// Create a named session on this connection and block (under the monitor)
// until the peer confirms it.  An empty name gets a fresh UUID.  Throws if
// transactions are requested (unsupported) or the name is already in use.
boost::shared_ptr<SessionContext> ConnectionContext::newSession(bool transactional, const std::string& n)
{
    qpid::sys::ScopedLock<qpid::sys::Monitor> l(lock);
    if (transactional) throw qpid::messaging::MessagingException("Transactions not yet supported");

    const std::string name = n.empty() ? qpid::framing::Uuid(true).str() : n;
    if (sessions.find(name) != sessions.end()) {
        throw qpid::messaging::KeyError(std::string("Session already exists: ") + name);
    }

    boost::shared_ptr<SessionContext> s(new SessionContext(connection));
    s->session = pn_session(connection);
    pn_session_open(s->session);
    sessions[name] = s;
    wakeupDriver();
    // Wait until the remote end has acknowledged the session open.
    while (pn_session_state(s->session) & PN_REMOTE_UNINIT) {
        wait();
    }
    return s;
}
/* Find or create a link for the given address on the messenger.
 *
 * address may be NULL; it is parsed into a domain (used to resolve the
 * connection) and a terminus name.  sender selects a sending vs. receiving
 * link.  Returns an existing active link whose terminus matches the parsed
 * name, or a newly created and opened link on a fresh session.  Returns NULL
 * if no connection can be resolved for the domain.
 */
pn_link_t *pn_messenger_link(pn_messenger_t *messenger, const char *address, bool sender)
{
  /* Work on a mutable copy: parse_address() modifies its input in place. */
  char buf[(address ? strlen(address) : 0) + 1];
  if (address) {
    strcpy(buf, address);
  } else {
    buf[0] = '\0';
  }
  char *domain;
  char *name;
  parse_address(address ? buf : NULL, &domain, &name);

  pn_connection_t *connection = pn_messenger_domain(messenger, domain);
  if (!connection) return NULL;

  /* Reuse an existing active link with a matching terminus, if any. */
  pn_link_t *link = pn_link_head(connection, PN_LOCAL_ACTIVE);
  while (link) {
    if (pn_is_sender(link) == sender) {
      const char *terminus = pn_is_sender(link) ? pn_target(link) : pn_source(link);
      if (pn_streq(name, terminus))
        return link;
    }
    link = pn_link_next(link, PN_LOCAL_ACTIVE);
  }

  /* No match: open a new session and create the link on it. */
  pn_session_t *ssn = pn_session(connection);
  pn_session_open(ssn);
  link = sender ? pn_sender(ssn, "sender-xxx") : pn_receiver(ssn, "receiver-xxx"); // XXX
  /* Both branches of the original sender/receiver conditional set the same
   * source and target, so the if/else was dead code — collapsed here. */
  pn_set_target(link, name);
  pn_set_source(link, name);
  pn_link_open(link);
  return link;
}
// Drive the interconnect's connection state machine: initiate the AMQP open
// when the local endpoint is still uninitialised, then — once the remote end
// is active — complete the open handshake and attach the configured link.
void Interconnect::process()
{
    QPID_LOG(trace, id << " processing interconnect");
    if (closeRequested) {
        close();
    } else {
        // Local endpoint not yet opened: send the open frame with our
        // container id (the broker's federation tag) and properties.
        if ((pn_connection_state(connection) & UNINIT) == UNINIT) {
            QPID_LOG_CAT(debug, model, id << " interconnect open initiated");
            pn_connection_set_container(connection, getBroker().getFederationTag().c_str());
            setProperties(connection);
            pn_connection_open(connection);
            out.connectionEstablished();
            setInterconnectDomain(domain);
        }
        // Remote endpoint now active and we haven't attached yet: record the
        // peer's identity, notify observers, then create the session and the
        // single incoming/outgoing link this interconnect carries.
        if (!isOpened && (pn_connection_state(connection) & PN_REMOTE_ACTIVE)) {
            QPID_LOG_CAT(debug, model, id << " interconnect open completed, attaching link");
            isOpened = true;
            readPeerProperties();
            const char* containerid(pn_connection_remote_container(connection));
            if (containerid) {
                setContainerId(std::string(containerid));
            }
            opened();
            getBroker().getConnectionObservers().opened(*this);

            pn_session_t* s = pn_session(connection);
            pn_session_open(s);
            boost::shared_ptr<Session> ssn(new Session(s, *this, out));
            sessions[s] = ssn;
            // Link direction depends on whether this end consumes or supplies.
            pn_link_t* l = incoming ? pn_receiver(s, name.c_str()) : pn_sender(s, name.c_str());
            pn_link_open(l);
            ssn->attach(l, source, target, relay);
        }
        Connection::process();
    }
}
/* Per-connection reactor event handler for the benchmark server.
 *
 * On link attach it records the (single) incoming link and either creates a
 * reply sender (reply mode) or installs a flow controller.  On flow it
 * mirrors reply-link credit onto the receive link.  On delivery it decodes
 * the message, updates statistics and, in reply mode, echoes the message to
 * its reply-to address.  On unbind it releases the connection.
 *
 * Fixes vs. previous revision: "incomming" typo in the check() message, and
 * removal of shadowed locals (`conn` in the reply-setup branch — it is the
 * same connection as the event's — plus `dlv`/`err` in the reply path).
 */
void connection_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
  connection_context_t *cc = connection_context(h);
  bool replying = cc->global->opts->reply;

  switch (type) {

  case PN_LINK_REMOTE_OPEN: {
    pn_link_t *link = pn_event_link(event);
    if (pn_link_is_receiver(link)) {
      check(cc->recv_link == NULL, "Multiple incoming links on one connection");
      cc->recv_link = link;
      pn_connection_t *conn = pn_event_connection(event);
      pn_list_add(cc->global->active_connections, conn);
      if (cc->global->shutting_down) {
        pn_connection_close(conn);
        break;
      }
      if (replying) {
        // Set up a reply link and defer granting credit to the incoming link.
        pn_session_t *ssn = pn_session(conn);
        pn_session_open(ssn);
        char name[100];  // prefer a multiplatform uuid generator
        sprintf(name, "reply_sender_%d", cc->connection_id);
        cc->reply_link = pn_sender(ssn, name);
        pn_link_open(cc->reply_link);
      } else {
        pn_flowcontroller_t *fc = pn_flowcontroller(1024);
        pn_handler_add(h, fc);
        pn_decref(fc);
      }
    }
  } break;

  case PN_LINK_FLOW: {
    if (replying) {
      pn_link_t *reply_link = pn_event_link(event);
      // pn_flowcontroller handles the non-reply case
      check(reply_link == cc->reply_link, "internal error");
      // Grant the sender as much credit as just given to us for replies
      int delta = pn_link_credit(reply_link) - pn_link_credit(cc->recv_link);
      if (delta > 0)
        pn_link_flow(cc->recv_link, delta);
    }
  } break;

  case PN_DELIVERY: {
    pn_link_t *recv_link = pn_event_link(event);
    pn_delivery_t *dlv = pn_event_delivery(event);
    if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
      if (cc->global->received == 0) statistics_start(cc->global->stats);

      // Read the complete encoded message into the shared (growable) buffer.
      size_t encoded_size = pn_delivery_pending(dlv);
      cc->global->encoded_data = ensure_buffer(cc->global->encoded_data, encoded_size,
                                               &cc->global->encoded_data_size);
      check(cc->global->encoded_data, "decoding buffer realloc failure");
      ssize_t n = pn_link_recv(recv_link, cc->global->encoded_data, encoded_size);
      check(n == (ssize_t) encoded_size, "message data read fail");

      pn_message_t *msg = cc->global->message;
      int err = pn_message_decode(msg, cc->global->encoded_data, n);
      check(err == 0, "message decode error");
      cc->global->received++;
      pn_delivery_settle(dlv);
      statistics_msg_received(cc->global->stats, msg);

      if (replying) {
        const char *reply_addr = pn_message_get_reply_to(msg);
        if (reply_addr) {
          pn_link_t *rl = cc->reply_link;
          check(pn_link_credit(rl) > 0, "message received without corresponding reply credit");
          LOG("Replying to: %s\n", reply_addr );
          // Echo the decoded message back to its reply-to address.
          pn_message_set_address(msg, reply_addr);
          pn_message_set_creation_time(msg, msgr_now());
          char tag[8];
          void *ptr = &tag;
          *((uint64_t *) ptr) = cc->global->sent;
          pn_delivery_t *reply_dlv = pn_delivery(rl, pn_dtag(tag, 8));
          size_t size = cc->global->encoded_data_size;
          int enc_err = pn_message_encode(msg, cc->global->encoded_data, &size);
          check(enc_err == 0, "message encoding error");
          pn_link_send(rl, cc->global->encoded_data, size);
          pn_delivery_settle(reply_dlv);
          cc->global->sent++;
        }
      }
    }
    if (cc->global->received >= cc->global->opts->msg_count) {
      global_shutdown(cc->global);
    }
  } break;

  case PN_CONNECTION_UNBOUND: {
    pn_connection_t *conn = pn_event_connection(event);
    pn_list_remove(cc->global->active_connections, conn);
    pn_connection_release(conn);
  } break;

  default:
    break;
  }
}
/* runs the protocol engine, allowing it to handle TCP socket I/O and timer
 * events in the background.
 *
 * Thread body: repeatedly (re)connects to the broker, builds the
 * connection/session/sender endpoints, and pumps the reactor until the
 * connection drops or a stop is requested.  On exit it completes the stop
 * command handshake with the main thread via the shared IPC structure.
 */
static void *amqp1_thread(void *arg)
{
    DBGPRINTF("omamqp1: Protocol thread started\n");
    pn_handler_t *handler = (pn_handler_t *)arg;
    protocolState_t *ps = PROTOCOL_STATE(handler);
    const configSettings_t *cfg = ps->config;

    // have pn_reactor_process() exit after 5 sec to poll for commands
    pn_reactor_set_timeout(ps->reactor, 5000);
    pn_reactor_start(ps->reactor);

    while (!ps->stopped) {
        // setup a connection:
        const char *host = pn_url_get_host(cfg->url);
        const char *port = pn_url_get_port(cfg->url);
        if (!port) port = "5672";   // default AMQP port when URL omits it
#if PN_VERSION_MAJOR == 0 && PN_VERSION_MINOR >= 13
        // proton >= 0.13: connect helper takes host/port directly
        ps->conn = pn_reactor_connection_to_host(ps->reactor, host, port, handler);
        pn_connection_set_hostname(ps->conn, host);
#else
        // older proton: encode "host:port" into the connection hostname
        {
            char host_addr[300];
            ps->conn = pn_reactor_connection(ps->reactor, handler);
            snprintf(host_addr, sizeof(host_addr), "%s:%s", host, port);
            pn_connection_set_hostname(ps->conn, host_addr);
        }
#endif
        pn_connection_set_container(ps->conn, "rsyslogd-omamqp1");
#if PN_VERSION_MAJOR == 0 && PN_VERSION_MINOR >= 10
        // proton version <= 0.9 did not support Cyrus SASL
        // credentials: explicit config wins, otherwise fall back to the URL
        const char *user = cfg->username ? (char *)cfg->username : pn_url_get_username(cfg->url);
        if (user)
            pn_connection_set_user(ps->conn, user);
        const char *pword = cfg->password ? (char *) cfg->password : pn_url_get_password(cfg->url);
        if (pword)
            pn_connection_set_password(ps->conn, pword);
#endif
        // open connection -> session -> sender link addressed at the target
        pn_connection_open(ps->conn);
        pn_session_t *ssn = pn_session(ps->conn);
        pn_session_open(ssn);
        ps->sender = pn_sender(ssn, (char *)cfg->target);
        pn_link_set_snd_settle_mode(ps->sender, PN_SND_UNSETTLED);
        char *addr = (char *)ps->config->target;
        pn_terminus_set_address(pn_link_target(ps->sender), addr);
        pn_terminus_set_address(pn_link_source(ps->sender), addr);
        pn_link_open(ps->sender);

        // run the protocol engine until the connection closes or thread is shut down
        sbool engine_running = true;
        while (engine_running) {
            engine_running = pn_reactor_process(ps->reactor);
            _poll_command(ps);
        }
        DBGPRINTF("omamqp1: reactor finished\n");
        _abort_command(ps);   // unblock main thread if necessary

        // delay reconnectDelay seconds before re-connecting:
        // (polling commands each second so a stop request is not delayed)
        int delay = ps->config->reconnectDelay;
        while (delay-- > 0 && !ps->stopped) {
            srSleep(1, 0);
            _poll_command(ps);
        }
    }

    pn_reactor_stop(ps->reactor);

    // stop command is now done: signal the waiting main thread
    threadIPC_t *ipc = ps->ipc;
    pthread_mutex_lock(&ipc->lock);
    ipc->result = RS_RET_OK;
    ipc->command = COMMAND_DONE;
    pthread_cond_signal(&ipc->condition);
    pthread_mutex_unlock(&ipc->lock);
    DBGPRINTF("omamqp1: Protocol thread stopped\n");
    return 0;
}
/* Connector callback: drives the SASL handshake, performs one-time
 * connection/session/link setup, then services every pending delivery
 * (sending payloads, consuming incoming data, handling dispositions).
 * NOTE(review): this uses the legacy proton engine API (pn_connector_*,
 * pn_flow, pn_set_target, ...) that predates the event interface.
 */
void client_callback(pn_connector_t *ctor)
{
  struct client_context *ctx = pn_connector_context(ctor);
  if (pn_connector_closed(ctor)) {
    ctx->done = true;
  }

  /* Step SASL until it passes; bail out early on idle/step/fail. */
  pn_sasl_t *sasl = pn_connector_sasl(ctor);
  while (pn_sasl_state(sasl) != PN_SASL_PASS) {
    pn_sasl_state_t st = pn_sasl_state(sasl);
    switch (st) {
    case PN_SASL_IDLE:
      return;
    case PN_SASL_CONF:
      if (ctx->mechanism && !strcmp(ctx->mechanism, "PLAIN")) {
        pn_sasl_plain(sasl, ctx->username, ctx->password);
      } else {
        pn_sasl_mechanisms(sasl, ctx->mechanism);
        pn_sasl_client(sasl);
      }
      break;
    case PN_SASL_STEP:
      if (pn_sasl_pending(sasl)) {
        fprintf(stderr, "challenge failed\n");
        ctx->done = true;
      }
      return;
    case PN_SASL_FAIL:
      fprintf(stderr, "authentication failed\n");
      ctx->done = true;
      return;
    case PN_SASL_PASS:
      /* unreachable: the loop condition already excludes PN_SASL_PASS */
      break;
    }
  }

  pn_connection_t *connection = pn_connector_connection(ctor);
  char tagstr[1024];
  char msg[ctx->size];       /* VLA: payload of ctx->size 'x' bytes */
  char data[ctx->size + 16]; /* VLA: encoded message (payload + overhead) */
  for (int i = 0; i < ctx->size; i++) {
    msg[i] = 'x';
  }
  size_t ndata = pn_message_data(data, ctx->size + 16, msg, ctx->size);

  /* One-time endpoint setup: container/hostname, session, and the sender
   * and/or receiver links with their initial deliveries/credit. */
  if (!ctx->init) {
    ctx->init = true;

    char container[1024];
    if (gethostname(container, 1024)) pn_fatal("hostname lookup failed");

    pn_connection_set_container(connection, container);
    pn_connection_set_hostname(connection, ctx->hostname);

    pn_session_t *ssn = pn_session(connection);
    pn_connection_open(connection);
    pn_session_open(ssn);

    if (ctx->send_count) {
      pn_link_t *snd = pn_sender(ssn, "sender");
      pn_set_target(snd, ctx->address);
      pn_link_open(snd);

      /* Pre-create one delivery per message to send. */
      char buf[16];
      for (int i = 0; i < ctx->send_count; i++) {
        sprintf(buf, "%x", i);
        pn_delivery(snd, pn_dtag(buf, strlen(buf)));
      }
    }

    if (ctx->recv_count) {
      pn_link_t *rcv = pn_receiver(ssn, "receiver");
      pn_set_source(rcv, ctx->address);
      pn_link_open(rcv);
      /* Grant initial credit, capped at the high-water mark. */
      pn_flow(rcv, ctx->recv_count < ctx->high ? ctx->recv_count : ctx->high);
    }
  }

  /* Service the work queue: writable deliveries get the payload sent,
   * readable ones are drained and accepted, updated ones are settled. */
  pn_delivery_t *delivery = pn_work_head(connection);
  while (delivery) {
    pn_delivery_tag_t tag = pn_delivery_tag(delivery);
    pn_quote_data(tagstr, 1024, tag.bytes, tag.size);
    pn_link_t *link = pn_link(delivery);

    if (pn_writable(delivery)) {
      pn_send(link, data, ndata);
      if (pn_advance(link)) {
        if (!ctx->quiet) printf("sent delivery: %s\n", tagstr);
      }
    } else if (pn_readable(delivery)) {
      if (!ctx->quiet) {
        printf("received delivery: %s\n", tagstr);
        printf("  payload = \"");
      }
      while (true) {
        size_t n = pn_recv(link, msg, 1024);
        if (n == PN_EOS) {
          /* End of message: accept, settle, and close the link after the
           * last expected receive. */
          pn_advance(link);
          pn_disposition(delivery, PN_ACCEPTED);
          pn_settle(delivery);
          if (!--ctx->recv_count) {
            pn_link_close(link);
          }
          break;
        } else if (!ctx->quiet) {
          pn_print_data(msg, n);
        }
      }
      if (!ctx->quiet) printf("\"\n");
      /* Top credit back up once it drops below the low-water mark. */
      if (pn_credit(link) < ctx->low && pn_credit(link) < ctx->recv_count) {
        pn_flow(link, (ctx->recv_count < ctx->high ? ctx->recv_count : ctx->high) - pn_credit(link));
      }
    }

    if (pn_updated(delivery)) {
      if (!ctx->quiet)
        printf("disposition for %s: %u\n", tagstr, pn_remote_disposition(delivery));
      pn_clear(delivery);
      pn_settle(delivery);
      if (!--ctx->send_count) {
        pn_link_close(link);
      }
    }

    delivery = pn_work_next(delivery);
  }

  /* All traffic done: initiate connection close. */
  if (!ctx->send_count && !ctx->recv_count) {
    printf("closing\n");
    // XXX: how do we close the session?
    //pn_close((pn_endpoint_t *) ssn);
    pn_connection_close(connection);
  }

  if (pn_connection_state(connection) == (PN_LOCAL_CLOSED | PN_REMOTE_CLOSED)) {
    ctx->done = true;
  }
}
// Create a new session on this connection, open it with the supplied
// options, and return the wrapper by value.
session connection::open_session(const session_options &opts) {
    pn_session_t* raw = pn_session(pn_object());
    // TODO: error check, too many sessions, no mem...
    session s(make_wrapper<session>(raw));
    if (!!s) {
        s.open(opts);
    }
    return s;
}
/* Reactor event handler for the benchmark sender: opens the endpoints on
 * connection init, pumps pre-settled messages while credit lasts on flow,
 * installs a dedicated reply handler on the response link, and reports
 * statistics when the connection closes locally. */
void sender_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
  sender_context_t *sc = sender_context(h);

  switch (type) {

  case PN_CONNECTION_INIT: {
    /* Open connection -> session -> sender link; address the termini from
     * the URL path when one is present. */
    pn_connection_t *conn = pn_event_connection(event);
    pn_connection_set_container(conn, pn_string_get(sc->container_id));
    pn_connection_set_hostname(conn, pn_string_get(sc->hostname));
    pn_connection_open(conn);
    pn_session_t *ssn = pn_session(conn);
    pn_session_open(ssn);
    pn_link_t *snd = pn_sender(ssn, "sender");
    const char *path = pn_url_get_path(sc->send_url);
    if (path && strlen(path)) {
      pn_terminus_set_address(pn_link_target(snd), path);
      pn_terminus_set_address(pn_link_source(snd), path);
    }
    pn_link_open(snd);
  } break;

  case PN_LINK_FLOW: {
    /* Credit arrived: send as many messages as credit allows, up to the
     * configured count.  Deliveries are settled immediately (pre-settled). */
    pn_link_t *snd = pn_event_link(event);
    while (pn_link_credit(snd) > 0 && sc->sent < sc->opts->msg_count) {
      if (sc->sent == 0) statistics_start(sc->stats);

      /* Use the send counter as an 8-byte delivery tag.
       * NOTE(review): writes a uint64_t through a char[8] via void* — a
       * type-punning idiom; memcpy would be the cleaner equivalent. */
      char tag[8];
      void *ptr = &tag;
      *((uint64_t *) ptr) = sc->sent;
      pn_delivery_t *dlv = pn_delivery(snd, pn_dtag(tag, 8));

      // setup the message to send
      pn_message_t *msg = sc->message;
      pn_message_set_address(msg, sc->opts->targets.addresses[0]);
      sc->id.u.as_ulong = sc->sent;
      pn_message_set_correlation_id(msg, sc->id);
      pn_message_set_creation_time(msg, msgr_now());

      size_t size = sc->encoded_data_size;
      int err = pn_message_encode(msg, sc->encoded_data, &size);
      check(err == 0, "message encoding error");
      pn_link_send(snd, sc->encoded_data, size);
      pn_delivery_settle(dlv);
      sc->sent++;
    }
    /* All sent and no replies expected: shut down. */
    if (sc->sent == sc->opts->msg_count && !sc->opts->get_replies) {
      pn_link_close(snd);
      pn_connection_t *conn = pn_event_connection(event);
      pn_connection_close(conn);
    }
  } break;

  case PN_LINK_INIT: {
    pn_link_t *link = pn_event_link(event);
    if (pn_link_is_receiver(link)) {
      // Response messages link. Could manage credit and deliveries in this handler but
      // a dedicated handler also works.
      pn_handler_t *replyto = replyto_handler(sc);
      pn_flowcontroller_t *fc = pn_flowcontroller(1024);
      pn_handler_add(replyto, fc);
      pn_decref(fc);
      pn_handshaker_t *handshaker = pn_handshaker();
      pn_handler_add(replyto, handshaker);
      pn_decref(handshaker);
      /* Attach the composed handler to this link only. */
      pn_record_t *record = pn_link_attachments(link);
      pn_record_set_handler(record, replyto);
      pn_decref(replyto);
    }
  } break;

  case PN_CONNECTION_LOCAL_CLOSE: {
    statistics_report(sc->stats, sc->sent, sc->received);
  } break;

  default:
    break;
  }
}
// Create a new session on this connection and return a reference to its
// wrapper; the underlying pn_session_t remains owned by the engine.
session& connection::create_session() {
    pn_session_t* raw = pn_session(pn_cast(this));
    return *session::cast(raw);
}
/* Process each event posted by the reactor.
 *
 * Sender example: opens the endpoints on connection init, sends pre-settled
 * messages while credit lasts (except the final one, which waits for a
 * disposition), then cleanly shuts down once the last delivery reaches a
 * terminal state. */
static void event_handler(pn_handler_t *handler, pn_event_t *event, pn_event_type_t type)
{
  app_data_t *data = GET_APP_DATA(handler);

  switch (type) {

  case PN_CONNECTION_INIT: {
    // Create and open all the endpoints needed to send a message
    //
    pn_connection_t *conn;
    pn_session_t *ssn;
    pn_link_t *sender;

    conn = pn_event_connection(event);
    pn_connection_open(conn);
    ssn = pn_session(conn);
    pn_session_open(ssn);
    sender = pn_sender(ssn, "MySender");
    // we do not wait for ack until the last message
    pn_link_set_snd_settle_mode(sender, PN_SND_MIXED);
    if (!data->anon) {
      pn_terminus_set_address(pn_link_target(sender), data->target);
    }
    pn_link_open(sender);
  } break;

  case PN_LINK_FLOW: {
    // the remote has given us some credit, now we can send messages
    //
    static long tag = 0;  // a simple tag generator
                          // NOTE(review): function-local static — fine for a
                          // single-threaded reactor, not for concurrent use
    pn_delivery_t *delivery;
    pn_link_t *sender = pn_event_link(event);
    int credit = pn_link_credit(sender);
    while (credit > 0 && data->count > 0) {
      --credit;
      --data->count;
      ++tag;
      delivery = pn_delivery(sender, pn_dtag((const char *)&tag, sizeof(tag)));
      pn_link_send(sender, data->msg_data, data->msg_len);
      pn_link_advance(sender);
      if (data->count > 0) {
        // send pre-settled until the last one, then wait for an ack on
        // the last sent message. This allows the sender to send
        // messages as fast as possible and then exit when the consumer
        // has dealt with the last one.
        //
        pn_delivery_settle(delivery);
      }
    }
  } break;

  case PN_DELIVERY: {
    // Since the example sends all messages but the last pre-settled
    // (pre-acked), only the last message's delivery will get updated with
    // the remote state (acked/nacked).
    //
    pn_delivery_t *dlv = pn_event_delivery(event);
    if (pn_delivery_updated(dlv) && pn_delivery_remote_state(dlv)) {
      uint64_t rs = pn_delivery_remote_state(dlv);
      int done = 1;
      switch (rs) {
      case PN_RECEIVED:
        // This is not a terminal state - it is informational, and the
        // peer is still processing the message.
        done = 0;
        break;
      case PN_ACCEPTED:
        pn_delivery_settle(dlv);
        if (!quiet) fprintf(stdout, "Send complete!\n");
        break;
      case PN_REJECTED:
      case PN_RELEASED:
      case PN_MODIFIED:
        pn_delivery_settle(dlv);
        fprintf(stderr, "Message not accepted - code:%lu\n", (unsigned long)rs);
        break;
      default:
        // ??? no other terminal states defined, so ignore anything else
        pn_delivery_settle(dlv);
        fprintf(stderr, "Unknown delivery failure - code=%lu\n", (unsigned long)rs);
        break;
      }
      if (done) {
        // initiate clean shutdown of the endpoints
        pn_link_t *link = pn_delivery_link(dlv);
        pn_session_t *ssn = pn_link_session(link);
        pn_link_close(link);
        pn_session_close(ssn);
        pn_connection_close(pn_session_connection(ssn));
      }
    }
  } break;

  case PN_TRANSPORT_ERROR: {
    // The connection to the peer failed.
    //
    pn_transport_t *tport = pn_event_transport(event);
    pn_condition_t *cond = pn_transport_condition(tport);
    fprintf(stderr, "Network transport failed!\n");
    if (pn_condition_is_set(cond)) {
      const char *name = pn_condition_get_name(cond);
      const char *desc = pn_condition_get_description(cond);
      fprintf(stderr, " Error: %s Description: %s\n",
              (name) ? name : "<error name not provided>",
              (desc) ? desc : "<no description provided>");
    }
    // pn_reactor_process() will exit with a false return value, stopping
    // the main loop.
  } break;

  default:
    break;
  }
}