/**
 * Sends one message to the given address via the proton messenger.
 *
 * Builds a pn_message, attaches metadata (_addMetaToMessage) and the binary
 * payload, enqueues it, then flushes either synchronously (tracking mode)
 * or asynchronously.
 *
 * @param message  payload provider (getBytes()/getSize()).
 * @param address  destination; converted with toString().
 * @throws std::runtime_error if the proton message cannot be allocated.
 * @throws whatever _throwError() raises on messenger errors.
 */
void amqp::Sender::send(IMessage const &message, Address const &address) {
    pn_message_t *pnmessage = pn_message();
    if (pnmessage == NULL) {
        // Standard std::exception has no string constructor (that is an
        // MSVC extension); std::runtime_error carries the message portably.
        throw std::runtime_error("ERROR: Message could not be created.");
    }

    pn_message_set_address(pnmessage, address.toString().c_str());
    _addMetaToMessage(pnmessage, message);

    pn_data_t *pnmessage_body = pn_message_body(pnmessage);
    pn_data_put_binary(pnmessage_body,
                       pn_bytes(message.getSize(), message.getBytes()));

    pn_messenger_put(m_messenger, pnmessage);
    if (isError()) {
        pn_message_free(pnmessage);   // was leaked on the error path
        _throwError();
    }

    // To avoid traffic flood and speed up the solution it is better to use
    // blocking sockets while in tracking mode.
    if (isTraking()) {
        Log("Sending messages to %s\n", address.toString().c_str());
        m_tracker = pn_messenger_outgoing_tracker(m_messenger);
        pn_messenger_send(m_messenger, -1); // sync: flush all outgoing messages
    } else {
        pn_messenger_send(m_messenger, 1);  // async: block for at most one send
    }
    if (isError()) {
        pn_message_free(pnmessage);   // was leaked on the error path
        _throwError();
    }

    _checkTracking();
    pn_message_free(pnmessage);
}
/*
 * Sends a single "Hello from C!" binary message to the hard-coded Service
 * Bus address using the supplied messenger.  Errors are handled inside
 * check(); on normal completion returns 0.
 */
int sendMessage(pn_messenger_t * messenger) {
  //char * address = (char *) "amqps://{SAS Key Name}:{SAS key}@{namespace name}.servicebus.windows.net/{event hub name}";
  char * address = (char *) "amqps://*****:*****@kelvin-flight.servicebus.windows.net/example";
  char * msgtext = (char *) "Hello from C!";
  pn_message_t * message;
  pn_data_t * body;

  message = pn_message();
  pn_message_set_address(message, address);
  /* Fixed MIME type typo: was "application/octect-stream", the registered
   * media type is "application/octet-stream". */
  pn_message_set_content_type(message, (char*) "application/octet-stream");
  pn_message_set_inferred(message, true);

  body = pn_message_body(message);
  pn_data_put_binary(body, pn_bytes(strlen(msgtext), msgtext));

  pn_messenger_put(messenger, message);
  check(messenger);
  pn_messenger_send(messenger, 1);
  check(messenger);

  pn_message_free(message);
  return 0;  /* was missing: function is declared to return int */
}
/*
 * Receiver entry point: configures a proton messenger from the command line,
 * subscribes to the requested addresses, then receives messages until
 * opts.msg_count is reached (0 = run forever), optionally replying to each
 * message's reply-to and/or forwarding to a rotating list of targets.
 * Reports throughput statistics before exiting.
 */
int main(int argc, char** argv)
{
  Options_t opts;
  Statistics_t stats;
  uint64_t sent = 0;
  uint64_t received = 0;
  int forwarding_index = 0;
  int rc;
  pn_message_t *message;
  pn_messenger_t *messenger;

  parse_options( argc, argv, &opts );

  /* non-zero when every received message is re-sent to a forwarding target */
  const int forward = opts.forwarding_targets.count != 0;

  message = pn_message();
  messenger = pn_messenger( opts.name );

  /* load the various command line options if they're set */
  if (opts.certificate) {
    rc = pn_messenger_set_certificate(messenger, opts.certificate);
    check_messenger(messenger);
    check( rc == 0, "Failed to set certificate" );
  }
  if (opts.privatekey) {
    rc = pn_messenger_set_private_key(messenger, opts.privatekey);
    check_messenger(messenger);
    check( rc == 0, "Failed to set private key" );
  }
  if (opts.password) {
    rc = pn_messenger_set_password(messenger, opts.password);
    check_messenger(messenger);
    check( rc == 0, "Failed to set password" );
  }
  if (opts.ca_db) {
    rc = pn_messenger_set_trusted_certificates(messenger, opts.ca_db);
    check_messenger(messenger);
    check( rc == 0, "Failed to set trusted CA database" );
  }
  if (opts.incoming_window) {
    // RAFI: seems to cause receiver to hang:
    pn_messenger_set_incoming_window( messenger, opts.incoming_window );
  }
  pn_messenger_set_timeout( messenger, opts.timeout );

  pn_messenger_start(messenger);
  check_messenger(messenger);

  int i;
  for (i = 0; i < opts.subscriptions.count; i++) {
    pn_messenger_subscribe(messenger, opts.subscriptions.addresses[i]);
    check_messenger(messenger);
    LOG("Subscribing to '%s'\n", opts.subscriptions.addresses[i]);
  }

  // hack to let test scripts know when the receivers are ready (so
  // that the senders may be started)
  if (opts.ready_text) {
    fprintf(stdout, "%s\n", opts.ready_text);
    fflush(stdout);
  }

  /* main receive loop — msg_count == 0 means no limit */
  while (!opts.msg_count || received < opts.msg_count) {
    LOG("Calling pn_messenger_recv(%d)\n", opts.recv_count);
    rc = pn_messenger_recv(messenger, opts.recv_count);
    check_messenger(messenger);
    /* PN_TIMEOUT is only tolerated when no timeout was configured */
    check(rc == 0 || (opts.timeout == 0 && rc == PN_TIMEOUT),
          "pn_messenger_recv() failed");

    // start the timer only after receiving the first msg
    if (received == 0) statistics_start( &stats );

    LOG("Messages on incoming queue: %d\n", pn_messenger_incoming(messenger));
    while (pn_messenger_incoming(messenger)) {
      pn_messenger_get(messenger, message);
      check_messenger(messenger);
      received++;
      // TODO: header decoding?
      // uint64_t id = pn_message_get_correlation_id( message ).u.as_ulong;
      statistics_msg_received( &stats, message );

      /* echo the message back to its reply-to address, if requested */
      if (opts.reply) {
        const char *reply_addr = pn_message_get_reply_to( message );
        if (reply_addr) {
          LOG("Replying to: %s\n", reply_addr );
          pn_message_set_address( message, reply_addr );
          pn_message_set_creation_time( message, msgr_now() );
          pn_messenger_put(messenger, message);
          sent++;
        }
      }

      /* round-robin forward the message to the next configured target */
      if (forward) {
        const char *forward_addr = opts.forwarding_targets.addresses[forwarding_index];
        forwarding_index = NEXT_ADDRESS(opts.forwarding_targets, forwarding_index);
        LOG("Forwarding to: %s\n", forward_addr );
        pn_message_set_address( message, forward_addr );
        pn_message_set_reply_to( message, NULL );  // else points to origin sender
        pn_message_set_creation_time( message, msgr_now() );
        pn_messenger_put(messenger, message);
        sent++;
      }
    }
    LOG("Messages received=%llu sent=%llu\n", received, sent);
  }

  // this will flush any pending sends
  if (pn_messenger_outgoing(messenger) > 0) {
    LOG("Calling pn_messenger_send()\n");
    rc = pn_messenger_send(messenger, -1);
    check_messenger(messenger);
    check(rc == 0, "pn_messenger_send() failed");
  }

  rc = pn_messenger_stop(messenger);
  check(rc == 0, "pn_messenger_stop() failed");
  check_messenger(messenger);

  statistics_report( &stats, sent, received );

  pn_messenger_free(messenger);
  pn_message_free(message);
  addresses_free( &opts.subscriptions );
  addresses_free( &opts.forwarding_targets );
  return 0;
}
/*
 * Sender entry point: configures a proton messenger from the command line,
 * builds one reusable message with a zero-filled binary body of
 * opts.msg_size bytes, then sends it opts.msg_count times (0 = no limit)
 * round-robin across the target addresses.  Optionally waits for a reply to
 * each message (get_replies) and reports throughput statistics on exit.
 */
int main(int argc, char** argv)
{
  Options_t opts;
  Statistics_t stats;
  uint64_t sent = 0;
  uint64_t received = 0;
  int target_index = 0;
  int rc;

  pn_message_t *message = 0;
  pn_message_t *reply_message = 0;
  pn_messenger_t *messenger = 0;

  parse_options( argc, argv, &opts );

  messenger = pn_messenger( opts.name );

  if (opts.certificate) {
    rc = pn_messenger_set_certificate(messenger, opts.certificate);
    check( rc == 0, "Failed to set certificate" );
  }
  if (opts.privatekey) {
    rc = pn_messenger_set_private_key(messenger, opts.privatekey);
    check( rc == 0, "Failed to set private key" );
  }
  if (opts.password) {
    rc = pn_messenger_set_password(messenger, opts.password);
    /* password was heap-allocated by parse_options; release it once set */
    free(opts.password);
    check( rc == 0, "Failed to set password" );
  }
  if (opts.ca_db) {
    rc = pn_messenger_set_trusted_certificates(messenger, opts.ca_db);
    check( rc == 0, "Failed to set trusted CA database" );
  }
  if (opts.outgoing_window) {
    pn_messenger_set_outgoing_window( messenger, opts.outgoing_window );
  }
  pn_messenger_set_timeout( messenger, opts.timeout );

  pn_messenger_start(messenger);

  message = pn_message();
  check(message, "failed to allocate a message");
  /* "~" requests a temporary reply address from the peer */
  pn_message_set_reply_to(message, "~");
  pn_data_t *body = pn_message_body(message);
  /* zero-filled payload of the requested size */
  char *data = (char *)calloc(1, opts.msg_size);
  pn_data_put_binary(body, pn_bytes(opts.msg_size, data));
  free(data);
  pn_atom_t id;
  id.type = PN_ULONG;

#if 0
  // TODO: how do we effectively benchmark header processing overhead???
  pn_data_t *props = pn_message_properties(message);
  pn_data_put_map(props);
  pn_data_enter(props);
  //
  //pn_data_put_string(props, pn_bytes(6, "string"));
  //pn_data_put_string(props, pn_bytes(10, "this is awkward"));
  //
  //pn_data_put_string(props, pn_bytes(4, "long"));
  pn_data_put_long(props, 12345);
  //
  //pn_data_put_string(props, pn_bytes(9, "timestamp"));
  pn_data_put_timestamp(props, (pn_timestamp_t) 54321);
  pn_data_exit(props);
#endif

  const int get_replies = opts.get_replies;

  if (get_replies) {
    // disable the timeout so that pn_messenger_recv() won't block
    reply_message = pn_message();
    check(reply_message, "failed to allocate a message");
  }

  statistics_start( &stats );

  /* main send loop — msg_count == 0 means no limit */
  while (!opts.msg_count || (sent < opts.msg_count)) {
    // setup the message to send
    pn_message_set_address(message, opts.targets.addresses[target_index]);
    target_index = NEXT_ADDRESS(opts.targets, target_index);
    /* correlation id carries the send sequence number */
    id.u.as_ulong = sent;
    pn_message_set_correlation_id( message, id );
    pn_message_set_creation_time( message, msgr_now() );
    pn_messenger_put(messenger, message);
    sent++;
    /* flush once a full batch has accumulated in the outgoing queue */
    if (opts.send_batch && (pn_messenger_outgoing(messenger) >= (int)opts.send_batch)) {
      if (get_replies) {
        while (received < sent) {
          // this will also transmit any pending sent messages
          received += process_replies( messenger, reply_message,
                                       &stats, opts.recv_count );
        }
      } else {
        LOG("Calling pn_messenger_send()\n");
        rc = pn_messenger_send(messenger, -1);
        check((rc == 0 || rc == PN_TIMEOUT), "pn_messenger_send() failed");
      }
    }
    check_messenger(messenger);
  }

  LOG("Messages received=%llu sent=%llu\n", received, sent);

  if (get_replies) {
    // wait for the last of the replies
    while (received < sent) {
      int count = process_replies( messenger, reply_message,
                                   &stats, opts.recv_count );
      check( count > 0 || (opts.timeout == 0),
             "Error: timed out waiting for reply messages\n");
      received += count;
      LOG("Messages received=%llu sent=%llu\n", received, sent);
    }
  } else if (pn_messenger_outgoing(messenger) > 0) {
    LOG("Calling pn_messenger_send()\n");
    rc = pn_messenger_send(messenger, -1);
    check(rc == 0, "pn_messenger_send() failed");
  }

  rc = pn_messenger_stop(messenger);
  check(rc == 0, "pn_messenger_stop() failed");
  check_messenger(messenger);

  statistics_report( &stats, sent, received );

  pn_messenger_free(messenger);
  pn_message_free(message);
  if (reply_message) pn_message_free( reply_message );
  addresses_free( &opts.targets );
  return 0;
}
/*
 * Publishes a MAMA message through the qpid bridge.  The destination depends
 * on the bridge-message type set by the inbox handlers: inbox requests go to
 * the publisher's default URI, inbox responses go to the reply-to destination
 * carried in the bridge message, and regular publishes fan out to every
 * registered endpoint for the subject (P2P transport) or to the default URI
 * otherwise.  Enqueued messages are flushed by the pn_messenger_send() call
 * at the end; the message type is then reset to QPID_MSG_PUB_SUB.
 *
 * Returns MAMA_STATUS_NULL_ARG if publisher or msg is NULL,
 * MAMA_STATUS_PLATFORM if the messenger flush fails, otherwise `status`.
 */
mama_status qpidBridgeMamaPublisher_send (publisherBridge publisher, mamaMsg msg)
{
    mama_status status = MAMA_STATUS_OK;
    qpidPublisherBridge* impl = (qpidPublisherBridge*) publisher;
    const char* qpidError = NULL;
    endpoint_t* targets = NULL;
    size_t targetCount = 0;
    size_t targetInc = 0;
    char* url = NULL;
    qpidMsgType type = QPID_MSG_PUB_SUB;

    if (NULL == impl)
    {
        mama_log (MAMA_LOG_LEVEL_ERROR, "qpidBridgeMamaPublisher_send(): No publisher.");
        return MAMA_STATUS_NULL_ARG;
    }
    else if (NULL == msg)
    {
        return MAMA_STATUS_NULL_ARG;
    }

    /* Get the bridge message type if specified already by inbox handlers */
    qpidBridgeMamaMsgImpl_getMsgType (impl->mMamaBridgeMsg, &type);

    switch (type)
    {
    case QPID_MSG_INBOX_REQUEST:
        /* Use the publisher's default send subject */
        qpidBridgeMamaMsg_setSendSubject (impl->mMamaBridgeMsg, impl->mSubject, impl->mSource);
        /* Use the publisher's default send destination for request */
        qpidBridgePublisherImpl_enqueueMessageForAddress ( msg, (char*) impl->mUri, impl);
        break;
    case QPID_MSG_INBOX_RESPONSE:
        /* The url should already be set for inbox responses as the replyTo */
        qpidBridgeMamaMsgImpl_getDestination (impl->mMamaBridgeMsg, &url);
        /* Send out this message to the URL already provided */
        qpidBridgePublisherImpl_enqueueMessageForAddress (msg, url, impl);
        break;
    default:
        /* Use the publisher's default send subject */
        qpidBridgeMamaMsg_setSendSubject (impl->mMamaBridgeMsg, impl->mSubject, impl->mSource);
        if (QPID_TRANSPORT_TYPE_P2P == qpidBridgeMamaTransportImpl_getType ( (transportBridge) impl->mTransport))
        {
            /* For each known downstream destination */
            status = endpointPool_getRegistered (impl->mTransport->mPubEndpoints, impl->mSubject, &targets, &targetCount);
            /* NOTE(review): the status returned above is never examined; on
             * failure targetCount remains 0 and we bail out below reporting
             * MAMA_STATUS_OK — confirm whether failures should propagate. */
            if (targetCount == 0)
            {
                mama_log (MAMA_LOG_LEVEL_FINEST, "qpidBridgeMamaPublisher_send(): " "No one subscribed to subject '%s', not publishing.", impl->mSubject);
                return MAMA_STATUS_OK;
            }
            /* Push the message out to the send queue for each interested party */
            for (targetInc = 0; targetInc < targetCount; targetInc++)
            {
                /* NOTE(review): endpoint_t is cast straight to a URL string —
                 * presumably the pool stores addresses; verify against
                 * endpointPool_getRegistered's contract. */
                url = (char*) targets[targetInc];
                qpidBridgePublisherImpl_enqueueMessageForAddress (msg, url, impl);
            }
        }
        else
        {
            /* Non-P2P (brokered) transport: single default destination */
            qpidBridgePublisherImpl_enqueueMessageForAddress (msg, impl->mUri, impl);
        }
        break;
    }

    /* Note the messages don't actually get published until here */
    if (pn_messenger_send(impl->mTransport->mOutgoing, QPID_MESSENGER_SEND_TIMEOUT))
    {
        qpidError = PN_MESSENGER_ERROR (impl->mTransport->mOutgoing);
        mama_log (MAMA_LOG_LEVEL_SEVERE, "qpidBridgeMamaPublisher_send(): " "Qpid Error:[%s]", qpidError);
        return MAMA_STATUS_PLATFORM;
    }

    /* Reset the message type for the next publish */
    qpidBridgeMamaMsgImpl_setMsgType (impl->mMamaBridgeMsg, QPID_MSG_PUB_SUB);

    return status;
}