Example 1
void process_flow(ldp_connection_t *conn, pn_event_t *event) {
    fprintf(stderr, "flow event %s\n", pn_event_type_name(pn_event_type(event)));

    pn_link_t *sender = pn_event_link(event);

    /* Compose a message with a short string body */
    pn_message_t *message = pn_message();
    pn_message_set_address(message, "amqp://foo/bar");
    pn_data_t *body = pn_message_body(message);
    const char *msgtext = "hello world!";
    pn_data_put_string(body, pn_bytes(strlen(msgtext), msgtext));

    /* Encode the message into a scratch buffer */
    pn_buffer_t *buffer = pn_buffer(1000);
    char *encoded = pn_buffer_bytes(buffer).start;
    size_t size = pn_buffer_capacity(buffer);
    int err = pn_message_encode(message, encoded, &size);
    if (err) {
        fprintf(stderr, "trouble encoding message\n");
    } else {
        /* Each delivery needs a tag that is unique on the link */
        char tag[8];
        static uint64_t next_tag;
        *((uint64_t*)tag) = ++next_tag;
        /* Creating the delivery makes it the link's current delivery;
           send the encoded bytes and advance to complete the transfer */
        pn_delivery(sender, pn_dtag(tag, 8));
        pn_link_send(sender, encoded, size);
        pn_link_advance(sender);
    }
    pn_buffer_free(buffer);
    pn_message_free(message);
}
Example 2
void SenderContext::Delivery::send(pn_link_t* sender)
{
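    // The delivery tag is built from the raw bytes of this Delivery's 'id';
    // 'encoded' supplies the already-encoded message bytes to write to the link.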
    pn_delivery_tag_t tag;
    tag.size = sizeof(id);
    tag.bytes = reinterpret_cast<const char*>(&id);
    token = pn_delivery(sender, tag);
    pn_link_send(sender, encoded.getData(), encoded.getSize());
    pn_link_advance(sender);
}
Example 3
static pn_event_type_t message_stream_handler(test_handler_t *th, pn_event_t *e) {
  struct message_stream_context *ctx = (struct message_stream_context*)th->context;
  switch (pn_event_type(e)) {
   case PN_CONNECTION_BOUND:
    pn_transport_set_max_frame(pn_event_transport(e), FRAME);
    return PN_EVENT_NONE;

   case PN_SESSION_INIT:
    pn_session_set_incoming_capacity(pn_event_session(e), FRAME); /* Single frame incoming */
    pn_session_set_outgoing_window(pn_event_session(e), 1);       /* Single frame outgoing */
    return PN_EVENT_NONE;

   case PN_LINK_REMOTE_OPEN:
    common_handler(th, e);
    if (pn_link_is_receiver(pn_event_link(e))) {
      pn_link_flow(pn_event_link(e), 1);
    } else {
      ctx->sender = pn_event_link(e);
    }
    return PN_EVENT_NONE;

   case PN_LINK_FLOW:           /* Start a delivery */
    if (pn_link_is_sender(pn_event_link(e)) && !ctx->dlv) {
      ctx->dlv = pn_delivery(pn_event_link(e), pn_dtag("x", 1));
    }
    return PN_LINK_FLOW;

   case PN_CONNECTION_WAKE: {     /* Send a chunk */
     ssize_t remains = ctx->size - ctx->sent;
     ssize_t n = (CHUNK < remains) ? CHUNK : remains;
     TEST_CHECK(th->t, n == pn_link_send(ctx->sender, ctx->send_buf.start + ctx->sent, n));
     ctx->sent += n;
     if (ctx->sent == ctx->size) {
       TEST_CHECK(th->t, pn_link_advance(ctx->sender));
     }
     return PN_CONNECTION_WAKE;
   }

   case PN_DELIVERY: {          /* Receive a delivery - smaller than a chunk? */
     pn_delivery_t *dlv = pn_event_delivery(e);
     if (pn_delivery_readable(dlv)) {
       ssize_t n = pn_delivery_pending(dlv);
       rwbytes_ensure(&ctx->recv_buf, ctx->received + n);
       TEST_ASSERT(n == pn_link_recv(pn_event_link(e), ctx->recv_buf.start + ctx->received, n));
       ctx->received += n;
     }
     ctx->complete = !pn_delivery_partial(dlv);
     return PN_DELIVERY;
   }

   default:
    return common_handler(th, e);
  }
}
Example 4
tracker sender::send(const message &message) {
    // Derive a unique delivery tag from a per-link counter
    uint64_t id = ++tag_counter;
    pn_delivery_t *dlv =
        pn_delivery(pn_object(), pn_dtag(reinterpret_cast<const char*>(&id), sizeof(id)));
    // Encode the message and write the bytes to the link
    std::vector<char> buf;
    message.encode(buf);
    assert(!buf.empty());
    pn_link_send(pn_object(), &buf[0], buf.size());
    pn_link_advance(pn_object());
    // Pre-settle immediately when the link sends settled (at-most-once)
    if (pn_link_snd_settle_mode(pn_object()) == PN_SND_SETTLED)
        pn_delivery_settle(dlv);
    if (!pn_link_credit(pn_object()))
        link_context::get(pn_object()).draining = false;
    return make_wrapper<tracker>(dlv);
}
Example 5
void connection_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
  connection_context_t *cc = connection_context(h);
  bool replying = cc->global->opts->reply;

  switch (type) {
  case PN_LINK_REMOTE_OPEN:
    {
      pn_link_t *link = pn_event_link(event);
      if (pn_link_is_receiver(link)) {
        check(cc->recv_link == NULL, "Multiple incoming links on one connection");
        cc->recv_link = link;
        pn_connection_t *conn = pn_event_connection(event);
        pn_list_add(cc->global->active_connections, conn);
        if (cc->global->shutting_down) {
          pn_connection_close(conn);
          break;
        }
        if (replying) {
          // Set up a reply link and defer granting credit to the incoming link
          pn_connection_t *conn = pn_session_connection(pn_link_session(link));
          pn_session_t *ssn = pn_session(conn);
          pn_session_open(ssn);
          char name[100]; // prefer a multiplatform uuid generator
          sprintf(name, "reply_sender_%d", cc->connection_id);
          cc->reply_link = pn_sender(ssn, name);
          pn_link_open(cc->reply_link);
        }
        else {
          pn_flowcontroller_t *fc = pn_flowcontroller(1024);
          pn_handler_add(h, fc);
          pn_decref(fc);
        }
      }
    }
    break;
  case PN_LINK_FLOW:
    {
      if (replying) {
        pn_link_t *reply_link = pn_event_link(event);
        // pn_flowcontroller handles the non-reply case
        check(reply_link == cc->reply_link, "internal error");

        // Grant the sender as much credit as just given to us for replies
        int delta = pn_link_credit(reply_link) - pn_link_credit(cc->recv_link);
        if (delta > 0)
          pn_link_flow(cc->recv_link, delta);
      }
    }
    break;
  case PN_DELIVERY:
    {
      pn_link_t *recv_link = pn_event_link(event);
      pn_delivery_t *dlv = pn_event_delivery(event);
      if (pn_link_is_receiver(recv_link) && !pn_delivery_partial(dlv)) {
        if (cc->global->received == 0) statistics_start(cc->global->stats);

        size_t encoded_size = pn_delivery_pending(dlv);
        cc->global->encoded_data = ensure_buffer(cc->global->encoded_data, encoded_size,
                                                 &cc->global->encoded_data_size);
        check(cc->global->encoded_data, "decoding buffer realloc failure");

        ssize_t n = pn_link_recv(recv_link, cc->global->encoded_data, encoded_size);
        check(n == (ssize_t) encoded_size, "message data read fail");
        pn_message_t *msg = cc->global->message;
        int err = pn_message_decode(msg, cc->global->encoded_data, n);
        check(err == 0, "message decode error");
        cc->global->received++;
        pn_delivery_settle(dlv);
        statistics_msg_received(cc->global->stats, msg);

        if (replying) {
          const char *reply_addr = pn_message_get_reply_to(msg);
          if (reply_addr) {
            pn_link_t *rl = cc->reply_link;
            check(pn_link_credit(rl) > 0, "message received without corresponding reply credit");
            LOG("Replying to: %s\n", reply_addr );

            pn_message_set_address(msg, reply_addr);
            pn_message_set_creation_time(msg, msgr_now());

            char tag[8];
            void *ptr = &tag;
            *((uint64_t *) ptr) = cc->global->sent;
            pn_delivery_t *dlv = pn_delivery(rl, pn_dtag(tag, 8));
            size_t size = cc->global->encoded_data_size;
            int err = pn_message_encode(msg, cc->global->encoded_data, &size);
            check(err == 0, "message encoding error");
            pn_link_send(rl, cc->global->encoded_data, size);
            pn_delivery_settle(dlv);

            cc->global->sent++;
          }
        }
      }
      if (cc->global->received >= cc->global->opts->msg_count) {
        global_shutdown(cc->global);
      }
    }
    break;
  case PN_CONNECTION_UNBOUND:
    {
      pn_connection_t *conn = pn_event_connection(event);
      pn_list_remove(cc->global->active_connections, conn);
      pn_connection_release(conn);
    }
    break;
  default:
    break;
  }
}
Example 6
// check if a command needs processing
static void _poll_command(protocolState_t *ps)
{
	if (ps->stopped) return;

	threadIPC_t *ipc = ps->ipc;

	pthread_mutex_lock(&ipc->lock);

	switch (ipc->command) {

	case COMMAND_SHUTDOWN:
	    DBGPRINTF("omamqp1: Protocol thread processing shutdown command\n");
	    ps->stopped = true;
	    _close_connection(ps);
	    // wait for the shutdown to complete before ack'ing this command
	    break;

	case COMMAND_IS_READY:
	    DBGPRINTF("omamqp1: Protocol thread processing ready query command\n");
	    ipc->result = _is_ready(ps->sender)
	                  ? RS_RET_OK
	                  : RS_RET_SUSPENDED;
	    ipc->command = COMMAND_DONE;
	    pthread_cond_signal(&ipc->condition);
	    break;

	case COMMAND_SEND:
	    if (ps->delivery) break;  // currently processing this command
	    DBGPRINTF("omamqp1: Protocol thread processing send message command\n");
	    if (!_is_ready(ps->sender)) {
	        ipc->result = RS_RET_SUSPENDED;
	        ipc->command = COMMAND_DONE;
	        pthread_cond_signal(&ipc->condition);
	        break;
	    }

	    // send the message
	    ++ps->tag;
	    ps->delivery = pn_delivery(ps->sender,
	                               pn_dtag((const char *)&ps->tag, sizeof(ps->tag)));
	    pn_message_t *message = ipc->message;
	    assert(message);

	    int rc = 0;
	    size_t len = ps->buffer_size;
	    do {
	        rc = pn_message_encode(message, ps->encode_buffer, &len);
	        if (rc == PN_OVERFLOW) {
	            _grow_buffer(ps);
	            len = ps->buffer_size;
	        }
	    } while (rc == PN_OVERFLOW);

	    pn_link_send(ps->sender, ps->encode_buffer, len);
	    pn_link_advance(ps->sender);
	    ++ps->msgs_sent;
	    // command completes when remote updates the delivery (see PN_DELIVERY)
	    break;

	case COMMAND_DONE:
	    break;
	}

	pthread_mutex_unlock(&ipc->lock);
}
Example 7
void sender_dispatch(pn_handler_t *h, pn_event_t *event, pn_event_type_t type)
{
  sender_context_t *sc = sender_context(h);

  switch (type) {
  case PN_CONNECTION_INIT:
    {
      pn_connection_t *conn = pn_event_connection(event);
      pn_connection_set_container(conn, pn_string_get(sc->container_id));
      pn_connection_set_hostname(conn, pn_string_get(sc->hostname));
      pn_connection_open(conn);
      pn_session_t *ssn = pn_session(conn);
      pn_session_open(ssn);
      pn_link_t *snd = pn_sender(ssn, "sender");
      const char *path = pn_url_get_path(sc->send_url);
      if (path && strlen(path)) {
        pn_terminus_set_address(pn_link_target(snd), path);
        pn_terminus_set_address(pn_link_source(snd), path);
      }
      pn_link_open(snd);
    }
    break;
  case PN_LINK_FLOW:
    {
      pn_link_t *snd = pn_event_link(event);
      while (pn_link_credit(snd) > 0 && sc->sent < sc->opts->msg_count) {
        if (sc->sent == 0)
          statistics_start(sc->stats);

        char tag[8];
        void *ptr = &tag;
        *((uint64_t *) ptr) = sc->sent;
        pn_delivery_t *dlv = pn_delivery(snd, pn_dtag(tag, 8));

        // setup the message to send
        pn_message_t *msg = sc->message;
        pn_message_set_address(msg, sc->opts->targets.addresses[0]);
        sc->id.u.as_ulong = sc->sent;
        pn_message_set_correlation_id(msg, sc->id);
        pn_message_set_creation_time(msg, msgr_now());

        size_t size = sc->encoded_data_size;
        int err = pn_message_encode(msg, sc->encoded_data, &size);
        check(err == 0, "message encoding error");
        pn_link_send(snd, sc->encoded_data, size);
        pn_delivery_settle(dlv);
        sc->sent++;
      }
      if (sc->sent == sc->opts->msg_count && !sc->opts->get_replies) {
        pn_link_close(snd);
        pn_connection_t *conn = pn_event_connection(event);
        pn_connection_close(conn);
      }
    }
    break;
  case PN_LINK_INIT:
    {
      pn_link_t *link = pn_event_link(event);
      if (pn_link_is_receiver(link)) {
        // Response messages link.  Could manage credit and deliveries in this handler but
        // a dedicated handler also works.
        pn_handler_t *replyto = replyto_handler(sc);
        pn_flowcontroller_t *fc = pn_flowcontroller(1024);
        pn_handler_add(replyto, fc);
        pn_decref(fc);
        pn_handshaker_t *handshaker = pn_handshaker();
        pn_handler_add(replyto, handshaker);
        pn_decref(handshaker);
        pn_record_t *record = pn_link_attachments(link);
        pn_record_set_handler(record, replyto);
        pn_decref(replyto);
      }
    }
    break;
  case PN_CONNECTION_LOCAL_CLOSE:
    {
      statistics_report(sc->stats, sc->sent, sc->received);
    }
    break;
  default:
    break;
  }
}
Example 8
void qd_message_send(qd_message_t *in_msg,
                     qd_link_t *link,
                     bool strip_annotations)
{
    qd_message_pvt_t     *msg     = (qd_message_pvt_t*) in_msg;
    qd_message_content_t *content = msg->content;
    qd_buffer_t          *buf     = DEQ_HEAD(content->buffers);
    unsigned char        *cursor;
    pn_link_t            *pnl     = qd_link_pn(link);

    char repr[qd_message_repr_len()];
    qd_log(log_source, QD_LOG_TRACE, "Sending %s on link %s",
           qd_message_repr(in_msg, repr, sizeof(repr)),
           pn_link_name(pnl));

    qd_buffer_list_t new_ma;
    DEQ_INIT(new_ma);

    if (strip_annotations || compose_message_annotations(msg, &new_ma)) {
        //
        // This is the case where the message annotations have been modified.
        // The message send must be divided into sections:  The existing header;
        // the new message annotations; the rest of the existing message.
        // Note that the original message annotations that are still in the
        // buffer chain must not be sent.
        //
        // Start by making sure that we've parsed the message sections through
        // the message annotations
        //
        // ??? NO LONGER NECESSARY???
        if (!qd_message_check(in_msg, QD_DEPTH_MESSAGE_ANNOTATIONS)) {
            qd_log(log_source, QD_LOG_ERROR, "Cannot send: %s", qd_error_message());
            return;
        }

        //
        // Send header if present
        //
        cursor = qd_buffer_base(buf);
        if (content->section_message_header.length > 0) {
            buf    = content->section_message_header.buffer;
            cursor = content->section_message_header.offset + qd_buffer_base(buf);
            advance(&cursor, &buf,
                    content->section_message_header.length + content->section_message_header.hdr_length,
                    send_handler, (void*) pnl);
        }

        //
        // Send new message annotations
        //
        qd_buffer_t *da_buf = DEQ_HEAD(new_ma);
        while (da_buf) {
            pn_link_send(pnl, (char*) qd_buffer_base(da_buf), qd_buffer_size(da_buf));
            da_buf = DEQ_NEXT(da_buf);
        }
        qd_buffer_list_free_buffers(&new_ma);

        //
        // Skip over replaced message annotations
        //
        if (content->section_message_annotation.length > 0)
            advance(&cursor, &buf,
                    content->section_message_annotation.hdr_length + content->section_message_annotation.length,
                    0, 0);

        //
        // Send remaining partial buffer
        //
        if (buf) {
            size_t len = qd_buffer_size(buf) - (cursor - qd_buffer_base(buf));
            advance(&cursor, &buf, len, send_handler, (void*) pnl);
        }

        // Fall through to process the remaining buffers normally
        // Note that 'advance' will have moved us to the next buffer in the chain.
    }

    while (buf) {
        pn_link_send(pnl, (char*) qd_buffer_base(buf), qd_buffer_size(buf));
        buf = DEQ_NEXT(buf);
    }
}
Example 9
static void send_handler(void *context, const unsigned char *start, int length)
{
    pn_link_t *pnl = (pn_link_t*) context;
    pn_link_send(pnl, (const char*) start, length);
}
Example 10
/* Process each event posted by the reactor.
 */
static void event_handler(pn_handler_t *handler,
                          pn_event_t *event,
                          pn_event_type_t type)
{
    app_data_t *data = GET_APP_DATA(handler);

    switch (type) {

    case PN_CONNECTION_INIT: {
        // Create and open all the endpoints needed to send a message
        //
        pn_connection_t *conn;
        pn_session_t *ssn;
        pn_link_t *sender;

        conn = pn_event_connection(event);
        pn_connection_open(conn);
        ssn = pn_session(conn);
        pn_session_open(ssn);
        sender = pn_sender(ssn, "MySender");
        // we do not wait for ack until the last message
        pn_link_set_snd_settle_mode(sender, PN_SND_MIXED);
        if (!data->anon) {
            pn_terminus_set_address(pn_link_target(sender), data->target);
        }
        pn_link_open(sender);
    } break;

    case PN_LINK_FLOW: {
        // the remote has given us some credit, now we can send messages
        //
        static long tag = 0;  // a simple tag generator
        pn_delivery_t *delivery;
        pn_link_t *sender = pn_event_link(event);
        int credit = pn_link_credit(sender);
        while (credit > 0 && data->count > 0) {
            --credit;
            --data->count;
            ++tag;
            delivery = pn_delivery(sender,
                                   pn_dtag((const char *)&tag, sizeof(tag)));
            pn_link_send(sender, data->msg_data, data->msg_len);
            pn_link_advance(sender);
            if (data->count > 0) {
                // send pre-settled until the last one, then wait for an ack on
                // the last sent message. This allows the sender to send
                // messages as fast as possible and then exit when the consumer
                // has dealt with the last one.
                //
                pn_delivery_settle(delivery);
            }
        }
    } break;

    case PN_DELIVERY: {
        // Since the example sends all messages but the last pre-settled
        // (pre-acked), only the last message's delivery will get updated with
        // the remote state (acked/nacked).
        //
        pn_delivery_t *dlv = pn_event_delivery(event);
        if (pn_delivery_updated(dlv) && pn_delivery_remote_state(dlv)) {
            uint64_t rs = pn_delivery_remote_state(dlv);
            int done = 1;
            switch (rs) {
            case PN_RECEIVED:
                // This is not a terminal state - it is informational, and the
                // peer is still processing the message.
                done = 0;
                break;
            case PN_ACCEPTED:
                pn_delivery_settle(dlv);
                if (!quiet) fprintf(stdout, "Send complete!\n");
                break;
            case PN_REJECTED:
            case PN_RELEASED:
            case PN_MODIFIED:
                pn_delivery_settle(dlv);
                fprintf(stderr, "Message not accepted - code:%lu\n", (unsigned long)rs);
                break;
            default:
                // ??? no other terminal states defined, so ignore anything else
                pn_delivery_settle(dlv);
                fprintf(stderr, "Unknown delivery failure - code=%lu\n", (unsigned long)rs);
                break;
            }

            if (done) {
                // initiate clean shutdown of the endpoints
                pn_link_t *link = pn_delivery_link(dlv);
                pn_session_t *ssn = pn_link_session(link);
                pn_link_close(link);
                pn_session_close(ssn);
                pn_connection_close(pn_session_connection(ssn));
            }
        }
    } break;

    case PN_TRANSPORT_ERROR: {
        // The connection to the peer failed.
        //
        pn_transport_t *tport = pn_event_transport(event);
        pn_condition_t *cond = pn_transport_condition(tport);
        fprintf(stderr, "Network transport failed!\n");
        if (pn_condition_is_set(cond)) {
            const char *name = pn_condition_get_name(cond);
            const char *desc = pn_condition_get_description(cond);
            fprintf(stderr, "    Error: %s  Description: %s\n",
                    (name) ? name : "<error name not provided>",
                    (desc) ? desc : "<no description provided>");
        }
        // pn_reactor_process() will exit with a false return value, stopping
        // the main loop.
    } break;

    default:
        break;
    }
}
Example 11
bool BufferedTransfer::write(pn_link_t* link)
{
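    // Write the buffered bytes to the link's current delivery;
    // pn_link_advance() returns true once it moves past that delivery.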
    pn_link_send(link, &data[0], data.size());
    return pn_link_advance(link);
}
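Taken together, the examples share one pattern: create a delivery with a tag that is unique on the link, encode the message, write the bytes with pn_link_send(), then call pn_link_advance() to finish the delivery, settling it immediately when no acknowledgement is expected. Below is a minimal, self-contained sketch of that pattern rather than code from any of the projects above; the helper name send_one_message and the starting buffer size are illustrative, and the grow-on-PN_OVERFLOW loop mirrors Example 6.

/* Minimal sketch of the common send pattern shown above. Assumes the link
 * already has credit (e.g. called from a PN_LINK_FLOW handler); the helper
 * name and buffer size are illustrative, and error checks are kept short. */
#include <proton/codec.h>
#include <proton/delivery.h>
#include <proton/error.h>
#include <proton/link.h>
#include <proton/message.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int send_one_message(pn_link_t *sender, const char *text)
{
    static uint64_t tag_counter = 0;   /* tag must be unique per delivery on the link */
    uint64_t tag = ++tag_counter;

    pn_message_t *message = pn_message();
    pn_data_put_string(pn_message_body(message), pn_bytes(strlen(text), text));

    /* Encode, growing the buffer on PN_OVERFLOW as in Example 6 */
    size_t capacity = 512;
    char *buf = (char *)malloc(capacity);
    size_t size = capacity;
    int err = pn_message_encode(message, buf, &size);
    while (err == PN_OVERFLOW) {
        capacity *= 2;
        buf = (char *)realloc(buf, capacity);
        size = capacity;
        err = pn_message_encode(message, buf, &size);
    }

    if (err) {
        fprintf(stderr, "message encoding failed: %d\n", err);
    } else {
        /* The new delivery becomes the link's current delivery */
        pn_delivery_t *dlv = pn_delivery(sender, pn_dtag((const char *)&tag, sizeof(tag)));
        pn_link_send(sender, buf, size);
        pn_link_advance(sender);       /* complete this delivery */
        if (pn_link_snd_settle_mode(sender) == PN_SND_SETTLED)
            pn_delivery_settle(dlv);   /* pre-settled: no acknowledgement expected */
    }

    free(buf);
    pn_message_free(message);
    return err;
}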