Code example #1
0
File: sws.c  Project: threatstack/nanomsg
/*  Ceases further I/O on the underlying socket and prepares to send a
    close handshake on the next receive.

    Assembles a WebSocket Close frame (RFC 6455, section 5.5.1) into
    self->fail_msg: 1 header byte, 1 payload-length byte, an optional
    4-byte masking key (client mode only), then the 2-byte status code
    followed by the human-readable reason. The frame is not transmitted
    here; instate is switched to FAILING so the next receive pass sends
    it and fails the connection.

    'code' is the RFC 6455 close status code; 'reason' must be a
    NUL-terminated string short enough to fit a small payload. */
static void nn_sws_fail_conn (struct nn_sws *self, int code, char *reason)
{
    size_t reason_len;     /*  Length of the reason text (without NUL). */
    size_t payload_len;    /*  2-byte status code + reason text. */
    uint8_t mask [4];      /*  Client-side masking key (RFC 6455 5.3). */
    uint8_t *payload_pos;  /*  Start of the payload inside fail_msg. */

    nn_assert_state (self, NN_SWS_STATE_ACTIVE);

    /*  Destroy any remnant incoming message fragments. */
    nn_msg_array_term (&self->inmsg_array);

    /*  Reason string + 2-byte status code. */
    reason_len = strlen (reason);
    payload_len = reason_len + 2;

    /*  Ensure text is short enough to also include code and framing.
        NOTE(review): presumably NN_SWS_MAX_SMALL_PAYLOAD <= 125 so the
        length fits the single size byte below — confirm in header. */
    nn_assert (payload_len <= NN_SWS_MAX_SMALL_PAYLOAD);

    /*  RFC 6455 section 5.5.1: single-fragment (FIN) Close frame. */
    self->fail_msg [0] = NN_SWS_FRAME_BITMASK_FIN | NN_WS_OPCODE_CLOSE;

    /*  Size of the payload, which is the status code plus the reason. */
    self->fail_msg [1] = payload_len;

    self->fail_msg_len = NN_SWS_FRAME_SIZE_INITIAL;

    /*  Generate 32-bit mask as per RFC 6455 5.3; clients must mask all
        frames. The key is stored right after the two header bytes. */
    if (self->mode == NN_WS_CLIENT) {
        self->fail_msg [1] |= NN_SWS_FRAME_BITMASK_MASKED;
        nn_random_generate (mask, sizeof (mask));
        memcpy (&self->fail_msg [NN_SWS_FRAME_SIZE_INITIAL], mask, 4);
        self->fail_msg_len += 4;
    }

    /*  Payload begins after the header (and the mask, if present). */
    payload_pos = &self->fail_msg [self->fail_msg_len];

    /*  Copy Status Code in network order (big-endian). */
    nn_puts (payload_pos, (uint16_t) code);
    self->fail_msg_len += 2;

    /*  Copy Close Reason immediately following the code. */
    memcpy (payload_pos + 2, reason, reason_len);

    /*  If this is a client, apply mask over the code + reason bytes. */
    if (self->mode == NN_WS_CLIENT) {
        nn_sws_mask_payload (payload_pos, payload_len, mask, NULL);
    }

    self->fail_msg_len += payload_len;

    self->instate = NN_SWS_INSTATE_FAILING;

    /*  On the next recv, the connection will be failed. Why defer
        until the next recv? Semantically, until then, this incoming
        message has not been interpreted, so it's not until then that
        it could be failed. This type of pre-processing is necessary
        to early fail chunked transfers. */
    nn_pipebase_received (&self->pipebase);
}
Code example #2
0
File: req.c  Project: ryanbaylorkillea/nanomsg
/*  Initialise a REQ socket: set up the underlying XREQ socket, seed the
    request-ID counter, prepare the outstanding-request bookkeeping and
    launch the state machine. */
void nn_req_init (struct nn_req *self,
    const struct nn_sockbase_vfptr *vfptr, void *hint)
{
    nn_req_handle handle;

    nn_xreq_init (&self->xreq, vfptr, hint);
    nn_fsm_init_root (&self->fsm, nn_req_handler, nn_req_shutdown,
        nn_sockbase_getctx (&self->xreq.sockbase));
    self->state = NN_REQ_STATE_IDLE;
    self->resend_ivl = NN_REQ_DEFAULT_RESEND_IVL;

    /*  Seed request IDs from a random value so that keys are unlikely
        to clash even when the executable is re-started. */
    nn_random_generate (&self->lastid, sizeof (self->lastid));

    /*  No request is in flight yet. */
    self->task.sent_to = NULL;
    nn_msg_init (&self->task.request, 0);
    nn_msg_init (&self->task.reply, 0);
    nn_timer_init (&self->task.timer, NN_REQ_SRC_RESEND_TIMER, &self->fsm);

    /*  The handle starts out empty for now. */
    memset (&handle, 0, sizeof (handle));
    nn_task_init (&self->task, self->lastid, handle);

    /*  Bring the state machine to life. */
    nn_fsm_start (&self->fsm);
}
Code example #3
0
File: surveyor.c  Project: tpopen/nanomsg
/*  Initialise a SURVEYOR socket on top of XSURVEYOR, seed the survey-ID
    counter and start the state machine. */
static void nn_surveyor_init (struct nn_surveyor *self,
    const struct nn_sockbase_vfptr *vfptr, void *hint)
{
    nn_xsurveyor_init (&self->xsurveyor, vfptr, hint);
    nn_fsm_init_root (&self->fsm, nn_surveyor_handler,
        nn_sockbase_getctx (&self->xsurveyor.sockbase));
    self->state = NN_SURVEYOR_STATE_IDLE;
    self->deadline = NN_SURVEYOR_DEFAULT_DEADLINE;

    /*  Seed survey IDs with a random value so that keys are unlikely to
        clash even when the executable is re-started. */
    nn_random_generate (&self->surveyid, sizeof (self->surveyid));

    nn_msg_init (&self->tosend, 0);
    nn_timer_init (&self->timer, NN_SURVEYOR_SRC_DEADLINE_TIMER, &self->fsm);

    /*  Bring the state machine to life. */
    nn_fsm_start (&self->fsm);
}
Code example #4
0
File: cstream.c  Project: neomantra/nanomsg
/*  Private functions. */
/*  Computes the interval (in ms) to wait before the next reconnection
    attempt and advances the internal exponential back-off state.

    Returns the current retry interval, randomised by up to
    min (interval, 1 second) to avoid reconnection storms. The stored
    retry_ivl is doubled (capped at NN_RECONNECT_IVL_MAX) for the next
    call when a maximum is configured. */
static int nn_cstream_compute_retry_ivl (struct nn_cstream *self)
{
    int reconnect_ivl;
    int reconnect_ivl_max;
    size_t sz;
    int result;
    unsigned int random;

    /*  Get relevant options' values. */
    sz = sizeof (reconnect_ivl);
    nn_epbase_getopt (&self->epbase, NN_SOL_SOCKET, NN_RECONNECT_IVL,
        &reconnect_ivl, &sz);
    nn_assert (sz == sizeof (reconnect_ivl));
    sz = sizeof (reconnect_ivl_max);
    nn_epbase_getopt (&self->epbase, NN_SOL_SOCKET, NN_RECONNECT_IVL_MAX,
        &reconnect_ivl_max, &sz);
    nn_assert (sz == sizeof (reconnect_ivl_max));

    /*  Negative number means that reconnect sequence is starting.
        The reconnect interval in this case is NN_RECONNECT_IVL. */
    if (self->retry_ivl < 0)
        self->retry_ivl = reconnect_ivl;

    /*  Current retry_ivl will be returned to the caller. */
    result = self->retry_ivl;

    /*  Re-compute new retry interval. */
    if (reconnect_ivl_max > 0 && reconnect_ivl_max > reconnect_ivl) {
        self->retry_ivl *= 2;
        if (self->retry_ivl > reconnect_ivl_max)
            self->retry_ivl = reconnect_ivl_max;
    }

    /*  Randomise the result to prevent re-connection storms when network
        and/or server goes down and then up again. This may rise
        the reconnection interval at most twice and at most by one second.
        Guard against result == 0: 'random % result' would otherwise be a
        modulo by zero (undefined behaviour) when NN_RECONNECT_IVL is
        configured as 0. */
    if (result > 0) {
        nn_random_generate (&random, sizeof (random));
        result += (int) (random % (unsigned int) result % 1000);
    }
    return result;
}
Code example #5
0
File: req.c  Project: Neopallium/nanomsg
/*  Set up a REQ socket on top of XREQ. Returns 0 on success, or the
    negative error code propagated from nn_xreq_init on failure. */
static int nn_req_init (struct nn_req *self,
    const struct nn_sockbase_vfptr *vfptr)
{
    int rc;

    rc = nn_xreq_init (&self->xreq, vfptr);
    if (rc < 0)
        return rc;

    self->sink = &nn_req_sink;
    self->state = NN_REQ_STATE_IDLE;
    self->resend_ivl = NN_REQ_DEFAULT_RESEND_IVL;

    /*  Seed the request ID with random bits so that keys are unlikely
        to clash even when the executable is re-started. */
    nn_random_generate (&self->reqid, sizeof (self->reqid));

    nn_timer_init (&self->resend_timer, &self->sink,
        nn_sockbase_getcp (&self->xreq.sockbase));

    return 0;
}
Code example #6
0
File: surveyor.c  Project: Droppe/nanomsg
/*  Set up a SURVEYOR socket on top of XSURVEYOR. Returns 0 on success,
    or the negative error code propagated from nn_xsurveyor_init. */
static int nn_surveyor_init (struct nn_surveyor *self,
    const struct nn_sockbase_vfptr *vfptr)
{
    int rc;

    rc = nn_xsurveyor_init (&self->xsurveyor, vfptr);
    if (rc < 0)
        return rc;

    self->sink = &nn_surveyor_sink;
    self->flags = 0;
    self->deadline = NN_SURVEYOR_DEFAULT_DEADLINE;

    /*  Seed the survey ID with random bits so that keys are unlikely
        to clash even when the executable is re-started. */
    nn_random_generate (&self->surveyid, sizeof (self->surveyid));

    nn_timer_init (&self->deadline_timer, &self->sink,
        nn_sockbase_getcp (&self->xsurveyor.sockbase));

    return 0;
}
Code example #7
0
File: sws.c  Project: threatstack/nanomsg
/*  Start sending a message.

    Pipebase send callback: frames the outgoing message as a single
    binary WebSocket frame (RFC 6455, section 5.2), masks it in client
    mode, and kicks off an asynchronous scatter-gather send of
    header + SP header + body. Always returns 0.

    Precondition: state is ACTIVE and no other send is in flight
    (outstate is IDLE). */
static int nn_sws_send (struct nn_pipebase *self, struct nn_msg *msg)
{
    struct nn_sws *sws;
    struct nn_iovec iov [3];   /*  Frame header, SP header, body. */
    int mask_pos;              /*  Rolling offset into the 4-byte mask. */
    size_t sz;                 /*  Total payload size (sphdr + body). */
    size_t hdrsz;              /*  Bytes of outhdr actually used. */
    uint8_t mask [4];          /*  Client-side masking key. */

    sws = nn_cont (self, struct nn_sws, pipebase);

    nn_assert_state (sws, NN_SWS_STATE_ACTIVE);
    nn_assert (sws->outstate == NN_SWS_OUTSTATE_IDLE);

    /*  Move the message to the local storage. */
    nn_msg_term (&sws->outmsg);
    nn_msg_mv (&sws->outmsg, msg);

    /*  Compose the message header. See RFC 6455, section 5.2. */

    /*  Messages are always sent in a single fragment.
        They may be split up on the way to the peer though. */
    sws->outhdr [0] = NN_WS_OPCODE_BINARY | NN_SWS_FRAME_BITMASK_FIN;
    hdrsz = 1;

    /*  Frame the payload size using the shortest of the three RFC 6455
        length encodings: 1 byte (<= 125), 2 bytes (<= 0xffff) or
        8 bytes. Don't set the mask bit yet. */
    sz = nn_chunkref_size (&sws->outmsg.sphdr) +
        nn_chunkref_size (&sws->outmsg.body);
    if (sz <= 0x7d) {
        sws->outhdr [1] = (uint8_t) sz;
        hdrsz += 1;
    }
    else if (sz <= 0xffff) {
        sws->outhdr [1] = 0x7e;
        nn_puts (&sws->outhdr [2], (uint16_t) sz);
        hdrsz += 3;
    }
    else {
        sws->outhdr [1] = 0x7f;
        nn_putll (&sws->outhdr [2], (uint64_t) sz);
        hdrsz += 9;
    }

    /*  Client-to-server communication has to be masked. See RFC 6455 5.3. */
    if (sws->mode == NN_WS_CLIENT) {

        /*  Generate 32-bit mask and store it in the frame. */
        /*  TODO: This is not a strong source of entropy. However, can we
            afford one wihout exhausting all the available entropy in the
            system at the high message rates? */
        nn_random_generate (mask, 4);
        sws->outhdr [1] |= NN_SWS_FRAME_BITMASK_MASKED;
        memcpy (&sws->outhdr [hdrsz], mask, 4);
        hdrsz += 4;

        /*  Mask payload in place, beginning with the SP header and
            continuing through the body; mask_pos keeps the key aligned
            across the two chunks. */
        /*  TODO: This won't work if the message is shared among muliple
                   transports. We probably want to send the message in multiple
                   operations, masking only as much data at a time. */
        mask_pos = 0;
        nn_sws_mask_payload (nn_chunkref_data (&sws->outmsg.sphdr),
            nn_chunkref_size (&sws->outmsg.sphdr), mask, &mask_pos);
        nn_sws_mask_payload (nn_chunkref_data (&sws->outmsg.body),
            nn_chunkref_size (&sws->outmsg.body), mask, &mask_pos);
    }

    /*  Start async sending: frame header, then SP header, then body. */
    iov [0].iov_base = sws->outhdr;
    iov [0].iov_len = hdrsz;
    iov [1].iov_base = nn_chunkref_data (&sws->outmsg.sphdr);
    iov [1].iov_len = nn_chunkref_size (&sws->outmsg.sphdr);
    iov [2].iov_base = nn_chunkref_data (&sws->outmsg.body);
    iov [2].iov_len = nn_chunkref_size (&sws->outmsg.body);
    nn_usock_send (sws->usock, iov, 3);

    sws->outstate = NN_SWS_OUTSTATE_SENDING;

    /*  If a Close handshake was just sent, it's time to shut down.
        NOTE(review): outhdr [0] is set to BINARY|FIN above, so this
        branch looks unreachable from this function alone — presumably
        kept for safety; confirm against other writers of outhdr. */
    if ((sws->outhdr [0] & NN_SWS_FRAME_BITMASK_OPCODE) ==
        NN_WS_OPCODE_CLOSE) {
        nn_pipebase_stop (&sws->pipebase);
        sws->state = NN_SWS_STATE_CLOSING_CONNECTION;
    }

    return 0;
}
}