Example #1
PRIVATE void rpc__dg_xmit_hdr_only_pkt
(
    rpc_socket_t sock,
    rpc_addr_p_t addr,
    rpc_dg_pkt_hdr_p_t hdrp,
    rpc_dg_ptype_t ptype
)
{
    rpc_socket_iovec_t iov[1];
    rpc_dg_pkt_hdr_t hdr;
    boolean b;

    /*
     * Create a pkt header initialized with the prototype's contents.
     */

    hdr = *hdrp;

    RPC_DG_HDR_SET_VERS(&hdr);
    RPC_DG_HDR_SET_PTYPE(&hdr, ptype);
    RPC_DG_HDR_SET_DREP(&hdr);

    hdr.flags       = 0;
    hdr.flags2      = 0;
    hdr.len         = 0;

    /*
     * Setup the iov and send the packet.
     */

    iov[0].iov_base = (byte_p_t) &hdr;
    iov[0].iov_len  = RPC_C_DG_RAW_PKT_HDR_SIZE;

    rpc__dg_xmit_pkt(sock, addr, iov, 1, &b);
}
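
For context, a caller would typically reply to a received packet by reusing that packet's header as the prototype. The following is only an illustrative sketch, assuming a listener-style context in which sp->sock is the receiving socket, rqe is the received packet's receive queue element, and RPC_C_DG_PT_ACK is the desired packet type; none of these are defined in the example above.

    /*
     * Hypothetical call site: acknowledge a received packet by sending a
     * header-only pkt back to its sender, using the received header as
     * the prototype.  "sp" and "rqe" are assumed here.
     */
    rpc__dg_xmit_hdr_only_pkt(sp->sock, (rpc_addr_p_t) &rqe->from,
                              rqe->hdrp, RPC_C_DG_PT_ACK);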
Example #2
PRIVATE void rpc__dg_xmit_error_body_pkt
(
    rpc_socket_t sock,
    rpc_addr_p_t addr,
    rpc_dg_pkt_hdr_p_t hdrp,
    rpc_dg_ptype_t ptype,
    unsigned32 errst
)
{
    rpc_socket_iovec_t iov[2];
    rpc_dg_pkt_hdr_t hdr;
#ifndef MISPACKED_HDR
    rpc_dg_epkt_body_t body;
#else
    rpc_dg_raw_epkt_body_t body;
#endif
    boolean b;

    /*
     * Create a pkt header initialized with the prototype's contents.
     */

    hdr = *hdrp;

    RPC_DG_HDR_SET_VERS(&hdr);
    RPC_DG_HDR_SET_PTYPE(&hdr, ptype);
    RPC_DG_HDR_SET_DREP(&hdr);

    hdr.flags       = 0;
    hdr.flags2      = 0;
    hdr.len         = RPC_C_DG_RAW_EPKT_BODY_SIZE;


    /*
     * Create the error body packet's body.
     */

#ifndef MISPACKED_HDR
    body.st = errst;
#else
#error "extract and pack the 32 bit status code into the body" /*!!! */
#endif

    /*
     * Setup the iov and send the packet.
     */

    iov[0].iov_base = (byte_p_t) &hdr;
    iov[0].iov_len  = RPC_C_DG_RAW_PKT_HDR_SIZE;
    iov[1].iov_base = (byte_p_t) &body;
    iov[1].iov_len  = hdr.len;

    rpc__dg_xmit_pkt(sock, addr, iov, 2, &b);

    RPC_DBG_GPRINTF(("(rpc__dg_xmit_error_body_pkt) \"%s\" - st 0x%x sent\n",
        rpc__dg_pkt_name(ptype), errst));
}
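
An illustrative call site follows, assuming the same listener-style context (sock, rqe) as in the note after Example #1; RPC_C_DG_PT_REJECT and nca_s_unk_if appear elsewhere in this listing, but the call itself is a sketch, not the runtime's actual code path.

    /*
     * Hypothetical call site: immediately reject the request that arrived
     * in "rqe" on socket "sock", reporting nca_s_unk_if to the client.
     */
    rpc__dg_xmit_error_body_pkt(sock, (rpc_addr_p_t) &rqe->from,
                                rqe->hdrp, RPC_C_DG_PT_REJECT, nca_s_unk_if);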
Example #3
PRIVATE void rpc__dg_scall_reinit
(
    rpc_dg_scall_p_t scall,
    rpc_dg_sock_pool_elt_p_t sp,
    rpc_dg_recvq_elt_p_t rqe
)
{
    rpc_dg_pkt_hdr_p_t hdrp = rqe->hdrp;
    unsigned32 st;
    boolean  maybe = RPC_DG_HDR_FLAG_IS_SET(rqe->hdrp, RPC_C_DG_PF_MAYBE);

    RPC_LOCK_ASSERT(0);
    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    /*
     * Re-initialize the common call handle fields
     */

    RPC_DG_CALL_REINIT(&scall->c);

    scall->c.c.u.server.cancel.accepting    = true;
    scall->c.c.u.server.cancel.queuing      = true;
    scall->c.c.u.server.cancel.had_pending  = false;
    scall->c.c.u.server.cancel.count        = 0;

    /*
     * Typically, subsequent calls on a given actid will be for the same
     * naf and network address and received over the same server socket
     * from the same client socket (netaddr/endpoint), but alas, we can't
     * count on that...
     */

    /*
     * Detect naf changes and reinit cached naf-specific info.
     *
     * The max_frag_size is really associated with the more specific
     * "network address / interface" rather than with just the naf
     * (actually, it depends on the even lower-level path through the
     * network, even if the peer address doesn't change).  However,
     * since the runtime currently manages these values as constant for
     * a particular naf (mostly due to the inability of system APIs
     * and/or network transports to provide this dynamic information),
     * we really only have to reset them if the naf changed (the
     * significance of this is that a "different netaddr" check would
     * be more costly).
     */
    if (scall->c.addr == NULL
        || rqe->from.rpc_protseq_id != scall->c.addr->rpc_protseq_id)
    {
        /*
         * Update to the current client address.
         */
        rpc__naf_addr_overcopy((rpc_addr_p_t) &rqe->from, &scall->c.addr, &st);

        /*
         * Initialize the max_frag_size field for the conversation with this
         * client.
         */
        RPC_DG_CALL_SET_MAX_FRAG_SIZE(&scall->c, &st);
        RPC_DBG_PRINTF(rpc_e_dbg_recv, 7,
                       ("(rpc__dg_scall_reinit) Set max fs %u\n",
                        scall->c.xq.max_frag_size));
    }
    else
    {
        /*
         * Update to the (typically unchanged) current client address.
         * (Only its endpoint may change.)
         */
        rpc__naf_addr_overcopy((rpc_addr_p_t) &rqe->from, &scall->c.addr, &st);
    }

    /*
     * Detect received socket changes and reinit cached socket specific info
     * (the scall may not yet have a cached sock ref or it may be different
     * from the current one).
     */
    if (scall->c.sock_ref != sp)
    {
        if (scall->c.sock_ref != NULL)
            rpc__dg_network_sock_release(&scall->c.sock_ref);

        /*
         * This reference update is a little tricky.  We need to be sure
         * that the socket is not closed before we get a chance to record
         * our reference.  We can do this safely because we are the
         * listener thread, and we know that the listener thread
         * has a reference to the socket.  If the socket had failed,
         * and we had closed it, we wouldn't be here right now.
         */
        scall->c.sock_ref = sp;
        rpc__dg_network_sock_reference(sp);

        /*
         * Initialize the max_rcv_tsdu and max_snd_tsdu fields
         * for the conversation with this client.
         */
        rpc__naf_inq_max_tsdu(scall->c.addr->rpc_protseq_id,
                              &scall->c.xq.max_rcv_tsdu, &st);
        scall->c.xq.max_snd_tsdu = scall->c.xq.max_rcv_tsdu;
        scall->c.xq.max_rcv_tsdu = MIN(scall->c.xq.max_rcv_tsdu,
                                       scall->c.sock_ref->rcvbuf);
        scall->c.xq.max_snd_tsdu = MIN(scall->c.xq.max_snd_tsdu,
                                       scall->c.sock_ref->sndbuf);

        RPC_DBG_PRINTF(rpc_e_dbg_recv, 7,
                    ("(rpc__dg_scall_reinit) Set rcv tsdu %u, snd tsdu %u\n",
                        scall->c.xq.max_rcv_tsdu, scall->c.xq.max_snd_tsdu));

        /*
         * Reinit cached socket-specific information.
         */
        RPC_DG_RBUF_SIZE_TO_WINDOW_SIZE(sp->rcvbuf,
                                        sp->is_private,
                                        scall->c.xq.max_frag_size,
                                        scall->c.rq.window_size);
        RPC_DBG_PRINTF(rpc_e_dbg_recv, 7,
                ("(rpc__dg_scall_reinit) Set ws %u, rcvbuf %u, max fs %u\n",
              scall->c.rq.window_size, sp->rcvbuf, scall->c.xq.max_frag_size));
    }

    if (scall->c.is_cbk && scall->cbk_ccall != NULL)
    {
        /*
         * This is essentially a turnaround. The client, which is waiting
         * for a response, becomes the receiver.
         *
         * We inherit high_rcv_frag_size and snd_frag_size from the original
         * ccall.
         *
         * Note: If this is the initial allocation of the callback scall,
         * is_cbk is still false. rpc__dg_scall_cbk_alloc() will handle that
         * case.
         */
        scall->c.rq.high_rcv_frag_size =
            scall->cbk_ccall->c.rq.high_rcv_frag_size;
        scall->c.xq.snd_frag_size = scall->cbk_ccall->c.xq.snd_frag_size;

        /*
         * Also we inherit the reservation from the original ccall, which
         * gives us enough packets for receiving fragments.
         */
        scall->c.n_resvs = scall->cbk_ccall->c.n_resvs;
    }

    RPC_DBG_PRINTF(rpc_e_dbg_xmit, 6,
                   ("(rpc__dg_scall_reinit) Set snd fs %lu, high rcv fs %lu\n",
                    scall->c.xq.snd_frag_size, scall->c.rq.high_rcv_frag_size));

    /*
     * Re-initialize the fields of the common call handle header that
     * are really part of the prototype packet header.
     */

    scall->c.call_seq           = hdrp->seq;
    scall->c.high_seq           = hdrp->seq;
    scall->c.call_if_id         = hdrp->if_id;
    scall->c.call_if_vers       = hdrp->if_vers;
    scall->c.call_ihint         = hdrp->ihint;
    scall->c.call_opnum         = hdrp->opnum;
    scall->c.call_object        = hdrp->object;

    /*
     * Re-initialize some remaining fields in the prototype packet header.
     * Note: the ptype may not currently be "response" due to the way
     * we handle fault pkts.
     */

    scall->c.xq.base_flags      = 0;
    scall->c.xq.base_flags2     = 0;
    scall->c.xq.hdr.flags       = 0;
    scall->c.xq.hdr.flags2      = 0;

    RPC_DG_HDR_SET_PTYPE(&scall->c.xq.hdr, RPC_C_DG_PT_RESPONSE);

    /*
     * Reset the call state to the initial state.
     */

    RPC_DG_CALL_SET_STATE(&scall->c, rpc_e_dg_cs_init);

    scall->call_is_setup            = false;
    scall->has_call_executor_ref    = false;
    scall->call_is_queued           = false;
    scall->client_needs_sboot       = false;    /* Really "unknown" */

    scall->c.com_timeout_knob       = rpc_mgmt_inq_server_com_timeout();

    /*
     * If the new call uses maybe semantics, and this scall is already
     * associated with an SCTE, then we may need to reposition this scall
     * within the SCTE.
     */
    if (maybe && scall->scte != NULL && scall->scte->scall == scall)
    {
        rpc_dg_sct_elt_p_t scte = scall->scte;

        RPC_DBG_PRINTF(rpc_e_dbg_general, 3, (
            "(rpc__dg_scall_reinit) using cached scall for maybe call\n"));

        scall->c.next = (rpc_dg_call_p_t) scte->maybe_chain;
        scte->maybe_chain = scall;
        scte->scall = NULL;
    }
}
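
The lock asserts at the top of rpc__dg_scall_reinit imply the expected calling pattern: the listener thread holds both the global lock and the call lock while reusing a cached scall for a new call. A sketch under that assumption (the RPC_LOCK / RPC_DG_CALL_LOCK macros are taken to be the counterparts of the asserts shown above):

    /*
     * Hypothetical call site: reuse a cached scall for a new call on the
     * same activity, holding the locks that the reinit routine asserts.
     */
    RPC_LOCK(0);
    RPC_DG_CALL_LOCK(&scall->c);
    rpc__dg_scall_reinit(scall, sp, rqe);
    RPC_DG_CALL_UNLOCK(&scall->c);
    RPC_UNLOCK(0);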
Example #4
INTERNAL void queue_mapped_reject
(
    rpc_dg_scall_p_t scall,
    unsigned32 st
)
{
    rpc_iovector_t iovec;
    unsigned32 tst, mst;

    switch ((int)st)
    {
        case rpc_s_who_are_you_failed:
                                        mst = nca_s_who_are_you_failed;  break;
        case rpc_s_comm_failure:        mst = nca_s_comm_failure;        break;
        case rpc_s_unknown_if:          mst = nca_s_unk_if;              break;
        case rpc_s_protocol_error:      mst = nca_s_proto_error;         break;
        case rpc_s_unsupported_type:    mst = nca_s_unsupported_type;    break;
        case rpc_s_manager_not_entered: mst = nca_s_manager_not_entered; break;
        case rpc_s_op_rng_error:        mst = nca_s_op_rng_error;        break;
        case rpc_s_call_orphaned:       mst = nca_s_unspec_reject;       break;
        case rpc_s_unknown_reject:      mst = nca_s_unspec_reject;       break;
        case rpc_s_unknown_mgr_type:    mst = nca_s_unsupported_type;    break;
        default:
            RPC_DBG_GPRINTF(("(queue_mapped_reject) unknown status; st=0x%x\n", st));
            mst = nca_s_unspec_reject;
            break;
    }

    /*
     * Build the iovector for calling transmit_int
     */

    iovec.num_elt = 1;
    iovec.elt[0].buff_dealloc = NULL;
    iovec.elt[0].flags = rpc_c_iovector_elt_reused;
    iovec.elt[0].data_addr = (byte_p_t) &mst;
    iovec.elt[0].data_len = sizeof(mst);

    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    /*
     * Purge the recvq since it won't be used after this.  The recvq
     * may currently have lots of rqes on it and freeing it now will
     * help pkt rationing.  It's likely that the recvq is already empty;
     * however, this is the slow path, so do it (again) just to be sure.
     */

    rpc__dg_recvq_free(&scall->c.rq);

    /*
     * Toss any pending xmitq pkts and add the fault_info to the xmit
     * queue just as if it were a response (but whack the proto pkt header
     * to the fault pkt type).  The call will now be in the xmit state if it
     * wasn't already there.  Defer the sending of the fault until
     * the "end of the call" (execute_call).  This prevents the client
     * from receiving the complete response, completing the call and
     * generating a new one while the server still thinks the call is
     * not complete (thinking it must have dropped an ack,...).  The
     * fault is really just a special response pkt.
     *
     * This routine is called by the sstub (the thread executing the
     * call) so there's no need to signal the call.  We don't actually
     * want the call's status to be set to an error value; the server
     * runtime wants to still complete processing the call which involves
     * sending the fault response to the client (instead of any further
     * data response).
     *
     * Subsequent fault response retransmissions will occur just as if
     * this were a "normal" call response as well as in reply to a ping.
     * Of course, faults for idempotent calls don't get remembered or
     * retransmitted.
     */

    RPC_DBG_GPRINTF(("(queue_mapped_reject) st=0x%x => 0x%x [%s]\n",
        st, mst, rpc__dg_act_seq_string(&scall->c.xq.hdr)));

    RPC_DG_XMITQ_REINIT(&scall->c.xq, &scall->c);
    RPC_DG_HDR_SET_PTYPE(&scall->c.xq.hdr, RPC_C_DG_PT_REJECT);

    rpc__dg_call_transmit_int(&scall->c, &iovec, &tst);
    /*
     * The transmit may fail because the call is already orphaned.
     * It may fail for some other reason as well.  In either case,
     * the client is not going to get a response.  Just keep
     * falling through (other calls may fail as well) and clean up.
     */
}
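
Since queue_mapped_reject is INTERNAL, it is only reachable from within this module. An illustrative invocation, assuming the caller has already diagnosed the failure and holds the call lock (the lock macros are taken to be the counterparts of the assert used above):

    /*
     * Hypothetical call site: queue a reject for a call whose interface
     * could not be found; rpc_s_unknown_if maps to nca_s_unk_if above.
     */
    RPC_DG_CALL_LOCK(&scall->c);
    queue_mapped_reject(scall, rpc_s_unknown_if);
    RPC_DG_CALL_UNLOCK(&scall->c);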