Example No. 1
INTERNAL void fwd_reject (
    rpc_dg_sock_pool_elt_p_t sp,
    rpc_dg_recvq_elt_p_t rqe)
{

    RPC_DBG_PRINTF(rpc_e_dbg_general, 10,
        ("(fwd_forward) rejecting (ptype=%s) [%s]\n",
        rpc__dg_pkt_name(RPC_DG_HDR_INQ_PTYPE(rqe->hdrp)),
        rpc__dg_act_seq_string(rqe->hdrp)));

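    /*
     * Never send a reject in response to a broadcast request; every
     * server that heard the broadcast would otherwise fire back a
     * reject at the client.
     */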
    if (! RPC_DG_HDR_FLAG_IS_SET(rqe->hdrp, RPC_C_DG_PF_BROADCAST))
    {
        rpc__dg_xmit_error_body_pkt(
            sp->sock, (rpc_addr_p_t) &rqe->from, rqe->hdrp, RPC_C_DG_PT_REJECT,
            nca_s_unk_if);      /* !!! status could be better */
    }
}
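For context, here is a minimal, hypothetical sketch (not from the original source) of a dispatch path that might call fwd_reject when a packet arrives for an unknown interface; fwd_if_is_known() and fwd_pkt_dispatch() are assumed names used only for illustration:

INTERNAL boolean fwd_if_is_known(rpc_dg_pkt_hdr_p_t hdrp);

INTERNAL void fwd_pkt_dispatch (
    rpc_dg_sock_pool_elt_p_t sp,
    rpc_dg_recvq_elt_p_t rqe)
{
    /*
     * Reject packets whose interface this forwarder doesn't know;
     * fwd_reject itself suppresses the reply for broadcast requests.
     */
    if (! fwd_if_is_known(rqe->hdrp))
    {
        fwd_reject(sp, rqe);
        return;
    }

    /* ... otherwise forward the packet toward the real endpoint ... */
}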
Example No. 2
INTERNAL void rpc__dg_scall_timer
(
    dce_pointer_t p
)
{
    rpc_dg_scall_p_t scall = (rpc_dg_scall_p_t) p;
    static rpc_clock_t rpc_c_dg_scall_max_idle_time = RPC_CLOCK_SEC(10);
    static com_timeout_params_t scall_com_timeout_params[] = {
        /*  0 min */        {RPC_CLOCK_SEC(2)},
        /*  1 */            {RPC_CLOCK_SEC(4)},
        /*  2 */            {RPC_CLOCK_SEC(8)},
        /*  3 */            {RPC_CLOCK_SEC(15)},
        /*  4 */            {RPC_CLOCK_SEC(30)},
        /*  5 def */        {RPC_CLOCK_SEC(2*30)},
        /*  6 */            {RPC_CLOCK_SEC(3*30)},
        /*  7 */            {RPC_CLOCK_SEC(5*30)},
        /*  8 */            {RPC_CLOCK_SEC(9*30)},
        /*  9 */            {RPC_CLOCK_SEC(17*30)},
        /* 10 infinite */   {RPC_CLOCK_SEC(0)}
    };

    RPC_DG_CALL_LOCK(&scall->c);

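    /*
     * If we've been asked to stop (stop_timer is set under the call
     * lock), clear this timer and drop the timer's reference to the
     * scall; RPC_DG_SCALL_RELEASE also releases the call lock.
     */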
    if (scall->c.stop_timer)
    {
        rpc__timer_clear(&scall->c.timer);
        RPC_DG_SCALL_RELEASE(&scall);
        return;
    }

    switch (scall->c.state)
    {
        case rpc_e_dg_cs_init:
            /*
             * Nothing to do in this state.
             */
            break;

        case rpc_e_dg_cs_idle:
            /*
             * If the call has been idle for a long time, stop caching
             * it.  In the case of a callback SCALL, do nothing; the
             * originating CCALL's processing dictates when this cached
             * SCALL finally gets freed.  If for some reason the
             * uncache couldn't complete, we'll try again on the next tick.
             */

            if (! scall->c.is_cbk)
            {
                if (rpc__clock_aged(scall->c.state_timestamp,
                                    rpc_c_dg_scall_max_idle_time))
                {
                    if (scall_uncache(scall))
                        return;
                }
            }
            break;

        case rpc_e_dg_cs_xmit:
            /*
             * Retransmit frags if necessary.
             */
            rpc__dg_call_xmitq_timer(&scall->c);
            break;

        case rpc_e_dg_cs_recv:
            /*
             * Check to see if the client is alive.  If we have not
             * received anything from the client in "max_recv_idle_time" and
             * the receive stream is not complete, assume that the client
             * is dead.  In the case of a callback SCALL, do nothing;
             * the originating CCALL's processing dictates when this
             * cached SCALL finally gets freed.
             */
            if (! scall->c.is_cbk)
            {
                if (! scall->c.rq.all_pkts_recvd
                    && rpc__clock_aged
                        (scall->c.last_rcv_timestamp,
                         scall_com_timeout_params[scall->c.com_timeout_knob]
                                                    .max_recv_idle_time)
                    && scall->c.com_timeout_knob != rpc_c_binding_infinite_timeout)
                {
                    boolean b;

                    /*
                     * We need the global lock because we are about to
                     * modify an SCT entry. We have to violate the locking
                     * hierarchy to get the global lock.  If we can't
                     * get the global lock, just give up.  We'll try
                     * again later.  Otherwise, we will uncache the scall
                     * and stop its timer processing.
                     */

                    RPC_TRY_LOCK(&b);
                    if (b)
                    {
                        rpc__dg_scall_orphan_call(scall);
                        RPC_DG_CALL_UNLOCK(&scall->c);
                        RPC_UNLOCK(0);
                        return;
                    }
                }
            }
            break;

        case rpc_e_dg_cs_final:
            /*
             * Retransmit response if necessary; eventually give up and change to
             * the idle state.
             */
            rpc__dg_call_xmitq_timer(&scall->c);
            if (scall->c.status != rpc_s_ok
                && ! RPC_DG_HDR_FLAG_IS_SET(&scall->c.xq.hdr, RPC_C_DG_PF_IDEMPOTENT))
            {
                RPC_DG_CALL_SET_STATE(&scall->c, rpc_e_dg_cs_idle);
                if (scall->c.xq.head != NULL)
                    rpc__dg_xmitq_free(&scall->c.xq, &scall->c);
            }
            break;

        case rpc_e_dg_cs_orphan:
            /*
             * Once the orphaned call has completed, free up the
             * remaining resources.  As always, callbacks complicate
             * things, yielding a total of three scall scenarios:
             *      a)  a normal (server side) scall that has never
             *          been used in making a callback to a client
             *          (!scall->is_cbk && scall->cbk_ccall == NULL)
             *      b)  a normal (server side) scall that HAS
             *          been used in making a callback to a client
             *          (!scall->is_cbk && scall->cbk_ccall != NULL)
             *      c)  a callback scall (client side) that was the
             *          callback being executed
             *          (scall->is_cbk == true)
             *          (implicitly scall->cbk_ccall != NULL)
             *
             * The appropriate time for freeing up the remaining resources
             * is when the call executor (rpc__dg_execute_call) has
             * completed.  While it is possible to infer this condition
             * by examination of the scall's reference counts, it would
             * make this code fragment intolerably dependent on knowing
             * what/who has references to the scall under the various
             * scenarios.  Therefore we introduce and use the new flag:
             * scall->has_call_executor_ref.
             *
             * If for some reason the uncache couldn't complete, we'll
             * try again on the next tick.
             */

            if (! scall->has_call_executor_ref)
            {
                if (scall_uncache(scall))
                    return;
            }
            break;
    }

    RPC_DG_CALL_UNLOCK(&scall->c);
}
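The timer relies on a particular contract for scall_uncache(), which lives elsewhere in this module: a true return means the scall was uncached and its lock has already been dropped (so the timer must return immediately without unlocking), while false means the uncache couldn't complete and will be retried on the next tick. A skeleton of that assumed contract, illustrative only:

INTERNAL boolean scall_uncache (
    rpc_dg_scall_p_t scall)
{
    boolean got_global;

    /*
     * Uncaching modifies the SCT, which requires the global lock; we
     * can't block for it without violating the locking hierarchy, so
     * just try for it and give up on failure.
     */
    RPC_TRY_LOCK(&got_global);
    if (! got_global)
        return false;               /* retry on the next timer tick */

    rpc__timer_clear(&scall->c.timer);
    /* ... unlink the scall from its SCTE and release references ... */
    RPC_DG_CALL_UNLOCK(&scall->c);
    RPC_UNLOCK(0);
    return true;                    /* caller must not touch the scall */
}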
Example No. 3
PRIVATE void rpc__dg_scall_reinit
(
    rpc_dg_scall_p_t scall,
    rpc_dg_sock_pool_elt_p_t sp,
    rpc_dg_recvq_elt_p_t rqe
)
{
    rpc_dg_pkt_hdr_p_t hdrp = rqe->hdrp;
    unsigned32 st;
    boolean  maybe = RPC_DG_HDR_FLAG_IS_SET(rqe->hdrp, RPC_C_DG_PF_MAYBE);

    RPC_LOCK_ASSERT(0);
    RPC_DG_CALL_LOCK_ASSERT(&scall->c);

    /*
     * Re-initialize the common call handle fields
     */

    RPC_DG_CALL_REINIT(&scall->c);

    scall->c.c.u.server.cancel.accepting    = true;
    scall->c.c.u.server.cancel.queuing      = true;
    scall->c.c.u.server.cancel.had_pending  = false;
    scall->c.c.u.server.cancel.count        = 0;

    /*
     * Typically, subsequent calls on a given actid will be for the same
     * naf and network address and received over the same server socket
     * from the same client socket (netaddr/endpoint), but alas, we can't
     * count on that...
     */

    /*
     * Detect naf changes and reinit cached naf-specific info.
     *
     * The max_frag_size is really associated with the more specific
     * "network address / interface" pair rather than just the naf
     * (strictly speaking, it depends on the even lower-level path
     * through the network, even when the peer address doesn't change).
     * However, since the runtime currently manages these values as
     * constant for a particular naf (mostly due to the inability of
     * system APIs and/or network transports to provide this dynamic
     * information), we only have to reset them when the naf changes
     * (the significance being that a "different netaddr" check would
     * be more costly).
     */
    if (scall->c.addr == NULL
        || rqe->from.rpc_protseq_id != scall->c.addr->rpc_protseq_id)
    {
        /*
         * Update to the current client address.
         */
        rpc__naf_addr_overcopy((rpc_addr_p_t) &rqe->from, &scall->c.addr, &st);

        /*
         * Initialize the max_frag_size field for the conversation with this
         * client.
         */
        RPC_DG_CALL_SET_MAX_FRAG_SIZE(&scall->c, &st);
        RPC_DBG_PRINTF(rpc_e_dbg_recv, 7,
                       ("(rpc__dg_scall_reinit) Set max fs %u\n",
                        scall->c.xq.max_frag_size));
    }
    else
    {
        /*
         * Update to the (typically unchanged) current client address.
         * (Only its endpoint may change.)
         */
        rpc__naf_addr_overcopy((rpc_addr_p_t) &rqe->from, &scall->c.addr, &st);
    }

    /*
     * Detect received socket changes and reinit cached socket specific info
     * (the scall may not yet have a cached sock ref or it may be different
     * from the current one).
     */
    if (scall->c.sock_ref != sp)
    {
        if (scall->c.sock_ref != NULL)
            rpc__dg_network_sock_release(&scall->c.sock_ref);

        /*
         * This reference update is a little tricky.  We need to be sure
         * that the socket is not closed before we get a chance to record
         * our reference.  We can do this safely because we are the
     * listener thread, and we know that the listener thread
         * has a reference to the socket.  If the socket had failed,
         * and we had closed it, we wouldn't be here right now.
         */
        scall->c.sock_ref = sp;
        rpc__dg_network_sock_reference(sp);

        /*
         * Initialize the max_rcv_tsdu and max_snd_tsdu fields
         * for the conversation with this client.
         */
        rpc__naf_inq_max_tsdu(scall->c.addr->rpc_protseq_id,
                              &scall->c.xq.max_rcv_tsdu, &st);
        scall->c.xq.max_snd_tsdu = scall->c.xq.max_rcv_tsdu;
        scall->c.xq.max_rcv_tsdu = MIN(scall->c.xq.max_rcv_tsdu,
                                       scall->c.sock_ref->rcvbuf);
        scall->c.xq.max_snd_tsdu = MIN(scall->c.xq.max_snd_tsdu,
                                       scall->c.sock_ref->sndbuf);

        RPC_DBG_PRINTF(rpc_e_dbg_recv, 7,
                    ("(rpc__dg_scall_reinit) Set rcv tsdu %u, snd tsdu %u\n",
                        scall->c.xq.max_rcv_tsdu, scall->c.xq.max_snd_tsdu));

        /*
         * Reinit cached socket-specific information.
         */
        RPC_DG_RBUF_SIZE_TO_WINDOW_SIZE(sp->rcvbuf,
                                        sp->is_private,
                                        scall->c.xq.max_frag_size,
                                        scall->c.rq.window_size);
        RPC_DBG_PRINTF(rpc_e_dbg_recv, 7,
                ("(rpc__dg_scall_reinit) Set ws %u, rcvbuf %u, max fs %u\n",
              scall->c.rq.window_size, sp->rcvbuf, scall->c.xq.max_frag_size));
    }

    if (scall->c.is_cbk && scall->cbk_ccall != NULL)
    {
        /*
         * This is essentially a turnaround. The client, which is waiting
         * for a response, becomes the receiver.
         *
         * We inherit high_rcv_frag_size and snd_frag_size from the original
         * ccall.
         *
         * Note: If this is the initial allocation of the callback scall,
         * is_cbk is still false. rpc__dg_scall_cbk_alloc() will handle that
         * case.
         */
        scall->c.rq.high_rcv_frag_size =
            scall->cbk_ccall->c.rq.high_rcv_frag_size;
        scall->c.xq.snd_frag_size = scall->cbk_ccall->c.xq.snd_frag_size;

        /*
         * Also we inherit the reservation from the original ccall, which
         * gives us enough packets for receiving fragments.
         */
        scall->c.n_resvs = scall->cbk_ccall->c.n_resvs;
    }

    RPC_DBG_PRINTF(rpc_e_dbg_xmit, 6,
                   ("(rpc__dg_scall_reinit) Set snd fs %lu, high rcv fs %lu\n",
                    scall->c.xq.snd_frag_size, scall->c.rq.high_rcv_frag_size));

    /*
     * Re-initialize the fields of the common call handle header that
     * are really part of the prototype packet header.
     */

    scall->c.call_seq           = hdrp->seq;
    scall->c.high_seq           = hdrp->seq;
    scall->c.call_if_id         = hdrp->if_id;
    scall->c.call_if_vers       = hdrp->if_vers;
    scall->c.call_ihint         = hdrp->ihint;
    scall->c.call_opnum         = hdrp->opnum;
    scall->c.call_object        = hdrp->object;

    /*
     * Re-initialize some remaining fields in the prototype packet header.
     * Note: the ptype may not currently be "response" due to the way
     * we handle fault pkts.
     */

    scall->c.xq.base_flags      = 0;
    scall->c.xq.base_flags2     = 0;
    scall->c.xq.hdr.flags       = 0;
    scall->c.xq.hdr.flags2      = 0;

    RPC_DG_HDR_SET_PTYPE(&scall->c.xq.hdr, RPC_C_DG_PT_RESPONSE);

    /*
     * Reset the call state to the initial state.
     */

    RPC_DG_CALL_SET_STATE(&scall->c, rpc_e_dg_cs_init);

    scall->call_is_setup            = false;
    scall->has_call_executor_ref    = false;
    scall->call_is_queued           = false;
    scall->client_needs_sboot       = false;    /* Really "unknown" */

    scall->c.com_timeout_knob       = rpc_mgmt_inq_server_com_timeout();

    /*
     * If the new call uses maybe semantics, and this scall is already
     * associated with an SCTE, then we may need to reposition this scall
     * within the SCTE.
     */
    if (maybe && scall->scte != NULL && scall->scte->scall == scall)
    {
        rpc_dg_sct_elt_p_t scte = scall->scte;

        RPC_DBG_PRINTF(rpc_e_dbg_general, 3, (
            "(rpc__dg_scall_reinit) using cached scall for maybe call\n"));

        scall->c.next = (rpc_dg_call_p_t) scte->maybe_chain;
        scte->maybe_chain = scall;
        scte->scall = NULL;
    }
}
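A hypothetical illustration (the helper below is assumed, not in the original source) of how a later lookup might use the maybe chain built above: the SCTE holds at most one "current" scall plus a singly linked chain of maybe-call scalls, threaded through the common call handle's next pointer:

INTERNAL rpc_dg_scall_p_t scte_find_scall (
    rpc_dg_sct_elt_p_t scte,
    unsigned32 seq)
{
    rpc_dg_scall_p_t s;

    /* The current (non-maybe) scall, if any, is checked first. */
    if (scte->scall != NULL && scte->scall->c.call_seq == seq)
        return scte->scall;

    /* Then walk the maybe chain pushed by rpc__dg_scall_reinit. */
    for (s = scte->maybe_chain; s != NULL; s = (rpc_dg_scall_p_t) s->c.next)
    {
        if (s->c.call_seq == seq)
            return s;
    }

    return NULL;
}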
Example No. 4
PRIVATE rpc_dg_scall_p_t rpc__dg_scall_alloc
(
    rpc_dg_sct_elt_p_t scte,
    rpc_dg_sock_pool_elt_p_t sp,
    rpc_dg_recvq_elt_p_t rqe
)
{
    rpc_dg_scall_p_t scall;
    unsigned32 st ATTRIBUTE_UNUSED;
    static rpc_clock_t rpc_c_dg_scall_timer_freq_init = RPC_CLOCK_SEC(1);
    boolean maybe   = RPC_DG_HDR_FLAG_IS_SET(rqe->hdrp, RPC_C_DG_PF_MAYBE);

    RPC_LOCK_ASSERT(0);

    RPC_MEM_ALLOC(scall, rpc_dg_scall_p_t, sizeof *scall,
            RPC_C_MEM_DG_SCALL, RPC_C_MEM_NOWAIT);

    /*
     * Initialize the common SCALL handle fields (and LOCK the SCALL)
     */

    scall_init(scall, sp, rqe);

    /*
     * The rest is specific to normal (non-callback) SCALLs.
     */

    scall->cbk_ccall        = NULL;
    scall->c.actid_hash     = rpc__dg_uuid_hash(&scte->actid);

    /*
     * Initialize the server specific call handle fields
     */

    /*
     * Setup the SCTE / SCALL cross linkage.
     */

    RPC_DG_SCT_REFERENCE(scte);
    scall->scte = scte;

    RPC_DG_CALL_REFERENCE(&scall->c);
    if (! maybe)
        scte->scall = scall;
    else
    {
        RPC_DBG_PRINTF(rpc_e_dbg_general, 3, (
            "(rpc__dg_scall_alloc) putting call on maybe chain\n"));
        scall->c.next = (rpc_dg_call_p_t) scte->maybe_chain;
        scte->maybe_chain = scall;
    }

    /*
     * Initialize the fields of the common call handle header that
     * are really part of the prototype packet header.
     */

    scall->c.call_actid     = scte->actid;
    scall->c.call_ahint     = scte->ahint;
    scall->c.is_cbk         = false;

    /*
     * Copy over authentication/keying information.
     */
    scall->c.auth_epv       = scte->auth_epv;
    scall->c.key_info       = scte->key_info;
    if (scall->c.key_info != NULL)
        RPC_DG_KEY_REFERENCE(scall->c.key_info);

    RPC_DG_CALL_SET_TIMER(&scall->c, rpc__dg_scall_timer, rpc_c_dg_scall_timer_freq_init);

    return(scall);
}
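A hypothetical usage sketch (handle_new_call() is a made-up name): the listener thread, already holding the global lock, either allocates a fresh scall for an SCTE or reinitializes the cached one. Note that rpc__dg_scall_alloc returns with the scall locked, while the reuse path must take the call lock itself:

INTERNAL void handle_new_call (
    rpc_dg_sct_elt_p_t scte,
    rpc_dg_sock_pool_elt_p_t sp,
    rpc_dg_recvq_elt_p_t rqe)
{
    rpc_dg_scall_p_t scall = scte->scall;

    if (scall == NULL)
    {
        /* No cached scall for this activity; allocate one (LOCKED). */
        scall = rpc__dg_scall_alloc(scte, sp, rqe);
    }
    else
    {
        /* Reuse the cached scall for the new call on this activity. */
        RPC_DG_CALL_LOCK(&scall->c);
        rpc__dg_scall_reinit(scall, sp, rqe);
    }

    /* ... queue the rqe on the call and wake/spawn an executor ... */

    RPC_DG_CALL_UNLOCK(&scall->c);
}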