Example #1
INTERNAL void rpc__dg_scall_timer
(
    dce_pointer_t p
)
{
    rpc_dg_scall_p_t scall = (rpc_dg_scall_p_t) p;
    static rpc_clock_t rpc_c_dg_scall_max_idle_time = RPC_CLOCK_SEC(10);
    static com_timeout_params_t scall_com_timeout_params[] = {
        /*  0 min */        {RPC_CLOCK_SEC(2)},
        /*  1 */            {RPC_CLOCK_SEC(4)},
        /*  2 */            {RPC_CLOCK_SEC(8)},
        /*  3 */            {RPC_CLOCK_SEC(15)},
        /*  4 */            {RPC_CLOCK_SEC(30)},
        /*  5 def */        {RPC_CLOCK_SEC(2*30)},
        /*  6 */            {RPC_CLOCK_SEC(3*30)},
        /*  7 */            {RPC_CLOCK_SEC(5*30)},
        /*  8 */            {RPC_CLOCK_SEC(9*30)},
        /*  9 */            {RPC_CLOCK_SEC(17*30)},
        /* 10 infinite */   {RPC_CLOCK_SEC(0)}
    };
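    /*
     * The table above is indexed by scall->c.com_timeout_knob; entry 5
     * is the default and entry 10 (rpc_c_binding_infinite_timeout)
     * effectively disables the receive-idle check in the recv state.
     */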

    RPC_DG_CALL_LOCK(&scall->c);

    if (scall->c.stop_timer)
    {
        rpc__timer_clear(&scall->c.timer);
        RPC_DG_SCALL_RELEASE(&scall);
        return;
    }

    switch (scall->c.state)
    {
        case rpc_e_dg_cs_init:
            /*
             * Nothing to do in this state.
             */
            break;

        case rpc_e_dg_cs_idle:
            /*
             * If the call has been idle for a long time, stop caching
             * it.  In the case of a callback SCALL, do nothing; the
             * originating CCALL's processing dictates when this cached
             * SCALL finally gets freed.  If for some reason the
             * uncache couldn't complete, we'll try again on the next tick.
             */

            if (! scall->c.is_cbk)
            {
                if (rpc__clock_aged(scall->c.state_timestamp,
                                    rpc_c_dg_scall_max_idle_time))
                {
                    if (scall_uncache(scall))
                        return;
                }
            }
            break;

        case rpc_e_dg_cs_xmit:
            /*
             * Retransmit frags if necessary.
             */
            rpc__dg_call_xmitq_timer(&scall->c);
            break;

        case rpc_e_dg_cs_recv:
            /*
             * Check whether the client is still alive.  If we have not
             * received anything from the client in "max_recv_idle_time"
             * and the receive stream is not complete, assume that the
             * client is dead.  In the case of a callback SCALL, do nothing;
             * the originating CCALL's processing dictates when this
             * cached SCALL finally gets freed.
             */
            if (! scall->c.is_cbk)
            {
                if (! scall->c.rq.all_pkts_recvd
                    && rpc__clock_aged
                        (scall->c.last_rcv_timestamp,
                         scall_com_timeout_params[scall->c.com_timeout_knob]
                                                    .max_recv_idle_time)
                    && scall->c.com_timeout_knob != rpc_c_binding_infinite_timeout)
                {
                    boolean b;

                    /*
                     * We need the global lock because we are about to
                     * modify an SCT entry, which means violating the
                     * locking hierarchy.  If we can't get the global
                     * lock without blocking, just give up and try again
                     * later.  Otherwise, we will uncache the scall and
                     * stop its timer processing.
                     */

                    RPC_TRY_LOCK(&b);
                    if (b)
                    {
                        rpc__dg_scall_orphan_call(scall);
                        RPC_DG_CALL_UNLOCK(&scall->c);
                        RPC_UNLOCK(0);
                        return;
                    }
                }
            }
            break;

        case rpc_e_dg_cs_final:
            /*
             * Retransmit response if necessary; eventually give up and change to
             * the idle state.
             */
            rpc__dg_call_xmitq_timer(&scall->c);
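            /*
             * Give up once the call carries a failure status and is not
             * idempotent: drop back to the idle state and free any
             * response fragments still queued for retransmission.
             */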
            if (scall->c.status != rpc_s_ok
                && ! RPC_DG_HDR_FLAG_IS_SET(&scall->c.xq.hdr, RPC_C_DG_PF_IDEMPOTENT))
            {
                RPC_DG_CALL_SET_STATE(&scall->c, rpc_e_dg_cs_idle);
                if (scall->c.xq.head != NULL)
                    rpc__dg_xmitq_free(&scall->c.xq, &scall->c);
            }
            break;

        case rpc_e_dg_cs_orphan:
            /*
             * Once the orphaned call has completed, free up the
             * remaining resources.  As always, callbacks complicate
             * things, yielding a total of three scall scenarios:
             *      a)  a normal (server side) scall that has never
             *          been used in making a callback to a client
             *          (!scall->is_cbk && scall->cbk_ccall == NULL)
             *      b)  a normal (server side) scall that HAS
             *          been used in making a callback to a client
             *          (!scall->is_cbk && scall->cbk_ccall != NULL)
             *      c)  a callback scall (client side) that was the
             *          callback being executed
             *          (scall->is_cbk == true)
             *          (implicitly scall->cbk_ccall != NULL)
             *
             * The appropriate time for freeing up the remaining resources
             * is when the call executor (rpc__dg_execute_call) has
             * completed.  While it is possible to infer this condition
             * by examination of the scall's reference counts, it would
             * make this code fragment intolerably dependent on knowing
             * what/who has references to the scall under the various
             * scenarios.  Therefore we introduce and use the new flag:
             * scall->has_call_executor_ref.
             *
             * If for some reason the uncache couldn't complete, we'll
             * try again on the next tick.
             */

            if (! scall->has_call_executor_ref)
            {
                if (scall_uncache(scall))
                    return;
            }
            break;
    }

    RPC_DG_CALL_UNLOCK(&scall->c);
}
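
The recv-state branch above takes the global lock with RPC_TRY_LOCK because the routine already holds the call lock and would otherwise violate the locking hierarchy. Below is a minimal standalone sketch of that try-lock pattern using POSIX threads; the lock names and the try_global_work() helper are illustrative only and are not part of the DCE RPC runtime.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical locks standing in for the runtime's global and per-call locks. */
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t call_lock   = PTHREAD_MUTEX_INITIALIZER;

/*
 * Called with the per-call lock already held.  The hierarchy says
 * "global before call", so from here we may only *try* the global lock;
 * if that fails, report it so the caller can retry on a later tick
 * instead of blocking and risking a deadlock.
 */
static bool try_global_work(void)
{
    if (pthread_mutex_trylock(&global_lock) != 0)
        return false;                  /* contended; try again later */

    /* ... modify state protected by the global lock ... */

    pthread_mutex_unlock(&global_lock);
    return true;
}

int main(void)
{
    pthread_mutex_lock(&call_lock);
    printf("global work %s\n", try_global_work() ? "done" : "deferred");
    pthread_mutex_unlock(&call_lock);
    return 0;
}
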
Example #2
INTERNAL void network_monitor_liveness(void)
{
    rpc_dg_client_rep_p_t client;
    unsigned32 i;
    struct timespec next_ts;

    RPC_DBG_PRINTF(rpc_e_dbg_conv_thread, 1, 
                   ("(network_monitor_liveness) starting up...\n"));

    RPC_MUTEX_LOCK(monitor_mutex);

    while (stop_monitor == false)
    {
        /*
         * Wake up every 60 seconds.
         */
        rpc__clock_timespec(rpc__clock_stamp()+60, &next_ts);

        RPC_COND_TIMED_WAIT(monitor_cond, monitor_mutex, &next_ts);
        if (stop_monitor == true)
            break;

        for (i = 0; i < CLIENT_TABLE_SIZE; i++)
        {                                     
            client = client_table[i];
    
            while (client != NULL && active_monitors != 0)
            {      
                if (client->rundown != NULL &&
                    rpc__clock_aged(client->last_update, 
                                    RPC_CLOCK_SEC(LIVE_TIMEOUT_INTERVAL)))
                {                 
                    /*
                     * If the timer has expired, call the rundown routine.
                     * Stop monitoring the client handle by setting its rundown
                     * routine pointer to NULL.
                     */
    
                    RPC_DBG_PRINTF(rpc_e_dbg_general, 3, 
                        ("(network_monitor_liveness_timer) Calling rundown function\n"));
                            
                    RPC_MUTEX_UNLOCK(monitor_mutex);
                    (*client->rundown)((rpc_client_handle_t)client);
                    RPC_MUTEX_LOCK(monitor_mutex);

                    /*
                     * The monitor is no longer active.
                     */
                    client->rundown = NULL;
                    active_monitors--;
                }
                client = client->next;
            }

            if (active_monitors == 0)
            {
                /*
                 * While the mutex was released around the rundown call
                 * above, the fork handler might have asked us to stop.
                 */
                if (stop_monitor == true)
                    break;
                /*
                 * Nothing left to monitor, so terminate the thread.
                 */
                dcethread_detach_throw(monitor_task);
                monitor_running = false;
                RPC_DBG_PRINTF(rpc_e_dbg_conv_thread, 1, 
                    ("(network_monitor_liveness) shutting down (no active)...\n"));
                RPC_MUTEX_UNLOCK(monitor_mutex);
                return;
            }
        }
    }
    RPC_DBG_PRINTF(rpc_e_dbg_conv_thread, 1, 
                   ("(network_monitor_liveness) shutting down...\n"));

    RPC_MUTEX_UNLOCK(monitor_mutex);
}
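
The loop above is the standard periodic-monitor pattern: block on a condition variable with a deadline, drop the mutex around the user-supplied rundown callback, and exit once there is nothing left to watch. A minimal standalone sketch of the same pattern with POSIX threads follows; every name in it (monitor_thread, rundown_fn, active_monitors, the 60-second period) is illustrative rather than taken from the DCE RPC sources.

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t monitor_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  monitor_cond  = PTHREAD_COND_INITIALIZER;
static bool stop_monitor    = false;
static int  active_monitors = 1;      /* pretend one client is being watched */

static void rundown_fn(void)
{
    /* ... cleanup for a client that has stopped responding ... */
}

static void *monitor_thread(void *arg)
{
    (void) arg;
    pthread_mutex_lock(&monitor_mutex);

    while (!stop_monitor && active_monitors > 0)
    {
        /* Wake up every 60 seconds, or earlier if someone signals us to stop. */
        struct timespec deadline;
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 60;
        pthread_cond_timedwait(&monitor_cond, &monitor_mutex, &deadline);

        if (stop_monitor)
            break;

        /*
         * Never hold the monitor mutex across the callback: it may block
         * or call back into the monitor.  Re-check shared state once the
         * lock has been reacquired.
         */
        pthread_mutex_unlock(&monitor_mutex);
        rundown_fn();
        pthread_mutex_lock(&monitor_mutex);
        active_monitors--;
    }

    pthread_mutex_unlock(&monitor_mutex);
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, monitor_thread, NULL);
    pthread_join(tid, NULL);
    return 0;
}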