Example #1
PRIVATE void rpc__dg_network_stop_mon
(
    rpc_binding_rep_p_t binding_r ATTRIBUTE_UNUSED,
    rpc_client_handle_t client_h,
    unsigned32 *st
)
{
    rpc_dg_client_rep_p_t client = (rpc_dg_client_rep_p_t) client_h;
    rpc_dg_client_rep_p_t ptr;
    dce_uuid_p_t cas_uuid = &client->cas_uuid;
    unsigned16 probe;
                 
    RPC_MUTEX_LOCK(monitor_mutex);

    /*
     * Hash into the client rep table based on the client handle's UUID.
     */
  
    probe = CLIENT_HASH_PROBE(cas_uuid, st);
    ptr = client_table[probe];
       
    /*
     * Scan down the hash chain, looking for the reference to the client
     * handle
     */
                   
    while (ptr != NULL) {
        if (ptr == client)
        {   
            /*
             * To stop monitoring a client handle requires only that 
             * the rundown function pointer be set to NULL.
             */           
         
            if (client->rundown != NULL)
            {
                client->rundown = NULL;
                active_monitors--;
            }
            RPC_MUTEX_UNLOCK(monitor_mutex);
            *st = rpc_s_ok;
            return;
        }
        ptr = ptr->next;
    }

    *st = -1;               /* !!! attempt to remove unmonitored client */
    RPC_MUTEX_UNLOCK(monitor_mutex);
}
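
The monitor routines in these examples all work against the same structure: a fixed-size array of hash buckets (client_table), each holding a singly linked chain of client records keyed by the client's address-space UUID, with CLIENT_HASH_PROBE picking the bucket. Below is a minimal, self-contained sketch of that lookup pattern, including a toy find_client mirroring the internal routine the later examples call. Every name, the string-form UUID, and the table size of 29 are hypothetical stand-ins, not the real DCE RPC definitions.

/* Illustrative sketch only: a hash table with chained buckets, mirroring the
 * client_table / CLIENT_HASH_PROBE pattern above.  All names and sizes are
 * hypothetical. */
#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 29                   /* hypothetical bucket count */

typedef struct client_rep {
    struct client_rep *next;            /* hash chain link */
    char cas_uuid[37];                  /* client address space UUID, string form */
    void (*rundown)(void *);            /* non-NULL while the client is monitored */
} client_rep_t;

static client_rep_t *client_table[TABLE_SIZE];

/* Hypothetical stand-in for CLIENT_HASH_PROBE: hash the UUID, mod the table size. */
static unsigned probe_for(const char *uuid)
{
    unsigned h = 0;
    while (*uuid)
        h = h * 31u + (unsigned char) *uuid++;
    return h % TABLE_SIZE;
}

/* Walk the chain in the UUID's bucket, looking for the matching record. */
static client_rep_t *find_client(const char *uuid)
{
    client_rep_t *ptr = client_table[probe_for(uuid)];
    while (ptr != NULL && strcmp(ptr->cas_uuid, uuid) != 0)
        ptr = ptr->next;
    return ptr;
}

int main(void)
{
    static client_rep_t c = { NULL, "00000000-0000-0000-0000-000000000001", NULL };
    client_table[probe_for(c.cas_uuid)] = &c;
    printf("found: %s\n", find_client(c.cas_uuid) ? "yes" : "no");
    return 0;
}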
Example #2
INTERNAL void fwd_delayed (
    rpc_dg_sock_pool_elt_p_t sp,
    rpc_dg_recvq_elt_p_t rqe)
{
    pkt_list_element_t	*new_pkt;

    /* save the packet on our list */
    RPC_MEM_ALLOC(new_pkt,
                  pkt_list_element_t *,
                  sizeof(pkt_list_element_t),
                  RPC_C_MEM_UTIL,
                  RPC_C_MEM_WAITOK);

    new_pkt->rqe = rqe;
    new_pkt->sp = sp;
    /* increment the reference count */
    rpc__dg_network_sock_reference(sp);

    RPC_MUTEX_LOCK(fwd_list_mutex);

    new_pkt->next = delayed_pkt_head;
    delayed_pkt_head = new_pkt;

    RPC_MUTEX_UNLOCK(fwd_list_mutex);

    return;
}
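
fwd_delayed only pushes the packet onto the head of a global singly linked list under fwd_list_mutex; the matching search-and-unlink is in rpc__server_fwd_resolve_delayed (Example #6, with a companion sketch after it). A minimal pthread-based sketch of the push side, with hypothetical names throughout:

/* Illustrative sketch only: a mutex-protected LIFO push, mirroring the
 * delayed_pkt_head list above.  Names are hypothetical. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct pkt_elt {
    struct pkt_elt *next;
    int payload;                        /* stands in for the saved rqe/sp pair */
} pkt_elt_t;

static pkt_elt_t *delayed_head = NULL;
static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Save a packet on the head of the list, as fwd_delayed does. */
static void push_delayed(int payload)
{
    pkt_elt_t *e = malloc(sizeof *e);
    if (e == NULL)                      /* the original uses RPC_MEM_ALLOC with WAITOK */
        abort();
    e->payload = payload;

    pthread_mutex_lock(&list_mutex);
    e->next = delayed_head;
    delayed_head = e;
    pthread_mutex_unlock(&list_mutex);
}

int main(void)
{
    push_delayed(1);
    push_delayed(2);
    printf("head payload: %d\n", delayed_head->payload);   /* most recent push: 2 */
    return 0;
}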
Example #3
PRIVATE void rpc__dg_client_free
(
    rpc_client_handle_t client_h
)
{
    unsigned16 probe;
    unsigned32 st;
    rpc_dg_client_rep_p_t client = (rpc_dg_client_rep_p_t) client_h;
    rpc_dg_client_rep_p_t ptr, prev = NULL;

    RPC_MUTEX_LOCK(monitor_mutex);
             
    /*
     * Hash into the client rep table based on the client handle's UUID.
     */
  
    probe = CLIENT_HASH_PROBE(&client->cas_uuid, &st);
    ptr = client_table[probe];
       
    /*
     * Scan down the hash chain, looking for the reference to the client
     * handle
     */
                   
    while (ptr != NULL) 
    {
        if (ptr == client)
        {   
            if (prev == NULL)
                client_table[probe] = ptr->next;
            else
                prev->next = ptr->next;

            RPC_MEM_FREE(client, RPC_C_MEM_DG_CLIENT_REP);

            RPC_DBG_PRINTF(rpc_e_dbg_general, 3, 
                ("(client_free) Freeing client handle\n"));
    
            RPC_MUTEX_UNLOCK(monitor_mutex);
            return;
        }          

        prev = ptr;
        ptr = ptr->next;
    }
    RPC_MUTEX_UNLOCK(monitor_mutex);
}
Example #4
PRIVATE void rpc__dg_convc_indy
(
    dce_uuid_t *cas_uuid
)
{
    rpc_dg_client_rep_p_t client;
                
    RPC_MUTEX_LOCK(monitor_mutex);

    client = find_client(cas_uuid);

    if (client != NULL)
    {
        client->last_update = rpc__clock_stamp();
    }
    RPC_MUTEX_UNLOCK(monitor_mutex);
}
Example #5
PRIVATE dce_pointer_t rpc__list_element_alloc
(
    rpc_list_desc_p_t       list_desc,
    boolean32               block
)
{
    volatile dce_pointer_t  element = NULL;
    unsigned32          wait_cnt;
    struct timespec     delta;
    struct timespec     abstime;

    RPC_LOG_LIST_ELT_ALLOC_NTR;

    for (wait_cnt = 0;
         wait_cnt < rpc_g_lookaside_rcb.max_wait_times;
         wait_cnt++)
    {
        /*
         * Acquire the global resource control lock for all lookaside
         * lists if the caller doesn't have their own lock.
         */
        if (list_desc->use_global_mutex)
        {
            RPC_MUTEX_LOCK (rpc_g_lookaside_rcb.res_lock);
        }

        /*
         * Try allocating a structure off the lookaside list given.
         */
        if (list_desc->cur_size > 0)
        {
#define DEBUG 1
#ifdef DEBUG
            if (list_desc->list_head.next == NULL)
            {
                /*
                 * rpc_m_lookaside_corrupt
                 * "(%s) Lookaside list is corrupted"
                 */
                rpc_dce_svc_printf (
                    __FILE__, __LINE__,
                    "%s",
                    rpc_svc_general,
                    svc_c_sev_fatal | svc_c_action_abort,
                    rpc_m_lookaside_corrupt,
                    "rpc__list_element_alloc" );
            }
#endif
            list_desc->cur_size--;
            RPC_LIST_REMOVE_HEAD (list_desc->list_head, element, dce_pointer_t);

            /*
             * Release the global resource control lock for all lookaside
             * lists if the caller doesn't have their own lock.
             */
            if (list_desc->use_global_mutex)
            {
                RPC_MUTEX_UNLOCK (rpc_g_lookaside_rcb.res_lock);
            }
            break;
        }
        else
        {
            /*
             * Release the global resource control lock for all lookaside
             * lists if the caller doesn't have their own lock, since no
             * structure was available on the lookaside list.
             *
             * We do it now because allocating an element from the heap is
             * a relatively time consuming operation.
             */
            if (list_desc->use_global_mutex)
            {
                RPC_MUTEX_UNLOCK (rpc_g_lookaside_rcb.res_lock);
            }

            /*
             * The lookaside list is empty. Try and allocate from
             * heap.
             */
            RPC_MEM_ALLOC (element,
                           dce_pointer_t,
                           list_desc->element_size,
                           list_desc->element_type,
                           RPC_C_MEM_NOWAIT);

            if (element == NULL)
            {
                /*
                 * The heap allocate failed. If the caller indicated
                 * that we should not block return right now.
                 */
                if (block == false)
                {
                    break;
                }

                delta.tv_sec = rpc_g_lookaside_rcb.wait_time;
                delta.tv_nsec = 0;
                dcethread_get_expiration (&delta, &abstime);

                /*
                 * If we are using the global lookaside list lock
                 * then reacquire the global lookaside list lock and
                 * wait on the global lookaside list condition
                 * variable otherwise use the caller's mutex and
                 * condition variable.
                 */
                if (list_desc->use_global_mutex)
                {
                    RPC_MUTEX_LOCK (rpc_g_lookaside_rcb.res_lock);
                    RPC_COND_TIMED_WAIT (rpc_g_lookaside_rcb.wait_flg,
                                         rpc_g_lookaside_rcb.res_lock,
                                         &abstime);
                    RPC_MUTEX_UNLOCK (rpc_g_lookaside_rcb.res_lock);
                }
                else
                {
                    RPC_COND_TIMED_WAIT (*list_desc->cond,
                                         *list_desc->mutex,
                                         &abstime);
                }

                /*
                 * Try to allocate the structure again.
                 */
                continue;
            }
            else
            {
                /*
                 * The RPC_MEM_ALLOC succeeded. If an alloc routine
                 * was specified when the lookaside list was inited
                 * call it now.
                 */
                if (list_desc->alloc_rtn != NULL)
                {
                    /*
                     * Catch any exceptions which may occur in the
                     * list-specific alloc routine. Any exceptions
                     * will be caught and the memory will be freed.
                     */
                    DCETHREAD_TRY
                    {
                        (*list_desc->alloc_rtn) (element);
                    }
                    DCETHREAD_CATCH_ALL(THIS_CATCH)
                    {
                        RPC_MEM_FREE (element, list_desc->element_type);
                        element = NULL;
                        /*
                         * rpc_m_call_failed_no_status
                         * "%s failed"
                         */
                        rpc_dce_svc_printf (
                            __FILE__, __LINE__,
                            "%s",
                            rpc_svc_general,
                            svc_c_sev_fatal | svc_c_action_abort,
                            rpc_m_call_failed_no_status,
                            "rpc__list_element_alloc/(*list_desc->alloc_rtn)(element)" );
                    }
                    DCETHREAD_ENDTRY
                }
                break;
            }
        }
    }

    return (element);
}
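
The loop above is the interesting part: take an element off the lookaside free list if one is there, otherwise fall back to the heap, and if the heap allocation fails (and the caller allows blocking) wait on a condition variable for a bounded time and retry. A minimal, self-contained pthread sketch of that shape follows; the names, the 3-attempt limit, and the 1-second wait are hypothetical and merely echo max_wait_times and wait_time above.

/* Illustrative sketch only: the allocate-with-fallback loop of
 * rpc__list_element_alloc -- pop the free list if possible, otherwise
 * allocate from the heap, and on heap failure optionally block on a
 * condition variable and retry.  All names and limits are hypothetical. */
#include <pthread.h>
#include <stdlib.h>
#include <time.h>

typedef struct free_elt {
    struct free_elt *next;
} free_elt_t;

typedef struct lookaside {
    free_elt_t      *head;              /* free list of recycled elements */
    size_t           elt_size;
    pthread_mutex_t  lock;
    pthread_cond_t   cond;              /* signalled when elements are freed */
} lookaside_t;

static void *lookaside_alloc(lookaside_t *la, int block)
{
    void *elt = NULL;
    int tries;

    for (tries = 0; tries < 3; tries++)
    {
        pthread_mutex_lock(&la->lock);
        if (la->head != NULL)               /* fast path: pop the free list */
        {
            elt = la->head;
            la->head = la->head->next;
            pthread_mutex_unlock(&la->lock);
            break;
        }
        pthread_mutex_unlock(&la->lock);    /* drop the lock around the heap call */

        elt = malloc(la->elt_size);
        if (elt != NULL || !block)          /* got memory, or caller said don't wait */
            break;

        /* Heap exhausted: wait up to 1 second for an element to be freed,
         * then go around the loop again. */
        struct timespec abstime;
        clock_gettime(CLOCK_REALTIME, &abstime);
        abstime.tv_sec += 1;

        pthread_mutex_lock(&la->lock);
        pthread_cond_timedwait(&la->cond, &la->lock, &abstime);
        pthread_mutex_unlock(&la->lock);
    }
    return elt;
}

int main(void)
{
    lookaside_t la = { NULL, 64, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
    void *p = lookaside_alloc(&la, 1);
    free(p);
    return 0;
}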
Example #6
/*
 * R P C _ _ S E R V E R _ F W D _ R E S O L V E _ D E L A Y E D
 *
 * Remove specified packet from the list of delayed packets
 * and do what we are told with it
 */
PRIVATE void rpc__server_fwd_resolve_delayed(
    dce_uuid_p_t             actuuid,
    rpc_addr_p_t	fwd_addr,
    rpc_fwd_action_t	*fwd_action,
    unsigned32		*status)

{
    rpc_dg_sock_pool_elt_p_t 	sp;
    rpc_dg_recvq_elt_p_t 	rqe = (rpc_dg_recvq_elt_p_t)-1;
    rpc_dg_pkt_hdr_p_t  	hdrp;
    pkt_list_element_t          *ep, *last_ep = NULL;
    unsigned32 			st;

    /* get the requested packet from the list */
    *status = rpc_s_not_found;

    RPC_MUTEX_LOCK(fwd_list_mutex);

    ep = delayed_pkt_head;
    while (ep != NULL)
    {
        hdrp = ep->rqe->hdrp;
        if (dce_uuid_equal(&(hdrp->actuid), actuuid, &st) && (st == rpc_s_ok))
        {
            /* found - remove it from the list */
            rqe = ep->rqe;
            sp = ep->sp;
            if (last_ep == NULL)
            {
                delayed_pkt_head  = ep->next;
            }
            else
            {
                last_ep->next  = ep->next;
            }
            RPC_MEM_FREE(ep, RPC_C_MEM_UTIL);
            *status = rpc_s_ok;
            break;
        }
        last_ep = ep;
        ep = ep->next;
    }
    RPC_MUTEX_UNLOCK(fwd_list_mutex);

    if (*status != rpc_s_ok)
    {
        return;
    }

    /*
     * Do what we're told to do with this packet.
     */
    switch (*fwd_action)
    {
        case rpc_e_fwd_drop:
            RPC_DBG_PRINTF(rpc_e_dbg_general, 10,
               ("(rpc__server_fwd_resolve_delayed) dropping (ptype=%s) [%s]\n",
                rpc__dg_pkt_name(RPC_DG_HDR_INQ_PTYPE(rqe->hdrp)),
                rpc__dg_act_seq_string(rqe->hdrp)));
            break;

        case rpc_e_fwd_reject:
            fwd_reject(sp, rqe);
            break;

        case rpc_e_fwd_forward:
            fwd_forward(sp, rqe, fwd_addr);
            break;

        default:
            *status = rpc_s_not_supported;
            break;
    }
    rpc__dg_network_sock_release(&sp);
    if (rqe == (rpc_dg_recvq_elt_p_t)-1)
    {
        fprintf(stderr, "%s: bad rqe: aborting\n", __PRETTY_FUNCTION__);
        abort();
    }
    rpc__dg_pkt_free_rqe(rqe, NULL);
    return;
}
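
This is the unlink side of the list that fwd_delayed (Example #2) builds: walk the chain with a trailing pointer and either replace the head or splice around the matched element, all under fwd_list_mutex. A self-contained pthread sketch of that walk, with an int key standing in for the activity-UUID comparison and every name hypothetical:

/* Illustrative sketch only: the search-and-unlink walk used by
 * rpc__server_fwd_resolve_delayed to pull one element out of the singly
 * linked list built up by fwd_delayed.  Names are hypothetical. */
#include <pthread.h>
#include <stdio.h>

typedef struct pkt_elt {
    struct pkt_elt *next;
    int key;                            /* stands in for the packet's activity UUID */
} pkt_elt_t;

static pkt_elt_t *delayed_head = NULL;
static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Remove and return the first element whose key matches, or NULL. */
static pkt_elt_t *remove_delayed(int key)
{
    pkt_elt_t *ep, *last_ep = NULL;

    pthread_mutex_lock(&list_mutex);
    for (ep = delayed_head; ep != NULL; last_ep = ep, ep = ep->next)
    {
        if (ep->key == key)
        {
            if (last_ep == NULL)        /* unlink the head ... */
                delayed_head = ep->next;
            else                        /* ... or splice around ep */
                last_ep->next = ep->next;
            break;
        }
    }
    pthread_mutex_unlock(&list_mutex);
    return ep;
}

int main(void)
{
    static pkt_elt_t a = { NULL, 1 }, b = { &a, 2 };
    delayed_head = &b;                  /* list: 2 -> 1 */

    pkt_elt_t *hit = remove_delayed(1);
    printf("removed key %d, head key now %d\n", hit->key, delayed_head->key);
    return 0;
}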
Example #7
PRIVATE void rpc__dg_monitor_fork_handler
(
    rpc_fork_stage_id_t stage
)
{                           
    unsigned32 i;
    unsigned32 st;

    switch ((int)stage)
    {
    case RPC_C_PREFORK:
        RPC_MUTEX_LOCK(monitor_mutex);
        monitor_was_running = false;
        
        if (monitor_running) 
        {
            stop_monitor = true;
            RPC_COND_SIGNAL(monitor_cond, monitor_mutex);
            RPC_MUTEX_UNLOCK(monitor_mutex);
            dcethread_join_throw (monitor_task, (void **) &st);
            RPC_MUTEX_LOCK(monitor_mutex);
            /* FIXME: wtf
             * DCETHREAD_TRY {
             *     dcethread_detach_throw(monitor_task);
             * }
             * DCETHREAD_CATCH(dcethread_use_error_e)
             * {}
             * DCETHREAD_ENDTRY;
             */
            monitor_running = false;
            /*
             * The monitor thread may have nothing to do.
             */
            if (active_monitors != 0)
                monitor_was_running = true;
            stop_monitor = false;
        }
        break;
    case RPC_C_POSTFORK_PARENT:
        if (monitor_was_running) 
        {
            monitor_was_running = false;
            monitor_running = true;
            stop_monitor = false;
            dcethread_create_throw(&monitor_task, NULL, 
                           (dcethread_startroutine) network_monitor_liveness, 
                           NULL);  
        }
        RPC_MUTEX_UNLOCK(monitor_mutex);
        break;
    case RPC_C_POSTFORK_CHILD:  
        monitor_was_running = false;
        monitor_running = false;
        stop_monitor = false;

        /*
         * Initialize the count of handles currently being monitored.
         */
        
        active_monitors = 0;
        for (i = 0; i < CLIENT_TABLE_SIZE; i++)
            client_table[i] = NULL;

        RPC_MUTEX_UNLOCK(monitor_mutex);
        break;
    }
}
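
The three stages map onto the classic fork-safety pattern: before the fork, stop the helper thread (remembering whether it needs restarting) and hold its mutex across the fork; in the parent, restart it if needed and release the mutex; in the child, just reset the bookkeeping and release the mutex. The sketch below expresses the same shape with pthread_atfork() instead of an explicit stage argument. It is an assumption-laden illustration, not the DCE code: every name is hypothetical, and it skips the active_monitors check the original uses before deciding to restart.

/* Illustrative sketch only: stopping a background thread across fork() and
 * restarting it in the parent, via pthread_atfork().  Names are hypothetical. */
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t mon_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  mon_cond  = PTHREAD_COND_INITIALIZER;
static pthread_t       mon_task;
static bool mon_running = false, mon_was_running = false, stop_mon = false;

static void *monitor_main(void *arg)
{
    (void) arg;
    pthread_mutex_lock(&mon_mutex);
    while (!stop_mon)
        pthread_cond_wait(&mon_cond, &mon_mutex);   /* the real thread does timed waits */
    pthread_mutex_unlock(&mon_mutex);
    return NULL;
}

static void prefork(void)                   /* cf. RPC_C_PREFORK */
{
    pthread_mutex_lock(&mon_mutex);
    mon_was_running = false;
    if (mon_running)
    {
        stop_mon = true;
        pthread_cond_signal(&mon_cond);
        pthread_mutex_unlock(&mon_mutex);
        pthread_join(mon_task, NULL);       /* wait for the thread to exit */
        pthread_mutex_lock(&mon_mutex);
        mon_running = false;
        mon_was_running = true;
        stop_mon = false;
    }
    /* keep mon_mutex held across the fork so nobody else touches the state */
}

static void postfork_parent(void)           /* cf. RPC_C_POSTFORK_PARENT */
{
    if (mon_was_running)
    {
        mon_was_running = false;
        mon_running = true;
        pthread_create(&mon_task, NULL, monitor_main, NULL);
    }
    pthread_mutex_unlock(&mon_mutex);
}

static void postfork_child(void)            /* cf. RPC_C_POSTFORK_CHILD */
{
    mon_was_running = false;
    mon_running = false;
    stop_mon = false;
    pthread_mutex_unlock(&mon_mutex);
}

int main(void)
{
    pthread_atfork(prefork, postfork_parent, postfork_child);
    if (fork() == 0)
        _exit(0);                           /* child exits immediately */
    return 0;
}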
Example #8
PRIVATE void rpc__dg_binding_inq_client
(
    rpc_binding_rep_p_t binding_r,
    rpc_client_handle_t *client_h,
    unsigned32 *st
)
{       
    rpc_dg_binding_server_p_t shand = (rpc_dg_binding_server_p_t) binding_r;
    rpc_dg_scall_p_t scall = shand->scall;
    rpc_binding_handle_t h;
    dce_uuid_t cas_uuid;
    rpc_dg_client_rep_p_t client;
    unsigned32 temp_seq, tst;
                              
    *st = rpc_s_ok;

    /*
     * Lock down and make sure we're in an OK state.
     */

    RPC_LOCK(0);
    RPC_DG_CALL_LOCK(&scall->c);
                      
    if (scall->c.state == rpc_e_dg_cs_orphan)
    {
        *st = rpc_s_call_orphaned;
        RPC_DG_CALL_UNLOCK(&scall->c);
        RPC_UNLOCK(0);
        return;
    }
    
    /*
     * See if there is already a client handle associated with the scte
     * associated with this server binding handle.  If there is, just
     * return it.
     */

    if (scall->scte->client != NULL)
    {
        *client_h = (rpc_client_handle_t) scall->scte->client;
        RPC_DG_CALL_UNLOCK(&scall->c);
        RPC_UNLOCK(0);
        return;
    }

    /*
     * No client handle.  We need to do a call back to obtain a UUID
     * uniquely identifying this particular instance of the client.
     */

    h = rpc__dg_sct_make_way_binding(scall->scte, st);

    RPC_DG_CALL_UNLOCK(&scall->c);
    RPC_UNLOCK(0);

    if (h == NULL)
    {
        return;
    }

    RPC_DBG_PRINTF(rpc_e_dbg_general, 3, 
        ("(binding_inq_client) Doing whats-your-proc-id callback\n"));

    DCETHREAD_TRY
    {
        (*conv_v3_0_c_epv.conv_who_are_you2)
            (h, &scall->c.call_actid, rpc_g_dg_server_boot_time, 
            &temp_seq, &cas_uuid, st);
    }
    DCETHREAD_CATCH_ALL(THIS_CATCH)
    {
        *st = rpc_s_who_are_you_failed;
    }
    DCETHREAD_ENDTRY

    rpc_binding_free(&h, &tst);

    if (*st != rpc_s_ok)
        return;

    /*
     * Check to see if the UUID returned has already been built into
     * a client handle associated with another scte.  Since we have no
     * way of mapping actids to processes, we can't know that two actids
     * are in the same address space until we get the same address space
     * UUID from both.  In this case it is necessary to use the same
     * client handle for both actids.
     */
             
    RPC_LOCK(0);          
    RPC_DG_CALL_LOCK(&scall->c);

    if (scall->c.state == rpc_e_dg_cs_orphan)
    {
        *st = rpc_s_call_orphaned;
        RPC_DG_CALL_UNLOCK(&scall->c);
        RPC_UNLOCK(0);                                     
        return;
    }
    
    RPC_MUTEX_LOCK(monitor_mutex);

    client = find_client(&cas_uuid);

    if (client != NULL)
    {   
        client->refcnt++;
        scall->scte->client = client;
    }
    else
    {
        /*
         * If not, alloc up a client handle structure and thread
         * it onto the table.
         */

        unsigned16 probe;

        probe = CLIENT_HASH_PROBE(&cas_uuid, st);

        RPC_MEM_ALLOC(client, rpc_dg_client_rep_p_t, sizeof *client, 
            RPC_C_MEM_DG_CLIENT_REP, RPC_C_MEM_NOWAIT);

        client->next = client_table[probe];
        client->rundown = NULL;
        client->last_update = 0;
        client->cas_uuid = cas_uuid;

        client_table[probe] = client;
        scall->scte->client = client;
        client->refcnt = 2;
    }  

    RPC_MUTEX_UNLOCK(monitor_mutex);
    RPC_DG_CALL_UNLOCK(&scall->c);
    RPC_UNLOCK(0);                                     
    
    *client_h = (rpc_client_handle_t) client; 
}
Example #9
INTERNAL void network_monitor_liveness(void)
{
    rpc_dg_client_rep_p_t client;
    unsigned32 i;
    struct timespec next_ts;

    RPC_DBG_PRINTF(rpc_e_dbg_conv_thread, 1, 
                   ("(network_monitor_liveness) starting up...\n"));

    RPC_MUTEX_LOCK(monitor_mutex);

    while (stop_monitor == false)
    {
        /*
         * Awake every 60 seconds.
         */
        rpc__clock_timespec(rpc__clock_stamp()+60, &next_ts);

        RPC_COND_TIMED_WAIT(monitor_cond, monitor_mutex, &next_ts);
        if (stop_monitor == true)
            break;

        for (i = 0; i < CLIENT_TABLE_SIZE; i++)
        {                                     
            client = client_table[i];
    
            while (client != NULL && active_monitors != 0)
            {      
                if (client->rundown != NULL &&
                    rpc__clock_aged(client->last_update, 
                                    RPC_CLOCK_SEC(LIVE_TIMEOUT_INTERVAL)))
                {                 
                    /*
                     * If the timer has expired, call the rundown routine.
                     * Stop monitoring the client handle by setting its rundown
                     * routine pointer to NULL.
                     */
    
                    RPC_DBG_PRINTF(rpc_e_dbg_general, 3, 
                        ("(network_monitor_liveness_timer) Calling rundown function\n"));
                            
                    RPC_MUTEX_UNLOCK(monitor_mutex);
                    (*client->rundown)((rpc_client_handle_t)client);
                    RPC_MUTEX_LOCK(monitor_mutex);

                    /*
                     * The monitor is no longer active.
                     */
                    client->rundown = NULL;
                    active_monitors--;
                }
                client = client->next;
            }

            if (active_monitors == 0)
            {
                /*
                 * While we were executing the rundown function and opened the
                 * mutex, the fork handler might try to stop us.
                 */
                if (stop_monitor == true)
                    break;
                /*
                 * Nothing left to monitor, so terminate the thread.
                 */
                dcethread_detach_throw(monitor_task);
                monitor_running = false;
                RPC_DBG_PRINTF(rpc_e_dbg_conv_thread, 1, 
                    ("(network_monitor_liveness) shutting down (no active)...\n"));
                RPC_MUTEX_UNLOCK(monitor_mutex);
                return;
            }
        }
    }
    RPC_DBG_PRINTF(rpc_e_dbg_conv_thread, 1, 
                   ("(network_monitor_liveness) shutting down...\n"));

    RPC_MUTEX_UNLOCK(monitor_mutex);
}
Example #10
PRIVATE void rpc__dg_network_mon
(
    rpc_binding_rep_p_t binding_r ATTRIBUTE_UNUSED,
    rpc_client_handle_t client_h,
    rpc_network_rundown_fn_t rundown,
    unsigned32 *st
)
{            
    rpc_dg_client_rep_p_t ptr, client = (rpc_dg_client_rep_p_t) client_h;
    unsigned16 probe;
    dce_uuid_p_t cas_uuid = (dce_uuid_p_t) &client->cas_uuid;

    RPC_MUTEX_LOCK(monitor_mutex);
   
    /*
     * Hash into the client rep table based on the handle's UUID.
     * Scan the chain to find the client handle.
     */
                  
    probe = CLIENT_HASH_PROBE(cas_uuid, st);
    ptr = client_table[probe]; 

    while (ptr != NULL)
    {
        if (ptr == client)
            break;
        ptr = ptr->next;
    }

    /*           
     * If the handle passed in is not in the table, it must be bogus.
     * Also, make sure that we are not already monitoring this client,
     * indicated by a non-NULL rundown routine pointer.
     */

    if (ptr == NULL || ptr->rundown != NULL)
    {    
        *st = -1;         /* !!! Need a real error value */
        RPC_MUTEX_UNLOCK(monitor_mutex);
        return;
    }

    /*
     * (Re)initialize the table entry, and bump the count of active monitors.
     */

    client->rundown  = rundown;
    client->last_update = rpc__clock_stamp();
    active_monitors++;

    /*
     * Last, make sure that the monitor timer routine is running. 
     */

    if (! monitor_running)
    {
        monitor_running = true;
        dcethread_create_throw(&monitor_task, NULL, 
            (dcethread_startroutine) network_monitor_liveness, 
            NULL);  
    }                         

    *st = rpc_s_ok;
    RPC_MUTEX_UNLOCK(monitor_mutex);
}