/***********************************************************************
* Function name : healthy_check_arp_proc
* Description   : ARP health-check worker thread
* Input params  : node
* Output params : void
------------------------------------------------------------------------
Last modification record :
Author  :
Purpose :
Date    :
*-------------------------------------------------------------------- */
void* healthy_check_arp_proc(void *node)
{
    u32 i;
    s32 oldstate,err;
    s32 sockfd[ARP_CHECK_COUNT_MAX];
    call_func_cfg_s argument;
    (void)node;
    if(g_apswt_arp_healthcheck_cfg.interval == 0)
    {
        g_apswt_arp_healthcheck_cfg.interval = 10;
    }
    if(g_apswt_arp_healthcheck_cfg.timeout == 0)
    {
        g_apswt_arp_healthcheck_cfg.timeout = 3;
    }
    if(g_apswt_arp_healthcheck_cfg.fail_time == 0)
    {
        g_apswt_arp_healthcheck_cfg.fail_time = 3;
    }


    for(;;)
    {
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
        for(i=0; i<g_apswt_arp_healthcheck_cfg.count; i++)
        {
            sockfd[i] = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ARP));
        }
        err = healthy_check_apswt_arp(sockfd);
        for(i=0; i<g_apswt_arp_healthcheck_cfg.count; i++)
        {
            if(sockfd[i] != -1)
            {
                close(sockfd[i]);
            }
        }
        pthread_setcancelstate(oldstate, NULL);
        pthread_testcancel();   
        if(err == ERROR_SUCCESS)
        {
            for(i=0; i<g_apswt_arp_healthcheck_cfg.count; i++)
            {
                /* This round's check result is OK */
                if(g_apswt_arp_healthcheck_cfg.new_status[i] == E_ARP_CHECK_RESULT_OK)
                {
                    /* Check whether the current state is DOWN */
                    if(g_apswt_arp_healthcheck_cfg.old_status[i] >= g_apswt_arp_healthcheck_cfg.fail_time)
                    {
                        if(NULL != g_apswt_arp_healthcheck_cfg.call_func)
                        {
                            memset(&argument, 0, sizeof(call_func_cfg_s));
                            argument.change_type = HC_RODE_NOTREADY_TO_READY;
                            argument.id = g_apswt_arp_healthcheck_cfg.id[i];
                            memcpy(argument.eth, g_apswt_arp_healthcheck_cfg.eth[i], HC_IFNAME_LEN);
                            memcpy(argument.ip, (s8 *)g_apswt_arp_healthcheck_cfg.ip[i], HC_IP_LEN_MAX);
                            (*(g_apswt_arp_healthcheck_cfg.call_func))(argument);
                        }
                    }
                    g_apswt_arp_healthcheck_cfg.old_status[i] = E_ARP_CHECK_RESULT_OK;
                }
                /* This round's check result is DOWN */
                else
                {
                    g_apswt_arp_healthcheck_cfg.old_status[i]++;
                    if(g_apswt_arp_healthcheck_cfg.old_status[i] == g_apswt_arp_healthcheck_cfg.fail_time)
                    {
                        if(NULL != g_apswt_arp_healthcheck_cfg.call_func)
                        {
                            memset(&argument, 0, sizeof(call_func_cfg_s));
                            argument.change_type = HC_RODE_READY_TO_NOTREADY;
                            argument.id = g_apswt_arp_healthcheck_cfg.id[i];
                            memcpy(argument.eth, g_apswt_arp_healthcheck_cfg.eth[i], HC_IFNAME_LEN);
                            memcpy(argument.ip, (s8 *)g_apswt_arp_healthcheck_cfg.ip[i], HC_IP_LEN_MAX);
                            (*(g_apswt_arp_healthcheck_cfg.call_func))(argument);
                        }
                    }
                }    

                /* Clear this round's record for the next pass */
                g_apswt_arp_healthcheck_cfg.new_status[i] = E_ARP_CHECK_RESULT_DOWN;
                /* Prevent 255+1 wrapping to 0 and corrupting the check result */
                if(g_apswt_arp_healthcheck_cfg.old_status[i] >= 255)
                {
                    g_apswt_arp_healthcheck_cfg.old_status[i] = g_apswt_arp_healthcheck_cfg.fail_time;
                }
            }
        }

        sleep(g_apswt_arp_healthcheck_cfg.interval);
    }
    return NULL;
}
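
For context, a worker like the one above is normally started once at init time and later torn down with pthread_cancel(); that is why it wraps the raw-socket work in pthread_setcancelstate() and only honors cancellation at the explicit pthread_testcancel(). A minimal launcher sketch, assuming only the pthread API (the helper names and the global handle below are illustrative, not from the original module):

#include <pthread.h>

/* Provided by the module above. */
extern void *healthy_check_arp_proc(void *node);

static pthread_t g_arp_check_tid;   /* illustrative handle, not from the original code */

/* Start the ARP health-check worker thread. */
int arp_check_start(void)
{
    return pthread_create(&g_arp_check_tid, NULL, healthy_check_arp_proc, NULL);
}

/* Stop it again: the cancel request stays pending until the worker reaches
 * pthread_testcancel(), i.e. only after it has closed its raw sockets. */
int arp_check_stop(void)
{
    int err = pthread_cancel(g_arp_check_tid);
    if (err == 0)
        err = pthread_join(g_arp_check_tid, NULL);
    return err;
}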
Example #2
static void *WatchdogFunc( void *userData )
{
    PaError result = paNoError, *pres = NULL;
    int err;
    PaAlsaThreading *th = (PaAlsaThreading *) userData;
    unsigned intervalMsec = 500;
    const PaTime maxSeconds = 3.;   /* Max seconds between callbacks */
    PaTime timeThen = PaUtil_GetTime(), timeNow, timeElapsed, cpuTimeThen, cpuTimeNow, cpuTimeElapsed;
    double cpuLoad, avgCpuLoad = 0.;
    int throttled = 0;

    assert( th );

    /* Execute OnWatchdogExit when exiting */
    pthread_cleanup_push( &OnWatchdogExit, th );

    /* Boost priority of callback thread */
    PA_ENSURE( result = BoostPriority( th ) );
    if( !result )
    {
        /* Boost failed, might as well exit */
        pthread_exit( NULL );
    }

    cpuTimeThen = th->callbackCpuTime;
    {
        int policy;
        struct sched_param spm = { 0 };
        pthread_getschedparam( pthread_self(), &policy, &spm );
        PA_DEBUG(( "%s: Watchdog priority is %d\n", __FUNCTION__, spm.sched_priority ));
    }

    while( 1 )
    {
        double lowpassCoeff = 0.9, lowpassCoeff1 = 0.99999 - lowpassCoeff;

        /* Test before and after in case whatever underlying sleep call isn't interrupted by pthread_cancel */
        pthread_testcancel();
        Pa_Sleep( intervalMsec );
        pthread_testcancel();

        if( PaUtil_GetTime() - th->callbackTime > maxSeconds )
        {
            PA_DEBUG(( "Watchdog: Terminating callback thread\n" ));
            /* Tell thread to terminate */
            err = pthread_kill( th->callbackThread, SIGKILL );
            pthread_exit( NULL );
        }

        PA_DEBUG(( "%s: PortAudio reports CPU load: %g\n", __FUNCTION__, PaUtil_GetCpuLoad( th->cpuLoadMeasurer ) ));

        /* Check if we should throttle, or unthrottle :P */
        cpuTimeNow = th->callbackCpuTime;
        cpuTimeElapsed = cpuTimeNow - cpuTimeThen;
        cpuTimeThen = cpuTimeNow;

        timeNow = PaUtil_GetTime();
        timeElapsed = timeNow - timeThen;
        timeThen = timeNow;
        cpuLoad = cpuTimeElapsed / timeElapsed;
        avgCpuLoad = avgCpuLoad * lowpassCoeff + cpuLoad * lowpassCoeff1;
        /*
        if( throttled )
            PA_DEBUG(( "Watchdog: CPU load: %g, %g\n", avgCpuLoad, cpuTimeElapsed ));
            */
        if( PaUtil_GetCpuLoad( th->cpuLoadMeasurer ) > .925 )
        {
            static int policy;
            static struct sched_param spm = { 0 };
            static const struct sched_param defaultSpm = { 0 };
            PA_DEBUG(( "%s: Throttling audio thread, priority %d\n", __FUNCTION__, spm.sched_priority ));

            pthread_getschedparam( th->callbackThread, &policy, &spm );
            if( !pthread_setschedparam( th->callbackThread, SCHED_OTHER, &defaultSpm ) )
            {
                throttled = 1;
            }
            else
                PA_DEBUG(( "Watchdog: Couldn't lower priority of audio thread: %s\n", strerror( errno ) ));

            /* Give other processes a go, before raising priority again */
            PA_DEBUG(( "%s: Watchdog sleeping for %lu msecs before unthrottling\n", __FUNCTION__, th->throttledSleepTime ));
            Pa_Sleep( th->throttledSleepTime );

            /* Reset callback priority */
            if( pthread_setschedparam( th->callbackThread, SCHED_FIFO, &spm ) != 0 )
            {
                PA_DEBUG(( "%s: Couldn't raise priority of audio thread: %s\n", __FUNCTION__, strerror( errno ) ));
            }

            if( PaUtil_GetCpuLoad( th->cpuLoadMeasurer ) >= .99 )
                intervalMsec = 50;
            else
                intervalMsec = 100;

            /*
            lowpassCoeff = .97;
            lowpassCoeff1 = .99999 - lowpassCoeff;
            */
        }
        else if( throttled && avgCpuLoad < .8 )
        {
            intervalMsec = 500;
            throttled = 0;

            /*
            lowpassCoeff = .9;
            lowpassCoeff1 = .99999 - lowpassCoeff;
            */
        }
    }

    pthread_cleanup_pop( 1 );   /* Execute cleanup on exit */

error:
    /* Shouldn't get here in the normal case */

    /* Pass on error code */
    pres = malloc( sizeof (PaError) );
    *pres = result;

    pthread_exit( pres );
}
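
The watchdog above relies on pthread_cleanup_push()/pthread_cleanup_pop() so that OnWatchdogExit still runs when the thread is cancelled or calls pthread_exit(). A self-contained sketch of that pattern, assuming nothing beyond the pthread API (release_resource and worker are hypothetical placeholders, not PortAudio code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Hypothetical cleanup handler: runs on cancellation, on pthread_exit(),
 * or when pthread_cleanup_pop() is called with a non-zero argument. */
static void release_resource(void *arg)
{
    printf("cleanup: releasing %p\n", arg);
    free(arg);
}

static void *worker(void *unused)
{
    void *resource = malloc(64);

    (void)unused;
    pthread_cleanup_push(release_resource, resource);

    for (;;) {
        sleep(1);               /* sleep() is itself a cancellation point */
        pthread_testcancel();   /* explicit cancellation point, as in the examples here */
    }

    pthread_cleanup_pop(1);     /* unreachable, but required to pair the push */
    return NULL;
}

int main(void)
{
    pthread_t tid;

    pthread_create(&tid, NULL, worker, NULL);
    sleep(3);
    pthread_cancel(tid);        /* release_resource runs during cancellation */
    pthread_join(tid, NULL);
    return 0;
}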
Example #3
/**
 * Issues an explicit deferred cancellation point.
 * This has no effect if thread cancellation is disabled.
 * This can be called when there is a rather slow non-sleeping operation.
 * This is also used to force a cancellation point in a function that would
 * otherwise "not always" be a one (block_FifoGet() is an example).
 */
void vlc_testcancel (void)
{
    pthread_testcancel ();
}
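
As the comment notes, an explicit point like this only has an effect while cancellation is enabled. A minimal sketch, assuming only the pthread API (do_slow_step is a hypothetical placeholder, not VLC code), of how such a point is typically placed around a slow, non-sleeping operation:

#include <pthread.h>

/* Hypothetical long-running, non-blocking step that contains no
 * cancellation points of its own. */
static void do_slow_step(void) { /* ... */ }

static void *worker(void *arg)
{
    (void)arg;
    for (;;) {
        /* Keep the slow step itself non-cancellable... */
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
        do_slow_step();
        pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);

        /* ...and offer a well-defined deferred cancellation point between
         * steps. While cancellation is disabled this call is a no-op. */
        pthread_testcancel();
    }
    return NULL;
}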
Example #4
// Run method for the thread
// It is meant to do the following:
// (1) Initialize status buffer
// (2) Set up network parameters and socket
// (3) Start main loop
//     (3a) Receive packet on socket
//     (3b) Error check packet (packet size, etc)
//     (3c) Call process_packet on received packet
// (4) Terminate thread cleanly
static void *run(hashpipe_thread_args_t * args) {

    fprintf(stdout, "N_INPUTS = %d\n", N_INPUTS);
    fprintf(stdout, "N_CHAN = %d\n", N_CHAN);
    fprintf(stdout, "N_CHAN_PER_X = %d\n", N_CHAN_PER_X);
    fprintf(stdout, "N_CHAN_PER_PACKET = %d\n", N_CHAN_PER_PACKET);
    fprintf(stdout, "N_TIME_PER_PACKET = %d\n", N_TIME_PER_PACKET);
    fprintf(stdout, "N_TIME_PER_BLOCK = %d\n", N_TIME_PER_BLOCK);
    fprintf(stdout, "N_BYTES_PER_BLOCK = %d\n", N_BYTES_PER_BLOCK);
    fprintf(stdout, "N_BYTES_PER_PACKET = %d\n", N_BYTES_PER_PACKET);
    fprintf(stdout, "N_PACKETS_PER_BLOCK = %d\n", N_PACKETS_PER_BLOCK);
    fprintf(stdout, "N_COR_MATRIX = %d\n", N_COR_MATRIX);

    // Local aliases to shorten access to args fields
    // Our output buffer happens to be a paper_input_databuf
    flag_input_databuf_t *db = (flag_input_databuf_t *)args->obuf;
    hashpipe_status_t st = args->st;
    const char * status_key = args->thread_desc->skey;

    st_p = &st;	// allow global (this source file) access to the status buffer

    /* Read network params */
    fprintf(stdout, "Setting up network parameters\n");
    struct hashpipe_udp_params up = {
	.bindhost = "0.0.0.0",
	.bindport = 8511,
	.packet_size = 8008
    };

    hashpipe_status_lock_safe(&st);
    	// Get info from status buffer if present (no change if not present)
    	hgets(st.buf, "BINDHOST", 80, up.bindhost);
    	hgeti4(st.buf, "BINDPORT", &up.bindport);
    
    	// Store bind host/port info etc in status buffer
    	hputs(st.buf, "BINDHOST", up.bindhost);
    	hputi4(st.buf, "BINDPORT", up.bindport);
    	hputu4(st.buf, "MISSEDFE", 0);
    	hputu4(st.buf, "MISSEDPK", 0);
    	hputs(st.buf, status_key, "running");
    hashpipe_status_unlock_safe(&st);

    struct hashpipe_udp_packet p;

    /* Give all the threads a chance to start before opening network socket */
    int netready = 0;
    int corready = 0;
    int checkready = 0;
    while (!netready) {
        sleep(1);
        // Check the correlator to see if it's ready yet
        hashpipe_status_lock_safe(&st);
        hgeti4(st.buf, "CORREADY",  &corready);
        hgeti4(st.buf, "SAVEREADY", &checkready);
        hashpipe_status_unlock_safe(&st);
        if (!corready) {
            continue;
        }
        //if (!checkready) {
        //    continue;
        //}

        // Check the other threads to see if they're ready yet
        // TBD

        // If we get here, then all threads are initialized
        netready = 1;
    }
    sleep(3);

    /* Set up UDP socket */
    fprintf(stderr, "NET: BINDHOST = %s\n", up.bindhost);
    fprintf(stderr, "NET: BINDPORT = %d\n", up.bindport);
    int rv = hashpipe_udp_init(&up);
    
    if (rv!=HASHPIPE_OK) {
        hashpipe_error("paper_net_thread",
                "Error opening UDP socket.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)hashpipe_udp_close, &up);


    // Initialize first few blocks in the buffer
    int i;
    for (i = 0; i < 2; i++) {
        // Wait until block semaphore is free
        if (flag_input_databuf_wait_free(db, i) != HASHPIPE_OK) {
            if (errno == EINTR) { // Interrupt occurred
                hashpipe_error(__FUNCTION__, "waiting for free block interrupted\n");
                pthread_exit(NULL);
            } else {
                hashpipe_error(__FUNCTION__, "error waiting for free block\n");
                pthread_exit(NULL);
            }
        }
        initialize_block(db, i*Nm);
    }


    // Set correlator to "start" state
    hashpipe_status_lock_safe(&st);
    hputs(st.buf, "INTSTAT", "start");
    hashpipe_status_unlock_safe(&st);

    /* Main loop */
    uint64_t packet_count = 0;

    fprintf(stdout, "Net: Starting Thread!\n");
    
    while (run_threads()) {
        // Get packet
	do {
	    p.packet_size = recv(up.sock, p.data, HASHPIPE_MAX_PACKET_SIZE, 0);
	} while (p.packet_size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK) && run_threads());
	if(!run_threads()) break;
        
        if (up.packet_size != p.packet_size && up.packet_size != p.packet_size-8) {
	    // If an error was returned instead of a valid packet size
            if (p.packet_size == -1) {
                fprintf(stderr, "uh oh!\n");
		// Log error and exit
                hashpipe_error("paper_net_thread",
                        "hashpipe_udp_recv returned error");
                perror("hashpipe_udp_recv");
                pthread_exit(NULL);
            } else {
		// Log warning and ignore wrongly sized packet
                hashpipe_warn("paper_net_thread", "Incorrect pkt size (%d)", p.packet_size);
                continue;
            }
	}
	packet_count++;
        process_packet(db, &p);

        /* Will exit if thread has been cancelled */
        pthread_testcancel();
    }

    pthread_cleanup_pop(1); /* Closes push(hashpipe_udp_close) */

    hashpipe_status_lock_busywait_safe(&st);
    hputs(st.buf, status_key, "terminated");
    hashpipe_status_unlock_safe(&st);
    return NULL;
}


static hashpipe_thread_desc_t net_thread = {
    name: "flag_net_thread",
    skey: "NETSTAT",
    init: NULL,
    run:  run,
    ibuf_desc: {NULL},
/*******************************************************************************
 Function name : healthy_check_ipping
 Description   : main processing function for the ping-based health check
 Input params  : none
 Output params : none
 Return value  : none
--------------------------------------------------------------------------------
 Last modification record :
 Author  :       王宗发
 Purpose :       new function
 Date    :       20101020
********************************************************************************/
void *healthy_check_ipping(void *data)
{
    u32 i = 0;
    u32 num = 0;
    s32 err_tmp = ERROR_SUCCESS;
    fd_set fdset;
    hc_ping_s *ping_cfg = NULL;
    hc_ping_check_s *checker = NULL;
    u32 first_time = 0;

    checker = &g_hc_ping_checker;
    memcpy(checker, (hc_ping_check_s *)data, sizeof(hc_ping_check_s));
    checker->thread_id = pthread_self();
    checker->pid = getpid();
    checker->valid_ip_num = 0;
    checker->outtime_ip_num = 0;
    FD_ZERO(&fdset);

    /* Configuration check and initialization */
    err_tmp = healthy_check_option_check_ipping();
    if(ERROR_SUCCESS != err_tmp)
    {
        return NULL;
    }
    
    for(i = 0; i < HC_CHECK_NUM_MAX; i++)
    {
        ping_cfg = &(checker->hc_ping_array[i]);

        err_tmp = healthy_check_ipping_ip(ping_cfg);
        if(ERROR_SUCCESS != err_tmp)
        {
            continue;
        }

        healthy_check_data_init(ping_cfg);
        checker->valid_ip_no[checker->valid_ip_num++] = i;      /* record the array indexes of IPs that really need checking */
        checker->outtime_ip_no[checker->outtime_ip_num++] = i;  /* initialize the timeout array */
    }
    
    while(1)
    {
        err_tmp = select(checker->fd_max + 1, &fdset, NULL, NULL, &(checker->out_time));

        if(err_tmp < 0)
        {
            if(EINTR == errno)
            {
                printf("system signal EINTR, continue\r\n");
            }
            else
            {
                printf("select process error, continue\r\n");
            }
            continue;
        }
        else if(0 == err_tmp)
        {
            if(0 == first_time)
            {
                 first_time = 1;
                 goto next_round;
            }
            healthy_check_outtime_process(&fdset);
            goto next_round;
        }
        else
        {
            healthy_check_select_process(&fdset, &num);
            if(num == checker->outtime_ip_num)
            {
                goto next_round;
            }
            else
            {
                continue;
            }
        }

next_round:
        pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
        pthread_testcancel();
        sleep(checker->interval);
                
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
        num = 0;
        healthy_check_redo_ping(&fdset);
    }

    return NULL;
}
Example #6
void thread_testcancel()
{
    pthread_testcancel();
}
void Network_RawListener( void* data )
{
    Network* Self = ( Network* ) data;
    int ConnFD;
    int i, n, retval;

    NetData* Incoming;
    socklen_t AddrLength = sizeof( struct sockaddr_in );

    int FdMax = Self->SockFD;
    fd_set master;
    fd_set fdlist;

    struct sockaddr_in RemoteAdress;

    FD_ZERO( &fdlist );
    FD_ZERO( &master );

    FD_SET( Self->SockFD, &master );

    if( listen( Self->SockFD, SOMAXCONN ) != 0 )
    {
        // ERROR: at listen()
        // printf(" ERROR: %s\nExit listen Thread!\n", strerror(errno));
        return;
    }

    while( 1 ) // While thread is running
    {
        fdlist = master;

        retval = select( FdMax+1 , &fdlist, NULL, NULL, NULL);

        pthread_testcancel();

        if( retval == -1 )
        {
            // ERROR: at select()
            // printf ....
        }
        // Look which socket was selected
        for( i = 0; i <= FdMax; i++ )
        {
            // Because it has to be in the fdlist if it was selected
            if( FD_ISSET( i, &fdlist ))
            {
                // New Connection
                if( i == Self->SockFD )
                {
                    ConnFD = accept( Self->SockFD,
                                     (struct sockaddr*) &Self->Address,
                                     &AddrLength);

                    FD_SET(ConnFD, &master);
                    if( ConnFD > FdMax )    /* track the highest fd for select() */
                        FdMax = ConnFD;

                    // Here be welcome message!

                } else
                {   // received data:
                    n = recv( i, Self->Buffer, Self->BuffSize - 1, 0 ); /* leave room for the NUL terminator below */

                    // no data received
                    if( n <= 0 )
                    {
                        // close connection

                        //printf("Client Disconnected.\n");
                        close( i );
                        FD_CLR( i, &master );
                    } else
                    {

                        if( Self->Buffer[n-1] == '\r' || Self->Buffer[n-1] == '\n' )
                            Self->Buffer[n-1] = 0;

                        if( n >= 2 && ( Self->Buffer[n-2] == '\r' || Self->Buffer[n-2] == '\n' ))
                            Self->Buffer[n-2] = 0;

                        Self->Buffer[n] = 0;
                        Incoming = NetData_Init( Self->Buffer, i, RAW_MODE );
                        // Add Data to queue
                        TQueue_AddElement( Self->InQueue, (void*) Incoming );
                        //printf("%s",Self->Buffer);
                    }
                }
            }
        }
    }

    return;
}
Example #8
int
sem_wait_nocancel (sem_t * sem)
/*
 * ------------------------------------------------------
 * DOCPUBLIC
 *      This function  waits on a semaphore, and doesn't
 *      allow cancellation.
 *
 * PARAMETERS
 *      sem
 *              pointer to an instance of sem_t
 *
 * DESCRIPTION
 *      This function waits on a semaphore. If the
 *      semaphore value is greater than zero, it decreases
 *      its value by one. If the semaphore value is zero, then
 *      the calling thread (or process) is blocked until it can
 *      successfully decrease the value or until interrupted by
 *      a signal.
 *
 * RESULTS
 *              0               successfully decreased semaphore,
 *              -1              failed, error in errno
 * ERRNO
 *              EINVAL          'sem' is not a valid semaphore,
 *              ENOSYS          semaphores are not supported,
 *              EINTR           the function was interrupted by a signal,
 *              EDEADLK         a deadlock condition was detected.
 *
 * ------------------------------------------------------
 */
{
  int result = 0;
  sem_t s = *sem;

  pthread_testcancel();

  if (s == NULL)
    {
      result = EINVAL;
    }
  else
    {
      if ((result = pthread_mutex_lock (&s->lock)) == 0)
        {
          int v;

          /* See sem_destroy.c
           */
          if (*sem == NULL)
            {
              (void) pthread_mutex_unlock (&s->lock);
              errno = EINVAL;
              return -1;
            }

          v = --s->value;
          (void) pthread_mutex_unlock (&s->lock);

          if (v < 0)
            {
              pte_osSemaphorePend(s->sem, NULL);
            }
        }

    }

  if (result != 0)
    {
      errno = result;
      return -1;
    }

  return 0;

}				/* sem_wait_nocancel */
Example #9
int
sem_wait (sem_t * sem)
/*
 * ------------------------------------------------------
 * DOCPUBLIC
 *      This function  waits on a semaphore.
 *
 * PARAMETERS
 *      sem
 *              pointer to an instance of sem_t
 *
 * DESCRIPTION
 *      This function waits on a semaphore. If the
 *      semaphore value is greater than zero, it decreases
 *      its value by one. If the semaphore value is zero, then
 *      the calling thread (or process) is blocked until it can
 *      successfully decrease the value or until interrupted by
 *      a signal.
 *
 * RESULTS
 *              0               successfully decreased semaphore,
 *              -1              failed, error in errno
 * ERRNO
 *              EINVAL          'sem' is not a valid semaphore,
 *              ENOSYS          semaphores are not supported,
 *              EINTR           the function was interrupted by a signal,
 *              EDEADLK         a deadlock condition was detected.
 *
 * ------------------------------------------------------
 */
{
  int result = 0;
  sem_t s = *sem;

  pthread_testcancel();

  if (s == NULL)
    {
      result = EINVAL;
    }
  else
    {
      if ((result = pthread_mutex_lock (&s->lock)) == 0)
        {
          int v;

          /* See sem_destroy.c
           */
          if (*sem == NULL)
            {
              (void) pthread_mutex_unlock (&s->lock);
              errno = EINVAL;
              return -1;
            }

          v = --s->value;
          (void) pthread_mutex_unlock (&s->lock);

          if (v < 0)
            {
              /* Must wait */
              pthread_cleanup_push(pte_sem_wait_cleanup, (void *) s);
              result = pte_cancellable_wait(s->sem,NULL);
              /* Cleanup if we're canceled or on any other error */
              pthread_cleanup_pop(result);

              // Wait was cancelled, indicate that we're no longer waiting on this semaphore.
              /*
                      if (result == PTE_OS_INTERRUPTED)
                        {
                          result = EINTR;
                          ++s->value;
                        }
              */
            }
        }

    }

  if (result != 0)
    {
      errno = result;
      return -1;
    }

  return 0;

}				/* sem_wait */
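
A minimal usage sketch for the counting-semaphore interface documented above, using only standard POSIX calls rather than anything specific to this implementation (the fixed-size ring queue is a hypothetical example):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

#define QSIZE 8

static int             queue[QSIZE];
static int             head, tail;
static sem_t           items;           /* counts filled slots */
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

static void *consumer(void *arg)
{
    (void)arg;
    for (;;) {
        /* sem_wait() blocks until the count is positive and, as documented
         * above, doubles as a cancellation point. */
        if (sem_wait(&items) != 0)
            continue;                   /* e.g. EINTR: just retry */
        pthread_mutex_lock(&qlock);
        int v = queue[head];
        head = (head + 1) % QSIZE;
        pthread_mutex_unlock(&qlock);
        printf("consumed %d\n", v);
    }
    return NULL;
}

static void produce(int v)
{
    pthread_mutex_lock(&qlock);
    queue[tail] = v;
    tail = (tail + 1) % QSIZE;
    pthread_mutex_unlock(&qlock);
    sem_post(&items);                   /* wake one waiting consumer */
}

int main(void)
{
    pthread_t tid;

    sem_init(&items, 0, 0);             /* initial count 0: queue starts empty */
    pthread_create(&tid, NULL, consumer, NULL);
    for (int i = 0; i < 5; i++)
        produce(i);
    sleep(1);                           /* let the consumer drain the queue */
    pthread_cancel(tid);                /* honored inside sem_wait() */
    pthread_join(tid, NULL);
    sem_destroy(&items);
    return 0;
}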
Example #10
void *
remunge (conf_t conf)
{
/*  Worker thread responsible for encoding/decoding/validating credentials.
 */
    tdata_t         tdata;
    int             cancel_state;
    unsigned long   n;
    unsigned long   got_encode_err;
    unsigned long   got_decode_err;
    struct timeval  t_start;
    struct timeval  t_stop;
    double          delta;
    munge_err_t     e;
    char           *cred;
    void           *data;
    int             dlen;
    uid_t           uid;
    gid_t           gid;

    tdata = create_tdata (conf);

    pthread_cleanup_push ((thread_cleanup_f) remunge_cleanup, tdata);

    if ((errno = pthread_mutex_lock (&conf->mutex)) != 0) {
        log_errno (EMUNGE_SNAFU, LOG_ERR, "Failed to lock mutex");
    }
    while (conf->num_creds - conf->shared.num_creds_done > 0) {

        pthread_testcancel ();

        if ((errno = pthread_setcancelstate
                    (PTHREAD_CANCEL_DISABLE, &cancel_state)) != 0) {
            log_errno (EMUNGE_SNAFU, LOG_ERR,
                "Failed to disable thread cancellation");
        }
        n = ++conf->shared.num_creds_done;

        if ((errno = pthread_mutex_unlock (&conf->mutex)) != 0) {
            log_errno (EMUNGE_SNAFU, LOG_ERR, "Failed to unlock mutex");
        }
        got_encode_err = 0;
        got_decode_err = 0;
        data = NULL;

        GET_TIMEVAL (t_start);
        e = munge_encode(&cred, tdata->ectx, conf->payload, conf->num_payload);
        GET_TIMEVAL (t_stop);

        delta = DIFF_TIMEVAL (t_stop, t_start);
        if (delta > conf->warn_time) {
            output_msg ("Credential #%lu encoding took %0.3f seconds",
                n, delta);
        }
        if (e != EMUNGE_SUCCESS) {
            output_msg ("Credential #%lu encoding failed: %s (err=%d)",
                n, munge_ctx_strerror (tdata->ectx), e);
            ++got_encode_err;
        }
        else if (conf->do_decode) {

            GET_TIMEVAL (t_start);
            e = munge_decode (cred, tdata->dctx, &data, &dlen, &uid, &gid);
            GET_TIMEVAL (t_stop);

            delta = DIFF_TIMEVAL (t_stop, t_start);
            if (delta > conf->warn_time) {
                output_msg ("Credential #%lu decoding took %0.3f seconds",
                    n, delta);
            }
            if (e != EMUNGE_SUCCESS) {
                output_msg ("Credential #%lu decoding failed: %s (err=%d)",
                    n, munge_ctx_strerror (tdata->dctx), e);
                ++got_decode_err;
            }

/*  FIXME:
 *    The following block does some validating of the decoded credential.
 *    It should have a cmdline option to enable this validation check.
 *    The decode ctx should also be checked against the encode ctx.
 *    This becomes slightly more difficult in that it must also take
 *    into account the default field settings.
 *
 *    This block should be moved into a separate function (or more).
 *    The [cred], [data], [dlen], [uid], and [gid] vars could be placed
 *    into the tdata struct to facilitate parameter passing.
 */
#if 0
            else if (conf->do_validate) {
                if (getuid () != uid) {
                output_msg (
                    "Credential #%lu UID %d does not match process UID %d",
                    n, uid, getuid ());
                }
                if (getgid () != gid) {
                    output_msg (
                        "Credential #%lu GID %d does not match process GID %d",
                        n, gid, getgid ());
                }
                if (conf->num_payload != dlen) {
                    output_msg (
                        "Credential #%lu payload length mismatch (%d/%d)",
                        n, conf->num_payload, dlen);
                }
                else if (data && memcmp (conf->payload, data, dlen) != 0) {
                    output_msg ("Credential #%lu payload mismatch", n);
                }
            }
#endif /* 0 */

            /*  The 'data' parm can still be set on certain munge errors.
             */
            if (data != NULL) {
                free (data);
            }
        }
        if (cred != NULL) {
            free (cred);
        }
        if ((errno = pthread_setcancelstate
                    (cancel_state, &cancel_state)) != 0) {
            log_errno (EMUNGE_SNAFU, LOG_ERR,
                "Failed to enable thread cancellation");
        }
        if ((errno = pthread_mutex_lock (&conf->mutex)) != 0) {
            log_errno (EMUNGE_SNAFU, LOG_ERR, "Failed to lock mutex");
        }
        conf->shared.num_encode_errs += got_encode_err;
        conf->shared.num_decode_errs += got_decode_err;
    }
    pthread_cleanup_pop (1);
    return (NULL);
}
Example #11
int
sem_wait (sem_t * sem)
     /*
      * ------------------------------------------------------
      * DOCPUBLIC
      *      This function  waits on a semaphore.
      *
      * PARAMETERS
      *      sem
      *              pointer to an instance of sem_t
      *
      * DESCRIPTION
      *      This function waits on a semaphore. If the
      *      semaphore value is greater than zero, it decreases
      *      its value by one. If the semaphore value is zero, then
      *      the calling thread (or process) is blocked until it can
      *      successfully decrease the value or until interrupted by
      *      a signal.
      *
      * RESULTS
      *              0               successfully decreased semaphore,
      *              -1              failed, error in errno
      * ERRNO
      *              EINVAL          'sem' is not a valid semaphore,
      *              ENOSYS          semaphores are not supported,
      *              EINTR           the function was interrupted by a signal,
      *              EDEADLK         a deadlock condition was detected.
      *
      * ------------------------------------------------------
      */
{
  int result = 0;
  sem_t s = *sem;

  pthread_testcancel();

  if (s == NULL)
    {
      result = EINVAL;
    }
  else
    {
      if ((result = pthread_mutex_lock (&s->lock)) == 0)
	{
	  int v;

	  /* See sem_destroy.c
	   */
	  if (*sem == NULL)
	    {
	      (void) pthread_mutex_unlock (&s->lock);
	      errno = EINVAL;
	      return -1;
	    }

          v = --s->value;
	  (void) pthread_mutex_unlock (&s->lock);

	  if (v < 0)
	    {
#if defined(_MSC_VER) && _MSC_VER < 1400
#pragma inline_depth(0)
#endif
	      /* Must wait */
	      pthread_cleanup_push(ptw32_sem_wait_cleanup, (void *) s);
	      result = pthreadCancelableWait (s->sem);
	      /* Cleanup if we're canceled or on any other error */
	      pthread_cleanup_pop(result);
#if defined(_MSC_VER) && _MSC_VER < 1400
#pragma inline_depth()
#endif
	    }
#if defined(NEED_SEM)

	  if (!result && pthread_mutex_lock (&s->lock) == 0)
	    {
	      if (*sem == NULL)
	        {
	          (void) pthread_mutex_unlock (&s->lock);
	          errno = EINVAL;
	          return -1;
	        }

	      if (s->leftToUnblock > 0)
		{
		  --s->leftToUnblock;
		  SetEvent(s->sem);
		}
	      (void) pthread_mutex_unlock (&s->lock);
	    }

#endif /* NEED_SEM */

	}

    }

  if (result != 0)
    {
      errno = result;
      return -1;
    }

  return 0;

}				/* sem_wait */
Example #12
void vegas_pfb_thread(void *_args) {

    /* Get args */
    struct guppi_thread_args *args = (struct guppi_thread_args *)_args;
    int rv;

    /* Set cpu affinity */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    CPU_ZERO(&cpuset);          /* the mask must be cleared before CPU_CLR/CPU_SET */
    CPU_CLR(13, &cpuset);
    CPU_SET(11, &cpuset);
    rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) { 
        guppi_error("vegas_pfb_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, args->priority);
    if (rv<0) {
        guppi_error("vegas_pfb_thread", "Error setting priority level.");
        perror("set_priority");
    }

    /* Attach to status shared mem area */
    struct guppi_status st;
    rv = guppi_status_attach(&st);
    if (rv!=GUPPI_OK) {
        guppi_error("vegas_pfb_thread", 
                "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);
    pthread_cleanup_push((void *)guppi_thread_set_finished, args);

    /* Init status */
    guppi_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    guppi_status_unlock_safe(&st);

    /* Init structs */
    struct guppi_params gp;
    struct sdfits sf;
    pthread_cleanup_push((void *)guppi_free_sdfits, &sf);

    /* Attach to databuf shared mem */
    struct guppi_databuf *db_in, *db_out;
    db_in = guppi_databuf_attach(args->input_buffer);
    if (db_in==NULL) {
        char msg[256];
        sprintf(msg, "Error attaching to databuf(%d) shared memory.",
                args->input_buffer);
        guppi_error("vegas_pfb_thread", msg);
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db_in);
    db_out = guppi_databuf_attach(args->output_buffer);
    if (db_out==NULL) {
        char msg[256];
        sprintf(msg, "Error attaching to databuf(%d) shared memory.",
                args->output_buffer);
        guppi_error("vegas_pfb_thread", msg);
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db_out);

    /* Loop */
    char *hdr_in = NULL;
    int curblock_in=0;
    int first=1;
    int acc_len = 0;
    int nchan = 0;
    int nsubband = 0;
    signal(SIGINT,cc);

    guppi_status_lock_safe(&st);
    if (hgeti4(st.buf, "NCHAN", &nchan)==0) {
        fprintf(stderr, "ERROR: %s not in status shm!\n", "NCHAN");
    }
    if (hgeti4(st.buf, "NSUBBAND", &nsubband)==0) {
        fprintf(stderr, "ERROR: %s not in status shm!\n", "NSUBBAND");
    }
    guppi_status_unlock_safe(&st);
    if (EXIT_SUCCESS != init_gpu(db_in->block_size,
                                 db_out->block_size,
                                 nsubband,
                                 nchan))
    {
        (void) fprintf(stderr, "ERROR: GPU initialisation failed!\n");
        run = 0;
    }

    while (run) {

        /* Note waiting status */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "waiting");
        guppi_status_unlock_safe(&st);

        /* Wait for buf to have data */
        rv = guppi_databuf_wait_filled(db_in, curblock_in);
        if (rv!=0) continue;

        /* Note waiting status, current input block */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "processing");
        hputi4(st.buf, "PFBBLKIN", curblock_in);
        guppi_status_unlock_safe(&st);

        hdr_in = guppi_databuf_header(db_in, curblock_in);
        
        /* Get params */
        if (first)
        {
            guppi_read_obs_params(hdr_in, &gp, &sf);
            /* Read required exposure from status shared memory, and calculate
               corresponding accumulation length */
            acc_len = (sf.hdr.chan_bw * sf.hdr.hwexposr);
        }
        guppi_read_subint_params(hdr_in, &gp, &sf);

        /* Call PFB function */
        do_pfb(db_in, curblock_in, db_out, first, st, acc_len);

        /* Mark input block as free */
        guppi_databuf_set_free(db_in, curblock_in);
        /* Go to next input block */
        curblock_in = (curblock_in + 1) % db_in->n_block;

        /* Check for cancel */
        pthread_testcancel();

        if (first) {
            first=0;
        }
    }
    run=0;

    //cudaThreadExit();
    pthread_exit(NULL);

    cleanup_gpu();

    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach(out) */
    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach(in) */
    pthread_cleanup_pop(0); /* Closes guppi_free_sdfits */
    pthread_cleanup_pop(0); /* Closes guppi_thread_set_finished */
    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes guppi_status_detach */

}
/**
 * sending_buffer_ contains one block from every partition (e.t. socket
 * connection), if one block is send completely, supply one from
 * partitioned_data_buffer_
 */
void* ExchangeSenderPipeline::Sender(void* arg) {
  ExchangeSenderPipeline* Pthis =
      reinterpret_cast<ExchangeSenderPipeline*>(arg);
  pthread_testcancel();

  //  LOG(INFO) << "(exchange_id = " << Pthis->state_.exchange_id_
  //            << " , partition_offset = " << Pthis->state_.partition_offset_
  //            << " ) sender thread created successfully!";
  RAW_LOG(INFO,
          "exchange_id= %d, par_off= %d sender thread is created successfully!",
          Pthis->state_.exchange_id_, Pthis->state_.partition_offset_);
  Pthis->sending_buffer_->Initialized();
  //  Pthis->sendedblocks_ = 0;
  bool consumed = false;
  int partition_id = -1;
  BlockContainer* block_for_sending = NULL;
  int recvbytes;

  try {
    while (true) {
      pthread_testcancel();
      consumed = false;

      block_for_sending = NULL;
      partition_id =
          Pthis->sending_buffer_->getBlockForSending(block_for_sending);
      if (partition_id >= 0) {
#ifndef ALL_NETWORK
        // if target partition located at local node, so copy the
        // block_for_sending to corresponding exchange_mereger, but the last
        // block which is an empty block should be sent through network for
        // notifying the end of corresponding connections.
        auto it = Pthis->local_partition_exch_id_.find(partition_id);
        if (Pthis->local_partition_exch_id_.cend() != it &&
            (!block_for_sending->IsEmpty())) {
          if (block_for_sending->GetRestSizeToHandle() > 0) {
            consumed = Environment::getInstance()
                           ->getExchangeTracker()
                           ->getBuffer(it->second)
                           ->InsertOneBlock(
                               reinterpret_cast<Block*>(block_for_sending));
            block_for_sending->IncreaseActualSize(block_for_sending->getsize());
          } else {
            consumed = true;
          }
        } else {
#else
        {
#endif
          // send block to remote node
          // get one block from sending_buffer which isn't empty
          pthread_testcancel();
          if (block_for_sending->GetRestSizeToHandle() > 0) {
            recvbytes =
                send(Pthis->socket_fd_upper_list_[partition_id],
                     reinterpret_cast<char*>(block_for_sending->getBlock()) +
                         block_for_sending->GetCurSize(),
                     block_for_sending->GetRestSizeToHandle(), MSG_DONTWAIT);
            if (recvbytes == -1) {
              if (errno == EAGAIN) {
                continue;
              }
              LOG(ERROR) << "(exchange_id = " << Pthis->state_.exchange_id_
                         << " , partition_offset = "
                         << Pthis->state_.partition_offset_
                         << " ) sender send error: " << errno << " fd = "
                         << Pthis->socket_fd_upper_list_[partition_id]
                         << std::endl;
              break;
            } else {
              if (recvbytes < block_for_sending->GetRestSizeToHandle()) {
/* the block is not entirely sent. */
#ifdef GLOG_STATUS

                LOG(INFO) << "(exchange_id = " << Pthis->state_.exchange_id_
                          << " , partition_offset = "
                          << Pthis->state_.partition_offset_
                          << " ) doesn't send a block completely, actual send "
                             "bytes = " << recvbytes << " rest bytes = "
                          << block_for_sending->GetRestSizeToHandle()
                          << std::endl;
#endif
                block_for_sending->IncreaseActualSize(recvbytes);
                continue;
              } else {
                /** the block is sent in entirety. **/
                block_for_sending->IncreaseActualSize(recvbytes);
                ++Pthis->sendedblocks_;

                /*
                 can not be executed in case of abort in glog in this phase
                 one of the following should be executed after rewriting
                  Pthis->logging_->log(
                      "[%llu,%u]Send the %u block(bytes=%d, rest
                      size=%d) to [%d]",
                      Pthis->state_.exchange_id_,
                      Pthis->state_.partition_offset_,
                      Pthis->sendedblocks_, recvbytes,
                      block_for_sending_->GetRestSize(),
                      Pthis->state_.upper_id_list_[partition_id]);
                LOG(INFO) << "[ExchangeEagerLower]: "
                          << "[" << Pthis->state_.exchange_id_ << ","
                          << Pthis->state_.partition_offset_ << "]Send the "
                          << Pthis->sendedblocks_ << " block(bytes=" <<
                recvbytes
                          << ", rest size=" <<
                block_for_sending->GetRestSizeToHandle()
                          << ") to ["
                          << Pthis->state_.upper_id_list_[partition_id] << "]"
                          << std::endl;
                              cout << "[ExchangeEagerLower]: " << "["
                                    << Pthis->state_.exchange_id_ << ","
                                    << Pthis->state_.partition_offset_ << "]Send
                                    the "
                                    << Pthis->sendedblocks_ << " block(bytes="
                <<
                                    recvbytes
                                    << ", rest size=" <<
                                    block_for_sending_->GetRestSize()
                                    << ") to [" <<
                                    Pthis->state_.upper_id_list_[partition_id]
                                    << "]" << std::endl;
                                */
                consumed = true;
              }
            }
          } else {
            consumed = true;
          }
        }
      } else {
        /* "partition_id<0" means that block_for_sending_ is empty, so we get
         * one block from the buffer into the block_for_sending_
         */
        unsigned index = Pthis->partitioned_data_buffer_->getBlock(
            *Pthis->block_for_sending_buffer_);
        Pthis->block_for_sending_buffer_->reset();
        Pthis->sending_buffer_->insert(index, Pthis->block_for_sending_buffer_);
      }
      if (consumed == true) {
        /* In the current loop, we have sent an entire block to the Receiver,
         * so we should get a new block into the block_for_sender_, but note
         * one empty block is also appended in partitioned_data_buffer_ in
         * next()
         */
        pthread_testcancel();
        if (Pthis->partitioned_data_buffer_->getBlock(
                *Pthis->block_for_sending_buffer_, partition_id)) {
          Pthis->block_for_sending_buffer_->reset();
          Pthis->sending_buffer_->insert(partition_id,
                                         Pthis->block_for_sending_buffer_);
        } else {
          /**
           * TODO: test the effort of the following sleeping statement and
           * consider whether it should be replaced by conditioned wait
           **/
          usleep(1);
        }
      }
    }
  } catch (std::exception& e) {
    pthread_cancel(pthread_self());
  }
  return NULL;  // thread entry point must return a value
}
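
The sender above pushes each block over a non-blocking socket, so it has to deal with both EAGAIN and partial writes before it can mark a block as consumed. A stripped-down sketch of that send loop in plain C, using only the POSIX socket API (fd, buf and len are hypothetical parameters, not names from this code base):

#include <errno.h>
#include <stddef.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Send len bytes from buf on a non-blocking socket, retrying on EAGAIN and
 * resuming after partial writes. Returns 0 on success, -1 on a real error. */
static int send_all_nonblocking(int fd, const char *buf, size_t len)
{
    size_t done = 0;

    while (done < len) {
        ssize_t n = send(fd, buf + done, len - done, MSG_DONTWAIT);
        if (n == -1) {
            if (errno == EAGAIN || errno == EWOULDBLOCK)
                continue;               /* socket buffer full: try again */
            return -1;                  /* genuine socket error */
        }
        done += (size_t)n;              /* partial write: advance the cursor */
    }
    return 0;
}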
Example #14
int
sem_timedwait (sem_t * sem, const struct timespec *abstime)
/*
 * ------------------------------------------------------
 * DOCPUBLIC
 *      This function waits on a semaphore possibly until
 *      'abstime' time.
 *
 * PARAMETERS
 *      sem
 *              pointer to an instance of sem_t
 *
 *      abstime
 *              pointer to an instance of struct timespec
 *
 * DESCRIPTION
 *      This function waits on a semaphore. If the
 *      semaphore value is greater than zero, it decreases
 *      its value by one. If the semaphore value is zero, then
 *      the calling thread (or process) is blocked until it can
 *      successfully decrease the value or until interrupted by
 *      a signal.
 *
 *      If 'abstime' is a NULL pointer then this function will
 *      block until it can successfully decrease the value or
 *      until interrupted by a signal.
 *
 * RESULTS
 *              0               successfully decreased semaphore,
 *              -1              failed, error in errno
 * ERRNO
 *              EINVAL          'sem' is not a valid semaphore,
 *              ENOSYS          semaphores are not supported,
 *              EINTR           the function was interrupted by a signal,
 *              EDEADLK         a deadlock condition was detected.
 *              ETIMEDOUT       abstime elapsed before success.
 *
 * ------------------------------------------------------
 */
{
    int result = 0;
    sem_t s = NULL;

    if (sem == NULL)
    {
        result = EINVAL;
    }
    else
    {
        DWORD milliseconds;

        s = *sem;	/* dereference only after the NULL check above */

        if (abstime == NULL)
        {
            milliseconds = INFINITE;
        }
        else
        {
            /*
             * Calculate timeout as milliseconds from current system time.
             */
            milliseconds = ptw32_relmillisecs (abstime);
        }

        pthread_testcancel();

        if ((result = pthread_mutex_lock (&s->lock)) == 0)
        {
            int v = --s->value;
            (void) pthread_mutex_unlock (&s->lock);

            if (v < 0)
            {
#ifdef NEED_SEM
                int timedout;
#endif
                sem_timedwait_cleanup_args_t cleanup_args;

                cleanup_args.sem = s;
                cleanup_args.resultPtr = &result;

#ifdef _MSC_VER
#pragma inline_depth(0)
#endif
                /* Must wait */
                pthread_cleanup_push(ptw32_sem_timedwait_cleanup, (void *) &cleanup_args);
#ifdef NEED_SEM
                timedout =
#endif
                    result = pthreadCancelableTimedWait (s->sem, milliseconds);
                pthread_cleanup_pop(result);
#ifdef _MSC_VER
#pragma inline_depth()
#endif

#ifdef NEED_SEM

                if (!timedout && pthread_mutex_lock (&s->lock) == 0)
                {
                    if (s->leftToUnblock > 0)
                    {
                        --s->leftToUnblock;
                        SetEvent(s->sem);
                    }
                    (void) pthread_mutex_unlock (&s->lock);
                }

#endif /* NEED_SEM */

            }
        }

    }

    if (result != 0)
    {

        errno = result;
        return -1;

    }

    return 0;

}				/* sem_timedwait */
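
Because abstime is an absolute deadline, callers normally derive it from the current time. A minimal sketch using only standard POSIX calls (work_sem is a hypothetical semaphore assumed to be initialized elsewhere with sem_init()):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

extern sem_t work_sem;          /* hypothetical: initialized elsewhere */

/* Wait up to `seconds` for the semaphore; returns 0 if acquired, or -1 with
 * errno == ETIMEDOUT if the deadline passes first. */
int wait_with_timeout(unsigned seconds)
{
    struct timespec deadline;

    clock_gettime(CLOCK_REALTIME, &deadline);   /* sem_timedwait() measures against CLOCK_REALTIME */
    deadline.tv_sec += seconds;

    while (sem_timedwait(&work_sem, &deadline) == -1) {
        if (errno == EINTR)
            continue;                   /* interrupted by a signal: retry with the same deadline */
        if (errno == ETIMEDOUT)
            fprintf(stderr, "timed out after %u seconds\n", seconds);
        return -1;
    }
    return 0;
}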
Example #15
void
TE::Main() {
    double g_dx, g_da;
    double alpha, theta, phi;
    double vx, va;
    player_pose_t relativeGoal;
    player_pose_t currGoal;

    // Fill in the TE's parameter structure

    current_dir = 1;

    for (;;) {
        usleep(200000); // 200 ms delay

        pthread_testcancel();

        // process any pending messages
        ProcessMessages();

        // are we waiting for a stall to clear?
        if (waiting) {
            PutPositionCmd(0, 0);
            stall = true;
            if (verbose)
                PLAYER_MSG0(0, "TE::Main waiting");
            continue;
        }

        // do we have a goal?
        if (!active_goal) {
            continue;
        }

        // relative position of the goal
        relativeGoal.px = goal.px - odom_pose.px;
        relativeGoal.py = goal.py - odom_pose.py;
        relativeGoal.pa = goal.pa;

        // angle from 0 to the goal (theta)
        theta = atan2(relativeGoal.py, relativeGoal.px);
        // difference between the robot orientation angle (psi) and the goal vector (theta)
        alpha = angle_diff(theta, odom_pose.pa);
        g_dx = hypot(relativeGoal.px, relativeGoal.py);


        if (obstacle && g_dx > dist_eps) {
            //PLAYER_MSG0(1, "TE: obstacle avoidance");
            if (fabs(beta) > ang_eps)
                phi = angle_diff(fabs(beta) / beta * M_PI / 2,
                    angle_diff(beta, alpha));
            else
                phi = angle_diff(M_PI / 2, angle_diff(beta, alpha));
            currGoal.px = cos(phi) * relativeGoal.px +
                    sin(phi) * relativeGoal.py;
            currGoal.py = -sin(phi) * relativeGoal.px +
                    cos(phi) * relativeGoal.py;
            currGoal.pa = relativeGoal.pa;
        } else
            currGoal = relativeGoal;

        // angle from 0 to the goal (theta)
        theta = atan2(currGoal.py, currGoal.px);
        // difference between the robot orientation angle (psi) and the goal vector (theta)
        alpha = angle_diff(theta, odom_pose.pa);
        // are we at the goal?
        g_dx = hypot(currGoal.px, currGoal.py);
        g_da = angle_diff(currGoal.pa, odom_pose.pa);

        if (g_dx < dist_eps) { // we are close to the goal
            if (verbose)
                PLAYER_MSG0(0, "TE::Main Close to goal");
            if (fabs(g_da) < ang_eps) { // with the correct orientation
                active_goal = false;
                PutPositionCmd(0.0, 0.0);
                if (verbose)
                    PLAYER_MSG0(0, "TE::Main At goal");
                continue;
            } else { // need to correct the orientation after reaching the goal
                if (verbose)
                    PLAYER_MSG0(0, "TE::Main Correcting orientation");
                vx = 0;
                va = k_a * va_max * tanh(10 * g_da);
            }
        } else {
            // steering control
            vx = vx_max * tanh(fabs(g_dx)) * fabs(cos(alpha));
            va = k_a * va_max * tanh(alpha);
        }

        if (nearObstDist <= obs_dist) {
            vx = vx * (nearObstDist - min_dist) / (obs_dist - min_dist);
        }
        if (nearObstDist <= min_dist) {
            vx = 0;
            va = 0;
        }

        PutPositionCmd(vx, va);
    }
}
Example #16
void eStreamThread::thread() {
	const int bufsize = 50*1024;
	unsigned char *buf = (unsigned char *)malloc(bufsize);
	bool eof = false;
	fd_set rfds;
	fd_set wfds;
	struct timeval timeout;
	int rc,r,w,maxfd;
	time_t next_scantime = 0;
	bool sosSend = false;
	m_running = true;

	if(buf == NULL)
	{
		eDebug("eStreamThread::thread: failed to allocate buffer, aborting!");
		m_stop = true;
	}

	r = w = 0;
	hasStarted();
	eDebug("eStreamThread started");
	while (!m_stop) {
		pthread_testcancel();
		FD_ZERO(&rfds);
		FD_ZERO(&wfds);
		maxfd = 0;
		timeout.tv_sec = 1;
		timeout.tv_usec = 0;
		if (r < bufsize && !eof) {
			FD_SET(m_srcfd, &rfds);
			maxfd = MAX(maxfd, m_srcfd);
		}
		if (w < r) {
			FD_SET(m_destfd, &wfds);
			maxfd = MAX(maxfd, m_destfd);
		}
		rc = select(maxfd+1, &rfds, &wfds, NULL, &timeout);
		if (rc == 0) {
			eDebug("eStreamThread::thread: timeout!");
			continue;
		}
		if (rc < 0) {
			eDebug("eStreamThread::thread: error in select (%d)", errno);
			break;
		}
		if (FD_ISSET(m_srcfd, &rfds)) {
			rc = ::read(m_srcfd, buf+r, bufsize - r);
			if (rc < 0) {
				eDebug("eStreamThread::thread: error in read (%d)", errno);
				m_messagepump.send(evtReadError);
				break;
			} else if (rc == 0) {
				eof = true;
			} else {
				if (!sosSend) {
					sosSend = true;
					m_messagepump.send(evtSOS);
				}
				r += rc;
				if (r == bufsize) eDebug("eStreamThread::thread: buffer full");
			}
		}
		if (FD_ISSET(m_destfd, &wfds) && ((r > 10*1024) || eof)) {
			rc = ::write(m_destfd, buf+w, r-w);
			if (rc < 0) {
				eDebug("eStreamThread::thread: error in write (%d)", errno);
				m_messagepump.send(evtWriteError);
				break;
			}
			w += rc;
			//eDebug("eStreamThread::thread: buffer r=%d w=%d",r,w);
			if (w == r) {
				if (time(0) >= next_scantime) {
					if (scanAudioInfo(buf, r)) {
						m_messagepump.send(evtStreamInfo);
						next_scantime = time(0) + 1;
					}
				}
				w = r = 0;
			}
		}
		if (eof && (r==w)) {
			m_messagepump.send(evtEOS);
			break;
		}
	}
	free(buf);
	eDebug("eStreamThread end");
}
Example #17
/* and the PerformMove function */
void *FindBestMoveThread(void *arg)
{
    int i,x, depth; 
    double val;
    double alpha = -DBL_MAX;    /* lower bound: DBL_MIN is the smallest positive double, not "minus infinity" */
    double beta  =  DBL_MAX;
    struct State state; 
    struct State newState; 
    int player = *((int *)arg);
    
    int max_depth = (MaxDepth == -1) ? INT_MAX : MaxDepth;

    /* Set up the current state */
    state.player = player;
    memcpy(state.board,board,64*sizeof(char));
    memset(bestmove,0,12*sizeof(char));

    /* Find the legal moves for the current state */
    FindLegalMoves(&state);

    /* Pick a random move as the best move just to get started. */
    i = rand() % state.numLegalMoves;
    memcpy(bestmove, state.movelist[i], MoveLength(state.movelist[i]));

    /* Do an iterative deepening search for the best move.  This uses alpha
     * beta pruning to discard unnecessary branches from the search. */
    for (depth=START_DEPTH; depth <= max_depth; depth++)
    {
        /* Check to see if this thread has been cancelled before moving on. */
        pthread_testcancel();

        /* Walk the move list and find the best one. */
        for (x=0; x<state.numLegalMoves; ++x)
        {
            /* Copy the current board state. */
            memcpy(&newState, &state, sizeof(struct State));

            /* Perform a move on the copied state. */
            PerformMove(newState.board, 
                        state.movelist[x],
                        MoveLength(state.movelist[x]));

            /* Toggle the current player. */
            newState.player = (newState.player == 1) ? 2 : 1;

            /* Perform a depth limited MiniMax search for the best move.
             * Uses Alpha-Beta pruning. */
            val = minVal(alpha, beta, &newState, depth);

            if (val > alpha)
            {
                i     = x;
                alpha = val;
            }
        }
        
        /* Clear the best move and then copy in the new best move. */
        memset(bestmove, 0, 12*sizeof(char));
        memcpy(bestmove, state.movelist[i], MoveLength(state.movelist[i]));

        /* fprintf(stderr, "DEBUG :: %s searched to depth %d.\n", prog_name, depth); */
    }

    return NULL;
}
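
The pthread_testcancel() at the top of each deepening iteration is what lets a controller stop the search when its time budget runs out: bestmove is kept updated with the best move found so far. A sketch of such a driver, assuming only the pthread API plus the FindBestMoveThread() and bestmove symbols shown above (the timing policy is illustrative, not taken from the original program):

#include <pthread.h>
#include <unistd.h>

/* Assumed to be provided by the search module above. */
extern void *FindBestMoveThread(void *arg);
extern char bestmove[12];

/* Run the iterative-deepening search for at most `seconds`, then cancel it.
 * Whatever the search last copied into bestmove is used as the answer. */
void FindBestMoveTimed(int player, unsigned seconds)
{
    pthread_t tid;

    pthread_create(&tid, NULL, FindBestMoveThread, &player);
    sleep(seconds);
    pthread_cancel(tid);        /* honored at the next pthread_testcancel() */
    pthread_join(tid, NULL);    /* bestmove is stable once the thread has exited */
}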
Example #18
void Thread::TestCancel() {
#if CONFIG_PTHREAD_TESTCANCEL
    pthread_testcancel();
#endif
}
void *cw_ap_local_cfg_dynamic_update(void *param)
{
	u32 i, ret;
	time_t cw_network = 0;
	s8 *url = NULL;
	u32 ip = 0;
	s8 cmd[128]={0};
	s8 logname[64]={0};
	struct in_addr tmp_ip;
	//sqlite3_res res;
	dns_white_list_t dns_white_list = {0,{0}};
	struct stat filestat;

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	/* The following is already the system default, so it stays commented out */
	//pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);

	
	while(1)
	{
	
		pthread_testcancel();
		/* This depends on support from the underlying system */
		stat(DNS_CONFIG_FILE, &filestat);
		if(cw_network < filestat.st_ctime)
		{
			cw_network = filestat.st_ctime;
			memset(&dns_white_list, 0, sizeof(dns_white_list));
			/* Update the local (br-lan) address first, otherwise the management web page becomes unreachable */
			get_interface_info("br-lan", &ip, NULL);
			dns_white_list.list[dns_white_list.number++] = ip;
			cloud_wlan_sendto_kmod(CW_NLMSG_UPDATE_WHITE_LIST, (s8 *)&dns_white_list, sizeof(dns_white_list_t));
			
			while(1)
			{
				get_interface_info(g_ap_com_cfg.ap_com_eth, &ip, NULL);
				if(ip != 0)
				{
					break;
				}
			}

			/* Update the portal URL */
			cw_ap_local_update_portal_wl();

			cw_ap_local_update_url_wl(&dns_white_list);

		}
		stat(CW_AP_LOG_FILE, &filestat);
		if( filestat.st_size> CW_AP_LOG_MAX)
		{
			system(">cw_ap.log");
		}
		 
		stat(CW_AP_KLOG_FILE, &filestat);
		if( filestat.st_size> CW_AP_KLOG_MAX)
		{

			snprintf(logname, 64, "%02x%02x%02x%02x%02x%02x.%s.%ld", 
				(u8)g_ap_udp_sock.client_mac[0],(u8)g_ap_udp_sock.client_mac[1],
				(u8)g_ap_udp_sock.client_mac[2],(u8)g_ap_udp_sock.client_mac[3],
				(u8)g_ap_udp_sock.client_mac[4],(u8)g_ap_udp_sock.client_mac[5],
				CW_AP_KLOG_FILE, (long)filestat.st_mtime
				);
			
			CW_DEBUG_U("Turn on the log: %s\n",logname);
			
			tmp_ip.s_addr = g_ap_com_cfg.ac_com_addr;
			snprintf(cmd, 128, "mv %s %s; tftp -p -l %s %s; rm %s",
				CW_AP_KLOG_FILE, logname, logname, inet_ntoa(tmp_ip), logname);
			system(cmd);
		}
		/*
		stat(CLIENTS_FILE_PATH, &filestat);
		if(clients < filestat.st_ctime)
		{
			clients = filestat.st_ctime;
			CW_DEBUG_U("call clients config update\n");
		}
		*/
		pthread_testcancel();
		sleep(3);
	};
}
Example #20
void memories::lock() {
	if(m_memmutex == NULL)
		return;
	while(pthread_mutex_trylock(m_memmutex) == EBUSY)
		pthread_testcancel();
}
void Network_CliListener( void* data )
{
    Network* Self = ( Network* ) data;
    int ConnFD;
    int i, n, retval;

    NetData* Incoming;
    socklen_t AddrLength = sizeof( struct sockaddr_in );

    int FdMax = Self->SockFD;
    fd_set master;
    fd_set fdlist;

    struct sockaddr_in RemoteAdress;

    FD_ZERO( &fdlist );
    FD_ZERO( &master );     /* master must be cleared before adding the listening socket */

    FD_SET( Self->SockFD, &master );

    if( listen( Self->SockFD, SOMAXCONN ) != 0 )
    {
        // ERROR: at listen()
        // printf(" ERROR: %s\nExit listen Thread!\n", strerror(errno));
        return;
    }

    while( 1 ) // While thread is running
    {
        fdlist = master;

        retval = select( FdMax+1 , &fdlist, NULL, NULL, NULL);
        pthread_testcancel();
        if( retval == -1 )
        {
            // ERROR: at select()
            // printf ....
        }
        // Look which socket was selected
        for( i = 0; i <= FdMax; i++ )
        {
            // Because it has to be in the fdlist if it was selected
            if( FD_ISSET( i, &fdlist ))
            {
                // New Connection
                if( i == Self->SockFD )
                {
                    ConnFD = accept( Self->SockFD,
                                     (struct sockaddr*) &Self->Address,
                                     &AddrLength);
                    if( ConnFD < 0 )
                        continue;   // accept() failed, keep listening

                    FD_SET(ConnFD, &master);
                    if( ConnFD > FdMax )
                        FdMax = ConnFD;

                    // Telnet option negotiation:
                    // IAC WILL SUPPRESS-GO-AHEAD, IAC WILL ECHO,
                    // IAC DO SUPPRESS-GO-AHEAD, IAC DO ECHO
                    char* negotiate = "\xFF\xFB\x03"
                                      "\xFF\xFB\x01"
                                      "\xFF\xFD\x03"
                                      "\xFF\xFD\x01";

                    send( ConnFD, negotiate, strlen(negotiate), 0 );

                } else
                {   // received data:
                    n = recv( i, Self->Buffer, Self->BuffSize, 0 );

                    // no data received
                    if( n <= 0 )
                    {
                        // close connection

                        //printf("Client Disconnected.\n");
                        close( i );
                        FD_CLR( i, &master );
                    } else
                    {
                        // This is a dirty hack for telnet
                        // TODO: catch the first strings after connection and do NOT
                        // echo them
                        if( n > 3 )
                            continue;

                        Self->Buffer[n] = 0;
                        //send( i, Self->Buffer, n, 0 );
                        Incoming = NetData_Init( Self->Buffer, i, CLI_MODE );
                        // Add Data to queue
                        TQueue_AddElement( Self->InQueue, (void*) Incoming );

                        //printf("%s\n",Self->Buffer);
                    }
                }
            }
        }
    }

    return;
}
Example #22
size_t buffer_get_data (buf_t *buf, char *data, long nbytes)
{
  int write_amount;
  int orig_size;

  orig_size = nbytes;

  DEBUG("Enter buffer_get_data");

  pthread_cleanup_push(buffer_mutex_unlock, buf);

  LOCK_MUTEX(buf->mutex);

  /* Copy data out of the buffer as it becomes available */
  while (nbytes > 0) {

    if (buf->abort_write)
      break;

    DEBUG("Obtaining lock on buffer");
    /* Block until we can read something */
    if (buf->curfill == 0 && buf->eos)
      break; /* No more data to read */
      
    if (buf->curfill == 0 || (buf->prebuffering && !buf->eos)) {
      DEBUG("Waiting for more data to copy.");
      COND_WAIT(buf->playback_cond, buf->mutex);
    }

    if (buf->abort_write)
      break;

    /* Note: Even if curfill is still 0, nothing bad will happen here */
    
    /* For simplicity, the number of bytes played must satisfy
       the following three requirements:
       
       1. Do not copy more bytes than are stored in the buffer.
       2. Do not copy more bytes than the requested data size.
       3. Do not run off the end of the buffer. */
    write_amount = compute_dequeue_size(buf, nbytes);

    UNLOCK_MUTEX(buf->mutex);
    execute_actions(buf, &buf->actions, buf->position);

    /* No need to lock mutex here because the other thread will
       NEVER reduce the number of bytes stored in the buffer */
    DEBUG1("Copying %d bytes from the buffer", write_amount);
    memcpy(data, buf->buffer + buf->start, write_amount);
    LOCK_MUTEX(buf->mutex);

    buf->curfill -= write_amount;
    data += write_amount;
    nbytes -= write_amount;
    buf->start = (buf->start + write_amount) % buf->size;
    DEBUG1("Updated buffer fill, curfill = %ld", buf->curfill);
    
    /* Signal a waiting decoder thread that they can put more
       audio into the buffer */
    DEBUG("Signal decoder thread that buffer space is available");
    COND_SIGNAL(buf->write_cond);
  }

  UNLOCK_MUTEX(buf->mutex);

  pthread_cleanup_pop(0);
  
  pthread_testcancel();

  DEBUG("Exit buffer_get_data");
   
  return orig_size - nbytes;
}
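/* compute_dequeue_size() is not shown in this listing.  Below is a minimal
 * sketch of what it could look like, using only the buf_t fields already
 * referenced above (curfill, start, size) and the three constraints listed in
 * the comment; the real implementation may differ. */
static int compute_dequeue_size (buf_t *buf, long nbytes)
{
  long amount = nbytes;                   /* 2. no more than was requested     */

  if (amount > buf->curfill)              /* 1. no more than is in the buffer  */
    amount = buf->curfill;

  if (amount > buf->size - buf->start)    /* 3. do not run off the end of the
                                                circular buffer                */
    amount = buf->size - buf->start;

  return (int) amount;
}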
/*******************************************************************************
 Function name     :    healthy_check_http_process
 Description       :    main processing routine for the HTTP health-check method
 Input parameters  :    none
 Output parameters :    none
 Return value      :    none
--------------------------------------------------------------------------------
 Last modification record :
 Author            :    王宗发
 Purpose           :    new function
 Date              :    20101020
********************************************************************************/
void *healthy_check_http_process(void *data)
{
    u32 i = 0;
    s32 err_tmp = ERROR_SUCCESS;
    fd_set rset;
    fd_set wset;
    hc_http_s *http_cfg = NULL;
    hc_http_check_s * checker = NULL;

    struct sigaction action;
    
    action.sa_handler = handle_pipe;
    sigemptyset(&action.sa_mask);
    action.sa_flags = 0;
    sigaction(SIGPIPE, &action, NULL);
    
    checker = &g_hc_http_checker;
    memcpy(checker, (hc_http_check_s *)data, sizeof(hc_http_check_s));
    checker->thread_id = pthread_self();
    checker->valid_ip_num = 0;
    checker->request_ip_num = 0;
    checker->response_ip_num = 0;

    /* Validate the configuration and initialize */
    healthy_check_option_check_http();
    
    for(i = 0; i < HC_CHECK_NUM_MAX; i++)
    {
        http_cfg = &checker->hc_http_array[i];
        err_tmp = healthy_check_http_ip(http_cfg);
        if(ERROR_SUCCESS != err_tmp)
        {
            continue;
        }
        
        /* Add to the valid-IP array */
        checker->valid_ip_no[checker->valid_ip_num++] = i;
    }

    /* Initialize the fd sets and start the first round of probes; the sets
       are re-armed by healthy_check_redo_http() at the end of every round */
    FD_ZERO(&rset);
    FD_ZERO(&wset);
    healthy_check_redo_http(&wset);

    while(1)
    {
        err_tmp = select(checker->fd_max + 1, &rset, &wset, NULL, &(checker->out_time));

        if(err_tmp < 0)
        {
            if(EINTR == errno)
            {
                printf("system signal EINTR, continue\r\n");
            }
            else
            {
                printf("select process error, continue\r\n");
            }
            continue;
        }
        else if(0 == err_tmp)
        {
            healthy_check_outtime_process_http();
            goto next_round;
        }
        else
        {
            healthy_check_select_process_http(&rset, &wset);
            if(0 == checker->request_ip_num && 0 == checker->response_ip_num)
            {
                goto next_round;
            }
            else
            {
                continue;
            }
        }

next_round:
        /* Reset parameters and restart the sockets for the next round */
        pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
        pthread_testcancel();
        sleep(checker->interval);
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
        FD_ZERO(&rset);
        FD_ZERO(&wset);
        healthy_check_redo_http(&wset);
    }

    return NULL;
}
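/* The next_round block above confines thread cancellation to the sleep()
 * between probe rounds.  The following is a minimal, generic sketch of that
 * pattern (plain POSIX, not tied to the health-check structures): the caller
 * is assumed to pass a pointer to the sleep interval. */
#include <pthread.h>
#include <unistd.h>

static void *periodic_worker(void *arg)
{
    unsigned interval = *(unsigned *)arg;

    for (;;) {
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
        /* ... one round of work that must not be interrupted ... */

        pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
        pthread_testcancel();       /* act on a cancel requested during the round */
        sleep(interval);            /* sleep() is itself a cancellation point */
    }
    return NULL;
}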
void *control_handler(struct ControlProgram *control_program)
{
   int tid,i,r=-1,c=-1,status,rc,tmp;
   struct pollfd pfd;
   char command;
   int sockflag,poll_err_count,retval,socket,oldv,socket_err;
   socklen_t length=sizeof(int);
   int32_t current_freq,radar=0,channel=0;
   struct timeval tv,current_time,last_report,t_get_data_start,t_get_data_end;
   struct ROSMsg msg; 
   struct DriverMsg dmsg; 
   struct DataPRM control_data; 
   struct ControlPRM control_parameters; 
   struct SiteSettings settings;
   struct TSGbuf *pulseseq;
   struct SeqPRM tprm;
   int data_int;
   pthread_t thread,threads[10];
   struct timeval t0,t1,t2,t3,t4;
   unsigned long elapsed;
   unsigned long ultemp;
   int32_t data_length;
   char entry_type,entry_name[80];
   int return_type,entry_exists;
   char *temp_strp = NULL;
   int32_t temp_int32;
   double temp_double;
/*
*  Init the Control Program state
*/
   r=control_program->radarinfo->radar-1;
   c=control_program->radarinfo->channel-1;
   pthread_mutex_lock(&exit_lock);
   pthread_mutex_lock(&controlprogram_list_lock);

   setbuf(stdout, 0);
   setbuf(stderr, 0);
   tid = pthread_self();
/* set the cancellation parameters --
   - Enable thread cancellation 
   - Defer the action of the cancellation 
*/
   pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
   pthread_cleanup_push((void *)&controlprogram_exit,(void *)control_program);
     if(control_program!=NULL) {
       socket=control_program->state->socket;
     }
   pthread_mutex_unlock(&controlprogram_list_lock);
   pthread_mutex_unlock(&exit_lock);

   poll_err_count=0;
   gettimeofday(&last_report,NULL);
   while (1) {
      if(control_program==NULL) {
        fprintf(stderr,"Client Error: cp is null: %p\n",control_program);
        fflush(stderr);
        break;
      }
      retval=getsockopt(socket, SOL_SOCKET, SO_ERROR, &socket_err, &length);
      if ((retval!=0) || (socket_err!=0)) {
            fprintf(stderr,"Client Error: socket error: %d : %d %d : %p\n",socket,retval,socket_err,control_program);
            fflush(stderr);
        break;
      }
      /* poll the socket and check for waiting messages */
      pfd.fd = socket;
      pfd.events = POLLIN | POLLHUP | POLLRDNORM ;
      pfd.revents = 0;
      sockflag=0;
      retval=poll(&pfd, 1, 1000);
      if (retval > 0) {
        if(pfd.revents & POLLHUP) {
          fprintf(stderr,"Client: poll socket: %d cp: %p retval: %d poll_revents: %d :: %d %d %d\n",socket,control_program,retval,
          pfd.revents,POLLHUP,POLLIN,POLLRDNORM);
          break;
        } else {

          char buffer[32];
          if (recv(socket, buffer, sizeof(buffer), MSG_PEEK | MSG_DONTWAIT) > 0) {
            sockflag=1;
            poll_err_count=0;      
          } else {
            fprintf(stdout,"Client: poll socket: %d cp: %p poll_revents: %d :: No recv que'd\n",socket,control_program,pfd.revents);
            fflush(stdout);
            poll_err_count++;      
          }
        } 
      } else {
        if (retval < 0 ) {
           perror("poll()");
           break; 
        }
        if (retval == 0 ){
          fprintf(stdout,"Client: poll socket: %d cp: %p retval: %d :: timeout\n",socket,control_program,retval);
          fflush(stdout);
          poll_err_count++;      
        }
      }
      if (poll_err_count > 3 ) {
            break;    
      }
/* sockflag: socket looks good, time to  read from it */
      if(sockflag) {
        r=control_program->radarinfo->radar-1;
        c=control_program->radarinfo->channel-1;
        pthread_mutex_lock(&controlprogram_list_lock);
        r=control_program->radarinfo->radar-1;
        c=control_program->radarinfo->channel-1;
        if ((r<0) || (c<0)) control_program->data->status=-1;
 
       /* Read controlprogram msg */
        recv_data(socket, &msg, sizeof(struct ROSMsg));
        gettimeofday(&current_time,NULL);
        if((current_time.tv_sec-last_report.tv_sec)>5) {
          system("date -t > /tmp/server_cmd_time");
          last_report=current_time;
        }
        control_program->state->thread->last_seen=current_time;
        pthread_mutex_unlock(&controlprogram_list_lock);

        /* Process controlprogram msg */
        switch(msg.type) {
          case PING:
            //printf("PING: START\n");
            gettimeofday(&t0,NULL);
            msg.status=1;
            send_data(socket, &msg, sizeof(struct ROSMsg));
            //printf("PING: END\n");
            break;
          case SET_INACTIVE:
            //printf("SET_RADAR_INACTIVE\n");
            gettimeofday(&t0,NULL);
            if (verbose > 1 ) fprintf(stderr,"Client: Set INACTIVE: %ld %ld :: %d %d\n",(long)t0.tv_sec,(long)t0.tv_usec,r,c);
            if ( (r < 0) || (c < 0)) {
              msg.status=-1;
              send_data(socket, &msg, sizeof(struct ROSMsg));
            } else {
              pthread_mutex_lock(&controlprogram_list_lock);
              if(control_program->active!=0) {
                control_program->active=-1;
                control_program->state->ready=0;
                pthread_mutex_lock(&coord_lock);
                rc = pthread_create(&thread, NULL, (void *)&coordination_handler,(void *)
control_program);
                pthread_join(thread,NULL);
                pthread_mutex_unlock(&coord_lock);
              }
              pthread_mutex_unlock(&controlprogram_list_lock);
              msg.status=1;
              send_data(socket, &msg, sizeof(struct ROSMsg));
            }
            gettimeofday(&t1,NULL);
            if (verbose > 1 ) fprintf(stderr,"Client: End INACTIVE: %ld %ld :: %d %d\n",(long)t1.tv_sec,(long)t1.tv_usec,r,c);
            //printf("end SET_RADAR_INACTIVE\n");
            break;
          case SET_ACTIVE:
            gettimeofday(&t0,NULL);
            if (verbose > 1 ) fprintf(stderr,"Client: Set ACTIVE: %ld %ld :: %d %d\n",(long)t0.tv_sec,(long)t0.tv_usec,r,c);
            //printf("SET_RADAR_ACTIVE\n");
            gettimeofday(&t0,NULL);
            if ( (r < 0) || (c < 0)) {
              msg.status=-1;
              send_data(socket, &msg, sizeof(struct ROSMsg));
            } else {
              pthread_mutex_lock(&controlprogram_list_lock);
              if(control_program->active!=0) {
                control_program->active=1;
                control_program->state->ready=0;
/*  JDS : Do not need to run coordinator when setting active
                pthread_mutex_lock(&coord_lock);
                rc = pthread_create(&thread, NULL, (void *)&coordination_handler,(void *)
control_program);
                pthread_join(thread,NULL);
                pthread_mutex_unlock(&coord_lock);
*/
              }
              pthread_mutex_unlock(&controlprogram_list_lock);
              msg.status=1;
              send_data(socket, &msg, sizeof(struct ROSMsg));
            }
            gettimeofday(&t1,NULL);
            if (verbose > 1 ) fprintf(stderr,"Client: End ACTIVE: %ld %ld :: %d %d\n",(long)t1.tv_sec,(long)t1.tv_usec,r,c);
            //printf("end SET_RADAR_ACTIVE\n");
            break;
/*
          case UPDATE_SITE_SETTINGS:
            gettimeofday(&t0,NULL);
            SettingsInit(&settings);
            recv_data(socket, &settings, sizeof(struct SiteSettings));
            msg.status=-1;
            send_data(socket, &msg, sizeof(struct ROSMsg));
            SettingsCpy(&settings,&site_settings);
            rc = pthread_create(&thread, NULL, (void *)&settings_rxfe_update_rf,(void *)&site_settings.rf_settings);
            pthread_join(thread,NULL);
            rc = pthread_create(&thread, NULL, (void *)&settings_rxfe_update_if,(void *)&site_settings.if_settings);
            pthread_join(thread,NULL);
            break;
*/
          case QUERY_INI_SETTING:
            //fprintf(stdout,"start QUERY_INI_SETTING\n");
            recv_data(socket, &data_length, sizeof(int32_t));
            recv_data(socket, &entry_name, data_length*sizeof(char));
            recv_data(socket, &entry_type, sizeof(char));
            entry_exists=iniparser_find_entry(Site_INI,entry_name);
            msg.status=entry_exists;
            switch(entry_type) {
              case 'i':
                //fprintf(stdout,"  entry type: i\n");
                return_type='i';
                temp_int32=iniparser_getint(Site_INI,entry_name,-1);
                send_data(socket, &return_type, sizeof(char));
                data_length=1;
                send_data(socket, &data_length, sizeof(int32_t));
                send_data(socket, &temp_int32, data_length*sizeof(int32_t));
                break;
              case 'b':
                //fprintf(stdout,"  entry type: b\n");
                return_type='b';
                temp_int32=iniparser_getboolean(Site_INI,entry_name,-1);
                send_data(socket, &return_type, sizeof(char));
                data_length=1;
                send_data(socket, &data_length, sizeof(int32_t));
                send_data(socket, &temp_int32, data_length*sizeof(int32_t));
                break;
              case 's':
                //fprintf(stdout,"  entry type: s\n");
                return_type='s';
                temp_strp=iniparser_getstring(Site_INI,entry_name,NULL);
                send_data(socket, &return_type, sizeof(char));
                data_length=strlen(temp_strp);
                send_data(socket, &data_length, sizeof(int32_t));
                send_data(socket, temp_strp, data_length*sizeof(char));
                break;
              default:
                return_type=' ';
                send_data(socket, &return_type, sizeof(char));
                data_length=0;
                send_data(socket, &data_length, sizeof(int32_t));
                send_data(socket, temp_strp, data_length*sizeof(char));
            }
            send_data(socket, &msg, sizeof(struct ROSMsg));
            //fprintf(stdout,"end QUERY_INI_SETTING\n");
            break;
          case GET_SITE_SETTINGS:
            gettimeofday(&t0,NULL);
            settings=site_settings;
            send_data(socket, &settings, sizeof(struct SiteSettings));
            msg.status=-1;
            send_data(socket, &msg, sizeof(struct ROSMsg));
            break;
          case SET_SITE_IFMODE:
            gettimeofday(&t0,NULL);
            settings=site_settings;
            recv_data(socket, &settings.ifmode, sizeof(settings.ifmode));
            msg.status=-1;
            send_data(socket, &msg, sizeof(struct ROSMsg));
            break;
          case SET_RADAR_CHAN:
            gettimeofday(&t0,NULL);
            fprintf(stdout,"SET_RADAR_CHAN: %d.%d\n",(int)t0.tv_sec,(int)t0.tv_usec);
            fflush(stdout);
              msg.status=1;
              recv_data(socket, &radar, sizeof(int32_t)); //requested radar
              recv_data(socket, &channel, sizeof(int32_t)); //requested channel
            fprintf(stdout,"  Radar: %d Chan: %d\n",radar,channel);
            fflush(stdout);
              pthread_mutex_lock(&controlprogram_list_lock);
              status=register_radar_channel(control_program,radar,channel);
              if (status) {
              }
              else {
                if (verbose>-1) fprintf(stderr,"Control Program thread %d Bad status %d no radar channel registered\n", tid,status);
              }
              msg.status=status;
              pthread_mutex_unlock(&controlprogram_list_lock);
              send_data(socket, &msg, sizeof(struct ROSMsg));
              //printf(" END SET_RADAR_CHAN\n");
            break;
          case LINK_RADAR_CHAN:
            gettimeofday(&t0,NULL);
            msg.status=1;
            recv_data(socket, &r, sizeof(r)); //requested radar
            recv_data(socket, &c, sizeof(c)); //requested channel
            pthread_mutex_lock(&controlprogram_list_lock);
            control_program->state->linked_program=find_registered_controlprogram_by_radar_channel(r,c);
            control_program->state->linked=1;
            if (control_program->state->linked_program!=NULL) {
              status=1;
            }
            else {
              status=0;
            }
            msg.status=status;
            pthread_mutex_unlock(&controlprogram_list_lock);
            send_data(socket, &msg, sizeof(struct ROSMsg));
            break;
          case GET_PARAMETERS:
            //fprintf(stdout,"GET_PARAMETERS: START\n");
            //fflush(stdout);
            gettimeofday(&t0,NULL);
            if ( (r < 0) || (c < 0)) {
              send_data(socket, &control_parameters, sizeof(struct ControlPRM));
              msg.status=-1;
              send_data(socket, &msg, sizeof(struct ROSMsg));
            } else {
              pthread_mutex_lock(&controlprogram_list_lock);
              msg.status=status;
              control_parameters=controlprogram_fill_parameters(control_program);
              pthread_mutex_unlock(&controlprogram_list_lock);
              send_data(socket, &control_parameters, sizeof(struct ControlPRM));
              send_data(socket, &msg, sizeof(struct ROSMsg));
            }
            //fprintf(stdout,"GET_PARAMETERS: END\n");
            //fflush(stdout);
            break;
          case GET_DATA:
            //printf("GET_DATA: START\n");
            //printf("GET_DATA: Event :: sec: %d nsec: %d\n",control_program->data->event_secs,control_program->data->event_nsecs);
            //printf("GET_DATA: bad_transmit_times:: length %d \n",bad_transmit_times.length);
            //for(i=0;i<bad_transmit_times.length;i++) {
            //  printf("GET_DATA: bad_transmit_times:: %d : %d %d\n",i,bad_transmit_times.start_usec[i],bad_transmit_times.duration_usec[i]);
            //}
            gettimeofday(&t0,NULL);
            gettimeofday(&t_get_data_start,NULL);
            if (control_program->active != 1) { 
              control_program->data->status=-1;
              send_data(socket, control_program->data, sizeof(struct DataPRM));
              msg.status=-1;
              send_data(socket, &msg, sizeof(struct ROSMsg));
            } else {
              if ( (r < 0) || (c < 0)) {
                control_program->data->status=-1;
                send_data(socket, control_program->data, sizeof(struct DataPRM));
                msg.status=-1;
                send_data(socket, &msg, sizeof(struct ROSMsg));
              } else {
                msg.status=status;
                rc = pthread_create(&thread, NULL,(void *)&receiver_controlprogram_get_data,(void *) control_program);
                pthread_join(thread,NULL);
                send_data(socket, control_program->data, sizeof(struct DataPRM));
                if(control_program->data->status==0) {
                  //printf("GET_DATA: main: %d %d\n",sizeof(uint32_t),sizeof(uint32)*control_program->data->samples);
                  send_data(socket, control_program->main, sizeof(uint32_t)*control_program->data->samples);
                  send_data(socket, control_program->back, sizeof(uint32_t)*control_program->data->samples);
                  send_data(socket, &bad_transmit_times.length, sizeof(bad_transmit_times.length));
                  send_data(socket, bad_transmit_times.start_usec, sizeof(uint32_t)*bad_transmit_times.length);
                  send_data(socket, bad_transmit_times.duration_usec, sizeof(uint32_t)*bad_transmit_times.length);
                  tmp=MAX_TRANSMITTERS;
                  send_data(socket,&tmp,sizeof(int));
                  send_data(socket,&txstatus[r].AGC,sizeof(int)*tmp);
                  send_data(socket,&txstatus[r].LOWPWR,sizeof(int)*tmp);
                } else {
                  printf("GET_DATA: Bad status %d\n",control_program->data->status);
                } 
                send_data(socket, &msg, sizeof(struct ROSMsg));
              }
            }
            gettimeofday(&t1,NULL);
            if (verbose > 1) {
              elapsed=(t1.tv_sec-t0.tv_sec)*1E6;
              elapsed+=(t1.tv_usec-t0.tv_usec);
              if (verbose > 1 ) printf("Client:  Get Data Elapsed Microseconds: %ld\n",elapsed);
            }
            //printf("GET_DATA: END\n");
            gettimeofday(&t_get_data_end,NULL);
            if (verbose > 1) {
              elapsed=(t_get_data_end.tv_sec-t_pre_start.tv_sec)*1E6;
              elapsed+=(t_get_data_end.tv_usec-t_pre_start.tv_usec);
              fprintf(stderr,"Client %2d %2d:  From Pretrig start to Get Data end Elapsed Microseconds: %10ld  :: sec: %10d usec: %10d\n",r,c,elapsed,t_get_data_end.tv_sec,t_get_data_end.tv_usec);
              elapsed=(t_get_data_end.tv_sec-t_ready_first.tv_sec)*1E6;
              elapsed+=(t_get_data_end.tv_usec-t_ready_first.tv_usec);
              fprintf(stderr,"Client %2d %2d:  From Client Ready start to Get Data end Elapsed Microseconds: %10ld  :: sec: %10d usec: %10d\n",r,c,elapsed,t_get_data_end.tv_sec,t_get_data_end.tv_usec);
             fflush(stderr); 
            }
            break;
          case SET_PARAMETERS:
            //printf("SET_PARAMETERS: START\n");
            gettimeofday(&t0,NULL);
            if ( (r < 0) || (c < 0)) {
              recv_data(socket, control_program->parameters, sizeof(struct ControlPRM));
              msg.status=-1;
              send_data(socket, &msg, sizeof(struct ROSMsg));
            } else {
              msg.status=1;
              pthread_mutex_lock(&controlprogram_list_lock);
              recv_data(socket, control_program->parameters, sizeof(struct ControlPRM));
              if(control_program->parameters->rfreq<0) control_program->parameters->rfreq=control_program->parameters->tfreq;
              send_data(socket, &msg, sizeof(struct ROSMsg));
              pthread_mutex_unlock(&controlprogram_list_lock);
            }
            //printf("SET_PARAMETERS: END\n");
            break;
          case REGISTER_SEQ:
            gettimeofday(&t0,NULL);
            fprintf(stdout,"REGISTER_SEQ: r: %d c: %d  %d.%d\n",r,c,(int)t0.tv_sec,(int)t0.tv_usec);
            fflush(stdout);
            msg.status=1;
            recv_data(socket,&tprm, sizeof(struct SeqPRM)); // requested pulseseq
            pthread_mutex_lock(&controlprogram_list_lock);
            control_program->state->pulseseqs[tprm.index]=malloc(sizeof(struct TSGbuf));
            control_program->parameters->current_pulseseq_index=tprm.index;
            control_program->state->pulseseqs[tprm.index]->len=tprm.len;
            control_program->state->pulseseqs[tprm.index]->step=tprm.step;
            control_program->state->pulseseqs[tprm.index]->index=tprm.index;
            control_program->state->pulseseqs[tprm.index]->rep=
                malloc(sizeof(unsigned char)*control_program->state->pulseseqs[tprm.index]->len);
            control_program->state->pulseseqs[tprm.index]->code=
                malloc(sizeof(unsigned char)*control_program->state->pulseseqs[tprm.index]->len);
            recv_data(socket,control_program->state->pulseseqs[tprm.index]->rep, 
                sizeof(unsigned char)*control_program->state->pulseseqs[tprm.index]->len); // requested pulseseq
            recv_data(socket,control_program->state->pulseseqs[tprm.index]->code, 
                sizeof(unsigned char)*control_program->state->pulseseqs[tprm.index]->len); // requested pulseseq
            if ( (r < 0) || (c < 0)) {
              msg.status=-1;
            } else {
            //send on to timing socket
              rc = pthread_create(&threads[0], NULL, (void *)&timing_register_seq,(void *) control_program);
            //send on to dds socket
              rc = pthread_create(&threads[1], NULL, (void *)&dds_register_seq,(void *) control_program);
              //printf("Waiting on Timing Thread\n");
              pthread_join(threads[0],NULL);
              //printf("Waiting on DDS\n"); 
              pthread_join(threads[1],NULL);
            }
            pthread_mutex_unlock(&controlprogram_list_lock);
            //printf("REGISTER_SEQ: SEND ROSMsg\n");
            send_data(socket, &msg, sizeof(struct ROSMsg));
            gettimeofday(&t1,NULL);
            if (verbose > 1) {
              elapsed=(t1.tv_sec-t0.tv_sec)*1E6;
              elapsed+=(t1.tv_usec-t0.tv_usec);
              if (verbose > 1 ) printf("Client:  Reg Seq Elapsed Microseconds: %ld\n",elapsed);
            }
            //printf("REGISTER_SEQ: END\n");
            break;
          case SET_READY_FLAG:
            gettimeofday(&t0,NULL);
            if (verbose > 1 ) fprintf(stderr,"Client: Set READY: %ld %ld :: %d %d\n",(long)t0.tv_sec,(long)t0.tv_usec,r,c);
            if ( (r < 0) || (c < 0)) {
              msg.status=-1;
            } else {
              msg.status=0;
              pthread_mutex_lock(&controlprogram_list_lock);
              if (control_program->active!=0) { 
                control_program->active=1;
                control_program->state->ready=1;
              } 
              pthread_mutex_unlock(&controlprogram_list_lock);
              i=0;
              rc = pthread_create(&threads[i], NULL,(void *) &timing_wait, NULL);
              pthread_join(threads[0],NULL);
              pthread_mutex_lock(&controlprogram_list_lock);
              i=0;
              rc = pthread_create(&threads[i], NULL, (void *) &DIO_ready_controlprogram, control_program);
                pthread_join(threads[i],NULL);
              i++;
              rc = pthread_create(&threads[i], NULL, (void *) &timing_ready_controlprogram, control_program);
                pthread_join(threads[i],NULL);
              i++;
              rc = pthread_create(&threads[i], NULL, (void *) &dds_ready_controlprogram, control_program);
                pthread_join(threads[i],NULL);
              i++;
              rc = pthread_create(&threads[i], NULL, (void *) &receiver_ready_controlprogram, control_program);
                pthread_join(threads[i],NULL);
              for (;i>=0;i--) {
                gettimeofday(&t2,NULL);
                pthread_join(threads[i],NULL);
                gettimeofday(&t3,NULL);
                if (verbose > 1) {
                   elapsed=(t3.tv_sec-t2.tv_sec)*1E6;
                   elapsed+=(t3.tv_usec-t2.tv_usec);
                   if (verbose > 1 ) printf("Client:Set Ready: %d Elapsed Microseconds: %ld\n",i,elapsed);
                }
              }
              gettimeofday(&t2,NULL);
              pthread_mutex_lock(&coord_lock);
              rc = pthread_create(&thread, NULL, (void *)&coordination_handler,(void *) control_program);
              pthread_join(thread,NULL);
              pthread_mutex_unlock(&coord_lock);
              gettimeofday(&t3,NULL);
              if (verbose > 1) {
                   elapsed=(t3.tv_sec-t2.tv_sec)*1E6;
                   elapsed+=(t3.tv_usec-t2.tv_usec);
                   if (verbose > 1 ) printf("Client:Set Ready: Coord Elapsed Microseconds: %ld\n",elapsed);
              }
              pthread_mutex_unlock(&controlprogram_list_lock);
            }
            send_data(socket, &msg, sizeof(struct ROSMsg));
            gettimeofday(&t1,NULL);
            if (verbose > 1) {
              elapsed=(t1.tv_sec-t0.tv_sec)*1E6;
              elapsed+=(t1.tv_usec-t0.tv_usec);
              if (verbose > 1 ) printf("Client:  Set Ready Elapsed Microseconds: %ld\n",elapsed);
            }
            if (verbose > 1 ) fprintf(stderr,"Client: End READY: %ld %ld :: %d %d\n",(long)t1.tv_sec,(long)t1.tv_usec,r,c);
            break;

          case REQUEST_CLEAR_FREQ_SEARCH:
            gettimeofday(&t0,NULL);
            pthread_mutex_lock(&controlprogram_list_lock);
            recv_data(socket,&control_program->clrfreqsearch, sizeof(struct CLRFreqPRM)); // requested search parameters
            if (verbose > 1 )printf("Client: Requst CLRSearch: %d %d\n",control_program->clrfreqsearch.start,control_program->clrfreqsearch.end);
            if ( (r < 0) || (c < 0)) {
              msg.status=-1;
            } else {
              rc = pthread_create(&threads[0], NULL, (void *) &DIO_clrfreq,control_program);
              pthread_join(threads[0],NULL);
              rc = pthread_create(&threads[0], NULL, (void *) &receiver_clrfreq,control_program);
              pthread_join(threads[0],NULL);
              rc = pthread_create(&threads[0], NULL, (void *) &DIO_rxfe_reset,NULL);
              pthread_join(threads[0],NULL);
              msg.status=control_program->state->freq_change_needed;
            }
            send_data(socket, &msg, sizeof(struct ROSMsg));
            pthread_mutex_unlock(&controlprogram_list_lock);
            gettimeofday(&t1,NULL);
            if (verbose > 1) {
              elapsed=(t1.tv_sec-t0.tv_sec)*1E6;
              elapsed+=(t1.tv_usec-t0.tv_usec);
              if (verbose > 1 ) printf("Client:  CLR Elapsed Microseconds: %ld\n",elapsed);
            }
            break;
          case REQUEST_ASSIGNED_FREQ:
            gettimeofday(&t0,NULL);
            pthread_mutex_lock(&controlprogram_list_lock);
            if ( (r < 0) || (c < 0)) {
              msg.status=-1;
              control_program->state->current_assigned_freq=0;
              control_program->state->current_assigned_noise=0;
            } else {
              rc = pthread_create(&threads[0], NULL, (void *) &receiver_assign_frequency,(void *)  control_program);
              pthread_join(threads[0],NULL);
              msg.status=control_program->state->best_assigned_freq!=control_program->state->current_assigned_freq;
            }
            //control_program->state->current_assigned_noise=1;
            current_freq=control_program->state->current_assigned_freq; 
            send_data(socket, &current_freq, sizeof(int32_t));
            send_data(socket, &control_program->state->current_assigned_noise, sizeof(float));
            send_data(socket, &msg, sizeof(struct ROSMsg));
            pthread_mutex_unlock(&controlprogram_list_lock);
            gettimeofday(&t1,NULL);
            if (verbose > 1) {
              elapsed=(t1.tv_sec-t0.tv_sec)*1E6;
              elapsed+=(t1.tv_usec-t0.tv_usec);
              if (verbose > 1 ) printf("Client:  Request Freq Elapsed Microseconds: %ld\n",elapsed);
            }
            break;

          case QUIT:
            gettimeofday(&t0,NULL);
            fprintf(stdout,"Client QUIT:: %d.%06d \n",(int)t0.tv_sec,(int)t0.tv_usec);
            fflush(stdout);
            msg.status=0;
            send_data(socket, &msg, sizeof(struct ROSMsg));
            //controlprogram_exit(control_program);
            pthread_exit(NULL);
            break;
          default:
            msg.status=1;
            send_data(socket, &msg, sizeof(struct ROSMsg));
        }
      } //else printf("No data within five seconds.\n");
//        if (verbose>1) printf("Client: test cancel\n");
        
        pthread_testcancel();
   }
   fprintf(stdout,"Outside of socket while loop : %p\n",control_program);
   fflush(stdout);
   pthread_testcancel();
   pthread_cleanup_pop(0);
   controlprogram_exit(control_program);
   pthread_exit(NULL);
}
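/* The poll()-then-peek test in the loop above is a reusable idiom: poll()
 * reports the descriptor readable, and a non-destructive recv() with
 * MSG_PEEK | MSG_DONTWAIT confirms that data is actually queued before a
 * blocking read is attempted.  A generic sketch, not tied to ROSMsg: */
#include <poll.h>
#include <sys/socket.h>

/* Returns 1 if a message is waiting, 0 on timeout or nothing queued,
 * -1 on error or hang-up. */
static int socket_has_message(int fd, int timeout_ms)
{
    struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM, .revents = 0 };
    char peek;
    int rv = poll(&pfd, 1, timeout_ms);

    if (rv < 0)
        return -1;                              /* poll() failed */
    if (rv == 0)
        return 0;                               /* timeout */
    if (pfd.revents & POLLHUP)
        return -1;                              /* peer closed the connection */
    if (recv(fd, &peek, sizeof peek, MSG_PEEK | MSG_DONTWAIT) > 0)
        return 1;                               /* data really is queued */
    return 0;                                   /* nothing queued, try again */
}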
Example #25
void vegas_null_thread(void *_args) {

    int rv;
    /* Set cpu affinity */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    CPU_ZERO(&cpuset);
    CPU_SET(6, &cpuset);
    rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) { 
        vegas_error("vegas_null_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, 0);
    if (rv<0) {
        vegas_error("vegas_null_thread", "Error setting priority level.");
        perror("set_priority");
    }

    /* Get args */
    struct vegas_thread_args *args = (struct vegas_thread_args *)_args;

    /* get instance_id */
    int instance_id = args->instance_id;

    /* Attach to status shared mem area */
    struct vegas_status st;
    rv = vegas_status_attach(instance_id, &st);
    if (rv!=VEGAS_OK) {
        vegas_error("vegas_null_thread", 
                "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)vegas_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);

    /* Init status */
    vegas_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    vegas_status_unlock_safe(&st);

    /* Attach to databuf shared mem */
    struct vegas_databuf *db;
    db = vegas_databuf_attach(instance_id, args->input_buffer);
    if (db==NULL) {
        vegas_error("vegas_null_thread",
                "Error attaching to databuf shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)vegas_databuf_detach, db);

    /* Loop */
    char *ptr;
    struct vegas_params gp;
#if FITS_TYPE == PSRFITS
    struct psrfits pf;
    pf.sub.dat_freqs = NULL;
    pf.sub.dat_weights = NULL;
    pf.sub.dat_offsets = NULL;
    pf.sub.dat_scales = NULL;
    pthread_cleanup_push((void *)vegas_free_psrfits, &pf);
#else
    struct sdfits pf;
    pthread_cleanup_push((void *)vegas_free_sdfits, &pf);
#endif
    int curblock=0;
    signal(SIGINT,cc);
    while (run) {

        /* Note waiting status */
        vegas_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "waiting");
        vegas_status_unlock_safe(&st);

        /* Wait for buf to have data */
        rv = vegas_databuf_wait_filled(db, curblock);
        if (rv!=0) {
            //sleep(1);
            continue;
        }

        /* Note waiting status, current block */
        vegas_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "discarding");
        hputi4(st.buf, "DSKBLKIN", curblock);
        vegas_status_unlock_safe(&st);

        /* Get params */
        ptr = vegas_databuf_header(db, curblock);
        vegas_read_obs_params(ptr, &gp, &pf);

        /* Output if data was lost */
#if FITS_TYPE == PSRFITS
        if (gp.n_dropped!=0 && 
                (gp.packetindex==0 || strcmp(pf.hdr.obs_mode,"SEARCH"))) {
            printf("Block beginning with pktidx=%lld dropped %d packets\n",
                    gp.packetindex, gp.n_dropped);
            fflush(stdout);
        }
#else
        if (gp.num_pkts_dropped!=0 && gp.num_pkts_rcvd!=0) {
            printf("Block received %d packets and dropped %d packets\n",
                    gp.num_pkts_rcvd, gp.num_pkts_dropped);
            fflush(stdout);
        }
#endif

        /* Mark as free */
        vegas_databuf_set_free(db, curblock);

        /* Go to next block */
        curblock = (curblock + 1) % db->n_block;

        /* Check for cancel */
        pthread_testcancel();

    }

    pthread_exit(NULL);

    pthread_cleanup_pop(0); /* Closes vegas_free_psrfits / vegas_free_sdfits */
    pthread_cleanup_pop(0); /* Closes vegas_databuf_detach */
    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes vegas_status_detach */

}
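/* pthread_cleanup_push()/pthread_cleanup_pop() pair lexically and the
 * handlers unwind in reverse push order, which is why the pops above mirror
 * the pushes.  A tiny standalone demonstration (unrelated to the VEGAS code): */
#include <pthread.h>
#include <stdio.h>

static void announce(void *msg) { puts((const char *)msg); }

static void *worker(void *arg)
{
    (void)arg;
    pthread_cleanup_push(announce, (void *)"runs second (pushed first)");
    pthread_cleanup_push(announce, (void *)"runs first (pushed last)");
    pthread_exit(NULL);             /* runs both handlers, last push first */
    pthread_cleanup_pop(0);         /* never reached, but keeps the macros paired */
    pthread_cleanup_pop(0);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    return 0;
}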
Example #26
void* xf_monitor_updates(void* param)
{
	int fds;
	xfInfo* xfi;
	XEvent xevent;
	fd_set rfds_set;
	int select_status;
	int pending_events;
	xfPeerContext* xfp;
	freerdp_peer* client;
	uint32 wait_interval;
	struct timeval timeout;
	int x, y, width, height;
	XDamageNotifyEvent* notify;
	xfEventRegion* event_region;

	client = (freerdp_peer*) param;
	xfp = (xfPeerContext*) client->context;
	xfi = xfp->info;

	fds = xfi->xfds;
	wait_interval = (1000000 / 2500);
	memset(&timeout, 0, sizeof(struct timeval));

	pthread_create(&(xfp->frame_rate_thread), 0, xf_frame_rate_thread, (void*) client);

	while (1)
	{
		// check if we should terminate
		pthread_testcancel();

		FD_ZERO(&rfds_set);
		FD_SET(fds, &rfds_set);

		timeout.tv_sec = 0;
		timeout.tv_usec = wait_interval;
		select_status = select(fds + 1, &rfds_set, NULL, NULL, &timeout);

		if (select_status == -1)
		{
			printf("select failed\n");
		}
		else if (select_status == 0)
		{
			//printf("select timeout\n");
		}

		pthread_mutex_lock(&(xfp->mutex));
		pending_events = XPending(xfi->display);
		pthread_mutex_unlock(&(xfp->mutex));

		if (pending_events > 0)
		{
			pthread_mutex_lock(&(xfp->mutex));
			memset(&xevent, 0, sizeof(xevent));
			XNextEvent(xfi->display, &xevent);
			pthread_mutex_unlock(&(xfp->mutex));

			if (xevent.type == xfi->xdamage_notify_event)
			{
				notify = (XDamageNotifyEvent*) &xevent;

				x = notify->area.x;
				y = notify->area.y;
				width = notify->area.width;
				height = notify->area.height;

				xf_xdamage_subtract_region(xfp, x, y, width, height);

				event_region = xf_event_region_new(x, y, width, height);
				xf_event_push(xfp->event_queue, (xfEvent*) event_region);
			}
		}
	}

	return NULL;
}
// Set of threads which talk to client over the connection for doing the needful
// processing. Note that once fd is assigned to a thread all the work on that fd
// is done by that thread. Fair fd usage is expected of the client. First thread
// is special - also does accept [listens for new connections]. It is the only
// thread which does it.
void *
thr_demarshal(void *arg)
{
	cf_socket_cfg *s, *ls;
	// Create my epoll fd, register in the global list.
	struct epoll_event ev;
	int nevents, i, n, epoll_fd;
	cf_clock last_fd_print = 0;

#if defined(USE_SYSTEMTAP)
	uint64_t nodeid = g_config.self_node;
#endif

	// Early stage aborts; these will cause faults in process scope.
	cf_assert(arg, AS_DEMARSHAL, CF_CRITICAL, "invalid argument");
	s = &g_config.socket;
	ls = &g_config.localhost_socket;

#ifdef USE_JEM
	int orig_arena;
	if (0 > (orig_arena = jem_get_arena())) {
		cf_crash(AS_DEMARSHAL, "Failed to get original arena for thr_demarshal()!");
	} else {
		cf_info(AS_DEMARSHAL, "Saved original JEMalloc arena #%d for thr_demarshal()", orig_arena);
	}
#endif

	// Figure out my thread index.
	pthread_t self = pthread_self();
	int thr_id;
	for (thr_id = 0; thr_id < MAX_DEMARSHAL_THREADS; thr_id++) {
		if (0 != pthread_equal(g_demarshal_args->dm_th[thr_id], self))
			break;
	}

	if (thr_id == MAX_DEMARSHAL_THREADS) {
		cf_debug(AS_FABRIC, "Demarshal thread could not figure own ID, bogus, exit, fu!");
		return(0);
	}

	// First thread accepts new connection at interface socket.
	if (thr_id == 0) {
		demarshal_file_handle_init();
		epoll_fd = epoll_create(EPOLL_SZ);
		if (epoll_fd == -1)
			cf_crash(AS_DEMARSHAL, "epoll_create(): %s", cf_strerror(errno));

		memset(&ev, 0, sizeof (ev));
		ev.events = EPOLLIN | EPOLLERR | EPOLLHUP;
		ev.data.fd = s->sock;
		if (0 > epoll_ctl(epoll_fd, EPOLL_CTL_ADD, s->sock, &ev))
			cf_crash(AS_DEMARSHAL, "epoll_ctl(): %s", cf_strerror(errno));
		cf_info(AS_DEMARSHAL, "Service started: socket %s:%d", s->addr, s->port);

		if (ls->sock) {
			ev.events = EPOLLIN | EPOLLERR | EPOLLHUP;
			ev.data.fd = ls->sock;
			if (0 > epoll_ctl(epoll_fd, EPOLL_CTL_ADD, ls->sock, &ev))
			  cf_crash(AS_DEMARSHAL, "epoll_ctl(): %s", cf_strerror(errno));
			cf_info(AS_DEMARSHAL, "Service also listening on localhost socket %s:%d", ls->addr, ls->port);
		}
	}
	else {
		epoll_fd = epoll_create(EPOLL_SZ);
		if (epoll_fd == -1)
			cf_crash(AS_DEMARSHAL, "epoll_create(): %s", cf_strerror(errno));
	}

	g_demarshal_args->epoll_fd[thr_id] = epoll_fd;
	cf_detail(AS_DEMARSHAL, "demarshal thread started: id %d", thr_id);

	int id_cntr = 0;

	// Demarshal transactions from the socket.
	for ( ; ; ) {
		struct epoll_event events[EPOLL_SZ];

		cf_detail(AS_DEMARSHAL, "calling epoll");

		nevents = epoll_wait(epoll_fd, events, EPOLL_SZ, -1);

		if (0 > nevents) {
			cf_debug(AS_DEMARSHAL, "epoll_wait() returned %d ; errno = %d (%s)", nevents, errno, cf_strerror(errno));
		}

		cf_detail(AS_DEMARSHAL, "epoll event received: nevents %d", nevents);

		uint64_t now_ns = cf_getns();
		uint64_t now_ms = now_ns / 1000000;

		// Iterate over all events.
		for (i = 0; i < nevents; i++) {
			if ((s->sock == events[i].data.fd) || (ls->sock == events[i].data.fd)) {
				// Accept new connections on the service socket.
				int csocket = -1;
				struct sockaddr_in caddr;
				socklen_t clen = sizeof(caddr);
				char cpaddr[64];

				if (-1 == (csocket = accept(events[i].data.fd, (struct sockaddr *)&caddr, &clen))) {
					// This means we're out of file descriptors - could be a SYN
					// flood attack or misbehaving client. Eventually we'd like
					// to make the reaper fairer, but for now we'll just have to
					// ignore the accept error and move on.
					if ((errno == EMFILE) || (errno == ENFILE)) {
						if (last_fd_print != (cf_getms() / 1000L)) {
							cf_info(AS_DEMARSHAL, " warning: hit OS file descript limit (EMFILE on accept), consider raising limit");
							last_fd_print = cf_getms() / 1000L;
						}
						continue;
					}
					cf_crash(AS_DEMARSHAL, "accept: %s (errno %d)", cf_strerror(errno), errno);
				}

				// Get the client IP address in string form.
				if (caddr.sin_family == AF_INET) {
					if (NULL == inet_ntop(AF_INET, &caddr.sin_addr.s_addr, (char *)cpaddr, sizeof(cpaddr))) {
						cf_crash(AS_DEMARSHAL, "inet_ntop(): %s (errno %d)", cf_strerror(errno), errno);
					}
				}
				else if (caddr.sin_family == AF_INET6) {
					struct sockaddr_in6* addr_in6 = (struct sockaddr_in6*)&caddr;

					if (NULL == inet_ntop(AF_INET6, &addr_in6->sin6_addr, (char *)cpaddr, sizeof(cpaddr))) {
						cf_crash(AS_DEMARSHAL, "inet_ntop(): %s (errno %d)", cf_strerror(errno), errno);
					}
				}
				else {
					cf_crash(AS_DEMARSHAL, "unknown address family %u", caddr.sin_family);
				}

				cf_detail(AS_DEMARSHAL, "new connection: %s (fd %d)", cpaddr, csocket);

				// Validate the limit of protocol connections we allow.
				uint32_t conns_open = g_config.proto_connections_opened - g_config.proto_connections_closed;
				if (conns_open > g_config.n_proto_fd_max) {
					if ((last_fd_print + 5000L) < cf_getms()) { // no more than 5 secs
						cf_warning(AS_DEMARSHAL, "dropping incoming client connection: hit limit %d connections", conns_open);
						last_fd_print = cf_getms();
					}
					shutdown(csocket, SHUT_RDWR);
					close(csocket);
					csocket = -1;
					continue;
				}

				// Set the socket to nonblocking.
				if (-1 == cf_socket_set_nonblocking(csocket)) {
					cf_info(AS_DEMARSHAL, "unable to set client socket to nonblocking mode");
					shutdown(csocket, SHUT_RDWR);
					close(csocket);
					csocket = -1;
					continue;
				}

				// Create as_file_handle and queue it up in epoll_fd for further
				// communication on one of the demarshal threads.
				as_file_handle *fd_h = cf_rc_alloc(sizeof(as_file_handle));
				if (!fd_h) {
					cf_crash(AS_DEMARSHAL, "malloc");
				}

				sprintf(fd_h->client, "%s:%d", cpaddr, ntohs(caddr.sin_port));
				fd_h->fd = csocket;

				fd_h->last_used = cf_getms();
				fd_h->reap_me = false;
				fd_h->trans_active = false;
				fd_h->proto = 0;
				fd_h->proto_unread = 0;
				fd_h->fh_info = 0;
				fd_h->security_filter = as_security_filter_create();

				// Insert into the global table so the reaper can manage it. Do
				// this before queueing it up for demarshal threads - once
				// EPOLL_CTL_ADD is done it's difficult to back out (if insert
				// into global table fails) because fd state could be anything.
				cf_rc_reserve(fd_h);

				pthread_mutex_lock(&g_file_handle_a_LOCK);

				int j;
				bool inserted = true;

				if (0 != cf_queue_pop(g_freeslot, &j, CF_QUEUE_NOWAIT)) {
					inserted = false;
				}
				else {
					g_file_handle_a[j] = fd_h;
				}

				pthread_mutex_unlock(&g_file_handle_a_LOCK);

				if (!inserted) {
					cf_info(AS_DEMARSHAL, "unable to add socket to file handle table");
					shutdown(csocket, SHUT_RDWR);
					close(csocket);
					csocket = -1;
					cf_rc_free(fd_h); // will free even with ref-count of 2
				}
				else {
					// Place the client socket in the event queue.
					memset(&ev, 0, sizeof(ev));
					ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP ;
					ev.data.ptr = fd_h;

					// Round-robin pick up demarshal thread epoll_fd and add
					// this new connection to epoll.
					int id;
					while (true) {
						id = (id_cntr++) % g_demarshal_args->num_threads;
						if (g_demarshal_args->epoll_fd[id] != 0) {
							break;
						}
					}

					fd_h->epoll_fd = g_demarshal_args->epoll_fd[id];

					if (0 > (n = epoll_ctl(fd_h->epoll_fd, EPOLL_CTL_ADD, csocket, &ev))) {
						cf_info(AS_DEMARSHAL, "unable to add socket to event queue of demarshal thread %d %d", id, g_demarshal_args->num_threads);
						pthread_mutex_lock(&g_file_handle_a_LOCK);
						fd_h->reap_me = true;
						as_release_file_handle(fd_h);
						fd_h = 0;
						pthread_mutex_unlock(&g_file_handle_a_LOCK);
					}
					else {
						cf_atomic_int_incr(&g_config.proto_connections_opened);
					}
				}
			}
			else {
				bool has_extra_ref   = false;
				as_file_handle *fd_h = events[i].data.ptr;
				if (fd_h == 0) {
					cf_info(AS_DEMARSHAL, "event with null handle, continuing");
					goto NextEvent;
				}

				cf_detail(AS_DEMARSHAL, "epoll connection event: fd %d, events 0x%x", fd_h->fd, events[i].events);

				// Process data on an existing connection: this might be more
				// activity on an already existing transaction, so we have some
				// state to manage.
				as_proto *proto_p = 0;
				int fd = fd_h->fd;

				if (events[i].events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
					cf_detail(AS_DEMARSHAL, "proto socket: remote close: fd %d event %x", fd, events[i].events);
					// no longer in use: out of epoll etc
					goto NextEvent_FD_Cleanup;
				}

				if (fd_h->trans_active) {
					goto NextEvent;
				}

				// If pointer is NULL, then we need to create a transaction and
				// store it in the buffer.
				if (fd_h->proto == NULL) {
					as_proto proto;
					int sz;

					/* Get the number of available bytes */
					if (-1 == ioctl(fd, FIONREAD, &sz)) {
						cf_info(AS_DEMARSHAL, "unable to get number of available bytes");
						goto NextEvent_FD_Cleanup;
					}

					// If we don't have enough data to fill the message buffer,
					// just wait and we'll come back to this one. However, we'll
					// let messages with zero size through, since they are
					// likely errors. We don't cleanup the FD in this case since
					// we'll get more data on it.
					if (sz < sizeof(as_proto) && sz != 0) {
						goto NextEvent;
					}

					// Do a preliminary read of the header into a stack-
					// allocated structure, so that later on we can allocate the
					// entire message buffer.
					if (0 >= (n = cf_socket_recv(fd, &proto, sizeof(as_proto), MSG_WAITALL))) {
						cf_detail(AS_DEMARSHAL, "proto socket: read header fail: error: rv %d sz was %d errno %d", n, sz, errno);
						goto NextEvent_FD_Cleanup;
					}

					if (proto.version != PROTO_VERSION &&
							// For backward compatibility, allow version 0 with
							// security messages.
							! (proto.version == 0 && proto.type == PROTO_TYPE_SECURITY)) {
						cf_warning(AS_DEMARSHAL, "proto input from %s: unsupported proto version %u",
								fd_h->client, proto.version);
						goto NextEvent_FD_Cleanup;
					}

					// Swap the necessary elements of the as_proto.
					as_proto_swap(&proto);

					if (proto.sz > PROTO_SIZE_MAX) {
						cf_warning(AS_DEMARSHAL, "proto input from %s: msg greater than %d, likely request from non-Aerospike client, rejecting: sz %"PRIu64,
								fd_h->client, PROTO_SIZE_MAX, proto.sz);
						goto NextEvent_FD_Cleanup;
					}

#ifdef USE_JEM
					// Attempt to peek the namespace and set the JEMalloc arena accordingly.
					size_t peeked_data_sz = 0;
					size_t min_field_sz = sizeof(uint32_t) + sizeof(char);
					size_t min_as_msg_sz = sizeof(as_msg) + min_field_sz;
					size_t peekbuf_sz = 2048; // (Arbitrary "large enough" size for peeking the fields of "most" AS_MSGs.)
					uint8_t peekbuf[peekbuf_sz];
					if (PROTO_TYPE_AS_MSG == proto.type) {
						size_t offset = sizeof(as_msg);
						// Number of bytes to peek from the socket.
//						size_t peek_sz = peekbuf_sz;                 // Peak up to the size of the peek buffer.
						size_t peek_sz = MIN(proto.sz, peekbuf_sz);  // Peek only up to the minimum necessary number of bytes.
						if (!(peeked_data_sz = cf_socket_recv(fd, peekbuf, peek_sz, 0))) {
							// That's actually legitimate. The as_proto may have gone into one
							// packet, the as_msg into the next one, which we haven't yet received.
							// This just "never happened" without async.
							cf_detail(AS_DEMARSHAL, "could not peek the as_msg header, expected %zu byte(s)", peek_sz);
						}
						if (peeked_data_sz > min_as_msg_sz) {
//							cf_debug(AS_DEMARSHAL, "(Peeked %zu bytes.)", peeked_data_sz);
							if (peeked_data_sz > proto.sz) {
								cf_warning(AS_DEMARSHAL, "Received unexpected extra data from client %s socket %d when peeking as_proto!", fd_h->client, fd);
								log_as_proto_and_peeked_data(&proto, peekbuf, peeked_data_sz);
								goto NextEvent_FD_Cleanup;
							}

							if (((as_msg*)peekbuf)->info1 & AS_MSG_INFO1_BATCH) {
								jem_set_arena(orig_arena);
							} else {
								uint16_t n_fields = ntohs(((as_msg *) peekbuf)->n_fields), field_num = 0;
								bool found = false;
	//							cf_debug(AS_DEMARSHAL, "Found %d AS_MSG fields", n_fields);
								while (!found && (field_num < n_fields)) {
									as_msg_field *field = (as_msg_field *) (&peekbuf[offset]);
									uint32_t value_sz = ntohl(field->field_sz) - 1;
	//								cf_debug(AS_DEMARSHAL, "Field #%d offset: %lu", field_num, offset);
	//								cf_debug(AS_DEMARSHAL, "\tvalue_sz %u", value_sz);
	//								cf_debug(AS_DEMARSHAL, "\ttype %d", field->type);
									if (AS_MSG_FIELD_TYPE_NAMESPACE == field->type) {
										if (value_sz >= AS_ID_NAMESPACE_SZ) {
											cf_warning(AS_DEMARSHAL, "namespace too long (%u) in as_msg", value_sz);
											goto NextEvent_FD_Cleanup;
										}
										char ns[AS_ID_NAMESPACE_SZ];
										found = true;
										memcpy(ns, field->data, value_sz);
										ns[value_sz] = '\0';
	//									cf_debug(AS_DEMARSHAL, "Found ns \"%s\" in field #%d.", ns, field_num);
										jem_set_arena(as_namespace_get_jem_arena(ns));
									} else {
	//									cf_debug(AS_DEMARSHAL, "Message field %d is not namespace (type %d) ~~ Reading next field", field_num, field->type);
										field_num++;
										offset += sizeof(as_msg_field) + value_sz;
										if (offset >= peeked_data_sz) {
											break;
										}
									}
								}
								if (!found) {
									cf_warning(AS_DEMARSHAL, "Can't get namespace from AS_MSG (peeked %zu bytes) ~~ Using default thr_demarshal arena.", peeked_data_sz);
									jem_set_arena(orig_arena);
								}
							}
						} else {
							jem_set_arena(orig_arena);
						}
					} else {
						jem_set_arena(orig_arena);
					}
#endif

					// Allocate the complete message buffer.
					proto_p = cf_malloc(sizeof(as_proto) + proto.sz);

					cf_assert(proto_p, AS_DEMARSHAL, CF_CRITICAL, "allocation: %zu %s", (sizeof(as_proto) + proto.sz), cf_strerror(errno));
					memcpy(proto_p, &proto, sizeof(as_proto));

#ifdef USE_JEM
					// Jam in the peeked data.
					if (peeked_data_sz) {
						memcpy(proto_p->data, &peekbuf, peeked_data_sz);
					}
					fd_h->proto_unread = proto_p->sz - peeked_data_sz;
#else
					fd_h->proto_unread = proto_p->sz;
#endif
					fd_h->proto = (void *) proto_p;
				}
				else {
					proto_p = fd_h->proto;
				}

				if (fd_h->proto_unread > 0) {

					// Read the data.
					n = cf_socket_recv(fd, proto_p->data + (proto_p->sz - fd_h->proto_unread), fd_h->proto_unread, 0);
					if (0 >= n) {
						if (errno == EAGAIN) {
							continue;
						}
						cf_info(AS_DEMARSHAL, "receive socket: fail? n %d errno %d %s closing connection.", n, errno, cf_strerror(errno));
						goto NextEvent_FD_Cleanup;
					}

					// Decrement bytes-unread counter.
					cf_detail(AS_DEMARSHAL, "read fd %d (%d %d)", fd, n, fd_h->proto_unread);
					fd_h->proto_unread -= n;
				}

				// Check for a finished read.
				if (0 == fd_h->proto_unread) {

					// It's only really live if it's injecting a transaction.
					fd_h->last_used = now_ms;

					thr_demarshal_pause(fd_h); // pause reading while the transaction is in progress
					fd_h->proto = 0;
					fd_h->proto_unread = 0;

					// INIT_TR
					as_transaction tr;
					as_transaction_init(&tr, NULL, (cl_msg *)proto_p);

					cf_rc_reserve(fd_h);
					has_extra_ref   = true;
					tr.proto_fd_h   = fd_h;
					tr.start_time   = now_ns; // set transaction start time
					tr.preprocessed = false;

					if (! as_proto_is_valid_type(proto_p)) {
						cf_warning(AS_DEMARSHAL, "unsupported proto message type %u", proto_p->type);
						// We got a proto message type we don't recognize, so it
						// may not do any good to send back an as_msg error, but
						// it's the best we can do. At least we can keep the fd.
						as_transaction_demarshal_error(&tr, AS_PROTO_RESULT_FAIL_UNKNOWN);
						cf_atomic_int_incr(&g_config.proto_transactions);
						goto NextEvent;
					}

					if (g_config.microbenchmarks) {
						histogram_insert_data_point(g_config.demarshal_hist, now_ns);
						tr.microbenchmark_time = cf_getns();
					}

					// Check if it's compressed.
					if (tr.msgp->proto.type == PROTO_TYPE_AS_MSG_COMPRESSED) {
						// Decompress it - allocate buffer to hold decompressed
						// packet.
						uint8_t *decompressed_buf = NULL;
						size_t decompressed_buf_size = 0;
						int rv = 0;
						if ((rv = as_packet_decompression((uint8_t *)proto_p, &decompressed_buf, &decompressed_buf_size))) {
							cf_warning(AS_DEMARSHAL, "as_proto decompression failed! (rv %d)", rv);
							cf_warning_binary(AS_DEMARSHAL, proto_p, sizeof(as_proto) + proto_p->sz, CF_DISPLAY_HEX_SPACED, "compressed proto_p");
							as_transaction_demarshal_error(&tr, AS_PROTO_RESULT_FAIL_UNKNOWN);
							cf_atomic_int_incr(&g_config.proto_transactions);
							goto NextEvent;
						}
						// Count the packets.
						cf_atomic_int_add(&g_config.stat_compressed_pkts_received, 1);
						// Free the compressed packet since we'll be using the
						// decompressed packet from now on.
						cf_free(proto_p);
						proto_p = NULL;
						// Get original packet.
						tr.msgp = (cl_msg *)decompressed_buf;
						as_proto_swap(&(tr.msgp->proto));

						if (! as_proto_wrapped_is_valid(&tr.msgp->proto, decompressed_buf_size)) {
							cf_warning(AS_DEMARSHAL, "decompressed unusable proto: version %u, type %u, sz %lu [%lu]",
									tr.msgp->proto.version, tr.msgp->proto.type, tr.msgp->proto.sz, decompressed_buf_size);
							as_transaction_demarshal_error(&tr, AS_PROTO_RESULT_FAIL_UNKNOWN);
							cf_atomic_int_incr(&g_config.proto_transactions);
							goto NextEvent;
						}
					}

					// Security protocol transactions.
					if (tr.msgp->proto.type == PROTO_TYPE_SECURITY) {
						as_security_transact(&tr);
						cf_atomic_int_incr(&g_config.proto_transactions);
						goto NextEvent;
					}

					// Info protocol requests.
					if (tr.msgp->proto.type == PROTO_TYPE_INFO) {
						if (as_info(&tr)) {
							cf_warning(AS_DEMARSHAL, "Info request failed to be enqueued ~~ Freeing protocol buffer");
							goto NextEvent_FD_Cleanup;
						}
						cf_atomic_int_incr(&g_config.proto_transactions);
						goto NextEvent;
					}

					ASD_TRANS_DEMARSHAL(nodeid, (uint64_t) tr.msgp);

					// Fast path for batch requests.
					if (tr.msgp->msg.info1 & AS_MSG_INFO1_BATCH) {
						as_batch_queue_task(&tr);
						cf_atomic_int_incr(&g_config.proto_transactions);
						goto NextEvent;
					}

					// Either process the transaction directly in this thread,
					// or queue it for processing by another thread (tsvc/info).
					if (0 != thr_tsvc_process_or_enqueue(&tr)) {
						cf_warning(AS_DEMARSHAL, "Failed to queue transaction to the service thread");
						goto NextEvent_FD_Cleanup;
					}
					else {
						cf_atomic_int_incr(&g_config.proto_transactions);
					}
				}

				// Jump the proto message free & FD cleanup. If we get here, the
				// above operations went smoothly. The message free & FD cleanup
				// job is handled elsewhere as directed by
				// thr_tsvc_process_or_enqueue().
				goto NextEvent;

NextEvent_FD_Cleanup:
				// If we allocated memory for the incoming message, free it.
				if (proto_p) {
					cf_free(proto_p);
					fd_h->proto = 0;
				}
				// If fd has extra reference for transaction, release it.
				if (has_extra_ref) {
					cf_rc_release(fd_h);
				}
				// Remove the fd from the events list.
				if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, 0) < 0) {
					cf_crash(AS_DEMARSHAL, "unable to remove socket FD %d from epoll instance FD %d: %d (%s)",
							fd, epoll_fd, errno, cf_strerror(errno));
				}
				pthread_mutex_lock(&g_file_handle_a_LOCK);
				fd_h->reap_me = true;
				as_release_file_handle(fd_h);
				fd_h = 0;
				pthread_mutex_unlock(&g_file_handle_a_LOCK);
NextEvent:
				;
			}

			// We should never be canceled externally, but just in case...
			pthread_testcancel();
		}
	}

	return NULL;
}
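The demarshal loop above accumulates each wire message across multiple epoll wakeups by remembering how many bytes of the current proto are still unread and resuming into the same buffer. Below is a minimal sketch of that partial-read pattern on a non-blocking socket; the names (conn_state, read_body) are illustrative only and not from the original source.

#include <errno.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Illustrative per-connection state; the real code keeps the
 * equivalent fields in its file-handle structure. */
typedef struct conn_state {
	uint8_t *buf;     /* full message buffer, sized from the header */
	size_t   total;   /* total body size, learned from the header   */
	size_t   unread;  /* bytes still missing                        */
} conn_state;

/* Returns 1 when the message is complete, 0 to wait for the next
 * readiness event, and -1 on peer close or a hard socket error. */
static int read_body(int fd, conn_state *cs)
{
	while (cs->unread > 0) {
		ssize_t n = recv(fd, cs->buf + (cs->total - cs->unread),
				cs->unread, 0);

		if (n > 0) {
			cs->unread -= (size_t)n;
			continue;
		}

		if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
			return 0;   /* no more data yet, try again later */
		}

		return -1;          /* 0 == peer closed, < 0 == hard error */
	}

	return 1;                   /* buffer now holds the whole message */
}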
/*
 * pthread_delay_np
 *
 * DESCRIPTION
 *
 *       This routine causes a thread to delay execution for a specific period of time.
 *       This period ends at the current time plus the specified interval. The routine
 *       will not return before the end of the period is reached, but may return an
 *       arbitrary amount of time after the period has gone by. This can be due to
 *       system load, thread priorities, and system timer granularity. 
 *
 *       Specifying an interval of zero (0) seconds and zero (0) nanoseconds is
 *       allowed and can be used to force the thread to give up the processor or to
 *       deliver a pending cancelation request. 
 *
 *       The timespec structure contains the following two fields: 
 *
 *            tv_sec is an integer number of seconds. 
 *            tv_nsec is an integer number of nanoseconds. 
 *
 *  Return Values
 *
 *  If an error condition occurs, this routine returns an integer value indicating
 *  the type of error. Possible return values are as follows: 
 *
 *  0 
 *           Successful completion.
 *  [EINVAL] 
 *           The value specified by interval is invalid. 
 *
 * Example
 *
 * The following code segment would wait for 5 and 1/2 seconds
 *
 *  struct timespec tsWait;
 *  int      intRC;
 *
 *  tsWait.tv_sec  = 5;
 *  tsWait.tv_nsec = 500000000L;
 *  intRC = pthread_delay_np(&tsWait);
 */
int
pthread_delay_np (struct timespec *interval)
{
  DWORD wait_time;
  DWORD secs_in_millisecs;
  DWORD millisecs;
  DWORD status;
  pthread_t self;
  ptw32_thread_t * sp;

  if (interval == NULL)
    {
      return EINVAL;
    }

  if (interval->tv_sec == 0L && interval->tv_nsec == 0L)
    {
      pthread_testcancel ();
      Sleep (0);
      pthread_testcancel ();
      return (0);
    }

  /* convert secs to millisecs */
  secs_in_millisecs = (DWORD)interval->tv_sec * 1000L;

  /* convert nanosecs to millisecs (rounding up) */
  millisecs = (interval->tv_nsec + 999999L) / 1000000L;

#if defined(__WATCOMC__)
#pragma disable_message (124)
#endif

  /*
   * Most compilers will issue a warning 'comparison always 0'
   * because wait_time is unsigned, so this test can never be true.
   * Keep it anyway as a guard in case the types are ever changed
   * to signed.
   */
  if (0 > (wait_time = secs_in_millisecs + millisecs))
    {
      return EINVAL;
    }

#if defined(__WATCOMC__)
#pragma enable_message (124)
#endif

  if (NULL == (self = pthread_self ()).p)
    {
      return ENOMEM;
    }

  sp = (ptw32_thread_t *) self.p;

  if (sp->cancelState == PTHREAD_CANCEL_ENABLE)
    {
      /*
       * Async cancelation won't catch us until wait_time is up.
       * Deferred cancelation will cancel us immediately.
       */
      if (WAIT_OBJECT_0 ==
	  (status = WaitForSingleObject (sp->cancelEvent, wait_time)))
	{
          ptw32_mcs_local_node_t stateLock;
	  /*
	   * Canceling!
	   */
	  ptw32_mcs_lock_acquire (&sp->stateLock, &stateLock);
	  if (sp->state < PThreadStateCanceling)
	    {
	      sp->state = PThreadStateCanceling;
	      sp->cancelState = PTHREAD_CANCEL_DISABLE;
	      ptw32_mcs_lock_release (&stateLock);

	      ptw32_throw (PTW32_EPS_CANCEL);
	    }

	  ptw32_mcs_lock_release (&stateLock);
	  return ESRCH;
	}
      else if (status != WAIT_TIMEOUT)
	{
	  return EINVAL;
	}
    }
  else
    {
      Sleep (wait_time);
    }

  return (0);
}
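Because pthread_delay_np() is a cancellation point when cancellation is enabled, a thread sleeping in it can be stopped promptly with pthread_cancel() rather than only after the full interval. The following is a minimal usage sketch, assuming a pthreads-win32 style build where pthread_delay_np() is declared in pthread.h; the worker() name and the 100 ms slice are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static void *worker(void *arg)
{
	struct timespec slice = { 0, 100000000L };  /* 100 ms */

	(void)arg;

	for (;;) {
		/* Cancellation point: a pending cancel ends the thread here
		 * instead of only after the full interval. */
		pthread_delay_np(&slice);
		/* ... periodic work would go here ... */
	}

	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct timespec half_sec = { 0, 500000000L };

	pthread_create(&tid, NULL, worker, NULL);

	/* Let the worker run for roughly half a second, then cancel it. */
	pthread_delay_np(&half_sec);
	pthread_cancel(tid);
	pthread_join(tid, NULL);

	puts("worker cancelled");
	return 0;
}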
Example #29
	void Thread::cancellationPoint () const
	{
	  pthread_testcancel();
	}
Example #30
static void *run(hashpipe_thread_args_t * args)
{
    // Local aliases to shorten access to args fields
    // Our output buffer happens to be a paper_input_databuf
    hashpipe_status_t st = args->st;
    const char * status_key = args->thread_desc->skey;

    st_p = &st;	// allow global (this source file) access to the status buffer

    // Get initial value for crc32 function
    uint32_t init_crc = crc32(0,0,0);

    // Flag that holds off the crc thread
    int holdoff = 1;

    // Force ourself into the hold off state
    hashpipe_status_lock_safe(&st);
    hputi4(st.buf, "NETHOLD", 1);
    hashpipe_status_unlock_safe(&st);

    while(holdoff) {
	// We're not in any hurry to start up
	sleep(1);
	hashpipe_status_lock_safe(&st);
	// Look for NETHOLD value
	hgeti4(st.buf, "NETHOLD", &holdoff);
	if(!holdoff) {
	    // Done holding, so delete the key
	    hdel(st.buf, "NETHOLD");
	}
	hashpipe_status_unlock_safe(&st);
    }

    /* Read network params */
    struct hashpipe_udp_params up = {
	.bindhost = "0.0.0.0",
	.bindport = 8511,
	.packet_size = 8200
    };
    hashpipe_status_lock_safe(&st);
    // Get info from status buffer if present (no change if not present)
    hgets(st.buf, "BINDHOST", 80, up.bindhost);
    hgeti4(st.buf, "BINDPORT", &up.bindport);
    // Store bind host/port info etc in status buffer
    hputs(st.buf, "BINDHOST", up.bindhost);
    hputi4(st.buf, "BINDPORT", up.bindport);
    hputu4(st.buf, "CRCPKOK", 0);
    hputu4(st.buf, "CRCPKERR", 0);
    hputs(st.buf, status_key, "running");
    hashpipe_status_unlock_safe(&st);

    struct hashpipe_udp_packet p;

    /* Give all the threads a chance to start before opening network socket */
    sleep(1);


    /* Set up UDP socket */
    int rv = hashpipe_udp_init(&up);
    if (rv!=HASHPIPE_OK) {
        hashpipe_error("paper_crc_thread",
                "Error opening UDP socket.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)hashpipe_udp_close, &up);

    /* Main loop */
    uint64_t packet_count = 0;
    uint64_t good_count = 0;
    uint64_t error_count = 0;
    uint64_t elapsed_wait_ns = 0;
    uint64_t elapsed_recv_ns = 0;
    uint64_t elapsed_proc_ns = 0;
    float ns_per_wait = 0.0;
    float ns_per_recv = 0.0;
    float ns_per_proc = 0.0;
    struct timespec start, stop;
    struct timespec recv_start, recv_stop;
    packet_header_t hdr;

    while (run_threads()) {

        /* Read packet */
	clock_gettime(CLOCK_MONOTONIC, &recv_start);
	do {
	    clock_gettime(CLOCK_MONOTONIC, &start);
	    p.packet_size = recv(up.sock, p.data, HASHPIPE_MAX_PACKET_SIZE, 0);
	    clock_gettime(CLOCK_MONOTONIC, &recv_stop);
	} while (p.packet_size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK) && run_threads());

	// Break out of loop if stopping
	if(!run_threads()) break;

	// Increment packet count
	packet_count++;

	// Check CRC
        if(crc32(init_crc, (/*const?*/ uint8_t *)p.data, p.packet_size) == 0xffffffff) {
	    // CRC OK! Increment good counter
	    good_count++;
	} else {
	    // CRC error!  Increment error counter
	    error_count++;

	    // Log message
	    get_header(&p, &hdr);
	    hashpipe_warn("paper_crc", "CRC error mcnt %llu ; fid %u ; xid %u",
		    hdr.mcnt, hdr.fid, hdr.xid);
	}

	clock_gettime(CLOCK_MONOTONIC, &stop);
	elapsed_wait_ns += ELAPSED_NS(recv_start, start);
	elapsed_recv_ns += ELAPSED_NS(start, recv_stop);
	elapsed_proc_ns += ELAPSED_NS(recv_stop, stop);

        if(packet_count % 1000 == 0) {
	    // Compute stats
	    get_header(&p, &hdr);
            ns_per_wait = (float)elapsed_wait_ns / packet_count;
            ns_per_recv = (float)elapsed_recv_ns / packet_count;
            ns_per_proc = (float)elapsed_proc_ns / packet_count;

            // Update status
            hashpipe_status_lock_busywait_safe(&st);
            hputu8(st.buf, "CRCMCNT", hdr.mcnt);
	    // Gbps = bits_per_packet / ns_per_packet
	    // (N_BYTES_PER_PACKET excludes header, so +8 for the header)
            hputr4(st.buf, "CRCGBPS", 8*(N_BYTES_PER_PACKET+8)/(ns_per_recv+ns_per_proc));
            hputr4(st.buf, "CRCWATNS", ns_per_wait);
            hputr4(st.buf, "CRCRECNS", ns_per_recv);
            hputr4(st.buf, "CRCPRCNS", ns_per_proc);
	    // TODO Provide some way to recognize request to zero out the
	    // CRCERR and CRCOK fields.
	    hputu8(st.buf, "CRCPKOK",  good_count);
	    hputu8(st.buf, "CRCPKERR", error_count);
            hashpipe_status_unlock_safe(&st);

	    // Start new average
	    elapsed_wait_ns = 0;
	    elapsed_recv_ns = 0;
	    elapsed_proc_ns = 0;
	    packet_count = 0;
        }

        /* Will exit if thread has been cancelled */
        pthread_testcancel();
    }

    /* Have to close all pushes */
    pthread_cleanup_pop(1); /* Closes push(hashpipe_udp_close) */

    return NULL;
}
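The statistics block above turns accumulated per-packet nanoseconds into a throughput figure by exploiting the fact that bits per nanosecond equals gigabits per second (1 bit/ns = 1e9 bit/s). Below is a minimal sketch of that bookkeeping; the names (crc_stats, update_stats, PACKET_PAYLOAD_BYTES) are illustrative stand-ins, not the status-buffer keys used above.

#include <stdint.h>
#include <stdio.h>

#define PACKET_PAYLOAD_BYTES 8192   /* assumed payload size; header adds 8 */

/* Counters accumulated over one averaging window. */
typedef struct crc_stats {
	uint64_t packet_count;
	uint64_t elapsed_recv_ns;
	uint64_t elapsed_proc_ns;
} crc_stats;

/* Called once per window, only when packet_count > 0. */
static void update_stats(crc_stats *s)
{
	float ns_per_recv = (float)s->elapsed_recv_ns / s->packet_count;
	float ns_per_proc = (float)s->elapsed_proc_ns / s->packet_count;

	/* 8 bits per byte, +8 bytes for the packet header;
	 * bits / ns comes out directly in Gbps. */
	float gbps = 8.0f * (PACKET_PAYLOAD_BYTES + 8)
			/ (ns_per_recv + ns_per_proc);

	printf("recv %.1f ns/pkt, proc %.1f ns/pkt, %.2f Gbps\n",
			ns_per_recv, ns_per_proc, gbps);

	/* Start a new averaging window, as the loop above does. */
	s->packet_count     = 0;
	s->elapsed_recv_ns  = 0;
	s->elapsed_proc_ns  = 0;
}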

static hashpipe_thread_desc_t crc_thread = {
    name: "paper_crc_thread",
    skey: "CRCSTAT",
    init: NULL,
    run:  run,
    ibuf_desc: {NULL},