Example #1
static void destroyAllChannels ( 
    struct client * client, ELLLIST * pList )
{
    if ( !client->chanListLock || !client->eventqLock ) {
        return;
    }

    while ( TRUE ) {
        struct event_ext        *pevext;
        int                     status;
        struct channel_in_use   *pciu;

        epicsMutexMustLock ( client->chanListLock );
        pciu = (struct channel_in_use *) ellGet ( pList );
        epicsMutexUnlock ( client->chanListLock );

        if ( ! pciu ) {
            break;
        }

        while ( TRUE ) {
            /*
            * AS state change could be using this list
            */
            epicsMutexMustLock ( client->eventqLock );
            pevext = (struct event_ext *) ellGet ( &pciu->eventq );
            epicsMutexUnlock ( client->eventqLock );

            if ( ! pevext ) {
                break;
            }

            if ( pevext->pdbev ) {
                db_cancel_event (pevext->pdbev);
            }
            freeListFree (rsrvEventFreeList, pevext);
        }
        rsrvFreePutNotify ( client, pciu->pPutNotify );
        LOCK_CLIENTQ;
        status = bucketRemoveItemUnsignedId ( pCaBucket, &pciu->sid);
        rsrvChannelCount--;
        UNLOCK_CLIENTQ;
        if ( status != S_bucket_success ) {
            errPrintf ( status, __FILE__, __LINE__, 
                "Bad id=%d at close", pciu->sid);
        }
        status = asRemoveClient(&pciu->asClientPVT);
        if ( status && status != S_asLib_asNotActive ) {
            printf ( "bad asRemoveClient() status was %x \n", status );
            errPrintf ( status, __FILE__, __LINE__, "asRemoveClient" );
        }

        freeListFree ( rsrvChanFreeList, pciu );
    }
}
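The examples on this page all revolve around the same primitive: ellGet() unlinks and returns the first node of an ELLLIST, or NULL when the list is empty, so wrapping it in a while loop drains the list one node at a time. Below is a minimal, self-contained sketch of that drain loop, assuming a hypothetical myNode type whose ELLNODE is the first member (so a plain cast recovers the node); it is not taken from any of the examples here.

#include <stdlib.h>

#include "ellLib.h"

typedef struct {
    ELLNODE node;    /* first member, so an ELLNODE * casts back to myNode * */
    int     value;
} myNode;

static void drainList(ELLLIST *plist)
{
    ELLNODE *pn;

    /* ellGet() removes and returns the head node; NULL ends the loop */
    while ((pn = ellGet(plist)) != NULL) {
        myNode *pm = (myNode *) pn;
        /* ... use pm->value ... */
        free(pm);
    }
}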
Example #2
void
bufRxManager::received(CALLBACK* cb)
{
    void *vptr;
    callbackGetUser(vptr,cb);
    bufRxManager &self=*static_cast<bufRxManager*>(vptr);

    SCOPED_LOCK2(self.guard, G);

    while(true) {
        ELLNODE *node=ellGet(&self.usedbufs);

        if (!node)
            break;
        buffer *buf=CONTAINER(node, buffer, node);

        G.unlock();

        for(ELLNODE *cur=ellFirst(&self.dispatch); cur; cur=ellNext(cur)) {
            listener *action=CONTAINER(cur, listener, node);
            (action->fn)(action->fnarg, 0, buf->used, buf->data);
        }

        G.lock();

        ellAdd(&self.freebufs, &buf->node);
    }
}
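Several of the examples (#2, #8, #15, #16) recover the owning structure from the returned ELLNODE with a container_of-style CONTAINER macro rather than a plain cast, which also works when the node is not the first member. A small sketch of that idiom with a hypothetical item type; the macro is spelled out with offsetof here instead of being taken from an EPICS header.

#include <stddef.h>
#include <stdlib.h>

#include "ellLib.h"

/* recover a pointer to the enclosing struct from a pointer to its list node */
#define NODE_TO_ITEM(pnode, type, member) \
    ((type *)((char *)(pnode) - offsetof(type, member)))

typedef struct {
    double  value;
    ELLNODE node;    /* deliberately not the first member */
} item;

static void drainItems(ELLLIST *plist)
{
    ELLNODE *pn;

    while ((pn = ellGet(plist)) != NULL) {
        item *pi = NODE_TO_ITEM(pn, item, node);
        /* ... use pi->value ... */
        free(pi);
    }
}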
Example #3
static void harnessExit(void *dummy) {
    epicsTimeStamp ended;
    int Faulty;

    if (!Harness) return;

    epicsTimeGetCurrent(&ended);

    printf("\n\n    EPICS Test Harness Results"
             "\n    ==========================\n\n");

    Faulty = ellCount(&faults);
    if (!Faulty)
        printf("All tests successful.\n");
    else {
        int Failures = 0;
        testFailure *f;

        printf("Failing Program           Tests  Faults\n"
               "---------------------------------------\n");
        while ((f = (testFailure *)ellGet(&faults))) {
            Failures += f->failures;
            printf("%-25s %5d   %5d\n", f->name, f->tests, f->failures);
            if (f->skips)
                printf("%d subtests skipped\n", f->skips);
            free(f);
        }
        printf("\nFailed %d/%d test programs. %d/%d subtests failed.\n",
               Faulty, Programs, Failures, Tests);
    }

    printf("Programs=%d, Tests=%d, %.0f wallclock secs\n\n",
           Programs, Tests, epicsTimeDiffInSeconds(&ended, &started));
}
Example #4
static void twdShutdown(void *arg)
{
    ELLNODE *cur;
    twdCtl = twdctlExit;
    epicsEventSignal(loopEvent);
    epicsEventWait(exitEvent);
    while ((cur = ellGet(&fList)) != NULL) {
        VALGRIND_MEMPOOL_FREE(&fList, cur);
        free(cur);
    }
    VALGRIND_DESTROY_MEMPOOL(&fList);
}
Example #5
static struct eventNode *
getEventNode(epicsMessageQueueId pmsg)
{
    struct eventNode *evp;

    evp = reinterpret_cast < struct eventNode * > ( ellGet(&pmsg->eventFreeList) );
    if (evp == NULL) {
        evp = (struct eventNode *) callocMustSucceed(1, sizeof(*evp),
                "epicsMessageQueueGetEventNode");
        evp->event = epicsEventMustCreate(epicsEventEmpty);
    }
    return evp;
}
Example #6
epicsShareFunc void epicsShareAPI
epicsMessageQueueDestroy(epicsMessageQueueId pmsg)
{
    struct eventNode *evp;

    while ((evp = reinterpret_cast < struct eventNode * >
                  ( ellGet(&pmsg->eventFreeList) ) ) != NULL) {
        epicsEventDestroy(evp->event);
        free(evp);
    }
    epicsMutexDestroy(pmsg->mutex);
    free(pmsg->buf);
    free(pmsg);
}
Example #7
static union twdNode *newNode(void)
{
    union twdNode *pn;

    epicsMutexMustLock(fLock);
    pn = (union twdNode *)ellGet(&fList);
    if (pn) {
        VALGRIND_MEMPOOL_FREE(&fList, pn);
    }
    epicsMutexUnlock(fLock);
    if (!pn)
        pn = calloc(1, sizeof(union twdNode));
    if (pn)
        VALGRIND_MEMPOOL_ALLOC(&fList, pn, sizeof(*pn));
    return pn;
}
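Examples #4 through #7 use an ELLLIST as a simple free list: ellGet() recycles a spare node when one is available, the caller falls back to allocating a fresh one when it is not, and finished nodes are returned with ellAdd(). A minimal sketch of that allocator follows, with a hypothetical poolNode type and without the Valgrind annotations; the mutex is assumed to have been created during initialization.

#include <stdlib.h>

#include "ellLib.h"
#include "epicsMutex.h"

typedef struct {
    ELLNODE node;
    char    payload[64];
} poolNode;

static ELLLIST freeList = ELLLIST_INIT;
static epicsMutexId freeListLock;    /* assumed: created once at startup */

static poolNode *allocNode(void)
{
    poolNode *pn;

    epicsMutexMustLock(freeListLock);
    pn = (poolNode *) ellGet(&freeList);    /* NULL when the free list is empty */
    epicsMutexUnlock(freeListLock);

    if (!pn)
        pn = calloc(1, sizeof(*pn));
    return pn;
}

static void freeNode(poolNode *pn)
{
    epicsMutexMustLock(freeListLock);
    ellAdd(&freeList, &pn->node);
    epicsMutexUnlock(freeListLock);
}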
Example #8
epicsUInt8*
bufRxManager::getFree(unsigned int* blen)
{
    ELLNODE *node;
    {
        SCOPED_LOCK(guard);
        node=ellGet(&freebufs);
    }

    if (!node)
        return NULL;

    buffer *buf=CONTAINER(node, buffer, node);

    if (blen) *blen=bsize();

    buf->used=0;
    return buf->data;
}
Example #9
void dbCaShutdown(void)
{
    if (dbCaCtl == ctlRun || dbCaCtl == ctlPause) {
        dbCaCtl = ctlExit;
        epicsEventSignal(workListEvent);
        epicsEventMustWait(startStopEvent);
        epicsEventDestroy(startStopEvent);
    } else {
        /* manually cleanup queue since dbCa thread isn't running
         * which only happens in unit tests
         */
        caLink *pca;
        epicsMutexMustLock(workListLock);
        while((pca=(caLink*)ellGet(&workList))!=NULL) {
            if(pca->link_action&CA_CLEAR_CHANNEL) {
                dbCaLinkFree(pca);
            }
        }
        epicsMutexUnlock(workListLock);
    }
}
Example #10
/*
 * removeDuplicateAddresses ()
 */
extern "C" void epicsShareAPI removeDuplicateAddresses 
    ( ELLLIST *pDestList, ELLLIST *pSrcList, int silent )
{
    ELLNODE *pRawNode;

    while ( (pRawNode  = ellGet ( pSrcList ) ) ) {
        STATIC_ASSERT ( offsetof (osiSockAddrNode, node) == 0 );
        osiSockAddrNode *pNode = reinterpret_cast <osiSockAddrNode *> ( pRawNode );
        osiSockAddrNode *pTmpNode;

        if ( pNode->addr.sa.sa_family == AF_INET ) {

            pTmpNode = (osiSockAddrNode *) ellFirst (pDestList);    // X aCC 749
            while ( pTmpNode ) {
                if (pTmpNode->addr.sa.sa_family == AF_INET) {
                    if ( pNode->addr.ia.sin_addr.s_addr == pTmpNode->addr.ia.sin_addr.s_addr && 
                            pNode->addr.ia.sin_port == pTmpNode->addr.ia.sin_port ) {
                        if ( ! silent ) {
                            char buf[64];
                            ipAddrToDottedIP ( &pNode->addr.ia, buf, sizeof (buf) );
                            fprintf ( stderr,
                                "Warning: Duplicate EPICS CA Address list entry \"%s\" discarded\n", buf );
                        }
                        free (pNode);
                        pNode = NULL;
                        break;
                    }
                }
                pTmpNode = (osiSockAddrNode *) ellNext (&pTmpNode->node); // X aCC 749
            }
            if (pNode) {
                ellAdd (pDestList, &pNode->node);
            }
        }
        else {
            ellAdd (pDestList, &pNode->node);
        }
    }
}
Example #11
//
// cac::cac ()
//
cac::cac ( 
    epicsMutex & mutualExclusionIn, 
    epicsMutex & callbackControlIn, 
    cacContextNotify & notifyIn ) :
    _refLocalHostName ( localHostNameCache.getReference () ),
    programBeginTime ( epicsTime::getCurrent() ),
    connTMO ( CA_CONN_VERIFY_PERIOD ),
    mutex ( mutualExclusionIn ),
    cbMutex ( callbackControlIn ),
    ipToAEngine ( ipAddrToAsciiEngine::allocate () ),
    timerQueue ( epicsTimerQueueActive::allocate ( false, 
        lowestPriorityLevelAbove(epicsThreadGetPrioritySelf()) ) ),
    pUserName ( 0 ),
    pudpiiu ( 0 ),
    tcpSmallRecvBufFreeList ( 0 ),
    tcpLargeRecvBufFreeList ( 0 ),
    notify ( notifyIn ),
    initializingThreadsId ( epicsThreadGetIdSelf() ),
    initializingThreadsPriority ( epicsThreadGetPrioritySelf() ),
    maxRecvBytesTCP ( MAX_TCP ),
    maxContigFrames ( contiguousMsgCountWhichTriggersFlowControl ),
    beaconAnomalyCount ( 0u ),
    iiuExistenceCount ( 0u ),
    cacShutdownInProgress ( false )
{
    if ( ! osiSockAttach () ) {
        throwWithLocation ( udpiiu :: noSocket () );
    }

    try {
        long status;

        /*
         * Certain os, such as HPUX, do not unblock a socket system call 
         * when another thread asynchronously calls both shutdown() and 
         * close(). To solve this problem we need to employ OS specific
         * mechanisms.
         */
        epicsSignalInstallSigAlarmIgnore ();
        epicsSignalInstallSigPipeIgnore ();

        {
            char tmp[256];
            size_t len;
            osiGetUserNameReturn gunRet;

            gunRet = osiGetUserName ( tmp, sizeof (tmp) );
            if ( gunRet != osiGetUserNameSuccess ) {
                tmp[0] = '\0';
            }
            len = strlen ( tmp ) + 1;
            this->pUserName = new char [ len ];
            strncpy ( this->pUserName, tmp, len );
        }

        this->_serverPort = 
            envGetInetPortConfigParam ( &EPICS_CA_SERVER_PORT,
                                        static_cast <unsigned short> (CA_SERVER_PORT) );

        status = envGetDoubleConfigParam ( &EPICS_CA_CONN_TMO, &this->connTMO );
        if ( status ) {
            this->connTMO = CA_CONN_VERIFY_PERIOD;
            epicsGuard < epicsMutex > cbGuard ( this->cbMutex );
            errlogPrintf ( "EPICS \"%s\" double fetch failed\n", EPICS_CA_CONN_TMO.name );
            errlogPrintf ( "Defaulting \"%s\" = %f\n", EPICS_CA_CONN_TMO.name, this->connTMO );
        }

        long maxBytesAsALong;
        status =  envGetLongConfigParam ( &EPICS_CA_MAX_ARRAY_BYTES, &maxBytesAsALong );
        if ( status || maxBytesAsALong < 0 ) {
            errlogPrintf ( "cac: EPICS_CA_MAX_ARRAY_BYTES was not a positive integer\n" );
        }
        else {
            /* allow room for the protocol header so that they get the array size they requested */
            static const unsigned headerSize = sizeof ( caHdr ) + 2 * sizeof ( ca_uint32_t );
            ca_uint32_t maxBytes = ( unsigned ) maxBytesAsALong;
            if ( maxBytes < 0xffffffff - headerSize ) {
                maxBytes += headerSize;
            }
            else {
                maxBytes = 0xffffffff;
            }
            if ( maxBytes < MAX_TCP ) {
                errlogPrintf ( "cac: EPICS_CA_MAX_ARRAY_BYTES was rounded up to %u\n", MAX_TCP );
            }
            else {
                this->maxRecvBytesTCP = maxBytes;
            }
        }
        freeListInitPvt ( &this->tcpSmallRecvBufFreeList, MAX_TCP, 1 );
        if ( ! this->tcpSmallRecvBufFreeList ) {
            throw std::bad_alloc ();
        }

        freeListInitPvt ( &this->tcpLargeRecvBufFreeList, this->maxRecvBytesTCP, 1 );
        if ( ! this->tcpLargeRecvBufFreeList ) {
            throw std::bad_alloc ();
        }
        unsigned bufsPerArray = this->maxRecvBytesTCP / comBuf::capacityBytes ();
        if ( bufsPerArray > 1u ) {
            maxContigFrames = bufsPerArray * 
                contiguousMsgCountWhichTriggersFlowControl;
        }
    }
    catch ( ... ) {
        osiSockRelease ();
        delete [] this->pUserName;
        if ( this->tcpSmallRecvBufFreeList ) {
            freeListCleanup ( this->tcpSmallRecvBufFreeList );
        }
        if ( this->tcpLargeRecvBufFreeList ) {
            freeListCleanup ( this->tcpLargeRecvBufFreeList );
        }
        this->timerQueue.release ();
        throw;
    }

    /*
     * load user configured tcp name server address list,
     * create virtual circuits, and add them to server table
     */
    ELLLIST dest, tmpList;

    ellInit ( & dest );
    ellInit ( & tmpList );

    addAddrToChannelAccessAddressList ( &tmpList, &EPICS_CA_NAME_SERVERS, this->_serverPort, false );
    removeDuplicateAddresses ( &dest, &tmpList, 0 );

    epicsGuard < epicsMutex > guard ( this->mutex );

    while ( osiSockAddrNode *
        pNode = reinterpret_cast < osiSockAddrNode * > ( ellGet ( & dest ) ) ) {
        tcpiiu * piiu = NULL;
        SearchDestTCP * pdst = new SearchDestTCP ( *this, pNode->addr );
        this->registerSearchDest ( guard, * pdst );
        bool newIIU = findOrCreateVirtCircuit (
            guard, pNode->addr, cacChannel::priorityDefault,
            piiu, CA_UKN_MINOR_VERSION, pdst );
        free ( pNode );
        if ( newIIU ) {
            piiu->start ( guard );
        }
    }
}
Example #12
//
// udpiiu::udpiiu ()
//
udpiiu::udpiiu ( 
    epicsGuard < epicsMutex > & cacGuard,
    epicsTimerQueueActive & timerQueue, 
    epicsMutex & cbMutexIn, 
    epicsMutex & cacMutexIn,
    cacContextNotify & ctxNotifyIn,
    cac & cac,
    unsigned port,
    tsDLList < SearchDest > & searchDestListIn ) :
    recvThread ( *this, ctxNotifyIn, cbMutexIn, "CAC-UDP", 
        epicsThreadGetStackSize ( epicsThreadStackMedium ),
        cac::lowestPriorityLevelAbove (
            cac::lowestPriorityLevelAbove (
                cac.getInitializingThreadsPriority () ) ) ),
    m_repeaterTimerNotify ( *this ),
    repeaterSubscribeTmr (
        m_repeaterTimerNotify, timerQueue, cbMutexIn, ctxNotifyIn ),
    govTmr ( *this, timerQueue, cacMutexIn ),
    maxPeriod ( maxSearchPeriodDefault ),
    rtteMean ( minRoundTripEstimate ),
    rtteMeanDev ( 0 ),
    cacRef ( cac ),
    cbMutex ( cbMutexIn ),
    cacMutex ( cacMutexIn ),
    nBytesInXmitBuf ( 0 ),
    nTimers ( 0 ),
    beaconAnomalyTimerIndex ( 0 ),
    sequenceNumber ( 0 ),
    lastReceivedSeqNo ( 0 ),
    sock ( 0 ),
    repeaterPort ( 0 ),
    serverPort ( port ),
    localPort ( 0 ),
    shutdownCmd ( false ),
    lastReceivedSeqNoIsValid ( false )
{
    cacGuard.assertIdenticalMutex ( cacMutex );
    
    if ( envGetConfigParamPtr ( & EPICS_CA_MAX_SEARCH_PERIOD ) ) {
        long longStatus = envGetDoubleConfigParam ( 
            & EPICS_CA_MAX_SEARCH_PERIOD, & this->maxPeriod );
        if ( ! longStatus ) {
            if ( this->maxPeriod < maxSearchPeriodLowerLimit ) {
                epicsPrintf ( "\"%s\" out of range (low)\n",
                                EPICS_CA_MAX_SEARCH_PERIOD.name );
                this->maxPeriod = maxSearchPeriodLowerLimit;
                epicsPrintf ( "Setting \"%s\" = %f seconds\n",
                    EPICS_CA_MAX_SEARCH_PERIOD.name, this->maxPeriod );
            }
        }
        else {
            epicsPrintf ( "EPICS \"%s\" wasnt a real number\n",
                            EPICS_CA_MAX_SEARCH_PERIOD.name );
            epicsPrintf ( "Setting \"%s\" = %f seconds\n",
                EPICS_CA_MAX_SEARCH_PERIOD.name, this->maxPeriod );
        }
    }

    double powerOfTwo = log ( this->maxPeriod / minRoundTripEstimate ) / log ( 2.0 );
    this->nTimers = static_cast < unsigned > ( powerOfTwo + 1.0 );
    if ( this->nTimers > channelNode::getMaxSearchTimerCount () ) {
        this->nTimers = channelNode::getMaxSearchTimerCount ();
        epicsPrintf ( "\"%s\" out of range (high)\n",
                        EPICS_CA_MAX_SEARCH_PERIOD.name );
        epicsPrintf ( "Setting \"%s\" = %f seconds\n",
            EPICS_CA_MAX_SEARCH_PERIOD.name, 
            (1<<(this->nTimers-1)) * minRoundTripEstimate );
    }

    powerOfTwo = log ( beaconAnomalySearchPeriod / minRoundTripEstimate ) / log ( 2.0 );
    this->beaconAnomalyTimerIndex = static_cast < unsigned > ( powerOfTwo + 1.0 );
    if ( this->beaconAnomalyTimerIndex >= this->nTimers ) {
        this->beaconAnomalyTimerIndex = this->nTimers - 1;
    }

    this->ppSearchTmr.reset ( new epics_auto_ptr < class searchTimer > [ this->nTimers ] );
    for ( unsigned i = 0; i < this->nTimers; i++ ) {
        this->ppSearchTmr[i].reset ( 
            new searchTimer ( *this, timerQueue, i, cacMutexIn, 
                i > this->beaconAnomalyTimerIndex ) ); 
    }

    this->repeaterPort = 
        envGetInetPortConfigParam ( &EPICS_CA_REPEATER_PORT,
                                    static_cast <unsigned short> (CA_REPEATER_PORT) );

    this->sock = epicsSocketCreate ( AF_INET, SOCK_DGRAM, IPPROTO_UDP );
    if ( this->sock == INVALID_SOCKET ) {
        char sockErrBuf[64];
        epicsSocketConvertErrnoToString ( 
            sockErrBuf, sizeof ( sockErrBuf ) );
        errlogPrintf ("CAC: unable to create datagram socket because = \"%s\"\n",
            sockErrBuf );
        throwWithLocation ( noSocket () );
    }

    int boolValue = true;
    int status = setsockopt ( this->sock, SOL_SOCKET, SO_BROADCAST, 
                (char *) &boolValue, sizeof ( boolValue ) );
    if ( status < 0 ) {
        char sockErrBuf[64];
        epicsSocketConvertErrnoToString ( 
            sockErrBuf, sizeof ( sockErrBuf ) );
        errlogPrintf ("CAC: IP broadcasting enable failed because = \"%s\"\n",
            sockErrBuf );
    }

#if 0
    {
        /*
         * some concern that vxWorks will run out of mBuf's
         * if this change is made joh 11-10-98
         *
         * bump up the UDP recv buffer
         */
        int size = 1u<<15u;
        status = setsockopt ( this->sock, SOL_SOCKET, SO_RCVBUF,
                (char *)&size, sizeof (size) );
        if (status<0) {
            char sockErrBuf[64];
            epicsSocketConvertErrnoToString ( sockErrBuf, sizeof ( sockErrBuf ) );
            errlogPrintf ( "CAC: unable to set socket option SO_RCVBUF because \"%s\"\n",
                sockErrBuf );
        }
    }
#endif

    // force a bind to an unconstrained address so we can obtain
    // the local port number below
    static const unsigned short PORT_ANY = 0u;
    osiSockAddr addr;
    memset ( (char *)&addr, 0 , sizeof (addr) );
    addr.ia.sin_family = AF_INET;
    addr.ia.sin_addr.s_addr = htonl ( INADDR_ANY ); 
    addr.ia.sin_port = htons ( PORT_ANY );
    status = bind (this->sock, &addr.sa, sizeof (addr) );
    if ( status < 0 ) {
        char sockErrBuf[64];
        epicsSocketConvertErrnoToString ( 
            sockErrBuf, sizeof ( sockErrBuf ) );
        epicsSocketDestroy (this->sock);
        errlogPrintf ( "CAC: unable to bind to an unconstrained address because = \"%s\"\n",
            sockErrBuf );
        throwWithLocation ( noSocket () );
    }
    
    {
        osiSockAddr tmpAddr;
        osiSocklen_t saddr_length = sizeof ( tmpAddr );
        status = getsockname ( this->sock, &tmpAddr.sa, &saddr_length );
        if ( status < 0 ) {
            char sockErrBuf[64];
            epicsSocketConvertErrnoToString ( 
                sockErrBuf, sizeof ( sockErrBuf ) );
            epicsSocketDestroy ( this->sock );
            errlogPrintf ( "CAC: getsockname () error was \"%s\"\n", sockErrBuf );
            throwWithLocation ( noSocket () );
        }
        if ( tmpAddr.sa.sa_family != AF_INET) {
            epicsSocketDestroy ( this->sock );
            errlogPrintf ( "CAC: UDP socket was not inet addr family\n" );
            throwWithLocation ( noSocket () );
        }
        this->localPort = ntohs ( tmpAddr.ia.sin_port );
    }

    /*
     * load user and auto configured
     * broadcast address list
     */
    ELLLIST dest;
    ellInit ( & dest );
    configureChannelAccessAddressList ( & dest, this->sock, this->serverPort );
    while ( osiSockAddrNode * 
        pNode = reinterpret_cast < osiSockAddrNode * > ( ellGet ( & dest ) ) ) {
        SearchDestUDP & searchDest = * 
            new SearchDestUDP ( pNode->addr, *this );
        _searchDestList.add ( searchDest );
        free ( pNode );
    }

    /* add list of tcp name service addresses */
    _searchDestList.add ( searchDestListIn );
    
    caStartRepeaterIfNotInstalled ( this->repeaterPort );

    this->pushVersionMsg ();

    // start timers and receive thread
    for ( unsigned j =0; j < this->nTimers; j++ ) {
        this->ppSearchTmr[j]->start ( cacGuard ); 
    }
    this->govTmr.start ();
    this->repeaterSubscribeTmr.start ();
    this->recvThread.start ();
}
Example #13
static int
myReceive(epicsMessageQueueId pmsg, void *message, unsigned int size, bool wait, bool haveTimeout, double timeout)
{
    char *myOutPtr;
    unsigned long l;
    struct threadNode *pthr;

    /*
     * If there's a message on the queue, copy it
     */
    epicsMutexLock(pmsg->mutex);
    myOutPtr = (char *)pmsg->outPtr;
    if ((myOutPtr != pmsg->inPtr) || pmsg->full) {
        int ret;
        l = *(unsigned long *)myOutPtr;
        if (l <= size) {
            memcpy(message, (unsigned long *)myOutPtr + 1, l);
            ret = l;
        }
        else {
            ret = -1;
        }
        if (myOutPtr == pmsg->lastMessageSlot)
            pmsg->outPtr = pmsg->firstMessageSlot;
        else
            pmsg->outPtr += pmsg->slotSize;
        pmsg->full = false;

        /*
         * Wake up the oldest task waiting to send
         */
        if ((pthr = reinterpret_cast < struct threadNode * >
                    ( ellGet(&pmsg->sendQueue) ) ) != NULL) {
            pthr->eventSent = true;
            epicsEventSignal(pthr->evp->event);
        }
        epicsMutexUnlock(pmsg->mutex);
        return ret;
    }

    /*
     * Return if not allowed to wait
     */
    if (!wait) {
        epicsMutexUnlock(pmsg->mutex);
        return -1;
    }

    /*
     * Wake up the oldest task waiting to send
     */
    if ((pthr = reinterpret_cast < struct threadNode * >
                ( ellGet(&pmsg->sendQueue) ) ) != NULL) {
        pthr->eventSent = true;
        epicsEventSignal(pthr->evp->event);
    }

    /*
     * Wait for message to arrive
     */
    struct threadNode threadNode;
    threadNode.evp = getEventNode(pmsg);
    threadNode.buf = message;
    threadNode.size = size;
    threadNode.eventSent = false;
    ellAdd(&pmsg->receiveQueue, &threadNode.link);
    epicsMutexUnlock(pmsg->mutex);
    if(haveTimeout)
        epicsEventWaitWithTimeout(threadNode.evp->event, timeout);
    else
        epicsEventWait(threadNode.evp->event);
    epicsMutexLock(pmsg->mutex);
    if(!threadNode.eventSent)
        ellDelete(&pmsg->receiveQueue, &threadNode.link);
    ellAdd(&pmsg->eventFreeList, &threadNode.evp->link);
    epicsMutexUnlock(pmsg->mutex);
    if(threadNode.eventSent && (threadNode.size <= size))
        return threadNode.size;
    return -1;
}
Example #14
static int
mySend(epicsMessageQueueId pmsg, void *message, unsigned int size, bool wait, bool haveTimeout, double timeout)
{
    char *myInPtr, *nextPtr;
    struct threadNode *pthr;

    if(size > pmsg->maxMessageSize)
        return -1;

    /*
     * See if message can be sent
     */
    epicsMutexLock(pmsg->mutex);
    if ((pmsg->numberOfSendersWaiting > 0)
            || (pmsg->full && (ellFirst(&pmsg->receiveQueue) == NULL))) {
        /*
         * Return if not allowed to wait
         */
        if (!wait) {
            epicsMutexUnlock(pmsg->mutex);
            return -1;
        }

        /*
         * Wait
         */
        struct threadNode threadNode;
        threadNode.evp = getEventNode(pmsg);
        threadNode.eventSent = false;
        ellAdd(&pmsg->sendQueue, &threadNode.link);
        pmsg->numberOfSendersWaiting++;
        epicsMutexUnlock(pmsg->mutex);
        if(haveTimeout)
            epicsEventWaitWithTimeout(threadNode.evp->event, timeout);
        else
            epicsEventWait(threadNode.evp->event);
        epicsMutexLock(pmsg->mutex);
        if(!threadNode.eventSent)
            ellDelete(&pmsg->sendQueue, &threadNode.link);
        pmsg->numberOfSendersWaiting--;
        ellAdd(&pmsg->eventFreeList, &threadNode.evp->link);
        if (pmsg->full && (ellFirst(&pmsg->receiveQueue) == NULL)) {
            epicsMutexUnlock(pmsg->mutex);
            return -1;
        }
    }

    /*
     * Copy message to waiting receiver
     */
    if ((pthr = reinterpret_cast < struct threadNode * >
                ( ellGet(&pmsg->receiveQueue) ) ) != NULL) {
        if(size <= pthr->size)
            memcpy(pthr->buf, message, size);
        pthr->size = size;
        pthr->eventSent = true;
        epicsEventSignal(pthr->evp->event);
        epicsMutexUnlock(pmsg->mutex);
        return 0;
    }

    /*
     * Copy to queue
     */
    myInPtr = (char *)pmsg->inPtr;
    if (myInPtr == pmsg->lastMessageSlot)
        nextPtr = pmsg->firstMessageSlot;
    else
        nextPtr = myInPtr + pmsg->slotSize;
    if (nextPtr == (char *)pmsg->outPtr)
        pmsg->full = true;
    *(volatile unsigned long *)myInPtr = size;
    memcpy((unsigned long *)myInPtr + 1, message, size);
    pmsg->inPtr = nextPtr;
    epicsMutexUnlock(pmsg->mutex);
    return 0;
}
Example #15
void epicsThreadPoolDestroy(epicsThreadPool *pool)
{
    unsigned int nThr;
    ELLLIST notify;
    ELLNODE *cur;

    if (!pool)
        return;

    ellInit(&notify);

    epicsMutexMustLock(pool->guard);

    /* run remaining queued jobs */
    epicsThreadPoolControlImpl(pool, epicsThreadPoolQueueAdd, 0);
    epicsThreadPoolControlImpl(pool, epicsThreadPoolQueueRun, 1);
    nThr = pool->threadsRunning;
    pool->freezeopt = 1;

    epicsMutexUnlock(pool->guard);

    epicsThreadPoolWait(pool, -1.0);
    /* At this point all queued jobs have run */

    epicsMutexMustLock(pool->guard);

    pool->shutdown = 1;
    /* wakeup all */
    if (pool->threadsWaking < pool->threadsSleeping) {
        pool->threadsWaking = pool->threadsSleeping;
        epicsEventSignal(pool->workerWakeup);
    }

    ellConcat(&notify, &pool->owned);
    ellConcat(&notify, &pool->jobs);

    epicsMutexUnlock(pool->guard);

    if (nThr && epicsEventWait(pool->shutdownEvent) != epicsEventWaitOK){
        errlogMessage("epicsThreadPoolDestroy: wait error");
        return;
    }

    /* all workers are now shutdown */

    /* notify remaining jobs that pool is being destroyed */
    while ((cur = ellGet(&notify)) != NULL) {
        epicsJob *job = CONTAINER(cur, epicsJob, jobnode);

        job->running = 1;
        job->func(job->arg, epicsJobModeCleanup);
        job->running = 0;
        if (job->freewhendone)
            free(job);
        else
            job->pool = NULL; /* orphan */
    }

    epicsEventDestroy(pool->workerWakeup);
    epicsEventDestroy(pool->shutdownEvent);
    epicsEventDestroy(pool->observerWakeup);
    epicsMutexDestroy(pool->guard);

    free(pool);
}
Example #16
static
void workerMain(void *arg)
{
    epicsThreadPool *pool = arg;
    unsigned int nrun, ocnt;

    /* workers are created with counts
     * in the running, sleeping, and (possibly) waking counters
     */

    epicsMutexMustLock(pool->guard);
    pool->threadsAreAwake++;
    pool->threadsSleeping--;

    while (1) {
        ELLNODE *cur;

        pool->threadsAreAwake--;
        pool->threadsSleeping++;
        epicsMutexUnlock(pool->guard);

        epicsEventMustWait(pool->workerWakeup);

        epicsMutexMustLock(pool->guard);
        pool->threadsSleeping--;
        pool->threadsAreAwake++;

        if (pool->threadsWaking==0)
            continue;

        pool->threadsWaking--;

        CHECKCOUNT(pool);

        if (pool->shutdown)
            break;

        if (pool->pauserun)
            continue;

        /* more threads to wakeup */
        if (pool->threadsWaking) {
            epicsEventSignal(pool->workerWakeup);
        }

        while ((cur=ellGet(&pool->jobs)) != NULL) {
            epicsJob *job = CONTAINER(cur, epicsJob, jobnode);

            assert(job->queued && !job->running);

            job->queued=0;
            job->running=1;

            epicsMutexUnlock(pool->guard);
            (*job->func)(job->arg, epicsJobModeRun);
            epicsMutexMustLock(pool->guard);

            if (job->freewhendone) {
                job->dead=1;
                free(job);
            }
            else {
                job->running=0;
                /* job may be re-queued from within callback */
                if (job->queued)
                    ellAdd(&pool->jobs, &job->jobnode);
                else
                    ellAdd(&pool->owned, &job->jobnode);
            }
        }

        if (pool->observerCount)
            epicsEventSignal(pool->observerWakeup);
    }

    pool->threadsAreAwake--;
    pool->threadsRunning--;

    nrun = pool->threadsRunning;
    ocnt = pool->observerCount;
    epicsMutexUnlock(pool->guard);

    if (ocnt)
        epicsEventSignal(pool->observerWakeup);

    if (nrun)
        epicsEventSignal(pool->workerWakeup); /* pass along */
    else
        epicsEventSignal(pool->shutdownEvent);
}
Example #17
static void dbCaTask(void *arg)
{
    taskwdInsert(0, NULL, NULL);
    SEVCHK(ca_context_create(ca_enable_preemptive_callback),
        "dbCaTask calling ca_context_create");
    dbCaClientContext = ca_current_context ();
    SEVCHK(ca_add_exception_event(exceptionCallback,NULL),
        "ca_add_exception_event");
    epicsEventSignal(startStopEvent);

    /* channel access event loop */
    while (TRUE){
        do {
            epicsEventMustWait(workListEvent);
        } while (dbCaCtl == ctlPause);
        while (TRUE) { /* process all requests in workList*/
            caLink *pca;
            short  link_action;
            int    status;

            epicsMutexMustLock(workListLock);
            if (!(pca = (caLink *)ellGet(&workList))){  /* Take off list head */
                epicsMutexUnlock(workListLock);
                if (dbCaCtl == ctlExit) goto shutdown;
                break; /* workList is empty */
            }
            link_action = pca->link_action;
            pca->link_action = 0;
            if (link_action & CA_CLEAR_CHANNEL) --removesOutstanding;
            epicsMutexUnlock(workListLock);         /* Give back immediately */
            if (link_action & CA_CLEAR_CHANNEL) {   /* This must be first */
                dbCaLinkFree(pca);
                /* No alarm is raised. Since link is changing so what? */
                continue; /* No other link_action makes sense */
            }
            if (link_action & CA_CONNECT) {
                status = ca_create_channel(
                      pca->pvname,connectionCallback,(void *)pca,
                      CA_PRIORITY_DB_LINKS, &(pca->chid));
                if (status != ECA_NORMAL) {
                    errlogPrintf("dbCaTask ca_create_channel %s\n",
                        ca_message(status));
                    printLinks(pca);
                    continue;
                }
                dbca_chan_count++;
                status = ca_replace_access_rights_event(pca->chid,
                    accessRightsCallback);
                if (status != ECA_NORMAL) {
                    errlogPrintf("dbCaTask replace_access_rights_event %s\n",
                        ca_message(status));
                    printLinks(pca);
                }
                continue; /*Other options must wait until connect*/
            }
            if (ca_state(pca->chid) != cs_conn) continue;
            if (link_action & CA_WRITE_NATIVE) {
                assert(pca->pputNative);
                if (pca->putType == CA_PUT) {
                    status = ca_array_put(
                        pca->dbrType, pca->nelements,
                        pca->chid, pca->pputNative);
                } else if (pca->putType==CA_PUT_CALLBACK) {
                    status = ca_array_put_callback(
                        pca->dbrType, pca->nelements,
                        pca->chid, pca->pputNative,
                        putCallback, pca);
                } else {
                    status = ECA_PUTFAIL;
                }
                if (status != ECA_NORMAL) {
                    errlogPrintf("dbCaTask ca_array_put %s\n",
                        ca_message(status));
                    printLinks(pca);
                }
                epicsMutexMustLock(pca->lock);
                if (status == ECA_NORMAL) pca->newOutNative = FALSE;
                epicsMutexUnlock(pca->lock);
            }
            if (link_action & CA_WRITE_STRING) {
                assert(pca->pputString);
                if (pca->putType == CA_PUT) {
                    status = ca_array_put(
                        DBR_STRING, 1,
                        pca->chid, pca->pputString);
                } else if (pca->putType==CA_PUT_CALLBACK) {
                    status = ca_array_put_callback(
                        DBR_STRING, 1,
                        pca->chid, pca->pputString,
                        putCallback, pca);
                } else {
                    status = ECA_PUTFAIL;
                }
                if (status != ECA_NORMAL) {
                    errlogPrintf("dbCaTask ca_array_put %s\n",
                        ca_message(status));
                    printLinks(pca);
                }
                epicsMutexMustLock(pca->lock);
                if (status == ECA_NORMAL) pca->newOutString = FALSE;
                epicsMutexUnlock(pca->lock);
            }
            /*CA_GET_ATTRIBUTES before CA_MONITOR so that attributes available
             * before the first monitor callback                              */
            if (link_action & CA_GET_ATTRIBUTES) {
                status = ca_get_callback(DBR_CTRL_DOUBLE,
                    pca->chid, getAttribEventCallback, pca);
                if (status != ECA_NORMAL) {
                    errlogPrintf("dbCaTask ca_get_callback %s\n",
                        ca_message(status));
                    printLinks(pca);
                }
            }
            if (link_action & CA_MONITOR_NATIVE) {
                size_t element_size;
    
                element_size = dbr_value_size[ca_field_type(pca->chid)];
                epicsMutexMustLock(pca->lock);
                pca->pgetNative = dbCalloc(pca->nelements, element_size);
                epicsMutexUnlock(pca->lock);
                status = ca_add_array_event(
                    ca_field_type(pca->chid)+DBR_TIME_STRING,
                    ca_element_count(pca->chid),
                    pca->chid, eventCallback, pca, 0.0, 0.0, 0.0, 0);
                if (status != ECA_NORMAL) {
                    errlogPrintf("dbCaTask ca_add_array_event %s\n",
                        ca_message(status));
                    printLinks(pca);
                }
            }
            if (link_action & CA_MONITOR_STRING) {
                epicsMutexMustLock(pca->lock);
                pca->pgetString = dbCalloc(1, MAX_STRING_SIZE);
                epicsMutexUnlock(pca->lock);
                status = ca_add_array_event(DBR_TIME_STRING, 1,
                    pca->chid, eventCallback, pca, 0.0, 0.0, 0.0, 0);
                if (status != ECA_NORMAL) {
                    errlogPrintf("dbCaTask ca_add_array_event %s\n",
                        ca_message(status));
                    printLinks(pca);
                }
            }
        }
        SEVCHK(ca_flush_io(), "dbCaTask");
    }
shutdown:
    taskwdRemove(0);
    if (dbca_chan_count == 0)
        ca_context_destroy();
    else
        fprintf(stderr, "dbCa: chan_count = %d at shutdown\n", dbca_chan_count);
    epicsEventSignal(startStopEvent);
}