Beispiel #1
0
/* Stop all scanning and release scan-task resources.
 * Idempotent: the first call latches scanCtl to ctlExit and any
 * subsequent call returns immediately. */
void scanShutdown(void)
{
    int i;

    if (scanCtl == ctlExit) return;
    scanCtl = ctlExit;

    /* Stop record processing triggered from interrupt level. */
    interruptAccept = FALSE;

    /* Wake each periodic scan task and wait for it to acknowledge
     * exit via startStopEvent, one task at a time. */
    for (i = 0; i < nPeriodic; i++) {
        papPeriodic[i]->scanCtl = ctlExit;
        epicsEventSignal(papPeriodic[i]->loopEvent);
        epicsEventWait(startStopEvent);
    }

    /* Queue a sentinel record so the once task wakes, observes exit,
     * and signals startStopEvent on its way out. */
    scanOnce((dbCommon *)&exitOnce);
    epicsEventWait(startStopEvent);

    deletePeriodic();
    ioscanDestroy();

    epicsRingPointerDelete(onceQ);

    epicsEventDestroy(startStopEvent);
    epicsEventDestroy(onceSem);
    onceSem = startStopEvent = NULL;

    free(periodicTaskId);
    papPeriodic = NULL;      /* already freed inside deletePeriodic() */
    periodicTaskId = NULL;
}
/* Starts "mcnt" jobs in a pool with initial and max
 * thread counts "icnt" and "mcnt".
 * The test ensures that all jobs run in parallel.
 * "cork" checks the function of pausing the run queue
 * with epicsThreadPoolQueueRun
 */
static void postjobs(size_t icnt, size_t mcnt, int cork)
{
    size_t i;
    epicsThreadPool *pool;
    countPriv *priv=callocMustSucceed(1, sizeof(*priv), "postjobs priv alloc");
    priv->guard=epicsMutexMustCreate();
    priv->done=epicsEventMustCreate(epicsEventEmpty);
    priv->allrunning=epicsEventMustCreate(epicsEventEmpty);
    priv->count=mcnt;
    priv->job=callocMustSucceed(mcnt, sizeof(*priv->job), "postjobs job array");

    testDiag("postjobs(%lu,%lu)", (unsigned long)icnt, (unsigned long)mcnt);

    {
        epicsThreadPoolConfig conf;
        epicsThreadPoolConfigDefaults(&conf);
        conf.initialThreads=icnt;
        conf.maxThreads=mcnt;

        testOk1((pool=epicsThreadPoolCreate(&conf))!=NULL);
        if(!pool) {
            /* Fix: the original leaked priv, its mutex/events and the
             * job array on this early-exit path. */
            epicsMutexDestroy(priv->guard);
            epicsEventDestroy(priv->done);
            epicsEventDestroy(priv->allrunning);
            free(priv->job);
            free(priv);
            return;
        }
    }

    /* With the queue corked, queued jobs must not start running. */
    if(cork)
        epicsThreadPoolControl(pool, epicsThreadPoolQueueRun, 0);

    for(i=0; i<mcnt; i++) {
        testDiag("i=%lu", (unsigned long)i);
        priv->job[i] = epicsJobCreate(pool, &countjob, priv);
        testOk1(priv->job[i]!=NULL);
        testOk1(epicsJobQueue(priv->job[i])==0);
    }

    if(cork) {
        /* no jobs should have run */
        epicsMutexMustLock(priv->guard);
        testOk1(priv->count==mcnt);
        epicsMutexUnlock(priv->guard);

        epicsThreadPoolControl(pool, epicsThreadPoolQueueRun, 1);
    }

    testDiag("Waiting for all jobs to start");
    epicsEventMustWait(priv->allrunning);
    testDiag("Stop all");
    epicsEventSignal(priv->done);

    for(i=0; i<mcnt; i++) {
        testDiag("i=%lu", (unsigned long)i);
        epicsJobDestroy(priv->job[i]);
    }

    /* Destroying the pool waits for all workers to finish. */
    epicsThreadPoolDestroy(pool);
    epicsMutexDestroy(priv->guard);
    epicsEventDestroy(priv->allrunning);
    epicsEventDestroy(priv->done);
    free(priv->job);
    free(priv);
}
Beispiel #3
0
/* Tear down an event record at the end of a run: disconnect every
 * connected PV in its list and destroy the record's private
 * synchronization events. */
void
erase_eventRecord_links( acqEvent_t *ev)
{
	int idx;

	for( idx = 0; idx < ev->numPvList; idx++)
	{
		if( ev->pvList[idx].conEvent != NULL)
		{
			connector_disconnect( ev->pvList[idx].conEvent);
			ev->pvList[idx].conEvent = NULL;
		}
	}

	if( ev->startEvent != 0)
	{
		epicsEventDestroy( ev->startEvent);
		ev->startEvent = 0;
	}
	if( ev->endEvent != 0)
	{
		epicsEventDestroy( ev->endEvent);
		ev->endEvent = 0;
	}
	if( ev->monitorPvEvent != 0)
	{
		epicsEventDestroy( ev->monitorPvEvent);
		ev->monitorPvEvent = 0;
	}
}
Beispiel #4
0
/* Tear down a scan record at the end of a run: disconnect its
 * control channels, destroy its synchronization events and erase
 * each of its trigger action lists.  Always returns 0. */
int
erase_scanRecord_links(acqScan_t *sc)
{
	int idx;

	for( idx = 0; idx < sc->numControlPV; idx++)
	{
		if( sc->acqControlList == NULL)
			continue;
		if( sc->acqControlList[idx].controlChan == NULL)
			continue;
		connector_disconnect( sc->acqControlList[idx].controlChan);
		sc->acqControlList[idx].controlChan = NULL;
	}

	if( sc->startEvent != 0)
	{
		epicsEventDestroy( sc->startEvent);
		sc->startEvent = 0;
	}
	if( sc->endEvent != 0)
	{
		epicsEventDestroy( sc->endEvent);
		sc->endEvent = 0;
	}
	if( sc->putNotify != 0)
	{
		epicsEventDestroy( sc->putNotify);
		sc->putNotify = 0;
	}

	for( idx = 0; idx < NUM_SCAN_TRIGGERS; idx++)
		eraseActionList( sc->actions[idx]);

	return 0;
}
Beispiel #5
0
/* Set a driver option (key/value pair) on a port, blocking until the
 * queued request has been processed by the port thread.
 * Returns asynSuccess, or asynError on bad arguments, missing
 * interface, or queueing failure. */
epicsShareFunc int
 asynSetOption(const char *portName, int addr, const char *key, const char *val)
{
    asynUser *pasynUser;
    asynInterface *pasynInterface;
    setOptionArgs optionargs;

    if ((key == NULL) || (val == NULL)) {
        printf("Missing key/value argument\n");
        return asynError;
    }
    if (findInterface(portName, addr, asynOptionType, setOption,
                                    &pasynUser, &pasynInterface) != asynSuccess)
        return asynError;
    pasynUser->timeout = 2;
    /* optionargs lives on this stack frame; the queued callback reads
     * it through userPvt before we return. */
    pasynUser->userPvt = &optionargs;
    pasynUser->reason = ASYN_REASON_QUEUE_EVEN_IF_NOT_CONNECTED;
    optionargs.pasynOption = (asynOption *)pasynInterface->pinterface;
    optionargs. drvPvt = pasynInterface->drvPvt;
    optionargs.key = key;
    optionargs.val = val;
    /* The setOption callback signals "done" when it has run. */
    optionargs.done = epicsEventMustCreate(epicsEventEmpty);
    if (pasynManager->queueRequest(pasynUser,asynQueuePriorityConnect,0.0) != asynSuccess) {
        printf("queueRequest failed: %s\n", pasynUser->errorMessage);
        epicsEventDestroy(optionargs.done);
        pasynManager->freeAsynUser(pasynUser);
        return asynError;
    }
    epicsEventWait(optionargs.done);
    epicsEventDestroy(optionargs.done);
    pasynManager->freeAsynUser(pasynUser);
    return asynSuccess;
}
Beispiel #6
0
/* Set the input or output end-of-string terminator for a port.
 * "eos" may contain C escape sequences; it is converted to raw bytes
 * before being handed to the port thread.  Blocks until the queued
 * request completes.  Returns the request status. */
static asynStatus
asynSetEos(const char *portName, int addr, enum eosType type, const char *eos)
{
    asynUser *pasynUser;
    asynInterface *pasynInterface;
    eosArgs eosargs;

    if (eos == NULL) {
        printf("Missing EOS argument\n");
        return asynError;
    }
    /* Unescape into the fixed-size buffer; reject strings that do
     * not fit. */
    eosargs.eosLen = epicsStrnRawFromEscaped(eosargs.eos, sizeof eosargs.eos, eos, strlen(eos));
    if (eosargs.eosLen >= sizeof eosargs.eos) {
        printf("End of string argument \"%s\" too long.\n", eos);
        return asynError;
    }
    if (findInterface(portName, addr, asynOctetType, setEos,
                                        &pasynUser, &pasynInterface) != asynSuccess)
        return asynError;
    pasynUser->timeout = 2;
    /* eosargs is stack-allocated; the queued callback accesses it via
     * userPvt and signals "done" when finished. */
    pasynUser->userPvt = &eosargs;
    pasynUser->reason = ASYN_REASON_QUEUE_EVEN_IF_NOT_CONNECTED;
    eosargs.pasynOctet = (asynOctet *)pasynInterface->pinterface;
    eosargs. drvPvt = pasynInterface->drvPvt;
    eosargs.type = type;
    eosargs.done = epicsEventMustCreate(epicsEventEmpty);
    eosargs.status = pasynManager->queueRequest(pasynUser, asynQueuePriorityConnect, 0.0);
    /* Only wait when the request was actually queued. */
    if (eosargs.status == asynSuccess)
        epicsEventWait(eosargs.done);
    epicsEventDestroy(eosargs.done);
    if (eosargs.status != asynSuccess)
        printf("Set EOS failed: %s\n", pasynUser->errorMessage);
    pasynManager->freeAsynUser(pasynUser);
    return eosargs.status;
}
Beispiel #7
0
/* Disconnect a previously connected PCI interrupt handler.
 * Searches the device's ISR list for the (function, parameter) pair,
 * stops its dispatch thread and frees the entry.
 * Returns 0 on success, S_dev_intDisconnect if no match was found. */
static
int linuxDevPCIDisconnectInterrupt(
  const epicsPCIDevice *dev,
  void (*pFunction)(void *),
  void  *parameter
)
{
    int ret=S_dev_intDisconnect;
    ELLNODE *cur;
    osdISR *isr;
    osdPCIDevice *osd=CONTAINER((epicsPCIDevice*)dev,osdPCIDevice,dev);

    /* devLock guards the ISR list for the whole search/remove. */
    epicsMutexMustLock(osd->devLock);

    for(cur=ellFirst(&osd->isrs); cur; cur=ellNext(cur))
    {
        isr=CONTAINER(cur,osdISR,node);

        if (pFunction==isr->fptr && parameter==isr->param) {

            /* Stop the ISR thread before freeing its state. */
            stopIsrThread(isr);

            ellDelete(&osd->isrs,cur);
            epicsEventDestroy(isr->done);
            free(isr);

            ret=0;
            break;
        }
    }
    epicsMutexUnlock(osd->devLock);

    return ret;
}
Beispiel #8
0
/* Print the current input or output end-of-string terminator of a
 * port (escaped for display).  Blocks until the queued getEos request
 * completes.  Returns the request status. */
epicsShareFunc int
asynShowEos(const char *portName, int addr, enum eosType type)
{
    asynInterface *pasynInterface;
    eosArgs eosargs;
    asynUser *pasynUser;

    if (findInterface(portName, addr, asynOctetType, getEos,
                                        &pasynUser, &pasynInterface) != asynSuccess)
        return asynError;
    pasynUser->timeout = 2;
    /* eosargs is stack-allocated; the queued callback accesses it via
     * userPvt and signals "done" when finished. */
    pasynUser->userPvt = &eosargs;
    pasynUser->reason = ASYN_REASON_QUEUE_EVEN_IF_NOT_CONNECTED;
    eosargs.pasynOctet = (asynOctet *)pasynInterface->pinterface;
    eosargs. drvPvt = pasynInterface->drvPvt;
    eosargs.type = type;
    eosargs.done = epicsEventMustCreate(epicsEventEmpty);
    eosargs.status = pasynManager->queueRequest(pasynUser, asynQueuePriorityConnect, 0.0);
    if (eosargs.status == asynSuccess)
        epicsEventWait(eosargs.done);
    epicsEventDestroy(eosargs.done);
    if (eosargs.status != asynSuccess)
        /* Fix: this is the get/show path; the original message said
         * "Set EOS failed" (copy-paste from asynSetEos). */
        printf("Get EOS failed: %s\n", pasynUser->errorMessage);
    pasynManager->freeAsynUser(pasynUser);
    if (eosargs.status == asynSuccess) {
        /* Worst case every raw byte expands to 4 chars (e.g. \xNN),
         * plus terminator. */
        char cbuf[4*sizeof eosargs.eos + 2];
        epicsStrnEscapedFromRaw(cbuf, sizeof cbuf, eosargs.eos, eosargs.eosLen);
        printf("\"%s\"\n", cbuf);
    }
    return eosargs.status;
}
/* Verify job cancellation (epicsJobUnqueue) semantics:
 * un-queueing an idle job reports S_pool_jobIdle, un-queueing a
 * queued-but-not-started job succeeds exactly once, and a job that
 * has started running can no longer be un-queued. */
static
void testcancel(void)
{
    epicsJob *job[2];
    epicsThreadPool *pool;
    testOk1((pool=epicsThreadPoolCreate(NULL))!=NULL);
    if(!pool)
        return;

    cancel[0]=epicsEventCreate(epicsEventEmpty);
    cancel[1]=epicsEventCreate(epicsEventEmpty);

    testOk1((job[0]=epicsJobCreate(pool, &neverrun, EPICSJOB_SELF))!=NULL);
    testOk1((job[1]=epicsJobCreate(pool, &toolate, EPICSJOB_SELF))!=NULL);

    /* freeze */
    epicsThreadPoolControl(pool, epicsThreadPoolQueueRun, 0);

    testOk1(epicsJobUnqueue(job[0])==S_pool_jobIdle); /* not queued yet */

    /* While frozen, a queued job can be cancelled exactly once. */
    epicsJobQueue(job[0]);
    testOk1(epicsJobUnqueue(job[0])==0);
    testOk1(epicsJobUnqueue(job[0])==S_pool_jobIdle);

    /* Repeat after a short delay to catch races with worker wakeup. */
    epicsThreadSleep(0.01);
    epicsJobQueue(job[0]);
    testOk1(epicsJobUnqueue(job[0])==0);
    testOk1(epicsJobUnqueue(job[0])==S_pool_jobIdle);

    epicsThreadPoolControl(pool, epicsThreadPoolQueueRun, 1);

    epicsJobQueue(job[1]); /* actually let it run this time */

    /* toolate() signals cancel[0] once running, then blocks on
     * cancel[1]; a running job must report idle to Unqueue. */
    epicsEventMustWait(cancel[0]);
    testOk1(epicsJobUnqueue(job[0])==S_pool_jobIdle);
    epicsEventSignal(cancel[1]);

    epicsThreadPoolDestroy(pool);
    epicsEventDestroy(cancel[0]);
    epicsEventDestroy(cancel[1]);

    testOk1(shouldneverrun==0);
    testOk1(numtoolate==1);
}
Beispiel #10
0
/*
    Shutdown the EPICS library.
    Reference-counted: only the last CaObject instance releases the
    shared monitor event.
*/
void CaObject::shutdown() {

    CA_UNIQUE_OBJECT_ID--;
    if( CA_UNIQUE_OBJECT_ID <= 0 ) {
        // Fix: if shutdown() runs again after the count has reached
        // zero, monitorEvent is already NULL; guard against passing a
        // NULL event id to epicsEventDestroy.
        if( monitorEvent ) {
            epicsEventDestroy( monitorEvent );
            monitorEvent = NULL;
        }
    }
    caPrivate->caRecord.setName( "" );
    caPrivate->caRecord.setValid( false );
}
Beispiel #11
0
static void tpnThread(void *pvt)
{
    tpnInfo   *ptpnInfo = (tpnInfo *)pvt;
    putNotify *ppn = (putNotify *)ptpnInfo->ppn;

    dbPutNotify(ppn);
    epicsEventWait(ptpnInfo->callbackDone);
    dbNotifyCancel(ppn);
    epicsEventDestroy(ptpnInfo->callbackDone);
    free((void *)ppn->paddr);
    free(ppn);
    free(ptpnInfo);
}
Beispiel #12
0
static void tpnThread(void *pvt)
{
    tpnInfo *ptpnInfo = (tpnInfo *) pvt;
    processNotify *ppn = (processNotify *) ptpnInfo->ppn;

    dbProcessNotify(ppn);
    epicsEventWait(ptpnInfo->callbackDone);
    dbNotifyCancel(ppn);
    epicsEventDestroy(ptpnInfo->callbackDone);
    dbChannelDelete(ppn->chan);
    free(ppn);
    free(ptpnInfo);
}
/*
 * Destroy a message queue: drain the pooled event nodes (destroying
 * each node's event), then release the guard mutex, the message
 * buffer and the queue structure itself.
 */
epicsShareFunc void epicsShareAPI
epicsMessageQueueDestroy(epicsMessageQueueId pmsg)
{
    for (;;) {
        struct eventNode *node = reinterpret_cast < struct eventNode * >
                                 ( ellGet(&pmsg->eventFreeList) );
        if (node == NULL)
            break;
        epicsEventDestroy(node->event);
        free(node);
    }
    epicsMutexDestroy(pmsg->mutex);
    free(pmsg->buf);
    free(pmsg);
}
/* Test re-queueing a job while it is running.
 * Check that a single job won't run concurrently.
 */
static void testreadd(void) {
    epicsThreadPoolConfig conf;
    epicsThreadPool *pool;
    readdPriv *priv=callocMustSucceed(1, sizeof(*priv), "testcleanup priv");
    /* Fix: size from *priv2, not *priv (same type today, but keeps the
     * allocation tied to the pointer it initializes). */
    readdPriv *priv2=callocMustSucceed(1, sizeof(*priv2), "testcleanup priv");

    testDiag("testreadd");

    priv->done=epicsEventMustCreate(epicsEventEmpty);
    priv->count=5;
    priv2->done=epicsEventMustCreate(epicsEventEmpty);
    priv2->count=5;

    epicsThreadPoolConfigDefaults(&conf);
    conf.maxThreads = 2;
    testOk1((pool=epicsThreadPoolCreate(&conf))!=NULL);
    if(!pool) {
        /* Fix: the original leaked both priv structures and their
         * events on this early-exit path. */
        epicsEventDestroy(priv->done);
        epicsEventDestroy(priv2->done);
        free(priv);
        free(priv2);
        return;
    }

    testOk1((priv->job=epicsJobCreate(pool, &readdjob, priv))!=NULL);
    testOk1((priv2->job=epicsJobCreate(pool, &readdjob, priv2))!=NULL);

    /* Each readdjob re-queues itself until count reaches zero, then
     * signals done. */
    testOk1(epicsJobQueue(priv->job)==0);
    testOk1(epicsJobQueue(priv2->job)==0);
    epicsEventMustWait(priv->done);
    epicsEventMustWait(priv2->done);

    testOk1(epicsThreadPoolNThreads(pool)==2);
    testDiag("epicsThreadPoolNThreads = %d", epicsThreadPoolNThreads(pool));

    epicsThreadPoolDestroy(pool);
    epicsEventDestroy(priv->done);
    epicsEventDestroy(priv2->done);
    free(priv);
    free(priv2);
}
Beispiel #15
0
/*
 * remove components from the action list at the end of the run
 *
 * Walks the circular action list starting at act_h (the iteration
 * stops when the next pointer wraps back to the head) and releases
 * the per-action-type resources: connectors, events, motors.
 * List nodes themselves are not freed here.
 */
void
eraseActionList( acqAction_t *act_h)
{
	acqAction_t *ap;

	/* circular list: treat a wrap back to act_h as end-of-list */
	for(ap=act_h; ap; ap = (ap->next==act_h?NULL:ap->next) )
	{
		switch(ap->type)
		{
		case AA_SET_CONTROL:
			continue;
		case AA_DELAY_TIME:
			continue;
		case AA_SET_PV:
			connector_disconnect( ap->au.spv.connector);
			ap->au.spv.connector = NULL;
			continue;
		case AA_WAIT_PV:
			ap->au.wpv.connector->update = NULL;		/* prevent inconvenient callbacks */
			connector_disconnect( ap->au.wpv.connector);
			ap->au.wpv.connector = NULL;
			epicsEventDestroy(ap->au.wpv.endDelay);
			ap->au.wpv.endDelay = 0;
			continue;
		case AA_WAIT_MOTOR:
			shutdown_acqMotor( ap->au.wm.motor);
			ap->au.wm.motor = NULL;
			continue;
		case AA_CALL_EVENT:
			ap->au.ce.event = NULL;
			continue;
		case AA_CALL_SCAN:
			ap->au.cs.scan = NULL;
			continue;
		case AA_WAIT_EVENT:
			ap->au.we.event = NULL;
			continue;
		case AA_WAIT_SCAN:
			ap->au.ws.scan = NULL;
			continue;
		case AA_NEXT_OUTPUT:
		//Added because of warning (David Chevrier, Aug 25 2011)
			continue;
		case AA_NO_ACTION:
		//Added because of warning (David Chevrier, Aug 25 2011)
			continue;
		}
	}
}
Beispiel #16
0
/* Release every periodic scan list (its record list, loop event and
 * lock) and then the array of list pointers itself. */
static void deletePeriodic(void)
{
    int idx;

    for (idx = 0; idx < nPeriodic; idx++) {
        periodic_scan_list *psl = papPeriodic[idx];

        ellFree(&psl->scan_list.list);
        epicsEventDestroy(psl->loopEvent);
        epicsMutexDestroy(psl->scan_list.lock);
        free(psl);
    }

    free(papPeriodic);
    papPeriodic = NULL;
}
Beispiel #17
0
/* Release all per-thread bookkeeping: unlink from the global thread
 * list (under listLock), then destroy the suspend event, pthread
 * attributes, name buffer and the info block itself. */
static void free_threadInfo(epicsThreadOSD *pthreadInfo)
{
    int status;

    /* NOTE(review): lock via the mutexLock() wrapper but unlock via
     * pthread_mutex_unlock() directly — looks asymmetric; confirm the
     * wrapper adds nothing that needs a matching unlock step. */
    status = mutexLock(&listLock);
    checkStatusQuit(status,"pthread_mutex_lock","free_threadInfo");
    if(pthreadInfo->isOnThreadList) ellDelete(&pthreadList,&pthreadInfo->node);
    status = pthread_mutex_unlock(&listLock);
    checkStatusQuit(status,"pthread_mutex_unlock","free_threadInfo");
    epicsEventDestroy(pthreadInfo->suspendEvent);
    status = pthread_attr_destroy(&pthreadInfo->attr);
    checkStatusQuit(status,"pthread_attr_destroy","free_threadInfo");
    free(pthreadInfo->name);
    free(pthreadInfo);
}
Beispiel #18
0
/* Tear down the callback subsystem.  Requires that every worker
 * thread has already exited (asserted per priority level); releases
 * each priority's wakeup event and ring buffer, the shared timer
 * queue, and resets module state for a possible re-init. */
void callbackCleanup(void)
{
    int prio;

    for (prio = 0; prio < NUM_CALLBACK_PRIORITIES; prio++) {
        cbQueueSet *set = &callbackQueue[prio];

        assert(epicsAtomicGetIntT(&set->threadsRunning)==0);
        epicsEventDestroy(set->semWakeUp);
        epicsRingPointerDelete(set->queue);
    }

    epicsTimerQueueRelease(timerQueue);
    callbackIsInit = 0;
    memset(callbackQueue, 0, sizeof(callbackQueue));
}
Beispiel #19
0
/* Verify epicsMutexTryLock: take the mutex here, then start
 * verifyTryLockThread which must observe the mutex as held and signal
 * "done" when it has checked. */
void verifyTryLock ()
{
    struct verifyTryLock verify;

    verify.mutex = epicsMutexMustCreate ();
    verify.done = epicsEventMustCreate ( epicsEventEmpty );

    /* uncontended try-lock must succeed immediately */
    testOk1(epicsMutexTryLock(verify.mutex) == epicsMutexLockOK);

    epicsThreadCreate ( "verifyTryLockThread", 40, 
        epicsThreadGetStackSize(epicsThreadStackSmall),
        verifyTryLockThread, &verify );

    /* wait for the helper thread to finish before touching verify
     * again — it lives on this stack frame */
    testOk1(epicsEventWait ( verify.done ) == epicsEventWaitOK);

    epicsMutexUnlock ( verify.mutex );
    epicsMutexDestroy ( verify.mutex );
    epicsEventDestroy ( verify.done );
}
Beispiel #20
0
/* Shut down the dbCa link layer.  If the dbCa worker thread is
 * running, ask it to exit and wait for confirmation; otherwise drain
 * the pending work list directly (unit-test case). */
void dbCaShutdown(void)
{
    if (dbCaCtl == ctlRun || dbCaCtl == ctlPause) {
        dbCaCtl = ctlExit;
        epicsEventSignal(workListEvent);
        /* worker signals startStopEvent as it exits */
        epicsEventMustWait(startStopEvent);
        epicsEventDestroy(startStopEvent);
    } else {
        /* manually cleanup queue since dbCa thread isn't running
         * which only happens in unit tests
         */
        caLink *pca;
        epicsMutexMustLock(workListLock);
        while((pca=(caLink*)ellGet(&workList))!=NULL) {
            /* only channel-clear requests own freeable state here */
            if(pca->link_action&CA_CLEAR_CHANNEL) {
                dbCaLinkFree(pca);
            }
        }
        epicsMutexUnlock(workListLock);
    }
}
Beispiel #21
0
/* Start one thread per CPU which will all try lock
 * the same spinlock.  They break as soon as one
 * fails to take the lock.
 */
static void verifyTryLock()
{
    int N, i;
    struct verifyTryLock verify;

    N = epicsThreadGetCPUs();
    if(N==1) {
        /* contention requires true parallelism */
        testSkip(1, "verifyTryLock() only for SMP systems");
        return;
    }

    verify.flag = 0;
    verify.spin = epicsSpinMustCreate();

    testDiag("Starting %d spinners", N);

    /* NOTE(review): calloc() result is not checked before use —
     * consider callocMustSucceed() as used elsewhere in this code. */
    verify.ents = calloc(N, sizeof(*verify.ents));
    for(i=0; i<N; i++) {
        verify.ents[i].main = &verify;
        verify.ents[i].done = epicsEventMustCreate(epicsEventEmpty);
        epicsThreadMustCreate("verifyTryLockThread", 40,
            epicsThreadGetStackSize(epicsThreadStackSmall),
                              verifyTryLockThread, &verify.ents[i]);
    }

    testDiag("All started");

    /* join each spinner and reclaim its completion event */
    for(i=0; i<N; i++) {
        epicsEventMustWait(verify.ents[i].done);
        epicsEventDestroy(verify.ents[i].done);
    }

    testDiag("All done");

    testOk(verify.flag==1, "epicsTryLock returns %d (expect 1)", verify.flag);

    epicsSpinDestroy(verify.spin);
    free(verify.ents);
}
Beispiel #22
0
/* Exercise the UDP announcement listener.  A "tester" thread runs the
 * caster; this function drives it through recv cycles using two
 * events: cycled[1] lets the tester advance, cycled[0] reports that
 * the tester finished a step.  Two announcements are sent: one
 * directly "from the server" (wildcard IP, so the sender address is
 * used) and one proxied (explicit IP). */
static void testUDP(void)
{
    caster_t caster;
    shSocket sender;
    osiSockAddr dest;
    union casterUDP buf;

    shSocketInit(&sender);

    sender.sd = shCreateSocket(AF_INET, SOCK_DGRAM, 0);
    if(sender.sd==INVALID_SOCKET) {
        testAbort("Failed to create socket");
        return;
    }

    lock = epicsMutexMustCreate();
    cycled[0] = epicsEventMustCreate(epicsEventEmpty);
    cycled[1] = epicsEventMustCreate(epicsEventEmpty);

    casterInit(&caster);

    caster.udpport = 0; /* test with random port */
    caster.testhook = &testerhook;

    epicsThreadMustCreate("udptester",
                          epicsThreadPriorityMedium,
                          epicsThreadGetStackSize(epicsThreadStackSmall),
                          &tester, &caster);

    epicsEventSignal(cycled[1]);

    /* wait for tester thread to setup socket */
    epicsEventMustWait(cycled[0]);

    /* lock guards caster fields shared with the tester thread */
    epicsMutexMustLock(lock);

    testOk1(caster.udpport!=0);

    testDiag("UDP test with port %d", caster.udpport);

    /* aim at the loopback port the caster actually bound */
    memset(&dest, 0, sizeof(dest));
    dest.ia.sin_family = AF_INET;
    dest.ia.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    dest.ia.sin_port = htons(caster.udpport);

    epicsMutexUnlock(lock);

    /* allow tester thread to begin recv() */
    epicsEventSignal(cycled[1]);

    testDiag("Test announcement directly from server");

    memset(&buf, 0, sizeof(buf));
    buf.m_msg.pid = htons(RECAST_MAGIC);
    buf.m_msg.serverIP = htonl(0xffffffff); /* wildcard: use sender addr */
    buf.m_msg.serverPort = htons(0x1020);
    buf.m_msg.serverKey = htonl(0x12345678);

    testOk1(0==shSendTo(&sender, &buf.m_bytes, 0x10, 0, &dest));

    /* wait for tester thread to completer recv() and end cycle */
    epicsEventMustWait(cycled[0]);

    /* verify the caster decoded the direct announcement */
    epicsMutexMustLock(lock);
    testOk1(cycles==1);
    testOk1(result==0);
    testOk1(caster.haveserv==1);
    testOk1(caster.nameserv.ia.sin_family==AF_INET);
    testOk1(caster.nameserv.ia.sin_addr.s_addr==htonl(INADDR_LOOPBACK));
    testOk1(caster.nameserv.ia.sin_port==htons(0x1020));
    testOk1(caster.servkey==0x12345678);
    epicsMutexUnlock(lock);

    testDiag("Test proxied announcement");

    /* start next cycle */
    epicsEventSignal(cycled[1]);

    /* wait for tester thread to setup socket */
    epicsEventMustWait(cycled[0]);

    /* the caster may have re-bound; refresh the destination port */
    epicsMutexMustLock(lock);

    dest.ia.sin_port = htons(caster.udpport);

    epicsMutexUnlock(lock);

    /* explicit server IP this time (proxied announcement) */
    buf.m_msg.serverIP = htonl(0x50607080);

    /* allow tester thread to begin recv() */
    epicsEventSignal(cycled[1]);

    testOk1(0==shSendTo(&sender, &buf.m_bytes, 0x10, 0, &dest));

    /* wait for tester thread to completer recv() and end cycle */
    epicsEventMustWait(cycled[0]);

    /* verify the caster took the IP from the message body */
    epicsMutexMustLock(lock);
    testOk1(cycles==2);
    testOk1(result==0);
    testOk1(caster.haveserv==1);
    testOk1(caster.nameserv.ia.sin_family==AF_INET);
    testOk1(caster.nameserv.ia.sin_addr.s_addr==htonl(0x50607080));
    testOk1(caster.nameserv.ia.sin_port==htons(0x1020));
    epicsMutexUnlock(lock);

    /* begin shutdown cycle */
    epicsEventSignal(cycled[1]);
    epicsEventMustWait(cycled[0]);
    epicsEventSignal(cycled[1]);


    casterShutdown(&caster);

    epicsEventDestroy(cycled[0]);
    epicsEventDestroy(cycled[1]);
    epicsMutexDestroy(lock);
}
Beispiel #23
0
/*
 * destroy_client ()
 *
 * Release every resource owned by an rsrv client structure: watchdog
 * registration, socket, protocol-specific send/recv buffers, locks,
 * the blocking semaphore, name strings, and finally the client
 * structure itself (returned to its free list).
 */
void destroy_client ( struct client *client )
{
    if ( ! client ) {
        return;
    }
    
    if ( client->tid != 0 ) {
        taskwdRemove ( client->tid );
    }

    if ( client->sock != INVALID_SOCKET ) {
        epicsSocketDestroy ( client->sock );
    }

    if ( client->proto == IPPROTO_TCP ) {
        /* TCP buffers come from size-class free lists */
        if ( client->send.buf ) {
            if ( client->send.type == mbtSmallTCP ) {
                freeListFree ( rsrvSmallBufFreeListTCP,  client->send.buf );
            }
            else if ( client->send.type == mbtLargeTCP ) {
                freeListFree ( rsrvLargeBufFreeListTCP,  client->send.buf );
            }
            else {
                errlogPrintf ( "CAS: Corrupt send buffer free list type code=%u during client cleanup?\n",
                    client->send.type );
            }
        }
        if ( client->recv.buf ) {
            if ( client->recv.type == mbtSmallTCP ) {
                freeListFree ( rsrvSmallBufFreeListTCP,  client->recv.buf );
            }
            else if ( client->recv.type == mbtLargeTCP ) {
                freeListFree ( rsrvLargeBufFreeListTCP,  client->recv.buf );
            }
            else {
                /* Fix: report the recv buffer's type code here; the
                 * original printed client->send.type (copy-paste). */
                errlogPrintf ( "CAS: Corrupt recv buffer free list type code=%u during client cleanup?\n",
                    client->recv.type );
            }
        }
    }
    else if ( client->proto == IPPROTO_UDP ) {
        /* UDP buffers are plain heap allocations */
        if ( client->send.buf ) {
            free ( client->send.buf );
        }
        if ( client->recv.buf ) {
            free ( client->recv.buf );
        }
    }

    if ( client->eventqLock ) {
        epicsMutexDestroy ( client->eventqLock );
    }

    if ( client->chanListLock ) {
        epicsMutexDestroy ( client->chanListLock );
    }

    if ( client->putNotifyLock ) {
        epicsMutexDestroy ( client->putNotifyLock );
    }

    if ( client->lock ) {
        epicsMutexDestroy ( client->lock );
    }

    if ( client->blockSem ) {
        epicsEventDestroy ( client->blockSem );
    }

    if ( client->pUserName ) {
        free ( client->pUserName );
    }

    if ( client->pHostName ) {
        free ( client->pHostName );
    } 

    freeListFree ( rsrvClientFreeList, client );
}
Beispiel #24
0
/*
 *  logClientCreate()
 *
 *  Allocate and initialize a log client for the given server address,
 *  start its reconnect watchdog thread, and wait (bounded by
 *  LOG_SERVER_CREATE_CONNECT_SYNC_TIMEOUT) for the first connect.
 *  Returns the client id, or NULL on allocation/startup failure.
 */
logClientId epicsShareAPI logClientCreate (
    struct in_addr server_addr, unsigned short server_port)
{
    epicsTimeStamp begin, current;
    logClient *pClient;
    double diff;

    pClient = calloc (1, sizeof (*pClient));
    if (pClient==NULL) {
        return NULL;
    }

    pClient->addr.sin_family = AF_INET;
    pClient->addr.sin_addr = server_addr;
    pClient->addr.sin_port = htons(server_port);
    ipAddrToDottedIP (&pClient->addr, pClient->name, sizeof(pClient->name));

    pClient->mutex = epicsMutexCreate ();
    if ( ! pClient->mutex ) {
        free ( pClient );
        return NULL;
    }

    pClient->sock = INVALID_SOCKET;
    pClient->connected = 0u;
    pClient->connFailStatus = 0;
    pClient->shutdown = 0;
    pClient->shutdownConfirm = 0;

    /* ensure the client is torn down at process exit */
    epicsAtExit (logClientDestroy, (void*) pClient);
    
    pClient->stateChangeNotify = epicsEventCreate (epicsEventEmpty);
    if ( ! pClient->stateChangeNotify ) {
        epicsMutexDestroy ( pClient->mutex );
        free ( pClient );
        return NULL;
    }
   
    pClient->restartThreadId = epicsThreadCreate (
        "logRestart", epicsThreadPriorityLow, 
        epicsThreadGetStackSize(epicsThreadStackSmall),
        logClientRestart, pClient );
    if ( pClient->restartThreadId == NULL ) {
        epicsMutexDestroy ( pClient->mutex );
        epicsEventDestroy ( pClient->stateChangeNotify );
        free (pClient);
        fprintf(stderr, "log client: unable to start log client connection watch dog thread\n");
        return NULL;
    }

    /*
     * attempt to synchronize with circuit connect
     */
    /* Poll in timeout slices: take the mutex only to read the
     * "connected" flag (forces a cache flush on SMP), release it
     * while waiting for a state-change notification. */
    epicsTimeGetCurrent ( & begin );
    epicsMutexMustLock ( pClient->mutex );
    do {
        epicsMutexUnlock ( pClient->mutex );
        epicsEventWaitWithTimeout ( 
            pClient->stateChangeNotify, 
            LOG_SERVER_CREATE_CONNECT_SYNC_TIMEOUT / 10.0 ); 
        epicsTimeGetCurrent ( & current );
        diff = epicsTimeDiffInSeconds ( & current, & begin );
        epicsMutexMustLock ( pClient->mutex );
    }
    while ( ! pClient->connected && diff < LOG_SERVER_CREATE_CONNECT_SYNC_TIMEOUT );
    epicsMutexUnlock ( pClient->mutex );

    /* a connect timeout is reported but is not fatal */
    if ( ! pClient->connected ) {
        fprintf (stderr, "log client create: timed out synchronizing with circuit connect to \"%s\" after %.1f seconds\n",
            pClient->name, LOG_SERVER_CREATE_CONNECT_SYNC_TIMEOUT );
    }
        
    return (void *) pClient;
}
Beispiel #25
0
/*
 * logClientDestroy
 *
 * Ask the log client's thread to shut down, unblock it from any
 * pending send()/connect() using the platform-appropriate mechanism,
 * wait (bounded) for confirmation, then release all client
 * resources.  If the thread does not confirm in time, cleanup is
 * aborted to avoid freeing state the thread may still touch.
 */
static void logClientDestroy (logClientId id)
{
    enum epicsSocketSystemCallInterruptMechanismQueryInfo interruptInfo;
    logClient *pClient = (logClient *) id;
    epicsTimeStamp begin, current;
    double diff;

    /* command log client thread to shutdown - taking mutex here */
    /* forces cache flush on SMP machines */
    epicsMutexMustLock ( pClient->mutex );
    pClient->shutdown = 1u;
    epicsMutexUnlock ( pClient->mutex );

    /* unblock log client thread blocking in send() or connect() */
    interruptInfo =
        epicsSocketSystemCallInterruptMechanismQuery ();
    switch ( interruptInfo ) {
    case esscimqi_socketCloseRequired:
        logClientClose ( pClient );
        break;
    case esscimqi_socketBothShutdownRequired:
        shutdown ( pClient->sock, SHUT_WR );
        break;
    case esscimqi_socketSigAlarmRequired:
        epicsSignalRaiseSigAlarm ( pClient->restartThreadId );
        break;
    default:
        break;
    };

    /* wait for confirmation that the thread exited - taking */
    /* mutex here forces cache update on SMP machines */
    epicsTimeGetCurrent ( & begin );
    epicsMutexMustLock ( pClient->mutex );
    do {
        epicsMutexUnlock ( pClient->mutex );
        epicsEventWaitWithTimeout ( 
            pClient->stateChangeNotify, 
            LOG_SERVER_SHUTDOWN_TIMEOUT / 10.0 ); 
        epicsTimeGetCurrent ( & current );
        diff = epicsTimeDiffInSeconds ( & current, & begin );
        epicsMutexMustLock ( pClient->mutex );
    }
    while ( ! pClient->shutdownConfirm && diff < LOG_SERVER_SHUTDOWN_TIMEOUT );
    epicsMutexUnlock ( pClient->mutex );

    /* deliberately leak rather than free state a live thread may
     * still be using */
    if ( ! pClient->shutdownConfirm ) {
        fprintf ( stderr, "log client shutdown: timed out stopping"
            " reconnect thread for \"%s\" after %.1f seconds - cleanup aborted\n",
            pClient->name, LOG_SERVER_SHUTDOWN_TIMEOUT );
        return;
    }

    logClientClose ( pClient );

    epicsMutexDestroy ( pClient->mutex );
   
    epicsEventDestroy ( pClient->stateChangeNotify );

    free ( pClient );
}
Beispiel #26
0
/*
 *  Task for continuing record processing
 *     1. Find lockset in stack for precord.
 *       DO 2-3 while breakpoints exist in the lockset.
 *        2. Wait on execution semaphore ...
 *        3. Run through every entrypoint in queue, processing
 *             those that are scheduled.
 *     4. Free resources for lockset, and exit task.
 */
static void dbBkptCont(dbCommon *precord)
{
  struct LS_LIST *pnode;
  struct EP_LIST *pqe = NULL;

 /*
  *  Reset breakpoint, process record, and
  *    reset bkpt field in record
  */
  epicsMutexMustLock(bkpt_stack_sem);

  FIND_LOCKSET(precord, pnode);

  if (pnode == NULL) {
    printf("   BKPT> Logic error in dbBkptCont()\n");
   /* Fix: the original returned while still holding bkpt_stack_sem,
    *  deadlocking every other breakpoint operation. */
    epicsMutexUnlock(bkpt_stack_sem);
    return;
  }

 /*
  *  For every entrypoint scheduled, process.  Run process
  *    until there are no more breakpoints remaining in a
  *    lock set.
  */
  do {
   /* Give up semaphore before waiting to run ... */
    epicsMutexUnlock(bkpt_stack_sem);

   /* Wait to run */
    epicsEventMustWait(pnode->ex_sem);

   /* Bkpt stack must still be stable ! */
    epicsMutexMustLock(bkpt_stack_sem);

    pqe = (struct EP_LIST *) ellFirst(&pnode->ep_queue);

   /* Run through entrypoint queue */
    while (pqe != NULL) {
        /* check if entrypoint is currently scheduled */
         if (pqe->sched) {
             /* save current entrypoint */
              pnode->current_ep = pqe->entrypoint;

             /* lock the lockset, process record, unlock */
              dbScanLock(precord);
              dbProcess(pqe->entrypoint);
              dbScanUnlock(precord);

             /* reset schedule and stepping flag - Do this AFTER processing */
              pqe->sched = 0;
              pnode->step = 0;
         }
         pqe = (struct EP_LIST *) ellNext((ELLNODE *)pqe);
    }

   /* Reset precord. (Since no records are at a breakpoint) */
    pnode->precord = NULL;
  } while (ellCount(&pnode->bp_list) != 0);

 /* remove node from lockset stack */
  ellDelete(&lset_stack, (ELLNODE *)pnode);
  --lset_stack_count;

 /* free entrypoint queue */
  ellFree(&pnode->ep_queue);

 /* remove execution semaphore */
  epicsEventDestroy(pnode->ex_sem);

  printf("\n   BKPT> End debug of lockset %lu\n-> ", pnode->l_num);

 /* free list node */
  free(pnode);

  epicsMutexUnlock(bkpt_stack_sem);
}
Beispiel #27
0
/* Verify multi-threaded I/O Intr scanning: two scan lists, one record
 * on each of three callback priorities, two worker threads per
 * priority so all six records can process concurrently.  Records
 * block in their device support until released, proving parallel
 * processing; completion callbacks must fire only at shutdown. */
static void testMultiThreading(void)
{
    int i;
    int masks[2];
    testmulti data[6];
    xdrv *drvs[2];

    memset(masks, 0, sizeof(masks));
    memset(data, 0, sizeof(data));

    for(i=0; i<NELEMENTS(data); i++) {
        data[i].wake = epicsEventMustCreate(epicsEventEmpty);
        data[i].wait = epicsEventMustCreate(epicsEventEmpty);
    }

    testDiag("Test multi-threaded I/O Intr scanning");

    testdbPrepare();
    testdbReadDatabase("dbTestIoc.dbd", NULL, NULL);
    dbTestIoc_registerRecordDeviceDriver(pdbbase);

    /* create two scan lists with one record on each of three priorities */

    /* group#, member#, priority */
    loadRecord(0, 0, "LOW");
    loadRecord(1, 1, "LOW");
    loadRecord(0, 2, "MEDIUM");
    loadRecord(1, 3, "MEDIUM");
    loadRecord(0, 4, "HIGH");
    loadRecord(1, 5, "HIGH");

    drvs[0] = xdrv_add(0, &testcbmulti, data);
    drvs[1] = xdrv_add(1, &testcbmulti, data);
    scanIoSetComplete(drvs[0]->scan, &testcompmulti, &masks[0]);
    scanIoSetComplete(drvs[1]->scan, &testcompmulti, &masks[1]);

    /* just enough workers to process all records concurrently */
    callbackParallelThreads(2, "LOW");
    callbackParallelThreads(2, "MEDIUM");
    callbackParallelThreads(2, "HIGH");

    eltc(0);
    testIocInitOk();
    eltc(1);

    testDiag("Scan first list");
    testOk1(scanIoRequest(drvs[0]->scan)==0x7);
    testDiag("Scan second list");
    testOk1(scanIoRequest(drvs[1]->scan)==0x7);

    testDiag("Wait for everything to start");
    for(i=0; i<NELEMENTS(data); i++)
        epicsEventMustWait(data[i].wait);

    testDiag("Wait one more second");
    epicsThreadSleep(1.0);

    /* all records started processing, none finished yet */
    for(i=0; i<NELEMENTS(data); i++) {
        testOk(data[i].hasprocd==1, "data[%d].hasprocd==1 (%d)", i, data[i].hasprocd);
        testOk(data[i].getcomplete==0, "data[%d].getcomplete==0 (%d)", i, data[i].getcomplete);
    }

    testDiag("Release all and complete");
    for(i=0; i<NELEMENTS(data); i++)
        epicsEventMustTrigger(data[i].wake);

    testIocShutdownOk();

    for(i=0; i<NELEMENTS(data); i++) {
        /* Fix: message now matches the condition being tested; the
         * original said "getcomplete==0" while asserting ==1. */
        testOk(data[i].getcomplete==1, "data[%d].getcomplete==1 (%d)", i, data[i].getcomplete);
    }
    testOk1(masks[0]==0x7);
    testOk1(masks[1]==0x7);

    testdbCleanup();

    xdrv_reset();

    for(i=0; i<NELEMENTS(data); i++) {
        epicsEventDestroy(data[i].wake);
        epicsEventDestroy(data[i].wait);
    }
}
Beispiel #28
0
static void testSingleThreading(void)
{
    /* Verify that I/O Intr scans of two scan lists are serialized:
     * with the default (single-threaded) callback configuration, the
     * second list's records must not begin processing until the first
     * list's records have been released and completed. */
    int i;
    testsingle data[2];     /* per-list counters plus wake/wait events */
    xdrv *drvs[2];          /* one simulated interrupt driver per list */

    memset(data, 0, sizeof(data));

    /* One wake/wait event pair per list and per callback priority.
     * Presumably the driver callback (testcb, defined elsewhere) blocks
     * on wake[] and signals wait[], letting the test control exactly
     * when each record "finishes" -- confirm against testcb. */
    for(i=0; i<2; i++) {
        int p;
        for(p=0; p<NUM_CALLBACK_PRIORITIES; p++) {
            data[i].wake[p] = epicsEventMustCreate(epicsEventEmpty);
            data[i].wait[p] = epicsEventMustCreate(epicsEventEmpty);
        }
    }

    testDiag("Test single-threaded I/O Intr scanning");

    testdbPrepare();
    testdbReadDatabase("dbTestIoc.dbd", NULL, NULL);
    dbTestIoc_registerRecordDeviceDriver(pdbbase);

    /* create two scan lists with one record on each of three priorities */

    /* group#, member#, priority */
    loadRecord(0, 0, "LOW");
    loadRecord(1, 0, "LOW");
    loadRecord(0, 1, "MEDIUM");
    loadRecord(1, 1, "MEDIUM");
    loadRecord(0, 2, "HIGH");
    loadRecord(1, 2, "HIGH");

    /* attach a driver and a scan-completion callback to each list */
    drvs[0] = xdrv_add(0, &testcb, &data[0]);
    drvs[1] = xdrv_add(1, &testcb, &data[1]);
    scanIoSetComplete(drvs[0]->scan, &testcomp, &data[0]);
    scanIoSetComplete(drvs[1]->scan, &testcomp, &data[1]);

    /* suppress IOC startup console chatter */
    eltc(0);
    testIocInitOk();
    eltc(1);

    /* Request both scans back to back; the second must queue up
     * behind the first rather than run concurrently. */
    testDiag("Scan first list");
    scanIoRequest(drvs[0]->scan);
    testDiag("Scan second list");
    scanIoRequest(drvs[1]->scan);

    testDiag("Wait for first list to complete");
    for(i=0; i<NUM_CALLBACK_PRIORITIES; i++)
        epicsEventMustWait(data[0].wait[i]);

    /* Grace period: give the second list a chance to (incorrectly)
     * start before checking it has not. */
    testDiag("Wait one more second");
    epicsThreadSleep(1.0);

    /* First list processed and completed at all three priorities... */
    testOk1(data[0].hasprocd[0]==1);
    testOk1(data[0].hasprocd[1]==1);
    testOk1(data[0].hasprocd[2]==1);
    testOk1(data[0].getcomplete[0]==1);
    testOk1(data[0].getcomplete[1]==1);
    testOk1(data[0].getcomplete[2]==1);
    /* ...while the second list has not started at all. */
    testOk1(data[1].hasprocd[0]==0);
    testOk1(data[1].hasprocd[1]==0);
    testOk1(data[1].hasprocd[2]==0);
    testOk1(data[1].getcomplete[0]==0);
    testOk1(data[1].getcomplete[1]==0);
    testOk1(data[1].getcomplete[2]==0);

    testDiag("Release the first scan and wait for the second");
    for(i=0; i<NUM_CALLBACK_PRIORITIES; i++)
        epicsEventMustTrigger(data[0].wake[i]);
    for(i=0; i<NUM_CALLBACK_PRIORITIES; i++)
        epicsEventMustWait(data[1].wait[i]);

    /* Now both lists have processed and completed at every priority. */
    testOk1(data[0].hasprocd[0]==1);
    testOk1(data[0].hasprocd[1]==1);
    testOk1(data[0].hasprocd[2]==1);
    testOk1(data[0].getcomplete[0]==1);
    testOk1(data[0].getcomplete[1]==1);
    testOk1(data[0].getcomplete[2]==1);
    testOk1(data[1].hasprocd[0]==1);
    testOk1(data[1].hasprocd[1]==1);
    testOk1(data[1].hasprocd[2]==1);
    testOk1(data[1].getcomplete[0]==1);
    testOk1(data[1].getcomplete[1]==1);
    testOk1(data[1].getcomplete[2]==1);

    /* Unblock the second scan so the IOC can shut down cleanly. */
    testDiag("Release the second scan and complete");
    for(i=0; i<NUM_CALLBACK_PRIORITIES; i++)
        epicsEventMustTrigger(data[1].wake[i]);

    testIocShutdownOk();

    testdbCleanup();

    xdrv_reset();

    /* Release all event pairs created above. */
    for(i=0; i<2; i++) {
        int p;
        for(p=0; p<NUM_CALLBACK_PRIORITIES; p++) {
            epicsEventDestroy(data[i].wake[p]);
            epicsEventDestroy(data[i].wait[p]);
        }
    }
}
Beispiel #29
0
void epicsThreadPoolDestroy(epicsThreadPool *pool)
{
    /* Shut down and free a thread pool:
     *   1. stop accepting new jobs but let everything already queued run,
     *   2. wake and stop all worker threads,
     *   3. give every remaining job a cleanup notification, then free it
     *      (or orphan it if the caller still holds the handle).
     * Passing NULL is a no-op. */
    unsigned int nThr;
    ELLLIST notify;     /* jobs to be notified of destruction, once unlocked */
    ELLNODE *cur;

    if (!pool)
        return;

    ellInit(&notify);

    epicsMutexMustLock(pool->guard);

    /* run remaining queued jobs */
    epicsThreadPoolControlImpl(pool, epicsThreadPoolQueueAdd, 0); /* refuse new jobs */
    epicsThreadPoolControlImpl(pool, epicsThreadPoolQueueRun, 1); /* let queue drain */
    nThr = pool->threadsRunning;
    pool->freezeopt = 1; /* presumably blocks further control changes during shutdown -- confirm */

    epicsMutexUnlock(pool->guard);

    /* -1.0: presumably "no timeout" -- wait until the queue is empty */
    epicsThreadPoolWait(pool, -1.0);
    /* At this point all queued jobs have run */

    epicsMutexMustLock(pool->guard);

    pool->shutdown = 1;
    /* wakeup all */
    if (pool->threadsWaking < pool->threadsSleeping) {
        pool->threadsWaking = pool->threadsSleeping;
        epicsEventSignal(pool->workerWakeup);
    }

    /* Collect every job still known to the pool (owned + queued) so the
     * cleanup callbacks below can run without holding the guard. */
    ellConcat(&notify, &pool->owned);
    ellConcat(&notify, &pool->jobs);

    epicsMutexUnlock(pool->guard);

    /* Wait for the last exiting worker to signal shutdownEvent; skipped
     * when no workers were running at the start of shutdown. */
    if (nThr && epicsEventWait(pool->shutdownEvent) != epicsEventWaitOK){
        errlogMessage("epicsThreadPoolDestroy: wait error");
        /* NOTE(review): returning here leaks the pool, its events/mutex,
         * and skips the job cleanup notifications below -- confirm this
         * early return is intentional for the wait-error case. */
        return;
    }

    /* all workers are now shutdown */

    /* notify remaining jobs that pool is being destroyed */
    while ((cur = ellGet(&notify)) != NULL) {
        epicsJob *job = CONTAINER(cur, epicsJob, jobnode);

        /* run the job's function once in cleanup mode */
        job->running = 1;
        job->func(job->arg, epicsJobModeCleanup);
        job->running = 0;
        if (job->freewhendone)
            free(job);
        else
            job->pool = NULL; /* orphan */
    }

    /* Destroy synchronization primitives and release the pool itself. */
    epicsEventDestroy(pool->workerWakeup);
    epicsEventDestroy(pool->shutdownEvent);
    epicsEventDestroy(pool->observerWakeup);
    epicsMutexDestroy(pool->guard);

    free(pool);
}
Beispiel #30
0
/* Create a new thread pool.
 * opts == NULL selects the built-in defaults; otherwise *opts is copied
 * into the pool (initialThreads is clamped to maxThreads).
 * Returns NULL on allocation or primitive-creation failure, or when
 * initialThreads > 0 but not a single worker could be started.
 * Starting fewer than initialThreads workers is a warning, not an error. */
epicsThreadPool* epicsThreadPoolCreate(epicsThreadPoolConfig *opts)
{
    epicsThreadPool *pool;
    size_t n;

    /* A zeroed maxThreads almost certainly means the caller passed an
     * options struct without initializing it first. */
    if (opts && opts->maxThreads == 0) {
        errlogMessage("Error: epicsThreadPoolCreate() options provided, but not initialized");
        return NULL;
    }

    pool = calloc(1, sizeof(*pool));
    if (!pool)
        return NULL;

    if (!opts)
        epicsThreadPoolConfigDefaults(&pool->conf);
    else
        pool->conf = *opts;

    /* Never promise more initial workers than the pool may ever have. */
    if (pool->conf.initialThreads > pool->conf.maxThreads)
        pool->conf.initialThreads = pool->conf.maxThreads;

    pool->workerWakeup = epicsEventCreate(epicsEventEmpty);
    pool->shutdownEvent = epicsEventCreate(epicsEventEmpty);
    pool->observerWakeup = epicsEventCreate(epicsEventEmpty);
    pool->guard = epicsMutexCreate();

    /* All four primitives are required; bail out if any failed. */
    if (!pool->guard || !pool->workerWakeup ||
        !pool->shutdownEvent || !pool->observerWakeup)
        goto cleanup;

    ellInit(&pool->jobs);
    ellInit(&pool->owned);

    epicsMutexMustLock(pool->guard);

    /* Spin up the initial worker threads. */
    for (n = 0; n < pool->conf.initialThreads; n++)
        createPoolThread(pool);

    if (pool->conf.initialThreads != 0 && pool->threadsRunning == 0) {
        /* Asked for workers but got none at all: treat as fatal. */
        epicsMutexUnlock(pool->guard);
        errlogPrintf("Error: Unable to create any threads for thread pool\n");
        goto cleanup;
    }

    if (pool->threadsRunning < pool->conf.initialThreads) {
        /* Partial success: warn but keep the pool usable. */
        errlogPrintf("Warning: Unable to create all threads for thread pool (%u/%u)\n",
                     pool->threadsRunning, pool->conf.initialThreads);
    }

    epicsMutexUnlock(pool->guard);

    return pool;

cleanup:
    /* Tear down whichever primitives were successfully created. */
    if (pool->workerWakeup)
        epicsEventDestroy(pool->workerWakeup);
    if (pool->shutdownEvent)
        epicsEventDestroy(pool->shutdownEvent);
    if (pool->observerWakeup)
        epicsEventDestroy(pool->observerWakeup);
    if (pool->guard)
        epicsMutexDestroy(pool->guard);

    free(pool);
    return NULL;
}