static void logClient(void* raw, const char* msg) { clientPvt *pvt = raw; size_t len; char show[46]; /* Simulate thread priority on non-realtime * OSs like Linux. This will cause the logging * thread to sleep with the buffer lock held. */ if (pvt->jam > 0) { pvt->jam = 0; epicsEventMustWait(pvt->jammer); } else if (pvt->jam < 0) { pvt->jam++; if (pvt->jam == 0) epicsEventMustWait(pvt->jammer); } len = strlen(msg); if (len > 45) { /* Only show start and end of long messages */ strncpy(show, msg, 20); show[20] = 0; strcat(show + 20, " ... "); strcat(show + 25, msg + len - 20); } else { strcpy(show, msg); } if (pvt->checkLen) if (!testOk(pvt->checkLen == len, "Received %d chars", (int) len)) { testDiag("Expected %d", (int) pvt->checkLen); if (!pvt->expect) testDiag("Message is \"%s\"", show); } if (pvt->expect) { if (!testOk(strcmp(pvt->expect, msg) == 0, "Message is \"%s\"", show)) { len = strlen(pvt->expect); if (len > 45) { testDiag("Expected \"%.20s ... %s\"", pvt->expect, pvt->expect + len - 20); } else { testDiag("Expected \"%s\"", pvt->expect); } } } pvt->count++; }
static void tester(void *raw) { caster_t *self = raw; epicsEventId sd; testDiag("UDP tester starts"); epicsMutexMustLock(lock); while(!self->shutdown) { epicsMutexUnlock(lock); epicsEventMustWait(cycled[1]); epicsMutexMustLock(lock); result = doCasterUDPPhase(self); cycles++; epicsEventSignal(cycled[0]); } testDiag("UDP tester stops"); sd = self->shutdownEvent; epicsMutexUnlock(lock); epicsEventSignal(sd); }
/* Worker thread for one callback priority queue: pops CALLBACK
 * pointers from the ring buffer and invokes them until
 * mySet->shutdown is set.  NOTE(review): the overflow/wakeup
 * protocol is shared with the enqueue side elsewhere in this file. */
static void callbackTask(void *arg)
{
    int prio = *(int*)arg;                  /* priority index from creator */
    cbQueueSet *mySet = &callbackQueue[prio];

    taskwdInsert(0, NULL, NULL);            /* register with watchdog */
    epicsEventSignal(startStopEvent);       /* tell starter we are running */
    while(!mySet->shutdown) {
        void *ptr;
        /* only sleep when the ring is empty */
        if (epicsRingPointerIsEmpty(mySet->queue))
            epicsEventMustWait(mySet->semWakeUp);

        while ((ptr = epicsRingPointerPop(mySet->queue))) {
            CALLBACK *pcallback = (CALLBACK *)ptr;
            /* more work pending: pass the wakeup along so draining
             * continues even if this callback blocks */
            if(!epicsRingPointerIsEmpty(mySet->queue))
                epicsEventMustTrigger(mySet->semWakeUp);
            mySet->queueOverflow = FALSE;   /* queue has drained */
            (*pcallback->callback)(pcallback);  /* run user callback */
        }
    }
    /* last thread out acknowledges shutdown */
    if(!epicsAtomicDecrIntT(&mySet->threadsRunning))
        epicsEventSignal(startStopEvent);
    taskwdRemove(0);
}
/* Starts "mcnt" jobs in a pool with initial and max * thread counts "icnt" and "mcnt". * The test ensures that all jobs run in parallel. * "cork" checks the function of pausing the run queue * with epicsThreadPoolQueueRun */ static void postjobs(size_t icnt, size_t mcnt, int cork) { size_t i; epicsThreadPool *pool; countPriv *priv=callocMustSucceed(1, sizeof(*priv), "postjobs priv alloc"); priv->guard=epicsMutexMustCreate(); priv->done=epicsEventMustCreate(epicsEventEmpty); priv->allrunning=epicsEventMustCreate(epicsEventEmpty); priv->count=mcnt; priv->job=callocMustSucceed(mcnt, sizeof(*priv->job), "postjobs job array"); testDiag("postjobs(%lu,%lu)", (unsigned long)icnt, (unsigned long)mcnt); { epicsThreadPoolConfig conf; epicsThreadPoolConfigDefaults(&conf); conf.initialThreads=icnt; conf.maxThreads=mcnt; testOk1((pool=epicsThreadPoolCreate(&conf))!=NULL); if(!pool) return; } if(cork) epicsThreadPoolControl(pool, epicsThreadPoolQueueRun, 0); for(i=0; i<mcnt; i++) { testDiag("i=%lu", (unsigned long)i); priv->job[i] = epicsJobCreate(pool, &countjob, priv); testOk1(priv->job[i]!=NULL); testOk1(epicsJobQueue(priv->job[i])==0); } if(cork) { /* no jobs should have run */ epicsMutexMustLock(priv->guard); testOk1(priv->count==mcnt); epicsMutexUnlock(priv->guard); epicsThreadPoolControl(pool, epicsThreadPoolQueueRun, 1); } testDiag("Waiting for all jobs to start"); epicsEventMustWait(priv->allrunning); testDiag("Stop all"); epicsEventSignal(priv->done); for(i=0; i<mcnt; i++) { testDiag("i=%lu", (unsigned long)i); epicsJobDestroy(priv->job[i]); } epicsThreadPoolDestroy(pool); epicsMutexDestroy(priv->guard); epicsEventDestroy(priv->allrunning); epicsEventDestroy(priv->done); free(priv->job); free(priv); }
/* Exercise pasynManager->cancelRequest() in three situations:
 * 1) request still queued behind a busy port,
 * 2) the request callback already active,
 * 3) the queue-timeout callback active.
 * The epicsThreadSleep() calls are deliberate: they give the port
 * thread time to reach the desired state before cancelling. */
static void cancelTest(asynUser *pasynUser)
{
    cmdInfo *pcmdInfo = (cmdInfo *)pasynUser->userPvt;
    threadInfo *pthreadInfo = pcmdInfo->pthreadInfo;
    asynStatus status;
    int wasQueued;

    /* Case 1: make the port busy so the request stays queued */
    fprintf(pcmdInfo->file,"%s %s cancelRequest should remove\n",
        pthreadInfo->threadName,pcmdInfo->message);
    startBusy(pthreadInfo);
    epicsEventTryWait(pcmdInfo->callbackDone);  /* clear any stale event */
    status = pasynManager->queueRequest(pasynUser,asynQueuePriorityLow,0.0);
    if(checkStatus(status,pthreadInfo,"testCancelRequest")) return;
    epicsThreadSleep(.01);
    status = pasynManager->cancelRequest(pasynUser,&wasQueued);
    if(checkStatus(status,pthreadInfo,"testCancelRequest")) return;
    fprintf(pcmdInfo->file,"%s %s cancelRequest wasQueued %d\n",
        pthreadInfo->threadName,pcmdInfo->message,wasQueued);
    if(!wasQueued) epicsEventMustWait(pcmdInfo->callbackDone);
    epicsThreadSleep(.04); /*wait for busy to complete*/

    /* Case 2: let the callback become active before cancelling */
    fprintf(pcmdInfo->file,"%s %s should find callback active\n",
        pthreadInfo->threadName,pcmdInfo->message);
    epicsEventTryWait(pcmdInfo->callbackDone);
    status = pasynManager->queueRequest(pasynUser,asynQueuePriorityLow,0.05);
    if(checkStatus(status,pthreadInfo,"testCancelRequest")) return;
    epicsThreadSleep(.01);
    status = pasynManager->cancelRequest(pasynUser,&wasQueued);
    if(checkStatus(status,pthreadInfo,"testCancelRequest")) return;
    fprintf(pcmdInfo->file,"%s %s cancelRequest wasQueued %d\n",
        pthreadInfo->threadName,pcmdInfo->message,wasQueued);
    if(!wasQueued) epicsEventMustWait(pcmdInfo->callbackDone);

    /* Case 3: busy port + short queue timeout -> timeout callback runs */
    fprintf(pcmdInfo->file,"%s %s should find timeout active\n",
        pthreadInfo->threadName,pcmdInfo->message);
    startBusy(pthreadInfo);
    epicsEventTryWait(pcmdInfo->callbackDone);
    status = pasynManager->queueRequest(pasynUser,asynQueuePriorityLow,0.02);
    if(checkStatus(status,pthreadInfo,"testCancelRequest")) return;
    epicsThreadSleep(.03);
    status = pasynManager->cancelRequest(pasynUser,&wasQueued);
    if(checkStatus(status,pthreadInfo,"testCancelRequest")) return;
    fprintf(pcmdInfo->file,"%s %s cancelRequest wasQueued %d\n",
        pthreadInfo->threadName,pcmdInfo->message,wasQueued);
    if(!wasQueued) epicsEventMustWait(pcmdInfo->callbackDone);
}
/* scanIoSetComplete() hook for the single-threaded test: verify the
 * record processed before completion, mark completion, then rendezvous
 * with the main test thread. */
static void testcomp(void *raw, IOSCANPVT scan, int prio)
{
    testsingle *priv = raw;

    testOk1(priv->hasprocd[prio]==1);
    testOk1(priv->getcomplete[prio]==0);
    priv->getcomplete[prio] = 1;
    epicsEventMustTrigger(priv->wait[prio]);  /* unblock main thread */
    epicsEventMustWait(priv->wake[prio]);     /* wait to be released */
}
/* Suspend the calling thread until its per-thread suspendEvent is
 * signalled (presumably by epicsThreadResume() — defined elsewhere). */
epicsShareFunc void epicsShareAPI epicsThreadSuspendSelf(void)
{
    epicsThreadOSD *pthreadInfo;

    epicsThreadInit();   /* ensure the thread library is initialized */
    pthreadInfo = (epicsThreadOSD *)pthread_getspecific(getpthreadInfo);
    if(pthreadInfo==NULL)
        pthreadInfo = createImplicit();  /* thread not created via this API */
    pthreadInfo->isSuspended = 1;        /* mark before blocking */
    epicsEventMustWait(pthreadInfo->suspendEvent);
}
/* Caster state hook: at the UDP-setup state, drop the lock and
 * rendezvous with the main test thread (signal cycled[0], then wait
 * on cycled[1] before re-taking the lock). */
static void testerhook(caster_t *self, caster_h state)
{
    if(state==casterUDPSetup) {
        epicsMutexUnlock(lock);
        epicsEventSignal(cycled[0]);
        epicsEventMustWait(cycled[1]);
        epicsMutexMustLock(lock);
    }
}
/* I/O Intr processing callback for the multi-threaded test: mark this
 * member's record as processed and rendezvous with the main thread. */
static void testcbmulti(xpriv *priv, void *raw)
{
    testmulti *self = (testmulti *)raw + priv->member;

    testOk1(self->hasprocd==0);
    self->hasprocd = 1;
    epicsEventMustTrigger(self->wait);  /* unblock main thread */
    epicsEventMustWait(self->wake);     /* wait for release */
    self->getcomplete = 1;
}
/* Create the dbCa link scanning task and block until it has started.
 * The task is left paused (dbCaCtl = ctlPause). */
void dbCaLinkInit(void)
{
    dbServiceIOInit();
    dbCaLinkInitIsolated();
    startStopEvent = epicsEventMustCreate(epicsEventEmpty);
    dbCaCtl = ctlPause;   /* task starts paused */

    epicsThreadCreate("dbCaLink", epicsThreadPriorityMedium,
        epicsThreadGetStackSize(epicsThreadStackBig),
        dbCaTask, NULL);
    /* rendezvous: dbCaTask presumably signals once it is running */
    epicsEventMustWait(startStopEvent);
}
/* Queue a connect-priority request and wait for its callback to run. */
static void connectTest(asynUser *pasynUser)
{
    cmdInfo *pcmd = (cmdInfo *)pasynUser->userPvt;
    threadInfo *pthread = pcmd->pthreadInfo;
    asynStatus status;

    fprintf(pcmd->file,"%s connect queueRequest\n", pthread->threadName);
    epicsEventTryWait(pcmd->callbackDone);  /* clear any stale event */
    status = pasynManager->queueRequest(pasynUser,asynQueuePriorityConnect,0.0);
    if(checkStatus(status,pthread,"connect")) return;
    epicsEventMustWait(pcmd->callbackDone);
}
/* Job expected to run exactly once: counts the run, signals cancel[0]
 * and blocks on cancel[1]; destroys itself when cleaned up. */
static void toolate(void *arg, epicsJobMode mode)
{
    epicsJob *self = arg;

    if(mode!=epicsJobModeCleanup) {
        testPass("Job runs");
        numtoolate++;
        epicsEventSignal(cancel[0]);
        epicsEventMustWait(cancel[1]);
        return;
    }
    epicsJobDestroy(self);
}
/* Exercise blockProcessCallback(): queue two requests while the port
 * is blocked, then unblock.  NOTE(review): correctness is presumably
 * judged from the log-file output, not asserted here. */
static void blockTest(asynUser *pasynUser)
{
    cmdInfo *pcmdInfo = (cmdInfo *)pasynUser->userPvt;
    threadInfo *pthreadInfo = pcmdInfo->pthreadInfo;
    asynStatus status;

    status = pasynManager->blockProcessCallback(pasynUser,0);
    if(checkStatus(status,pthreadInfo,"testBlock")) return;

    fprintf(pcmdInfo->file,"%s %s first queueRequest\n",
        pthreadInfo->threadName,pcmdInfo->message);
    epicsEventTryWait(pcmdInfo->callbackDone);  /* clear stale event */
    status = pasynManager->queueRequest(pasynUser,asynQueuePriorityLow,0.0);
    if(checkStatus(status,pthreadInfo,"testBlock")) return;
    epicsEventMustWait(pcmdInfo->callbackDone);

    epicsThreadSleep(.1);  /* give the port thread time to go idle */

    fprintf(pcmdInfo->file,"%s %s second queueRequest\n",
        pthreadInfo->threadName,pcmdInfo->message);
    status = pasynManager->queueRequest(pasynUser,asynQueuePriorityLow,0.0);
    if(checkStatus(status,pthreadInfo,"testBlock")) return;
    epicsEventMustWait(pcmdInfo->callbackDone);

    status = pasynManager->unblockProcessCallback(pasynUser,0);
    if(checkStatus(status,pthreadInfo,"testBlock")) return;
}
/* Test re-queueing a job while it is running. * Check that a single job won't run concurrently. */ static void testreadd(void) { epicsThreadPoolConfig conf; epicsThreadPool *pool; readdPriv *priv=callocMustSucceed(1, sizeof(*priv), "testcleanup priv"); readdPriv *priv2=callocMustSucceed(1, sizeof(*priv), "testcleanup priv"); testDiag("testreadd"); priv->done=epicsEventMustCreate(epicsEventEmpty); priv->count=5; priv2->done=epicsEventMustCreate(epicsEventEmpty); priv2->count=5; epicsThreadPoolConfigDefaults(&conf); conf.maxThreads = 2; testOk1((pool=epicsThreadPoolCreate(&conf))!=NULL); if(!pool) return; testOk1((priv->job=epicsJobCreate(pool, &readdjob, priv))!=NULL); testOk1((priv2->job=epicsJobCreate(pool, &readdjob, priv2))!=NULL); testOk1(epicsJobQueue(priv->job)==0); testOk1(epicsJobQueue(priv2->job)==0); epicsEventMustWait(priv->done); epicsEventMustWait(priv2->done); testOk1(epicsThreadPoolNThreads(pool)==2); testDiag("epicsThreadPoolNThreads = %d", epicsThreadPoolNThreads(pool)); epicsThreadPoolDestroy(pool); epicsEventDestroy(priv->done); epicsEventDestroy(priv2->done); free(priv); free(priv2); }
static void testcancel(void) { epicsJob *job[2]; epicsThreadPool *pool; testOk1((pool=epicsThreadPoolCreate(NULL))!=NULL); if(!pool) return; cancel[0]=epicsEventCreate(epicsEventEmpty); cancel[1]=epicsEventCreate(epicsEventEmpty); testOk1((job[0]=epicsJobCreate(pool, &neverrun, EPICSJOB_SELF))!=NULL); testOk1((job[1]=epicsJobCreate(pool, &toolate, EPICSJOB_SELF))!=NULL); /* freeze */ epicsThreadPoolControl(pool, epicsThreadPoolQueueRun, 0); testOk1(epicsJobUnqueue(job[0])==S_pool_jobIdle); /* not queued yet */ epicsJobQueue(job[0]); testOk1(epicsJobUnqueue(job[0])==0); testOk1(epicsJobUnqueue(job[0])==S_pool_jobIdle); epicsThreadSleep(0.01); epicsJobQueue(job[0]); testOk1(epicsJobUnqueue(job[0])==0); testOk1(epicsJobUnqueue(job[0])==S_pool_jobIdle); epicsThreadPoolControl(pool, epicsThreadPoolQueueRun, 1); epicsJobQueue(job[1]); /* actually let it run this time */ epicsEventMustWait(cancel[0]); testOk1(epicsJobUnqueue(job[0])==S_pool_jobIdle); epicsEventSignal(cancel[1]); epicsThreadPoolDestroy(pool); epicsEventDestroy(cancel[0]); epicsEventDestroy(cancel[1]); testOk1(shouldneverrun==0); testOk1(numtoolate==1); }
/* Pool job used by postjobs(): decrement the shared counter, signal
 * allrunning when every job is active, then wait for the done event
 * and re-signal it for the next job. */
static void countjob(void *param, epicsJobMode mode)
{
    countPriv *priv=param;

    testOk1(mode==epicsJobModeRun||mode==epicsJobModeCleanup);
    if(mode!=epicsJobModeCleanup) {
        epicsMutexMustLock(priv->guard);
        testDiag("Job %lu", (unsigned long)priv->count);
        if(--priv->count==0) {
            testDiag("All jobs running");
            epicsEventSignal(priv->allrunning);
        }
        epicsMutexUnlock(priv->guard);

        epicsEventMustWait(priv->done);
        epicsEventSignal(priv->done); /* pass along to next thread */
    }
}
/* Interrupt simulation thread: once armed via waitWork, repeatedly
 * advances a test pattern on every channel and delivers the values to
 * all registered asynUInt32Digital interrupt clients, until
 * interruptDelay is set (near) zero. */
static void interruptThread(drvPvt *pdrvPvt)
{
    while(1) {
        epicsEventMustWait(pdrvPvt->waitWork);  /* wait to be armed */
        while(1) {
            int addr;
            epicsUInt32 value;
            ELLLIST *pclientList;
            interruptNode *pnode;
            asynUInt32DigitalInterrupt *pinterrupt;

            if(pdrvPvt->interruptDelay <= .0001) break;  /* disarm request */

            /* advance each channel's value: count 0..0xf, then walk the
             * set bit left until the top bit, then wrap back to 0 */
            for(addr=0; addr<NCHANNELS; addr++) {
                chanPvt *pchannel = &pdrvPvt->channel[addr];
                epicsMutexMustLock(pdrvPvt->lock);
                value = pchannel->value;
                if(value<0xf) {
                    value +=1;
                } else if(value&0x80000000) {
                    value = 0;
                } else {
                    value <<= 1;
                }
                pchannel->value = value;
                epicsMutexUnlock(pdrvPvt->lock);
            }

            /* deliver the current values to every registered client */
            pasynManager->interruptStart(
                pdrvPvt->asynUInt32DigitalPvt,&pclientList);
            pnode = (interruptNode *)ellFirst(pclientList);
            while (pnode) {
                pinterrupt = pnode->drvPvt;
                addr = pinterrupt->addr;
                pinterrupt->callback(pinterrupt->userPvt,
                    pinterrupt->pasynUser,
                    pdrvPvt->channel[addr].value);
                pnode = (interruptNode *)ellNext(&pnode->node);
            }
            pasynManager->interruptEnd(pdrvPvt->asynUInt32DigitalPvt);
            epicsThreadSleep(pdrvPvt->interruptDelay);
        }
    }
}
/* Stop the dbCa task (if it was started) and wait for it to exit.
 * If the task never ran, drain the work list directly, freeing only
 * links queued for CA_CLEAR_CHANNEL. */
void dbCaShutdown(void)
{
    if (dbCaCtl == ctlRun || dbCaCtl == ctlPause) {
        dbCaCtl = ctlExit;                   /* ask dbCaTask to exit */
        epicsEventSignal(workListEvent);
        epicsEventMustWait(startStopEvent);  /* task acknowledges exit */
        epicsEventDestroy(startStopEvent);
    } else {
        /* manually cleanup queue since dbCa thread isn't running
         * which only happens in unit tests */
        caLink *pca;
        epicsMutexMustLock(workListLock);
        while((pca=(caLink*)ellGet(&workList))!=NULL) {
            if(pca->link_action&CA_CLEAR_CHANNEL) {
                dbCaLinkFree(pca);
            }
        }
        epicsMutexUnlock(workListLock);
    }
}
static void onceTask(void *arg) { taskwdInsert(0, NULL, NULL); epicsEventSignal(startStopEvent); while (TRUE) { void *precord; epicsEventMustWait(onceSem); while ((precord = epicsRingPointerPop(onceQ))) { if (precord == &exitOnce) goto shutdown; dbScanLock(precord); dbProcess(precord); dbScanUnlock(precord); } } shutdown: taskwdRemove(0); epicsEventSignal(startStopEvent); }
/* Block until the errlog thread has written out everything currently
 * in the message queue.  No-op during exit or if the queue is empty. */
epicsShareFunc void epicsShareAPI errlogFlush(void)
{
    int count;

    errlogInit(0);               /* ensure errlog is initialized */
    if (pvtData.atExit) return;  /* shutting down; thread may be gone */

    /*If nothing in queue dont wake up errlogThread*/
    epicsMutexMustLock(pvtData.msgQueueLock);
    count = ellCount(&pvtData.msgQueue);
    epicsMutexUnlock(pvtData.msgQueueLock);
    if (count <= 0) return;

    /*must let errlogThread empty queue*/
    epicsMutexMustLock(pvtData.flushLock);    /* one flusher at a time */
    epicsEventSignal(pvtData.flush);          /* request a flush pass */
    epicsEventSignal(pvtData.waitForWork);    /* wake the errlog thread */
    epicsEventMustWait(pvtData.waitForFlush); /* wait for completion */
    epicsMutexUnlock(pvtData.flushLock);
}
/* Wait until the pool has no queued jobs and no awake workers.
 * timeout < 0 waits forever.
 * Returns 0 on success or S_pool_timeout on timeout. */
int epicsThreadPoolWait(epicsThreadPool *pool, double timeout)
{
    int ret = 0;

    epicsMutexMustLock(pool->guard);
    while (ellCount(&pool->jobs) > 0 || pool->threadsAreAwake > 0) {
        /* register as an observer, then wait outside the lock */
        pool->observerCount++;
        epicsMutexUnlock(pool->guard);
        if (timeout < 0.0) {
            epicsEventMustWait(pool->observerWakeup);
        }
        else {
            switch (epicsEventWaitWithTimeout(pool->observerWakeup, timeout)) {
            case epicsEventWaitError:
                cantProceed("epicsThreadPoolWait: failed to wait for Event");
                break;
            case epicsEventWaitTimeout:
                ret = S_pool_timeout;
                break;
            case epicsEventWaitOK:
                ret = 0;
                break;
            }
        }
        epicsMutexMustLock(pool->guard);
        pool->observerCount--;
        /* the wakeup event is shared: pass it on to other observers */
        if (pool->observerCount)
            epicsEventSignal(pool->observerWakeup);
        if (ret != 0)
            break;   /* timed out: report it even if now idle */
    }
    epicsMutexUnlock(pool->guard);
    return ret;
}
/* Start one thread per CPU which will all try lock * the same spinlock. They break as soon as one * fails to take the lock. */ static void verifyTryLock() { int N, i; struct verifyTryLock verify; N = epicsThreadGetCPUs(); if(N==1) { testSkip(1, "verifyTryLock() only for SMP systems"); return; } verify.flag = 0; verify.spin = epicsSpinMustCreate(); testDiag("Starting %d spinners", N); verify.ents = calloc(N, sizeof(*verify.ents)); for(i=0; i<N; i++) { verify.ents[i].main = &verify; verify.ents[i].done = epicsEventMustCreate(epicsEventEmpty); epicsThreadMustCreate("verifyTryLockThread", 40, epicsThreadGetStackSize(epicsThreadStackSmall), verifyTryLockThread, &verify.ents[i]); } testDiag("All started"); for(i=0; i<N; i++) { epicsEventMustWait(verify.ents[i].done); epicsEventDestroy(verify.ents[i].done); } testDiag("All done"); testOk(verify.flag==1, "epicsTryLock returns %d (expect 1)", verify.flag); epicsSpinDestroy(verify.spin); free(verify.ents); }
/* Per-thread worker for the asyn queueRequest tests: waits for a
 * command, dispatches the requested test case, and signals done after
 * each one.  Exits when the quit command is received. */
static void workThread(threadInfo *pthreadInfo)
{
    cmdInfo *pcmdInfo;
    asynUser *pasynUser;

    while(1) {
        epicsEventMustWait(pthreadInfo->work);  /* new command posted */
        pcmdInfo = pthreadInfo->pcmdInfo;
        pasynUser = pcmdInfo->pasynUser;
        if(pcmdInfo->test==quit) break;
        switch(pcmdInfo->test) {
        case connect: connectTest(pasynUser);break;
        case testBlock: blockTest(pasynUser); break;
        case testCancelRequest: cancelTest(pasynUser); break;
        default:
            fprintf(pcmdInfo->file,"%s workThread illegal test %d\n",
                pthreadInfo->threadName,pcmdInfo->test);
        }
        fprintf(pcmdInfo->file,"%s %s all done\n",
            pthreadInfo->threadName,pcmdInfo->message);
        epicsEventSignal(pthreadInfo->done);    /* command finished */
    }
    epicsEventSignal(pthreadInfo->done);        /* acknowledge quit */
}
/* The errlog daemon thread: forwards queued messages to the console
 * and to all registered listeners until atExit is set.  Also services
 * flush requests from errlogFlush(). */
static void errlogThread(void)
{
    listenerNode *plistenerNode;
    int noConsoleMessage;
    char *pmessage;

    epicsAtExit(errlogExitHandler,0);
    while (TRUE) {
        epicsEventMustWait(pvtData.waitForWork);
        while ((pmessage = msgbufGetSend(&noConsoleMessage))) {
            epicsMutexMustLock(pvtData.listenerLock);
            if (pvtData.toConsole && !noConsoleMessage) {
                fprintf(pvtData.console,"%s",pmessage);
                fflush(pvtData.console);
            }
            /* fan the message out to every registered listener */
            plistenerNode = (listenerNode *)ellFirst(&pvtData.listenerList);
            while (plistenerNode) {
                (*plistenerNode->listener)(plistenerNode->pPrivate, pmessage);
                plistenerNode = (listenerNode *)ellNext(&plistenerNode->node);
            }
            epicsMutexUnlock(pvtData.listenerLock);
            msgbufFreeSend();   /* release the message buffer */
        }
        if (pvtData.atExit) break;
        /* flush requested? give writers a moment, then acknowledge */
        if (epicsEventTryWait(pvtData.flush) != epicsEventWaitOK) continue;
        epicsThreadSleep(.2); /*just wait an extra .2 seconds*/
        epicsEventSignal(pvtData.waitForFlush);
    }
    epicsEventSignal(pvtData.waitForExit);
}
static void testMultiThreading(void) { int i; int masks[2]; testmulti data[6]; xdrv *drvs[2]; memset(masks, 0, sizeof(masks)); memset(data, 0, sizeof(data)); for(i=0; i<NELEMENTS(data); i++) { data[i].wake = epicsEventMustCreate(epicsEventEmpty); data[i].wait = epicsEventMustCreate(epicsEventEmpty); } testDiag("Test multi-threaded I/O Intr scanning"); testdbPrepare(); testdbReadDatabase("dbTestIoc.dbd", NULL, NULL); dbTestIoc_registerRecordDeviceDriver(pdbbase); /* create two scan lists with one record on each of three priorities */ /* group#, member#, priority */ loadRecord(0, 0, "LOW"); loadRecord(1, 1, "LOW"); loadRecord(0, 2, "MEDIUM"); loadRecord(1, 3, "MEDIUM"); loadRecord(0, 4, "HIGH"); loadRecord(1, 5, "HIGH"); drvs[0] = xdrv_add(0, &testcbmulti, data); drvs[1] = xdrv_add(1, &testcbmulti, data); scanIoSetComplete(drvs[0]->scan, &testcompmulti, &masks[0]); scanIoSetComplete(drvs[1]->scan, &testcompmulti, &masks[1]); /* just enough workers to process all records concurrently */ callbackParallelThreads(2, "LOW"); callbackParallelThreads(2, "MEDIUM"); callbackParallelThreads(2, "HIGH"); eltc(0); testIocInitOk(); eltc(1); testDiag("Scan first list"); testOk1(scanIoRequest(drvs[0]->scan)==0x7); testDiag("Scan second list"); testOk1(scanIoRequest(drvs[1]->scan)==0x7); testDiag("Wait for everything to start"); for(i=0; i<NELEMENTS(data); i++) epicsEventMustWait(data[i].wait); testDiag("Wait one more second"); epicsThreadSleep(1.0); for(i=0; i<NELEMENTS(data); i++) { testOk(data[i].hasprocd==1, "data[%d].hasprocd==1 (%d)", i, data[i].hasprocd); testOk(data[i].getcomplete==0, "data[%d].getcomplete==0 (%d)", i, data[i].getcomplete); } testDiag("Release all and complete"); for(i=0; i<NELEMENTS(data); i++) epicsEventMustTrigger(data[i].wake); testIocShutdownOk(); for(i=0; i<NELEMENTS(data); i++) { testOk(data[i].getcomplete==1, "data[%d].getcomplete==0 (%d)", i, data[i].getcomplete); } testOk1(masks[0]==0x7); testOk1(masks[1]==0x7); testdbCleanup(); xdrv_reset(); 
for(i=0; i<NELEMENTS(data); i++) { epicsEventDestroy(data[i].wake); epicsEventDestroy(data[i].wait); } }
/*
 * req_server()
 *
 * CA server task
 *
 * Waits for connections at the CA port and spawns a task to
 * handle each of them
 */
static void req_server (void *pParm)
{
    unsigned priorityOfSelf = epicsThreadGetPrioritySelf ();
    unsigned priorityOfBeacons;
    epicsThreadBooleanStatus tbs;
    struct sockaddr_in serverAddr;  /* server's address */
    osiSocklen_t addrSize;
    int status;
    SOCKET clientSock;
    epicsThreadId tid;
    int portChange;

    /* writing to a dead socket must not kill the server */
    epicsSignalInstallSigPipeIgnore ();

    taskwdInsert ( epicsThreadGetIdSelf (), NULL, NULL );

    rsrvCurrentClient = epicsThreadPrivateCreate ();

    /* EPICS_CAS_SERVER_PORT overrides EPICS_CA_SERVER_PORT when set */
    if ( envGetConfigParamPtr ( &EPICS_CAS_SERVER_PORT ) ) {
        ca_server_port = envGetInetPortConfigParam ( &EPICS_CAS_SERVER_PORT,
            (unsigned short) CA_SERVER_PORT );
    }
    else {
        ca_server_port = envGetInetPortConfigParam ( &EPICS_CA_SERVER_PORT,
            (unsigned short) CA_SERVER_PORT );
    }

    if (IOC_sock != 0 && IOC_sock != INVALID_SOCKET) {
        epicsSocketDestroy ( IOC_sock );  /* discard socket from earlier run */
    }

    /*
     * Open the socket. Use ARPA Internet address format and stream
     * sockets. Format described in <sys/socket.h>.
     */
    if ( ( IOC_sock = epicsSocketCreate (AF_INET, SOCK_STREAM, 0) ) == INVALID_SOCKET ) {
        errlogPrintf ("CAS: Socket creation error\n");
        epicsThreadSuspendSelf ();
    }

    epicsSocketEnableAddressReuseDuringTimeWaitState ( IOC_sock );

    /* Zero the sock_addr structure */
    memset ( (void *) &serverAddr, 0, sizeof ( serverAddr ) );
    serverAddr.sin_family = AF_INET;
    serverAddr.sin_addr.s_addr = htonl (INADDR_ANY);
    serverAddr.sin_port = htons ( ca_server_port );

    /* get server's Internet address */
    status = bind ( IOC_sock, (struct sockaddr *) &serverAddr, sizeof ( serverAddr ) );
    if ( status < 0 ) {
        if ( SOCKERRNO == SOCK_EADDRINUSE ) {
            /*
             * enable assignment of a default port
             * (so the getsockname() call below will
             * work correctly)
             */
            serverAddr.sin_port = ntohs (0);
            status = bind ( IOC_sock, (struct sockaddr *) &serverAddr, sizeof ( serverAddr ) );
        }
        if ( status < 0 ) {
            char sockErrBuf[64];
            epicsSocketConvertErrnoToString ( sockErrBuf, sizeof ( sockErrBuf ) );
            errlogPrintf ( "CAS: Socket bind error was \"%s\"\n", sockErrBuf );
            epicsThreadSuspendSelf ();
        }
        portChange = 1;   /* fell back to a dynamically assigned port */
    }
    else {
        portChange = 0;
    }

    /* read back the port actually bound (may be dynamically assigned) */
    addrSize = ( osiSocklen_t ) sizeof ( serverAddr );
    status = getsockname ( IOC_sock, (struct sockaddr *)&serverAddr, &addrSize);
    if ( status ) {
        char sockErrBuf[64];
        epicsSocketConvertErrnoToString ( sockErrBuf, sizeof ( sockErrBuf ) );
        errlogPrintf ( "CAS: getsockname() error %s\n", sockErrBuf );
        epicsThreadSuspendSelf ();
    }
    ca_server_port = ntohs (serverAddr.sin_port);

    if ( portChange ) {
        errlogPrintf ( "cas warning: Configured TCP port was unavailable.\n");
        errlogPrintf ( "cas warning: Using dynamically assigned TCP port %hu,\n",
            ca_server_port );
        errlogPrintf ( "cas warning: but now two or more servers share the same UDP port.\n");
        errlogPrintf ( "cas warning: Depending on your IP kernel this server may not be\n" );
        errlogPrintf ( "cas warning: reachable with UDP unicast (a host's IP in EPICS_CA_ADDR_LIST)\n" );
    }

    /* listen and accept new connections */
    if ( listen ( IOC_sock, 20 ) < 0 ) {
        errlogPrintf ("CAS: Listen error\n");
        epicsSocketDestroy (IOC_sock);
        epicsThreadSuspendSelf ();
    }

    /* run the beacon thread just below our own priority */
    tbs = epicsThreadHighestPriorityLevelBelow ( priorityOfSelf, &priorityOfBeacons );
    if ( tbs != epicsThreadBooleanStatusSuccess ) {
        priorityOfBeacons = priorityOfSelf;
    }

    beacon_startStopEvent = epicsEventMustCreate(epicsEventEmpty);
    beacon_ctl = ctlPause;

    tid = epicsThreadCreate ( "CAS-beacon", priorityOfBeacons,
        epicsThreadGetStackSize (epicsThreadStackSmall),
        rsrv_online_notify_task, 0 );
    if ( tid == 0 ) {
        epicsPrintf ( "CAS: unable to start beacon thread\n" );
    }

    epicsEventMustWait(beacon_startStopEvent);  /* beacon task is up */
    epicsEventSignal(castcp_startStopEvent);    /* unblock rsrv_init() */

    while (TRUE) {
        struct sockaddr sockAddr;
        osiSocklen_t addLen = sizeof(sockAddr);

        while (castcp_ctl == ctlPause) {
            epicsThreadSleep(0.1);   /* server paused; poll for resume */
        }

        clientSock = epicsSocketAccept ( IOC_sock, &sockAddr, &addLen );
        if ( clientSock == INVALID_SOCKET ) {
            char sockErrBuf[64];
            epicsSocketConvertErrnoToString ( sockErrBuf, sizeof ( sockErrBuf ) );
            errlogPrintf("CAS: Client accept error was \"%s\"\n", sockErrBuf );
            epicsThreadSleep(15.0);  /* back off, e.g. out of descriptors */
            continue;
        }
        else {
            epicsThreadId id;
            struct client *pClient;

            /* socket passed in is closed if unsuccessful here */
            pClient = create_tcp_client ( clientSock );
            if ( ! pClient ) {
                epicsThreadSleep ( 15.0 );
                continue;
            }

            LOCK_CLIENTQ;
            ellAdd ( &clientQ, &pClient->node );
            UNLOCK_CLIENTQ;

            id = epicsThreadCreate ( "CAS-client",
                epicsThreadPriorityCAServerLow,
                epicsThreadGetStackSize ( epicsThreadStackBig ),
                camsgtask, pClient );
            if ( id == 0 ) {
                /* unwind the queue entry and drop the client */
                LOCK_CLIENTQ;
                ellDelete ( &clientQ, &pClient->node );
                UNLOCK_CLIENTQ;
                destroy_tcp_client ( pClient );
                errlogPrintf ( "CAS: task creation for new client failed\n" );
                epicsThreadSleep ( 15.0 );
                continue;
            }
        }
    }
}
/*
 * rsrv_init ()
 *
 * Initialize the channel access server: free lists, TCP buffer sizing
 * from EPICS_CA_MAX_ARRAY_BYTES, and the CAS-TCP connection request
 * thread.  Blocks until req_server() signals that it is ready.
 */
int rsrv_init (void)
{
    epicsThreadBooleanStatus tbs;
    unsigned priorityOfConnectDaemon;
    epicsThreadId tid;
    long maxBytesAsALong;
    long status;

    clientQlock = epicsMutexMustCreate();

    ellInit ( &clientQ );
    freeListInitPvt ( &rsrvClientFreeList, sizeof(struct client), 8 );
    freeListInitPvt ( &rsrvChanFreeList, sizeof(struct channel_in_use), 512 );
    freeListInitPvt ( &rsrvEventFreeList, sizeof(struct event_ext), 512 );
    freeListInitPvt ( &rsrvSmallBufFreeListTCP, MAX_TCP, 16 );
    initializePutNotifyFreeList ();

    /* size the large TCP buffer from EPICS_CA_MAX_ARRAY_BYTES */
    status = envGetLongConfigParam ( &EPICS_CA_MAX_ARRAY_BYTES, &maxBytesAsALong );
    if ( status || maxBytesAsALong < 0 ) {
        errlogPrintf ( "CAS: EPICS_CA_MAX_ARRAY_BYTES was not a positive integer\n" );
        rsrvSizeofLargeBufTCP = MAX_TCP;
    }
    else {
        /* allow room for the protocol header so that they get the array size they requested */
        static const unsigned headerSize = sizeof ( caHdr ) + 2 * sizeof ( ca_uint32_t );
        ca_uint32_t maxBytes = ( unsigned ) maxBytesAsALong;
        if ( maxBytes < 0xffffffff - headerSize ) {
            maxBytes += headerSize;
        }
        else {
            maxBytes = 0xffffffff;   /* clamp: avoid 32-bit wrap-around */
        }
        if ( maxBytes < MAX_TCP ) {
            errlogPrintf ( "CAS: EPICS_CA_MAX_ARRAY_BYTES was rounded up to %u\n",
                MAX_TCP );
            rsrvSizeofLargeBufTCP = MAX_TCP;
        }
        else {
            rsrvSizeofLargeBufTCP = maxBytes;
        }
    }
    freeListInitPvt ( &rsrvLargeBufFreeListTCP, rsrvSizeofLargeBufTCP, 1 );
    ellInit ( &beaconAddrList );
    prsrv_cast_client = NULL;
    pCaBucket = NULL;

    castcp_startStopEvent = epicsEventMustCreate(epicsEventEmpty);
    castcp_ctl = ctlPause;   /* req_server stays paused until started */

    /*
     * go down two levels so that we are below
     * the TCP and event threads started on behalf
     * of individual clients
     */
    tbs = epicsThreadHighestPriorityLevelBelow (
        epicsThreadPriorityCAServerLow, &priorityOfConnectDaemon );
    if ( tbs == epicsThreadBooleanStatusSuccess ) {
        tbs = epicsThreadHighestPriorityLevelBelow (
            priorityOfConnectDaemon, &priorityOfConnectDaemon );
        if ( tbs != epicsThreadBooleanStatusSuccess ) {
            priorityOfConnectDaemon = epicsThreadPriorityCAServerLow;
        }
    }
    else {
        priorityOfConnectDaemon = epicsThreadPriorityCAServerLow;
    }

    tid = epicsThreadCreate ( "CAS-TCP",
        priorityOfConnectDaemon,
        epicsThreadGetStackSize(epicsThreadStackMedium),
        req_server, 0);
    if ( tid == 0 ) {
        epicsPrintf ( "CAS: unable to start connection request thread\n" );
    }

    epicsEventMustWait(castcp_startStopEvent);  /* wait for req_server */

    return RSRV_OK;
}
/* Main loop for a thread pool worker.  Bookkeeping invariant: each
 * worker is counted in exactly one of threadsAreAwake/threadsSleeping,
 * plus possibly threadsWaking while a wakeup is in flight. */
static void workerMain(void *arg)
{
    epicsThreadPool *pool = arg;
    unsigned int nrun, ocnt;

    /* workers are created with counts
     * in the running, sleeping, and (possibly) waking counters */
    epicsMutexMustLock(pool->guard);
    pool->threadsAreAwake++;
    pool->threadsSleeping--;

    while (1) {
        ELLNODE *cur;

        pool->threadsAreAwake--;
        pool->threadsSleeping++;
        epicsMutexUnlock(pool->guard);

        epicsEventMustWait(pool->workerWakeup);

        epicsMutexMustLock(pool->guard);
        pool->threadsSleeping--;
        pool->threadsAreAwake++;

        /* wakeup not matched by threadsWaking is spurious; sleep again */
        if (pool->threadsWaking==0)
            continue;
        pool->threadsWaking--;
        CHECKCOUNT(pool);

        if (pool->shutdown)
            break;
        if (pool->pauserun)
            continue;

        /* more threads to wakeup */
        if (pool->threadsWaking) {
            epicsEventSignal(pool->workerWakeup);
        }

        while ((cur=ellGet(&pool->jobs)) != NULL) {
            epicsJob *job = CONTAINER(cur, epicsJob, jobnode);
            assert(job->queued && !job->running);
            job->queued=0;
            job->running=1;

            /* user callback runs outside the pool lock */
            epicsMutexUnlock(pool->guard);
            (*job->func)(job->arg, epicsJobModeRun);
            epicsMutexMustLock(pool->guard);

            if (job->freewhendone) {
                job->dead=1;
                free(job);
            }
            else {
                job->running=0;
                /* job may be re-queued from within callback */
                if (job->queued)
                    ellAdd(&pool->jobs, &job->jobnode);
                else
                    ellAdd(&pool->owned, &job->jobnode);
            }
        }

        if (pool->observerCount)
            epicsEventSignal(pool->observerWakeup);
    }

    /* shutdown path: hand the wakeup on; last worker signals done */
    pool->threadsAreAwake--;
    pool->threadsRunning--;
    nrun = pool->threadsRunning;
    ocnt = pool->observerCount;
    epicsMutexUnlock(pool->guard);

    if (ocnt)
        epicsEventSignal(pool->observerWakeup);
    if (nrun)
        epicsEventSignal(pool->workerWakeup); /* pass along */
    else
        epicsEventSignal(pool->shutdownEvent);
}
/* Drive the UDP phase of the caster against a local sender socket:
 * a direct server announcement, a proxied announcement, then shutdown.
 * Rendezvous with the tester thread uses the cycled[] events:
 * cycled[1] = go-ahead to tester, cycled[0] = cycle complete. */
static void testUDP(void)
{
    caster_t caster;
    shSocket sender;
    osiSockAddr dest;
    union casterUDP buf;

    shSocketInit(&sender);

    sender.sd = shCreateSocket(AF_INET, SOCK_DGRAM, 0);
    if(sender.sd==INVALID_SOCKET) {
        testAbort("Failed to create socket");
        return;
    }

    lock = epicsMutexMustCreate();
    cycled[0] = epicsEventMustCreate(epicsEventEmpty);
    cycled[1] = epicsEventMustCreate(epicsEventEmpty);

    casterInit(&caster);

    caster.udpport = 0; /* test with random port */
    caster.testhook = &testerhook;

    epicsThreadMustCreate("udptester",
        epicsThreadPriorityMedium,
        epicsThreadGetStackSize(epicsThreadStackSmall),
        &tester, &caster);

    epicsEventSignal(cycled[1]);

    /* wait for tester thread to setup socket */
    epicsEventMustWait(cycled[0]);

    epicsMutexMustLock(lock);
    testOk1(caster.udpport!=0);
    testDiag("UDP test with port %d", caster.udpport);

    /* aim the sender at the loopback address / chosen port */
    memset(&dest, 0, sizeof(dest));
    dest.ia.sin_family = AF_INET;
    dest.ia.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    dest.ia.sin_port = htons(caster.udpport);
    epicsMutexUnlock(lock);

    /* allow tester thread to begin recv() */
    epicsEventSignal(cycled[1]);

    testDiag("Test announcement directly from server");

    memset(&buf, 0, sizeof(buf));
    buf.m_msg.pid = htons(RECAST_MAGIC);
    buf.m_msg.serverIP = htonl(0xffffffff);  /* "use source IP" marker */
    buf.m_msg.serverPort = htons(0x1020);
    buf.m_msg.serverKey = htonl(0x12345678);

    testOk1(0==shSendTo(&sender, &buf.m_bytes, 0x10, 0, &dest));

    /* wait for tester thread to complete recv() and end cycle */
    epicsEventMustWait(cycled[0]);

    epicsMutexMustLock(lock);
    testOk1(cycles==1);
    testOk1(result==0);
    testOk1(caster.haveserv==1);
    testOk1(caster.nameserv.ia.sin_family==AF_INET);
    testOk1(caster.nameserv.ia.sin_addr.s_addr==htonl(INADDR_LOOPBACK));
    testOk1(caster.nameserv.ia.sin_port==htons(0x1020));
    testOk1(caster.servkey==0x12345678);
    epicsMutexUnlock(lock);

    testDiag("Test proxied announcement");

    /* start next cycle */
    epicsEventSignal(cycled[1]);

    /* wait for tester thread to setup socket */
    epicsEventMustWait(cycled[0]);

    epicsMutexMustLock(lock);
    dest.ia.sin_port = htons(caster.udpport);  /* port changes each cycle */
    epicsMutexUnlock(lock);

    buf.m_msg.serverIP = htonl(0x50607080);    /* explicit server address */

    /* allow tester thread to begin recv() */
    epicsEventSignal(cycled[1]);

    testOk1(0==shSendTo(&sender, &buf.m_bytes, 0x10, 0, &dest));

    /* wait for tester thread to complete recv() and end cycle */
    epicsEventMustWait(cycled[0]);

    epicsMutexMustLock(lock);
    testOk1(cycles==2);
    testOk1(result==0);
    testOk1(caster.haveserv==1);
    testOk1(caster.nameserv.ia.sin_family==AF_INET);
    testOk1(caster.nameserv.ia.sin_addr.s_addr==htonl(0x50607080));
    testOk1(caster.nameserv.ia.sin_port==htons(0x1020));
    epicsMutexUnlock(lock);

    /* begin shutdown cycle */
    epicsEventSignal(cycled[1]);
    epicsEventMustWait(cycled[0]);
    epicsEventSignal(cycled[1]);

    casterShutdown(&caster);

    epicsEventDestroy(cycled[0]);
    epicsEventDestroy(cycled[1]);
    epicsMutexDestroy(lock);
}
/* Single-threaded I/O Intr scan test: two scan lists, one record per
 * priority each.  With the default single callback thread per
 * priority, the second list cannot process until the first list's
 * callbacks are released. */
static void testSingleThreading(void)
{
    int i;
    testsingle data[2];
    xdrv *drvs[2];

    memset(data, 0, sizeof(data));
    for(i=0; i<2; i++) {
        int p;
        for(p=0; p<NUM_CALLBACK_PRIORITIES; p++) {
            data[i].wake[p] = epicsEventMustCreate(epicsEventEmpty);
            data[i].wait[p] = epicsEventMustCreate(epicsEventEmpty);
        }
    }

    testDiag("Test single-threaded I/O Intr scanning");

    testdbPrepare();

    testdbReadDatabase("dbTestIoc.dbd", NULL, NULL);
    dbTestIoc_registerRecordDeviceDriver(pdbbase);

    /* create two scan lists with one record on each of three priorities */
    /* group#, member#, priority */
    loadRecord(0, 0, "LOW");
    loadRecord(1, 0, "LOW");
    loadRecord(0, 1, "MEDIUM");
    loadRecord(1, 1, "MEDIUM");
    loadRecord(0, 2, "HIGH");
    loadRecord(1, 2, "HIGH");

    drvs[0] = xdrv_add(0, &testcb, &data[0]);
    drvs[1] = xdrv_add(1, &testcb, &data[1]);
    scanIoSetComplete(drvs[0]->scan, &testcomp, &data[0]);
    scanIoSetComplete(drvs[1]->scan, &testcomp, &data[1]);

    eltc(0);
    testIocInitOk();
    eltc(1);

    testDiag("Scan first list");
    scanIoRequest(drvs[0]->scan);
    testDiag("Scan second list");
    scanIoRequest(drvs[1]->scan);

    testDiag("Wait for first list to complete");
    for(i=0; i<NUM_CALLBACK_PRIORITIES; i++)
        epicsEventMustWait(data[0].wait[i]);

    testDiag("Wait one more second");
    epicsThreadSleep(1.0);

    /* first list done; second still blocked behind it */
    testOk1(data[0].hasprocd[0]==1);
    testOk1(data[0].hasprocd[1]==1);
    testOk1(data[0].hasprocd[2]==1);
    testOk1(data[0].getcomplete[0]==1);
    testOk1(data[0].getcomplete[1]==1);
    testOk1(data[0].getcomplete[2]==1);
    testOk1(data[1].hasprocd[0]==0);
    testOk1(data[1].hasprocd[1]==0);
    testOk1(data[1].hasprocd[2]==0);
    testOk1(data[1].getcomplete[0]==0);
    testOk1(data[1].getcomplete[1]==0);
    testOk1(data[1].getcomplete[2]==0);

    testDiag("Release the first scan and wait for the second");
    for(i=0; i<NUM_CALLBACK_PRIORITIES; i++)
        epicsEventMustTrigger(data[0].wake[i]);
    for(i=0; i<NUM_CALLBACK_PRIORITIES; i++)
        epicsEventMustWait(data[1].wait[i]);

    /* now both lists have processed */
    testOk1(data[0].hasprocd[0]==1);
    testOk1(data[0].hasprocd[1]==1);
    testOk1(data[0].hasprocd[2]==1);
    testOk1(data[0].getcomplete[0]==1);
    testOk1(data[0].getcomplete[1]==1);
    testOk1(data[0].getcomplete[2]==1);
    testOk1(data[1].hasprocd[0]==1);
    testOk1(data[1].hasprocd[1]==1);
    testOk1(data[1].hasprocd[2]==1);
    testOk1(data[1].getcomplete[0]==1);
    testOk1(data[1].getcomplete[1]==1);
    testOk1(data[1].getcomplete[2]==1);

    testDiag("Release the second scan and complete");
    for(i=0; i<NUM_CALLBACK_PRIORITIES; i++)
        epicsEventMustTrigger(data[1].wake[i]);

    testIocShutdownOk();

    testdbCleanup();
    xdrv_reset();

    for(i=0; i<2; i++) {
        int p;
        for(p=0; p<NUM_CALLBACK_PRIORITIES; p++) {
            epicsEventDestroy(data[i].wake[p]);
            epicsEventDestroy(data[i].wait[p]);
        }
    }
}