// extern "C" int epicsShareAPI ca_context_create ( ca_preemptive_callback_select premptiveCallbackSelect ) { ca_client_context *pcac; try { epicsThreadOnce ( & caClientContextIdOnce, ca_init_client_context, 0); if ( caClientContextId == 0 ) { return ECA_ALLOCMEM; } pcac = ( ca_client_context * ) epicsThreadPrivateGet ( caClientContextId ); if ( pcac ) { if ( premptiveCallbackSelect == ca_enable_preemptive_callback && ! pcac->preemptiveCallbakIsEnabled() ) { return ECA_NOTTHREADED; } return ECA_NORMAL; } pcac = new ca_client_context ( premptiveCallbackSelect == ca_enable_preemptive_callback ); if ( ! pcac ) { return ECA_ALLOCMEM; } epicsThreadPrivateSet ( caClientContextId, (void *) pcac ); } catch ( ... ) { return ECA_ALLOCMEM; } return ECA_NORMAL; }
static void lockInfo (void)
{
    static epicsThreadOnceId infoMutexOnceFlag = EPICS_THREAD_ONCE_INIT;

    epicsThreadOnce (&infoMutexOnceFlag, createInfoMutex, NULL);
    epicsMutexMustLock (infoMutex);
}
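/* The once-routine referenced above is not shown in this snippet. A
 * minimal sketch, assuming createInfoMutex only needs to allocate the
 * mutex that lockInfo() then takes (infoMutex is declared in the real
 * source; repeated here so the sketch is self-contained):
 */
static epicsMutexId infoMutex;

static void createInfoMutex (void *unused)
{
    infoMutex = epicsMutexMustCreate ();
}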
void ClockTime_Init(int synchronize)
{
    int firstTime = 0;

    epicsThreadOnce(&onceId, ClockTime_InitOnce, &firstTime);

    if (synchronize == CLOCKTIME_SYNC) {
        if (ClockTimePvt.synchronize == CLOCKTIME_NOSYNC) {
            /* Start synchronizing */
            ClockTimePvt.synchronize = synchronize;

            epicsThreadCreate("ClockTimeSync", epicsThreadPriorityHigh,
                epicsThreadGetStackSize(epicsThreadStackSmall),
                ClockTimeSync, NULL);
        }
        else {
            /* No change, sync thread should already be running */
        }
    }
    else {
        if (ClockTimePvt.synchronize == CLOCKTIME_SYNC) {
            /* Turn off synchronization thread */
            ClockTime_Shutdown(NULL);
        }
        else {
            /* No synchronization thread */
            if (firstTime)
                ClockTimeGetCurrent(&ClockTimePvt.startTime);
        }
    }
}
static long asInitCommon(void)
{
    long status;
    int asWasActive = asActive;
    int wasFirstTime = firstTime;
    static epicsThreadOnceId asInitCommonOnceFlag = EPICS_THREAD_ONCE_INIT;

    epicsThreadOnce(&asInitCommonOnceFlag, asInitCommonOnce, (void *)&firstTime);
    if (wasFirstTime) {
        if (!pacf)
            return 0;   /* access security will NEVER be turned on */
    }
    else {
        if (!asActive) {
            printf("Access security is NOT enabled."
                   " Was asSetFilename specified before iocInit?\n");
            return S_asLib_asNotActive;
        }
        if (pacf) {
            asCaStop();
        }
        else {
            /* Just leave everything as is */
            return S_asLib_badConfig;
        }
    }
    status = asInitFile(pacf, psubstitutions);
    if (asActive) {
        if (!asWasActive) {
            dbSpcAsRegisterCallback(asSpcAsCallback);
            asDbAddRecords();
        }
        asCaStart();
    }
    return status;
}
epicsShareFunc int devLibPCIUse(const char* use)
{
    ELLNODE *cur;
    devLibPCI *drv;

    if (!use)
        use = "native";

    epicsThreadOnce(&devPCIReg_once, &regInit, NULL);

    epicsMutexMustLock(pciDriversLock);

    if (pdevLibPCI) {
        epicsMutexUnlock(pciDriversLock);
        errlogPrintf("PCI bus driver already selected. Can't change selection\n");
        return 1;
    }

    for (cur = ellFirst(&pciDrivers); cur; cur = ellNext(cur)) {
        drv = CONTAINER(cur, devLibPCI, node);
        if (strcmp(drv->name, use) == 0) {
            pdevLibPCI = drv;
            epicsMutexUnlock(pciDriversLock);
            return 0;
        }
    }
    epicsMutexUnlock(pciDriversLock);
    errlogPrintf("PCI bus driver '%s' not found\n", use);
    return 1;
}
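/* regInit is shared by the devLibPCI entry points in this section
 * (devLibPCIUse, devLibPCIDriverName, devInit, devLibPCIRegisterDriver2).
 * A minimal sketch, assuming it only needs to create the registry lock
 * and the driver list; this is a hypothetical body, not the actual
 * devLib2 implementation:
 */
static void regInit(void *junk)
{
    pciDriversLock = epicsMutexMustCreate();
    ellInit(&pciDrivers);
}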
void testHarness(void)
{
    epicsThreadOnce(&onceFlag, testOnce, NULL);
    epicsAtExit(harnessExit, NULL);
    Harness = 1;
    Programs = 0;
    Tests = 0;
    ellInit(&faults);
    epicsTimeGetCurrent(&started);
}
void testPlan(int plan)
{
    epicsThreadOnce(&onceFlag, testOnce, NULL);
    epicsMutexMustLock(testLock);
    planned = plan;
    tested = passed = failed = skipped = bonus = 0;
    todo = NULL;
    if (plan)
        printf("1..%d\n", plan);
    epicsMutexUnlock(testLock);
}
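/* testHarness() and testPlan() funnel through the same once-routine. A
 * minimal sketch, assuming testOnce only needs to create the mutex that
 * serializes updates to the test counters:
 */
static void testOnce(void *dummy)
{
    testLock = epicsMutexMustCreate();
}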
// For now it is probably sufficient to allocate one
// DNS transaction thread for all code sharing the
// same process that needs DNS services, but we
// leave our options open for the future.
ipAddrToAsciiEngine & ipAddrToAsciiEngine::allocate ()
{
    epicsThreadOnce ( & ipAddrToAsciiEngineGlobalMutexOnceFlag,
        ipAddrToAsciiEngineGlobalMutexConstruct, 0 );
    if ( ! ipAddrToAsciiEnginePrivate::pEngine )
        throw std::runtime_error ( "ipAddrToAsciiEngine::allocate fails" );
    return * ipAddrToAsciiEnginePrivate::pEngine;
}
epicsShareFunc epicsThreadPool* epicsThreadPoolGetShared(epicsThreadPoolConfig *opts)
{
    ELLNODE *node;
    epicsThreadPool *cur;
    epicsThreadPoolConfig defopts;
    size_t N = epicsThreadGetCPUs();

    if (!opts) {
        epicsThreadPoolConfigDefaults(&defopts);
        opts = &defopts;
    }
    /* Shared pools must have a minimum allowed number of workers.
     * Use the number of CPU cores.
     */
    if (opts->maxThreads < N)
        opts->maxThreads = N;

    epicsThreadOnce(&sharedPoolsOnce, &sharedPoolsInit, NULL);

    epicsMutexMustLock(sharedPoolsGuard);

    for (node = ellFirst(&sharedPools); node; node = ellNext(node)) {
        cur = CONTAINER(node, epicsThreadPool, sharedNode);

        /* Must have exactly the requested priority,
         * at least the requested max workers,
         * and at least the requested stack size.
         */
        if (cur->conf.workerPriority != opts->workerPriority)
            continue;
        if (cur->conf.maxThreads < opts->maxThreads)
            continue;
        if (cur->conf.workerStack < opts->workerStack)
            continue;

        cur->sharedCount++;
        assert(cur->sharedCount > 0);
        epicsMutexUnlock(sharedPoolsGuard);

        epicsMutexMustLock(cur->guard);
        *opts = cur->conf;
        epicsMutexUnlock(cur->guard);
        return cur;
    }

    cur = epicsThreadPoolCreate(opts);
    if (!cur) {
        epicsMutexUnlock(sharedPoolsGuard);
        return NULL;
    }

    cur->sharedCount = 1;
    ellAdd(&sharedPools, &cur->sharedNode);

    epicsMutexUnlock(sharedPoolsGuard);
    return cur;
}
void epicsSingletonBase::lockedFactory ()
{
    if ( ! this->pSingleton ) {
        epicsThreadOnce ( & epicsSingletonOnceId, epicsSingletonOnce, 0 );
        epicsGuard < epicsMutex > guard ( *pSingletonBaseMutexEPICS );
        if ( ! this->pSingleton ) {
            this->pSingleton = this->factory ();
        }
    }
}
static epicsTimerId wdogCreate(void (*fn)(int), long arg)
{
    static epicsThreadOnceId inited = EPICS_THREAD_ONCE_INIT;

    /* Lazy init of the timer queue. The flag pre-test just avoids the
     * epicsThreadOnce() call on the common path; epicsThreadOnce() itself
     * still guarantees timerQCreate runs exactly once.
     */
    if ( EPICS_THREAD_ONCE_INIT == inited )
        epicsThreadOnce( &inited, timerQCreate, 0 );

    return epicsTimerQueueCreateTimer(timerQ,
        (void (*)(void *))fn, (void *)arg);
}
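/* A minimal sketch of the once-routine, assuming a shared queue serviced
 * at scan-high priority; these are hypothetical choices, not taken from
 * the original source:
 */
static epicsTimerQueueId timerQ;

static void timerQCreate(void *unused)
{
    timerQ = epicsTimerQueueAllocate(1, epicsThreadPriorityScanHigh);
}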
void SingletonUntyped :: incrRefCount ( PBuild pBuild )
{
    epicsThreadOnce ( & epicsSigletonOnceFlag, SingletonMutexOnce, 0 );
    epicsGuard < epicsMutex > guard ( *pEPICSSigletonMutex );
    assert ( _refCount < SIZE_MAX );
    if ( _refCount == 0 ) {
        _pInstance = ( * pBuild ) ();
    }
    _refCount++;
}
epicsShareFunc int epicsAtExit(epicsExitFunc func, void *arg)
{
    int status = -1;

    epicsThreadOnce ( & exitPvtOnce, exitPvtOnceFunc, 0 );

    epicsMutexMustLock ( exitPvtLock );
    if ( pExitPvtPerProcess ) {
        status = epicsAtExitPvt ( pExitPvtPerProcess, func, arg );
    }
    epicsMutexUnlock ( exitPvtLock );

    return status;
}
epicsShareFunc void epicsExitCallAtThreadExits(void)
{
    exitPvt * pep;

    epicsThreadOnce ( & exitPvtOnce, exitPvtOnceFunc, 0 );

    pep = epicsThreadPrivateGet ( exitPvtPerThread );
    if ( pep ) {
        epicsExitCallAtExitsPvt ( pep );
        destroyExitPvt ( pep );
        epicsThreadPrivateSet ( exitPvtPerThread, 0 );
    }
}
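/* The epicsExit functions in this section share one once-routine. A
 * minimal sketch, assuming exitPvtOnceFunc only needs to create the
 * process-wide lock and the per-thread private key:
 */
static void exitPvtOnceFunc ( void * pParm )
{
    exitPvtLock = epicsMutexMustCreate ();
    exitPvtPerThread = epicsThreadPrivateCreate ();
    assert ( exitPvtPerThread );
}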
SimADC::smart_pointer_type getSimADC(const std::string& name)
{
    epicsThreadOnce(&sim_mute_once, &sim_global_init, 0);

    sim_global_type::guard_t G(sim_global->lock);

    sim_global_type::sims_t::const_iterator it = sim_global->sims.find(name);
    if (it == sim_global->sims.end())
        return SimADC::smart_pointer_type();

    return it->second;
}
SimADC::smart_pointer_type createSimADC(const std::string& name)
{
    epicsThreadOnce(&sim_mute_once, &sim_global_init, 0);

    sim_global_type::guard_t G(sim_global->lock);
    sim_global_type::sims_t &sims = sim_global->sims;

    SimADC::smart_pointer_type P(new SimADC);
    sims[name] = P;
    return P;
}
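// A minimal sketch of the once-routine shared by getSimADC() and
// createSimADC(), assuming sim_global just needs to be allocated before
// its lock and map are first used (hypothetical body):
static void sim_global_init(void *)
{
    sim_global = new sim_global_type;
}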
epicsShareFunc const char* devLibPCIDriverName()
{
    const char* ret = NULL;
    epicsThreadOnce(&devPCIReg_once, &regInit, NULL);
    epicsMutexMustLock(pciDriversLock);
    if (pdevLibPCI)
        ret = pdevLibPCI->name;
    epicsMutexUnlock(pciDriversLock);
    return ret;
}
static void ntpshmhooks(initHookState state)
{
    if (state != initHookAfterIocRunning)
        return;

    epicsThreadOnce(&ntponce, &ntpshminit, 0);

    epicsMutexMustLock(ntpShm.ntplock);
    if (ntpShm.evr) {
        callbackRequest(&ntpShm.ntpcb);
        fprintf(stderr, "Starting NTP SHM writer for segment %d\n",
                ntpShm.segid);
    }
    epicsMutexUnlock(ntpShm.ntplock);
}
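/* A minimal sketch of the once-routine shared by ntpshmhooks() and
 * time2ntp() below, assuming it only needs to create the lock that guards
 * the ntpShm state (hypothetical body; the real routine likely also
 * prepares the NTP SHM segment and callback):
 */
static void ntpshminit(void *unused)
{
    ntpShm.ntplock = epicsMutexMustCreate();
}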
epicsShareFunc int epicsAtThreadExit(epicsExitFunc func, void *arg)
{
    exitPvt * pep;

    epicsThreadOnce ( & exitPvtOnce, exitPvtOnceFunc, 0 );

    pep = epicsThreadPrivateGet ( exitPvtPerThread );
    if ( ! pep ) {
        pep = createExitPvt ();
        if ( ! pep ) {
            return -1;
        }
        epicsThreadPrivateSet ( exitPvtPerThread, pep );
    }
    return epicsAtExitPvt ( pep, func, arg );
}
epicsShareFunc void epicsExitCallAtExits(void)
{
    exitPvt * pep = 0;

    epicsThreadOnce ( & exitPvtOnce, exitPvtOnceFunc, 0 );

    epicsMutexMustLock ( exitPvtLock );
    if ( pExitPvtPerProcess ) {
        pep = pExitPvtPerProcess;
        pExitPvtPerProcess = 0;
    }
    epicsMutexUnlock ( exitPvtLock );

    if ( pep ) {
        epicsExitCallAtExitsPvt ( pep );
        destroyExitPvt ( pep );
    }
}
static void devInit(void* junk)
{
    epicsThreadOnce(&devPCIReg_once, &regInit, NULL);
    epicsMutexMustLock(pciDriversLock);
    if (!pdevLibPCI && devLibPCIUse(NULL)) {
        epicsMutexUnlock(pciDriversLock);
        devPCIInit_result = S_dev_internal;
        return;
    }
    epicsMutexUnlock(pciDriversLock);

    if (pdevLibPCI->pDevInit)
        devPCIInit_result = (*pdevLibPCI->pDevInit)();
    else
        devPCIInit_result = 0;
}
/// \param[in] configSection @copydoc initArg1
/// \param[in] configFile @copydoc initArg2
/// \param[in] options @copydoc initArg4
NetShrVarInterface::NetShrVarInterface(const char *configSection,
        const char* configFile, int options) :
    m_configSection(configSection), m_options(options)
{
    epicsThreadOnce(&onceId, initCV, NULL);

    char* configFile_expanded = macEnvExpand(configFile);
    m_configFile = configFile_expanded;
    epicsAtExit(epicsExitFunc, this);

    pugi::xml_parse_result result = m_xmlconfig.load_file(configFile_expanded);
    free(configFile_expanded);
    if (result) {
        std::cerr << "Loaded XML config file \"" << m_configFile
                  << "\" (expanded from \"" << configFile << "\")" << std::endl;
    }
    else {
        throw std::runtime_error("Cannot load XML \"" + m_configFile
            + "\" (expanded from \"" + std::string(configFile)
            + "\"): load failure: " + result.description());
    }
}
void time2ntp(const char* evrname, int segid, int event)
{
    if (event == 0)
        event = MRF_EVENT_TS_COUNTER_RST;
    else if (event <= 0 || event > 255) {
        fprintf(stderr, "Invalid 1Hz event # %d\n", event);
        return;
    }
    if (segid < 0 || segid > 4) {
        fprintf(stderr, "Invalid segment ID %d\n", segid);
        return;
    }
    mrf::Object *obj = mrf::Object::getObject(evrname);
    if (!obj) {
        fprintf(stderr, "Unknown EVR: %s\n", evrname);
        return;
    }
    EVRMRM *evr = dynamic_cast<EVRMRM*>(obj);
    if (!evr) {
        fprintf(stderr, "\"%s\" is not an EVR\n", evrname);
        return;
    }

    epicsThreadOnce(&ntponce, &ntpshminit, 0);

    epicsMutexMustLock(ntpShm.ntplock);
    if (ntpShm.evr) {
        epicsMutexUnlock(ntpShm.ntplock);
        fprintf(stderr, "ntpShm already initialized.\n");
        return;
    }
    ntpShm.event = event;
    ntpShm.evr = evr;
    ntpShm.segid = segid;
    epicsMutexUnlock(ntpShm.ntplock);
}
// For now it is probably sufficient to allocate one
// DNS transaction thread for all code sharing the
// same process that needs DNS services, but we
// leave our options open for the future.
ipAddrToAsciiEngine & ipAddrToAsciiEngine::allocate ()
{
    epicsThreadOnce ( & ipAddrToAsciiEngineGlobalMutexOnceFlag,
        ipAddrToAsciiEngineGlobalMutexConstruct, 0 );
    // Since we must not own the lock when checking this flag,
    // this diagnostic has imperfect detection, but never
    // incorrect detection.
    if ( ipAddrToAsciiEnginePrivate :: shutdownRequest ) {
        throw std :: runtime_error (
            "ipAddrToAsciiEngine::allocate (): "
            "attempts to create an "
            "ipAddrToAsciiEngine while the exit "
            "handlers are running are rejected" );
    }
    epicsGuard < epicsMutex > guard ( * ipAddrToAsciiEnginePrivate::pGlobalMutex );
    if ( ! ipAddrToAsciiEnginePrivate::pEngine ) {
        ipAddrToAsciiEnginePrivate::pEngine = new ipAddrToAsciiEnginePrivate ();
    }
    ipAddrToAsciiEnginePrivate::numberOfReferences++;
    return * ipAddrToAsciiEnginePrivate::pEngine;
}
epicsShareFunc int epicsShareAPI errlogInit2(int bufsize, int maxMsgSize)
{
    static epicsThreadOnceId errlogOnceFlag = EPICS_THREAD_ONCE_INIT;
    struct initArgs config;

    if (pvtData.atExit)
        return 0;

    if (bufsize < BUFFER_SIZE)
        bufsize = BUFFER_SIZE;
    config.bufsize = bufsize;

    if (maxMsgSize < MAX_MESSAGE_SIZE)
        maxMsgSize = MAX_MESSAGE_SIZE;
    config.maxMsgSize = maxMsgSize;

    epicsThreadOnce(&errlogOnceFlag, errlogInitPvt, &config);
    if (pvtData.errlogInitFailed) {
        fprintf(stderr, "errlogInit failed\n");
        exit(1);
    }
    return 0;
}
// For now it is probably sufficient to allocate one
// DNS transaction thread for all code sharing the
// same process that needs DNS services, but we
// leave our options open for the future.
void ipAddrToAsciiEnginePrivate::release ()
{
    bool deleteGlobalMutexCondDetected = false;
    epicsThreadOnce ( & ipAddrToAsciiEngineGlobalMutexOnceFlag,
        ipAddrToAsciiEngineGlobalMutexConstruct, 0 );
    {
        epicsGuard < epicsMutex > guard ( * ipAddrToAsciiEnginePrivate::pGlobalMutex );
        assert ( ipAddrToAsciiEnginePrivate::numberOfReferences > 0u );
        ipAddrToAsciiEnginePrivate::numberOfReferences--;
        if ( ipAddrToAsciiEnginePrivate::numberOfReferences == 0u ) {
            deleteGlobalMutexCondDetected =
                ipAddrToAsciiEnginePrivate :: shutdownRequest;
            delete ipAddrToAsciiEnginePrivate :: pEngine;
            ipAddrToAsciiEnginePrivate :: pEngine = 0;
        }
    }
    if ( deleteGlobalMutexCondDetected ) {
        delete ipAddrToAsciiEnginePrivate :: pGlobalMutex;
        ipAddrToAsciiEnginePrivate :: pGlobalMutex = 0;
    }
}
epicsShareFunc int devLibPCIRegisterDriver2(devLibPCI* drv, size_t drvsize)
{
    int ret = 0;
    ELLNODE *cur;

    if (!drv->name)
        return 1;

    if (drvsize != sizeof(*drv)) {
        errlogPrintf("devLibPCIRegisterDriver() fails with inconsistent PCI OS struct sizes.\n"
                     "expect %lu but given %lu\n"
                     "Please do a clean rebuild of devLib2 and any code with custom PCI OS structs\n",
                     (unsigned long)sizeof(*drv), (unsigned long)drvsize);
        return S_dev_internal;
    }

    epicsThreadOnce(&devPCIReg_once, &regInit, NULL);

    epicsMutexMustLock(pciDriversLock);

    for (cur = ellFirst(&pciDrivers); cur; cur = ellNext(cur)) {
        devLibPCI *other = CONTAINER(cur, devLibPCI, node);
        if (strcmp(drv->name, other->name) == 0) {
            errlogPrintf("Failed to register PCI bus driver: name already taken\n");
            ret = 1;
            break;
        }
    }
    if (!ret)
        ellAdd(&pciDrivers, &drv->node);

    epicsMutexUnlock(pciDriversLock);
    return ret;
}
event_list *eventNameToHandle(const char *eventname)
{
    int prio;
    event_list *pel;
    static epicsThreadOnceId onceId = EPICS_THREAD_ONCE_INIT;

    if (!eventname || eventname[0] == 0)
        return NULL;

    epicsThreadOnce(&onceId, eventOnce, NULL);
    epicsMutexMustLock(event_lock);
    for (pel = pevent_list[0]; pel; pel = pel->next) {
        if (strcmp(pel->event_name, eventname) == 0)
            break;
    }
    if (pel == NULL) {
        pel = dbCalloc(1, sizeof(event_list));
        strcpy(pel->event_name, eventname);
        for (prio = 0; prio < NUM_CALLBACK_PRIORITIES; prio++) {
            callbackSetUser(&pel->scan_list[prio], &pel->callback[prio]);
            callbackSetPriority(prio, &pel->callback[prio]);
            callbackSetCallback(eventCallback, &pel->callback[prio]);
            pel->scan_list[prio].lock = epicsMutexMustCreate();
            ellInit(&pel->scan_list[prio].list);
        }
        pel->next = pevent_list[0];
        pevent_list[0] = pel;
        { /* backward compatibility */
            char *p;
            long e = strtol(eventname, &p, 0);

            if (*p == 0 && e > 0 && e <= 255)
                pevent_list[e] = pel;
        }
    }
    epicsMutexUnlock(event_lock);
    return pel;
}
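/* A minimal sketch of the once-routine used above, assuming eventOnce
 * only needs to create the lock guarding the event lists:
 */
static void eventOnce(void *arg)
{
    event_lock = epicsMutexMustCreate();
}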
static long subFinalInit(subRecord *precord)
{
    static int instance_counter = 0;
    myData *pmyData;

    if (dbSubFinalDebug) {
        epicsPrintf("Init SubRecord by (thread) %s, (record) %s\n",
                    epicsThreadGetNameSelf(), precord->name);
    }

    pmyData = (myData *) malloc(sizeof(myData));
    pmyData->instance_counter = ++instance_counter;
    pmyData->private_counter = 0;
    pmyData->precord = precord;
    precord->dpvt = (void *) pmyData;

    epicsThreadOnce(&threadOnceFlag, (void (*)(void *))spawnThread,
                    (void *)precord);
    return 0;
}
/*
 * fetchClientContext ();
 */
int fetchClientContext ( ca_client_context **ppcac )
{
    epicsThreadOnce ( &caClientContextIdOnce, ca_init_client_context, 0 );
    if ( caClientContextId == 0 ) {
        return ECA_ALLOCMEM;
    }

    int status;
    *ppcac = ( ca_client_context * ) epicsThreadPrivateGet ( caClientContextId );
    if ( *ppcac ) {
        status = ECA_NORMAL;
    }
    else {
        status = ca_task_initialize ();
        if ( status == ECA_NORMAL ) {
            *ppcac = ( ca_client_context * )
                epicsThreadPrivateGet ( caClientContextId );
            if ( ! *ppcac ) {
                status = ECA_INTERNAL;
            }
        }
    }
    return status;
}
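/*
 * All of the snippets above follow the same idiom: a statically
 * initialized epicsThreadOnceId plus a once-routine that creates a
 * shared resource before first use. A minimal, self-contained sketch of
 * the pattern, with hypothetical names:
 */
#include "epicsThread.h"
#include "epicsMutex.h"

static epicsThreadOnceId demoOnce = EPICS_THREAD_ONCE_INIT;
static epicsMutexId demoLock;

static void demoInit(void *arg)
{
    /* Runs exactly once, even if many threads race into demoTakeLock() */
    demoLock = epicsMutexMustCreate();
}

void demoTakeLock(void)
{
    epicsThreadOnce(&demoOnce, demoInit, NULL);
    epicsMutexMustLock(demoLock);
    /* ... critical section ... */
    epicsMutexUnlock(demoLock);
}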