/*
 * ss_read_buffer_static() - static version of ss_read_buffer.
 * This is to enable inlining in the for loop in ss_read_all_buffer.
 *
 * Copies the channel's shared buffer (and, for db channels, its meta data)
 * into the given state set's local copy, under ch->varLock, and clears the
 * state set's dirty flag for the channel.  If dirty_only is TRUE, channels
 * whose dirty flag is clear are skipped entirely.
 */
static void ss_read_buffer_static(SSCB *ss, CHAN *ch, boolean dirty_only)
{
    char *val = valPtr(ch,ss);   /* state-set-local value for this channel */
    char *buf = bufPtr(ch);      /* shared buffer for this channel */
    ptrdiff_t nch = chNum(ch);
    /* Must take dbCount for db channels, else we overwrite elements we didn't get */
    size_t count = ch->dbch ? ch->dbch->dbCount : ch->count;
    size_t var_size = ch->type->size * count;

    /* Fast path: nothing to do if only dirty channels are wanted and
       this one has not changed.  Checked without the lock; the flag is
       re-cleared under the lock below. */
    if (!ss->dirty[nch] && dirty_only)
        return;
    epicsMutexMustLock(ch->varLock);
    DEBUG("ss %s: before read %s", ss->ssName, ch->varName);
    print_channel_value(DEBUG, ch, val);
    memcpy(val, buf, var_size);
    if (ch->dbch)
    {
        /* structure copy */
        ss->metaData[nch] = ch->dbch->metaData;
    }
    DEBUG("ss %s: after read %s", ss->ssName, ch->varName);
    print_channel_value(DEBUG, ch, val);
    ss->dirty[nch] = FALSE;
    epicsMutexUnlock(ch->varLock);
}
/*
 * ss_write_buffer() - Copy given value and meta data
 * to shared buffer. In safe mode, if dirtify is TRUE then
 * set dirty flag for each state set.
 *
 * All writes happen under ch->varLock so concurrent readers
 * (ss_read_buffer*) see a consistent value/meta pair.
 */
void ss_write_buffer(CHAN *ch, void *val, PVMETA *meta, boolean dirtify)
{
    PROG *sp = ch->prog;
    char *buf = bufPtr(ch);      /* shared buffer */
    /* Must use dbCount for db channels, else we overwrite elements we didn't get */
    size_t count = ch->dbch ? ch->dbch->dbCount : ch->count;
    size_t var_size = ch->type->size * count;
    ptrdiff_t nch = chNum(ch);
    unsigned nss;

    epicsMutexMustLock(ch->varLock);
    DEBUG("ss_write_buffer: before write %s", ch->varName);
    print_channel_value(DEBUG, ch, buf);
    memcpy(buf, val, var_size);
    if (ch->dbch && meta)
        /* structure copy */
        ch->dbch->metaData = *meta;
    DEBUG("ss_write_buffer: after write %s", ch->varName);
    print_channel_value(DEBUG, ch, buf);
    /* In safe mode, mark the channel dirty in every state set so each
       one picks up the new value on its next read. */
    if (optTest(sp, OPT_SAFE) && dirtify)
        for (nss = 0; nss < sp->numSS; nss++)
            sp->ss[nss].dirty[nch] = TRUE;
    epicsMutexUnlock(ch->varLock);
}
/*
 * Get value from a queued PV.
 *
 * Pops the oldest entry from the channel's queue into the state set's
 * local value/meta storage via the getq_cp callback.  If the channel is
 * synced to an event flag and the queue is now empty, the flag is
 * cleared (under the program lock).  Returns TRUE if a value was
 * obtained, FALSE if the queue was empty or the variable is not queued.
 */
epicsShareFunc boolean epicsShareAPI seq_pvGetQ(SS_ID ss, VAR_ID varId)
{
    SPROG *sp = ss->sprog;
    CHAN *ch = sp->chan + varId;
    void *var = valPtr(ch,ss);        /* destination for the value */
    EV_ID ev_flag = ch->syncedTo;     /* 0 if not synced to an event flag */
    PVMETA *meta = metaPtr(ch,ss);
    boolean was_empty;
    struct getq_cp_arg arg = {ch, var, meta};

    if (!ch->queue)
    {
        errlogSevPrintf(errlogMajor,
            "pvGetQ(%s): user error (variable not queued)\n",
            ch->varName
        );
        return FALSE;
    }
    /* Atomically dequeue-and-copy; returns whether the queue was empty. */
    was_empty = seqQueueGetF(ch->queue, getq_cp, &arg);

    if (ev_flag)
    {
        epicsMutexMustLock(sp->programLock);
        /* If queue is now empty, clear the event flag */
        if (seqQueueIsEmpty(ch->queue))
        {
            bitClear(sp->evFlags, ev_flag);
        }
        epicsMutexUnlock(sp->programLock);
    }
    return (!was_empty);
}
epicsShareFunc int devLibPCIUse(const char* use) { ELLNODE *cur; devLibPCI *drv; if (!use) use="native"; epicsThreadOnce(&devPCIReg_once, ®Init, NULL); epicsMutexMustLock(pciDriversLock); if (pdevLibPCI) { epicsMutexUnlock(pciDriversLock); errlogPrintf("PCI bus driver already selected. Can't change selection\n"); return 1; } for(cur=ellFirst(&pciDrivers); cur; cur=ellNext(cur)) { drv=CONTAINER(cur, devLibPCI, node); if (strcmp(drv->name, use)==0) { pdevLibPCI = drv; epicsMutexUnlock(pciDriversLock); return 0; } } epicsMutexUnlock(pciDriversLock); errlogPrintf("PCI bus driver '%s' not found\n",use); return 1; }
/*
 * CA access-rights callback: records the channel's current read/write
 * access in the caLink, and if any access was lost, queues a rescan of
 * CP/CPP-linked records so they reflect the new (in)accessibility.
 */
static void accessRightsCallback(struct access_rights_handler_args arg)
{
    caLink *pca = (caLink *)ca_puser(arg.chid);
    struct link *plink;
    struct pv_link *ppv_link;
    dbCommon *precord;

    assert(pca);
    if (ca_state(pca->chid) != cs_conn)
        return; /* connectionCallback will handle */
    epicsMutexMustLock(pca->lock);
    plink = pca->plink;
    if (!plink)
        goto done;   /* link already severed; nothing to update */
    pca->hasReadAccess = ca_read_access(arg.chid);
    pca->hasWriteAccess = ca_write_access(arg.chid);
    /* Full access: no need to trigger record processing. */
    if (pca->hasReadAccess && pca->hasWriteAccess)
        goto done;
    ppv_link = &plink->value.pv_link;
    precord = ppv_link->precord;
    /* Rescan records with CP links, or CPP links on passive records
       (scan == 0), so monitors notice the access change. */
    if (precord &&
        ((ppv_link->pvlMask & pvlOptCP) ||
         ((ppv_link->pvlMask & pvlOptCPP) && precord->scan == 0)))
        scanOnce(precord);
done:
    epicsMutexUnlock(pca->lock);
}
/*
 * Remove a record's scan_element from the given scan list.
 *
 * Logs (and returns without modifying anything) if the record has no
 * scan_element or its element belongs to a different scan list.
 */
static void deleteFromList(struct dbCommon *precord, scan_list *psl)
{
    scan_element *pse;

    epicsMutexMustLock(psl->lock);
    pse = precord->spvt;
    if (pse == NULL) {
        epicsMutexUnlock(psl->lock);
        errlogPrintf("dbScan: Tried to delete record from wrong scan list!\n"
            "\t%s.SPVT = NULL, but psl = %p\n",
            precord->name, (void *)psl);
        return;
    }
    if (pse->pscan_list != psl) {
        epicsMutexUnlock(psl->lock);
        /* Fix: the message reports SPVT->pscan_list, so print that
           pointer rather than pse itself. */
        errlogPrintf("dbScan: Tried to delete record from wrong scan list!\n"
            "\t%s.SPVT->pscan_list = %p but psl = %p\n",
            precord->name, (void *)pse->pscan_list, (void *)psl);
        return;
    }
    /* Detach the element and flag the list as modified so concurrent
       traversals restart. */
    pse->pscan_list = NULL;
    ellDelete(&psl->list, (void *)pse);
    psl->modified = TRUE;
    epicsMutexUnlock(psl->lock);
}
/****************************************************************************
 * Define private interface asynFloat64 methods
 ****************************************************************************/
/*
 * Write a float64 value to the Colby delay line.
 * addr 0 selects the "DEL" command, addr 4 selects "STEP";
 * any other address is an error.  The command string is sent
 * under the port's sync lock.
 */
static asynStatus writeFloat64(void* ppvt,asynUser* pasynUser,epicsFloat64 value)
{
    int addr,status;
    const char* pcmd;
    char outBuf[BUFFER_SIZE];
    Port* pport = (Port*)ppvt;

    asynPrint(pasynUser,ASYN_TRACE_FLOW,"drvAsynColby::writeFloat64 %s:\n",pport->myport);
    if( pasynManager->getAddr(pasynUser,&addr)) return( asynError );
    switch( addr )
    {
    case 0: pcmd = "DEL"; break;
    case 4: pcmd = "STEP"; break;
    default: return( asynError );
    }
    epicsMutexMustLock(pport->syncLock);
    /* NOTE(review): assumes BUFFER_SIZE comfortably holds
       "<cmd> <value> <units>" - confirm against the units field size. */
    sprintf(outBuf,"%s %-.3f %s",pcmd,value,pport->units);
    status = writeOnly(pport->pasynUser,outBuf,pport->iface);
    epicsMutexUnlock(pport->syncLock);
    /* Fix: pasynUser is a pointer; printing it with "0x%8.8X" is
       undefined behavior and truncates on 64-bit targets.  Use %p. */
    asynPrint(pasynUser,ASYN_TRACEIO_FILTER,
        "drvAsynColby::writeFloat64 %s: asyn - %p, addr - %d, value - %-.3f\n",
        pport->myport,(void*)pasynUser,addr,value);
    if( status ) return( asynError ); else return( asynSuccess );
}
/*
 * ai record read: difference in seconds between the NTP shared-memory
 * timestamp and the local receive time.  Sets READ/INVALID alarm when
 * the last sample is not valid.  Returns 2 (no conversion): VAL is
 * written directly by this routine.
 */
static long read_delta(aiRecord* prec)
{
    epicsMutexMustLock(ntpShm.ntplock);
    double val = 0.0;
    if(ntpShm.lastValid)
        val = epicsTimeDiffInSeconds(&ntpShm.lastStamp, &ntpShm.lastRx);
    else
        recGblSetSevr(prec, READ_ALARM, INVALID_ALARM);
    /* Device timestamping: use the NTP stamp as the record's time. */
    if(prec->tse==epicsTimeEventDeviceTime) {
        prec->time = ntpShm.lastStamp;
    }
    epicsMutexUnlock(ntpShm.ntplock);
    /* NOTE(review): this applies the INVERSE of the usual ai linear
       scaling (it subtracts the offset and divides by the slope,
       where VAL = RVAL*ESLO + EOFF would multiply/add) - presumably
       deliberate for this device, but confirm the intended direction. */
    if(prec->linr==menuConvertLINEAR){
        val-=prec->eoff;
        if(prec->eslo!=0) val/=prec->eslo;   /* guard divide-by-zero */
    }
    val-=prec->aoff;
    if(prec->aslo!=0) val/=prec->aslo;       /* guard divide-by-zero */
    prec->val = val;
    prec->udf = !isfinite(val);
    return 2;   /* no conversion: VAL already written */
}
/*
 * longin record read: report the NTP shared-memory failure count.
 * Returns 0 (success).
 */
static long read_fail(longinRecord* prec)
{
    /* Snapshot the counter under the shared-memory lock. */
    epicsMutexMustLock(ntpShm.ntplock);
    prec->val = ntpShm.numFail;
    epicsMutexUnlock(ntpShm.ntplock);

    return 0;
}
/*
 * Record one NTP shared-memory failure: invalidate the last sample
 * and bump the failure counter, under the shared-memory lock.
 */
static void incFail()
{
    epicsMutexMustLock(ntpShm.ntplock);
    {
        ntpShm.lastValid = false;
        ntpShm.numFail++;
    }
    epicsMutexUnlock(ntpShm.ntplock);
}
/* print list of stopped records, and breakpoints set in locksets */
/*
 * For each lock set on the breakpoint stack, print the record it is
 * stopped at (if any), per-entrypoint call statistics, and every
 * breakpoint set within the lock set.  Always returns 0.
 */
long epicsShareAPI dbstat(void)
{
    struct LS_LIST *pnode;
    struct BP_LIST *pbl;
    struct EP_LIST *pqe;
    epicsTimeStamp time;

    epicsMutexMustLock(bkpt_stack_sem);
    epicsTimeGetCurrent(&time);

    /*
     * Traverse list, reporting stopped records
     */
    pnode = (struct LS_LIST *) ellFirst(&lset_stack);
    while (pnode != NULL) {
        if (pnode->precord != NULL) {
            printf("LSet: %lu Stopped at: %-28.28s #B: %5.5d T: %p\n",
                pnode->l_num, pnode->precord->name,
                ellCount(&pnode->bp_list), pnode->taskid);
            /* for each entrypoint detected, print out entrypoint statistics */
            pqe = (struct EP_LIST *) ellFirst(&pnode->ep_queue);
            while (pqe != NULL) {
                double diff = epicsTimeDiffInSeconds(&time,&pqe->time);
                if (diff) {
                    /* Fix: "C/S" is counts per second, so print
                       count/diff rather than the raw elapsed time;
                       the if (diff) guard protects this division. */
                    printf(" Entrypoint: %-28.28s #C: %5.5lu C/S: %7.1f\n",
                        pqe->entrypoint->name, pqe->count,
                        pqe->count/diff);
                }
                pqe = (struct EP_LIST *) ellNext((ELLNODE *)pqe);
            }
        }
        else {
            printf("LSet: %lu #B: %5.5d T: %p\n",
                pnode->l_num, ellCount(&pnode->bp_list), pnode->taskid);
        }
        /*
         * Print out breakpoints set in the lock set
         */
        pbl = (struct BP_LIST *) ellFirst(&pnode->bp_list);
        while (pbl != NULL) {
            printf(" Breakpoint: %-28.28s", pbl->precord->name);
            /* display auto print flag */
            if (pbl->precord->bkpt & BKPT_PRINT_MASK)
                printf(" (ap)\n");
            else
                printf("\n");
            pbl = (struct BP_LIST *) ellNext((ELLNODE *)pbl);
        }
        pnode = (struct LS_LIST *) ellNext((ELLNODE *)pnode);
    }
    epicsMutexUnlock(bkpt_stack_sem);
    return(0);
}
/*
 * Set the db state to TRUE (1).  A NULL id is silently ignored.
 */
void dbStateSet(dbStateId id)
{
    if (id) {
        epicsMutexMustLock(id->lock);
        id->status = 1;
        epicsMutexUnlock(id->lock);
    }
}
/*
 * Set the db state to FALSE (0).  A NULL id is silently ignored.
 */
void dbStateClear(dbStateId id)
{
    if (id) {
        epicsMutexMustLock(id->lock);
        id->status = 0;
        epicsMutexUnlock(id->lock);
    }
}
static void freeNode(union twdNode *pn) { VALGRIND_MEMPOOL_FREE(&fList, pn); VALGRIND_MEMPOOL_ALLOC(&fList, pn, sizeof(ELLNODE)); epicsMutexMustLock(fLock); ellAdd(&fList, (void *)pn); epicsMutexUnlock(fLock); }
/*
 * Return the name of the highest-priority (first-listed) current-time
 * provider, or NULL if none is registered.
 */
const char * generalTimeHighestCurrentName(void)
{
    gtProvider *ptp;

    epicsMutexMustLock(gtPvt.timeListLock);
    /* Fix: "&gtPvt" had been mojibake-corrupted to ">Pvt"
       ("&gt" rendered as ">"); restore the address-of expression. */
    ptp = (gtProvider *)ellFirst(&gtPvt.timeProviders);
    epicsMutexUnlock(gtPvt.timeListLock);
    return ptp ? ptp->name : NULL;
}
/*
 * Called by iocInit at various points during initialization.
 * This function must only be called by iocInit and relatives.
 *
 * Invokes every registered hook with the given state.  The list lock is
 * deliberately released while each hook runs (hooks may take arbitrary
 * time or register further hooks) and re-taken only to step to the next
 * node; hook nodes are never removed, so the node pointer stays valid
 * across the unlocked callback.
 */
void initHookAnnounce(initHookState state)
{
    initHookLink *hook;

    initHookInit();
    epicsMutexMustLock(listLock);
    hook = (initHookLink *)ellFirst(&functionList);
    epicsMutexUnlock(listLock);
    while (hook != NULL) {
        /* Call the hook without holding the lock. */
        hook->func(state);
        epicsMutexMustLock(listLock);
        hook = (initHookLink *)ellNext(&hook->node);
        epicsMutexUnlock(listLock);
    }
}
/*
 * Declare the TAP test plan: remember the planned count, reset all
 * result counters, and (for a non-zero plan) emit the "1..N" header.
 */
void testPlan(int plan)
{
    epicsThreadOnce(&onceFlag, testOnce, NULL);
    epicsMutexMustLock(testLock);

    planned = plan;
    tested = 0;
    passed = 0;
    failed = 0;
    skipped = 0;
    bonus = 0;
    todo = NULL;

    if (plan)
        printf("1..%d\n", plan);

    epicsMutexUnlock(testLock);
}
/*
 * devAllocAddress()
 *
 * Allocate an unoccupied block of bus address space of the given size
 * and alignment, register it to pOwnerName, and return its local
 * address via pLocalAddress.  Searches the free-range list for the
 * first range large enough, then installs the chosen base address.
 * Returns 0 on success or an S_dev_* error code.
 */
long devAllocAddress(
    const char *pOwnerName,
    epicsAddressType addrType,
    size_t size,
    unsigned alignment, /* n ls bits zero in base addr*/
    volatile void ** pLocalAddress )
{
    int s;
    rangeItem *pRange;
    size_t base = 0;

    /* Lazy one-time library initialization. */
    if (!devLibInitFlag) {
        s = devLibInit();
        if(s){
            return s;
        }
    }
    s = addrVerify (addrType, 0, size);
    if(s){
        return s;
    }
    if (size == 0) {
        return S_dev_lowValue;
    }
    /* First-fit search of the free ranges for this address type. */
    epicsMutexMustLock(addrListLock);
    pRange = (rangeItem *) ellFirst (&addrFree[addrType]);
    while (pRange) {
        if ((pRange->end - pRange->begin) + 1 >= size){
            /* Range is big enough; try to place an aligned block in it. */
            s = blockFind (
                addrType,
                pRange,
                size,
                alignment,
                &base);
            if (s==SUCCESS) {
                break;
            }
        }
        pRange = (rangeItem *) pRange->node.next;
    }
    epicsMutexUnlock(addrListLock);
    if(!pRange){
        s = S_dev_deviceDoesNotFit;
        errMessage(s, epicsAddressTypeName[addrType]);
        return s;
    }
    /* Register the block at the base address found above. */
    s = devInstallAddr (pRange, pOwnerName, addrType, base, size,
        pLocalAddress);
    return s;
}
/*
 * Get the time of the given event from the highest-priority provider
 * that can supply it, optionally returning that provider's priority.
 *
 * Enforces monotonically non-decreasing time per event (and for the
 * "best time" pseudo-event): if a provider returns a time earlier than
 * the last one delivered, the cached later time is substituted and an
 * error counter is bumped under an interrupt lock.
 *
 * Fix: every "&gtPvt" in this function had been mojibake-corrupted to
 * ">Pvt" ("&gt" rendered as ">"); the address-of expressions are
 * restored below.
 */
static int generalTimeGetEventPriority(epicsTimeStamp *pDest, int eventNumber,
    int *pPrio)
{
    gtProvider *ptp;
    int status = S_time_noProvider;

    generalTime_Init();
    if ((eventNumber < 0 || eventNumber >= NUM_TIME_EVENTS) &&
        (eventNumber != epicsTimeEventBestTime))
        return S_time_badEvent;

    epicsMutexMustLock(gtPvt.eventListLock);
    /* Providers are kept in priority order; take the first that works. */
    for (ptp = (gtProvider *)ellFirst(&gtPvt.eventProviders);
         ptp;
         ptp = (gtProvider *)ellNext(&ptp->node)) {
        status = ptp->get.Event(pDest, eventNumber);
        if (status == epicsTimeOK) {
            gtPvt.lastEventProvider = ptp;
            if (pPrio)
                *pPrio = ptp->priority;

            if (eventNumber == epicsTimeEventBestTime) {
                if (epicsTimeGreaterThanEqual(pDest,
                        &gtPvt.lastProvidedBestTime)) {
                    gtPvt.lastProvidedBestTime = *pDest;
                } else {
                    /* Time went backwards: substitute the cached time. */
                    int key;
                    *pDest = gtPvt.lastProvidedBestTime;
                    key = epicsInterruptLock();
                    gtPvt.ErrorCounts++;
                    epicsInterruptUnlock(key);
                }
            } else {
                if (epicsTimeGreaterThanEqual(pDest,
                        &gtPvt.eventTime[eventNumber])) {
                    gtPvt.eventTime[eventNumber] = *pDest;
                } else {
                    /* Time went backwards: substitute the cached time. */
                    int key;
                    *pDest = gtPvt.eventTime[eventNumber];
                    key = epicsInterruptLock();
                    gtPvt.ErrorCounts++;
                    epicsInterruptUnlock(key);
                }
            }
            break;
        }
    }
    if (status)
        gtPvt.lastEventProvider = NULL;
    epicsMutexUnlock(gtPvt.eventListLock);
    return status;
}
/*
 * Return the number of worker threads currently running in the pool.
 * The count is read under the pool guard for a consistent snapshot.
 */
unsigned int epicsThreadPoolNThreads(epicsThreadPool *pool)
{
    unsigned int count;

    epicsMutexMustLock(pool->guard);
    count = pool->threadsRunning;
    epicsMutexUnlock(pool->guard);

    return count;
}
/*
 * Initialize an event flag.
 * Sets the flag when val is non-zero, clears it otherwise, under the
 * program lock.  The flag id must be within the program's range.
 */
epicsShareFunc void seq_efInit(PROG_ID sp, EF_ID ev_flag, unsigned val)
{
    assert(ev_flag > 0 && ev_flag <= sp->numEvFlags);

    epicsMutexMustLock(sp->lock);
    if (!val)
        bitClear(sp->evFlags, ev_flag);
    else
        bitSet(sp->evFlags, ev_flag);
    epicsMutexUnlock(sp->lock);
}
/*
 * Caster test hook: at the UDP-setup stage, release the lock and
 * hand control to the test driver (signal cycled[0], wait on
 * cycled[1]), then re-take the lock before resuming.  All other
 * states are ignored.
 */
static void testerhook(caster_t *self, caster_h state)
{
    if (state == casterUDPSetup) {
        epicsMutexUnlock(lock);
        epicsEventSignal(cycled[0]);
        epicsEventMustWait(cycled[1]);
        epicsMutexMustLock(lock);
    }
}
/*
 * Return the ring buffer's current write index, read under its lock
 * so the 64-bit value is not torn on 32-bit targets.
 */
unsigned long long getWriteIndexOfMyRingBuffer( volatile MY_RING_BUFFER *pStruct )
{
    unsigned long long idx;

    epicsMutexMustLock( pStruct->lock );
    idx = pStruct->indexWrite;
    epicsMutexUnlock( pStruct->lock );

    return idx;
}
/*
 * Return the current db state (0 or 1).  A NULL id reads as 0.
 */
int dbStateGet(dbStateId id)
{
    int result = 0;

    if (id) {
        epicsMutexMustLock(id->lock);
        result = id->status;
        epicsMutexUnlock(id->lock);
    }
    return result;
}
/*
 * Report 'skip' tests as skipped (TAP "ok ... # SKIP <why>"), counting
 * each as tested, passed, and skipped.  A non-positive skip is a no-op.
 */
void testSkip(int skip, const char *why)
{
    int i;

    epicsMutexMustLock(testLock);
    for (i = 0; i < skip; i++) {
        tested++;
        passed++;
        skipped++;
        printf("ok %2d # SKIP %s\n", tested, why);
    }
    fflush(stdout);
    epicsMutexUnlock(testLock);
}
/*
 * Register an exit handler with the per-process exit list.
 * Returns the registration status, or -1 if the list does not exist.
 */
epicsShareFunc int epicsAtExit(epicsExitFunc func, void *arg)
{
    int status = -1;

    epicsThreadOnce(&exitPvtOnce, exitPvtOnceFunc, 0);

    epicsMutexMustLock(exitPvtLock);
    if (pExitPvtPerProcess)
        status = epicsAtExitPvt(pExitPvtPerProcess, func, arg);
    epicsMutexUnlock(exitPvtLock);

    return status;
}
/*
 * Task watchdog thread: while enabled, periodically scans the monitored
 * thread list; on each suspension-state change it notifies all
 * registered monitors, logs newly-suspended threads, and invokes the
 * thread's registered callback.  Exits (signalling exitEvent) when
 * twdCtl is set to twdctlExit.  Lock order is tLock then mLock.
 */
static void twdTask(void *arg)
{
    struct tNode *pt;
    struct mNode *pm;

    while (twdCtl != twdctlExit) {
        if (twdCtl == twdctlRun) {
            epicsMutexMustLock(tLock);
            pt = (struct tNode *)ellFirst(&tList);
            while (pt) {
                int susp = epicsThreadIsSuspended(pt->tid);
                /* Only react to state transitions, not steady state. */
                if (susp != pt->suspended) {
                    /* Tell every registered monitor about the change. */
                    epicsMutexMustLock(mLock);
                    pm = (struct mNode *)ellFirst(&mList);
                    while (pm) {
                        if (pm->funcs->notify) {
                            pm->funcs->notify(pm->usr, pt->tid, susp);
                        }
                        pm = (struct mNode *)ellNext(&pm->node);
                    }
                    epicsMutexUnlock(mLock);
                    if (susp) {
                        /* Newly suspended: log it and run the thread's
                           own callback, if any. */
                        char tName[40];
                        epicsThreadGetName(pt->tid, tName, sizeof(tName));
                        errlogPrintf("Thread %s (%p) suspended\n",
                            tName, (void *)pt->tid);
                        if (pt->callback) {
                            pt->callback(pt->usr);
                        }
                    }
                    pt->suspended = susp;
                }
                pt = (struct tNode *)ellNext(&pt->node);
            }
            epicsMutexUnlock(tLock);
        }
        /* Sleep between scans; loopEvent lets others wake us early. */
        epicsEventWaitWithTimeout(loopEvent, TASKWD_DELAY);
    }
    epicsEventSignal(exitEvent);
}
/*
 * Emit a TAP diagnostic line ("# <message>") built from the printf-style
 * format and arguments.  Output is serialized by the test lock and
 * flushed before returning.  Always returns 0.
 */
int testDiag(const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    epicsMutexMustLock(testLock);
    printf("# ");
    vprintf(fmt, args);
    putchar('\n');
    fflush(stdout);
    epicsMutexUnlock(testLock);
    va_end(args);

    return 0;
}
static int linuxDevFinal(void) { ELLNODE *cur, *next, *isrcur, *isrnext; osdPCIDevice *curdev=NULL; osdISR *isr; epicsMutexMustLock(pciLock); for(cur=ellFirst(&devices), next=cur ? ellNext(cur) : NULL; cur; cur=next, next=next ? ellNext(next) : NULL ) { curdev=CONTAINER(cur,osdPCIDevice,node); epicsMutexMustLock(curdev->devLock); for(isrcur=ellFirst(&curdev->isrs), isrnext=isrcur ? ellNext(isrcur) : NULL; isrcur; isrcur=isrnext, isrnext=isrnext ? ellNext(isrnext) : NULL ) { isr=CONTAINER(isrcur,osdISR,node); stopIsrThread(isr); ellDelete(&curdev->isrs,cur); free(isr); } close_uio(curdev); epicsMutexUnlock(curdev->devLock); epicsMutexDestroy(curdev->devLock); free(curdev); } epicsMutexUnlock(pciLock); epicsMutexDestroy(pciLock); return 0; }
/*
 * XY566 interrupt callback (runs from the callback thread, not ISR
 * context): drains the card's sample RAM into per-channel data arrays
 * according to the programmed sequence, resets the RAM/sequence
 * pointers, re-enables the sequence controller, and posts I/O intr
 * scan.  All hardware access is done under the card guard mutex.
 */
static void xycom566isrcb(CALLBACK *cb)
{
    xy566 *card;
    epicsUInt16 csr;
    epicsUInt16 datacnt[32];  /* per-channel sample counts this sweep */
    epicsUInt16 dcnt;
    size_t i, ch;

    callbackGetUser(card,cb);

    epicsMutexMustLock(card->guard);

    /* clear number of data points */
    memset(datacnt,0,sizeof(datacnt));

    /* number of samples taken */
    dcnt=READ16(card->base, XY566_RAM);
    if(dcnt>256){
        /* Somehow the sequence was restart w/o resetting
         * the pointer, or changed by an outside program
         */
        dcnt=256;   /* clamp to RAM capacity */
        printf("Data longer then expected\n");
    }

    /* Demultiplex samples: the low 5 bits of each sequence entry
       select the channel; SEQ_END marks the last sample. */
    for(i=0;i<dcnt;i++){
        ch=card->seq[i]&0x1f;
        card->data[ch][datacnt[ch]]=READ16(card->data_base, XY566_DOFF(i));
        datacnt[ch]++;
        if( card->seq[i]&SEQ_END )
            break;
    }

    /* reset pointers */
    WRITE16(card->base, XY566_RAM, 0);
    WRITE8(card->base, XY566_SEQ, 0);

    csr=READ16(card->base, XY566_CSR);
    /* enable sequence controller */
    csr|=XY566_CSR_SEQ;
    WRITE16(card->base, XY566_CSR, csr);

    scanIoRequest(card->seq_irq);

    epicsMutexUnlock(card->guard);
}