void dbChannelShow(dbChannel *chan, int level, const unsigned short indent) { long elems = chan->addr.no_elements; long felems = chan->final_no_elements; int count = ellCount(&chan->filters); int pre = ellCount(&chan->pre_chain); int post = ellCount(&chan->post_chain); printf("%*sChannel: '%s'\n", indent, "", chan->name); if (level > 0) { printf("%*sfield_type=%s (%d bytes), dbr_type=%s, %ld element%s", indent + 4, "", dbGetFieldTypeString(chan->addr.field_type), chan->addr.field_size, dbGetFieldTypeString(chan->addr.dbr_field_type), elems, elems == 1 ? "" : "s"); if (count) printf("\n%*s%d filter%s (%d pre eventq, %d post eventq)\n", indent + 4, "", count, count == 1 ? "" : "s", pre, post); else printf(", no filters\n"); if (level > 1) dbChannelFilterShow(chan, level - 2, indent + 8); if (count) { printf("%*sfinal field_type=%s (%dB), %ld element%s\n", indent + 4, "", dbGetFieldTypeString(chan->final_type), chan->final_field_size, felems, felems == 1 ? "" : "s"); } } }
static long drvEC1_ANT_io_report (int level) { ST_STD_device *pSTDdev; ST_MASTER *pMaster = get_master(); if(!pMaster) return 0; if(ellCount(pMaster->pList_DeviceTask)) { pSTDdev = (ST_STD_device*) ellFirst (pMaster->pList_DeviceTask); } else { epicsPrintf("Task not found\n"); return 0; } epicsPrintf("Total %d task(s) found\n",ellCount(pMaster->pList_DeviceTask)); if(level<1) return 0; while (pSTDdev) { if (level>2) { epicsPrintf(" Sampling Rate: %d/sec\n", pSTDdev->ST_Base.nSamplingRate ); } if (level>3) { epicsPrintf(" status of Buffer-Pool (reused-counter/number of data/buffer pointer)\n"); epicsPrintf(" "); epicsPrintf("\n"); } pSTDdev = (ST_STD_device*) ellNext(&pSTDdev->node); } return 0; }
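/*
 * Hedged sketch (not taken from the drivers above): the io_report and scan
 * routines in this collection share one EPICS libCom ELLLIST idiom -- embed an
 * ELLNODE as the first member of the per-device struct, guard with ellCount(),
 * then walk with ellFirst()/ellNext(). The exampleNode type and exampleReport()
 * name below are hypothetical.
 */
#include <stdio.h>
#include "ellLib.h"

typedef struct exampleNode {
    ELLNODE node;   /* first member, so the (exampleNode *) casts on ellFirst/ellNext are valid */
    int     value;
} exampleNode;

static void exampleReport(ELLLIST *plist)
{
    exampleNode *pnode;

    if (!plist || ellCount(plist) < 1) {
        printf("Task not found\n");
        return;
    }
    printf("Total %d task(s) found\n", ellCount(plist));
    for (pnode = (exampleNode *) ellFirst(plist);
         pnode;
         pnode = (exampleNode *) ellNext(&pnode->node)) {
        printf("  value: %d\n", pnode->value);
    }
}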
static void createAndOpen(const char *name, dbChannel**pch) { testOk(!!(*pch = dbChannelCreate(name)), "dbChannel %s created", name); testOk(!(dbChannelOpen(*pch)), "dbChannel opened"); testOk((ellCount(&(*pch)->pre_chain) == 0), "no filters in pre chain"); testOk((ellCount(&(*pch)->post_chain) == 0), "no filters in post chain"); }
epicsShareFunc void taskwdShow(int level) { struct tNode *pt; int mCount, fCount, tCount; char tName[40]; epicsMutexMustLock(mLock); mCount = ellCount(&mList); epicsMutexUnlock(mLock); epicsMutexMustLock(fLock); fCount = ellCount(&fList); epicsMutexUnlock(fLock); epicsMutexMustLock(tLock); tCount = ellCount(&tList); printf("%d monitors, %d threads registered, %d free nodes\n", mCount, tCount, fCount); if (level) { printf("%16.16s %9s %12s %12s %12s\n", "THREAD NAME", "STATE", "EPICS TID", "CALLBACK", "USR ARG"); pt = (struct tNode *)ellFirst(&tList); while (pt != NULL) { epicsThreadGetName(pt->tid, tName, sizeof(tName)); printf("%16.16s %9s %12p %12p %12p\n", tName, pt->suspended ? "Suspended" : "Ok ", (void *)pt->tid, (void *)pt->callback, pt->usr); pt = (struct tNode *)ellNext(&pt->node); } } epicsMutexUnlock(tLock); }
/* print list of stopped records, and breakpoints set in locksets */ long epicsShareAPI dbstat(void) { struct LS_LIST *pnode; struct BP_LIST *pbl; struct EP_LIST *pqe; epicsTimeStamp time; epicsMutexMustLock(bkpt_stack_sem); epicsTimeGetCurrent(&time); /* * Traverse list, reporting stopped records */ pnode = (struct LS_LIST *) ellFirst(&lset_stack); while (pnode != NULL) { if (pnode->precord != NULL) { printf("LSet: %lu Stopped at: %-28.28s #B: %5.5d T: %p\n", pnode->l_num, pnode->precord->name, ellCount(&pnode->bp_list), pnode->taskid); /* for each entrypoint detected, print out entrypoint statistics */ pqe = (struct EP_LIST *) ellFirst(&pnode->ep_queue); while (pqe != NULL) { double diff = epicsTimeDiffInSeconds(&time,&pqe->time); if (diff) { printf(" Entrypoint: %-28.28s #C: %5.5lu C/S: %7.1f\n", pqe->entrypoint->name, pqe->count, pqe->count / diff); } pqe = (struct EP_LIST *) ellNext((ELLNODE *)pqe); } } else { printf("LSet: %lu #B: %5.5d T: %p\n", pnode->l_num, ellCount(&pnode->bp_list), pnode->taskid); } /* * Print out breakpoints set in the lock set */ pbl = (struct BP_LIST *) ellFirst(&pnode->bp_list); while (pbl != NULL) { printf(" Breakpoint: %-28.28s", pbl->precord->name); /* display auto print flag */ if (pbl->precord->bkpt & BKPT_PRINT_MASK) printf(" (ap)\n"); else printf("\n"); pbl = (struct BP_LIST *) ellNext((ELLNODE *)pbl); } pnode = (struct LS_LIST *) ellNext((ELLNODE *)pnode); } epicsMutexUnlock(bkpt_stack_sem); return(0); }
static long drvACQ196_io_report(int level) { ST_STD_device *pSTDdev; ST_ACQ196 *pAcq196; ST_MASTER *pMaster = get_master(); if(!pMaster) return 0; if(ellCount(pMaster->pList_DeviceTask)) pSTDdev = (ST_STD_device*) ellFirst (pMaster->pList_DeviceTask); else { epicsPrintf("Task not found\n"); return 0; } epicsPrintf("Total %d task(s) found\n",ellCount(pMaster->pList_DeviceTask)); if(level<1) return 0; while(pSTDdev) { pAcq196 = (ST_ACQ196*)pSTDdev->pUser; epicsPrintf(" Task name: %s, vme_addr: 0x%X, status: 0x%x\n", pSTDdev->taskName, (unsigned int)pSTDdev, pSTDdev->StatusDev ); if(level>2) { epicsPrintf(" Sampling Rate: %d/sec\n", pSTDdev->ST_Base.nSamplingRate ); } if(level>3 ) { epicsPrintf(" status of Buffer-Pool (reused-counter/number of data/buffer pointer)\n"); epicsPrintf(" "); epicsPrintf("\n"); /* epicsPrintf(" callback time: %fusec\n", pAcq196->callbackTimeUsec); epicsPrintf(" SmplRate adj. counter: %d, adj. time: %fusec\n", pAcq196->adjCount_smplRate, pAcq196->adjTime_smplRate_Usec); epicsPrintf(" Gain adj. counter: %d, adj. time: %fusec\n", pAcq196->adjCount_Gain, pAcq196->adjTime_Gain_Usec); */ } /* if(ellCount(pAcq196->pchannelConfig)>0) print_channelConfig(pAcq196,level); */ pSTDdev = (ST_STD_device*) ellNext(&pSTDdev->node); } return 0; }
LOCAL void drvTPG262_scanTask(int param) { drvTPG262Config* pdrvTPG262Config = NULL; double drvTPG262_scanInterval; while(!pdrvTPG262_ellList || ellCount(pdrvTPG262_ellList) <1) { epicsThreadSleep(1.); } while(1) { pdrvTPG262Config = (drvTPG262Config*) ellFirst(pdrvTPG262_ellList); drvTPG262_scanInterval = pdrvTPG262Config->scanInterval; do { if(drvTPG262_scanInterval > pdrvTPG262Config->scanInterval) drvTPG262_scanInterval = pdrvTPG262Config->scanInterval; if(!pdrvTPG262Config->cbCount) { pdrvTPG262Config->cbCount++; pasynManager->queueRequest(pdrvTPG262Config->pasynTPG262User, asynQueuePriorityLow, pdrvTPG262Config->cbTimeout); } pdrvTPG262Config = (drvTPG262Config*) ellNext(&pdrvTPG262Config->node); } while(pdrvTPG262Config); epicsThreadSleep(drvTPG262_scanInterval); } }
static void notifyCallback(CALLBACK *pcallback) { processNotify *ppn = NULL; dbCommon *precord; notifyPvt *pnotifyPvt; callbackGetUser(ppn,pcallback); pnotifyPvt = (notifyPvt *) ppn->pnotifyPvt; precord = dbChannelRecord(ppn->chan); dbScanLock(precord); epicsMutexMustLock(pnotifyGlobal->lock); assert(precord->ppnr); assert(pnotifyPvt->state == notifyRestartCallbackRequested || pnotifyPvt->state == notifyUserCallbackRequested); assert(ellCount(&pnotifyPvt->waitList) == 0); if (pnotifyPvt->cancelWait) { if (pnotifyPvt->state == notifyRestartCallbackRequested) { restartCheck(precord->ppnr); } epicsEventSignal(pnotifyPvt->cancelEvent); epicsMutexUnlock(pnotifyGlobal->lock); dbScanUnlock(precord); return; } if(pnotifyPvt->state == notifyRestartCallbackRequested) { processNotifyCommon(ppn, precord); return; } /* All done. Clean up and call userCallback */ pnotifyPvt->state = notifyUserCallbackActive; assert(precord->ppn!=ppn); callDone(precord, ppn); }
static void drvMK80S_scanTask(int param) { kuDebug (kuTRACE, "[drvMK80S_scanTask] ... \n"); drvMK80SConfig* pdrvMK80SConfig = NULL; double drvMK80S_scanInterval; while(!pdrvMK80S_ellList || ellCount(pdrvMK80S_ellList) <1) { epicsThreadSleep(1.); } while(1) { pdrvMK80SConfig = (drvMK80SConfig*) ellFirst(pdrvMK80S_ellList); drvMK80S_scanInterval = pdrvMK80SConfig->scanInterval; do { if(drvMK80S_scanInterval > pdrvMK80SConfig->scanInterval) { drvMK80S_scanInterval = pdrvMK80SConfig->scanInterval; } if(!pdrvMK80SConfig->cbCount) { pdrvMK80SConfig->cbCount++; kuDebug (kuDEBUG, "[drvMK80S_scanTask] pasynManager->queueRequest() \n"); pasynManager->queueRequest(pdrvMK80SConfig->pasynMK80SUser, asynQueuePriorityLow, pdrvMK80SConfig->cbTimeout); } pdrvMK80SConfig = (drvMK80SConfig*) ellNext(&pdrvMK80SConfig->node); } while(pdrvMK80SConfig); epicsThreadSleep(drvMK80S_scanInterval); } kuDebug (kuCRI, "[drvMK80S_scanTask] end ...\n"); }
static char *dbOpenFile(DBBASE *pdbbase,const char *filename,FILE **fp) { ELLLIST *ppathList = (ELLLIST *)pdbbase->pathPvt; dbPathNode *pdbPathNode; char *fullfilename; *fp = 0; if (!filename) return 0; if (!ppathList || ellCount(ppathList) == 0 || strchr(filename, '/') || strchr(filename, '\\')) { *fp = fopen(filename, "r"); if (*fp && makeDbdDepends) fprintf(stdout, "%s:%s \n", makeDbdDepends, filename); return 0; } pdbPathNode = (dbPathNode *)ellFirst(ppathList); while (pdbPathNode) { fullfilename = dbMalloc(strlen(pdbPathNode->directory) + strlen(filename) + 2); strcpy(fullfilename, pdbPathNode->directory); strcat(fullfilename, "/"); strcat(fullfilename, filename); *fp = fopen(fullfilename, "r"); if (*fp && makeDbdDepends) fprintf(stdout, "%s:%s \n", makeDbdDepends, fullfilename); free((void *)fullfilename); if (*fp) return pdbPathNode->directory; pdbPathNode = (dbPathNode *)ellNext(&pdbPathNode->node); } return 0; }
static void yyerrorAbort(char *str) { yyerror(str); yyAbort = TRUE; while (ellCount(&tempList)) popFirstTemp(); }
static void harnessExit(void *dummy) { epicsTimeStamp ended; int Faulty; if (!Harness) return; epicsTimeGetCurrent(&ended); printf("\n\n EPICS Test Harness Results" "\n ==========================\n\n"); Faulty = ellCount(&faults); if (!Faulty) printf("All tests successful.\n"); else { int Failures = 0; testFailure *f; printf("Failing Program Tests Faults\n" "---------------------------------------\n"); while ((f = (testFailure *)ellGet(&faults))) { Failures += f->failures; printf("%-25s %5d %5d\n", f->name, f->tests, f->failures); if (f->skips) printf("%d subtests skipped\n", f->skips); free(f); } printf("\nFailed %d/%d test programs. %d/%d subtests failed.\n", Faulty, Programs, Failures, Tests); } printf("Programs=%d, Tests=%d, %.0f wallclock secs\n\n", Programs, Tests, epicsTimeDiffInSeconds(&ended, &started)); }
static char *msgbufGetFree(int noConsoleMessage) { msgNode *pnextSend; if (epicsMutexLock(pvtData.msgQueueLock) != epicsMutexLockOK) return 0; if ((ellCount(&pvtData.msgQueue) == 0) && pvtData.missedMessages) { int nchar; pnextSend = msgbufGetNode(); nchar = sprintf(pnextSend->message, "errlog: %d messages were discarded\n", pvtData.missedMessages); pnextSend->length = nchar + 1; pvtData.missedMessages = 0; ellAdd(&pvtData.msgQueue, &pnextSend->node); } pvtData.pnextSend = pnextSend = msgbufGetNode(); if (pnextSend) { pnextSend->noConsoleMessage = noConsoleMessage; pnextSend->length = 0; return pnextSend->message; /* NB: msgQueueLock is still locked */ } ++pvtData.missedMessages; epicsMutexUnlock(pvtData.msgQueueLock); return 0; }
static msgNode *msgbufGetNode(void) { char *pbuffer = pvtData.pbuffer; char *pnextFree; msgNode *pnextSend; if (ellCount(&pvtData.msgQueue) == 0 ) { pnextFree = pbuffer; /* Reset if empty */ } else { msgNode *pfirst = (msgNode *)ellFirst(&pvtData.msgQueue); msgNode *plast = (msgNode *)ellLast(&pvtData.msgQueue); char *plimit = pbuffer + pvtData.buffersize; pnextFree = plast->message + adjustToWorstCaseAlignment(plast->length); if (pfirst > plast) { plimit = (char *)pfirst; } else if (pnextFree + pvtData.msgNeeded > plimit) { pnextFree = pbuffer; /* Hit end, wrap to start */ plimit = (char *)pfirst; } if (pnextFree + pvtData.msgNeeded > plimit) { return 0; /* No room */ } } pnextSend = (msgNode *)pnextFree; pnextSend->message = pnextFree + sizeof(msgNode); pnextSend->length = 0; return pnextSend; }
void dbNotifyCompletion(dbCommon *precord) { processNotify *ppn = precord->ppn; notifyPvt *pnotifyPvt; epicsMutexMustLock(pnotifyGlobal->lock); assert(ppn); assert(precord->ppnr); pnotifyPvt = (notifyPvt *) ppn->pnotifyPvt; if (pnotifyPvt->state != notifyRestartInProgress && pnotifyPvt->state != notifyProcessInProgress) { epicsMutexUnlock(pnotifyGlobal->lock); return; } ellSafeDelete(&pnotifyPvt->waitList, &precord->ppnr->waitNode); if ((ellCount(&pnotifyPvt->waitList) != 0)) { restartCheck(precord->ppnr); } else if (pnotifyPvt->state == notifyProcessInProgress) { pnotifyPvt->state = notifyUserCallbackRequested; restartCheck(precord->ppnr); callbackRequest(&pnotifyPvt->callback); } else if(pnotifyPvt->state == notifyRestartInProgress) { pnotifyPvt->state = notifyRestartCallbackRequested; callbackRequest(&pnotifyPvt->callback); } else { cantProceed("dbNotifyCompletion illegal state"); } epicsMutexUnlock(pnotifyGlobal->lock); }
void dbProcessNotifyExit(void) { assert(ellCount(&pnotifyGlobal->freeList)==0); epicsMutexDestroy(pnotifyGlobal->lock); free(pnotifyGlobal); pnotifyGlobal = NULL; }
int drvM6802_set_DAQstop(drvM6802_taskConfig *ptaskConfig) { drvBufferConfig_Node *pbufferNode = NULL; bufferingThreadQueueData queueData; drvM6802_controlThreadConfig *pfpdpThreadConfig = (drvM6802_controlThreadConfig*) ptaskConfig->pfpdpThreadConfig; if ( !ozSetADsFIFOreset( ptaskConfig) ) { epicsPrintf("\n>>> ozStopADC : ozSetADsFIFOreset...failed! \n"); return ERROR; } /* epicsThreadSleep(0.1); */ ozSetTriggerReset(ptaskConfig); if( stopFpdp() != OK ) printf(" can't stop FPDP \n"); /* epicsThreadSleep(0.1); */ pbufferNode = (drvBufferConfig_Node *)ellFirst(pdrvBufferConfig->pbufferList); ellDelete(pdrvBufferConfig->pbufferList, &pbufferNode->node); pbufferNode->nCnt = ptaskConfig->cnt_DMAcallback; pbufferNode->nStop = 1; printf("\nFPDP >>> DMA done count= %d\n", ptaskConfig->cnt_DMAcallback); epicsPrintf("DAQStop after: ellCnt %d\n", ellCount(pdrvBufferConfig->pbufferList) ); queueData.pNode = pbufferNode; epicsMessageQueueSend(pfpdpThreadConfig->threadQueueId, (void*) &queueData, sizeof(bufferingThreadQueueData)); /* epicsPrintf("\n>>>Last snd ID: %d, DMA cnt: %d\n", (int)pfpdpThreadConfig->threadQueueId, pbufferNode->nCnt); */ return OK; }
void epicsShareAPI gphDumpFP(FILE *fp, gphPvt *pgphPvt) { unsigned int empty = 0; ELLLIST **paplist; int h; if (pgphPvt == NULL) return; fprintf(fp, "Hash table has %d buckets", pgphPvt->size); paplist = pgphPvt->paplist; for (h = 0; h < pgphPvt->size; h++) { ELLLIST *plist = paplist[h]; GPHENTRY *pgphNode; int i = 0; if (plist == NULL) { empty++; continue; } pgphNode = (GPHENTRY *) ellFirst(plist); fprintf(fp, "\n [%3d] %3d ", h, ellCount(plist)); while (pgphNode) { if (!(++i % 3)) fprintf(fp, "\n "); fprintf(fp, " %s %p", pgphNode->name, pgphNode->pvtid); pgphNode = (GPHENTRY *) ellNext((ELLNODE*)pgphNode); } } fprintf(fp, "\n%u buckets empty.\n", empty); }
static void doResolveLinks(dbRecordType *pdbRecordType, dbCommon *precord, void *user) { dbFldDes **papFldDes = pdbRecordType->papFldDes; short *link_ind = pdbRecordType->link_ind; int j; /* For all the links in the record type... */ for (j = 0; j < pdbRecordType->no_links; j++) { dbFldDes *pdbFldDes = papFldDes[link_ind[j]]; DBLINK *plink = (DBLINK *)((char *)precord + pdbFldDes->offset); if (ellCount(&precord->rdes->devList) > 0 && (strcmp(pdbFldDes->name, "INP") == 0 || strcmp(pdbFldDes->name, "OUT") == 0)) { devSup *pdevSup = dbDTYPtoDevSup(pdbRecordType, precord->dtyp); if (pdevSup) { struct dsxt *pdsxt = pdevSup->pdsxt; if (pdsxt && pdsxt->add_record) { pdsxt->add_record(precord); } } } if (plink->type == PV_LINK) dbInitLink(precord, plink, pdbFldDes->field_type); } }
void epicsThreadPoolReport(epicsThreadPool *pool, FILE *fd) { ELLNODE *cur; epicsMutexMustLock(pool->guard); fprintf(fd, "Thread Pool with %u/%u threads\n" " running %d jobs with %u threads\n", pool->threadsRunning, pool->conf.maxThreads, ellCount(&pool->jobs), pool->threadsAreAwake); if (pool->pauseadd) fprintf(fd, " Inhibit queueing\n"); if (pool->pauserun) fprintf(fd, " Pause workers\n"); if (pool->shutdown) fprintf(fd, " Shutdown in progress\n"); for (cur = ellFirst(&pool->jobs); cur; cur = ellNext(cur)) { epicsJob *job = CONTAINER(cur, epicsJob, jobnode); fprintf(fd, " job %p func: %p, arg: %p ", job, job->func, job->arg); if (job->queued) fprintf(fd, "Queued "); if (job->running) fprintf(fd, "Running "); if (job->freewhendone) fprintf(fd, "Free "); fprintf(fd, "\n"); } epicsMutexUnlock(pool->guard); }
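/*
 * Hedged sketch: epicsThreadPoolReport() above iterates with the CONTAINER()
 * macro (dbDefs.h), which recovers the enclosing struct from an ELLNODE that
 * is not necessarily the first member, unlike the plain casts used elsewhere
 * in this collection. A minimal illustration; the exampleJob type and field
 * names are hypothetical.
 */
#include <stdio.h>
#include "dbDefs.h"
#include "ellLib.h"

typedef struct exampleJob {
    int     id;
    ELLNODE jobnode;   /* deliberately not the first member, so a plain cast would be wrong */
} exampleJob;

static void exampleJobReport(ELLLIST *pjobs, FILE *fd)
{
    ELLNODE *cur;

    for (cur = ellFirst(pjobs); cur; cur = ellNext(cur)) {
        exampleJob *job = CONTAINER(cur, exampleJob, jobnode);
        fprintf(fd, "  job %d at %p\n", job->id, (void *)job);
    }
}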
LOCAL long drvM6802_io_report(int level) { drvM6802_taskConfig *ptaskConfig; if(!pdrvM6802Config) return 0; if(ellCount(pdrvM6802Config->ptaskList)) ptaskConfig = (drvM6802_taskConfig*) ellFirst (pdrvM6802Config->ptaskList); else { epicsPrintf("Task not found\n"); return 0; } epicsPrintf("Total %d task(s) found\n",ellCount(pdrvM6802Config->ptaskList)); if(level<1) return 0; while(ptaskConfig) { epicsPrintf(" Task name: %s, vme_addr: 0x%X, status: 0x%x, connected channels: %d\n", ptaskConfig->taskName, ptaskConfig->vme_addr, ptaskConfig->taskStatus, ellCount(ptaskConfig->pchannelConfig)); if(level>2) { epicsPrintf(" Sampling Rate: %d/sec\n", ptaskConfig->samplingRate ); } if(level>3 ) { epicsPrintf(" status of Buffer-Pool (reused-counter/number of data/buffer pointer)\n"); epicsPrintf(" "); epicsPrintf("\n"); epicsPrintf(" callback time: %fusec\n", ptaskConfig->callbackTimeUsec); epicsPrintf(" SmplRate adj. counter: %d, adj. time: %fusec\n", ptaskConfig->adjCount_smplRate, ptaskConfig->adjTime_smplRate_Usec); epicsPrintf(" Gain adj. counter: %d, adj. time: %fusec\n", ptaskConfig->adjCount_Gain, ptaskConfig->adjTime_Gain_Usec); } /* if(ellCount(ptaskConfig->pchannelConfig)>0) print_channelConfig(ptaskConfig,level); */ ptaskConfig = (drvM6802_taskConfig*) ellNext(&ptaskConfig->node); } return 0; }
LOCAL long drvTPG262_io_report(int level) { drvTPG262Config* pdrvTPG262Config = NULL; if(!pdrvTPG262_ellList) return 0; epicsPrintf("Total %d module(s) found\n", ellCount(pdrvTPG262_ellList)); if(ellCount(pdrvTPG262_ellList)<1) return 0; pdrvTPG262Config = (drvTPG262Config*) ellFirst(pdrvTPG262_ellList); do { drvTPG262_reportPrint(pdrvTPG262Config, level); pdrvTPG262Config = (drvTPG262Config*) ellNext(&pdrvTPG262Config->node); } while(pdrvTPG262Config); return 0; }
static long drvMK80S_io_report(int level) { drvMK80SConfig* pdrvMK80SConfig = NULL; if(!pdrvMK80S_ellList) return 0; kuDebug (kuINFO, "Total %d module(s) found\n", ellCount(pdrvMK80S_ellList)); if(ellCount(pdrvMK80S_ellList)<1) return 0; pdrvMK80SConfig = (drvMK80SConfig*) ellFirst(pdrvMK80S_ellList); do { drvMK80S_reportPrint(pdrvMK80SConfig, level); pdrvMK80SConfig = (drvMK80SConfig*) ellNext(&pdrvMK80SConfig->node); } while(pdrvMK80SConfig); return 0; }
static long drvRTCORE_io_report(int level) { ST_STD_device *pSTDdev; ST_RTcore *pRTcore; ST_MASTER *pMaster = get_master(); if(!pMaster) return 0; if(ellCount(pMaster->pList_DeviceTask)) pSTDdev = (ST_STD_device*) ellFirst (pMaster->pList_DeviceTask); else { epicsPrintf("Task not found\n"); return 0; } epicsPrintf("Total %d task(s) found\n",ellCount(pMaster->pList_DeviceTask)); if(level<1) return 0; while(pSTDdev) { pRTcore = (ST_RTcore *)pSTDdev->pUser; if(level>2) { epicsPrintf(" Sampling Rate: %d/sec\n", pSTDdev->ST_Base.nSamplingRate ); } if(level>3 ) { epicsPrintf(" status of Buffer-Pool (reused-counter/number of data/buffer pointer)\n"); epicsPrintf(" "); epicsPrintf("\n"); /* epicsPrintf(" callback time: %fusec\n", pRTcore->callbackTimeUsec); epicsPrintf(" SmplRate adj. counter: %d, adj. time: %fusec\n", pRTcore->adjCount_smplRate, pRTcore->adjTime_smplRate_Usec); epicsPrintf(" Gain adj. counter: %d, adj. time: %fusec\n", pRTcore->adjCount_Gain, pRTcore->adjTime_Gain_Usec); */ } pSTDdev = (ST_STD_device*) ellNext(&pSTDdev->node); } return 0; }
void postEvent(event_list *pel) { int prio; if (scanCtl != ctlRun) return; if (!pel) return; for (prio = 0; prio < NUM_CALLBACK_PRIORITIES; prio++) { if (ellCount(&pel->scan_list[prio].list) >0) callbackRequest(&pel->callback[prio]); } }
static void createAndOpen(const char *chan, const char *json, const char *type, dbChannel**pch, short no) { ELLNODE *node; char name[80]; strncpy(name, chan, sizeof(name)-1); strncat(name, json, sizeof(name)-strlen(name)-1); testOk(!!(*pch = dbChannelCreate(name)), "dbChannel with plugin arr %s created", type); testOk((ellCount(&(*pch)->filters) == no), "channel has %d filter(s) in filter list", no); testOk(!(dbChannelOpen(*pch)), "dbChannel with plugin arr opened"); node = ellFirst(&(*pch)->pre_chain); (void) CONTAINER(node, chFilter, pre_node); testOk((ellCount(&(*pch)->pre_chain) == 0), "arr has no filter in pre chain"); node = ellFirst(&(*pch)->post_chain); (void) CONTAINER(node, chFilter, post_node); testOk((ellCount(&(*pch)->post_chain) == no), "arr has %d filter(s) in post chain", no); }
static void dbRecordBody(void) { DBENTRY *pdbentry; if(duplicate) { duplicate = FALSE; return; } pdbentry = (DBENTRY *)popFirstTemp(); if(ellCount(&tempList)) yyerrorAbort("dbRecordBody: tempList not empty"); dbFreeEntry(pdbentry); }
void casStatsFetch ( unsigned *pChanCount, unsigned *pCircuitCount ) { LOCK_CLIENTQ; { int circuitCount = ellCount ( &clientQ ); if ( circuitCount < 0 ) { *pCircuitCount = 0; } else { *pCircuitCount = (unsigned) circuitCount; } *pChanCount = rsrvChannelCount; } UNLOCK_CLIENTQ; }
static void dbMenuBody(void) { dbMenu *pnewMenu; dbMenu *pMenu; int nChoice; int i; GPHENTRY *pgphentry; if(duplicate) { duplicate = FALSE; return; } pnewMenu = (dbMenu *)popFirstTemp(); pnewMenu->nChoice = nChoice = ellCount(&tempList)/2; pnewMenu->papChoiceName = dbCalloc(pnewMenu->nChoice,sizeof(char *)); pnewMenu->papChoiceValue = dbCalloc(pnewMenu->nChoice,sizeof(char *)); for(i=0; i<nChoice; i++) { pnewMenu->papChoiceName[i] = (char *)popFirstTemp(); pnewMenu->papChoiceValue[i] = (char *)popFirstTemp(); } if(ellCount(&tempList)) yyerrorAbort("dbMenuBody: tempList not empty"); /* Add menu in sorted order */ pMenu = (dbMenu *)ellFirst(&pdbbase->menuList); while(pMenu && strcmp(pMenu->name,pnewMenu->name) >0 ) pMenu = (dbMenu *)ellNext(&pMenu->node); if(pMenu) ellInsert(&pdbbase->menuList,ellPrevious(&pMenu->node),&pnewMenu->node); else ellAdd(&pdbbase->menuList,&pnewMenu->node); pgphentry = gphAdd(pdbbase->pgpHash,pnewMenu->name,&pdbbase->menuList); if(!pgphentry) { yyerrorAbort("gphAdd failed"); } else { pgphentry->userPvt = pnewMenu; } }
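/*
 * Hedged sketch of the sorted-insert idiom dbMenuBody() uses above: walk to
 * the first node that should follow the new one, then ellInsert() after that
 * node's ellPrevious() (a NULL previous means insert at the head), falling
 * back to ellAdd() at the tail. The sortNode type and the ascending integer
 * comparison here are illustrative only, not the menu-name ordering above.
 */
#include "ellLib.h"

typedef struct sortNode {
    ELLNODE node;
    int     key;
} sortNode;

static void exampleSortedInsert(ELLLIST *plist, sortNode *pnew)
{
    sortNode *pcur = (sortNode *) ellFirst(plist);

    /* find the first existing node whose key is >= the new key */
    while (pcur && pcur->key < pnew->key)
        pcur = (sortNode *) ellNext(&pcur->node);
    if (pcur)
        ellInsert(plist, ellPrevious(&pcur->node), &pnew->node);  /* insert before pcur */
    else
        ellAdd(plist, &pnew->node);                               /* largest key: append */
}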
static void dbBreakHead(char *name) { brkTable *pbrkTable; GPHENTRY *pgphentry; pgphentry = gphFind(pdbbase->pgpHash,name,&pdbbase->bptList); if(pgphentry) { duplicate = TRUE; return; } pbrkTable = dbCalloc(1,sizeof(brkTable)); pbrkTable->name = epicsStrDup(name); if(ellCount(&tempList)) yyerrorAbort("dbBreakHead:tempList not empty"); allocTemp(pbrkTable); }