/*
 * One-shot initialisation of the errlog facility (run via epicsThreadOnce).
 * Copies the configured buffer/message sizes out of *arg, creates all the
 * locks/events and the circular message buffer, then spawns the "errlog"
 * worker thread.  errlogInitFailed stays TRUE unless the thread started.
 */
static void errlogInitPvt(void *arg)
{
    struct initArgs *pconfig = (struct initArgs *) arg;
    epicsThreadId tid;

    pvtData.errlogInitFailed = TRUE;    /* cleared below only on success */
    pvtData.buffersize = pconfig->bufsize;
    pvtData.maxMsgSize = pconfig->maxMsgSize;
    /* Space needed per queued message: payload + node header, aligned. */
    pvtData.msgNeeded = adjustToWorstCaseAlignment(pvtData.maxMsgSize +
        sizeof(msgNode));
    ellInit(&pvtData.listenerList);
    ellInit(&pvtData.msgQueue);
    pvtData.toConsole = TRUE;           /* default: echo messages to stderr */
    pvtData.console = stderr;
    pvtData.waitForWork = epicsEventMustCreate(epicsEventEmpty);
    pvtData.listenerLock = epicsMutexMustCreate();
    pvtData.msgQueueLock = epicsMutexMustCreate();
    pvtData.waitForFlush = epicsEventMustCreate(epicsEventEmpty);
    pvtData.flush = epicsEventMustCreate(epicsEventEmpty);
    pvtData.flushLock = epicsMutexMustCreate();
    pvtData.waitForExit = epicsEventMustCreate(epicsEventEmpty);
    pvtData.pbuffer = callocMustSucceed(1, pvtData.buffersize, "errlogInitPvt");
    errSymBld(); /* Better not to do this lazily... */
    tid = epicsThreadCreate("errlog", epicsThreadPriorityLow,
        epicsThreadGetStackSize(epicsThreadStackSmall),
        (EPICSTHREADFUNC)errlogThread, 0);
    if (tid) {
        pvtData.errlogInitFailed = FALSE;
    }
}
/*
 * One-shot initialisation of the generalTime private data (epicsThreadOnce
 * callback; "dummy" is unused).  Creates the provider lists and their locks.
 *
 * Fix: the source text contained the corrupted calls "ellInit(>Pvt....)".
 * The lists belong to gtPvt, matching the gtPvt.*Lock assignments beside them.
 */
static void generalTime_InitOnce(void *dummy)
{
    ellInit(&gtPvt.timeProviders);
    gtPvt.timeListLock = epicsMutexMustCreate();
    ellInit(&gtPvt.eventProviders);
    gtPvt.eventListLock = epicsMutexMustCreate();
}
/* Program entry func */
/* Generated by the SNL compiler; the "# line" directives map statements back
 * to the original state-notation source ../pvGet.st. */
static void seqg_entry(SS_ID seqg_env)
{
# line 25 "../pvGet.st"
    seq_test_init(5 * 4 * 3);
# line 26 "../pvGet.st"
    mutex = epicsMutexMustCreate();
}
int myBoard_findIndex( const char *devicePath ) { int i; fprintf( stderr, "try to find %s\n", devicePath ); for (i = 0; i < numBoard; ++i) { if (!strcmp( myHardwareBoardList[i].devicePath, devicePath )) { fprintf( stderr, "found %s at board[%d]\n", devicePath, i ); return i; } } if (numBoard >= MAX_BOARD) { fprintf( stderr, "exceed MAX_BOARD\n" ); return -1; } /* create new one: i==numBoard */ fprintf( stderr, "create new %s at board[%d]\n", devicePath, i ); strcpy( myHardwareBoardList[i].devicePath, devicePath ); mySoftwareBoardList[i].boardLock = epicsMutexMustCreate(); scanIoInit( &mySoftwareBoardList[i].ioScanPvt ); myHardwareBoardList[i].analogReference = AREF_DIFF; mySoftwareBoardList[i].numberOfActiveAnalogInputs = MAX_CHANNEL_PER_BOARD / 2; if (openBoard( i )) { return -1; } ++numBoard; return i; }
/*
 * Idempotent initialisation of the putNotify global state: the first call
 * allocates it, creates its lock and empties the free list; later calls
 * are no-ops.
 */
void epicsShareAPI dbPutNotifyInit(void)
{
    if (pnotifyGlobal != NULL)
        return;

    pnotifyGlobal = dbCalloc(1, sizeof(notifyGlobal));
    pnotifyGlobal->lock = epicsMutexMustCreate();
    ellInit(&pnotifyGlobal->freeList);
}
/* Starts "mcnt" jobs in a pool with initial and max * thread counts "icnt" and "mcnt". * The test ensures that all jobs run in parallel. * "cork" checks the function of pausing the run queue * with epicsThreadPoolQueueRun */ static void postjobs(size_t icnt, size_t mcnt, int cork) { size_t i; epicsThreadPool *pool; countPriv *priv=callocMustSucceed(1, sizeof(*priv), "postjobs priv alloc"); priv->guard=epicsMutexMustCreate(); priv->done=epicsEventMustCreate(epicsEventEmpty); priv->allrunning=epicsEventMustCreate(epicsEventEmpty); priv->count=mcnt; priv->job=callocMustSucceed(mcnt, sizeof(*priv->job), "postjobs job array"); testDiag("postjobs(%lu,%lu)", (unsigned long)icnt, (unsigned long)mcnt); { epicsThreadPoolConfig conf; epicsThreadPoolConfigDefaults(&conf); conf.initialThreads=icnt; conf.maxThreads=mcnt; testOk1((pool=epicsThreadPoolCreate(&conf))!=NULL); if(!pool) return; } if(cork) epicsThreadPoolControl(pool, epicsThreadPoolQueueRun, 0); for(i=0; i<mcnt; i++) { testDiag("i=%lu", (unsigned long)i); priv->job[i] = epicsJobCreate(pool, &countjob, priv); testOk1(priv->job[i]!=NULL); testOk1(epicsJobQueue(priv->job[i])==0); } if(cork) { /* no jobs should have run */ epicsMutexMustLock(priv->guard); testOk1(priv->count==mcnt); epicsMutexUnlock(priv->guard); epicsThreadPoolControl(pool, epicsThreadPoolQueueRun, 1); } testDiag("Waiting for all jobs to start"); epicsEventMustWait(priv->allrunning); testDiag("Stop all"); epicsEventSignal(priv->done); for(i=0; i<mcnt; i++) { testDiag("i=%lu", (unsigned long)i); epicsJobDestroy(priv->job[i]); } epicsThreadPoolDestroy(pool); epicsMutexDestroy(priv->guard); epicsEventDestroy(priv->allrunning); epicsEventDestroy(priv->done); free(priv->job); free(priv); }
/*
 * Pass-0 record initialisation: attach record support, create the monitor
 * lock and list, set the initial UDF severity, bind device support, and
 * finally invoke the record-support init_record (pass 0).
 */
static void doInitRecord0(dbRecordType *pdbRecordType, dbCommon *precord,
    void *user)
{
    struct rset *prset = pdbRecordType->prset;
    devSup *pdevSup;

    if (!prset) return;         /* unlikely */

    precord->rset = prset;
    precord->rdes = pdbRecordType;
    precord->mlok = epicsMutexMustCreate();
    ellInit(&precord->mlis);

    /* Reset the process active field */
    precord->pact = FALSE;

    /* Initial UDF severity */
    if (precord->udf && precord->stat == UDF_ALARM)
        precord->sevr = precord->udfs;

    /* Init DSET NOTE that result may be NULL */
    pdevSup = dbDTYPtoDevSup(pdbRecordType, precord->dtyp);
    precord->dset = pdevSup ? pdevSup->pdset : NULL;

    if (prset->init_record)
        prset->init_record(precord, 0);
}
/*
 * Build one periodic scan list per "menuScan" periodic choice.  Each choice
 * string is parsed as "<number> [unit]" where the unit may be seconds
 * (default), minutes, hours or Hertz; unparsable choices fall back to a
 * period equal to the list index.  Also warns when a requested rate cannot
 * be realised given the OS sleep quantum.
 */
static void initPeriodic(void)
{
    dbMenu *pmenu = dbFindMenu(pdbbase, "menuScan");
    double quantum = epicsThreadSleepQuantum();
    int i;

    if (!pmenu) {
        errlogPrintf("initPeriodic: menuScan not present\n");
        return;
    }
    /* Choices before SCAN_1ST_PERIODIC are non-periodic (Passive, Event...) */
    nPeriodic = pmenu->nChoice - SCAN_1ST_PERIODIC;
    papPeriodic = dbCalloc(nPeriodic, sizeof(periodic_scan_list*));
    periodicTaskId = dbCalloc(nPeriodic, sizeof(void *));
    for (i = 0; i < nPeriodic; i++) {
        periodic_scan_list *ppsl = dbCalloc(1, sizeof(periodic_scan_list));
        const char *choice = pmenu->papChoiceValue[i + SCAN_1ST_PERIODIC];
        double number;
        char *unit;
        int status = epicsParseDouble(choice, &number, &unit);

        ppsl->scan_list.lock = epicsMutexMustCreate();
        ellInit(&ppsl->scan_list.list);
        ppsl->name = choice;
        if (status || number == 0) {
            errlogPrintf("initPeriodic: Bad menuScan choice '%s'\n", choice);
            ppsl->period = i;   /* arbitrary fallback: the list index */
        }
        else if (!*unit ||
                 !epicsStrCaseCmp(unit, "second") ||
                 !epicsStrCaseCmp(unit, "seconds")) {
            ppsl->period = number;
        }
        else if (!epicsStrCaseCmp(unit, "minute") ||
                 !epicsStrCaseCmp(unit, "minutes")) {
            ppsl->period = number * 60;
        }
        else if (!epicsStrCaseCmp(unit, "hour") ||
                 !epicsStrCaseCmp(unit, "hours")) {
            ppsl->period = number * 60 * 60;
        }
        else if (!epicsStrCaseCmp(unit, "Hz") ||
                 !epicsStrCaseCmp(unit, "Hertz")) {
            ppsl->period = 1 / number;
        }
        else {
            errlogPrintf("initPeriodic: Bad menuScan choice '%s'\n", choice);
            ppsl->period = i;
        }
        /* Warn when the period is not (close to) a multiple of the sleep
         * quantum.  NOTE(review): when period < quantum, floor(number) is 0
         * and the division yields inf, which triggers the warning -- appears
         * intentional, but confirm. */
        number = ppsl->period / quantum;
        if ((ppsl->period < 2 * quantum) ||
            (number / floor(number) > 1.1)) {
            errlogPrintf("initPeriodic: Scan rate '%s' is not achievable.\n",
                choice);
        }
        ppsl->scanCtl = ctlPause;   /* lists start paused until scanRun */
        ppsl->loopEvent = epicsEventMustCreate(epicsEventEmpty);

        papPeriodic[i] = ppsl;
    }
}
/*
 * Initialise the breakpoint stack.
 * Idempotent: only the first call creates the stack guard mutex and
 * resets the lock-set stack depth.
 */
void epicsShareAPI dbBkptInit(void)
{
    if (bkpt_stack_sem)
        return;

    bkpt_stack_sem = epicsMutexMustCreate();
    lset_stack_count = 0;
}
/*
 * epicsThreadOnce callback: create the NTP shared-memory lock and arm the
 * ntpsetup callback at low priority.
 *
 * Fix: an unnamed parameter ("void*") is not valid C before C23; the
 * argument is named (and ignored) so the file compiles as C.
 */
static void ntpshminit(void *unused)
{
    (void)unused;

    ntpShm.ntplock = epicsMutexMustCreate();
    callbackSetPriority(priorityLow, &ntpShm.ntpcb);
    callbackSetCallback(&ntpsetup, &ntpShm.ntpcb);
    callbackSetUser(0, &ntpShm.ntpcb);
}
/*
 * Initialise an async queue: create its lock and allocate a zeroed buffer
 * of "length" record-pointer slots.
 *
 * Fix: the original malloc() result was passed to memset() unchecked (a
 * crash on OOM); callocMustSucceed() zero-fills, checks the size product,
 * and aborts cleanly on allocation failure, matching the conventions used
 * elsewhere in this codebase.
 */
void asyncQueueCreate( QUEUE_T *p, size_t length )
{
    p->lock = epicsMutexMustCreate();
    p->buffer = callocMustSucceed( length, sizeof(dbCommon*),
        "asyncQueueCreate" );
    p->buffer_size = length;
    p->used = 0;
    p->head = 0;
}
static void exitPvtOnceFunc(void *pParm) { exitPvtPerThread = epicsThreadPrivateCreate (); assert ( exitPvtPerThread ); pExitPvtPerProcess = createExitPvt (); assert ( pExitPvtPerProcess ); exitPvtLock = epicsMutexMustCreate (); }
/* init routine */
/*
 * Create and register an asyn port driver for the F-zoom device.
 * Allocates the driver private structure, connects to the underlying
 * octet (serial) port, registers this port with asynManager and exports
 * the standard common/drvUser/int32 interfaces.
 * Returns asynSuccess, or asynError on any connect/registration failure.
 * NOTE(review): on failure the allocated pvt structure and strings are not
 * freed -- acceptable for a once-at-boot configure routine, but confirm.
 */
int drvFzoomAsynConfigure(const char *portName, const char *octetPortName)
{
    FZOOM_ID pfzoomPvt;
    asynStatus status;

    /* protocol packs registers as 2-byte (16-bit) words */
    assert(sizeof(unsigned short) == 2);

    pfzoomPvt = callocMustSucceed(1, sizeof(*pfzoomPvt),
        "drvFzoomAsynConfigure");
    pfzoomPvt->portName = epicsStrDup(portName);
    pfzoomPvt->octetPortName = epicsStrDup(octetPortName);
    pfzoomPvt->mutexId = epicsMutexMustCreate();

    /* initialise position, error threshold */
    pfzoomPvt->devInfo.zoomPosition = 1;
    pfzoomPvt->devInfo.posThreshold = 1010;
    pfzoomPvt->devInfo.timeoutDelay = 1025;

    /* connect to asyn octet port with asynOctetSyncIO */
    status = pasynOctetSyncIO->connect(octetPortName, 0,
        &pfzoomPvt->pasynUserOctet, 0);
    if (status != asynSuccess) {
        errlogPrintf("%s::drvFzoomAsynConfigure port %s"
            " can't connect to Octet port %s.\n",
            driver, portName, octetPortName);
        return asynError;
    }

    /* Create asynUser for asynTrace */
    pfzoomPvt->pasynUserTrace = pasynManager->createAsynUser(0, 0);
    pfzoomPvt->pasynUserTrace->userPvt = pfzoomPvt;

    status = pasynManager->registerPort(pfzoomPvt->portName,
        ASYN_CANBLOCK,
        1, /* autoconnect */
        0, /* medium priority */
        0); /* default stack size */
    if (status != asynSuccess) {
        errlogPrintf("%s::drvFzoomAsynConfigure port %s"
            "%s:: can't register port\n",
            driver, pfzoomPvt->portName, pfzoomPvt->octetPortName);
        return(asynError);
    }

    /* Create asyn interfaces and register with asynManager */
    pfzoomPvt->asynStdInterfaces.common.pinterface = (void *)&drvCommon;
    pfzoomPvt->asynStdInterfaces.drvUser.pinterface = (void *)&drvUser;
    pfzoomPvt->asynStdInterfaces.int32.pinterface = (void *)&drvInt32;
    status = pasynStandardInterfacesBase->initialize(pfzoomPvt->portName,
        &pfzoomPvt->asynStdInterfaces,
        pfzoomPvt->pasynUserTrace, pfzoomPvt);
    if (status != asynSuccess) {
        errlogPrintf("%s::drvFzoomAsynConfigure port %s"
            " can't register standard interfaces: %s\n",
            driver, pfzoomPvt->portName,
            pfzoomPvt->pasynUserTrace->errorMessage);
        return(asynError);
    }

    return (asynSuccess);
}
/*
 * OS-dependent initialization
 * No need to worry about making this thread-safe since
 * it must be called before epicsThreadCreate creates
 * any new threads.
 *
 * RTEMS: raises the caller's priority, creates the once/task-var mutexes,
 * records the current task as "_main_" and starts the interrupt-context
 * message daemon.
 */
static void epicsThreadInit (void)
{
    if (!initialized) {
        rtems_id tid;
        rtems_task_priority old;

        /* Run the boot task at EPICS priority 99 from here on */
        rtems_task_set_priority (RTEMS_SELF,
            epicsThreadGetOssPriorityValue(99), &old);
        onceMutex = epicsMutexMustCreate();
        taskVarMutex = epicsMutexMustCreate ();
        rtems_task_ident (RTEMS_SELF, 0, &tid);
        setThreadInfo (tid, "_main_", NULL, NULL);
        initialized = 1;    /* set before spawning: the new thread re-enters */
        epicsThreadCreate ("ImsgDaemon", 99,
            epicsThreadGetStackSize (epicsThreadStackSmall),
            InterruptContextMessageDaemon, NULL);
    }
}
/*
 * Minimal dbCa initialisation for isolated (no CA worker) operation:
 * lazily creates the work-list primitives, marks the CA control state as
 * exited, and registers the shutdown hook.
 */
void dbCaLinkInitIsolated(void)
{
    if (workListLock == NULL)
        workListLock = epicsMutexMustCreate();
    if (workListEvent == NULL)
        workListEvent = epicsEventMustCreate(epicsEventEmpty);

    dbCaCtl = ctlExit;
    epicsAtExit(dbCaExit, NULL);
}
/*
 * One-shot initialisation of the task watchdog: creates the three locks,
 * the free list, the control events, and spawns the "taskwd" thread.
 * Aborts via cantProceed() if the thread cannot be created.
 */
static void twdInitOnce(void *arg)
{
    epicsThreadId tid;

    tLock = epicsMutexMustCreate();
    mLock = epicsMutexMustCreate();
    fLock = epicsMutexMustCreate();
    ellInit(&fList);
    /* Tell valgrind the free list acts as an allocation pool */
    VALGRIND_CREATE_MEMPOOL(&fList, 0, 0);
    twdCtl = twdctlRun;
    loopEvent = epicsEventMustCreate(epicsEventEmpty);
    exitEvent = epicsEventMustCreate(epicsEventEmpty);
    tid = epicsThreadCreate("taskwd", epicsThreadPriorityLow,
        epicsThreadGetStackSize(epicsThreadStackSmall),
        twdTask, NULL);
    if (tid == 0)
        cantProceed("Failed to spawn task watchdog thread\n");
    epicsAtExit(twdShutdown, NULL);
}
/*
 * Return the named database state, creating (and registering) it first if
 * no state with this name exists yet.
 */
dbStateId dbStateCreate(const char *name)
{
    dbStateId existing = dbStateFind(name);

    if (existing)
        return existing;

    existing = callocMustSucceed(1, sizeof(dbState), "createDbState");
    existing->name = epicsStrDup(name);
    existing->lock = epicsMutexMustCreate();
    ellAdd(&states, &existing->node);
    return existing;
}
/*
 * devIocStats analog-input device-support init.  Only pass 0 does work:
 * creates the per-type scan timers/watchdogs, initialises the OS-dependent
 * statistics collectors, snapshots slowly-changing values, and counts the
 * records in the database.  Always returns 0.
 */
static long ai_init(int pass)
{
    long i;

    if (pass) return 0;     /* all work happens in pass 0 */

    /* Create timers */
    for (i = 0; i < TOTAL_TYPES; i++) {
        scanIoInit(&scan[i].ioscan);
        scan[i].wd = wdogCreate(scan_time, i);
        scan[i].total = 0;
        scan[i].on = 0;
        scan[i].rate_sec = parmTypes[i].scan_rate;
    }

    /* Init OSD stuff */
    scan_mutex = epicsMutexMustCreate();
    devIocStatsInitCpuUsage();
    devIocStatsInitCpuUtilization(&loadinfo);
    devIocStatsInitFDUsage();
    devIocStatsInitMemUsage();
    devIocStatsInitWorkspaceUsage();
    devIocStatsInitSuspTasks();
    devIocStatsInitIFErrors();

    /* Get initial values of a few things that don't change much */
    devIocStatsGetClusterInfo(SYS_POOL, &clustinfo[SYS_POOL]);
    devIocStatsGetClusterInfo(DATA_POOL, &clustinfo[DATA_POOL]);
    devIocStatsGetClusterUsage(SYS_POOL, &mbufnumber[SYS_POOL]);
    devIocStatsGetClusterUsage(DATA_POOL, &mbufnumber[DATA_POOL]);
    devIocStatsGetCpuUtilization(&loadinfo);
    devIocStatsGetIFErrors(&iferrors);
    devIocStatsGetFDUsage(&fdusage);

    /* Count EPICS records */
    if (pdbbase) {
        DBENTRY dbentry;
        long status;

        dbInitEntry(pdbbase,&dbentry);
        status = dbFirstRecordType(&dbentry);
        while (!status) {
            recordnumber += dbGetNRecords(&dbentry);
            status = dbNextRecordType(&dbentry);
        }
        dbFinishEntry(&dbentry);
    }
    return 0;
}
/*
 * Initialise a ring buffer.  If shared_lock is non-zero the buffer borrows
 * that mutex; otherwise it creates (and owns) its own.
 *
 * Fix: the original malloc() result was used by memset() unchecked (a crash
 * on OOM); calloc() zero-fills, checks the size product, and the failure
 * case now degrades to an empty buffer.
 *
 * NOTE(review): when myRingBufferUniformSize is 0 the caller must have
 * preset pStruct->buffer_size before calling -- confirm.
 */
void createMyRingBuffer( MY_RING_BUFFER *pStruct, epicsMutexId shared_lock )
{
    if (shared_lock == 0) {
        pStruct->shared_lock = 0;
        pStruct->lock = epicsMutexMustCreate();
    } else {
        pStruct->shared_lock = 1;
        pStruct->lock = shared_lock;
    }
    if (myRingBufferUniformSize) {
        pStruct->buffer_size = myRingBufferUniformSize;
    }
    pStruct->indexWrite = 0;
    pStruct->buffer = calloc( pStruct->buffer_size, sizeof(double) );
    if (pStruct->buffer == NULL) {
        fprintf( stderr, "ring buffer allocation failed\n" );
        pStruct->buffer_size = 0;   /* leave a safe, empty buffer */
    }
    fprintf( stderr, "ring buffer at %p length=%u\n",
             (void *)pStruct->buffer, (unsigned)pStruct->buffer_size );
}
/*
 * Test that epicsMutexTryLock succeeds for the owner and that a second
 * thread (verifyTryLockThread) observes the mutex as busy.  The worker
 * signals "done" when it has finished its checks; only then is the mutex
 * released and everything destroyed.
 */
void verifyTryLock ()
{
    struct verifyTryLock verify;

    verify.mutex = epicsMutexMustCreate ();
    verify.done = epicsEventMustCreate ( epicsEventEmpty );

    /* Uncontended try-lock must succeed; we now hold the mutex. */
    testOk1(epicsMutexTryLock(verify.mutex) == epicsMutexLockOK);

    epicsThreadCreate ( "verifyTryLockThread", 40,
        epicsThreadGetStackSize(epicsThreadStackSmall),
        verifyTryLockThread, &verify );

    /* Wait for the worker to finish before tearing down. */
    testOk1(epicsEventWait ( verify.done ) == epicsEventWaitOK);

    epicsMutexUnlock ( verify.mutex );
    epicsMutexDestroy ( verify.mutex );
    epicsEventDestroy ( verify.done );
}
/*
 * Convert a PV link into a CA link: allocate the caLink, record the user's
 * connect/monitor callbacks, and queue a CA_CONNECT action.  The caLink's
 * own lock is held while the link is published and queued so the CA worker
 * cannot observe a partially-built link.
 */
void dbCaAddLinkCallback(struct link *plink,
    dbCaCallback connect, dbCaCallback monitor, void *userPvt)
{
    caLink *pca;

    /* The link must not already have CA private data attached. */
    assert(!plink->value.pv_link.pvt);

    pca = (caLink *)dbCalloc(1, sizeof(caLink));
    pca->lock = epicsMutexMustCreate();
    pca->plink = plink;
    pca->pvname = epicsStrDup(plink->value.pv_link.pvname);
    pca->connect = connect;
    pca->monitor = monitor;
    pca->userPvt = userPvt;

    epicsMutexMustLock(pca->lock);
    plink->type = CA_LINK;
    plink->value.pv_link.pvt = pca;
    addAction(pca, CA_CONNECT);
    epicsMutexUnlock(pca->lock);
}
/*
 * Create a new I/O-interrupt scan "head" with one scan list (and callback)
 * per callback priority, then link it onto the global pioscan_list under
 * ioscan_lock.  The new head is returned through *pioscanpvt.
 */
void scanIoInit(IOSCANPVT *pioscanpvt)
{
    ioscan_head *piosh = dbCalloc(1, sizeof(ioscan_head));
    int prio;

    /* Ensure the global ioscan state (ioscan_lock etc.) exists. */
    ioscanInit();
    for (prio = 0; prio < NUM_CALLBACK_PRIORITIES; prio++) {
        io_scan_list *piosl = &piosh->iosl[prio];

        callbackSetCallback(ioscanCallback, &piosl->callback);
        callbackSetPriority(prio, &piosl->callback);
        callbackSetUser(piosh, &piosl->callback);
        ellInit(&piosl->scan_list.list);
        piosl->scan_list.lock = epicsMutexMustCreate();
    }
    /* Publish the fully-built head on the global list. */
    epicsMutexMustLock(ioscan_lock);
    piosh->next = pioscan_list;
    pioscan_list = piosh;
    epicsMutexUnlock(ioscan_lock);
    *pioscanpvt = piosh;
}
/* * devLibInit() */ static long devLibInit (void) { rangeItem *pRange; int i; if(devLibInitFlag) return(SUCCESS); if(!pdevLibVME) { epicsPrintf ("pdevLibVME is NULL\n"); return S_dev_internal; } if (NELEMENTS(addrAlloc) != NELEMENTS(addrFree)) { return S_dev_internal; } addrListLock = epicsMutexMustCreate(); epicsMutexMustLock(addrListLock); for (i=0; i<NELEMENTS(addrAlloc); i++) { ellInit (&addrAlloc[i]); ellInit (&addrFree[i]); } for (i=0; i<NELEMENTS(addrAlloc); i++) { pRange = (rangeItem *) malloc (sizeof(*pRange)); if (!pRange) { return S_dev_noMemory; } pRange->pOwnerName = "<Vacant>"; pRange->pPhysical = NULL; pRange->begin = 0; pRange->end = addrLast[i]; ellAdd (&addrFree[i], &pRange->node); } epicsMutexUnlock(addrListLock); devLibInitFlag = TRUE; return pdevLibVME->pDevInit(); }
void xycom220setup(int id,int base) { xy220 *card=malloc(sizeof(xy220)); epicsUInt8 junk; volatile epicsUInt8 **ba; if(!card){ printf("Allocation failed\n"); return; } card->id=id; card->base_addr=base; ba=&card->base; if(devBusToLocalAddr(atVMEA16, card->base_addr, (volatile void **)ba)){ printf("Failed to map %lx for card %x\n",(unsigned long)card->base,id); free(card); return; } if(devReadProbe(1, card->base+U8_XY220_ID, &junk)){ printf("Failed to read %lx for card %x\n",(unsigned long)(card->base+U8_XY220_ID),id); free(card); return; } WRITE16(card->base, XY220_CSR, X220_CSR_RST); WRITE16(card->base, XY220_CSR, X220_CSR_RED|X220_CSR_GRN); card->guard=epicsMutexMustCreate(); if(dbg220>0) printf("%d mapped %lx as %lx\n",id,(unsigned long)card->base,(unsigned long)card->base); ellAdd(&xy220s,&card->node); return; }
/*
 * Allocate and initialise a general-purpose hash table.  "size" must be a
 * power of two; invalid sizes fall back to DEFAULT_SIZE and are clamped to
 * [MIN_SIZE, MAX_SIZE].  The result is returned through *ppvt.
 */
void epicsShareAPI gphInitPvt(gphPvt **ppvt, int size)
{
    gphPvt *pvt;

    /* A power of two has exactly one bit set, so size & (size-1) == 0. */
    if (size & (size - 1)) {
        printf("gphInitPvt: %d is not a power of 2\n", size);
        size = DEFAULT_SIZE;
    }
    if (size < MIN_SIZE)
        size = MIN_SIZE;
    if (size > MAX_SIZE)
        size = MAX_SIZE;

    pvt = callocMustSucceed(1, sizeof(gphPvt), "gphInitPvt");
    pvt->size = size;
    pvt->mask = size - 1;      /* index mask: valid because size is 2^k */
    pvt->paplist = callocMustSucceed(size, sizeof(ELLLIST *), "gphInitPvt");
    pvt->lock = epicsMutexMustCreate();

    *ppvt = pvt;
}
/*
 * Look up (or lazily create) the event_list for a named event.  New entries
 * get one scan list + callback per priority and are pushed onto the global
 * pevent_list[0] chain.  Purely numeric names 1..255 are also registered in
 * pevent_list[e] for backward-compatible numeric-event lookup.
 * Returns NULL for a NULL/empty name.
 */
event_list *eventNameToHandle(const char *eventname)
{
    int prio;
    event_list *pel;
    static epicsThreadOnceId onceId = EPICS_THREAD_ONCE_INIT;

    if (!eventname || eventname[0] == 0)
        return NULL;
    /* eventOnce creates event_lock and related globals exactly once. */
    epicsThreadOnce(&onceId, eventOnce, NULL);
    epicsMutexMustLock(event_lock);
    for (pel = pevent_list[0]; pel; pel=pel->next) {
        if (strcmp(pel->event_name, eventname) == 0) break;
    }
    if (pel == NULL) {
        pel = dbCalloc(1, sizeof(event_list));
        /* NOTE(review): unbounded copy -- assumes eventname fits in the
         * event_name field; confirm callers limit the name length. */
        strcpy(pel->event_name, eventname);
        for (prio = 0; prio < NUM_CALLBACK_PRIORITIES; prio++) {
            callbackSetUser(&pel->scan_list[prio], &pel->callback[prio]);
            callbackSetPriority(prio, &pel->callback[prio]);
            callbackSetCallback(eventCallback, &pel->callback[prio]);
            pel->scan_list[prio].lock = epicsMutexMustCreate();
            ellInit(&pel->scan_list[prio].list);
        }
        pel->next=pevent_list[0];
        pevent_list[0]=pel;
        { /* backward compatibility */
            char* p;
            long e = strtol(eventname, &p, 0);

            if (*p == 0 && e > 0 && e <= 255)
                pevent_list[e] = pel;
        }
    }
    epicsMutexUnlock(event_lock);
    return pel;
}
/*
 * Create a message queue with "capacity" slots of up to "maxMessageSize"
 * bytes each.  Each slot is rounded up to a whole number of unsigned longs
 * plus one leading long that stores the message length; the buffer is one
 * contiguous calloc'd block.  Aborts (via the *MustCreate/MustSucceed
 * helpers) rather than returning NULL on resource exhaustion.
 */
epicsShareFunc epicsMessageQueueId epicsShareAPI epicsMessageQueueCreate(
    unsigned int capacity,
    unsigned int maxMessageSize)
{
    epicsMessageQueueId pmsg;
    unsigned int slotBytes, slotLongs;

    assert(capacity != 0);

    pmsg = (epicsMessageQueueId)callocMustSucceed(1, sizeof(*pmsg),
        "epicsMessageQueueCreate");
    pmsg->capacity = capacity;
    pmsg->maxMessageSize = maxMessageSize;
    /* 1 extra long per slot holds the stored message's size. */
    slotLongs = 1 + ((maxMessageSize + sizeof(unsigned long) - 1)
        / sizeof(unsigned long));
    slotBytes = slotLongs * sizeof(unsigned long);
    pmsg->buf = (unsigned long *)callocMustSucceed(pmsg->capacity, slotBytes,
        "epicsMessageQueueCreate");
    /* Queue starts empty: in == out == first slot. */
    pmsg->inPtr = pmsg->outPtr = pmsg->firstMessageSlot = (char *)&pmsg->buf[0];
    pmsg->lastMessageSlot = (char *)&pmsg->buf[(capacity - 1) * slotLongs];
    pmsg->full = false;
    pmsg->slotSize = slotBytes;

    pmsg->mutex = epicsMutexMustCreate();
    ellInit(&pmsg->sendQueue);
    ellInit(&pmsg->receiveQueue);
    ellInit(&pmsg->eventFreeList);
    return pmsg;
}
/*
 * Configure one MM4000/MM4005 motor controller: connect to its serial/GPIB
 * port, identify the model from the firmware version string, discover the
 * axis count, initialise each axis (loop mode, step size, home preset,
 * travel limits) and finally spawn the per-controller poller thread.
 * MM4000Setup must have been called first.
 * Returns MOTOR_AXIS_OK or MOTOR_AXIS_ERROR.
 */
int MM4000AsynConfig(int card,             /* Controller number */
                     const char *portName, /* asyn port name of serial or GPIB port */
                     int asynAddress,      /* asyn subaddress for GPIB */
                     int numAxes,          /* Number of axes this controller supports */
                     int movingPollPeriod, /* Time to poll (msec) when an axis is in motion */
                     int idlePollPeriod)   /* Time to poll (msec) when an axis is idle. 0 for no polling */
{
    AXIS_HDL pAxis;
    int axis;
    MM4000Controller *pController;
    char threadName[20];
    int status;
    int totalAxes;
    int loopState;
    int digits;
    int modelNum;
    int retry = 0;
    char *p, *tokSave;
    char inputBuff[BUFFER_SIZE];
    char outputBuff[BUFFER_SIZE];

    if (numMM4000Controllers < 1) {
        printf("MM4000Config: no MM4000 controllers allocated, call MM4000Setup first\n");
        return MOTOR_AXIS_ERROR;
    }
    if ((card < 0) || (card >= numMM4000Controllers)) {
        printf("MM4000Config: card must in range 0 to %d\n", numMM4000Controllers-1);
        return MOTOR_AXIS_ERROR;
    }
    if ((numAxes < 1) || (numAxes > MM4000_MAX_AXES)) {
        printf("MM4000Config: numAxes must in range 1 to %d\n", MM4000_MAX_AXES);
        return MOTOR_AXIS_ERROR;
    }

    pController = &pMM4000Controller[card];
    /* NOTE(review): calloc result is used unchecked below -- assumed to
     * succeed at configuration time. */
    pController->pAxis = (AXIS_HDL) calloc(numAxes, sizeof(motorAxis));
    pController->numAxes = numAxes;
    /* Poll periods are given in msec; stored in seconds. */
    pController->movingPollPeriod = movingPollPeriod/1000.;
    pController->idlePollPeriod = idlePollPeriod/1000.;

    status = pasynOctetSyncIO->connect(portName, asynAddress,
        &pController->pasynUser, NULL);
    if (status != asynSuccess) {
        printf("MM4000AsynConfig: cannot connect to asyn port %s\n", portName);
        return MOTOR_AXIS_ERROR;
    }

    /* Ask for the firmware version ("VE"); retry up to 3 times. */
    do {
        status = sendAndReceive(pController, "VE;", inputBuff, sizeof(inputBuff));
        retry++;
        /* Return value is length of response string */
    } while (status != asynSuccess && retry < 3);

    if (status != asynSuccess)
        return (MOTOR_AXIS_ERROR);

    strcpy(pController->firmwareVersion, &inputBuff[2]);  /* Skip "VE" */

    /* Set Motion Master model indicator. */
    p = strstr(pController->firmwareVersion, "MM");
    if (p == NULL) {
        printf("MM4000AsynConfig: invalid model = %s\n", pController->firmwareVersion);
        return MOTOR_AXIS_ERROR;
    }
    modelNum = atoi(p+2);
    if (modelNum == 4000)
        pController->model = MM4000;
    else if (modelNum == 4005 || modelNum == 4006)
        pController->model = MM4005;
    else {
        printf("MM4000AsynConfig: invalid model = %s\n", pController->firmwareVersion);
        return MOTOR_AXIS_ERROR;
    }

    sendAndReceive(pController, "TP;", inputBuff, sizeof(inputBuff));

    /* The return string will tell us how many axes this controller has:
     * count the comma-separated position fields. */
    for (totalAxes = 0, tokSave = NULL,
             p = epicsStrtok_r(inputBuff, ",", &tokSave);
         p != 0;
         p = epicsStrtok_r(NULL, ",", &tokSave), totalAxes++)
        ;

    if (totalAxes < numAxes) {
        printf("MM4000AsynConfig: actual number of axes=%d < numAxes=%d\n",
            totalAxes, numAxes);
        return MOTOR_AXIS_ERROR;
    }

    for (axis=0; axis<numAxes; axis++) {
        pAxis = &pController->pAxis[axis];
        pAxis->pController = pController;
        pAxis->card = card;
        pAxis->axis = axis;
        pAxis->mutexId = epicsMutexMustCreate();
        pAxis->params = motorParam->create(0, MOTOR_AXIS_NUM_PARAMS);

        /* Determine if encoder present based on open/closed loop mode. */
        sprintf(outputBuff, "%dTC", axis+1);
        sendAndReceive(pController, outputBuff, inputBuff, sizeof(inputBuff));
        loopState = atoi(&inputBuff[3]);    /* Skip first 3 characters */
        if (loopState != 0)
            pAxis->closedLoop = 1;

        /* Determine drive resolution. */
        sprintf(outputBuff, "%dTU", axis+1);
        sendAndReceive(pController, outputBuff, inputBuff, sizeof(inputBuff));
        pAxis->stepSize = atof(&inputBuff[3]);
        digits = (int) -log10(pAxis->stepSize) + 2;
        if (digits < 1)
            digits = 1;
        pAxis->maxDigits = digits;

        /* Save home preset position. */
        sprintf(outputBuff, "%dXH", axis+1);
        sendAndReceive(pController, outputBuff, inputBuff, sizeof(inputBuff));
        pAxis->homePreset = atof(&inputBuff[3]);

        /* Determine low limit */
        sprintf(outputBuff, "%dTL", axis+1);
        sendAndReceive(pController, outputBuff, inputBuff, sizeof(inputBuff));
        pAxis->lowLimit = atof(&inputBuff[3]);

        /* Determine high limit */
        sprintf(outputBuff, "%dTR", axis+1);
        sendAndReceive(pController, outputBuff, inputBuff, sizeof(inputBuff));
        pAxis->highLimit = atof(&inputBuff[3]);
    }

    pController->pollEventId = epicsEventMustCreate(epicsEventEmpty);

    /* Create the poller thread for this controller */
    epicsSnprintf(threadName, sizeof(threadName), "MM4000:%d", card);
    epicsThreadCreate(threadName,
                      epicsThreadPriorityMedium,
                      epicsThreadGetStackSize(epicsThreadStackMedium),
                      (EPICSTHREADFUNC) MM4000Poller, (void *) pController);

    return MOTOR_AXIS_OK;
}
/*
 * Lazy initialization functions
 */
/* epicsThreadOnce callback: create the hook-list guard mutex. */
static void initHookOnce(void *arg)
{
    (void)arg;  /* unused */
    listLock = epicsMutexMustCreate();
}
/*
 * rsrv_init ()
 *
 * Initialise the CA server: global client list + lock, the per-object free
 * lists, the large-TCP buffer size derived from EPICS_CA_MAX_ARRAY_BYTES,
 * and the "CAS-TCP" connection-request daemon thread.  Blocks until the
 * daemon signals castcp_startStopEvent.  Always returns RSRV_OK.
 */
int rsrv_init (void)
{
    epicsThreadBooleanStatus tbs;
    unsigned priorityOfConnectDaemon;
    epicsThreadId tid;
    long maxBytesAsALong;
    long status;

    clientQlock = epicsMutexMustCreate();

    ellInit ( &clientQ );
    freeListInitPvt ( &rsrvClientFreeList, sizeof(struct client), 8 );
    freeListInitPvt ( &rsrvChanFreeList, sizeof(struct channel_in_use), 512 );
    freeListInitPvt ( &rsrvEventFreeList, sizeof(struct event_ext), 512 );
    freeListInitPvt ( &rsrvSmallBufFreeListTCP, MAX_TCP, 16 );
    initializePutNotifyFreeList ();

    status = envGetLongConfigParam ( &EPICS_CA_MAX_ARRAY_BYTES,
        &maxBytesAsALong );
    if ( status || maxBytesAsALong < 0 ) {
        errlogPrintf ( "CAS: EPICS_CA_MAX_ARRAY_BYTES was not a positive integer\n" );
        rsrvSizeofLargeBufTCP = MAX_TCP;
    }
    else {
        /* allow room for the protocol header so that they get the array
           size they requested */
        static const unsigned headerSize = sizeof ( caHdr ) +
            2 * sizeof ( ca_uint32_t );
        ca_uint32_t maxBytes = ( unsigned ) maxBytesAsALong;
        /* add the header, saturating at the 32-bit limit */
        if ( maxBytes < 0xffffffff - headerSize ) {
            maxBytes += headerSize;
        }
        else {
            maxBytes = 0xffffffff;
        }
        if ( maxBytes < MAX_TCP ) {
            errlogPrintf ( "CAS: EPICS_CA_MAX_ARRAY_BYTES was rounded up to %u\n",
                MAX_TCP );
            rsrvSizeofLargeBufTCP = MAX_TCP;
        }
        else {
            rsrvSizeofLargeBufTCP = maxBytes;
        }
    }
    freeListInitPvt ( &rsrvLargeBufFreeListTCP, rsrvSizeofLargeBufTCP, 1 );
    ellInit ( &beaconAddrList );
    prsrv_cast_client = NULL;
    pCaBucket = NULL;

    castcp_startStopEvent = epicsEventMustCreate(epicsEventEmpty);
    castcp_ctl = ctlPause;

    /*
     * go down two levels so that we are below
     * the TCP and event threads started on behalf
     * of individual clients
     */
    tbs = epicsThreadHighestPriorityLevelBelow (
        epicsThreadPriorityCAServerLow, &priorityOfConnectDaemon );
    if ( tbs == epicsThreadBooleanStatusSuccess ) {
        tbs = epicsThreadHighestPriorityLevelBelow (
            priorityOfConnectDaemon, &priorityOfConnectDaemon );
        if ( tbs != epicsThreadBooleanStatusSuccess ) {
            priorityOfConnectDaemon = epicsThreadPriorityCAServerLow;
        }
    }
    else {
        priorityOfConnectDaemon = epicsThreadPriorityCAServerLow;
    }

    tid = epicsThreadCreate ( "CAS-TCP",
        priorityOfConnectDaemon,
        epicsThreadGetStackSize(epicsThreadStackMedium),
        req_server, 0);
    if ( tid == 0 ) {
        epicsPrintf ( "CAS: unable to start connection request thread\n" );
    }

    /* Wait until the daemon reports it has started (or failed to). */
    epicsEventMustWait(castcp_startStopEvent);

    return RSRV_OK;
}