Example #1
File: gpHashLib.c Project: zlxmsu/epics
void epicsShareAPI gphFreeMem(gphPvt *pgphPvt)
{
    ELLLIST **paplist;
    int h;

    /* Caller must ensure that no other thread is using *pgphPvt */
    if (pgphPvt == NULL) return;

    paplist = pgphPvt->paplist;
    for (h = 0; h < pgphPvt->size; h++) {
        ELLLIST *plist = paplist[h];
        GPHENTRY *pgphNode;
        GPHENTRY *next;

        if (plist == NULL) continue;
        pgphNode = (GPHENTRY *) ellFirst(plist);

        while (pgphNode) {
            next = (GPHENTRY *) ellNext((ELLNODE*)pgphNode);
            ellDelete(plist, (ELLNODE*)pgphNode);
            free(pgphNode);
            pgphNode = next;
        }
        free(paplist[h]);
    }
    epicsMutexDestroy(pgphPvt->lock);
    free(paplist);
    free(pgphPvt);
}
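For context, a minimal usage sketch of the gpHash API that gphFreeMem() tears down. This assumes the gphInitPvt/gphAdd/gphFind entry points declared in EPICS Base's gpHash.h; treat the exact signatures as approximate.

#include "gpHash.h"

/* Sketch only: build a table, add and find one entry, then free it all. */
static void gpHashSketch(void)
{
    gphPvt *pvt;
    GPHENTRY *entry;

    gphInitPvt(&pvt, 256);               /* table size, a power of 2 */
    entry = gphAdd(pvt, "myName", NULL); /* third arg: any void* scoping id */
    if (entry)
        entry = gphFind(pvt, "myName", NULL);

    gphFreeMem(pvt); /* frees every entry, the bucket lists, and pvt itself */
}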
Example #2
File: dbCa.c Project: T-A-R-L-A/EPICS-Base
static void dbCaLinkFree(caLink *pca)
{
    dbCaCallback callback;
    struct link *plinkPutCallback = 0;

    if (pca->chid) {
        ca_clear_channel(pca->chid);
        --dbca_chan_count;
    }
    callback = pca->putCallback;
    if (callback) {
        plinkPutCallback = pca->plinkPutCallback;
        pca->plinkPutCallback = 0;
        pca->putCallback = 0;
        pca->putType = 0;
    }
    free(pca->pgetNative);
    free(pca->pputNative);
    free(pca->pgetString);
    free(pca->pputString);
    free(pca->pvname);
    epicsMutexDestroy(pca->lock);
    free(pca);
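    /* pca->putCallback was saved in a local above so it can still be
     * invoked after pca itself has been freed */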
    if (callback) callback(plinkPutCallback);
}
Example #3
void dbProcessNotifyExit(void)
{
    assert(ellCount(&pnotifyGlobal->freeList)==0);
    epicsMutexDestroy(pnotifyGlobal->lock);
    free(pnotifyGlobal);
    pnotifyGlobal = NULL;
}
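A plausible init-time counterpart, shown only as a sketch; the real dbProcessNotifyInit() in dbNotify.c may differ.

/* Hypothetical sketch: create exactly what dbProcessNotifyExit() tears down */
void dbProcessNotifyInitSketch(void)
{
    if (pnotifyGlobal)
        return;
    pnotifyGlobal = callocMustSucceed(1, sizeof(*pnotifyGlobal),
        "dbProcessNotifyInit");
    pnotifyGlobal->lock = epicsMutexMustCreate();
    ellInit(&pnotifyGlobal->freeList);
}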
Example #4
/* Starts "mcnt" jobs in a pool with initial and max
 * thread counts "icnt" and "mcnt".
 * The test ensures that all jobs run in parallel.
 * "cork" checks the function of pausing the run queue
 * with epicsThreadPoolQueueRun
 */
static void postjobs(size_t icnt, size_t mcnt, int cork)
{
    size_t i;
    epicsThreadPool *pool;
    countPriv *priv=callocMustSucceed(1, sizeof(*priv), "postjobs priv alloc");
    priv->guard=epicsMutexMustCreate();
    priv->done=epicsEventMustCreate(epicsEventEmpty);
    priv->allrunning=epicsEventMustCreate(epicsEventEmpty);
    priv->count=mcnt;
    priv->job=callocMustSucceed(mcnt, sizeof(*priv->job), "postjobs job array");

    testDiag("postjobs(%lu,%lu)", (unsigned long)icnt, (unsigned long)mcnt);

    {
        epicsThreadPoolConfig conf;
        epicsThreadPoolConfigDefaults(&conf);
        conf.initialThreads=icnt;
        conf.maxThreads=mcnt;

        testOk1((pool=epicsThreadPoolCreate(&conf))!=NULL);
        if(!pool)
            return;
    }

    if(cork)
        epicsThreadPoolControl(pool, epicsThreadPoolQueueRun, 0);

    for(i=0; i<mcnt; i++) {
        testDiag("i=%lu", (unsigned long)i);
        priv->job[i] = epicsJobCreate(pool, &countjob, priv);
        testOk1(priv->job[i]!=NULL);
        testOk1(epicsJobQueue(priv->job[i])==0);
    }

    if(cork) {
        /* no jobs should have run */
        epicsMutexMustLock(priv->guard);
        testOk1(priv->count==mcnt);
        epicsMutexUnlock(priv->guard);

        epicsThreadPoolControl(pool, epicsThreadPoolQueueRun, 1);
    }

    testDiag("Waiting for all jobs to start");
    epicsEventMustWait(priv->allrunning);
    testDiag("Stop all");
    epicsEventSignal(priv->done);

    for(i=0; i<mcnt; i++) {
        testDiag("i=%lu", (unsigned long)i);
        epicsJobDestroy(priv->job[i]);
    }

    epicsThreadPoolDestroy(pool);
    epicsMutexDestroy(priv->guard);
    epicsEventDestroy(priv->allrunning);
    epicsEventDestroy(priv->done);
    free(priv->job);
    free(priv);
}
Example #5
/* We never really remove a PLC from the list,
 * but this is how it could be done. Maybe. */
static void free_PLC(PLC *plc)
{
    ScanList *list;

    epicsMutexDestroy(plc->lock);
    EIP_dispose(plc->connection);
    free(plc->name);
    free(plc->ip_addr);
    while ((list = DLL_decap(&plc->scanlists)) != 0)
        free_ScanList(list);
    free(plc);
}
Example #6
static
int linuxDevFinal(void)
{
    ELLNODE *cur, *next, *isrcur, *isrnext;
    osdPCIDevice *curdev=NULL;
    osdISR *isr;

    epicsMutexMustLock(pciLock);
    for(cur=ellFirst(&devices), next=cur ? ellNext(cur) : NULL;
        cur;
        cur=next, next=next ? ellNext(next) : NULL )
    {
        curdev=CONTAINER(cur,osdPCIDevice,node);

        epicsMutexMustLock(curdev->devLock);

        for(isrcur=ellFirst(&curdev->isrs), isrnext=isrcur ? ellNext(isrcur) : NULL;
            isrcur;
            isrcur=isrnext, isrnext=isrnext ? ellNext(isrnext) : NULL )
        {
            isr=CONTAINER(isrcur,osdISR,node);

            stopIsrThread(isr);

            ellDelete(&curdev->isrs,isrcur); /* delete the ISR node, not the device node */
            free(isr);

        }

        close_uio(curdev);

        epicsMutexUnlock(curdev->devLock);
        epicsMutexDestroy(curdev->devLock);
        free(curdev);
    }
    epicsMutexUnlock(pciLock);
    epicsMutexDestroy(pciLock);

    return 0;
}
Example #7
/* We never remove a tag */
static void free_TagInfo(TagInfo *info)
{
    EIP_free_ParsedTag(info->tag);
    free(info->string_tag);
    if (info->data_size > 0)
    {
        free(info->data);
        info->data_size = 0;
        info->data = 0;
    }
    epicsMutexDestroy(info->data_lock);
    free (info);
}
Example #8
epicsShareFunc void epicsShareAPI
epicsMessageQueueDestroy(epicsMessageQueueId pmsg)
{
    struct eventNode *evp;

    while ((evp = reinterpret_cast < struct eventNode * >
                  ( ellGet(&pmsg->eventFreeList) ) ) != NULL) {
        epicsEventDestroy(evp->event);
        free(evp);
    }
    epicsMutexDestroy(pmsg->mutex);
    free(pmsg->buf);
    free(pmsg);
}
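The create/use/destroy cycle that pairs with this destructor, as a minimal sketch using the epicsMessageQueue API from EPICS Base (error handling omitted):

#include "epicsMessageQueue.h"

static void messageQueueSketch(void)
{
    char out[] = "hello";
    char in[32];
    epicsMessageQueueId q = epicsMessageQueueCreate(4, 32); /* 4 msgs, <=32 bytes each */

    epicsMessageQueueSend(q, out, sizeof(out));
    epicsMessageQueueReceive(q, in, sizeof(in));

    epicsMessageQueueDestroy(q); /* releases event nodes, buffer, then the queue */
}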
Example #9
static void deletePeriodic(void)
{
    int i;

    for (i = 0; i < nPeriodic; i++) {
        periodic_scan_list *ppsl = papPeriodic[i];
        ellFree(&ppsl->scan_list.list);
        epicsEventDestroy(ppsl->loopEvent);
        epicsMutexDestroy(ppsl->scan_list.lock);
        free(ppsl);
    }

    free(papPeriodic);
    papPeriodic = NULL;
}
Example #10
void verifyTryLock ()
{
    struct verifyTryLock verify;

    verify.mutex = epicsMutexMustCreate ();
    verify.done = epicsEventMustCreate ( epicsEventEmpty );

    testOk1(epicsMutexTryLock(verify.mutex) == epicsMutexLockOK);

    epicsThreadCreate ( "verifyTryLockThread", 40, 
        epicsThreadGetStackSize(epicsThreadStackSmall),
        verifyTryLockThread, &verify );

    testOk1(epicsEventWait ( verify.done ) == epicsEventWaitOK);

    epicsMutexUnlock ( verify.mutex );
    epicsMutexDestroy ( verify.mutex );
    epicsEventDestroy ( verify.done );
}
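The companion thread is not shown in this example; below is a plausible sketch inferred from how the test reads (the real verifyTryLockThread may differ).

/* Hypothetical sketch: the main thread already holds verify.mutex, so a
 * try-lock here should fail with epicsMutexLockTimeout instead of blocking. */
static void verifyTryLockThread(void *pArg)
{
    struct verifyTryLock *pVerify = (struct verifyTryLock *) pArg;

    testOk1(epicsMutexTryLock(pVerify->mutex) == epicsMutexLockTimeout);
    epicsEventSignal(pVerify->done);
}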
Example #11
File: acqExtern.c Project: Cpppro/acquaman
int acqRemoveOutputHandler(acqMaster_t *master, acqKey_t key)
{
	int i;
	for( i=0; i < master->numOutputHandler; i++)
	{
		if( key == master->outputKeys[i])
			break;
	}
	if (i >= master->numOutputHandler)
		return -1;
	epicsMutexDestroy( master->handlerLock[i]);
	master->numOutputHandler--;
	for( ; i < master->numOutputHandler; i++)
	{
		master->outputKeys[i] = master->outputKeys[i+1];
		master->outputHandler[i] = master->outputHandler[i+1];
		master->handlerLock[i] = master->handlerLock[i+1]; /* keep the lock array in step */
	}
	return 0;
}
Example #12
static void ioscanDestroy(void)
{
    ioscan_head *piosh;

    ioscanInit();
    epicsMutexMustLock(ioscan_lock);
    piosh = pioscan_list;
    pioscan_list = NULL;
    epicsMutexUnlock(ioscan_lock);
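    /* the list head was detached while ioscan_lock was held, so the
     * nodes can now be torn down without holding the lock */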
    while (piosh) {
        ioscan_head *pnext = piosh->next;
        int prio;

        for (prio = 0; prio < NUM_CALLBACK_PRIORITIES; prio++) {
            epicsMutexDestroy(piosh->iosl[prio].scan_list.lock);
            ellFree(&piosh->iosl[prio].scan_list.list);
        }
        free(piosh);
        piosh = pnext;
    }
}
Example #13
File: caservertask.c Project: ukaea/epics
/* 
 * destroy_client ()
 */
void destroy_client ( struct client *client )
{
    if ( ! client ) {
        return;
    }
    
    if ( client->tid != 0 ) {
        taskwdRemove ( client->tid );
    }

    if ( client->sock != INVALID_SOCKET ) {
        epicsSocketDestroy ( client->sock );
    }

    if ( client->proto == IPPROTO_TCP ) {
        if ( client->send.buf ) {
            if ( client->send.type == mbtSmallTCP ) {
                freeListFree ( rsrvSmallBufFreeListTCP,  client->send.buf );
            }
            else if ( client->send.type == mbtLargeTCP ) {
                freeListFree ( rsrvLargeBufFreeListTCP,  client->send.buf );
            }
            else {
                errlogPrintf ( "CAS: Corrupt send buffer free list type code=%u during client cleanup?\n",
                    client->send.type );
            }
        }
        if ( client->recv.buf ) {
            if ( client->recv.type == mbtSmallTCP ) {
                freeListFree ( rsrvSmallBufFreeListTCP,  client->recv.buf );
            }
            else if ( client->recv.type == mbtLargeTCP ) {
                freeListFree ( rsrvLargeBufFreeListTCP,  client->recv.buf );
            }
            else {
                errlogPrintf ( "CAS: Corrupt recv buffer free list type code=%u during client cleanup?\n",
                    client->send.type );
            }
        }
    }
    else if ( client->proto == IPPROTO_UDP ) {
        if ( client->send.buf ) {
            free ( client->send.buf );
        }
        if ( client->recv.buf ) {
            free ( client->recv.buf );
        }
    }

    if ( client->eventqLock ) {
        epicsMutexDestroy ( client->eventqLock );
    }

    if ( client->chanListLock ) {
        epicsMutexDestroy ( client->chanListLock );
    }

    if ( client->putNotifyLock ) {
        epicsMutexDestroy ( client->putNotifyLock );
    }

    if ( client->lock ) {
        epicsMutexDestroy ( client->lock );
    }

    if ( client->blockSem ) {
        epicsEventDestroy ( client->blockSem );
    }

    if ( client->pUserName ) {
        free ( client->pUserName );
    }

    if ( client->pHostName ) {
        free ( client->pHostName );
    } 

    freeListFree ( rsrvClientFreeList, client );
}
Example #14
void epicsThreadPoolDestroy(epicsThreadPool *pool)
{
    unsigned int nThr;
    ELLLIST notify;
    ELLNODE *cur;

    if (!pool)
        return;

    ellInit(&notify);

    epicsMutexMustLock(pool->guard);

    /* run remaining queued jobs */
    epicsThreadPoolControlImpl(pool, epicsThreadPoolQueueAdd, 0);
    epicsThreadPoolControlImpl(pool, epicsThreadPoolQueueRun, 1);
    nThr = pool->threadsRunning;
    pool->freezeopt = 1;

    epicsMutexUnlock(pool->guard);

    epicsThreadPoolWait(pool, -1.0);
    /* At this point all queued jobs have run */

    epicsMutexMustLock(pool->guard);

    pool->shutdown = 1;
    /* wakeup all */
    if (pool->threadsWaking < pool->threadsSleeping) {
        pool->threadsWaking = pool->threadsSleeping;
        epicsEventSignal(pool->workerWakeup);
    }

    ellConcat(&notify, &pool->owned);
    ellConcat(&notify, &pool->jobs);

    epicsMutexUnlock(pool->guard);

    if (nThr && epicsEventWait(pool->shutdownEvent) != epicsEventWaitOK){
        errlogMessage("epicsThreadPoolDestroy: wait error");
        return;
    }

    /* all workers are now shutdown */

    /* notify remaining jobs that pool is being destroyed */
    while ((cur = ellGet(&notify)) != NULL) {
        epicsJob *job = CONTAINER(cur, epicsJob, jobnode);

        job->running = 1;
        job->func(job->arg, epicsJobModeCleanup);
        job->running = 0;
        if (job->freewhendone)
            free(job);
        else
            job->pool = NULL; /* orphan */
    }

    epicsEventDestroy(pool->workerWakeup);
    epicsEventDestroy(pool->shutdownEvent);
    epicsEventDestroy(pool->observerWakeup);
    epicsMutexDestroy(pool->guard);

    free(pool);
}
Example #15
static
int linuxDevPCIInit(void)
{

    DIR* sysfsPci_dir=NULL;
    struct dirent* dir;
    int i;
    osdPCIDevice *osd=NULL;
    int host_is_first = 0;

    pciLock = epicsMutexMustCreate();

    pagesize=sysconf(_SC_PAGESIZE);
    if (pagesize==-1) {
        perror("Failed to get pagesize");
        goto fail;
    }

    sysfsPci_dir = opendir("/sys/bus/pci/devices");
    if (!sysfsPci_dir){
        fprintf(stderr, "Could not open /sys/bus/pci/devices!\n");
        goto fail;
    }

    while ((dir=readdir(sysfsPci_dir))) {
        char* filename;
        FILE* file;
        int fail=0;
        int match;
        unsigned long long int start,stop,flags;
        char dname[80];

        if (!dir->d_name || dir->d_name[0]=='.') continue; /* Skip invalid entries */

        osd=calloc(1, sizeof(osdPCIDevice));
        if (!osd) {
            errMessage(S_dev_noMemory, "Out of memory");
            goto fail;
        }
        osd->fd=-1;
        osd->cfd = -1;
        for ( i=0; i<sizeof(osd->rfd)/sizeof(osd->rfd[0]); i++ )
            osd->rfd[i] = -1;

        osd->dev.slot = DEVPCI_NO_SLOT;

        match = sscanf(dir->d_name,"%x:%x:%x.%x",
                       &osd->dev.domain,&osd->dev.bus,&osd->dev.device,&osd->dev.function);
        if (match != 4){
            fprintf(stderr, "Could not decode PCI device directory %s\n", dir->d_name);
        }
 
        osd->dev.id.vendor=read_sysfs(&fail, BUSBASE "vendor",
                             osd->dev.domain, osd->dev.bus, osd->dev.device, osd->dev.function);
        osd->dev.id.device=read_sysfs(&fail, BUSBASE "device",
                             osd->dev.domain, osd->dev.bus, osd->dev.device, osd->dev.function);
        osd->dev.id.sub_vendor=read_sysfs(&fail, BUSBASE "subsystem_vendor",
                             osd->dev.domain, osd->dev.bus, osd->dev.device, osd->dev.function);
        osd->dev.id.sub_device=read_sysfs(&fail, BUSBASE "subsystem_device",
                             osd->dev.domain, osd->dev.bus, osd->dev.device, osd->dev.function);
        osd->dev.id.pci_class=read_sysfs(&fail, BUSBASE "class",
                             osd->dev.domain, osd->dev.bus, osd->dev.device, osd->dev.function);
        osd->dev.irq=read_sysfs(&fail, BUSBASE "irq",
                             osd->dev.domain, osd->dev.bus, osd->dev.device, osd->dev.function);
        osd->dev.id.revision=0;

        if (fail) {
            fprintf(stderr, "Warning: Failed to read some attributes of PCI device %04x:%02x:%02x.%x\n"
                         "         This may cause some searches to fail\n",
                         osd->dev.domain, osd->dev.bus, osd->dev.device, osd->dev.function);
            fail=0;
        }

        if(devPCIDebug>=1) {
            fprintf(stderr, "linuxDevPCIInit found %04x:%02x:%02x.%x\n",
                         osd->dev.domain, osd->dev.bus, osd->dev.device, osd->dev.function);
            fprintf(stderr, " as pri %04x:%04x sub %04x:%04x cls %06x\n",
                         osd->dev.id.vendor, osd->dev.id.device,
                         osd->dev.id.sub_vendor, osd->dev.id.sub_device,
                         osd->dev.id.pci_class);
        }

        /* Read BAR info */

        /* Base address */
        
        filename = allocPrintf(BUSBASE "resource",
                         osd->dev.domain, osd->dev.bus, osd->dev.device, osd->dev.function);
        if (!filename) {
            errMessage(S_dev_noMemory, "Out of memory");
            goto fail;
        }
        file=fopen(filename, "r");
        if (!file) {
            fprintf(stderr, "Could not open resource file %s!\n", filename);
            free(filename);
            continue;
        }
        for (i=0; i<PCIBARCOUNT; i++) { /* read 6 BARs */
            match = fscanf(file, "0x%16llx 0x%16llx 0x%16llx\n", &start, &stop, &flags);
        
            if (match != 3) {
                fprintf(stderr, "Could not parse line %i of %s\n", i+1, filename);
                continue;
            }

            osd->dev.bar[i].ioport = (flags & PCI_BASE_ADDRESS_SPACE)==PCI_BASE_ADDRESS_SPACE_IO;
            osd->dev.bar[i].below1M = !!(flags&PCI_BASE_ADDRESS_MEM_TYPE_1M);
            osd->dev.bar[i].addr64 = !!(flags&PCI_BASE_ADDRESS_MEM_TYPE_64);
            osd->displayBAR[i] = start;

            /* offset from start of page to start of BAR */
            osd->offset[i] = osd->displayBAR[i]&(pagesize-1);
            /* region length */
            osd->len[i] = (start || stop ) ? (stop - start + 1) : 0;
        }
        /* rom */
        match = fscanf(file, "%llx %llx %llx\n", &start, &stop, &flags);
        if (match != 3) {
            fprintf(stderr, "Could not parse line %i of %s\n", i+1, filename);
            start = 0;
            stop = 0;
        }

        osd->displayErom = start;
        osd->eromlen = (start || stop ) ? (stop - start + 1) : 0;
        
        fclose(file);
        free(filename);
        
        /* driver name */
        filename = allocPrintf(BUSBASE "driver",
                         osd->dev.domain, osd->dev.bus, osd->dev.device, osd->dev.function);
        if (!filename) {
            errMessage(S_dev_noMemory, "Out of memory");
            goto fail;
        }
        memset (dname, 0, sizeof(dname));
        if (readlink(filename, dname, sizeof(dname)-1) != -1)
            osd->dev.driver = epicsStrDup(basename(dname));
        free(filename);

        osd->devLock = epicsMutexMustCreate();

        if (!ellCount(&devices))
        {
            host_is_first = (osd->dev.bus == 0 && osd->dev.device == 0);
        }
        ellInsert(&devices,host_is_first?ellLast(&devices):NULL,&osd->node);
        osd=NULL;
    }
    if (sysfsPci_dir)
        closedir(sysfsPci_dir);

    sysfsPci_dir = opendir(linuxslotsdir);
    if (sysfsPci_dir){
        while ((dir=readdir(sysfsPci_dir))) {
            unsigned dom, B, D;
            char *fullname;
            FILE *fp;

            if (!dir->d_name || dir->d_name[0]=='.') continue; /* Skip invalid entries */
            if(devPCIDebug>4)
                fprintf(stderr, "examine /slots entry '%s'\n", dir->d_name);

            fullname = allocPrintf("%s/%s/address", linuxslotsdir, dir->d_name);
            if(!fullname) continue;

            if(devPCIDebug>3)
                fprintf(stderr, "found '%s'\n", fullname);

            if((fp=fopen(fullname, "r"))!=NULL) {

                if(fscanf(fp, "%x:%x:%x", &dom, &B, &D)==3) {
                    ELLNODE *cur;
                    if(devPCIDebug>2)
                        fprintf(stderr, "found slot %s with %04u:%02u:%02u.*\n", dir->d_name, dom, B, D);

                    for(cur=ellFirst(&devices); cur; cur=ellNext(cur)) {
                        osdPCIDevice *osd = CONTAINER(cur, osdPCIDevice, node);
                        if(osd->dev.domain!=dom || osd->dev.bus!=B || osd->dev.device!=D)
                            continue;
                        if(osd->dev.slot==DEVPCI_NO_SLOT) {
                            osd->dev.slot = strdup(dir->d_name); /* if strdup() fails the slot simply remains unlabeled */
                        } else {
                            fprintf(stderr, "Duplicate slot address for %s\n", dir->d_name);
                        }
                    }
                }

                fclose(fp);
            }
            free(fullname);
        }
        closedir(sysfsPci_dir);
    } else if(devPCIDebug>0) {
        fprintf(stderr, "/sys does not provide PCI slots listing\n");
    }


    return 0;
fail:
    if (sysfsPci_dir)
        closedir(sysfsPci_dir);
    epicsMutexDestroy(pciLock);
    return S_dev_badInit;
}
Example #16
/** NDAttributeList destructor
  */
NDAttributeList::~NDAttributeList()
{
  this->clear();
  ellFree(&this->list);
  epicsMutexDestroy(this->lock);
}
Example #17
static void testUDP(void)
{
    caster_t caster;
    shSocket sender;
    osiSockAddr dest;
    union casterUDP buf;

    shSocketInit(&sender);

    sender.sd = shCreateSocket(AF_INET, SOCK_DGRAM, 0);
    if(sender.sd==INVALID_SOCKET) {
        testAbort("Failed to create socket");
        return;
    }

    lock = epicsMutexMustCreate();
    cycled[0] = epicsEventMustCreate(epicsEventEmpty);
    cycled[1] = epicsEventMustCreate(epicsEventEmpty);

    casterInit(&caster);

    caster.udpport = 0; /* test with random port */
    caster.testhook = &testerhook;

    epicsThreadMustCreate("udptester",
                          epicsThreadPriorityMedium,
                          epicsThreadGetStackSize(epicsThreadStackSmall),
                          &tester, &caster);

    epicsEventSignal(cycled[1]);

    /* wait for tester thread to set up its socket */
    epicsEventMustWait(cycled[0]);

    epicsMutexMustLock(lock);

    testOk1(caster.udpport!=0);

    testDiag("UDP test with port %d", caster.udpport);

    memset(&dest, 0, sizeof(dest));
    dest.ia.sin_family = AF_INET;
    dest.ia.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    dest.ia.sin_port = htons(caster.udpport);

    epicsMutexUnlock(lock);

    /* allow tester thread to begin recv() */
    epicsEventSignal(cycled[1]);

    testDiag("Test announcement directly from server");

    memset(&buf, 0, sizeof(buf));
    buf.m_msg.pid = htons(RECAST_MAGIC);
    buf.m_msg.serverIP = htonl(0xffffffff);
    buf.m_msg.serverPort = htons(0x1020);
    buf.m_msg.serverKey = htonl(0x12345678);

    testOk1(0==shSendTo(&sender, &buf.m_bytes, 0x10, 0, &dest));

    /* wait for tester thread to complete recv() and end cycle */
    epicsEventMustWait(cycled[0]);

    epicsMutexMustLock(lock);
    testOk1(cycles==1);
    testOk1(result==0);
    testOk1(caster.haveserv==1);
    testOk1(caster.nameserv.ia.sin_family==AF_INET);
    testOk1(caster.nameserv.ia.sin_addr.s_addr==htonl(INADDR_LOOPBACK));
    testOk1(caster.nameserv.ia.sin_port==htons(0x1020));
    testOk1(caster.servkey==0x12345678);
    epicsMutexUnlock(lock);

    testDiag("Test proxied announcement");

    /* start next cycle */
    epicsEventSignal(cycled[1]);

    /* wait for tester thread to set up its socket */
    epicsEventMustWait(cycled[0]);

    epicsMutexMustLock(lock);

    dest.ia.sin_port = htons(caster.udpport);

    epicsMutexUnlock(lock);

    buf.m_msg.serverIP = htonl(0x50607080);

    /* allow tester thread to begin recv() */
    epicsEventSignal(cycled[1]);

    testOk1(0==shSendTo(&sender, &buf.m_bytes, 0x10, 0, &dest));

    /* wait for tester thread to complete recv() and end cycle */
    epicsEventMustWait(cycled[0]);

    epicsMutexMustLock(lock);
    testOk1(cycles==2);
    testOk1(result==0);
    testOk1(caster.haveserv==1);
    testOk1(caster.nameserv.ia.sin_family==AF_INET);
    testOk1(caster.nameserv.ia.sin_addr.s_addr==htonl(0x50607080));
    testOk1(caster.nameserv.ia.sin_port==htons(0x1020));
    epicsMutexUnlock(lock);

    /* begin shutdown cycle */
    epicsEventSignal(cycled[1]);
    epicsEventMustWait(cycled[0]);
    epicsEventSignal(cycled[1]);


    casterShutdown(&caster);

    epicsEventDestroy(cycled[0]);
    epicsEventDestroy(cycled[1]);
    epicsMutexDestroy(lock);
}
Example #18
void destroyMyRingBuffer( MY_RING_BUFFER *pStruct ) {
    if (!pStruct->shared_lock) {
        epicsMutexDestroy( pStruct->lock );
    }
    free( pStruct->buffer );
}
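The conditional destroy implies the lock may be owned elsewhere. Below is a hypothetical creation counterpart; MY_RING_BUFFER's fields and this helper are assumptions based on the destroy code above.

#include <stdlib.h>

/* Hypothetical sketch: if the caller supplies a lock, mark it shared so
 * destroyMyRingBuffer() leaves it alone; otherwise create and own one. */
static int initMyRingBuffer(MY_RING_BUFFER *pStruct, size_t size,
                            epicsMutexId sharedLock)
{
    pStruct->buffer = malloc(size);
    if (!pStruct->buffer)
        return -1;
    pStruct->shared_lock = (sharedLock != NULL);
    pStruct->lock = sharedLock ? sharedLock : epicsMutexCreate();
    return 0;
}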
Example #19
epicsThreadPool* epicsThreadPoolCreate(epicsThreadPoolConfig *opts)
{
    size_t i;
    epicsThreadPool *pool;

    /* caller likely didn't initialize the options structure */
    if (opts && opts->maxThreads == 0) {
        errlogMessage("Error: epicsThreadPoolCreate() options provided, but not initialized");
        return NULL;
    }

    pool = calloc(1, sizeof(*pool));
    if (!pool)
        return NULL;

    if (opts)
        memcpy(&pool->conf, opts, sizeof(*opts));
    else
        epicsThreadPoolConfigDefaults(&pool->conf);

    if (pool->conf.initialThreads > pool->conf.maxThreads)
        pool->conf.initialThreads = pool->conf.maxThreads;

    pool->workerWakeup = epicsEventCreate(epicsEventEmpty);
    pool->shutdownEvent = epicsEventCreate(epicsEventEmpty);
    pool->observerWakeup = epicsEventCreate(epicsEventEmpty);
    pool->guard = epicsMutexCreate();

    if (!pool->workerWakeup || !pool->shutdownEvent ||
       !pool->observerWakeup || !pool->guard)
        goto cleanup;

    ellInit(&pool->jobs);
    ellInit(&pool->owned);

    epicsMutexMustLock(pool->guard);

    for (i = 0; i < pool->conf.initialThreads; i++) {
        createPoolThread(pool);
    }

    if (pool->threadsRunning == 0 && pool->conf.initialThreads != 0) {
        epicsMutexUnlock(pool->guard);
        errlogPrintf("Error: Unable to create any threads for thread pool\n");
        goto cleanup;

    }
    else if (pool->threadsRunning < pool->conf.initialThreads) {
        errlogPrintf("Warning: Unable to create all threads for thread pool (%u/%u)\n",
                     pool->threadsRunning, pool->conf.initialThreads);
    }

    epicsMutexUnlock(pool->guard);

    return pool;

cleanup:
    if (pool->workerWakeup)
        epicsEventDestroy(pool->workerWakeup);
    if (pool->shutdownEvent)
        epicsEventDestroy(pool->shutdownEvent);
    if (pool->observerWakeup)
        epicsEventDestroy(pool->observerWakeup);
    if (pool->guard)
        epicsMutexDestroy(pool->guard);

    free(pool);
    return NULL;
}
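For reference, a minimal create/destroy round trip using only calls that appear in examples #4 and #14 (job submission elided; a sketch, not canonical usage):

static void threadPoolSketch(void)
{
    epicsThreadPoolConfig conf;
    epicsThreadPool *pool;

    epicsThreadPoolConfigDefaults(&conf); /* always initialize the config first */
    conf.initialThreads = 2;
    conf.maxThreads = 4;

    pool = epicsThreadPoolCreate(&conf);
    if (!pool)
        return;

    /* ... create jobs with epicsJobCreate() and queue them ... */

    /* runs any remaining jobs, stops the workers, notifies leftover jobs
     * with epicsJobModeCleanup, then frees the pool */
    epicsThreadPoolDestroy(pool);
}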
Example #20
OrderedMutex::~OrderedMutex()
{
    epicsMutexDestroy(mutex);
}
Example #21
epicsShareFunc void seqQueueDestroy(QUEUE q)
{
    epicsMutexDestroy(q->mutex);
    free(q->buffer);
    free(q);
}
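A plausible constructor counterpart for seqQueueDestroy(), sketched from the fields it frees; the actual seq API may differ.

/* Hypothetical sketch: allocate the buffer and mutex that
 * seqQueueDestroy() releases, rolling back on partial failure. */
QUEUE seqQueueCreateSketch(size_t numElems, size_t elemSize)
{
    QUEUE q = calloc(1, sizeof(*q));
    if (!q)
        return NULL;
    q->buffer = calloc(numElems, elemSize);
    q->mutex = epicsMutexCreate();
    if (!q->buffer || !q->mutex) {
        if (q->mutex)
            epicsMutexDestroy(q->mutex);
        free(q->buffer);
        free(q);
        return NULL;
    }
    return q;
}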
Example #22
File: logClient.c Project: ukaea/epics
/*
 *  logClientCreate()
 */
logClientId epicsShareAPI logClientCreate (
    struct in_addr server_addr, unsigned short server_port)
{
    epicsTimeStamp begin, current;
    logClient *pClient;
    double diff;

    pClient = calloc (1, sizeof (*pClient));
    if (pClient==NULL) {
        return NULL;
    }

    pClient->addr.sin_family = AF_INET;
    pClient->addr.sin_addr = server_addr;
    pClient->addr.sin_port = htons(server_port);
    ipAddrToDottedIP (&pClient->addr, pClient->name, sizeof(pClient->name));

    pClient->mutex = epicsMutexCreate ();
    if ( ! pClient->mutex ) {
        free ( pClient );
        return NULL;
    }

    pClient->sock = INVALID_SOCKET;
    pClient->connected = 0u;
    pClient->connFailStatus = 0;
    pClient->shutdown = 0;
    pClient->shutdownConfirm = 0;

    epicsAtExit (logClientDestroy, (void*) pClient);
    
    pClient->stateChangeNotify = epicsEventCreate (epicsEventEmpty);
    if ( ! pClient->stateChangeNotify ) {
        epicsMutexDestroy ( pClient->mutex );
        free ( pClient );
        return NULL;
    }
   
    pClient->restartThreadId = epicsThreadCreate (
        "logRestart", epicsThreadPriorityLow, 
        epicsThreadGetStackSize(epicsThreadStackSmall),
        logClientRestart, pClient );
    if ( pClient->restartThreadId == NULL ) {
        epicsMutexDestroy ( pClient->mutex );
        epicsEventDestroy ( pClient->stateChangeNotify );
        free (pClient);
        fprintf(stderr, "log client: unable to start log client connection watch dog thread\n");
        return NULL;
    }

    /*
     * attempt to synchronize with circuit connect
     */
    epicsTimeGetCurrent ( & begin );
    epicsMutexMustLock ( pClient->mutex );
    do {
        epicsMutexUnlock ( pClient->mutex );
        epicsEventWaitWithTimeout ( 
            pClient->stateChangeNotify, 
            LOG_SERVER_CREATE_CONNECT_SYNC_TIMEOUT / 10.0 ); 
        epicsTimeGetCurrent ( & current );
        diff = epicsTimeDiffInSeconds ( & current, & begin );
        epicsMutexMustLock ( pClient->mutex );
    }
    while ( ! pClient->connected && diff < LOG_SERVER_CREATE_CONNECT_SYNC_TIMEOUT );
    epicsMutexUnlock ( pClient->mutex );

    if ( ! pClient->connected ) {
        fprintf (stderr, "log client create: timed out synchronizing with circuit connect to \"%s\" after %.1f seconds\n",
            pClient->name, LOG_SERVER_CREATE_CONNECT_SYNC_TIMEOUT );
    }
        
    return (void *) pClient;
}
Example #23
File: drvMotorSim.c Project: Brudhu/motor
static int motorSimCreateAxis( motorSim_t * pDrv, int card, int axis, double lowLimit, double hiLimit, double home, double start )
{
  AXIS_HDL pAxis;
  AXIS_HDL * ppLast = &(pDrv->pFirst);
  start=0;

  for ( pAxis = pDrv->pFirst;
	pAxis != NULL &&
	  ! ((pAxis->card == card) && (pAxis->axis == axis)); 
	pAxis = pAxis->pNext )
    {
      ppLast = &(pAxis->pNext);
    }

  if ( pAxis == NULL)
    {
      pAxis = (AXIS_HDL) calloc( 1, sizeof(motorAxis) );
      if (pAxis != NULL)
	{
	  route_pars_t pars;

	  pAxis->pDrv = pDrv;

	  pars.numRoutedAxes = 1;
	  pars.routedAxisList[0] = 1;
	  pars.Tsync = 0.0;
	  pars.Tcoast = 0.0;
	  pars.axis[0].Amax = 1.0;
	  pars.axis[0].Vmax = 1.0;

	  pAxis->endpoint.T = 0;
	  pAxis->endpoint.axis[0].p = start;
	  pAxis->endpoint.axis[0].v = 0;
	  pAxis->nextpoint.axis[0].p = start;

	  if ((pAxis->route = routeNew( &(pAxis->endpoint), &pars )) != NULL &&
	      (pAxis->params = motorParam->create( 0, MOTOR_AXIS_NUM_PARAMS )) != NULL &&
	      (pAxis->axisMutex = epicsMutexCreate( )) != NULL )
	    {
	      pAxis->card = card;
	      pAxis->axis = axis;
	      pAxis->hiHardLimit = hiLimit;
	      pAxis->lowHardLimit = lowLimit;
	      pAxis->home = home;
              pAxis->print = motorSimLogMsg;
              pAxis->logParam = NULL;
	      motorParam->setDouble(pAxis->params, motorAxisPosition, start);
	      *ppLast = pAxis;
	      pAxis->print( pAxis->logParam, TRACE_FLOW, "Created motor for card %d, signal %d OK", card, axis );
	    }
	  else
	    {
	      if (pAxis->route != NULL) routeDelete( pAxis->route );
	      if (pAxis->params != NULL) motorParam->destroy( pAxis->params );
	      if (pAxis->axisMutex != NULL) epicsMutexDestroy( pAxis->axisMutex );
	      free ( pAxis );
	      pAxis = NULL;
	    }
	}
      else
	{
	  free ( pAxis );
	  pAxis = NULL;
	}
    }
  else
    {
      pAxis->print( pAxis->logParam, TRACE_ERROR, "Motor for card %d, signal %d already exists", card, axis );
      return MOTOR_AXIS_ERROR;
    }

  if (pAxis == NULL)
    {
      /* pAxis is NULL here, so log through motorSimLogMsg() directly
         instead of dereferencing a null pointer */
      motorSimLogMsg( NULL, TRACE_ERROR, "Cannot create motor for card %d, signal %d", card, axis );
      return MOTOR_AXIS_ERROR;
    }
    
  return MOTOR_AXIS_OK;
}
Example #24
File: logClient.c Project: ukaea/epics
/*
 * logClientDestroy
 */
static void logClientDestroy (logClientId id)
{
    enum epicsSocketSystemCallInterruptMechanismQueryInfo interruptInfo;
    logClient *pClient = (logClient *) id;
    epicsTimeStamp begin, current;
    double diff;

    /* command log client thread to shutdown - taking mutex here */
    /* forces cache flush on SMP machines */
    epicsMutexMustLock ( pClient->mutex );
    pClient->shutdown = 1u;
    epicsMutexUnlock ( pClient->mutex );

    /* unblock log client thread blocking in send() or connect() */
    interruptInfo =
        epicsSocketSystemCallInterruptMechanismQuery ();
    switch ( interruptInfo ) {
    case esscimqi_socketCloseRequired:
        logClientClose ( pClient );
        break;
    case esscimqi_socketBothShutdownRequired:
        shutdown ( pClient->sock, SHUT_WR );
        break;
    case esscimqi_socketSigAlarmRequired:
        epicsSignalRaiseSigAlarm ( pClient->restartThreadId );
        break;
    default:
        break;
    }

    /* wait for confirmation that the thread exited - taking */
    /* mutex here forces cache update on SMP machines */
    epicsTimeGetCurrent ( & begin );
    epicsMutexMustLock ( pClient->mutex );
    do {
        epicsMutexUnlock ( pClient->mutex );
        epicsEventWaitWithTimeout ( 
            pClient->stateChangeNotify, 
            LOG_SERVER_SHUTDOWN_TIMEOUT / 10.0 ); 
        epicsTimeGetCurrent ( & current );
        diff = epicsTimeDiffInSeconds ( & current, & begin );
        epicsMutexMustLock ( pClient->mutex );
    }
    while ( ! pClient->shutdownConfirm && diff < LOG_SERVER_SHUTDOWN_TIMEOUT );
    epicsMutexUnlock ( pClient->mutex );

    if ( ! pClient->shutdownConfirm ) {
        fprintf ( stderr, "log client shutdown: timed out stopping"
            " reconnect thread for \"%s\" after %.1f seconds - cleanup aborted\n",
            pClient->name, LOG_SERVER_SHUTDOWN_TIMEOUT );
        return;
    }

    logClientClose ( pClient );

    epicsMutexDestroy ( pClient->mutex );
   
    epicsEventDestroy ( pClient->stateChangeNotify );

    free ( pClient );
}
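Examples #22 and #24 share the same poll-with-timeout handshake; distilled below as a standalone sketch (the function and parameter names are illustrative, not from logClient.c).

/* Hypothetical sketch of the handshake used above: take the mutex to
 * force a cache flush, then poll the condition in slices of the total
 * timeout until the worker confirms or the deadline passes. */
static int waitForConfirm(epicsMutexId mutex, epicsEventId notify,
                          const volatile int *confirmed, double timeout)
{
    epicsTimeStamp begin, now;
    double diff;

    epicsTimeGetCurrent(&begin);
    epicsMutexMustLock(mutex);
    do {
        epicsMutexUnlock(mutex);
        epicsEventWaitWithTimeout(notify, timeout / 10.0);
        epicsTimeGetCurrent(&now);
        diff = epicsTimeDiffInSeconds(&now, &begin);
        epicsMutexMustLock(mutex);
    } while (!*confirmed && diff < timeout);
    epicsMutexUnlock(mutex);

    return *confirmed; /* nonzero on success, 0 on timeout */
}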