/* Action function for state "get" in state set "read_anon" */
/* Generated by the SNL compiler from ../pvGet.st; the "# line" directives
   map each statement back to its .st source line for diagnostics.
   seqg_trn selects which fired transition's action block to execute:
   transition 0 sets the ef_read_anon event flag; transition 1 starts an
   ASYNC get of channel 0 ("anon") and tests for completion twice. */
static void seqg_action_read_anon_1_get(SS_ID seqg_env, int seqg_trn, int *seqg_pnst)
{
    switch(seqg_trn)
    {
    case 0:
        {
# line 77 "../pvGet.st"
            seq_efSet(seqg_env, ef_read_anon);
        }
        return;
    case 1:
        {
            /* Record the expected value and start the asynchronous get while
               holding the mutex, so 'expected' and the fetched value refer to
               the same update of 'shared'. */
# line 80 "../pvGet.st"
            epicsMutexMustLock(mutex);
# line 81 "../pvGet.st"
            seqg_var->seqg_vars_read_anon.expected = shared;
# line 82 "../pvGet.st"
            seq_pvGetTmo(seqg_env, 0/*anon*/, ASYNC, DEFAULT_TIMEOUT);
# line 83 "../pvGet.st"
            epicsMutexUnlock(mutex);
            /* The get may or may not have completed yet; both outcomes pass,
               but a completed get must have delivered the expected value. */
# line 84 "../pvGet.st"
            if (seq_pvGetComplete(seqg_env, 0/*anon*/))
            {
# line 86 "../pvGet.st"
                testOk(seqg_var->seqg_vars_read_anon.expected == seqg_var->anon, "immediate completion: expected=%d==%d=anon", seqg_var->seqg_vars_read_anon.expected, seqg_var->anon);
            }
            else
            {
# line 88 "../pvGet.st"
                testPass("no immediate completion");
            }
            /* Re-check after a short delay, giving the get time to finish. */
# line 90 "../pvGet.st"
            epicsThreadSleep(0.1);
# line 91 "../pvGet.st"
            if (seq_pvGetComplete(seqg_env, 0/*anon*/))
            {
# line 93 "../pvGet.st"
                testOk(seqg_var->seqg_vars_read_anon.expected == seqg_var->anon, "completion after delay: expected=%d==%d=anon", seqg_var->seqg_vars_read_anon.expected, seqg_var->anon);
            }
            else
            {
# line 95 "../pvGet.st"
                testPass("no completion after delay");
            }
        }
        return;
    }
}
/* Action function for state "get" in state set "read_named" */
/* Generated by the SNL compiler from ../pvGet.st; the "# line" directives
   map each statement back to its .st source line for diagnostics.
   seqg_trn selects which fired transition's action block to execute:
   transition 0 sets the ef_read_named event flag; transition 1 starts an
   ASYNC get of channel 1 ("named") and tests for completion twice. */
static void seqg_action_read_named_0_get(SS_ID seqg_env, int seqg_trn, int *seqg_pnst)
{
    switch(seqg_trn)
    {
    case 0:
        {
# line 37 "../pvGet.st"
            seq_efSet(seqg_env, ef_read_named);
        }
        return;
    case 1:
        {
            /* Record the expected value and start the asynchronous get while
               holding the mutex, so 'expected' and the fetched value refer to
               the same update of 'shared'. */
# line 40 "../pvGet.st"
            epicsMutexMustLock(mutex);
# line 41 "../pvGet.st"
            seqg_var->seqg_vars_read_named.expected = shared;
# line 42 "../pvGet.st"
            seq_pvGetTmo(seqg_env, 1/*named*/, ASYNC, DEFAULT_TIMEOUT);
# line 43 "../pvGet.st"
            epicsMutexUnlock(mutex);
            /* The get may or may not have completed yet; both outcomes pass,
               but a completed get must have delivered the expected value. */
# line 44 "../pvGet.st"
            if (seq_pvGetComplete(seqg_env, 1/*named*/))
            {
# line 46 "../pvGet.st"
                testOk(seqg_var->seqg_vars_read_named.expected == seqg_var->seqg_vars_read_named.named, "immediate completion: expected=%d==%d=named", seqg_var->seqg_vars_read_named.expected, seqg_var->seqg_vars_read_named.named);
            }
            else
            {
# line 48 "../pvGet.st"
                testPass("no immediate completion");
            }
            /* Re-check after a short delay, giving the get time to finish. */
# line 50 "../pvGet.st"
            epicsThreadSleep(0.1);
# line 51 "../pvGet.st"
            if (seq_pvGetComplete(seqg_env, 1/*named*/))
            {
# line 53 "../pvGet.st"
                testOk(seqg_var->seqg_vars_read_named.expected == seqg_var->seqg_vars_read_named.named, "completion after delay: expected=%d==%d=named", seqg_var->seqg_vars_read_named.expected, seqg_var->seqg_vars_read_named.named);
            }
            else
            {
# line 55 "../pvGet.st"
                testPass("no completion after delay");
            }
        }
        return;
    }
}
/*
 * anonymous_put() - Deliver a value written to an anonymous (database-less)
 * channel, then notify interested state sets.
 *
 * Queued channels get the value appended to the channel's queue; unqueued
 * channels get it written directly to the shared buffer. In either case,
 * any associated event flag is set and waiting state sets are woken.
 */
static void anonymous_put(SS_ID ss, CHAN *ch)
{
	char *value = valPtr(ch, ss);

	if (!ch->queue)
	{
		/* Unqueued channel: write straight into the shared buffer.
		   Set dirty flag only if monitored. */
		ss_write_buffer(ch, value, 0, ch->monitored);
	}
	else
	{
		QUEUE q = ch->queue;
		pvType type = ch->type->getType; /*BUG? should that be putType?*/
		size_t size = ch->type->size;
		boolean was_full;
		struct putq_cp_arg copy_arg = {ch, value};

		DEBUG("anonymous_put: type=%d, size=%d, count=%d, buf_size=%d, q=%p\n",
			type, size, ch->count, pv_size_n(type, ch->count), q);
		print_channel_value(DEBUG, ch, value);

		/* Note: Must lock here because multiple state sets can issue
		   pvPut calls concurrently. OTOH, no need to lock against CA
		   callbacks, because anonymous and named PVs are disjoint. */
		epicsMutexMustLock(ch->varLock);
		was_full = seqQueuePutF(q, putq_cp, &copy_arg);
		if (was_full)
		{
			errlogSevPrintf(errlogMinor,
				"pvPut on queued channel '%s' (anonymous): "
				"last queue element overwritten (queue is full)\n",
				ch->varName
			);
		}
		epicsMutexUnlock(ch->varLock);
	}

	/* If there's an event flag associated with this channel, set it */
	if (ch->syncedTo)
		seq_efSet(ss, ch->syncedTo);

	/* Wake up each state set that uses this channel in an event */
	ss_wakeup(ss->prog, ch->eventNum);
}
/* Action function for state "get" in state set "read_anon_sync" */
/* Generated by the SNL compiler from ../pvGet.st; the "# line" directives
   map each statement back to its .st source line for diagnostics.
   seqg_trn selects which fired transition's action block to execute:
   transition 0 sets the ef_read_anon_sync event flag; transition 1 performs
   two SYNC (blocking) gets of channel 0 ("anon"), a short delay apart,
   checking the delivered value each time. */
static void seqg_action_read_anon_sync_3_get(SS_ID seqg_env, int seqg_trn, int *seqg_pnst)
{
    switch(seqg_trn)
    {
    case 0:
        {
# line 155 "../pvGet.st"
            seq_efSet(seqg_env, ef_read_anon_sync);
        }
        return;
    case 1:
        {
            /* Record the expected value and do a blocking get under the
               mutex, so 'expected' and the fetched value refer to the same
               update of 'shared'. */
# line 158 "../pvGet.st"
            epicsMutexMustLock(mutex);
# line 159 "../pvGet.st"
            seqg_var->seqg_vars_read_anon_sync.expected = shared;
# line 160 "../pvGet.st"
            seq_pvGetTmo(seqg_env, 0/*anon*/, SYNC, DEFAULT_TIMEOUT);
# line 161 "../pvGet.st"
            epicsMutexUnlock(mutex);
            /* SYNC get has completed by now, so the value must match. */
# line 163 "../pvGet.st"
            testOk(seqg_var->seqg_vars_read_anon_sync.expected == seqg_var->anon, "synchronous get: expected=%d==%d=anon", seqg_var->seqg_vars_read_anon_sync.expected, seqg_var->anon);
            /* Repeat the whole sequence after a short delay. */
# line 164 "../pvGet.st"
            epicsThreadSleep(0.1);
# line 165 "../pvGet.st"
            epicsMutexMustLock(mutex);
# line 166 "../pvGet.st"
            seqg_var->seqg_vars_read_anon_sync.expected = shared;
# line 167 "../pvGet.st"
            seq_pvGetTmo(seqg_env, 0/*anon*/, SYNC, DEFAULT_TIMEOUT);
# line 168 "../pvGet.st"
            epicsMutexUnlock(mutex);
# line 170 "../pvGet.st"
            testOk(seqg_var->seqg_vars_read_anon_sync.expected == seqg_var->anon, "repeat get after delay: expected=%d==%d=anon", seqg_var->seqg_vars_read_anon_sync.expected, seqg_var->anon);
        }
        return;
    }
}
/* Action function for state "get" in state set "read_named_sync" */
/* Generated by the SNL compiler from ../pvGet.st; the "# line" directives
   map each statement back to its .st source line for diagnostics.
   seqg_trn selects which fired transition's action block to execute:
   transition 0 sets the ef_read_named_sync event flag; transition 1 performs
   two SYNC (blocking) gets of channel 2 ("named"), a short delay apart,
   checking the delivered value each time. */
static void seqg_action_read_named_sync_2_get(SS_ID seqg_env, int seqg_trn, int *seqg_pnst)
{
    switch(seqg_trn)
    {
    case 0:
        {
# line 119 "../pvGet.st"
            seq_efSet(seqg_env, ef_read_named_sync);
        }
        return;
    case 1:
        {
            /* Record the expected value and do a blocking get under the
               mutex, so 'expected' and the fetched value refer to the same
               update of 'shared'. */
# line 122 "../pvGet.st"
            epicsMutexMustLock(mutex);
# line 123 "../pvGet.st"
            seqg_var->seqg_vars_read_named_sync.expected = shared;
# line 124 "../pvGet.st"
            seq_pvGetTmo(seqg_env, 2/*named*/, SYNC, DEFAULT_TIMEOUT);
# line 125 "../pvGet.st"
            epicsMutexUnlock(mutex);
            /* SYNC get has completed by now, so the value must match. */
# line 127 "../pvGet.st"
            testOk(seqg_var->seqg_vars_read_named_sync.expected == seqg_var->seqg_vars_read_named_sync.named, "synchronous get: expected=%d==%d=named", seqg_var->seqg_vars_read_named_sync.expected, seqg_var->seqg_vars_read_named_sync.named);
            /* Repeat the whole sequence after a short delay. */
# line 128 "../pvGet.st"
            epicsThreadSleep(0.1);
# line 129 "../pvGet.st"
            epicsMutexMustLock(mutex);
# line 130 "../pvGet.st"
            seqg_var->seqg_vars_read_named_sync.expected = shared;
# line 131 "../pvGet.st"
            seq_pvGetTmo(seqg_env, 2/*named*/, SYNC, DEFAULT_TIMEOUT);
# line 132 "../pvGet.st"
            epicsMutexUnlock(mutex);
# line 134 "../pvGet.st"
            testOk(seqg_var->seqg_vars_read_named_sync.expected == seqg_var->seqg_vars_read_named_sync.named, "repeat get after delay: expected=%d==%d=named", seqg_var->seqg_vars_read_named_sync.expected, seqg_var->seqg_vars_read_named_sync.named);
        }
        return;
    }
}
/*
 * sequencer() - Sequencer main thread entry point.
 *
 * Runs as the thread of the first state set. Initializes the program,
 * connects channels, spawns one thread per additional state set, then
 * executes the first state set itself. On return from ss_entry (program
 * shutdown) it waits for the other state set threads, then disconnects,
 * deregisters, and frees the program. The 'exit' label is the common
 * cleanup path for both early failure and normal termination.
 */
void sequencer (void *arg)	/* ptr to original (global) state program table */
{
	PROG		*sp = (PROG *)arg;
	unsigned	nss;
	size_t		threadLen;
	/* +10 leaves room for the "_<nss>" suffix appended below */
	char		threadName[THREAD_NAME_SIZE+10];

	/* Get this thread's id */
	sp->ss->threadId = epicsThreadGetIdSelf();

	/* Add the program to the program list */
	seqAddProg(sp);

	createOrAttachPvSystem(sp);

	if (!pvSysIsDefined(sp->pvSys))
	{
		sp->die = TRUE;
		goto exit;
	}

	/* Call sequencer init function to initialize variables. */
	sp->initFunc(sp);

	/* Initialize state set variables. In safe mode, copy variable
	   block to state set buffers. Must do all this before connecting. */
	if (optTest(sp, OPT_SAFE))
	{
		for (nss = 0; nss < sp->numSS; nss++)
		{
			SSCB *ss = sp->ss + nss;

			memcpy(ss->var, sp->var, sp->varSize);
		}
	}

	/* Attach to PV system */
	pvSysAttach(sp->pvSys);

	/* Initiate connect & monitor requests to database channels, waiting
	   for all connections to be established if the option is set.
	   NOTE(review): parenthesization reconstructed from a mangled source
	   line -- verify that the second argument to seq_connect is
	   optTest(sp, OPT_CONN) and the != pvStatOK applies to its return. */
	if (seq_connect(sp, optTest(sp, OPT_CONN)) != pvStatOK)
		goto exit;

	/* Emulate the 'first monitor event' for anonymous PVs */
	if (optTest(sp, OPT_SAFE))
	{
		unsigned nch;

		/* anonymous == synced but without a database channel (dbch) */
		for (nch=0; nch<sp->numChans; nch++)
			if (sp->chan[nch].syncedTo && !sp->chan[nch].dbch)
				seq_efSet(sp->ss, sp->chan[nch].syncedTo);
	}

	/* Call program entry function if defined.
	   Treat as if called from 1st state set. */
	if (sp->entryFunc) sp->entryFunc(sp->ss);

	/* Create each additional state set task (additional state set
	   thread names are derived from the first ss) */
	epicsThreadGetName(sp->ss->threadId, threadName, sizeof(threadName));
	threadLen = strlen(threadName);
	for (nss = 1; nss < sp->numSS; nss++)
	{
		SSCB		*ss = sp->ss + nss;
		epicsThreadId	tid;

		/* Form thread name from program name + state set number */
		sprintf(threadName+threadLen, "_%d", nss);

		/* Spawn the task */
		tid = epicsThreadCreate(
			threadName,		/* thread name */
			sp->threadPriority,	/* priority */
			sp->stackSize,		/* stack size */
			ss_entry,		/* entry point */
			ss);			/* parameter */

		DEBUG("Spawning additional state set thread %p: \"%s\"\n",
			tid, threadName);
	}

	/* First state set jumps directly to entry point */
	ss_entry(sp->ss);

	DEBUG(" Wait for other state sets to exit\n");
	for (nss = 1; nss < sp->numSS; nss++)
	{
		SSCB *ss = sp->ss + nss;

		epicsEventMustWait(ss->dead);
	}

	/* Call program exit function if defined.
	   Treat as if called from 1st state set. */
	if (sp->exitFunc) sp->exitFunc(sp->ss);

exit:
	DEBUG(" Disconnect all channels\n");
	seq_disconnect(sp);

	DEBUG(" Remove program instance from list\n");
	seqDelProg(sp);

	errlogSevPrintf(errlogInfo,
		"Instance %d of sequencer program \"%s\" terminated\n",
		sp->instance, sp->progName);

	/* Free all allocated memory */
	seq_free(sp);
}
/* Thin wrapper: set event flag 'x' on behalf of state set 'ssId'. */
static void efSet_x(SS_ID ssId, EV_ID x)
{
	seq_efSet(ssId, x);
}