/*
 * Push 'task' onto the ready_tasks queue.  If 'task' has the privilege
 * flag set, then also push it onto the ready_priority_tasks queue.
 *
 * Caller must hold the task manager lock.
 */
static inline void
push_readyq(isc__taskmgr_t *manager, isc__task_t *task) {
    ENQUEUE(manager->ready_tasks, task, ready_link);
    if ((task->flags & TASK_F_PRIVILEGED) != 0)
        ENQUEUE(manager->ready_priority_tasks, task,
                ready_priority_link);
}
void dijkstra(struct Graph *G, int s)
{
    struct Queue *Q;
    int i, j;

    Q = (struct Queue *)malloc(sizeof(struct Queue));
    Q->length = MAX_SIZE - 1;
    Q->head = Q->tail = 0;

    G->costArray[s] = 0;
    ENQUEUE(Q, s);

    /*
     * Note: with a plain FIFO queue this is a Bellman-Ford/SPFA style
     * relaxation rather than textbook Dijkstra, which needs a
     * min-priority queue.  A vertex is (re-)enqueued only when its cost
     * improves; enqueueing every neighbor unconditionally (as the
     * original did) never terminates on cyclic graphs.
     */
    while (Q->head != Q->tail) {
        j = DEQUEUE(Q);
        G->colorArray[j] = BLACK;
        for (i = 1; i <= G->V; i++) {
            if (G->adjMatrix[i][j] == 0)
                continue;                       /* not a neighbor of j */
            if (G->costArray[i] == INFINITE) {  /* newly reached node */
                G->costArray[i] = G->costArray[j] + G->adjMatrix[i][j];
                G->parentArray[i] = j;
                ENQUEUE(Q, i);
            } else if (G->costArray[i] > G->costArray[j] + G->adjMatrix[i][j]) {
                /* improved cost */
                G->costArray[i] = G->costArray[j] + G->adjMatrix[i][j];
                G->parentArray[i] = j;
                ENQUEUE(Q, i);
            }
        }
    }
    free(Q);
}
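For comparison, a minimal textbook Dijkstra over the same adjacency-matrix representation might look like the sketch below. It reuses the field names from the snippet above (V, adjMatrix, costArray, parentArray, colorArray, INFINITE, BLACK) purely as assumptions, and replaces the FIFO queue with a linear scan for the cheapest unvisited vertex; treat it as an illustration, not the original author's code.

/*
 * O(V^2) Dijkstra sketch: repeatedly finalize the cheapest unvisited vertex.
 * Assumes costArray[] was initialized to INFINITE and colorArray[] to a
 * non-BLACK value by the caller, as the queue-based version also requires.
 */
void dijkstra_scan(struct Graph *G, int s)
{
    int round, i, u;

    G->costArray[s] = 0;
    for (round = 1; round <= G->V; round++) {
        /* Linear scan for the unvisited vertex with the smallest cost. */
        u = -1;
        for (i = 1; i <= G->V; i++) {
            if (G->colorArray[i] != BLACK && G->costArray[i] != INFINITE &&
                (u == -1 || G->costArray[i] < G->costArray[u]))
                u = i;
        }
        if (u == -1)
            break;                      /* remaining vertices unreachable */
        G->colorArray[u] = BLACK;       /* u's distance is now final */

        /* Relax every edge (u, i), using the same adjMatrix[i][u] indexing
         * convention as the snippet above. */
        for (i = 1; i <= G->V; i++) {
            if (G->adjMatrix[i][u] == 0 || G->colorArray[i] == BLACK)
                continue;
            if (G->costArray[i] == INFINITE ||
                G->costArray[i] > G->costArray[u] + G->adjMatrix[i][u]) {
                G->costArray[i] = G->costArray[u] + G->adjMatrix[i][u];
                G->parentArray[i] = u;
            }
        }
    }
}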
/* Perform menu command COMMAND. */
Boolean AppMenuDoCommand (UInt16 command)
{
    Boolean handled = false;
    FormDesc *fd;

    /* See if menu command is a form switch. */
    for (fd = g_formList; fd; fd = fd->next) {
        if (fd->menuCommandID == command) {
            SwitchToForm (fd->formID);
            return true;
        }
    }

    /* Handle the rest of the items. */
    switch (command) {
    case CommonOptionsPreferences:
        PrefShowSetupForm();
        return true;

    case CommonOptionsAbout:
        AboutShow();
        return true;

#if 0
    case CommonOptionsSwitchclass:
        SPREF (bluetoothClass) = (SPREF (bluetoothClass) + 1) % 5;
        SPREF (bluetoothSDP) = (SPREF (bluetoothSDP) + 1) % 5;
        return true;
#endif

    case CommonConnectionAddDevice:
        BTSelectDevice();
        return true;

    case CommonConnectionConnect:
        ENQUEUE (CmdInitiateConnect);
        return true;

    case CommonConnectionDisconnect:
        ENQUEUE (CmdInitiateDisconnect);
        return true;

    default:
        break;
    }

    return handled;
}
void Topologicalsort( AdjGraph G, int aov[NumVertices] )
{
    int v, w, nodes;
    EdgeNode *tmp;
    EdgeData indegree[NumVertices+1] = {0};
    QUEUE Q;

    MAKENULL( Q );

    // Compute the in-degree of every vertex.
    for( v = 1; v <= G.n; ++v ) {
        tmp = G.vexlist[v].firstedge;
        while (tmp) {
            indegree[tmp->adjvex]++;
            tmp = tmp->next;
        }
    }

    // Enqueue every vertex whose in-degree is 0.
    for( v = 1; v <= G.n; ++v )
        if ( indegree[v] == 0 )
            ENQUEUE( v, Q );

    nodes = 0;
    while ( !EMPTY( Q ) ) {
        v = FRONT(Q)->element;
        DEQUEUE( Q );
        //cout << v << ' ';
        aov[nodes] = v;
        nodes++;        // one more vertex placed in the ordering

        // For every edge (v, w): decrement w's in-degree and enqueue w
        // once it drops to 0.
        for( w = 1; w <= G.n; w++ ) {
            if (connect(G, v, w)) {
                --indegree[w];
                if ( !(indegree[w]) )
                    ENQUEUE( w, Q );
            }
        }
    }
    cout << endl;

    if ( nodes < G.n )
        cout << "图中有环路" << endl;    // "the graph contains a cycle"
}
ISC_TASKFUNC_SCOPE void
isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv) {
    isc__task_t *task = (isc__task_t *)task0;
    isc__taskmgr_t *manager = task->manager;
    isc_boolean_t oldpriv;

    LOCK(&task->lock);
    oldpriv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
    if (priv)
        task->flags |= TASK_F_PRIVILEGED;
    else
        task->flags &= ~TASK_F_PRIVILEGED;
    UNLOCK(&task->lock);

    if (priv == oldpriv)
        return;

    LOCK(&manager->lock);
    if (priv && ISC_LINK_LINKED(task, ready_link))
        ENQUEUE(manager->ready_priority_tasks, task,
                ready_priority_link);
    else if (!priv && ISC_LINK_LINKED(task, ready_priority_link))
        DEQUEUE(manager->ready_priority_tasks, task,
                ready_priority_link);
    UNLOCK(&manager->lock);
}
/*
 * Normal event handler.  Take first character off queue and send to clock
 * if not a null.  Shift characters down and put a null on the end.
 *
 * We assume that there is no parallelism so no race condition, but even if
 * there is nothing bad will happen except that we might send some bad data
 * to the clock once in a while.
 */
static void
arc_event_handler(struct peer *peer)
{
    struct refclockproc *pp = peer->procptr;
    register struct arcunit *up = (struct arcunit *)pp->unitptr;
    int i;
    char c;

#ifdef DEBUG
    if(debug > 2) { printf("arc: arc_event_handler() called.\n"); }
#endif

    c = up->cmdqueue[0];    /* Next char to be sent. */
    /* Shift down characters, shifting trailing \0 in at end. */
    for(i = 0; i < CMDQUEUELEN; ++i) {
        up->cmdqueue[i] = up->cmdqueue[i+1];
    }

    /* Don't send '\0' characters. */
    if(c != '\0') {
        if(write(pp->io.fd, &c, 1) != 1) {
            msyslog(LOG_NOTICE, "ARCRON: write to fd %d failed",
                    pp->io.fd);
        }
#ifdef DEBUG
        else if(debug) {
            printf("arc: sent `%2.2x', fd %d.\n", c, pp->io.fd);
        }
#endif
    }

    ENQUEUE(up);
}
static fsm_rt_t console_print(const uint8_t *pchBuf, uint8_t chNum)
{
    static uint8_t s_chPrintIndex;
    static enum {
        CONSOLE_PRT_START = 0,
        CONSOLE_PRT_PRINT
    } s_tState = CONSOLE_PRT_START;

    if ((NULL == pchBuf) || (!chNum)) {
        return fsm_rt_err;
    }

    switch (s_tState) {
        case CONSOLE_PRT_START:
            s_chPrintIndex = 0;
            s_tState = CONSOLE_PRT_PRINT;
            //break;                    /* deliberate fall-through */
        case CONSOLE_PRT_PRINT:
            if (s_chPrintIndex < chNum) {
                if (ENQUEUE(InOutQueue, &g_tFIFOout, pchBuf[s_chPrintIndex])) {
                    s_chPrintIndex++;
                }
            } else {
                CONSOLE_PRT_RESET();
                return fsm_rt_cpl;
            }
            break;
    }
    return fsm_rt_on_going;
}
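Because console_print is written as a resumable FSM, a caller is expected to keep invoking it until it reports completion. The snippet below is a hypothetical usage sketch: the function name, busy-loop, string and length are illustrative; only the fsm_rt_cpl / fsm_rt_on_going / fsm_rt_err return convention is taken from the code above.

/* Hypothetical caller: drive the print FSM to completion. */
static void print_banner(void)
{
    static const uint8_t s_chMsg[] = "hello\r\n";
    fsm_rt_t tResult;

    do {
        tResult = console_print(s_chMsg, sizeof(s_chMsg) - 1);
        /* fsm_rt_on_going: the output FIFO was full or bytes remain;
         * a real system would run other tasks here instead of spinning. */
    } while (fsm_rt_on_going == tResult);

    if (fsm_rt_err == tResult) {
        /* bad arguments (NULL buffer or zero length) */
    }
}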
static inline isc_boolean_t
task_send(isc_task_t *task, isc_event_t **eventp) {
    isc_boolean_t was_idle = ISC_FALSE;
    isc_event_t *event;

    /*
     * Caller must be holding the task lock.
     */

    REQUIRE(eventp != NULL);
    event = *eventp;
    REQUIRE(event != NULL);
    REQUIRE(event->ev_type > 0);
    REQUIRE(task->state != task_state_done);

    XTRACE("task_send");

    if (task->state == task_state_idle) {
        was_idle = ISC_TRUE;
        INSIST(EMPTY(task->events));
        task->state = task_state_ready;
    }
    INSIST(task->state == task_state_ready ||
           task->state == task_state_running);
    ENQUEUE(task->events, event, ev_link);
    *eventp = NULL;

    return (was_idle);
}
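task_send returns was_idle so that the caller can wake the task up after dropping the task lock. The sketch below illustrates the typical caller pattern implied by the contract in the comments (hold the task lock around task_send, then make the task runnable if it was idle); the function name example_task_send is illustrative, not the library's actual isc_task_send.

static void
example_task_send(isc_task_t *task, isc_event_t **eventp) {
    isc_boolean_t was_idle;

    /*
     * task_send() requires the task lock; it only queues the event and
     * flips the state from idle to ready.
     */
    LOCK(&task->lock);
    was_idle = task_send(task, eventp);
    UNLOCK(&task->lock);

    if (was_idle) {
        /*
         * The task went from idle to ready, so it is not yet on the
         * manager's ready queue; task_ready() (shown further below)
         * enqueues it there and signals a worker.
         */
        task_ready(task);
    }
}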
void TradeRouteData::SetRecip(TradeRoute route)
{
    Assert(FALSE);

    m_recip = route;
    ENQUEUE();
}
int TTY_WriteByte(int handle, byte data)
{
    ComPort *p;

    p = handleToPort [handle];
    if (FULL(p->outputQueue))
        return -1;

    ENQUEUE (p->outputQueue, data);
    return 0;
}
static int Modem_Command(ComPort *p, char *commandString)
{
    byte b;

    if (CheckStatus (p))
        return -1;

    disable();
    p->outputQueue.head = p->outputQueue.tail = 0;
    p->inputQueue.head = p->inputQueue.tail = 0;
    enable();
    p->bufferUsed = 0;

    while (*commandString)
        ENQUEUE (p->outputQueue, *commandString++);

    ENQUEUE (p->outputQueue, '\r');

    // get the transmit rolling
    DEQUEUE (p->outputQueue, b);
    outportb(p->uart, b);

    return 0;
}
CARD32
processautorepeat(OsTimerPtr timer, CARD32 now, pointer arg)
{
    xEvent kevent;
    int keycode;

    keycode = (long)arg;
    xf86Info.lastEventTime = kevent.u.keyButtonPointer.time =
        GetTimeInMillis();

    /*
     * Repeat a key by faking a KeyRelease, and a KeyPress event in rapid
     * succession
     */
    ENQUEUE(&kevent, keycode, KeyRelease, XE_KEYBOARD);
    ENQUEUE(&kevent, keycode, KeyPress, XE_KEYBOARD);

    /* And return the appropriate value so we get rescheduled */
    return xf86Info.kbdRate;
}
//----------------------------------------------------------------------------
//
// Name       : CivilisationData::CivilisationData
//
// Description: Constructor
//
// Parameters : id     : unique civilisation id
//              owner  : player index
//              civ    : civilisation index
//              gender : leader gender
//
// Globals    : -
//
// Returns    : -
//
// Remark(s)  : Notifies other (network) players of its existence.
//
//----------------------------------------------------------------------------
CivilisationData::CivilisationData(const ID &id, PLAYER_INDEX owner, sint32 civ, GENDER gender)
:   GameObj(id.m_id),
    m_owner(owner),
    m_civ(civ),
    m_gender(gender),
    m_cityStyle(CITY_STYLE_GENERIC)
{
    memset(m_cityname_count, 0, sizeof(m_cityname_count));
    memset(m_leader_name, 0, k_MAX_NAME_LEN);
    memset(m_personality_description, 0, k_MAX_NAME_LEN);
    memset(m_civilisation_name, 0, k_MAX_NAME_LEN);
    memset(m_country_name, 0, k_MAX_NAME_LEN);
    memset(m_singular_name, 0, k_MAX_NAME_LEN);

    ENQUEUE();
}
static void ISR_8250 (ComPort *p)
{
    byte source = 0;
    byte b;

    disable();

    while ((source = inportb (p->uart + INTERRUPT_ID_REGISTER) & 0x07) != 1)
    {
        switch (source)
        {
            case IIR_RX_DATA_READY_INTERRUPT:
                b = inportb (p->uart + RECEIVE_BUFFER_REGISTER);
                if (! FULL(p->inputQueue))
                {
                    ENQUEUE (p->inputQueue, b);
                }
                else
                {
                    p->lineStatus |= LSR_OVERRUN_ERROR;
                    p->statusUpdated = true;
                }
                break;

            case IIR_TX_HOLDING_REGISTER_INTERRUPT:
                if (! EMPTY(p->outputQueue))
                {
                    DEQUEUE (p->outputQueue, b);
                    outportb (p->uart + TRANSMIT_HOLDING_REGISTER, b);
                }
                break;

            case IIR_MODEM_STATUS_INTERRUPT:
                p->modemStatus =
                    (inportb (p->uart + MODEM_STATUS_REGISTER) & MODEM_STATUS_MASK) |
                    p->modemStatusIgnore;
                p->statusUpdated = true;
                break;

            case IIR_LINE_STATUS_INTERRUPT:
                p->lineStatus = inportb (p->uart + LINE_STATUS_REGISTER);
                p->statusUpdated = true;
                break;
        }

        source = inportb (p->uart + INTERRUPT_ID_REGISTER) & 0x07;
    }

    outportb (0x20, 0x20);
}
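The ComPort code above relies on FULL, EMPTY, ENQUEUE and DEQUEUE macros over a head/tail byte queue (Modem_Command resets both indices to 0), but their definitions are not shown here. The following is a hypothetical set of definitions consistent with that usage, assuming a power-of-two buffer size so the indices can wrap with a mask and that 'byte' is typedef'd elsewhere; the names QUEUESIZE, QUEUEMASK and ByteQueue are illustrative.

/* Hypothetical ring-buffer queue macros, sized to a power of two. */
#define QUEUESIZE 2048
#define QUEUEMASK (QUEUESIZE - 1)

typedef struct
{
    volatile int  head;                 /* next slot to read  */
    volatile int  tail;                 /* next slot to write */
    volatile byte buffer[QUEUESIZE];    /* 'byte' assumed typedef'd as unsigned char */
} ByteQueue;

#define EMPTY(q)      ((q).head == (q).tail)
#define FULL(q)       ((((q).tail + 1) & QUEUEMASK) == (q).head)
#define ENQUEUE(q, b) ((q).buffer[(q).tail] = (b), \
                       (q).tail = ((q).tail + 1) & QUEUEMASK)
#define DEQUEUE(q, b) ((b) = (q).buffer[(q).head], \
                       (q).head = ((q).head + 1) & QUEUEMASK)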
static inline void
task_ready(isc_task_t *task) {
    isc_taskmgr_t *manager = task->manager;

    REQUIRE(VALID_MANAGER(manager));
    REQUIRE(task->state == task_state_ready);

    XTRACE("task_ready");

    LOCK(&manager->lock);

    ENQUEUE(manager->ready_tasks, task, ready_link);
#ifdef ISC_PLATFORM_USETHREADS
    SIGNAL(&manager->work_available);
#endif /* ISC_PLATFORM_USETHREADS */

    UNLOCK(&manager->lock);
}
ISC_TASKFUNC_SCOPE isc_result_t
isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
                     const void *arg)
{
    isc__task_t *task = (isc__task_t *)task0;
    isc_boolean_t disallowed = ISC_FALSE;
    isc_result_t result = ISC_R_SUCCESS;
    isc_event_t *event;

    /*
     * Send a shutdown event with action 'action' and argument 'arg' when
     * 'task' is shutdown.
     */

    REQUIRE(VALID_TASK(task));
    REQUIRE(action != NULL);

    event = isc_event_allocate(task->manager->mctx,
                               NULL,
                               ISC_TASKEVENT_SHUTDOWN,
                               action,
                               arg,
                               sizeof(*event));
    if (event == NULL)
        return (ISC_R_NOMEMORY);

    LOCK(&task->lock);
    if (TASK_SHUTTINGDOWN(task)) {
        disallowed = ISC_TRUE;
        result = ISC_R_SHUTTINGDOWN;
    } else
        ENQUEUE(task->on_shutdown, event, ev_link);
    UNLOCK(&task->lock);

    if (disallowed)
        isc_mem_put(task->manager->mctx, event, sizeof(*event));

    return (result);
}
static unsigned int
dequeue_events(isc_task_t *task, void *sender, isc_eventtype_t first,
               isc_eventtype_t last, void *tag,
               isc_eventlist_t *events, isc_boolean_t purging)
{
    isc_event_t *event, *next_event;
    unsigned int count = 0;

    REQUIRE(VALID_TASK(task));
    REQUIRE(last >= first);

    XTRACE("dequeue_events");

    /*
     * Events matching 'sender', whose type is >= first and <= last, and
     * whose tag is 'tag' will be dequeued.  If 'purging', matching events
     * which are marked as unpurgable will not be dequeued.
     *
     * sender == NULL means "any sender", and tag == NULL means "any tag".
     */

    LOCK(&task->lock);

    for (event = HEAD(task->events); event != NULL; event = next_event) {
        next_event = NEXT(event, ev_link);
        if (event->ev_type >= first && event->ev_type <= last &&
            (sender == NULL || event->ev_sender == sender) &&
            (tag == NULL || event->ev_tag == tag) &&
            (!purging || PURGE_OK(event))) {
            DEQUEUE(task->events, event, ev_link);
            ENQUEUE(*events, event, ev_link);
            count++;
        }
    }

    UNLOCK(&task->lock);

    return (count);
}
static inline isc_boolean_t
task_shutdown(isc__task_t *task) {
    isc_boolean_t was_idle = ISC_FALSE;
    isc_event_t *event, *prev;

    /*
     * Caller must be holding the task's lock.
     */

    XTRACE("task_shutdown");

    if (! TASK_SHUTTINGDOWN(task)) {
        XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
                              ISC_MSG_SHUTTINGDOWN, "shutting down"));
        task->flags |= TASK_F_SHUTTINGDOWN;
        if (task->state == task_state_idle) {
            INSIST(EMPTY(task->events));
            task->state = task_state_ready;
            was_idle = ISC_TRUE;
        }
        INSIST(task->state == task_state_ready ||
               task->state == task_state_running);

        /*
         * Note that we post shutdown events LIFO.
         */
        for (event = TAIL(task->on_shutdown); event != NULL;
             event = prev) {
            prev = PREV(event, ev_link);
            DEQUEUE(task->on_shutdown, event, ev_link);
            ENQUEUE(task->events, event, ev_link);
        }
    }

    return (was_idle);
}
/* * Supply Strategy 1 Large Buffers to CP * * May be called in interrupt state. * Must be called with interrupts locked out. * * Arguments: * fup pointer to device unit structure * * Returns: * none */ static void fore_buf_supply_1l(Fore_unit *fup) { H_buf_queue *hbp; Buf_queue *cqp; Buf_descr *bdp; Buf_handle *bhp; KBuffer *m; int nvcc, nbuf, i; /* * Figure out how many buffers we should be giving to the CP. * We're basing this calculation on the current number of open * VCCs thru this device, with certain minimum and maximum values * enforced. This will then allow us to figure out how many more * buffers we need to supply to the CP. This will be rounded up * to fill a supply queue entry. */ nvcc = MAX(fup->fu_open_vcc, BUF_MIN_VCC); nbuf = nvcc * 4 * RECV_MAX_SEGS; nbuf = MIN(nbuf, BUF1_LG_CPPOOL); nbuf -= fup->fu_buf1l_cnt; nbuf = roundup(nbuf, BUF1_LG_ENTSIZE); /* * OK, now supply the buffers to the CP */ while (nbuf > 0) { /* * Acquire a supply queue entry */ hbp = fup->fu_buf1l_tail; if (!((*hbp->hbq_status) & QSTAT_FREE)) break; bdp = hbp->hbq_descr; /* * Get a buffer for each descriptor in the queue entry */ for (i = 0; i < BUF1_LG_ENTSIZE; i++, bdp++) { caddr_t cp; /* * Get a cluster buffer */ KB_ALLOCEXT(m, BUF1_LG_SIZE, KB_F_NOWAIT, KB_T_DATA); if (m == NULL) { break; } KB_HEADSET(m, BUF1_LG_DOFF); /* * Point to buffer handle structure */ bhp = (Buf_handle *)((caddr_t)m + BUF1_LG_HOFF); bhp->bh_type = BHT_S1_LARGE; /* * Setup buffer descriptor */ bdp->bsd_handle = bhp; KB_DATASTART(m, cp, caddr_t); bhp->bh_dma = bdp->bsd_buffer = (H_dma) DMA_GET_ADDR( cp, BUF1_LG_SIZE, BUF_DATA_ALIGN, 0); if (bdp->bsd_buffer == 0) { /* * Unable to assign dma address - free up * this descriptor's buffer */ fup->fu_stats->st_drv.drv_bf_segdma++; KB_FREEALL(m); break; } /* * All set, so queue buffer (handle) */ ENQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1l_bq); } /* * If we we're not able to fill all the descriptors for * an entry, free up what's been partially built */ if (i != BUF1_LG_ENTSIZE) { caddr_t cp; /* * Clean up each used descriptor */ for (bdp = hbp->hbq_descr; i; i--, bdp++) { bhp = bdp->bsd_handle; DEQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1l_bq); m = (KBuffer *) ((caddr_t)bhp - BUF1_LG_HOFF); KB_DATASTART(m, cp, caddr_t); DMA_FREE_ADDR(cp, bhp->bh_dma, BUF1_LG_SIZE, 0); KB_FREEALL(m); } break; } /* * Finally, we've got an entry ready for the CP. * So claim the host queue entry and setup the CP-resident * queue entry. The CP will (potentially) grab the supplied * buffers when the descriptor pointer is set. */ fup->fu_buf1l_tail = hbp->hbq_next; (*hbp->hbq_status) = QSTAT_PENDING; cqp = hbp->hbq_cpelem; cqp->cq_descr = (CP_dma) CP_WRITE((u_long)hbp->hbq_descr_dma); /* * Update counters, etc for supplied buffers */ fup->fu_buf1l_cnt += BUF1_LG_ENTSIZE; nbuf -= BUF1_LG_ENTSIZE; } return; }
static void ISR_16550 (ComPort *p)
{
    int count;
    byte source;
    byte b;

    disable();

    while ((source = inportb (p->uart + INTERRUPT_ID_REGISTER) & 0x07) != 1)
    {
        switch (source)
        {
            case IIR_RX_DATA_READY_INTERRUPT:
                do
                {
                    b = inportb (p->uart + RECEIVE_BUFFER_REGISTER);
                    if (!FULL(p->inputQueue))
                    {
                        ENQUEUE (p->inputQueue, b);
                    }
                    else
                    {
                        p->lineStatus |= LSR_OVERRUN_ERROR;
                        p->statusUpdated = true;
                    }
                } while (inportb (p->uart + LINE_STATUS_REGISTER) & LSR_DATA_READY);
                break;

            case IIR_TX_HOLDING_REGISTER_INTERRUPT:
                count = 16;
                while ((! EMPTY(p->outputQueue)) && count--)
                {
                    DEQUEUE (p->outputQueue, b);
                    outportb (p->uart + TRANSMIT_HOLDING_REGISTER, b);
                }
                break;

            case IIR_MODEM_STATUS_INTERRUPT:
                p->modemStatus =
                    (inportb (p->uart + MODEM_STATUS_REGISTER) & MODEM_STATUS_MASK) |
                    p->modemStatusIgnore;
                p->statusUpdated = true;
                break;

            case IIR_LINE_STATUS_INTERRUPT:
                p->lineStatus = inportb (p->uart + LINE_STATUS_REGISTER);
                p->statusUpdated = true;
                break;
        }

        source = inportb (p->uart + INTERRUPT_ID_REGISTER) & 0x07;
    }

    // check for lost IIR_TX_HOLDING_REGISTER_INTERRUPT on 16550a!
    if (inportb (p->uart + LINE_STATUS_REGISTER) & LSR_TRANSMITTER_EMPTY)
    {
        count = 16;
        while ((! EMPTY(p->outputQueue)) && count--)
        {
            DEQUEUE (p->outputQueue, b);
            outportb (p->uart + TRANSMIT_HOLDING_REGISTER, b);
        }
    }

    outportb (0x20, 0x20);
}
void TradeRouteData::SetDestination(Unit dest)
{
    m_destinationCity = dest;
    ENQUEUE();
}
/* * arc_start - open the devices and initialize data for processing */ static int arc_start( int unit, struct peer *peer ) { register struct arcunit *up; struct refclockproc *pp; int fd; char device[20]; #ifdef HAVE_TERMIOS struct termios arg; #endif msyslog(LOG_NOTICE, "ARCRON: %s: opening unit %d", arc_version, unit); #ifdef DEBUG if(debug) { printf("arc: %s: attempt to open unit %d.\n", arc_version, unit); } #endif /* Prevent a ridiculous device number causing overflow of device[]. */ if((unit < 0) || (unit > 255)) { return(0); } /* * Open serial port. Use CLK line discipline, if available. */ snprintf(device, sizeof(device), DEVICE, unit); if (!(fd = refclock_open(device, SPEED, LDISC_CLK))) return(0); #ifdef DEBUG if(debug) { printf("arc: unit %d using open().\n", unit); } #endif fd = tty_open(device, OPEN_FLAGS, 0777); if(fd < 0) { #ifdef DEBUG if(debug) { printf("arc: failed [tty_open()] to open %s.\n", device); } #endif return(0); } #ifndef SYS_WINNT fcntl(fd, F_SETFL, 0); /* clear the descriptor flags */ #endif #ifdef DEBUG if(debug) { printf("arc: opened RS232 port with file descriptor %d.\n", fd); } #endif #ifdef HAVE_TERMIOS tcgetattr(fd, &arg); arg.c_iflag = IGNBRK | ISTRIP; arg.c_oflag = 0; arg.c_cflag = B300 | CS8 | CREAD | CLOCAL | CSTOPB; arg.c_lflag = 0; arg.c_cc[VMIN] = 1; arg.c_cc[VTIME] = 0; tcsetattr(fd, TCSANOW, &arg); #else msyslog(LOG_ERR, "ARCRON: termios not supported in this driver"); (void)close(fd); return 0; #endif up = emalloc(sizeof(*up)); /* Set structure to all zeros... */ memset(up, 0, sizeof(*up)); pp = peer->procptr; pp->io.clock_recv = arc_receive; pp->io.srcclock = (caddr_t)peer; pp->io.datalen = 0; pp->io.fd = fd; if (!io_addclock(&pp->io)) { close(fd); pp->io.fd = -1; free(up); return(0); } pp->unitptr = (caddr_t)up; /* * Initialize miscellaneous variables */ peer->precision = PRECISION; peer->stratum = 2; /* Default to stratum 2 not 0. */ pp->clockdesc = DESCRIPTION; if (peer->MODE > 3) { msyslog(LOG_NOTICE, "ARCRON: Invalid mode %d", peer->MODE); return 0; } #ifdef DEBUG if(debug) { printf("arc: mode = %d.\n", peer->MODE); } #endif switch (peer->MODE) { case 1: memcpy((char *)&pp->refid, REFID_MSF, 4); break; case 2: memcpy((char *)&pp->refid, REFID_DCF77, 4); break; case 3: memcpy((char *)&pp->refid, REFID_WWVB, 4); break; default: memcpy((char *)&pp->refid, REFID, 4); break; } /* Spread out resyncs so that they should remain separated. */ up->next_resync = current_time + INITIAL_RESYNC_DELAY + (67*unit)%1009; #if 0 /* Not needed because of zeroing of arcunit structure... */ up->resyncing = 0; /* Not resyncing yet. */ up->saved_flags = 0; /* Default is all flags off. */ /* Clear send buffer out... */ { int i; for(i = CMDQUEUELEN; i >= 0; --i) { up->cmdqueue[i] = '\0'; } } #endif #ifdef ARCRON_KEEN up->quality = QUALITY_UNKNOWN; /* Trust the clock immediately. */ #else up->quality = MIN_CLOCK_QUALITY;/* Don't trust the clock yet. */ #endif peer->action = arc_event_handler; ENQUEUE(up); return(1); }
void os2PostKbdEvent(unsigned scanCode, Bool down) { KeyClassRec *keyc = ((DeviceIntPtr)xf86Info.pKeyboard)->key; Bool updateLeds = FALSE; Bool UsePrefix = FALSE; Bool Direction = FALSE; xEvent kevent; KeySym *keysym; int keycode; static int lockkeys = 0; /* * and now get some special keysequences */ if ((ModifierDown(ControlMask | AltMask)) || (ModifierDown(ControlMask | AltLangMask))) { switch (scanCode) { case KEY_BackSpace: if (!xf86Info.dontZap) GiveUp(0); return; case KEY_KP_Minus: /* Keypad - */ if (!xf86Info.dontZoom) { if (down) xf86ZoomViewport(xf86Info.currentScreen, -1); return; } break; case KEY_KP_Plus: /* Keypad + */ if (!xf86Info.dontZoom) { if (down) xf86ZoomViewport(xf86Info.currentScreen, 1); return; } break; } } /* CTRL-ESC is std OS/2 hotkey for going back to PM and popping up * window list... handled by keyboard driverand PM if you tell it. This is * what we have done, and thus should never detect this key combo */ if (ModifierDown(ControlMask) && scanCode==KEY_Escape) { /* eat it */ return; } else if (ModifierDown(AltLangMask|AltMask) && scanCode==KEY_Escape) { /* same here */ return; } /* * Now map the scancodes to real X-keycodes ... */ keycode = scanCode + MIN_KEYCODE; keysym = (keyc->curKeySyms.map + keyc->curKeySyms.mapWidth * (keycode - keyc->curKeySyms.minKeyCode)); #ifdef XKB if (noXkbExtension) { #endif /* Filter autorepeated caps/num/scroll lock keycodes. */ #define CAPSFLAG 0x01 #define NUMFLAG 0x02 #define SCROLLFLAG 0x04 #define MODEFLAG 0x08 if (down) { switch (keysym[0]) { case XK_Caps_Lock: if (lockkeys & CAPSFLAG) return; else lockkeys |= CAPSFLAG; break; case XK_Num_Lock: if (lockkeys & NUMFLAG) return; else lockkeys |= NUMFLAG; break; case XK_Scroll_Lock: if (lockkeys & SCROLLFLAG) return; else lockkeys |= SCROLLFLAG; break; } if (keysym[1] == XF86XK_ModeLock) { if (lockkeys & MODEFLAG) return; else lockkeys |= MODEFLAG; } } else { switch (keysym[0]) { case XK_Caps_Lock: lockkeys &= ~CAPSFLAG; break; case XK_Num_Lock: lockkeys &= ~NUMFLAG; break; case XK_Scroll_Lock: lockkeys &= ~SCROLLFLAG; break; } if (keysym[1] == XF86XK_ModeLock) lockkeys &= ~MODEFLAG; } /* * LockKey special handling: * ignore releases, toggle on & off on presses. * Don't deal with the Caps_Lock keysym directly, * but check the lock modifier */ #ifndef PC98 if (keyc->modifierMap[keycode] & LockMask || keysym[0] == XK_Scroll_Lock || keysym[1] == XF86XK_ModeLock || keysym[0] == XK_Num_Lock) { Bool flag; if (!down) return; flag = !KeyPressed(keycode); if (!flag) down = !down; if (keyc->modifierMap[keycode] & LockMask) xf86Info.capsLock = flag; if (keysym[0] == XK_Num_Lock) xf86Info.numLock = flag; if (keysym[0] == XK_Scroll_Lock) xf86Info.scrollLock = flag; if (keysym[1] == XF86XK_ModeLock) xf86Info.modeSwitchLock = flag; updateLeds = TRUE; } #endif /* not PC98 */ /* normal, non-keypad keys */ if (scanCode < KEY_KP_7 || scanCode > KEY_KP_Decimal) { /* magic ALT_L key on AT84 keyboards for multilingual support */ if (xf86Info.kbdType == KB_84 && ModifierDown(AltMask) && keysym[2] != NoSymbol) { UsePrefix = TRUE; Direction = TRUE; } } #ifdef XKB /* Warning: got position wrong first time */ } #endif /* check for an autorepeat-event */ if ((down && KeyPressed(keycode)) && (xf86Info.autoRepeat != AutoRepeatModeOn || keyc->modifierMap[keycode])) return; xf86Info.lastEventTime = kevent.u.keyButtonPointer.time = GetTimeInMillis(); /* * And now send these prefixes ... * NOTE: There cannot be multiple Mode_Switch keys !!!! 
     */
    if (UsePrefix) {
        ENQUEUE(&kevent,
                keyc->modifierKeyMap[keyc->maxKeysPerModifier*7],
                Direction ? KeyPress : KeyRelease,
                XE_KEYBOARD);
        ENQUEUE(&kevent,
                keycode,
                down ? KeyPress : KeyRelease,
                XE_KEYBOARD);
        ENQUEUE(&kevent,
                keyc->modifierKeyMap[keyc->maxKeysPerModifier*7],
                Direction ? KeyRelease : KeyPress,
                XE_KEYBOARD);
    } else {
#ifdef XFreeDGA
        if (((ScrnInfoPtr)(xf86Info.currentScreen->devPrivates[xf86ScreenIndex].ptr))->directMode & XF86DGADirectKeyb) {
            XF86DirectVideoKeyEvent(&kevent, keycode,
                                    down ? KeyPress : KeyRelease);
        } else
#endif
        {
            ENQUEUE(&kevent,
                    keycode,
                    down ? KeyPress : KeyRelease,
                    XE_KEYBOARD);
        }
    }

    if (updateLeds)
        xf86KbdLeds();
}
int main()
{
    tipofilalancamento inicio, fim;
    tipo_equipe_OK topo;
    struct equipe equipe;
    struct equipe_OK equipe_OK;
    int cont = 1, lancamento_bem_sucedido = 1;

    init(&inicio, &fim);
    init_lista_sucesso(&topo);

    /* Team registration. */
    while (cont == 1) {
        printf("Entre com o nome da equipe:\n");
        fgets(equipe.nome, sizeof equipe.nome, stdin);      /* gets() is unsafe */
        equipe.nome[strcspn(equipe.nome, "\n")] = '\0';
        equipe.tentativas = 0;
        ENQUEUE(&inicio, &fim, equipe);

        printf("\nDeseja cadastrar mais equipes?\n");
        printf("(\"1\" = SIM, quero cadastrar mais equipes / \"0\" = NAO, quero iniciar os lancamentos)\n");
        scanf("%d", &cont);
        getchar();          /* consume the '\n' left by scanf before the next fgets */
    }

    /* Launch rounds. */
    while (!IsEmpty(inicio, fim)) {
        if (FIRST(inicio, fim, &equipe) == 1) {
            printf("Lancamento da Equipe \"%s\"\n", equipe.nome);
            printf("Entre com os dados do lancamento:\n\n");
            printf("Lancamento bem sucedido? (\"1\" = SIM / \"0\" = NAO): ");
            scanf("%d", &lancamento_bem_sucedido);

            if (lancamento_bem_sucedido == 1) {
                printf("\nEntre com a distancia do alvo: ");
                scanf("%d", &equipe_OK.distancia_do_alvo);
                printf("\nEntre com o tempo de propulsao (em s): ");
                scanf("%f", &equipe_OK.tempo_de_propulsao);
                printf("\n");

                strcpy(equipe_OK.nome, equipe.nome);    /* carry the team name over */
                PUSH(&topo, equipe_OK);                 /* the success stack holds equipe_OK */
                printf("\nConfirmacao: Equipe \"%s\" | Tempo de propulsao: %f | Distancia do alvo: %d\n",
                       equipe_OK.nome,
                       equipe_OK.tempo_de_propulsao,
                       equipe_OK.distancia_do_alvo);
                DEQUEUE(&inicio, &fim, &equipe);
            } else {
                /* Failed launch: dequeue, update the attempt counter, and
                 * re-enqueue unless the team has now failed twice.  (The
                 * original only incremented a local copy, so the counter
                 * never reached 2; this assumes by-value queue semantics.) */
                equipe.tentativas += 1;
                DEQUEUE(&inicio, &fim, &equipe);
                if (equipe.tentativas == 2) {
                    printf("Equipe desclassificada após 2 tentativas sem sucesso\n");
                } else {
                    ENQUEUE(&inicio, &fim, equipe);
                }
            }
            scanf("%d", &cont);     /* original code pauses for an extra number here */
            printf("\n");
        }
    }

    /* Final menu. */
    cont = 1;
    while (cont != 0) {
        printf("Fim dos lançamentos! O que deseja fazer agora?\n");
        printf("1 = Ver o número de equipes que concluíram a competição\n");
        printf("2 = Ver equipe com melhor resultado\n");
        printf("0 = Finalizar o programa\n");
        scanf("%d", &cont);
        switch (cont) {
            case 1:
                /* not implemented in the original */
                break;
            case 2:
                /* not implemented in the original */
                break;
            case 0:
                /* Drain the success stack; assumes POP() returns 0 when empty. */
                while (POP(&topo, &equipe_OK))
                    ;
                break;
        }
    }

    system("PAUSE");
    return 0;
}
void
isc_taskmgr_destroy(isc_taskmgr_t **managerp) {
    isc_taskmgr_t *manager;
    isc_task_t *task;
    unsigned int i;

    /*
     * Destroy '*managerp'.
     */

    REQUIRE(managerp != NULL);
    manager = *managerp;
    REQUIRE(VALID_MANAGER(manager));

#ifndef ISC_PLATFORM_USETHREADS
    UNUSED(i);

    if (manager->refs > 1) {
        manager->refs--;
        *managerp = NULL;
        return;
    }
#endif /* ISC_PLATFORM_USETHREADS */

    XTHREADTRACE("isc_taskmgr_destroy");
    /*
     * Only one non-worker thread may ever call this routine.
     * If a worker thread wants to initiate shutdown of the
     * task manager, it should ask some non-worker thread to call
     * isc_taskmgr_destroy(), e.g. by signalling a condition variable
     * that the startup thread is sleeping on.
     */

    /*
     * Unlike elsewhere, we're going to hold this lock a long time.
     * We need to do so, because otherwise the list of tasks could
     * change while we were traversing it.
     *
     * This is also the only function where we will hold both the
     * task manager lock and a task lock at the same time.
     */

    LOCK(&manager->lock);

    /*
     * Make sure we only get called once.
     */
    INSIST(!manager->exiting);
    manager->exiting = ISC_TRUE;

    /*
     * Post shutdown event(s) to every task (if they haven't already been
     * posted).
     */
    for (task = HEAD(manager->tasks);
         task != NULL;
         task = NEXT(task, link)) {
        LOCK(&task->lock);
        if (task_shutdown(task))
            ENQUEUE(manager->ready_tasks, task, ready_link);
        UNLOCK(&task->lock);
    }
#ifdef ISC_PLATFORM_USETHREADS
    /*
     * Wake up any sleeping workers.  This ensures we get work done if
     * there's work left to do, and if there are already no tasks left
     * it will cause the workers to see manager->exiting.
     */
    BROADCAST(&manager->work_available);
    UNLOCK(&manager->lock);

    /*
     * Wait for all the worker threads to exit.
     */
    for (i = 0; i < manager->workers; i++)
        (void)isc_thread_join(manager->threads[i], NULL);
#else /* ISC_PLATFORM_USETHREADS */
    /*
     * Dispatch the shutdown events.
     */
    UNLOCK(&manager->lock);
    while (isc__taskmgr_ready())
        (void)isc__taskmgr_dispatch();
    if (!ISC_LIST_EMPTY(manager->tasks))
        isc_mem_printallactive(stderr);
    INSIST(ISC_LIST_EMPTY(manager->tasks));
#endif /* ISC_PLATFORM_USETHREADS */

    manager_free(manager);

    *managerp = NULL;
}
void TradeRouteData::SetCost(double cost)
{
    m_transportCost = cost;
    ENQUEUE();
}
bool TradeRouteData::GeneratePath()
{
    float cost = 0.0;
    sint32 const nwp = m_wayPoints.Num();

    for (sint32 wp = 0; wp < nwp - 1; ++wp)
    {
        if (wp == 0)
        {
            if (!g_theTradeAstar.FindPath
                    (m_payingFor, m_wayPoints[wp], m_wayPoints[wp + 1],
                     *m_astarPath, cost, FALSE
                    )
               )
            {
                return false;
            }
        }
        else
        {
            Path partialAstarPath;

            if (g_theTradeAstar.FindPath
                    (m_payingFor, m_wayPoints[wp], m_wayPoints[wp + 1],
                     partialAstarPath, cost, FALSE
                    )
               )
            {
                m_astarPath->Concat(partialAstarPath);
            }
            else
            {
                return false;
            }
        }

        m_transportCost += cost;
    }

    m_transportCost = std::max<double>
        (1.0, (double)((int)tradeutil_GetNetTradeCosts(m_transportCost)));

    m_path.Insert(m_wayPoints[0]);

    MapPoint pnt;
    for (sint32 p = 1; p < m_astarPath->Num(); p++)
    {
        WORLD_DIRECTION d;
        m_astarPath->GetCurrentDir(d);
        sint32 r = m_path[p-1].GetNeighborPosition(d, pnt);
        Assert(r);
        if (r)
        {
            m_path.Insert(pnt);
            g_theWorld->GetCell(pnt)->AddTradeRoute(m_id);
            g_radarMap->RedrawTile(&pnt);

            if (g_theWorld->IsWater(pnt))
            {
                m_crossesWater = true;
            }
        }
        m_astarPath->IncDir();
    }

    ENQUEUE();

    return true;
}
void TradeRouteData::SetSource(Unit source)
{
    m_sourceCity = source;
    ENQUEUE();
}
/*** *** Task Manager. ***/ static void dispatch(isc_taskmgr_t *manager) { isc_task_t *task; #ifndef ISC_PLATFORM_USETHREADS unsigned int total_dispatch_count = 0; isc_tasklist_t ready_tasks; #endif /* ISC_PLATFORM_USETHREADS */ REQUIRE(VALID_MANAGER(manager)); /* * Again we're trying to hold the lock for as short a time as possible * and to do as little locking and unlocking as possible. * * In both while loops, the appropriate lock must be held before the * while body starts. Code which acquired the lock at the top of * the loop would be more readable, but would result in a lot of * extra locking. Compare: * * Straightforward: * * LOCK(); * ... * UNLOCK(); * while (expression) { * LOCK(); * ... * UNLOCK(); * * Unlocked part here... * * LOCK(); * ... * UNLOCK(); * } * * Note how if the loop continues we unlock and then immediately lock. * For N iterations of the loop, this code does 2N+1 locks and 2N+1 * unlocks. Also note that the lock is not held when the while * condition is tested, which may or may not be important, depending * on the expression. * * As written: * * LOCK(); * while (expression) { * ... * UNLOCK(); * * Unlocked part here... * * LOCK(); * ... * } * UNLOCK(); * * For N iterations of the loop, this code does N+1 locks and N+1 * unlocks. The while expression is always protected by the lock. */ #ifndef ISC_PLATFORM_USETHREADS ISC_LIST_INIT(ready_tasks); #endif LOCK(&manager->lock); while (!FINISHED(manager)) { #ifdef ISC_PLATFORM_USETHREADS /* * For reasons similar to those given in the comment in * isc_task_send() above, it is safe for us to dequeue * the task while only holding the manager lock, and then * change the task to running state while only holding the * task lock. */ while ((EMPTY(manager->ready_tasks) || manager->exclusive_requested) && !FINISHED(manager)) { XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL, ISC_MSG_WAIT, "wait")); WAIT(&manager->work_available, &manager->lock); XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK, ISC_MSG_AWAKE, "awake")); } #else /* ISC_PLATFORM_USETHREADS */ if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM || EMPTY(manager->ready_tasks)) break; #endif /* ISC_PLATFORM_USETHREADS */ XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK, ISC_MSG_WORKING, "working")); task = HEAD(manager->ready_tasks); if (task != NULL) { unsigned int dispatch_count = 0; isc_boolean_t done = ISC_FALSE; isc_boolean_t requeue = ISC_FALSE; isc_boolean_t finished = ISC_FALSE; isc_event_t *event; INSIST(VALID_TASK(task)); /* * Note we only unlock the manager lock if we actually * have a task to do. We must reacquire the manager * lock before exiting the 'if (task != NULL)' block. */ DEQUEUE(manager->ready_tasks, task, ready_link); manager->tasks_running++; UNLOCK(&manager->lock); LOCK(&task->lock); INSIST(task->state == task_state_ready); task->state = task_state_running; XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL, ISC_MSG_RUNNING, "running")); isc_stdtime_get(&task->now); do { if (!EMPTY(task->events)) { event = HEAD(task->events); DEQUEUE(task->events, event, ev_link); /* * Execute the event action. 
*/ XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK, ISC_MSG_EXECUTE, "execute action")); if (event->ev_action != NULL) { UNLOCK(&task->lock); (event->ev_action)(task,event); LOCK(&task->lock); } dispatch_count++; #ifndef ISC_PLATFORM_USETHREADS total_dispatch_count++; #endif /* ISC_PLATFORM_USETHREADS */ } if (task->references == 0 && EMPTY(task->events) && !TASK_SHUTTINGDOWN(task)) { isc_boolean_t was_idle; /* * There are no references and no * pending events for this task, * which means it will not become * runnable again via an external * action (such as sending an event * or detaching). * * We initiate shutdown to prevent * it from becoming a zombie. * * We do this here instead of in * the "if EMPTY(task->events)" block * below because: * * If we post no shutdown events, * we want the task to finish. * * If we did post shutdown events, * will still want the task's * quantum to be applied. */ was_idle = task_shutdown(task); INSIST(!was_idle); } if (EMPTY(task->events)) { /* * Nothing else to do for this task * right now. */ XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK, ISC_MSG_EMPTY, "empty")); if (task->references == 0 && TASK_SHUTTINGDOWN(task)) { /* * The task is done. */ XTRACE(isc_msgcat_get( isc_msgcat, ISC_MSGSET_TASK, ISC_MSG_DONE, "done")); finished = ISC_TRUE; task->state = task_state_done; } else task->state = task_state_idle; done = ISC_TRUE; } else if (dispatch_count >= task->quantum) { /* * Our quantum has expired, but * there is more work to be done. * We'll requeue it to the ready * queue later. * * We don't check quantum until * dispatching at least one event, * so the minimum quantum is one. */ XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK, ISC_MSG_QUANTUM, "quantum")); task->state = task_state_ready; requeue = ISC_TRUE; done = ISC_TRUE; } } while (!done); UNLOCK(&task->lock); if (finished) task_finished(task); LOCK(&manager->lock); manager->tasks_running--; #ifdef ISC_PLATFORM_USETHREADS if (manager->exclusive_requested && manager->tasks_running == 1) { SIGNAL(&manager->exclusive_granted); } #endif /* ISC_PLATFORM_USETHREADS */ if (requeue) { /* * We know we're awake, so we don't have * to wakeup any sleeping threads if the * ready queue is empty before we requeue. * * A possible optimization if the queue is * empty is to 'goto' the 'if (task != NULL)' * block, avoiding the ENQUEUE of the task * and the subsequent immediate DEQUEUE * (since it is the only executable task). * We don't do this because then we'd be * skipping the exit_requested check. The * cost of ENQUEUE is low anyway, especially * when you consider that we'd have to do * an extra EMPTY check to see if we could * do the optimization. If the ready queue * were usually nonempty, the 'optimization' * might even hurt rather than help. */ #ifdef ISC_PLATFORM_USETHREADS ENQUEUE(manager->ready_tasks, task, ready_link); #else ENQUEUE(ready_tasks, task, ready_link); #endif } } } #ifndef ISC_PLATFORM_USETHREADS ISC_LIST_APPENDLIST(manager->ready_tasks, ready_tasks, ready_link); #endif UNLOCK(&manager->lock); }
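The long comment in dispatch() argues for holding the lock across the while test so that N iterations of the loop cost N+1 lock/unlock pairs instead of 2N+1. Below is a minimal stand-alone sketch of that pattern using POSIX threads; the queued_items counter, work_available condition, and do_work_unlocked() are illustrative stand-ins for the manager's ready queue, not the library's actual types.

#include <pthread.h>

/* Illustrative shared state guarded by 'lock'. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  work_available = PTHREAD_COND_INITIALIZER;
static int queued_items = 0;
static int finished = 0;

static void do_work_unlocked(void) { /* process one item without the lock */ }

static void *
worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);              /* LOCK();                         */
    while (!finished) {                     /* condition tested under the lock */
        while (queued_items == 0 && !finished)
            pthread_cond_wait(&work_available, &lock);
        if (finished)
            break;
        queued_items--;                     /* "dequeue" while still locked    */

        pthread_mutex_unlock(&lock);        /* UNLOCK();                       */
        do_work_unlocked();                 /* unlocked part here...           */
        pthread_mutex_lock(&lock);          /* LOCK(); loop re-tests condition */
    }
    pthread_mutex_unlock(&lock);            /* final UNLOCK();                 */
    return NULL;
}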
_X_HIDDEN void sunPostKbdEvent(int sun_ktype, Firm_event *event) { Bool down; KeyClassRec *keyc = ((DeviceIntPtr)xf86Info.pKeyboard)->key; Bool updateLeds = FALSE; xEvent kevent; KeySym *keysym; int keycode; static int lockkeys = 0; /* Give down a value */ if (event->value == VKEY_DOWN) down = TRUE; else down = FALSE; #if defined(KB_USB) if(sun_ktype == KB_USB) keycode = usbmap[event->id]; else #endif keycode = map[event->id]; /* * and now get some special keysequences */ #ifdef XKB if (((xf86Info.ddxSpecialKeys == SKWhenNeeded) && (!xf86Info.ActionKeyBindingsSet)) || noXkbExtension || (xf86Info.ddxSpecialKeys == SKAlways)) #endif { if (!(ModifierDown(ShiftMask)) && ((ModifierDown(ControlMask | AltMask)) || (ModifierDown(ControlMask | AltLangMask)))) { switch (keycode) { /* * The idea here is to pass the scancode down to a list of * registered routines. There should be some standard conventions * for processing certain keys. */ case KEY_BackSpace: xf86ProcessActionEvent(ACTION_TERMINATE, NULL); break; /* * Check grabs */ case KEY_KP_Divide: xf86ProcessActionEvent(ACTION_DISABLEGRAB, NULL); break; case KEY_KP_Multiply: xf86ProcessActionEvent(ACTION_CLOSECLIENT, NULL); break; /* * Video mode switches */ case KEY_KP_Minus: /* Keypad - */ if (down) xf86ProcessActionEvent(ACTION_PREV_MODE, NULL); if (!xf86Info.dontZoom) return; break; case KEY_KP_Plus: /* Keypad + */ if (down) xf86ProcessActionEvent(ACTION_NEXT_MODE, NULL); if (!xf86Info.dontZoom) return; break; } } } /* * Now map the scancodes to real X-keycodes ... */ if (keycode == KEY_NOTUSED) { xf86MsgVerb(X_INFO, 0, "raw code %d mapped to KEY_NOTUSED -- please report\n", event->id); return; } if (keycode == KEY_UNKNOWN) { xf86MsgVerb(X_INFO, 0, "raw code %d mapped to KEY_UNKNOWN -- please report\n", event->id); return; } keycode += MIN_KEYCODE; keysym = keyc->curKeySyms.map + (keyc->curKeySyms.mapWidth * (keycode - keyc->curKeySyms.minKeyCode)); #ifdef XKB if (noXkbExtension) #endif { /* * Toggle lock keys. */ #define CAPSFLAG 0x01 #define NUMFLAG 0x02 #define SCROLLFLAG 0x04 #define MODEFLAG 0x08 if (down) { /* * Handle the KeyPresses of the lock keys. */ switch (keysym[0]) { case XK_Caps_Lock: if (lockkeys & CAPSFLAG) { lockkeys &= ~CAPSFLAG; return; } lockkeys |= CAPSFLAG; updateLeds = TRUE; xf86Info.capsLock = down; break; case XK_Num_Lock: if (lockkeys & NUMFLAG) { lockkeys &= ~NUMFLAG; return; } lockkeys |= NUMFLAG; updateLeds = TRUE; xf86Info.numLock = down; break; case XK_Scroll_Lock: if (lockkeys & SCROLLFLAG) { lockkeys &= ~SCROLLFLAG; return; } lockkeys |= SCROLLFLAG; updateLeds = TRUE; xf86Info.scrollLock = down; break; } } else { /* * Handle the releases of the lock keys. */ switch (keysym[0]) { case XK_Caps_Lock: if (lockkeys & CAPSFLAG) return; updateLeds = TRUE; xf86Info.capsLock = down; break; case XK_Num_Lock: if (lockkeys & NUMFLAG) return; updateLeds = TRUE; xf86Info.numLock = down; break; case XK_Scroll_Lock: if (lockkeys & SCROLLFLAG) return; updateLeds = TRUE; xf86Info.scrollLock = down; break; } } if (updateLeds) xf86KbdLeds(); /* * If this keycode is not a modifier key, and its down initiate the * autorepeate sequence. (Only necessary if not using XKB). * * If its not down, then reset the timer. */ if (!keyc->modifierMap[keycode]) { if (down) { startautorepeat(keycode); } else { TimerFree(sunTimer); sunTimer = NULL; } } } xf86Info.lastEventTime = kevent.u.keyButtonPointer.time = GetTimeInMillis(); /* * And now send these prefixes ... * NOTE: There cannot be multiple Mode_Switch keys !!!! 
     */
    ENQUEUE(&kevent, keycode, (down ? KeyPress : KeyRelease), XE_KEYBOARD);
}