/*
 * Destroy a condition variable (win32 event-based implementation).
 *
 * Requires: 'cond' is non-NULL and no thread is still waiting on it.
 * Closes the shared signal event, then walks the per-thread list,
 * closing each thread's broadcast event handle, unlinking the entry,
 * and freeing it.
 *
 * Always returns ISC_R_SUCCESS.
 */
isc_result_t
isc_condition_destroy(isc_condition_t *cond) {
	isc_condition_thread_t *next, *threadcond;

	REQUIRE(cond != NULL);
	REQUIRE(cond->waiters == 0);	/* no thread may still be blocked here */

	(void)CloseHandle(cond->events[LSIGNAL]);

	/*
	 * Delete the threadlist
	 */
	threadcond = ISC_LIST_HEAD(cond->threadlist);

	while (threadcond != NULL) {
		/* save the successor before the node is unlinked and freed */
		next = ISC_LIST_NEXT(threadcond, link);
		DEQUEUE(cond->threadlist, threadcond, link);
		(void) CloseHandle(threadcond->handle[LBROADCAST]);
		free(threadcond);
		threadcond = next;
	}

	return (ISC_R_SUCCESS);
}
void dijkstra(struct Graph *G, int s) { struct Queue* Q; Q = (struct Queue*)malloc(sizeof(struct Queue)); Q->length = MAX_SIZE - 1; Q->head = Q->tail = 0; int i, j; G->costArray[s] = 0; ENQUEUE(Q, s); while(Q->head!=Q->tail) { j = DEQUEUE(Q); G->colorArray[j] = BLACK; for (i=1; i<=G->V; i++) { if (G->adjMatrix[i][j]==0) {continue;} // Not j's neighbors else { // if (G->colorArray[i]!=BLACK) { // Not j's parent if (G->costArray[i]==INFINITE) { // New node G->costArray[i] = G->costArray[j] + G->adjMatrix[i][j]; G->parentArray[i] = j; } else if (G->costArray[i] > G->costArray[j] + G->adjMatrix[i][j]) { // Updated node G->costArray[i] = G->costArray[j] + G->adjMatrix[i][j]; G->parentArray[i] = j; } ENQUEUE(Q, i); // } } } } }
/*
 * Turn the TASK_F_PRIVILEGED flag on or off for 'task0'.
 *
 * The flag itself is protected by the task lock, while membership of the
 * manager's priority ready queue is protected by the manager lock, so the
 * two updates are made under separate locks.  If the privilege state did
 * not actually change, the queue needs no adjustment and we return early.
 */
ISC_TASKFUNC_SCOPE void
isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv) {
	isc__task_t *task = (isc__task_t *)task0;
	isc__taskmgr_t *manager = task->manager;
	isc_boolean_t oldpriv;

	LOCK(&task->lock);
	oldpriv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
	if (priv)
		task->flags |= TASK_F_PRIVILEGED;
	else
		task->flags &= ~TASK_F_PRIVILEGED;
	UNLOCK(&task->lock);

	if (priv == oldpriv)
		return;

	LOCK(&manager->lock);
	/*
	 * Keep the priority ready queue consistent: a task currently on the
	 * ready list gains or loses its priority-list entry to match its
	 * new privilege state.
	 */
	if (priv && ISC_LINK_LINKED(task, ready_link))
		ENQUEUE(manager->ready_priority_tasks, task,
			ready_priority_link);
	else if (!priv && ISC_LINK_LINKED(task, ready_priority_link))
		DEQUEUE(manager->ready_priority_tasks, task,
			ready_priority_link);
	UNLOCK(&manager->lock);
}
/*
 * Drain buffered modem input up to a carriage return and return the
 * accumulated response line, or NULL if no complete line is available yet
 * (or the port status check fails).
 *
 * Bytes are collected into p->buffer.  When a '\r' arrives — or the buffer
 * is one byte short of full, which forces an end-of-line — the line is
 * NUL-terminated, echoed to the console, and returned.  Control bytes and
 * bytes above 'z' are discarded.
 */
static char *Modem_Response(ComPort *p)
{
	byte b;

	if (CheckStatus (p))
		return NULL;

	while (! EMPTY(p->inputQueue))
	{
		DEQUEUE (p->inputQueue, b);

		/* buffer nearly full: force end-of-line so the line terminates */
		if (p->bufferUsed == (sizeof(p->buffer) - 1))
			b = '\r';

		if (b == '\r' && p->bufferUsed)
		{
			p->buffer[p->bufferUsed] = 0;
			Con_Printf("%s\n", p->buffer);
			SCR_UpdateScreen ();
			p->bufferUsed = 0;
			return p->buffer;	/* caller gets the port's own buffer */
		}

		/* ignore control characters and anything past 'z' */
		if (b < ' ' || b > 'z')
			continue;

		p->buffer[p->bufferUsed] = b;
		p->bufferUsed++;
	}

	return NULL;
}
/*
 * Dequeue and return a pointer to the first task on the current ready
 * list for the manager.
 * If the task is privileged, dequeue it from the other ready list
 * as well.
 *
 * In privileged mode the head is taken from ready_priority_tasks, but the
 * task is always removed from the plain ready list too (every ready task
 * is on it), and from the priority list when linked there.
 *
 * Caller must hold the task manager lock.  Returns NULL if the selected
 * list is empty.
 */
static inline isc__task_t *
pop_readyq(isc__taskmgr_t *manager) {
	isc__task_t *task;

	if (manager->mode == isc_taskmgrmode_normal)
		task = HEAD(manager->ready_tasks);
	else
		task = HEAD(manager->ready_priority_tasks);

	if (task != NULL) {
		DEQUEUE(manager->ready_tasks, task, ready_link);
		if (ISC_LINK_LINKED(task, ready_priority_link))
			DEQUEUE(manager->ready_priority_tasks, task,
				ready_priority_link);
	}

	return (task);
}
/*
 * Least-recently-used queue broker (ZeroMQ "LRU queue" pattern).
 *
 * Clients connect to the frontend (tcp:5555), workers to the backend
 * (tcp:5556).  Worker addresses are held in a simple array used as a
 * FIFO: a worker that sends a message becomes available again; client
 * requests are routed to worker_queue[0].
 */
int main (void)
{
    s_version_assert (2, 1);

    // Prepare our context and sockets
    void *context = zmq_init (1);
    void *frontend = zmq_socket (context, ZMQ_XREP);
    void *backend = zmq_socket (context, ZMQ_XREP);
    zmq_bind (frontend, "tcp://*:5555"); // For clients
    zmq_bind (backend, "tcp://*:5556"); // For workers

    // Queue of available workers
    int available_workers = 0;
    char *worker_queue [MAX_WORKERS];

    while (1) {
        zmq_pollitem_t items [] = {
            { backend, 0, ZMQ_POLLIN, 0 },
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        // Poll frontend only if we have available workers
        if (available_workers)
            zmq_poll (items, 2, -1);
        else
            zmq_poll (items, 1, -1);

        // Handle worker activity on backend
        if (items [0].revents & ZMQ_POLLIN) {
            zmsg_t *zmsg = zmsg_recv (backend);
            // Use worker address for LRU routing
            assert (available_workers < MAX_WORKERS);
            /* zmsg_unwrap transfers ownership of the address frame;
             * it is free()d when the worker is dequeued below */
            worker_queue [available_workers++] = zmsg_unwrap (zmsg);

            // Return reply to client if it's not a READY
            if (strcmp (zmsg_address (zmsg), "READY") == 0)
                zmsg_destroy (&zmsg);
            else
                zmsg_send (&zmsg, frontend);   /* send destroys the message */
        }
        if (items [1].revents & ZMQ_POLLIN) {
            // Now get next client request, route to next worker
            zmsg_t *zmsg = zmsg_recv (frontend);
            // REQ socket in worker needs an envelope delimiter
            zmsg_wrap (zmsg, worker_queue [0], "");
            zmsg_send (&zmsg, backend);

            // Dequeue and drop the next worker address
            free (worker_queue [0]);
            DEQUEUE (worker_queue);
            available_workers--;
        }
    }
    // We never exit the main loop
    return 0;
}
/*
 * Kick off (or continue) transmission on the port bound to 'handle':
 * if the UART's transmit holding register is empty, feed it the next
 * queued output byte.
 *
 * Fix: the original dequeued without checking whether the output queue
 * held any data; dequeueing from an empty circular queue advances the
 * head past the tail and corrupts the queue, sending a garbage byte.
 */
void TTY_Flush(int handle)
{
	byte b;
	ComPort *p;

	p = handleToPort [handle];

	/* transmit only when the UART can accept a byte AND we have one */
	if ((inportb (p->uart + LINE_STATUS_REGISTER) & LSR_TRANSMITTER_EMPTY)
	    && ! EMPTY (p->outputQueue))
	{
		DEQUEUE (p->outputQueue, b);
		outportb (p->uart, b);
	}
}
/*
 * Non-blocking console line editor, written as a resumable state machine.
 *
 * Each call consumes at most one byte from the input FIFO (g_tFIFOin):
 *  - printable bytes (32..127) are appended to s_chCmdBuf and echoed;
 *  - '\r' completes the line: the FSM is reset and fsm_rt_cpl returned;
 *  - '\b' removes the last byte and echoes the delete sequence.
 * Echoing is handled by the CONSOLE_CHECK_PRT state, which repeats
 * console_print() until it completes.  Returns fsm_rt_on_going until a
 * full line has been entered.
 *
 * NOTE(review): 'COSOLE_CHECK_RESET' looks like a misspelling of
 * "CONSOLE..." — the macro is defined elsewhere, so the name is kept.
 * NOTE(review): backspace refuses to shrink below index 2, presumably to
 * protect a prompt prefix in s_chCmdBuf — confirm against the writer side.
 */
static fsm_rt_t console_check(void)
{
    static uint8_t s_chTemp = 0;        /* byte most recently dequeued */
    static uint8_t s_chNum = 0;         /* number of bytes to echo */
    static uint8_t *s_pchPRT = NULL;    /* bytes to echo (may alias s_chTemp) */
    static enum {
        CONSOLE_CHECK_START = 0,
        CONSOLE_CHECK_CMD,
        CONSOLE_CHECK_PRT,
    }s_tState;
    switch(s_tState) {
        case CONSOLE_CHECK_START:
            s_pchPRT = NULL;
            s_tState = CONSOLE_CHECK_CMD;
            //break;                    /* deliberate fall-through */
        case CONSOLE_CHECK_CMD:
            if(DEQUEUE(InOutQueue,&g_tFIFOin,&s_chTemp)) {
                if ((s_chTemp >= 32) && (s_chTemp <= 127) ){
                    /* drop input silently once the command buffer is full */
                    if(s_chCmdBufIndex >= CONSOLE_BUF_SIZE) { break; }
                    s_chCmdBuf[s_chCmdBufIndex++] = s_chTemp;
                    s_pchPRT = &s_chTemp;
                    s_chNum = 1;
                } else if('\r' == s_chTemp ) {
                    /* end of line: reset the FSM and report completion */
                    COSOLE_CHECK_RESET();
                    return fsm_rt_cpl;
                } else if('\b' == s_chTemp ){
                    if(s_chCmdBufIndex <= 2) { break; }
                    s_chCmdBufIndex--;
                    /* echo the terminal delete sequence */
                    s_pchPRT = (uint8_t*)c_chDelChar;
                    s_chNum = UBOUND(c_chDelChar);
                }
                s_tState = CONSOLE_CHECK_PRT;
            }
            break;
        case CONSOLE_CHECK_PRT:
            /* keep calling console_print() until the echo completes */
            if(fsm_rt_cpl == console_print(s_pchPRT,s_chNum)) {
                s_tState = CONSOLE_CHECK_CMD;
            }
            break;
    }
    return fsm_rt_on_going;
}
/*
 * Kahn's-algorithm topological sort of adjacency-list graph G.
 *
 * Writes the vertex order (1-based vertices) into aov[] and reports a
 * cycle on cout if fewer than G.n vertices could be output.
 */
void Topologicalsort( AdjGraph G, int aov[NumVertices] )
{
    int v, w, nodes;
    EdgeNode *tmp;
    EdgeData indegree[NumVertices+1]={0};
    QUEUE Q ;
    MAKENULL( Q ) ;

    // compute the in-degree of every vertex
    for( v=1; v<=G.n ; ++v )
    {
        tmp=G.vexlist[v].firstedge;
        while(tmp)
        {
            indegree[tmp->adjvex]++;
            tmp=tmp->next;
        }
    }

    // enqueue every vertex whose in-degree is zero
    for(v=1; v<=G.n; ++v)
        if ( indegree[v] ==0 )
            ENQUEUE( v, Q ) ;

    nodes = 0 ;
    while ( !EMPTY( Q ) )
    {
        v = FRONT(Q)->element ;
        DEQUEUE( Q ) ;
        //cout << v <<' ';
        aov[nodes]=v;
        nodes ++ ;  // one more vertex placed in the ordering

        // for each edge (v, w): decrement w's in-degree,
        // and enqueue w once its in-degree reaches zero
        for( w=1; w<=G.n; w++)
        {
            if(connect(G, v, w))
            {
                --indegree[w];
                if( !(indegree[w]))
                    ENQUEUE(w,Q) ;
            }
        }
    }
    cout<<endl;
    // fewer than G.n vertices output => the graph contains a cycle
    if ( nodes < G.n )
        cout<<"图中有环路"<<endl;
}
/*
 * Read one byte from the serial port bound to 'handle'.
 *
 * Returns the byte (0..255) on success, ERR_TTY_NODATA if the input
 * queue is empty, or the non-zero status code from CheckStatus() on a
 * line/port error.
 */
int TTY_ReadByte(int handle)
{
	int ret;
	ComPort *p;

	p = handleToPort [handle];

	if ((ret = CheckStatus (p)) != 0)
		return ret;

	if (EMPTY (p->inputQueue))
		return ERR_TTY_NODATA;

	DEQUEUE (p->inputQueue, ret);
	/* mask to a byte: DEQUEUE stored into the full-width int */
	return (ret & 0xff);
}
/*
 * 8250 UART interrupt service worker: drain every pending interrupt
 * source reported by the interrupt-ID register until it reads 1
 * (no interrupt pending), then issue EOI to the 8259 PIC.
 *
 * Runs with interrupts disabled for the entire loop.
 */
static void ISR_8250 (ComPort *p)
{
	byte source = 0;
	byte b;

	disable();

	/* low 3 bits of the IIR identify the highest-priority pending source;
	 * 1 means nothing pending */
	while((source = inportb (p->uart + INTERRUPT_ID_REGISTER) & 0x07) != 1)
	{
		switch (source)
		{
			case IIR_RX_DATA_READY_INTERRUPT:
				b = inportb (p->uart + RECEIVE_BUFFER_REGISTER);
				if (! FULL(p->inputQueue))
				{
					ENQUEUE (p->inputQueue, b);
				}
				else
				{
					/* input queue full: record the overrun */
					p->lineStatus |= LSR_OVERRUN_ERROR;
					p->statusUpdated = true;
				}
				break;

			case IIR_TX_HOLDING_REGISTER_INTERRUPT:
				/* transmitter ready: feed it the next queued byte */
				if (! EMPTY(p->outputQueue))
				{
					DEQUEUE (p->outputQueue, b);
					outportb (p->uart + TRANSMIT_HOLDING_REGISTER, b);
				}
				break;

			case IIR_MODEM_STATUS_INTERRUPT:
				p->modemStatus = (inportb (p->uart + MODEM_STATUS_REGISTER) & MODEM_STATUS_MASK) | p->modemStatusIgnore;
				p->statusUpdated = true;
				break;

			case IIR_LINE_STATUS_INTERRUPT:
				p->lineStatus = inportb (p->uart + LINE_STATUS_REGISTER);
				p->statusUpdated = true;
				break;
		}

		/* NOTE(review): this re-read is immediately overwritten by the
		 * loop condition's own read; it is redundant but harmless */
		source = inportb (p->uart + INTERRUPT_ID_REGISTER) & 0x07;
	}

	/* end-of-interrupt to the master 8259 PIC */
	outportb (0x20, 0x20);
}
/*
 * Resumable state machine that matches 'chKeyTemp' against the function-key
 * code table chFunKeyCode[], writing the matched code to *pchKeyCode and
 * returning fsm_rt_cpl, or fsm_rt_on_going while matching is in progress.
 * On failure the FSM resets, discards the peeked input, and reports KEY_ESC.
 *
 * NOTE(review): 's_tState' is not declared in this function — presumably a
 * file-scope static shared with a reset macro; confirm.
 * NOTE(review): 's_chIndex = 2 + 1 << s_chIndex' parses as
 * (2 + 1) << s_chIndex due to precedence; 2 + (1 << s_chIndex) was likely
 * intended — verify against the key-table layout before changing.
 * NOTE(review): the 'break' after 'return fsm_rt_cpl' is unreachable, and
 * the FUNC_KEY_CHECK_FAIL case sits inside FUNC_KEY_CHECK_SECOND's brace
 * block — legal C, but easy to misread.
 * NOTE(review): return type 'fsm_rt_rt' differs from the 'fsm_rt_t' used
 * elsewhere in this codebase — likely a typo in the typedef name; confirm.
 */
fsm_rt_rt check_func_key(uint8_t chKeyTemp,uint8_t *pchKeyCode)
{
    static uint8_t s_chIndex ;
    switch(s_tState) {
        case FUNC_KEY_CHECK_START:
            s_chIndex = 0;
            s_tState = FUNC_KEY_CHECK_FIRST;
            //break;                    /* deliberate fall-through */
        case FUNC_KEY_CHECK_FIRST:
            /* scan the first table section for the lead byte */
            do {
                if( chKeyTemp == chFunKeyCode[s_chIndex]) {
                    s_tState = FUNC_KEY_CHECK_SECOND;
                    s_chIndex = 2 + 1 << s_chIndex ;
                    break;
                }
            } while(s_chIndex++ < 2);
            s_chIndex = 0;
            break;
        case FUNC_KEY_CHECK_SECOND:{
            uint8_t chIndex = 0;
            /* scan the section selected above for the second byte */
            do {
                if( chKeyTemp == chFunKeyCode[s_chIndex]) {
                    *pchKeyCode = s_chIndex;
                    return fsm_rt_cpl;
                    break;
                }
                s_chIndex += chIndex;
            } while(chIndex++ < 4);
            break;
        case FUNC_KEY_CHECK_FAIL:
            /* no match: reset everything and report escape */
            FUNC_KEY_CHECK_RESET();
            RESET_PEEK();
            DEQUEUE();
            *pchKeyCode = KEY_ESC;
            //realse_s;
            return fsm_rt_cpl;
        }
    }
    return fsm_rt_on_going;
}
ISC_TASKFUNC_SCOPE isc_boolean_t
isc__task_purgeevent(isc_task_t *task0, isc_event_t *event) {
	isc__task_t *task = (isc__task_t *)task0;
	isc_event_t *curr_event, *next_event;

	/*
	 * Purge 'event' from a task's event queue.
	 *
	 * XXXRTH:  WARNING:  This method may be removed before beta.
	 */

	REQUIRE(VALID_TASK(task));

	/*
	 * If 'event' is on the task's event queue, it will be purged,
	 * unless it is marked as unpurgeable.  'event' does not have to be
	 * on the task's event queue; in fact, it can even be an invalid
	 * pointer.  Purging only occurs if the event is actually on the task's
	 * event queue.
	 *
	 * Purging never changes the state of the task.
	 */

	LOCK(&task->lock);
	/*
	 * Walk the queue by pointer identity: only the exact 'event' object
	 * is removed, and only if PURGE_OK allows it.  On a match the loop
	 * exits with curr_event == event; if no match, curr_event is NULL.
	 */
	for (curr_event = HEAD(task->events);
	     curr_event != NULL;
	     curr_event = next_event) {
		next_event = NEXT(curr_event, ev_link);
		if (curr_event == event && PURGE_OK(event)) {
			DEQUEUE(task->events, curr_event, ev_link);
			break;
		}
	}
	UNLOCK(&task->lock);

	if (curr_event == NULL)
		return (ISC_FALSE);

	/* free outside the lock: the event is ours once dequeued */
	isc_event_free(&curr_event);

	return (ISC_TRUE);
}
/*
 * Destroy an emitter object: free its I/O buffers and state stack, drain
 * and delete any still-queued events, free every remembered tag directive
 * (each owns two heap strings), release the anchors table, and finally
 * zero the whole structure so stale pointers cannot be reused.
 *
 * Fix: the tag-directives loop invoked STACK_EMPTY with the undeclared
 * name 'empty' as its context argument instead of 'emitter', unlike every
 * other macro call in this function.  (The macro happens to ignore the
 * context argument, but the call must still name a valid object.)
 */
yaml_emitter_delete(yaml_emitter_t *emitter)
{
    assert(emitter);    /* Non-NULL emitter object expected. */

    BUFFER_DEL(emitter, emitter->buffer);
    BUFFER_DEL(emitter, emitter->raw_buffer);
    STACK_DEL(emitter, emitter->states);
    /* delete events that were queued but never emitted */
    while (!QUEUE_EMPTY(emitter, emitter->events)) {
        yaml_event_delete(&DEQUEUE(emitter, emitter->events));
    }
    QUEUE_DEL(emitter, emitter->events);
    STACK_DEL(emitter, emitter->indents);
    while (!STACK_EMPTY(emitter, emitter->tag_directives)) {
        yaml_tag_directive_t tag_directive = POP(emitter, emitter->tag_directives);
        yaml_free(tag_directive.handle);
        yaml_free(tag_directive.prefix);
    }
    STACK_DEL(emitter, emitter->tag_directives);
    yaml_free(emitter->anchors);

    memset(emitter, 0, sizeof(yaml_emitter_t));
}
/*
 * Move every matching event from 'task''s event queue onto 'events',
 * returning how many were moved.
 */
static unsigned int
dequeue_events(isc_task_t *task, void *sender, isc_eventtype_t first,
	       isc_eventtype_t last, void *tag,
	       isc_eventlist_t *events, isc_boolean_t purging)
{
	isc_event_t *event, *next_event;
	unsigned int count = 0;

	REQUIRE(VALID_TASK(task));
	REQUIRE(last >= first);

	XTRACE("dequeue_events");

	/*
	 * Events matching 'sender', whose type is >= first and <= last, and
	 * whose tag is 'tag' will be dequeued.  If 'purging', matching events
	 * which are marked as unpurgable will not be dequeued.
	 *
	 * sender == NULL means "any sender", and tag == NULL means "any tag".
	 */

	LOCK(&task->lock);

	/* next pointer is saved up front because DEQUEUE unlinks the event */
	for (event = HEAD(task->events); event != NULL; event = next_event) {
		next_event = NEXT(event, ev_link);
		if (event->ev_type >= first && event->ev_type <= last
		    && (sender == NULL || event->ev_sender == sender)
		    && (tag == NULL || event->ev_tag == tag)
		    && (!purging || PURGE_OK(event))) {
			DEQUEUE(task->events, event, ev_link);
			ENQUEUE(*events, event, ev_link);
			count++;
		}
	}

	UNLOCK(&task->lock);

	return (count);
}
/*
 * Release every resource owned by a parser object and scrub it.
 *
 * Tears down in construction order: the raw and decoded input buffers,
 * the token queue (deleting any tokens that were never consumed), the
 * auxiliary stacks, and the remembered tag directives — each of which
 * owns two heap-allocated strings — before zeroing the structure.
 */
yaml_parser_delete(yaml_parser_t *parser)
{
    assert(parser);     /* Non-NULL parser object expected. */

    BUFFER_DEL(parser, parser->raw_buffer);
    BUFFER_DEL(parser, parser->buffer);

    /* drain and delete tokens that were scanned but never taken */
    for (;;) {
        if (QUEUE_EMPTY(parser, parser->tokens))
            break;
        yaml_token_delete(&DEQUEUE(parser, parser->tokens));
    }
    QUEUE_DEL(parser, parser->tokens);

    STACK_DEL(parser, parser->indents);
    STACK_DEL(parser, parser->simple_keys);
    STACK_DEL(parser, parser->states);
    STACK_DEL(parser, parser->marks);

    /* each directive owns its handle and prefix strings; free both */
    for (;;) {
        if (STACK_EMPTY(parser, parser->tag_directives))
            break;
        yaml_tag_directive_t td = POP(parser, parser->tag_directives);
        yaml_free(td.handle);
        yaml_free(td.prefix);
    }
    STACK_DEL(parser, parser->tag_directives);

    memset(parser, 0, sizeof(yaml_parser_t));
}
/*
 * Reset the port's I/O queues and transmit 'commandString' followed by a
 * carriage return, kicking off transmission with the first byte.
 * Returns 0 on success, -1 if the port status check fails.
 *
 * Fix: the original enqueued the whole string without checking for space;
 * a command longer than the circular output queue would wrap it and
 * corrupt the queue state.  Enqueueing now stops when the queue is full
 * (an over-long command is truncated rather than corrupting memory).
 */
static int Modem_Command(ComPort *p, char *commandString)
{
	byte b;

	if (CheckStatus (p))
		return -1;

	/* empty both queues atomically w.r.t. the UART interrupt handler */
	disable();
	p->outputQueue.head = p->outputQueue.tail = 0;
	p->inputQueue.head = p->inputQueue.tail = 0;
	enable();
	p->bufferUsed = 0;

	while (*commandString && ! FULL(p->outputQueue))
		ENQUEUE (p->outputQueue, *commandString++);
	if (! FULL(p->outputQueue))
		ENQUEUE (p->outputQueue, '\r');

	// get the transmit rolling (queue is known non-empty here)
	DEQUEUE (p->outputQueue, b);
	outportb(p->uart, b);

	return 0;
}
/*
 * Mark 'task' as shutting down and move its on-shutdown events onto the
 * event queue.  Returns ISC_TRUE if the task was idle and has now been
 * made ready (the caller must then queue it on the manager's ready list).
 */
static inline isc_boolean_t
task_shutdown(isc__task_t *task) {
	isc_boolean_t was_idle = ISC_FALSE;
	isc_event_t *event, *prev;

	/*
	 * Caller must be holding the task's lock.
	 */

	XTRACE("task_shutdown");

	if (! TASK_SHUTTINGDOWN(task)) {
		XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				      ISC_MSG_SHUTTINGDOWN, "shutting down"));
		task->flags |= TASK_F_SHUTTINGDOWN;
		if (task->state == task_state_idle) {
			INSIST(EMPTY(task->events));
			task->state = task_state_ready;
			was_idle = ISC_TRUE;
		}
		INSIST(task->state == task_state_ready ||
		       task->state == task_state_running);

		/*
		 * Note that we post shutdown events LIFO:
		 * walking on_shutdown from tail to head and appending to
		 * the event queue delivers the most recently registered
		 * shutdown handler first.
		 */
		for (event = TAIL(task->on_shutdown);
		     event != NULL;
		     event = prev) {
			prev = PREV(event, ev_link);
			DEQUEUE(task->on_shutdown, event, ev_link);
			ENQUEUE(task->events, event, ev_link);
		}
	}

	return (was_idle);
}
/***
 *** Task Manager.
 ***/

/*
 * Worker loop: repeatedly take the task at the head of the manager's
 * ready queue, run events from its event queue (up to the task's
 * quantum), then retire, idle, or requeue the task.  In the threaded
 * build this blocks on work_available; unthreaded it runs until the
 * manager-wide quantum is spent or the ready queue empties.
 */
static void
dispatch(isc_taskmgr_t *manager) {
	isc_task_t *task;
#ifndef ISC_PLATFORM_USETHREADS
	unsigned int total_dispatch_count = 0;
	isc_tasklist_t ready_tasks;
#endif /* ISC_PLATFORM_USETHREADS */

	REQUIRE(VALID_MANAGER(manager));

	/*
	 * Again we're trying to hold the lock for as short a time as possible
	 * and to do as little locking and unlocking as possible.
	 *
	 * In both while loops, the appropriate lock must be held before the
	 * while body starts.  Code which acquired the lock at the top of
	 * the loop would be more readable, but would result in a lot of
	 * extra locking.  Compare:
	 *
	 * Straightforward:
	 *
	 *	LOCK();
	 *	...
	 *	UNLOCK();
	 *	while (expression) {
	 *		LOCK();
	 *		...
	 *		UNLOCK();
	 *
	 *		Unlocked part here...
	 *
	 *		LOCK();
	 *		...
	 *		UNLOCK();
	 *	}
	 *
	 * Note how if the loop continues we unlock and then immediately lock.
	 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
	 * unlocks.  Also note that the lock is not held when the while
	 * condition is tested, which may or may not be important, depending
	 * on the expression.
	 *
	 * As written:
	 *
	 *	LOCK();
	 *	while (expression) {
	 *		...
	 *		UNLOCK();
	 *
	 *		Unlocked part here...
	 *
	 *		LOCK();
	 *		...
	 *	}
	 *	UNLOCK();
	 *
	 * For N iterations of the loop, this code does N+1 locks and N+1
	 * unlocks.  The while expression is always protected by the lock.
	 */

#ifndef ISC_PLATFORM_USETHREADS
	ISC_LIST_INIT(ready_tasks);
#endif
	LOCK(&manager->lock);
	while (!FINISHED(manager)) {
#ifdef ISC_PLATFORM_USETHREADS
		/*
		 * For reasons similar to those given in the comment in
		 * isc_task_send() above, it is safe for us to dequeue
		 * the task while only holding the manager lock, and then
		 * change the task to running state while only holding the
		 * task lock.
		 */
		while ((EMPTY(manager->ready_tasks) ||
			manager->exclusive_requested) &&
		       !FINISHED(manager))
		{
			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
						    ISC_MSGSET_GENERAL,
						    ISC_MSG_WAIT, "wait"));
			WAIT(&manager->work_available, &manager->lock);
			XTHREADTRACE(isc_msgcat_get(isc_msgcat,
						    ISC_MSGSET_TASK,
						    ISC_MSG_AWAKE, "awake"));
		}
#else /* ISC_PLATFORM_USETHREADS */
		if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
		    EMPTY(manager->ready_tasks))
			break;
#endif /* ISC_PLATFORM_USETHREADS */
		XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
					    ISC_MSG_WORKING, "working"));

		task = HEAD(manager->ready_tasks);
		if (task != NULL) {
			unsigned int dispatch_count = 0;
			isc_boolean_t done = ISC_FALSE;
			isc_boolean_t requeue = ISC_FALSE;
			isc_boolean_t finished = ISC_FALSE;
			isc_event_t *event;

			INSIST(VALID_TASK(task));

			/*
			 * Note we only unlock the manager lock if we actually
			 * have a task to do.  We must reacquire the manager
			 * lock before exiting the 'if (task != NULL)' block.
			 */
			DEQUEUE(manager->ready_tasks, task, ready_link);
			manager->tasks_running++;
			UNLOCK(&manager->lock);

			LOCK(&task->lock);
			INSIST(task->state == task_state_ready);
			task->state = task_state_running;
			XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
					      ISC_MSG_RUNNING, "running"));
			isc_stdtime_get(&task->now);
			do {
				if (!EMPTY(task->events)) {
					event = HEAD(task->events);
					DEQUEUE(task->events, event, ev_link);

					/*
					 * Execute the event action.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							    ISC_MSGSET_TASK,
							    ISC_MSG_EXECUTE,
							    "execute action"));
					if (event->ev_action != NULL) {
						/* task lock dropped while the
						 * action runs */
						UNLOCK(&task->lock);
						(event->ev_action)(task,event);
						LOCK(&task->lock);
					}
					dispatch_count++;
#ifndef ISC_PLATFORM_USETHREADS
					total_dispatch_count++;
#endif /* ISC_PLATFORM_USETHREADS */
				}

				if (task->references == 0 &&
				    EMPTY(task->events) &&
				    !TASK_SHUTTINGDOWN(task)) {
					isc_boolean_t was_idle;

					/*
					 * There are no references and no
					 * pending events for this task,
					 * which means it will not become
					 * runnable again via an external
					 * action (such as sending an event
					 * or detaching).
					 *
					 * We initiate shutdown to prevent
					 * it from becoming a zombie.
					 *
					 * We do this here instead of in
					 * the "if EMPTY(task->events)" block
					 * below because:
					 *
					 *	If we post no shutdown events,
					 *	we want the task to finish.
					 *
					 *	If we did post shutdown events,
					 *	will still want the task's
					 *	quantum to be applied.
					 */
					was_idle = task_shutdown(task);
					INSIST(!was_idle);
				}

				if (EMPTY(task->events)) {
					/*
					 * Nothing else to do for this task
					 * right now.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_EMPTY,
							      "empty"));
					if (task->references == 0 &&
					    TASK_SHUTTINGDOWN(task)) {
						/*
						 * The task is done.
						 */
						XTRACE(isc_msgcat_get(
							       isc_msgcat,
							       ISC_MSGSET_TASK,
							       ISC_MSG_DONE,
							       "done"));
						finished = ISC_TRUE;
						task->state = task_state_done;
					} else
						task->state = task_state_idle;
					done = ISC_TRUE;
				} else if (dispatch_count >= task->quantum) {
					/*
					 * Our quantum has expired, but
					 * there is more work to be done.
					 * We'll requeue it to the ready
					 * queue later.
					 *
					 * We don't check quantum until
					 * dispatching at least one event,
					 * so the minimum quantum is one.
					 */
					XTRACE(isc_msgcat_get(isc_msgcat,
							      ISC_MSGSET_TASK,
							      ISC_MSG_QUANTUM,
							      "quantum"));
					task->state = task_state_ready;
					requeue = ISC_TRUE;
					done = ISC_TRUE;
				}
			} while (!done);
			UNLOCK(&task->lock);

			if (finished)
				task_finished(task);

			LOCK(&manager->lock);
			manager->tasks_running--;
#ifdef ISC_PLATFORM_USETHREADS
			if (manager->exclusive_requested &&
			    manager->tasks_running == 1) {
				SIGNAL(&manager->exclusive_granted);
			}
#endif /* ISC_PLATFORM_USETHREADS */
			if (requeue) {
				/*
				 * We know we're awake, so we don't have
				 * to wakeup any sleeping threads if the
				 * ready queue is empty before we requeue.
				 *
				 * A possible optimization if the queue is
				 * empty is to 'goto' the 'if (task != NULL)'
				 * block, avoiding the ENQUEUE of the task
				 * and the subsequent immediate DEQUEUE
				 * (since it is the only executable task).
				 * We don't do this because then we'd be
				 * skipping the exit_requested check.  The
				 * cost of ENQUEUE is low anyway, especially
				 * when you consider that we'd have to do
				 * an extra EMPTY check to see if we could
				 * do the optimization.  If the ready queue
				 * were usually nonempty, the 'optimization'
				 * might even hurt rather than help.
				 */
#ifdef ISC_PLATFORM_USETHREADS
				ENQUEUE(manager->ready_tasks, task,
					ready_link);
#else
				ENQUEUE(ready_tasks, task, ready_link);
#endif
			}
		}
	}
#ifndef ISC_PLATFORM_USETHREADS
	ISC_LIST_APPENDLIST(manager->ready_tasks, ready_tasks, ready_link);
#endif
	UNLOCK(&manager->lock);
}
/*
 * Rocket-launch competition driver (interactive console program).
 *
 * Registers teams into a FIFO queue, runs launch attempts for each team
 * (a team is removed after 2 failed attempts), pushes successful launches
 * onto a stack, and finally offers a small results menu.
 *
 * Fixes applied:
 *  - gets() (unbounded write, removed from C11) replaced by fgets() with
 *    newline stripping, plus draining of leftover input after scanf();
 *  - printf/scanf format specifiers now match their argument types, and
 *    the confirmation printf passes values instead of addresses;
 *  - the success record (equipe_OK) gets the team name copied in and is
 *    the record pushed (the original pushed the raw queue record);
 *  - the cleanup loop used the function pointer IsEmpty as its condition
 *    (always true) and called POP with a missing argument; a push counter
 *    now bounds it.
 *
 * NOTE(review): after a failed launch, 'tentativas' is incremented on a
 * local copy returned by FIRST; unless the queue element itself is
 * updated elsewhere, the attempt count never persists — confirm the
 * queue API.
 */
int main() {
    tipofilalancamento inicio, fim;
    tipo_equipe_OK topo;
    struct equipe equipe;
    struct equipe_OK equipe_OK;
    int cont = 1, lancamento_bem_sucedido = 1;
    int sucessos = 0;   /* how many records have been PUSHed onto 'topo' */

    init(&inicio, &fim);
    init_lista_sucesso(&topo);

    /* --- team registration --- */
    while (cont == 1) {
        printf("Entre com o nome da equipe:\n");
        if (fgets(equipe.nome, sizeof equipe.nome, stdin) == NULL)
            break;                              /* EOF: stop registering */
        equipe.nome[strcspn(equipe.nome, "\n")] = '\0';
        equipe.tentativas = 0;
        ENQUEUE(&inicio, &fim, equipe);
        printf("\nDeseja cadastrar mais equipes?\n");
        printf("(\"1\" = SIM, quero cadastrar mais equipes / \"0\" = NAO, quero iniciar os lancamentos)\n");
        scanf("%d", &cont);
        /* drain the rest of the line so the next fgets() is not fed '\n' */
        while (getchar() != '\n' && !feof(stdin))
            ;
    }

    /* --- launch rounds --- */
    while (!IsEmpty(inicio, fim)) {
        if (FIRST(inicio, fim, &equipe) == 1) {
            printf("Lancamento da Equipe \"%s\"", equipe.nome);
            printf("\n");
            printf("Entre com os dados do lancamento:\n\n");
            printf("Lancamento bem sucedido? (\"1\" = SIM / \"0\" = NAO): ");
            scanf("%d", &lancamento_bem_sucedido);
            if (lancamento_bem_sucedido == 1) {
                printf("\n");
                printf("Entre com a distancia do alvo: ");
                scanf("%d", &equipe_OK.distancia_do_alvo);
                printf("\n");
                printf("Entre com o tempo de propulsao (em s): ");
                scanf("%f", &equipe_OK.tempo_de_propulsao);
                printf("\n");
                /* complete the success record before storing it */
                strcpy(equipe_OK.nome, equipe.nome);
                PUSH(&topo, equipe_OK);
                sucessos++;
                printf("\nConfirmacao: Equipe \"%s\" | Tempo de propulsao: %.2f | Distancia do alvo: %d",
                       equipe_OK.nome,
                       equipe_OK.tempo_de_propulsao,
                       equipe_OK.distancia_do_alvo);
                DEQUEUE(&inicio, &fim, &equipe);
            } else {
                equipe.tentativas += 1;
                if (equipe.tentativas == 2) {
                    DEQUEUE(&inicio, &fim, &equipe);
                    printf("Equipe desclassificada após 2 tentativas sem sucesso\n");
                }
            }
            scanf("%d", &cont);     /* original pause/confirm read, kept */
            printf("\n");
        }
    }

    /* --- results menu --- */
    cont = 1;
    while (cont != 0) {
        printf("Fim dos lançamentos! O que deseja fazer agora?\n");
        printf("1 = Ver o número de equipes que concluíram a competição\n");
        printf("2 = Ver equipe com melhor resultado\n");
        printf("0 = Finalizar o programa\n");
        scanf("%d", &cont);
        switch (cont) {
            case 1:
                printf("%d equipe(s) concluiram a competicao\n", sucessos);
                break;
            case 2:
                break;              /* not implemented in the original */
            case 0:
                /* pop exactly what was pushed and stop */
                while (sucessos > 0) {
                    POP(&topo, &equipe_OK);
                    sucessos--;
                }
                break;
        }
    }
    system("PAUSE");
    return 1;
}
/*
 * Threaded LRU-queue broker demo (ZeroMQ zguide): spawns NBR_CLIENTS
 * client threads and NBR_WORKERS worker threads over ipc endpoints, then
 * runs the LRU routing loop in the main thread, exiting after every
 * client has received one reply.
 */
int main (int argc, char *argv[])
{
    // Prepare our context and sockets
    void *context = zmq_init (1);
    void *frontend = zmq_socket (context, ZMQ_XREP);
    void *backend = zmq_socket (context, ZMQ_XREP);
    zmq_bind (frontend, "ipc://frontend.ipc");
    zmq_bind (backend, "ipc://backend.ipc");

    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) {
        pthread_t client;
        pthread_create (&client, NULL, client_thread, context);
    }
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) {
        pthread_t worker;
        pthread_create (&worker, NULL, worker_thread, context);
    }

    // Logic of LRU loop
    // - Poll backend always, frontend only if 1+ worker ready
    // - If worker replies, queue worker as ready and forward reply
    // to client if necessary
    // - If client requests, pop next worker and send request to it

    // Queue of available workers
    int available_workers = 0;
    char *worker_queue [NBR_WORKERS];

    while (1) {
        // Initialize poll set
        zmq_pollitem_t items [] = {
            // Always poll for worker activity on backend
            { backend, 0, ZMQ_POLLIN, 0 },
            // Poll front-end only if we have available workers
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        if (available_workers)
            zmq_poll (items, 2, -1);
        else
            zmq_poll (items, 1, -1);

        // Handle worker activity on backend
        if (items [0].revents & ZMQ_POLLIN) {
            zmsg_t *zmsg = zmsg_recv (backend);
            // Use worker address for LRU routing
            assert (available_workers < NBR_WORKERS);
            /* zmsg_unwrap transfers ownership of the address frame;
             * it is free()d when the worker is dequeued below */
            worker_queue [available_workers++] = zmsg_unwrap (zmsg);

            // Forward message to client if it's not a READY
            if (strcmp (zmsg_address (zmsg), "READY") == 0)
                zmsg_destroy (&zmsg);
            else {
                zmsg_send (&zmsg, frontend);
                /* client_nbr ended the spawn loop at NBR_CLIENTS, so it
                 * doubles as a countdown of replies still owed */
                if (--client_nbr == 0)
                    break; // Exit after N messages
            }
        }
        if (items [1].revents & ZMQ_POLLIN) {
            // Now get next client request, route to next worker
            zmsg_t *zmsg = zmsg_recv (frontend);
            zmsg_wrap (zmsg, worker_queue [0], "");
            zmsg_send (&zmsg, backend);

            // Dequeue and drop the next worker address
            free (worker_queue [0]);
            DEQUEUE (worker_queue);
            available_workers--;
        }
    }
    /* give in-flight messages a moment to drain before terminating */
    sleep (1);
    zmq_term (context);
    return 0;
}
/*
 * Free Buffer Supply Queue Data Structures
 *
 * Releases every buffer still outstanding on the strategy-1 small and
 * large supply queues (unmapping each buffer's DMA address first), then
 * frees the shared status-word and descriptor areas.  The small/large
 * status and descriptor pointers alias one shared allocation, so only
 * the *_buf1s_* pointers are freed and both sets are NULLed.
 *
 * Arguments:
 *	fup		pointer to device unit structure
 *
 * Returns:
 *	none
 */
void
fore_buf_free(Fore_unit *fup)
{
	Buf_handle	*bhp;
	KBuffer		*m;

	/*
	 * Free any previously supplied and not returned buffers
	 */
	if (fup->fu_flags & CUF_INITED) {

		/*
		 * Run through Strategy 1 Small queue
		 */
		while ((bhp = Q_HEAD(fup->fu_buf1s_bq, Buf_handle)) != NULL) {
			caddr_t	cp;

			/*
			 * Back off to buffer
			 * (the handle lives BUF1_SM_HOFF bytes into it)
			 */
			m = (KBuffer *)((caddr_t)bhp - BUF1_SM_HOFF);

			/*
			 * Dequeue handle and free buffer
			 */
			DEQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1s_bq);

			KB_DATASTART(m, cp, caddr_t);
			DMA_FREE_ADDR(cp, bhp->bh_dma, BUF1_SM_SIZE, 0);

			KB_FREEALL(m);
		}

		/*
		 * Run through Strategy 1 Large queue
		 */
		while ((bhp = Q_HEAD(fup->fu_buf1l_bq, Buf_handle)) != NULL) {
			caddr_t	cp;

			/*
			 * Back off to buffer
			 */
			m = (KBuffer *)((caddr_t)bhp - BUF1_LG_HOFF);

			/*
			 * Dequeue handle and free buffer
			 */
			DEQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1l_bq);

			KB_DATASTART(m, cp, caddr_t);
			DMA_FREE_ADDR(cp, bhp->bh_dma, BUF1_LG_SIZE, 0);

			KB_FREEALL(m);
		}
	}

	/*
	 * Free the status words
	 */
	if (fup->fu_buf1s_stat) {
		if (fup->fu_buf1s_statd) {
			DMA_FREE_ADDR(fup->fu_buf1s_stat, fup->fu_buf1s_statd,
				sizeof(Q_status) *
					(BUF1_SM_QUELEN + BUF1_LG_QUELEN),
				ATM_DEV_NONCACHE);
		}
		atm_dev_free((volatile void *)fup->fu_buf1s_stat);
		fup->fu_buf1s_stat = NULL;
		fup->fu_buf1s_statd = NULL;
		fup->fu_buf1l_stat = NULL;
		fup->fu_buf1l_statd = NULL;
	}

	/*
	 * Free the transmit descriptors
	 */
	if (fup->fu_buf1s_desc) {
		if (fup->fu_buf1s_descd) {
			DMA_FREE_ADDR(fup->fu_buf1s_desc, fup->fu_buf1s_descd,
				sizeof(Buf_descr) *
					((BUF1_SM_QUELEN * BUF1_SM_ENTSIZE) +
					 (BUF1_LG_QUELEN * BUF1_LG_ENTSIZE)),
				0);
		}
		atm_dev_free(fup->fu_buf1s_desc);
		fup->fu_buf1s_desc = NULL;
		fup->fu_buf1s_descd = NULL;
		fup->fu_buf1l_desc = NULL;
		fup->fu_buf1l_descd = NULL;
	}

	return;
}
/*
 * Supply Strategy 1 Large Buffers to CP
 *
 * May be called in interrupt state.
 * Must be called with interrupts locked out.
 *
 * Arguments:
 *	fup		pointer to device unit structure
 *
 * Returns:
 *	none
 */
static void
fore_buf_supply_1l(Fore_unit *fup)
{
	H_buf_queue	*hbp;
	Buf_queue	*cqp;
	Buf_descr	*bdp;
	Buf_handle	*bhp;
	KBuffer		*m;
	int		nvcc, nbuf, i;

	/*
	 * Figure out how many buffers we should be giving to the CP.
	 * We're basing this calculation on the current number of open
	 * VCCs thru this device, with certain minimum and maximum values
	 * enforced.  This will then allow us to figure out how many more
	 * buffers we need to supply to the CP.  This will be rounded up
	 * to fill a supply queue entry.
	 */
	nvcc = MAX(fup->fu_open_vcc, BUF_MIN_VCC);
	nbuf = nvcc * 4 * RECV_MAX_SEGS;
	nbuf = MIN(nbuf, BUF1_LG_CPPOOL);
	nbuf -= fup->fu_buf1l_cnt;
	nbuf = roundup(nbuf, BUF1_LG_ENTSIZE);

	/*
	 * OK, now supply the buffers to the CP
	 */
	while (nbuf > 0) {

		/*
		 * Acquire a supply queue entry
		 * (stop when the CP hasn't freed the next one yet)
		 */
		hbp = fup->fu_buf1l_tail;
		if (!((*hbp->hbq_status) & QSTAT_FREE))
			break;
		bdp = hbp->hbq_descr;

		/*
		 * Get a buffer for each descriptor in the queue entry
		 */
		for (i = 0; i < BUF1_LG_ENTSIZE; i++, bdp++) {
			caddr_t	cp;

			/*
			 * Get a cluster buffer
			 */
			KB_ALLOCEXT(m, BUF1_LG_SIZE, KB_F_NOWAIT, KB_T_DATA);
			if (m == NULL) {
				break;
			}
			KB_HEADSET(m, BUF1_LG_DOFF);

			/*
			 * Point to buffer handle structure
			 * (embedded BUF1_LG_HOFF bytes into the buffer)
			 */
			bhp = (Buf_handle *)((caddr_t)m + BUF1_LG_HOFF);
			bhp->bh_type = BHT_S1_LARGE;

			/*
			 * Setup buffer descriptor
			 */
			bdp->bsd_handle = bhp;
			KB_DATASTART(m, cp, caddr_t);
			bhp->bh_dma = bdp->bsd_buffer = (H_dma) DMA_GET_ADDR(
					cp, BUF1_LG_SIZE, BUF_DATA_ALIGN, 0);
			if (bdp->bsd_buffer == 0) {
				/*
				 * Unable to assign dma address - free up
				 * this descriptor's buffer
				 */
				fup->fu_stats->st_drv.drv_bf_segdma++;
				KB_FREEALL(m);
				break;
			}

			/*
			 * All set, so queue buffer (handle)
			 */
			ENQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1l_bq);
		}

		/*
		 * If we we're not able to fill all the descriptors for
		 * an entry, free up what's been partially built
		 */
		if (i != BUF1_LG_ENTSIZE) {
			caddr_t	cp;

			/*
			 * Clean up each used descriptor
			 * (i counts how many were successfully built)
			 */
			for (bdp = hbp->hbq_descr; i; i--, bdp++) {
				bhp = bdp->bsd_handle;

				DEQUEUE(bhp, Buf_handle, bh_qelem,
					fup->fu_buf1l_bq);

				m = (KBuffer *)
					((caddr_t)bhp - BUF1_LG_HOFF);
				KB_DATASTART(m, cp, caddr_t);
				DMA_FREE_ADDR(cp, bhp->bh_dma, BUF1_LG_SIZE, 0);
				KB_FREEALL(m);
			}
			break;
		}

		/*
		 * Finally, we've got an entry ready for the CP.
		 * So claim the host queue entry and setup the CP-resident
		 * queue entry.  The CP will (potentially) grab the supplied
		 * buffers when the descriptor pointer is set.
		 */
		fup->fu_buf1l_tail = hbp->hbq_next;
		(*hbp->hbq_status) = QSTAT_PENDING;
		cqp = hbp->hbq_cpelem;
		cqp->cq_descr = (CP_dma) CP_WRITE((u_long)hbp->hbq_descr_dma);

		/*
		 * Update counters, etc for supplied buffers
		 */
		fup->fu_buf1l_cnt += BUF1_LG_ENTSIZE;
		nbuf -= BUF1_LG_ENTSIZE;
	}

	return;
}
int TTY_Connect(int handle, char *host) { double start; ComPort *p; char *response = NULL; keydest_t save_key_dest; byte dialstring[64]; byte b; p = handleToPort[handle]; if ((p->modemStatus & MODEM_STATUS_MASK) != MODEM_STATUS_MASK) { Con_Printf ("Serial: line not ready ("); if ((p->modemStatus & MSR_CTS) == 0) Con_Printf(" CTS"); if ((p->modemStatus & MSR_DSR) == 0) Con_Printf(" DSR"); if ((p->modemStatus & MSR_CD) == 0) Con_Printf(" CD"); Con_Printf(" )"); return -1; } // discard any scraps in the input buffer while (! EMPTY (p->inputQueue)) DEQUEUE (p->inputQueue, b); CheckStatus (p); if (p->useModem) { save_key_dest = key_dest; key_dest = key_console; key_count = -2; Con_Printf ("Dialing...\n"); snprintf (dialstring, sizeof(dialstring), "AT D%c %s\r", p->dialType, host); Modem_Command (p, dialstring); start = Sys_DoubleTime(); while(1) { if ((Sys_DoubleTime() - start) > 60.0) { Con_Printf("Dialing failure!\n"); break; } IN_SendKeyEvents (); if (key_count == 0) { if (key_lastpress != K_ESCAPE) { key_count = -2; continue; } Con_Printf("Aborting...\n"); while ((Sys_DoubleTime() - start) < 5.0) ; disable(); p->outputQueue.head = p->outputQueue.tail = 0; p->inputQueue.head = p->inputQueue.tail = 0; outportb(p->uart + MODEM_CONTROL_REGISTER, inportb(p->uart + MODEM_CONTROL_REGISTER) & ~MCR_DTR); enable(); start = Sys_DoubleTime(); while ((Sys_DoubleTime() - start) < 0.75) ; outportb(p->uart + MODEM_CONTROL_REGISTER, inportb(p->uart + MODEM_CONTROL_REGISTER) | MCR_DTR); response = "Aborted"; break; } response = Modem_Response(p); if (!response) continue; if (Q_strncmp(response, "CONNECT", 7) == 0) { disable(); p->modemRang = true; p->modemConnected = true; p->outputQueue.head = p->outputQueue.tail = 0; p->inputQueue.head = p->inputQueue.tail = 0; enable(); key_dest = save_key_dest; key_count = 0; m_return_onerror = false; return 0; } if (Q_strncmp(response, "NO CARRIER", 10) == 0) break; if (Q_strncmp(response, "NO DIALTONE", 11) == 0) break; if (Q_strncmp(response, 
"NO DIAL TONE", 12) == 0) break; if (Q_strncmp(response, "NO ANSWER", 9) == 0) break; if (Q_strncmp(response, "BUSY", 4) == 0) break; if (Q_strncmp(response, "ERROR", 5) == 0) break; } key_dest = save_key_dest; key_count = 0; if (m_return_onerror) { key_dest = key_menu; m_state = m_return_state; m_return_onerror = false; Q_strncpy(m_return_reason, response, 31); } return -1; } m_return_onerror = false; return 0; }
int main (int argc, char *argv[]) { s_version_assert (2, 1); // Prepare our context and sockets void *context = zmq_init (1); void *frontend = zmq_socket (context, ZMQ_XREP); void *backend = zmq_socket (context, ZMQ_XREP); zmq_bind (frontend, "ipc://frontend.ipc"); zmq_bind (backend, "ipc://backend.ipc"); int client_nbr; for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) { pthread_t client; pthread_create (&client, NULL, client_thread, NULL); } int worker_nbr; for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) { pthread_t worker; pthread_create (&worker, NULL, worker_thread, NULL); } // Logic of LRU loop // - Poll backend always, frontend only if 1+ worker ready // - If worker replies, queue worker as ready and forward reply // to client if necessary // - If client requests, pop next worker and send request to it // Queue of available workers int available_workers = 0; char *worker_queue [10]; while (1) { // Initialize poll set zmq_pollitem_t items [] = { // Always poll for worker activity on backend { backend, 0, ZMQ_POLLIN, 0 }, // Poll front-end only if we have available workers { frontend, 0, ZMQ_POLLIN, 0 } }; if (available_workers) zmq_poll (items, 2, -1); else zmq_poll (items, 1, -1); // Handle worker activity on backend if (items [0].revents & ZMQ_POLLIN) { // Queue worker address for LRU routing char *worker_addr = s_recv (backend); assert (available_workers < NBR_WORKERS); worker_queue [available_workers++] = worker_addr; // Second frame is empty char *empty = s_recv (backend); assert (empty [0] == 0); free (empty); // Third frame is READY or else a client reply address char *client_addr = s_recv (backend); // If client reply, send rest back to frontend if (strcmp (client_addr, "READY") != 0) { empty = s_recv (backend); assert (empty [0] == 0); free (empty); char *reply = s_recv (backend); s_sendmore (frontend, client_addr); s_sendmore (frontend, ""); s_send (frontend, reply); free (reply); if (--client_nbr == 0) break; // Exit after N 
messages } free (client_addr); } if (items [1].revents & ZMQ_POLLIN) { // Now get next client request, route to LRU worker // Client request is [address][empty][request] char *client_addr = s_recv (frontend); char *empty = s_recv (frontend); assert (empty [0] == 0); free (empty); char *request = s_recv (frontend); s_sendmore (backend, worker_queue [0]); s_sendmore (backend, ""); s_sendmore (backend, client_addr); s_sendmore (backend, ""); s_send (backend, request); free (client_addr); free (request); // Dequeue and drop the next worker address free (worker_queue [0]); DEQUEUE (worker_queue); available_workers--; } } zmq_close (frontend); zmq_close (backend); zmq_term (context); return 0; }
/*
 * Interrupt service routine for a 16550 UART attached to ComPort `p`.
 * Loops reading the Interrupt Identification Register and servicing each
 * pending source until the IIR reports "no interrupt pending" (value 1),
 * then issues a non-specific EOI to the master PIC.
 *
 * NOTE(review): disable() is called without a matching enable(); presumably
 * the interrupt-flag state is restored by the ISR epilogue/iret — confirm
 * against the ISR installation code.
 */
static void ISR_16550 (ComPort *p)
{
	int	count;
	byte	source;
	byte	b;

	disable();

	/* low 3 IIR bits identify the interrupt source; 1 == none pending */
	while((source = inportb (p->uart + INTERRUPT_ID_REGISTER) & 0x07) != 1)
	{
		switch (source)
		{
			case IIR_RX_DATA_READY_INTERRUPT:
				/* drain the receive FIFO while LSR says data is ready */
				do
				{
					b = inportb (p->uart + RECEIVE_BUFFER_REGISTER);
					if (!FULL(p->inputQueue))
					{
						ENQUEUE (p->inputQueue, b);
					}
					else
					{
						/* software queue full: record it as an overrun */
						p->lineStatus |= LSR_OVERRUN_ERROR;
						p->statusUpdated = true;
					}
				} while (inportb (p->uart + LINE_STATUS_REGISTER) & LSR_DATA_READY);
				break;

			case IIR_TX_HOLDING_REGISTER_INTERRUPT:
				/* refill at most 16 bytes — the 16550 TX FIFO depth */
				count = 16;
				while ((! EMPTY(p->outputQueue)) && count--)
				{
					DEQUEUE (p->outputQueue, b);
					outportb (p->uart + TRANSMIT_HOLDING_REGISTER, b);
				}
				break;

			case IIR_MODEM_STATUS_INTERRUPT:
				/* reading MSR clears the interrupt; fold in the ignore mask */
				p->modemStatus = (inportb (p->uart + MODEM_STATUS_REGISTER) & MODEM_STATUS_MASK) | p->modemStatusIgnore;
				p->statusUpdated = true;
				break;

			case IIR_LINE_STATUS_INTERRUPT:
				/* reading LSR clears the interrupt */
				p->lineStatus = inportb (p->uart + LINE_STATUS_REGISTER);
				p->statusUpdated = true;
				break;
		}

		source = inportb (p->uart + INTERRUPT_ID_REGISTER) & 0x07;
	}

	// check for lost IIR_TX_HOLDING_REGISTER_INTERRUPT on 16550a!
	if (inportb (p->uart + LINE_STATUS_REGISTER ) & LSR_TRANSMITTER_EMPTY)
	{
		count = 16;
		while ((! EMPTY(p->outputQueue)) && count--)
		{
			DEQUEUE (p->outputQueue, b);
			outportb (p->uart + TRANSMIT_HOLDING_REGISTER, b);
		}
	}

	/* non-specific end-of-interrupt to the master 8259 PIC */
	outportb (0x20, 0x20);
}
int main (void) { // Prepare our context and sockets void *context = zmq_ctx_new (); void *frontend = zmq_socket (context, ZMQ_ROUTER); void *backend = zmq_socket (context, ZMQ_ROUTER); zmq_bind (frontend, "ipc://frontend.ipc"); zmq_bind (backend, "ipc://backend.ipc"); int client_nbr; for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) { pthread_t client; pthread_create (&client, NULL, client_task, NULL); } int worker_nbr; for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) { pthread_t worker; pthread_create (&worker, NULL, worker_task, NULL); } // .split main task body // Here is the main loop for the least-recently-used queue. It has two // sockets; a frontend for clients and a backend for workers. It polls // the backend in all cases, and polls the frontend only when there are // one or more workers ready. This is a neat way to use 0MQ's own queues // to hold messages we're not ready to process yet. When we get a client // reply, we pop the next available worker, and send the request to it, // including the originating client identity. When a worker replies, we // re-queue that worker, and we forward the reply to the original client, // using the reply envelope. // Queue of available workers int available_workers = 0; char *worker_queue [10]; while (1) { zmq_pollitem_t items [] = { { backend, 0, ZMQ_POLLIN, 0 }, { frontend, 0, ZMQ_POLLIN, 0 } }; // Poll frontend only if we have available workers int rc = zmq_poll (items, available_workers ? 
2 : 1, -1); if (rc == -1) break; // Interrupted // Handle worker activity on backend if (items [0].revents & ZMQ_POLLIN) { // Queue worker identity for load-balancing char *worker_id = s_recv (backend); assert (available_workers < NBR_WORKERS); worker_queue [available_workers++] = worker_id; // Second frame is empty char *empty = s_recv (backend); assert (empty [0] == 0); free (empty); // Third frame is READY or else a client reply identity char *client_id = s_recv (backend); // If client reply, send rest back to frontend if (strcmp (client_id, "READY") != 0) { empty = s_recv (backend); assert (empty [0] == 0); free (empty); char *reply = s_recv (backend); s_sendmore (frontend, client_id); s_sendmore (frontend, ""); s_send (frontend, reply); free (reply); if (--client_nbr == 0) break; // Exit after N messages } free (client_id); } // .split handling a client request // Here is how we handle a client request: if (items [1].revents & ZMQ_POLLIN) { // Now get next client request, route to last-used worker // Client request is [identity][empty][request] char *client_id = s_recv (frontend); char *empty = s_recv (frontend); assert (empty [0] == 0); free (empty); char *request = s_recv (frontend); s_sendmore (backend, worker_queue [0]); s_sendmore (backend, ""); s_sendmore (backend, client_id); s_sendmore (backend, ""); s_send (backend, request); free (client_id); free (request); // Dequeue and drop the next worker identity free (worker_queue [0]); DEQUEUE (worker_queue); available_workers--; } } zmq_close (frontend); zmq_close (backend); zmq_ctx_destroy (context); return 0; }