user_datum_t *declare_user(void) { char *id = queue_remove(id_queue), *dest_id = NULL; user_datum_t *user = NULL, *dest_user = NULL; int retval; uint32_t value = 0; if (id == NULL) { yyerror("no user name"); return NULL; } if ((user = (user_datum_t *) malloc(sizeof(*user))) == NULL) { yyerror("Out of memory!"); free(id); return NULL; } user_datum_init(user); retval = declare_symbol(SYM_USERS, id, (hashtab_datum_t *) user, &value, &value); if (retval == 0) { user->s.value = value; if ((dest_id = strdup(id)) == NULL) { yyerror("Out of memory!"); return NULL; } } else { /* this user was already declared in this module, or error */ dest_id = id; user_datum_destroy(user); free(user); } if (retval == 0 || retval == 1) { /* create a new user_datum_t for this decl, if necessary */ hashtab_t users_tab; assert(stack_top->type == 1); if (stack_top->parent == NULL) { /* in parent, so use global symbol table */ users_tab = policydbp->p_users.table; } else { users_tab = stack_top->decl->p_users.table; } dest_user = (user_datum_t *) hashtab_search(users_tab, dest_id); if (dest_user == NULL) { if ((dest_user = (user_datum_t *) malloc(sizeof(*dest_user))) == NULL) { yyerror("Out of memory!"); free(dest_id); return NULL; } user_datum_init(dest_user); dest_user->s.value = value; if (user_implicit_bounds(users_tab, dest_id, dest_user)) { free(dest_id); user_datum_destroy(dest_user); free(dest_user); return NULL; } if (hashtab_insert(users_tab, dest_id, dest_user)) { yyerror("Out of memory!"); free(dest_id); user_datum_destroy(dest_user); free(dest_user); return NULL; } } else { free(dest_id); } } else { free(dest_id); } switch (retval) { case -3:{ yyerror("Out of memory!"); return NULL; } case -2:{ yyerror("duplicate declaration of user"); return NULL; } case -1:{ yyerror("could not declare user here"); return NULL; } case 0:{ return dest_user; } case 1:{ return dest_user; /* user already declared for this block */ } default:{ abort(); /* should never get here */ } } }
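/*
 * A minimal sketch of the declare_symbol() return-code protocol that
 * declare_user() above and declare_type() later in this file rely on.
 * The numeric meanings are inferred from the switch statements in those
 * functions, not from the checkpolicy headers, so treat them as assumptions.
 */
static const char *declare_symbol_status(int retval)
{
	switch (retval) {
	case -3: return "out of memory";
	case -2: return "duplicate declaration in the current scope";
	case -1: return "declaration not permitted here";
	case 0:  return "newly declared";
	case 1:  return "already declared by this module (benign)";
	default: return "unknown";
	}
}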
int main() { Queue *q = queue_new(); HashTable *h = hashtable_new(512, header_hash_fn, header_eq_fn); HashEnum e; IsHeader *hp; int i, j; printf("Testing Queue...\n"); queue_insert(q, 1); queue_insert(q, 2); queue_insert(q, 3); queue_insert(q, 4); queue_insert(q, 5); queue_remove(q); queue_insert(q, 1); queue_insert(q, 2); queue_insert(q, 3); queue_insert(q, 4); queue_remove(q); queue_remove(q); queue_insert(q, 5); while (!queue_isempty(q)) printf("%u ", (unsigned)queue_remove(q)); printf("\n"); printf("Testing Hash...\n"); for (i=1; i<1024; i++) hashtable_insert(h, i, malloc(sizeof(IsHeader))); j = 0; e = hashenum_create(h); while((hp = hashenum_next(&e)) != NULL) { hp->offset = header_hash_fn(j); for (i=0; i<32; i++) hp->offsets[i] = i; hp->name = malloc(sizeof(char)*64); sprintf(hp->name, "I am number %d", j++); } for (j=10; j<800; j+=31) { hp = hashtable_find(h, (uint16)j); if (hp == NULL) { printf("Did not find key %d\n", j); continue; } printf("Found key %d - offset: %d name: %s\n", j, hp->offset, hp->name); printf("Offsets:"); for (i=0; i<32; i++) printf(" %d", hp->offsets[i]); printf("\n"); } printf("*** Super Test ***\n"); e = hashenum_create(h); while((hp = hashenum_next(&e)) != NULL) { printf("%d ", hp->offset%1024 ); } printf("\n"); hashtable_freecontents(h); free(h); free(q); return 0; }
void sjf() {
    // if there is a task running
    if (processo_corrente != NULL) {
        // if the running task has finished executing
        if (processo_corrente->duracao == processo_corrente->tempo_executado_total) {
            // move the task to the terminated state
            processo_corrente->estado_atual = 3; // terminated
            queue_append((queue_t **) &finesh, (queue_t *) processo_corrente);
            // release the processor
            processo_corrente = NULL;
            qtd_Eprocess++;
            pthread_mutex_unlock(&processador);
        } else {
            // if the running task has reached the end of its quantum:
            // move the task to the ready queue
            // release the processor
        }
    }
    if ((queue_size((queue_t *) finesh)) < (qtd_Tprocess)) {
        // for each task i
        int i;
        iterador = processes;
        for (i = 0; i < queue_size((queue_t *) processes); i++) {
            // if task i starts now (at time t)
            if (iterador->inicio == tempo) {
                // put the task on the ready queue
                aux = (queue_process_t *) queue_remove((queue_t **) &processes, (queue_t *) iterador);
                queue_append((queue_t **) &ready, (queue_t *) iterador);
                iterador->estado_atual = 1;
                iterador = processes;
            } else {
                iterador = iterador->next;
            }
        }
        // if the processor is free
        if (!pthread_mutex_trylock(&processador)) {
            // if there is a task in the ready queue
            if (queue_size((queue_t *) ready) > 0) {
                // pick the shortest task from the ready queue (SJF)
                aux = ready;
                iterador = ready;
                for (i = 0; i < queue_size((queue_t *) ready); i++) {
                    if (aux->duracao > iterador->duracao)
                        aux = iterador; // found a shorter job
                    iterador = iterador->next;
                }
                // move that task to the "running" state
                processo_corrente = (queue_process_t *) queue_remove((queue_t **) &ready, (queue_t *) aux);
                processo_corrente->estado_atual = 2;
                numero_troca_contexto++;
            }
        }
        // print a diagram line with the state of each task
        imprime();
    } else {
        tempo_medio_vida = (float)(tempo) / (float)qtd_Tprocess;
        tempo = tmax;
    }
}
static void workspace_nfs_lookup_cached(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info) { struct resource_struct *resource=call_info->object->resource; struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data; struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data; char *path=call_info->pathinfo.path + call_info->relpath; struct stat st; int result=0; memset(&st, 0, sizeof(struct stat)); if (strlen(path)==0) path=(char *) rootpath; logoutput("workspace_nfs_lookup_cached, path %s", path); pthread_mutex_lock(&nfs_export->mutex); result=nfs_stat(nfs_ctx, path, &st); pthread_mutex_unlock(&nfs_export->mutex); if (result<0) { result=abs(result); if (result==ENOENT) { struct inode_struct *inode=entry->inode; unsigned int error=0; inode=entry->inode; inode->alias=NULL; remove_entry(entry, &error); queue_remove(call_info->object, entry, &error); entry=NULL; } fuse_reply_err(req, result); } else { struct fuse_entry_param e; struct inode_struct *inode=entry->inode; inode->mode=st.st_mode; inode->nlink=st.st_nlink; inode->uid=st.st_uid; inode->gid=st.st_gid; inode->rdev=st.st_rdev; inode->mtim.tv_sec=st.st_mtim.tv_sec; inode->mtim.tv_nsec=st.st_mtim.tv_nsec; inode->ctim.tv_sec=st.st_ctim.tv_sec; inode->ctim.tv_nsec=st.st_ctim.tv_nsec; inode->size=st.st_size; e.ino = inode->ino; e.generation = 1; e.attr_timeout = fs_options.attr_timeout; e.entry_timeout = fs_options.entry_timeout; get_current_time(&entry->synctime); e.attr.st_dev = 0; e.attr.st_ino = e.ino; e.attr.st_mode = st.st_mode; e.attr.st_nlink = st.st_nlink; e.attr.st_uid = st.st_uid; e.attr.st_gid = st.st_gid; e.attr.st_rdev = st.st_rdev; e.attr.st_size = st.st_size; e.attr.st_atim.tv_sec = st.st_atim.tv_sec; e.attr.st_atim.tv_nsec = st.st_atim.tv_nsec; e.attr.st_mtim.tv_sec = st.st_mtim.tv_sec; e.attr.st_mtim.tv_nsec = st.st_mtim.tv_nsec; e.attr.st_ctim.tv_sec = st.st_ctim.tv_sec; e.attr.st_ctim.tv_nsec = st.st_ctim.tv_nsec; e.attr.st_blksize=_DEFAULT_BLOCKSIZE; if (inode->size % e.attr.st_blksize == 0) { e.attr.st_blocks = inode->size / e.attr.st_blksize; } else { e.attr.st_blocks = 1 + inode->size / e.attr.st_blksize; } fuse_reply_entry(req, &e); } free_path_pathinfo(&call_info->pathinfo); }
/* **++ ** ROUTINE: lbr_get_rdt ** ** FUNCTIONAL DESCRIPTION: ** ** Gets the RDT of a library module. ** ** RETURNS: cond_value, longword (unsigned), write only, by value ** ** PROTOTYPE: ** ** lbr_get_rdt(char *libfile, char *module, TIME *rdt); ** ** libfile: ASCIZ_string, read only, by reference ** module: ASCIZ_string, read only, by reference ** rdt: date_time, quadword (signed), write only, by reference ** ** IMPLICIT INPUTS: None. ** ** IMPLICIT OUTPUTS: None. ** ** COMPLETION CODES: Any from the LBR$ routines. ** ** SIDE EFFECTS: lbrque modified. ** **-- */ unsigned int lbr_get_rdt (char *lib, char *mod, TIME *rdt) { unsigned int lbrfunc=LBR$C_READ, lbr$c_knf = 0x08680F2, libidx = 1; char real_name[256]; unsigned char fid[28]; struct dsc$descriptor libdsc, moddsc; struct LBR *lbr; unsigned int status, len; unsigned short rfa[3]; struct mhddef mhd; /* ** First look up the library file */ status = file_find(lib, 0, real_name, fid); if (!OK(status)) return status; /* ** Already open? */ for (lbr = (struct LBR *)lbrque.head; lbr != (struct LBR *)&lbrque; lbr = lbr->flink) { if (memcmp(fid, &lbr->nam.nam$t_dvi, 28) == 0) break; } /* ** If not open yet, construct a context block and open it. */ if (lbr == (struct LBR *) &lbrque) { /* ** Already have max number of libraries open? If so, close one. */ if (lbrcount >= LBR$C_MAXCTL) { queue_remove(lbrque.head, &lbr); lbr$close(&lbr->lbrctx); lbrcount -= 1; } else { lbr = malloc(sizeof(struct LBR)); } queue_insert(lbr, lbrque.tail); lbr->lbrctx = 0; lbr->nam = cc$rms_nam; lbr->nam.nam$b_rss = sizeof(lbr->rspec)-1; lbr->nam.nam$b_ess = sizeof(lbr->espec)-1; lbr->nam.nam$l_esa = lbr->espec; lbr->nam.nam$l_rsa = lbr->rspec; status = lbr$ini_control(&lbr->lbrctx, &lbrfunc, 0, &lbr->nam); if (!OK(status)) lib$signal(status); INIT_SDESC(libdsc, strlen(real_name), real_name); status = lbr$open(&lbr->lbrctx, &libdsc); if (!OK(status)) return status; lbrcount += 1; } /* ** Look up the module in question... */ INIT_SDESC(moddsc, strlen(mod), mod); /* status = lbr$lookup_key(&lbr->lbrctx, &moddsc, rfa); */ mod2search4 = &moddsc; return_rfa_here = rfa; status = lbr$get_index(&lbr->lbrctx, &libidx, &caseblindsearch); if (status != 2) return lbr$c_knf; /* caseblindsearch returns 2 on success, 1 on failure */ /* ** ... and get the RDT from the module header */ INIT_SDESC(moddsc, sizeof(mhd), &mhd); status = lbr$set_module(&lbr->lbrctx, rfa, &moddsc, &len); if (!OK(status) && (status != LBR$_HDRTRUNC)) lib$signal(status); memcpy(rdt, &mhd.mhd$l_datim, 8); return SS$_NORMAL; }
/* * task_swapout: * A reference to the task must be held. * * Start swapping out a task by sending an AST_SWAPOUT to each thread. * When the threads reach a clean point, they queue themselves up on the * swapout_thread_q to be swapped out by the task_swap_swapout_thread. * The task can be swapped in at any point in this process. * * A task will not be fully swapped out (i.e. its map residence count * at zero) until all currently-swapped threads run and reach * a clean point, at which time they will be swapped again, * decrementing the swap_ast_waiting count on the task. * * Locking: no locks held upon entry and exit. * Task_lock is held throughout this function. */ kern_return_t task_swapout(task_t task) { thread_act_t thr_act; thread_t thread; queue_head_t *list; int s; task_swapout_lock(); task_lock(task); /* * NOTE: look into turning these into assertions if they * are invariants. */ if ((task->swap_state != TASK_SW_IN) || (!task->active)) { task_unlock(task); task_swapout_unlock(); return(KERN_FAILURE); } if (task->swap_flags & TASK_SW_ELIGIBLE) { queue_remove(&eligible_tasks, task, task_t, swapped_tasks); task->swap_flags &= ~TASK_SW_ELIGIBLE; } task_swapout_unlock(); /* set state to avoid races with task_swappable(FALSE) */ task->swap_state = TASK_SW_GOING_OUT; task->swap_rss = pmap_resident_count(task->map->pmap); task_swaprss_out += task->swap_rss; task->swap_ast_waiting = task->thr_act_count; /* * halt all threads in this task: * We don't need the thread list lock for traversal. */ list = &task->thr_acts; thr_act = (thread_act_t) queue_first(list); while (!queue_end(list, (queue_entry_t) thr_act)) { boolean_t swappable; thread_act_t ract; thread = act_lock_thread(thr_act); s = splsched(); if (!thread) swappable = (thr_act->swap_state != TH_SW_UNSWAPPABLE); else { thread_lock(thread); swappable = TRUE; for (ract = thread->top_act; ract; ract = ract->lower) if (ract->swap_state == TH_SW_UNSWAPPABLE) { swappable = FALSE; break; } } if (swappable) thread_ast_set(thr_act, AST_SWAPOUT); if (thread) thread_unlock(thread); splx(s); assert((thr_act->ast & AST_TERMINATE) == 0); act_unlock_thread(thr_act); thr_act = (thread_act_t) queue_next(&thr_act->thr_acts); } task->swap_stamp = sched_tick; task->swap_nswap++; assert((task->swap_flags&TASK_SW_WANT_IN) == 0); /* put task on the queue of swapped out tasks */ task_swapper_lock(); #if TASK_SW_DEBUG if (task_swap_debug && on_swapped_list(task)) { printf("task 0x%X already on list\n", task); Debugger(""); } #endif /* TASK_SW_DEBUG */ queue_enter(&swapped_tasks, task, task_t, swapped_tasks); tasks_swapped_out++; task_swapouts++; task_swapper_unlock(); task_unlock(task); return(KERN_SUCCESS); }
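/*
 * For reference, a simplified sketch of the Mach-style four-argument
 * queue_remove(head, elt, type, field) used by task_swapout() above.
 * This is an assumption-laden illustration of the intrusive doubly-linked
 * unlink, not the actual <kern/queue.h> definition; the links are assumed
 * to store element pointers, with the head acting as the list sentinel.
 */
typedef struct queue_entry_sk {
	struct queue_entry_sk *next, *prev;
} *queue_entry_sk_t;

#define QUEUE_REMOVE_SKETCH(head, elt, type, field)		\
do {								\
	queue_entry_sk_t __next = (elt)->field.next;		\
	queue_entry_sk_t __prev = (elt)->field.prev;		\
	if (__next == (head))					\
		(head)->prev = __prev;	/* elt was the tail */	\
	else							\
		((type)__next)->field.prev = __prev;		\
	if (__prev == (head))					\
		(head)->next = __next;	/* elt was the head */	\
	else							\
		((type)__prev)->field.next = __next;		\
} while (0)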
/*
 * Build a non-deterministic finite automaton using Aho-Corasick construction.
 * The keyword trie must already be built via bnfa_add_pattern_states().
 */
static int bnfa_build_nfa(bnfa_struct_t *bnfa)
{
    int r, s, i;
    QUEUE q, *queue = &q;
    bnfa_state_t *fail_state = bnfa->fail_state;
    bnfa_match_node_t **match_list = bnfa->match_list;
    bnfa_match_node_t *mlist;
    bnfa_match_node_t *px;

    /* Init a Queue */
    queue_init(queue);

    /* Add the state 0 transitions 1st;
     * the states at depth 1 fail to state 0 */
    for (i = 0; i < bnfa->alphabet_size; i++)
    {
        /* note that state zero does not fail,
         * it just returns 0..nstates-1 */
        s = bnfa_list_get_next_state(bnfa, 0, i);
        if (s) /* don't bother adding state zero */
        {
            if (queue_add(queue, s))
            {
                queue_free(queue);
                return -1;
            }
            fail_state[s] = 0;
        }
    }

    /* Build the fail states for each successive layer of transitions */
    while (queue_count(queue) > 0)
    {
        r = queue_remove(queue);

        /* Find final states for any failure */
        for (i = 0; i < bnfa->alphabet_size; i++)
        {
            int fs, next;

            s = bnfa_list_get_next_state(bnfa, r, i);
            if (s == (int)BNFA_FAIL_STATE)
                continue;

            if (queue_add(queue, s))
            {
                queue_free(queue);
                return -1;
            }

            fs = fail_state[r];

            /*
             * Locate the next valid state for 'i' starting at fs
             */
            while ((next = bnfa_list_get_next_state(bnfa, fs, i)) == (int)BNFA_FAIL_STATE)
            {
                fs = fail_state[fs];
            }

            /*
             * Update 's' state failure state to point to the next valid state
             */
            fail_state[s] = next;

            /*
             * Copy 'next' state's match_list into 's' state's match_list;
             * we just create new list nodes, the patterns are not copied.
             */
            for (mlist = match_list[next]; mlist; mlist = mlist->next)
            {
                /* Dup the node, don't copy the data */
                px = BNFA_MALLOC(sizeof(bnfa_match_node_t));
                if (!px)
                {
                    queue_free(queue);
                    return -1; /* allocation failed */
                }
                px->data = mlist->data;
                px->next = match_list[s]; /* insert at head */
                match_list[s] = px;
            }
        }
    }

    /* Clean up the queue */
    queue_free(queue);

    /* optimize the failure states */
    if (bnfa->opt)
        bnfa_opt_nfa(bnfa);

    return 0;
}
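/*
 * A minimal sketch of the integer FIFO assumed by bnfa_build_nfa() above
 * and by the acsmx Build_NFA/Build_DFA functions below, mirroring the
 * queue_init/queue_add/queue_remove/queue_count/queue_free API. The node
 * layout and the convention that the add operation returns nonzero on
 * allocation failure are assumptions based on how the callers use it.
 */
#include <stdlib.h>

typedef struct fifo_node { int state; struct fifo_node *next; } fifo_node;
typedef struct { fifo_node *head, *tail; int count; } fifo_t;

static void fifo_init(fifo_t *q) { q->head = q->tail = NULL; q->count = 0; }
static int fifo_count(fifo_t *q) { return q->count; }

static int fifo_add(fifo_t *q, int state) /* returns nonzero on OOM */
{
    fifo_node *n = (fifo_node *)malloc(sizeof(*n));
    if (!n) return -1;
    n->state = state;
    n->next = NULL;
    if (q->tail) q->tail->next = n; else q->head = n;
    q->tail = n;
    q->count++;
    return 0;
}

static int fifo_remove(fifo_t *q) /* pop from the head (FIFO order) */
{
    fifo_node *n = q->head;
    int state = n ? n->state : 0;
    if (n) {
        q->head = n->next;
        if (!q->head) q->tail = NULL;
        q->count--;
        free(n);
    }
    return state;
}

static void fifo_free(fifo_t *q) { while (fifo_count(q) > 0) (void)fifo_remove(q); }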
/* * Send a message. * * The current thread will be blocked until any other thread * receives the message and calls msg_reply() for the target * object. When new message has been reached to the object, it * will be received by highest priority thread waiting for * that message. A thread can send a message to any object if * it knows the object id. */ int msg_send(object_t obj, void *msg, size_t size, u_long timeout) { struct msg_header *hdr; thread_t th; void *kmsg; int rc; if (!user_area(msg)) return EFAULT; if (size < sizeof(struct msg_header)) return EINVAL; sched_lock(); if (!object_valid(obj)) { sched_unlock(); return EINVAL; } if (obj->owner != cur_task() && !task_capable(CAP_IPC)) { sched_unlock(); return EPERM; } /* * A thread can not send a message when the * thread is already receiving from the target * object. This will obviously cause a deadlock. */ if (obj == cur_thread->recvobj) { sched_unlock(); return EDEADLK; } /* * Translate message address to the kernel linear * address. So that a receiver thread can access * the message via kernel pointer. We can catch * the page fault here. */ if ((kmsg = kmem_map(msg, size)) == NULL) { /* Error - no physical address for the message */ sched_unlock(); return EFAULT; } /* * The sender ID in the message header is filled * by the kernel. So, the receiver can trust it. */ hdr = (struct msg_header *)kmsg; hdr->task = cur_task(); /* Save information about the message block. */ cur_thread->msgaddr = kmsg; cur_thread->msgsize = size; /* * If receiver already exists, wake it up. * Highest priority thread will get this message. */ if (!queue_empty(&obj->recvq)) { th = msg_dequeue(&obj->recvq); sched_unsleep(th, 0); } /* * Sleep until we get a reply message. * Note: Do not touch any data in the object * structure after we wakeup. This is because the * target object may be deleted during we were * sleeping. */ cur_thread->sendobj = obj; msg_enqueue(&obj->sendq, cur_thread); rc = sched_tsleep(&ipc_event, timeout); if (rc == SLP_INTR) queue_remove(&cur_thread->ipc_link); cur_thread->sendobj = NULL; sched_unlock(); /* * Check sleep result. */ switch (rc) { case SLP_BREAK: return EAGAIN; /* Receiver has been terminated */ case SLP_INVAL: return EINVAL; /* Object has been deleted */ case SLP_INTR: return EINTR; /* Exception */ case SLP_TIMEOUT: return ETIMEDOUT; /* Timeout */ default: /* DO NOTHING */ break; } return 0; }
// ----------------------- Dispatcher task --------------------------
task_t* scheduler() {
    // Remove and return the first element of the ready queue
    exec = (taskqueue_t*) queue_remove((queue_t**) &ready, (queue_t*) ready);
    if (exec == NULL)
        return NULL; // nothing ready to run
    return exec->task;
}
/*
 * Build a Deterministic Finite Automaton: fill in the failing transitions
 * using the failure links, so every NextState[] entry becomes valid.
 */
static void Build_DFA (ACSM_STRUCT * acsm)
{
    int r, s;
    int i;
    QUEUE q, *queue = &q;

    /* Init a Queue */
    queue_init (queue);

    /* Add the state 0 transitions 1st.
     * The depth-1 nodes fail back to state 0: fail(x) = 0 */
    for (i = 0; i < ALPHABET_SIZE; i++)
    {
        s = acsm->acsmStateTable[0].NextState[i];
        if (s)
        {
            queue_add (queue, s);
            acsm->acsmStateTable[s].FailState = 0;
        }
    }

    /* Build the fail state transitions for each valid state */
    while (queue_count (queue) > 0)
    {
        r = queue_remove (queue);

        /* Find final states for any failure */
        for (i = 0; i < ALPHABET_SIZE; i++)
        {
            int fs, next;

            if ((s = acsm->acsmStateTable[r].NextState[i]) != ACSM_FAIL_STATE)
            {
                queue_add (queue, s);
                fs = acsm->acsmStateTable[r].FailState;

                /*
                 * Locate the next valid state for 'i' starting at fs
                 */
                while ((next = acsm->acsmStateTable[fs].NextState[i]) == ACSM_FAIL_STATE)
                {
                    fs = acsm->acsmStateTable[fs].FailState;
                }

                /*
                 * Update 's' state failure state to point to the next valid state
                 */
                acsm->acsmStateTable[s].FailState = next;

                /* Merge the failure target's match list into state 's' */
                ACSM_PATTERN* pat = acsm->acsmStateTable[next].MatchList;
                for (; pat != NULL; pat = pat->next)
                {
                    AddMatchListEntry(acsm, s, pat);
                }
            }
            else
            {
                /* Fill the failing transition in directly, making the
                 * automaton deterministic: no fail-chain walk at search time */
                acsm->acsmStateTable[r].NextState[i] =
                    acsm->acsmStateTable[acsm->acsmStateTable[r].FailState].NextState[i];
            }
        }
    }

    /* Clean up the queue */
    queue_free (queue);
}
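/*
 * Why Build_DFA fills in the failing transitions: after conversion every
 * NextState[] entry is valid, so the scan loop needs no failure-link
 * chasing. A hedged sketch of such a scan loop follows; the MatchList
 * handling and exact struct fields are assumptions, not the real acsmx.c
 * search routine.
 */
static int acsm_scan_sketch(ACSM_STRUCT *acsm, const unsigned char *text, int n)
{
    int state = 0, matches = 0, i;
    for (i = 0; i < n; i++) {
        state = acsm->acsmStateTable[state].NextState[text[i]]; /* always valid */
        if (acsm->acsmStateTable[state].MatchList)
            matches++; /* some pattern ends at this position */
    }
    return matches;
}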
/* * Receive a message. * * A thread can receive a message from the object which was * created by any thread belongs to same task. If the message * has not arrived yet, it blocks until any message comes in. * * The size argument specifies the "maximum" size of the message * buffer to receive. If the sent message is larger than this * size, the kernel will automatically clip the message to the * receive buffer size. * * When message is received, the sender thread is removed from * object's send queue. So, another thread can receive the * subsequent message from that object. This is important for * the multi-thread server which receives some messages * simultaneously. */ int msg_receive(object_t obj, void *msg, size_t size, u_long timeout) { thread_t th; size_t len; int rc, err = 0; if (!user_area(msg)) return EFAULT; sched_lock(); if (!object_valid(obj)) { err = EINVAL; goto out; } if (obj->owner != cur_task()) { err = EACCES; goto out; } /* * Check if this thread finished previous receive * operation. A thread can not receive different * messages at once. */ if (cur_thread->recvobj) { err = EBUSY; goto out; } cur_thread->recvobj = obj; /* * If no message exists, wait until message arrives. */ while (queue_empty(&obj->sendq)) { /* * Block until someone sends the message. */ msg_enqueue(&obj->recvq, cur_thread); rc = sched_tsleep(&ipc_event, timeout); if (rc != 0) { /* * Receive is failed due to some reasons. */ switch (rc) { case SLP_INVAL: err = EINVAL; /* Object has been deleted */ break; case SLP_INTR: queue_remove(&cur_thread->ipc_link); err = EINTR; /* Got exception */ break; case SLP_TIMEOUT: err = ETIMEDOUT; /* Timeout */ break; default: panic("msg_receive"); break; } cur_thread->recvobj = NULL; goto out; } /* * Even if this thread is woken by the sender thread, * the message may be received by another thread * before this thread runs. This can occur when * higher priority thread becomes runnable at that * time. So, it is necessary to check the existence * of the sender, again. */ } th = msg_dequeue(&obj->sendq); /* * Copy out the message to the user-space. * The smaller buffer size is used as copy length * between sender and receiver thread. */ len = min(size, th->msgsize); if (len > 0) { if (umem_copyout(th->msgaddr, msg, len)) { msg_enqueue(&obj->sendq, th); cur_thread->recvobj = NULL; err = EFAULT; goto out; } } /* * Detach the message from the target object. */ cur_thread->sender = th; th->receiver = cur_thread; out: sched_unlock(); return err; }
void queueSet_endlessIOLoop(queueSet_t self){
    while (1){
        if (_kbhit()){
            char c = _getch();
            if (c == VK_ESCAPE) {
                queueSet_delete(self);
                break;
            }
        }
        int queueNum = rand() % 2 + 1;
        queue_t queue = (queueNum == 1) ? self->queue1 : self->queue2;
        queue_t anotherQueue = (queueNum == 1) ? self->queue2 : self->queue1;
        int num = getRandomInt();
        if (num > 0){
            queue_add(queue, num);
            for (int i = 0; i < self->numOfSubscribers; i++){
                if (self->subscribers[i]->addPrint)
                    self->subscribers[i]->addPrint(self->subscribers[i], queueNum, num);
            }
            if (queue_getSize(queue) > 10){
                if (queue_getSize(anotherQueue) < 9){
                    Sleep(30);
                    int prevElem = queue_remove(queue);
                    queue_add(anotherQueue, prevElem);
                    for (int i = 0; i < self->numOfSubscribers; i++){
                        if (self->subscribers[i]->addPrint_transfer)
                            self->subscribers[i]->addPrint_transfer(self->subscribers[i], queueNum, prevElem);
                    }
                }
                else{
                    /* both queues are full: drop the front element of each */
                    int enqueueMain = queue_remove(queue);
                    int enqueueAnother = queue_remove(anotherQueue);
                    for (int i = 0; i < self->numOfSubscribers; i++){
                        if (self->subscribers[i]->fullOverflow)
                            self->subscribers[i]->fullOverflow(self->subscribers[i], queueNum, enqueueMain, enqueueAnother);
                    }
                }
            }
        }
        else{
            int prevNum = queue_remove(queue);
            for (int i = 0; i < self->numOfSubscribers; i++){
                if (self->subscribers[i]->removePrint)
                    self->subscribers[i]->removePrint(self->subscribers[i], queueNum, prevNum);
            }
            if (queue_getSize(anotherQueue) >= 2){
                Sleep(300);
                int transNum = queue_remove(anotherQueue);
                queue_add(queue, transNum);
                for (int i = 0; i < self->numOfSubscribers; i++){
                    if (self->subscribers[i]->removePrint_transfer)
                        self->subscribers[i]->removePrint_transfer(self->subscribers[i], queueNum, transNum);
                }
            }
            else{
                /* both queues are nearly empty: refill with random values */
                int num11 = abs(getRandomInt());
                int num12 = abs(getRandomInt());
                int num13 = abs(getRandomInt());
                int num22 = abs(getRandomInt());
                int num23 = abs(getRandomInt());
                queue_add(queue, num11);
                queue_add(queue, num12);
                queue_add(queue, num13);
                queue_add(anotherQueue, num22);
                queue_add(anotherQueue, num23);
                for (int i = 0; i < self->numOfSubscribers; i++){
                    if (self->subscribers[i]->fullProcessing)
                        self->subscribers[i]->fullProcessing(self->subscribers[i], queueNum, num11, num12, num13, num22, num23);
                }
            }
        }
        Sleep(300);
    }
}
void scheduler_remove(thread *th) { queue_remove(&main_queue, th); }
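/*
 * scheduler_remove() above delegates to a two-argument queue_remove(queue,
 * element). A minimal sketch of what such a removal might look like for a
 * doubly-linked run queue; the field names (head, next, prev) are assumed,
 * since the real queue layout is not shown here.
 */
typedef struct thread_sketch {
	struct thread_sketch *next, *prev;
} thread_sketch;

typedef struct { thread_sketch *head; } queue_sketch;

static void queue_remove_2arg_sketch(queue_sketch *q, thread_sketch *th)
{
	if (th->prev)
		th->prev->next = th->next;  /* unlink from predecessor */
	else
		q->head = th->next;         /* th was the head */
	if (th->next)
		th->next->prev = th->prev;  /* unlink from successor */
	th->next = th->prev = NULL;
}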
static void bt_health_mdep_cfg_data(const void *buf, uint16_t len) { const struct hal_cmd_health_mdep *cmd = buf; struct health_app *app; struct mdep_cfg *mdep = NULL; uint8_t status; DBG(""); app = queue_find(apps, match_app_by_id, INT_TO_PTR(cmd->app_id)); if (!app) { status = HAL_STATUS_INVALID; goto fail; } mdep = new0(struct mdep_cfg, 1); if (!mdep) { status = HAL_STATUS_INVALID; goto fail; } mdep->role = cmd->role; mdep->data_type = cmd->data_type; mdep->channel_type = android2channel_type(cmd->channel_type); mdep->id = queue_length(app->mdeps) + 1; if (cmd->descr_len > 0) { mdep->descr = malloc0(cmd->descr_len); memcpy(mdep->descr, cmd->descr, cmd->descr_len); } if (!queue_push_tail(app->mdeps, mdep)) { status = HAL_STATUS_FAILED; goto fail; } if (app->num_of_mdep != queue_length(app->mdeps)) goto send_rsp; /* add sdp record from app configuration data */ /* * TODO: Check what to be done if mupltple applications are trying to * register with different role and different configurations. * 1) Does device supports SOURCE and SINK at the same time ? * 2) Does it require different SDP records or one record with * multile MDEP configurations ? */ if (update_sdp_record(app) < 0) { error("health: HDP SDP record preparation failed"); status = HAL_STATUS_FAILED; goto fail; } send_app_reg_notify(app, HAL_HEALTH_APP_REG_SUCCESS); send_rsp: ipc_send_rsp(hal_ipc, HAL_SERVICE_ID_HEALTH, HAL_OP_HEALTH_MDEP, HAL_STATUS_SUCCESS); return; fail: if (status != HAL_STATUS_SUCCESS) { free_mdep_cfg(mdep); queue_remove(apps, app); free_health_app(app); } ipc_send_rsp(hal_ipc, HAL_SERVICE_ID_HEALTH, HAL_OP_HEALTH_MDEP, status); }
/**
 * Purpose: take an item from the queue. If the queue is empty, the
 * behaviour depends on timeout:
 *   1. timeout > 0  suspend the calling task; when the timeout expires,
 *                   wake the task and return a timeout status.
 *   2. timeout = 0  suspend the calling task forever, until data arrives
 *                   in the queue.
 *   3. timeout = -1 do not suspend; if the queue is empty, return a
 *                   would-block status.
 * Whenever a task is suspended, the scheduler is invoked.
 *
 * Parameters:
 *   In:  EASYRTOS_QUEUE *qptr  queue pointer
 *        int32_t timeout       timeout, in system ticks
 *        void *msgptr          buffer for the received message
 *   Out: void *msgptr          the received message
 *
 * Returns:
 *   EASYRTOS_OK           success
 *   EASYRTOS_TIMEOUT      the timeout expired
 *   EASYRTOS_WOULDBLOCK   would have blocked, but timeout was -1
 *   EASYRTOS_ERR_DELETED  the queue was deleted while the task was suspended
 *   EASYRTOS_ERR_CONTEXT  called from an invalid context
 *   EASYRTOS_ERR_PARAM    bad parameter
 *   EASYRTOS_ERR_QUEUE    failed to add the task to the run queue
 *   EASYRTOS_ERR_TIMER    failed to register the timeout timer
 *
 * Functions called:
 *   eCurrentContext();
 *   tcbEnqueuePriority (&qptr->getSuspQ, curr_tcb_ptr);
 *   eTimerRegister (&timerCb);
 *   tcb_dequeue_entry (&qptr->getSuspQ, curr_tcb_ptr);
 *   easyRTOSSched (FALSE);
 *   queue_remove (qptr, msgptr);
 */
ERESULT eQueueTake (EASYRTOS_QUEUE *qptr, int32_t timeout, void *msgptr)
{
  CRITICAL_STORE;
  ERESULT status;
  QUEUE_TIMER timerData;
  EASYRTOS_TIMER timerCb;
  EASYRTOS_TCB *curr_tcb_ptr;

  /* Parameter check */
  if ((qptr == NULL)) //|| (msgptr == NULL))
  {
    status = EASYRTOS_ERR_PARAM;
  }
  else
  {
    /* Enter critical section */
    CRITICAL_ENTER ();

    /* If the queue holds no messages, suspend the task */
    if (qptr->num_msgs_stored == 0)
    {
      /* timeout >= 0: suspend the task */
      if (timeout >= 0)
      {
        /* Get the current task's TCB */
        curr_tcb_ptr = eCurrentContext();

        /* Check that we are running in task context */
        if (curr_tcb_ptr)
        {
          /* Add the current task to the receive-suspend queue */
          if (tcbEnqueuePriority (&qptr->getSuspQ, curr_tcb_ptr) == EASYRTOS_OK)
          {
            /* Mark the task as suspended */
            curr_tcb_ptr->state = TASK_PENDED;
            status = EASYRTOS_OK;

            /* Register a timer callback if a timeout was requested */
            if (timeout)
            {
              /* Fill in the data the timer needs */
              timerData.tcb_ptr = curr_tcb_ptr;
              timerData.queue_ptr = qptr;
              timerData.suspQ = &qptr->getSuspQ;

              /* Fill in the data the callback needs */
              timerCb.cb_func = eQueueTimerCallback;
              timerCb.cb_data = (POINTER)&timerData;
              timerCb.cb_ticks = timeout;

              /* Store the timer callback in the TCB so it can be cancelled */
              curr_tcb_ptr->pended_timo_cb = &timerCb;

              /* Register the timer */
              if (eTimerRegister (&timerCb) != EASYRTOS_OK)
              {
                /* Registration failed */
                status = EASYRTOS_ERR_TIMER;
                (void)tcb_dequeue_entry (&qptr->getSuspQ, curr_tcb_ptr);
                curr_tcb_ptr->state = TASK_RUN;
                curr_tcb_ptr->pended_timo_cb = NULL;
              }
            }
            /* No timer needed */
            else
            {
              curr_tcb_ptr->pended_timo_cb = NULL;
            }

            /* Exit critical section */
            CRITICAL_EXIT();

            if (status == EASYRTOS_OK)
            {
              /* The current task is suspended; invoke the scheduler */
              easyRTOSSched (FALSE);

              /* Execution resumes here after the queue is deleted,
                 the timeout expires, or eQueueGive is called */
              status = curr_tcb_ptr->pendedWakeStatus;

              /**
               * If pendedWakeStatus is EASYRTOS_OK, the wakeup means a
               * message arrived; any other value means the queue was
               * deleted or the timeout expired, so just return it.
               */
              if (status == EASYRTOS_OK)
              {
                /* Enter critical section */
                CRITICAL_ENTER();

                /* Copy the message out */
                status = queue_remove (qptr, msgptr);

                /* Exit critical section */
                CRITICAL_EXIT();
              }
            }
          }
          else
          {
            /* Failed to add the task to the suspend list */
            CRITICAL_EXIT ();
            status = EASYRTOS_ERR_QUEUE;
          }
        }
        else
        {
          /* Not in task context, so the task cannot be suspended */
          CRITICAL_EXIT ();
          status = EASYRTOS_ERR_CONTEXT;
        }
      }
      else
      {
        /* timeout == -1: do not suspend, and the queue is empty */
        CRITICAL_EXIT();
        status = EASYRTOS_WOULDBLOCK;
      }
    }
    else
    {
      /* No need to block: copy the message out directly */
      status = queue_remove (qptr, msgptr);

      /* Exit critical section */
      CRITICAL_EXIT ();

      /**
       * Only call the scheduler from task context; in interrupt
       * context eIntExit() invokes the scheduler instead.
       */
      if (eCurrentContext())
        easyRTOSSched (FALSE);
    }
  }

  return (status);
}
/* * Receive a message. * * A thread can receive a message from the object which was * created by any thread belongs to same task. If the message * has not reached yet, it blocks until any message comes in. * * The size argument specifies the "maximum" size of the message * buffer to receive. If the sent message is larger than this * size, the kernel will automatically clip the message to this * maximum buffer size. * * When a message is received, the sender thread is removed from * object's send queue. So, another thread can receive the * subsequent message from that object. This is important for * the multi-thread server which must receive multiple messages * simultaneously. */ int msg_receive(object_t obj, void *msg, size_t size) { thread_t t; size_t len; int rc, error = 0; if (!user_area(msg)) return EFAULT; sched_lock(); if (!object_valid(obj)) { sched_unlock(); return EINVAL; } if (obj->owner != curtask) { sched_unlock(); return EACCES; } /* * Check if this thread finished previous receive * operation. A thread can not receive different * messages at once. */ if (curthread->recvobj) { sched_unlock(); return EBUSY; } curthread->recvobj = obj; /* * If no message exists, wait until message arrives. */ while (queue_empty(&obj->sendq)) { /* * Block until someone sends a message. */ msg_enqueue(&obj->recvq, curthread); rc = sched_sleep(&ipc_event); if (rc != 0) { /* * Receive is failed due to some reasons. */ switch (rc) { case SLP_INVAL: error = EINVAL; /* Object has been deleted */ break; case SLP_INTR: queue_remove(&curthread->ipc_link); error = EINTR; /* Got exception */ break; default: panic("msg_receive"); break; } curthread->recvobj = NULL; sched_unlock(); return error; } /* * Check the existence of the sender thread again. * Even if this thread is woken by the sender thread, * the message may be received by another thread. * This may happen when another high priority thread * becomes runnable before we receive the message. */ } t = msg_dequeue(&obj->sendq); /* * Copy out the message to the user-space. */ len = MIN(size, t->msgsize); if (len > 0) { if (copyout(t->msgaddr, msg, len)) { msg_enqueue(&obj->sendq, t); curthread->recvobj = NULL; sched_unlock(); return EFAULT; } } /* * Detach the message from the target object. */ curthread->sender = t; t->receiver = curthread; sched_unlock(); return error; }
static unsigned decode_nd_tree (wfa_t *wfa, bitfile_t *input) /* * Read 'wfa' prediction tree of given 'input' stream. * * No return value. * * Side effects: * 'wfa->into' is filled with the decoded values */ { lqueue_t *queue; /* queue of states */ int next, state; /* state and its current child */ unsigned total = 0; /* total number of predicted states */ u_word_t sum0, sum1; /* Probability model */ u_word_t code; /* The present input code value */ u_word_t low; /* Start of the current code range */ u_word_t high; /* End of the current code range */ /* * Initialize arithmetic decoder */ code = get_bits (input, 16); low = 0; high = 0xffff; sum0 = 1; sum1 = 11; queue = alloc_queue (sizeof (int)); state = wfa->root_state; queue_append (queue, &state); /* * Traverse the WFA tree in breadth first order (using a queue). */ while (queue_remove (queue, &next)) { unsigned label; if (wfa->level_of_state [next] > wfa->wfainfo->p_max_level + 1) { /* * Nondetermismn is not allowed at levels larger than * 'wfa->wfainfo->p_max_level'. */ for (label = 0; label < MAXLABELS; label++) if (ischild (state = wfa->tree [next][label])) queue_append (queue, &state); /* continue with childs */ } else if (wfa->level_of_state [next] > wfa->wfainfo->p_min_level) { for (label = 0; label < MAXLABELS; label++) if (ischild (state = wfa->tree [next][label])) { unsigned count; /* Current interval count */ unsigned range; /* Current interval range */ count = (((code - low) + 1) * sum1 - 1) / ((high - low) + 1); if (count < sum0) { /* * Decode a '0' symbol * First, the range is expanded to account for the * symbol removal. */ range = (high - low) + 1; high = low + (u_word_t) ((range * sum0) / sum1 - 1 ); RESCALE_INPUT_INTERVAL; /* * Update the frequency counts */ sum0++; sum1++; if (sum1 > 50) /* scale the symbol frequencies */ { sum0 >>= 1; sum1 >>= 1; if (!sum0) sum0 = 1; if (sum0 >= sum1) sum1 = sum0 + 1; } if (wfa->level_of_state [state] > wfa->wfainfo->p_min_level) queue_append (queue, &state); } else { /* * Decode a '1' symbol * First, the range is expanded to account for the * symbol removal. */ range = (high - low) + 1; high = low + (u_word_t) ((range * sum1) / sum1 - 1); low = low + (u_word_t) ((range * sum0) / sum1); RESCALE_INPUT_INTERVAL; /* * Update the frequency counts */ sum1++; if (sum1 > 50) /* scale the symbol frequencies */ { sum0 >>= 1; sum1 >>= 1; if (!sum0) sum0 = 1; if (sum0 >= sum1) sum1 = sum0 + 1; } append_edge (next, 0, -1, label, wfa); total++; }
/* * Send a message. * * The current thread will be blocked until any other thread * receives and reply the message. A thread can send a * message to any object if it knows the object id. */ int msg_send(object_t obj, void *msg, size_t size) { struct msg_header *hdr; thread_t t; void *kmsg; int rc; if (!user_area(msg)) return EFAULT; if (size < sizeof(struct msg_header)) return EINVAL; sched_lock(); if (!object_valid(obj)) { sched_unlock(); return EINVAL; } /* * A thread can not send a message when it is * already receiving from the target object. * It will obviously cause a deadlock. */ if (obj == curthread->recvobj) { sched_unlock(); return EDEADLK; } /* * Translate message address to the kernel linear * address. So that a receiver thread can access * the message via kernel pointer. We can catch * the page fault here. */ if ((kmsg = kmem_map(msg, size)) == NULL) { sched_unlock(); return EFAULT; } curthread->msgaddr = kmsg; curthread->msgsize = size; /* * The sender ID is filled in the message header * by the kernel. So, the receiver can trust it. */ hdr = (struct msg_header *)kmsg; hdr->task = curtask; /* * If receiver already exists, wake it up. * The highest priority thread can get the message. */ if (!queue_empty(&obj->recvq)) { t = msg_dequeue(&obj->recvq); sched_unsleep(t, 0); } /* * Sleep until we get a reply message. * Note: Do not touch any data in the object * structure after we wakeup. This is because the * target object may be deleted while we are sleeping. */ curthread->sendobj = obj; msg_enqueue(&obj->sendq, curthread); rc = sched_sleep(&ipc_event); if (rc == SLP_INTR) queue_remove(&curthread->ipc_link); curthread->sendobj = NULL; sched_unlock(); /* * Check sleep result. */ switch (rc) { case SLP_BREAK: return EAGAIN; /* Receiver has been terminated */ case SLP_INVAL: return EINVAL; /* Object has been deleted */ case SLP_INTR: return EINTR; /* Exception */ default: /* DO NOTHING */ break; } return 0; }
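/*
 * msg_send()/msg_receive() above park threads on per-object send and
 * receive queues with msg_enqueue() and pick them up with msg_dequeue().
 * Below is a hedged sketch of a priority-ordered dequeue consistent with
 * the "highest priority thread can get the message" comment; the queue
 * and thread layouts are illustrative assumptions, not the kernel's
 * actual structures.
 */
struct sk_thread {
	int prio;                 /* smaller value = higher priority */
	struct sk_thread *next;
};

static struct sk_thread *msg_dequeue_sketch(struct sk_thread **q)
{
	struct sk_thread **best = q, **it, *t;

	if (*q == NULL)
		return NULL;
	for (it = &(*q)->next; *it; it = &(*it)->next)
		if ((*it)->prio < (*best)->prio)
			best = it;        /* remember the highest-priority waiter */
	t = *best;
	*best = t->next;          /* unlink it */
	t->next = NULL;
	return t;
}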
kern_return_t task_swapin(task_t task, boolean_t make_unswappable) { register queue_head_t *list; register thread_act_t thr_act, next; thread_t thread; int s; boolean_t swappable = TRUE; task_lock(task); switch (task->swap_state) { case TASK_SW_OUT: { vm_map_t map = task->map; /* * Task has made it all the way out, which means * that vm_map_res_deallocate has been done; set * state to TASK_SW_COMING_IN, then bring map * back in. We could actually be racing with * the thread_swapout_enqueue, which does the * vm_map_res_deallocate, but that race is covered. */ task->swap_state = TASK_SW_COMING_IN; assert(task->swap_ast_waiting == 0); assert(map->res_count >= 0); task_unlock(task); mutex_lock(&map->s_lock); vm_map_res_reference(map); mutex_unlock(&map->s_lock); task_lock(task); assert(task->swap_state == TASK_SW_COMING_IN); } break; case TASK_SW_GOING_OUT: /* * Task isn't all the way out yet. There is * still at least one thread not swapped, and * vm_map_res_deallocate has not been done. */ task->swap_state = TASK_SW_COMING_IN; assert(task->swap_ast_waiting > 0 || (task->swap_ast_waiting == 0 && task->thr_act_count == 0)); assert(task->map->res_count > 0); TASK_STATS_INCR(task_sw_race_going_out); break; case TASK_SW_IN: assert(task->map->res_count > 0); #if TASK_SW_DEBUG task_swapper_lock(); if (task_swap_debug && on_swapped_list(task)) { printf("task 0x%X on list, state is SW_IN\n", task); Debugger(""); } task_swapper_unlock(); #endif /* TASK_SW_DEBUG */ TASK_STATS_INCR(task_sw_race_in); if (make_unswappable) { task->swap_state = TASK_SW_UNSWAPPABLE; task_unlock(task); task_swapout_ineligible(task); } else task_unlock(task); return(KERN_SUCCESS); case TASK_SW_COMING_IN: /* * Raced with another task_swapin and lost; * wait for other one to complete first */ assert(task->map->res_count >= 0); /* * set MAKE_UNSWAPPABLE so that whoever is swapping * the task in will make it unswappable, and return */ if (make_unswappable) task->swap_flags |= TASK_SW_MAKE_UNSWAPPABLE; task->swap_flags |= TASK_SW_WANT_IN; assert_wait((event_t)&task->swap_state, FALSE); task_unlock(task); thread_block((void (*)(void)) 0); TASK_STATS_INCR(task_sw_race_coming_in); return(KERN_SUCCESS); case TASK_SW_UNSWAPPABLE: /* * This can happen, since task_terminate * unconditionally calls task_swapin. */ task_unlock(task); return(KERN_SUCCESS); default: panic("task_swapin bad state"); break; } if (make_unswappable) task->swap_flags |= TASK_SW_MAKE_UNSWAPPABLE; assert(task->swap_state == TASK_SW_COMING_IN); task_swapper_lock(); #if TASK_SW_DEBUG if (task_swap_debug && !on_swapped_list(task)) { printf("task 0x%X not on list\n", task); Debugger(""); } #endif /* TASK_SW_DEBUG */ queue_remove(&swapped_tasks, task, task_t, swapped_tasks); tasks_swapped_out--; task_swapins++; task_swapper_unlock(); /* * Iterate through all threads for this task and * release them, as required. They may not have been swapped * out yet. The task remains locked throughout. 
*/ list = &task->thr_acts; thr_act = (thread_act_t) queue_first(list); while (!queue_end(list, (queue_entry_t) thr_act)) { boolean_t need_to_release; next = (thread_act_t) queue_next(&thr_act->thr_acts); /* * Keep task_swapper_lock across thread handling * to synchronize with task_swap_swapout_thread */ task_swapper_lock(); thread = act_lock_thread(thr_act); s = splsched(); if (thr_act->ast & AST_SWAPOUT) { /* thread hasn't gotten the AST yet, just clear it */ thread_ast_clear(thr_act, AST_SWAPOUT); need_to_release = FALSE; TASK_STATS_INCR(task_sw_before_ast); splx(s); act_unlock_thread(thr_act); } else { /* * If AST_SWAPOUT was cleared, then thread_hold, * or equivalent was done. */ need_to_release = TRUE; /* * Thread has hit AST, but it may not have * been dequeued yet, so we need to check. * NOTE: the thread may have been dequeued, but * has not yet been swapped (the task_swapper_lock * has been dropped, but the thread is not yet * locked), and the TH_SW_TASK_SWAPPING flag may * not have been cleared. In this case, we will do * an extra remque, which the task_swap_swapout_thread * has made safe, and clear the flag, which is also * checked by the t_s_s_t before doing the swapout. */ if (thread) thread_lock(thread); if (thr_act->swap_state & TH_SW_TASK_SWAPPING) { /* * hasn't yet been dequeued for swapout, * so clear flags and dequeue it first. */ thr_act->swap_state &= ~TH_SW_TASK_SWAPPING; assert(thr_act->thread == THREAD_NULL || !(thr_act->thread->state & TH_SWAPPED_OUT)); queue_remove(&swapout_thread_q, thr_act, thread_act_t, swap_queue); TASK_STATS_INCR(task_sw_before_swap); } else { TASK_STATS_INCR(task_sw_after_swap); /* * It's possible that the thread was * made unswappable before hitting the * AST, in which case it's still running. */ if (thr_act->swap_state == TH_SW_UNSWAPPABLE) { need_to_release = FALSE; TASK_STATS_INCR(task_sw_unswappable); } } if (thread) thread_unlock(thread); splx(s); act_unlock_thread(thr_act); } task_swapper_unlock(); /* * thread_release will swap in the thread if it's been * swapped out. */ if (need_to_release) { act_lock_thread(thr_act); thread_release(thr_act); act_unlock_thread(thr_act); } thr_act = next; } if (task->swap_flags & TASK_SW_MAKE_UNSWAPPABLE) { task->swap_flags &= ~TASK_SW_MAKE_UNSWAPPABLE; task->swap_state = TASK_SW_UNSWAPPABLE; swappable = FALSE; } else { task->swap_state = TASK_SW_IN; } task_swaprss_in += pmap_resident_count(task->map->pmap); task_swap_total_time += sched_tick - task->swap_stamp; /* note when task came back in */ task->swap_stamp = sched_tick; if (task->swap_flags & TASK_SW_WANT_IN) { task->swap_flags &= ~TASK_SW_WANT_IN; thread_wakeup((event_t)&task->swap_state); } assert((task->swap_flags & TASK_SW_ELIGIBLE) == 0); task_unlock(task); #if TASK_SW_DEBUG task_swapper_lock(); if (task_swap_debug && on_swapped_list(task)) { printf("task 0x%X on list at end of swap in\n", task); Debugger(""); } task_swapper_unlock(); #endif /* TASK_SW_DEBUG */ /* * Make the task eligible to be swapped again */ if (swappable) task_swapout_eligible(task); return(KERN_SUCCESS); }
static DBusMessage *characteristic_start_notify(DBusConnection *conn, DBusMessage *msg, void *user_data) { struct characteristic *chrc = user_data; struct bt_gatt_client *gatt = chrc->service->client->gatt; const char *sender = dbus_message_get_sender(msg); struct async_dbus_op *op; struct notify_client *client; if (!(chrc->props & BT_GATT_CHRC_PROP_NOTIFY || chrc->props & BT_GATT_CHRC_PROP_INDICATE)) return btd_error_not_supported(msg); /* Each client can only have one active notify session. */ client = queue_find(chrc->notify_clients, match_notify_sender, sender); if (client) return client->notify_id ? btd_error_failed(msg, "Already notifying") : btd_error_in_progress(msg); client = notify_client_create(chrc, sender); if (!client) return btd_error_failed(msg, "Failed allocate notify session"); queue_push_tail(chrc->notify_clients, client); queue_push_tail(chrc->service->client->all_notify_clients, client); /* * If the device is currently not connected, return success. We will * automatically try and register all clients when a GATT client becomes * ready. */ if (!gatt) { DBusMessage *reply; reply = g_dbus_create_reply(msg, DBUS_TYPE_INVALID); if (reply) return reply; /* * Clean up and respond with an error instead of timing out to * avoid any ambiguities. */ error("Failed to construct D-Bus message reply"); goto fail; } op = new0(struct async_dbus_op, 1); if (!op) goto fail; op->data = client; op->msg = dbus_message_ref(msg); client->notify_id = bt_gatt_client_register_notify(gatt, chrc->value_handle, register_notify_cb, notify_cb, op, async_dbus_op_free); if (client->notify_id) return NULL; async_dbus_op_free(op); fail: queue_remove(chrc->notify_clients, client); queue_remove(chrc->service->client->all_notify_clients, client); /* Directly free the client */ notify_client_free(client); return btd_error_failed(msg, "Failed to register notify session"); }
/* * Manage next request event */ void datadev_request( datadev_t ddp) { kern_return_t rc; io_req_t ior; spl_t s; s = splsched(); mutex_lock(&datadev_lock); if (ddp != (datadev_t)0) { /* * Queue current request */ queue_enter(&datadev_wait, ddp, datadev_t, dd_chain); } /* * Try to start next request */ if (queue_empty(&datadev_wait) || datadev_ior == (io_req_t)0) { /* * No request or no pending read */ mutex_unlock(&datadev_lock); splx(s); return; } /* * Extract first waiting request */ ddp = (datadev_t)queue_first(&datadev_wait); /* * Extract pending I/O request */ ior = datadev_ior; datadev_ior = (io_req_t)0; /* * Allocate read memory */ if (ior->io_count < ddp->dd_size) { /* * Return size error for this request */ mutex_unlock(&datadev_lock); splx(s); ior->io_error = D_INVALID_SIZE; } else { /* * Move waiting request from the waiting queue to the active one. */ queue_remove(&datadev_wait, ddp, datadev_t, dd_chain); queue_enter(&datadev_curr, ddp, datadev_t, dd_chain); mutex_unlock(&datadev_lock); splx(s); /* * Activate the request */ bcopy(ddp->dd_name, ior->io_data, ddp->dd_size); ddp->dd_dev = ior->io_unit; ior->io_residual = ior->io_count - ddp->dd_size; ior->io_error = D_SUCCESS; } io_completed(ior, FALSE); }
/* * Removes an entry from pre_cache or cache. Removes from the hash table. */ static void del(struct mq_policy *mq, struct entry *e) { queue_remove(&e->list); hash_remove(e); }
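/*
 * The single-argument queue_remove(&e->list) above removes a node from
 * whichever list it is on, in the style of the kernel's list_del(). A
 * minimal self-contained sketch of that operation; the struct and field
 * names here are illustrative assumptions.
 */
struct list_sketch {
	struct list_sketch *next, *prev;
};

static void queue_remove_self_sketch(struct list_sketch *node)
{
	node->prev->next = node->next;  /* bypass the node going forward */
	node->next->prev = node->prev;  /* bypass it going backward */
	node->next = node->prev = node; /* leave the node self-linked */
}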
channel_t channel_select(int need_default, char *fmt, ...) {
    va_list argp;
    int cnt = strlen(fmt);
    queue_t select_node[HEAP_SELECT_CNT];
    channel_t channels[HEAP_SELECT_CNT];
    channel *chan = NULL;
    uvc_ctx *ctx = uvc_self();
    channel_t c;
    int i = 0;

    if (cnt < 0) return -1;
    va_start(argp, fmt);
    for (i = 0; i < cnt; i++) {
        c = va_arg(argp, channel_t);
        channels[i] = c;
        select_node[i].ext = ctx;
        if (fmt[i] == 'r' && channel_readable(c)) {
            va_end(argp);
            return c;
        }
        else if (fmt[i] == 'w' && channel_writeable(c)) {
            va_end(argp);
            return c;
        }
        else if (fmt[i] != 'w' && fmt[i] != 'r') {
            abort();
        }
    }
    va_end(argp);

    if (need_default) {
        return -1;
    }

    for (i = 0; i < cnt; i++) {
        c = channels[i];
        if (fmt[i] == 'r') {
            chan = channel_pool_get(get_chan_pool(), c);
            queue_insert_head(&chan->readq, &select_node[i]);
        }
        else if (fmt[i] == 'w') {
            chan = channel_pool_get(get_chan_pool(), c);
            queue_insert_head(&chan->writq, &select_node[i]);
        }
        else if (fmt[i] != 'w' && fmt[i] != 'r') {
            abort();
        }
    }

    uvc_yield();

    c = 0;
    for (i = 0; i < cnt; i++) {
        if (fmt[i] == 'r' && channel_readable(channels[i])) {
            assert(c == 0);
            c = channels[i];
        }
        else if (fmt[i] == 'w' && channel_writeable(channels[i])) {
            assert(c == 0);
            c = channels[i];
        }
        else {
            queue_remove(&select_node[i]);
        }
    }
    assert(c != 0); /* must not fall through with no ready channel */
    return c;
}
/*
 * Build Non-Deterministic Finite Automata
 */
static void Build_NFA (ACSM_STRUCT * acsm)
{
    int r, s;
    int i;
    QUEUE q, *queue = &q;
    ACSM_PATTERN * mlist = 0;
    ACSM_PATTERN * px = 0;

    /* Init a Queue */
    queue_init (queue);

    /* Add the state 0 transitions 1st */
    for (i = 0; i < ALPHABET_SIZE; i++)
    {
        s = acsm->acsmStateTable[0].NextState[i];
        if (s)
        {
            queue_add (queue, s);
            acsm->acsmStateTable[s].FailState = 0;
        }
    }

    /* Build the fail state transitions for each valid state */
    while (queue_count (queue) > 0)
    {
        r = queue_remove (queue);

        /* Find final states for any failure */
        for (i = 0; i < ALPHABET_SIZE; i++)
        {
            int fs, next;

            if ((s = acsm->acsmStateTable[r].NextState[i]) != ACSM_FAIL_STATE)
            {
                queue_add (queue, s);
                fs = acsm->acsmStateTable[r].FailState;

                /*
                 * Locate the next valid state for 'i' starting at fs
                 */
                while ((next = acsm->acsmStateTable[fs].NextState[i]) == ACSM_FAIL_STATE)
                {
                    fs = acsm->acsmStateTable[fs].FailState;
                }

                /*
                 * Update 's' state failure state to point to the next valid state
                 */
                acsm->acsmStateTable[s].FailState = next;

                /*
                 * Copy 'next' state's MatchList to 's' state's MatchList;
                 * we copy them so each list can be AC_FREE'd later,
                 * else we could just manipulate pointers to fake the copy.
                 */
                for (mlist = acsm->acsmStateTable[next].MatchList;
                     mlist != NULL;
                     mlist = mlist->next)
                {
                    px = CopyMatchListEntry (mlist);
                    if (!px)
                    {
                        //FatalError("*** Out of memory Initializing Aho Corasick in acsmx.c ****");
                    }

                    /* Insert at front of MatchList */
                    px->next = acsm->acsmStateTable[s].MatchList;
                    acsm->acsmStateTable[s].MatchList = px;
                }
            }
        }
    }

    /* Clean up the queue */
    queue_free (queue);
}
int main() {
    TQueue queue;
    TInfo info;
    int select, capacity;

    printf("Enter the size of the queue! \n");
    scanf("%d", &capacity);
    queue = queue_create(capacity);
    select = menu();
    while (select >= 1 && select <= 4) {
        switch (select) {
        case 1:
            if (!queue_is_full(&queue)) {
                info = ReadInfo();
                queue_add(&queue, info);
            } else {
                printf("The queue is full! \n");
            }
            select = menu();
            break;
        case 2:
            if (!queue_is_empty(&queue)) {
                info = queue_front(&queue);
                printf("\n Next customer:\n");
                PrintInfo(&info);
            } else {
                printf("The queue is empty! \n");
            }
            select = menu();
            break;
        case 3:
            if (!queue_is_empty(&queue)) {
                info = queue_remove(&queue);
                printf("\n Now serving customer:\n");
                PrintInfo(&info);
            } else {
                printf("The queue is empty! \n");
            }
            select = menu();
            break;
        case 4:
            queue_destroy(&queue);
            printf("Program closed correctly! \n");
            select = 0;
            break;
        }
    }
    return 0;
}
context *process_schedule() {
    process *p;
    int prio;
    int level;

    // don't preempt if we're at or above LEVEL_NOPREEMPT
    if(cpu_levels[smp_cpu_id()] >= LEVEL_NOPREEMPT)
        return &process_current[smp_cpu_id()]->ctx;

    // make sure we're not interrupted
    level = level_go(LEVEL_NOINTS);
    spinlock_grab(&process_spinlock);

    // find a process to run
    // if nothing to run then just return
    if(!process_queues_runnable_mask) {
        // make sure the currently running process still wants to run
        if(process_current[smp_cpu_id()]->msgstate == PROCESS_MSGSTATE_READY) {
            spinlock_release(&process_spinlock);
            level_return(level);
            return &process_current[smp_cpu_id()]->ctx;
        } else {
            // if not, then we truly have nothing to run, and we can't go back
            // to the old process - in this case, when all else fails, panic :-)
            panic("NO RUNNABLE PROCESSES AND CURRENT PROCESS IS NOT RUNNABLE");
        }
    }

    // put the currently-running process back on the run queue if it hasn't
    // been put somewhere else already
    // we need to do this here so that it has a chance of being chosen below
    // (if it's the highest-priority process)
    if(process_current[smp_cpu_id()] && !process_current[smp_cpu_id()]->queue &&
       process_current[smp_cpu_id()]->msgstate == PROCESS_MSGSTATE_READY) {
        process *oldproc = process_current[smp_cpu_id()];
        oldproc->running = 0;
        oldproc->level = level;
        queue_remove(oldproc);
        queue_insert(&process_queues[oldproc->prio], oldproc);
        process_queues_runnable_mask |= (1 << oldproc->prio);
    }

restart_sched:
    // highest priority with a runnable process
    prio = bits_highest_set(process_queues_runnable_mask);
    if(!process_queues_runnable_mask)
        panic("NO RUNNABLE PROCESSES");

    // take the process off the run queue
    p = process_queues[prio].head;
    queue_remove(p);

    // if the queue is now empty, take note of that
    if(!process_queues[prio].head)
        process_queues_runnable_mask &= ~(1 << prio);

    // if the process has the delete flag set, delete it
    if(p->exit_flag && (p->level == LEVEL_USER)) {
        spinlock_release(&process_spinlock);
        level_return(level);
        process_do_delete(p);
        level = level_go(LEVEL_NOINTS);
        spinlock_grab(&process_spinlock);
        goto restart_sched;
    }

    // set the process state as "running"
    p->running = 1;
    p->cpu = smp_cpu_id();
    process_current[smp_cpu_id()] = p;

    // release locks
    spinlock_release(&process_spinlock);
    level_return(level);

    // return new context
    return &p->ctx;
}
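/*
 * process_schedule() above uses the highest set bit of the runnable mask
 * as the priority to serve next. A hedged sketch of bits_highest_set(),
 * under the assumption that it returns the index of the most significant
 * set bit; the real implementation may well use a hardware instruction.
 */
static int bits_highest_set_sketch(unsigned int mask)
{
    int idx = 0;
    while (mask >>= 1)
        idx++; // count how far the top set bit is from bit 0
    return idx;
}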
void event_remove(EventQueue* queue, Event* event) { queue_remove((Queue*)queue, (QueueItem*)event); }
static void destroy_gatt_req(struct gatt_request *req) { queue_remove(req->dis->gatt_op, req); bt_dis_unref(req->dis); free(req); }
// Lets the user clean up to prevent memory leaks.
void queue_destroy(queue* q) {
  queue_element* elem;
  // Drain any remaining elements, then free the queue itself.
  while (queue_remove(q, &elem)) {}
  free(q);
}
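/*
 * A self-contained sketch of the drain-and-destroy idiom used by
 * queue_destroy() above: keep removing until queue_remove() reports the
 * queue is empty, then free the container. The struct layout here is an
 * assumption for illustration; only the idiom matches the code above.
 */
#include <stdbool.h>
#include <stdlib.h>

typedef struct qelem_sk { struct qelem_sk *next; } qelem_sk;
typedef struct { qelem_sk *head; } queue_sk;

static bool queue_remove_sk(queue_sk *q, qelem_sk **out) {
  if (!q->head)
    return false;  // empty: signals the drain loop to stop
  *out = q->head;
  q->head = q->head->next;
  return true;
}

static void queue_destroy_sk(queue_sk *q) {
  qelem_sk *e;
  while (queue_remove_sk(q, &e))
    free(e);  // unlike the original, also free each element
  free(q);
}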
type_datum_t *declare_type(unsigned char primary, unsigned char isattr) { char *id; type_datum_t *typdatum; int retval; uint32_t value = 0; id = (char *)queue_remove(id_queue); if (!id) { yyerror("no type/attribute name?"); return NULL; } if (strcmp(id, "self") == 0) { yyerror ("'self' is a reserved type name and may not be declared."); free(id); return NULL; } typdatum = (type_datum_t *) malloc(sizeof(type_datum_t)); if (!typdatum) { yyerror("Out of memory!"); free(id); return NULL; } type_datum_init(typdatum); typdatum->primary = primary; typdatum->flavor = isattr ? TYPE_ATTRIB : TYPE_TYPE; retval = declare_symbol(SYM_TYPES, id, typdatum, &value, &value); if (retval == 0 || retval == 1) { if (typdatum->primary) { typdatum->s.value = value; } } else { /* error occurred (can't have duplicate type declarations) */ free(id); type_datum_destroy(typdatum); free(typdatum); } switch (retval) { case -3:{ yyerror("Out of memory!"); return NULL; } case -2:{ yyerror2("duplicate declaration of type/attribute"); return NULL; } case -1:{ yyerror("could not declare type/attribute here"); return NULL; } case 0: case 1:{ return typdatum; } default:{ abort(); /* should never get here */ } } }