/*
 * Pop the next packet entry off one of the CCB's FIFOs.
 *
 * @hw:  device instance the FIFO lives on
 * @ccb: channel control block holding the send/recv queues
 * @dir: SENDQ selects the send queue, anything else the receive queue
 * @id:  out (optional) - packet id extracted from the FIFO entry
 * @len: out (optional) - packet length extracted from the FIFO entry
 * @pkt: out (optional) - pointer to the packet's descriptor memory
 *
 * Returns the (nonzero-on-success) result of fifo_dequeue(); the out
 * parameters are written only when an entry was actually dequeued.
 */
static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb, int dir,
			   int *id, int *len, void **pkt)
{
	char *fifo_base;
	char *desc_base;
	int entry = 0;
	int rv;

	/* select the per-direction FIFO and descriptor areas */
	if (dir == SENDQ) {
		fifo_base = ccb->ccb_u1.send_fifobar;
		desc_base = ccb->ccb_u2.send_desc;
	} else {
		fifo_base = ccb->ccb_u3.recv_fifobar;
		desc_base = ccb->ccb_u4.recv_desc;
	}

	rv = fifo_dequeue(hw, fifo_base, &entry);
	if (rv) {
		int pkt_id = get_entry_id(entry);

		if (id)
			*id = pkt_id;
		if (len)
			*len = get_entry_len(entry);
		if (pkt)
			*pkt = (void *)(desc_base + desc_mem_sz(pkt_id));
	}

	return rv;
}
/*
 * Consumer thread body: synchronise with all enqueuer/dequeuer threads at a
 * two-phase spin barrier, drain the shared queue until it reports empty,
 * then rendezvous again on the way out.
 *
 * NOTE(review): b1/b2/d1/d2 appear to be plain shared ints mutated by
 * inc()/dec() and busy-waited on here without atomics or volatile -- this is
 * a data race under the C memory model unless inc()/dec() provide the needed
 * atomicity/fences; confirm their implementation.
 */
void *dequer(void *arg)
{
    int i;

    /* arrival barrier, phase 1 then phase 2: wait until every one of the
       NENQER + NDEQER threads has checked in */
    inc(&b1);
    while (b1!=(NENQER+NDEQER));
    inc(&b2);
    while (b2!=(NENQER+NDEQER));

    /* drain the queue; a dequeued value of 0 is the "empty" sentinel */
    while (1) {
        i=(int)fifo_dequeue(q);
        if (i==0) break;
# if (PRINT)
        printf("%d\n", i);
# endif
# if (CHECK)
        /* duplicate-detection: each value 1..N should be seen exactly once */
        if (chararr[i-1]) printf("%d is already seen\n", i);
        else chararr[i-1]=1;
# endif
    }

    /* departure barrier, mirrored: count everyone back down to zero */
    dec(&d1);
    while (d1!=0);
    dec(&d2);
    while (d2!=0);
    return NULL;
}
static void gather() { unsigned long int nb_token = 0; while (nb_token != MAX_TOKEN) { unsigned int tmp_token = fifo_dequeue(&process_to_gather); nb_token++; // display critical section // uart_lock_acquire(); uart_puts("gather on core "); uart_putd(core_id()); uart_puts(" - consuming token "); uart_putd(tmp_token); uart_putc('\n'); uart_lock_release(); // display critical section // } // display critical section // uart_lock_acquire(); uart_puts("gather on core "); uart_putd(core_id()); uart_puts(" - END\n"); uart_lock_release(); // display critical section // end_scatter_gather = 1; }
/*
 * OpenMP stress test for the fifo queue: four producer tasks each enqueue
 * 1000 tagged values while four consumer tasks concurrently dequeue, then
 * the main thread drains whatever remains and checks the queue is empty.
 *
 * NOTE(review): the task loop treats any nonzero dequeue as valid while the
 * drain loop stops on -1 -- confirm which sentinel fifo_dequeue() uses when
 * the queue is empty.
 */
int main( int argc, char **argv )
{
	fifo_queue_t q;

	fifo_init(&q);

#pragma omp parallel
#pragma omp single nowait
	{
		int batch;

		for (batch = 1; batch < 5; ++batch) {
#pragma omp task
			{
				/* producer: enqueue values batch*1000 .. batch*1000+999 */
				int idx;

				for (idx = 0; idx < 1000; ++idx)
					fifo_enqueue(&q, batch * 1000 + idx);
			}
#pragma omp task
			{
				/* consumer: attempt 1000 dequeues, print the hits */
				int got, idx;

				for (idx = 0; idx < 1000; ++idx) {
					got = fifo_dequeue(&q);
					if (got)
						printf("dequeue %d\n", got);
				}
			}
		}
	}

	/* serial drain of anything the consumer tasks missed */
	int leftover;

	for (;;) {
		leftover = fifo_dequeue(&q);
		if (leftover == -1)
			break;
		printf("dequeue %d\n", leftover);
	}

	assert(fifo_empty(&q));
#pragma omp taskwait
	fifo_free(&q);
	return 0;
}
/*
 * Allocate one entity from the pool's free list.
 *
 * Pops the next free entity's virtual address off the pool FIFO.
 * Returns NULL when @mp is NULL or the pool has no free entities
 * (i.e. fifo_dequeue() reports failure).
 */
void *mempool_malloc(struct mempool *mp)
{
	virtual_addr_t entity_va;

	/* short-circuit: only touch the FIFO when the pool handle is valid */
	if (mp && fifo_dequeue(mp->f, &entity_va))
		return (void *)entity_va;

	return NULL;
}
/*
 * BFS phase of Hopcroft-Karp: build the layered distance labels over the
 * left partition (students) starting from every currently unmatched vertex.
 *
 * Returns true when an augmenting path exists, i.e. the BFS reached NIL
 * (dist[NIL] was pulled below INFINITE).
 */
bool hopcroft_karp_bfs(testcase t)
{
	fifo frontier = fifo_create();
	fifo_item edge;
	int left, right;

	/* seed the queue with every free left vertex at distance 0;
	   matched vertices start unreachable */
	for (left = 1; left < t->num_students + 1; left++) {
		if (t->pair_g1[left] == NIL) {
			t->dist[left] = 0;
			fifo_queue(frontier, left);
		} else {
			t->dist[left] = INFINITE;
		}
	}
	t->dist[NIL] = INFINITE;

	/* layered BFS: stop exploring past the first layer that reaches NIL */
	while (frontier->size != 0) {
		left = fifo_dequeue(frontier);
		if (t->dist[left] < t->dist[NIL]) {
			for (edge = t->adjacencies[left]->first; edge != NULL;
			     edge = edge->next) {
				right = edge->value;
				/* follow the matched edge back to the left side;
				   label it one layer deeper if unvisited */
				if (t->dist[t->pair_g2[right]] == INFINITE) {
					t->dist[t->pair_g2[right]] = t->dist[left] + 1;
					fifo_queue(frontier, t->pair_g2[right]);
				}
			}
		}
	}

	fifo_destroy(frontier);
	/* an augmenting path exists iff NIL became reachable */
	return (t->dist[NIL] != INFINITE);
}
/*
 * Fetch the next queued rosout log message.
 *
 * Blocks/behaves exactly as fifo_dequeue() on rosoutQueue does and stores
 * the dequeued message pointer through @msgpp. Ownership of the message
 * passes to the caller.
 */
void rosout_fetch(struct msg__rosgraph_msgs__Log **msgpp)
{
	void *entry;

	urosAssert(msgpp != NULL);

	entry = fifo_dequeue(&rosoutQueue);
	*msgpp = (struct msg__rosgraph_msgs__Log *)entry;
}
/*
 * Worker-thread body for the operator-norm branch-and-bound search.
 *
 * Repeatedly dequeues a cube from the shared FIFO (under ts->fifolock),
 * subdivides it into 2^(n-1) child cubes, evaluates the norm at each
 * viable child's patch centre, updates the shared maximum (under
 * ts->maxlock), and enqueues children that cannot yet be jettisoned.
 * Exits with td->status set to OPNORM_OK when the FIFO drains, or to an
 * error code on thread/alloc/fifo failure or accuracy exhaustion.
 */
static void* worker(tdat_t *td)
{
#ifdef HAVE_SIGNAL_H
  /* set this thread as cancellable */
  if (set_cancellable() != 0)
    {
      td->status = OPNORM_THREAD;
      return NULL;
    }
#endif

  /* unpack arguments, mostly for readability but also saves
     a few dereferences */
  tdat_shared_t *ts = td->shared;
  index_t n = ts->n, m = ts->m;
  double p = ts->p, q = ts->q, eps = ts->eps, LCRP = ts->LCRP, SCD = ts->SCD;
  const double *M = ts->M;
  fifo_t *fifo = ts->fifo;

  /* working data (pcent backs patch.centres for the whole loop) */
  double pcent[n];
  cube_t cube0, cube1;
  patch_t patch;
  double tmax = 0.0;  /* thread-local copy of the running maximum */

  patch.centres = pcent;

  int fifoerr;

  while (1)
    {
      /* thread cancellation point */
      pthread_testcancel();

      /* dequeue a cube */
      if (pthread_mutex_lock(&(ts->fifolock)) < 0)
        {
          td->status = OPNORM_THREAD;
          return NULL;
        }
      fifoerr = fifo_dequeue(fifo, &cube0);
      if (pthread_mutex_unlock(&(ts->fifolock)) < 0)
        {
          td->status = OPNORM_THREAD;
          return NULL;
        }

      /* an empty FIFO means the search is complete, not an error */
      if (fifoerr != FIFO_OK)
        {
          td->status = (fifoerr == FIFO_EMPTY ? OPNORM_OK : OPNORM_FIFO);
          return NULL;
        }

      cube_print(&cube0, n, ">");

      /* nfifo is the total number dequeued */
      td->nfifo++;

      /* cube subdivide: each child has half the parent's halfwidth */
      int hwnexp = cube0.hwnexp + 1;
      double halfwidth = ldexp(1, -hwnexp);

      /* if halfwidth < DBL_EPSILON then we cannot calculate the
         centres of the subdivided cubes accurately, we break out
         and report that the requested accuracy could not be
         achieved */
      if (halfwidth < DBL_EPSILON)
        {
          td->status = OPNORM_INACC;
          return NULL;
        }

      for (size_t k = 0 ; k < (1UL << (n-1)) ; k++)
        {
          cube1.side = cube0.side;
          cube1.hwnexp = hwnexp;

          /* we give our cube1 a temporary set of centres while we
             evaluate and decide whether to jettison or enqueue it,
             only if the latter do we make a malloc and copy the
             temporary centres.  this saves a *lot* of malloc/free
             pairs */
          double centres[n];
          cube1.centres = centres;

          /* bits of k select the +/- offset per coordinate, skipping
             the cube's fixed side */
          size_t k0 = k;
          for (size_t j = 0 ; j < n ; j++)
            {
              if (cube0.side == j)
                {
                  cube1.centres[j] = cube0.centres[j];
                  continue;
                }
              cube1.centres[j] = cube0.centres[j] +
                ((k0 % 2) ? halfwidth : -halfwidth);
              k0 /= 2;
            }

          cube_print(&cube1, n, "<");

          /* get the corresponding patch */
          cube_to_patch(&cube1, n, p, LCRP, SCD, &patch);
          patch_print(patch, n);

          double ratio = radius_to_ratio(patch.radius, p);

          /* check for patch viability - this check almost always
             succeeds (on Drury K, this is false in 80/164016 cases,
             so 0.05% of the time) very small beer.  Yet it introduces
             a branch point, so one might think it worth removing.
             Timing tests indicate that there is no speedup in doing
             so, so we keep it. */
          if (ratio > 0)
            {
              /* evaluate M at patch centre */
              double v[m];
              ts->matvec(M, pcent, m, n, v);
              td->neval++;
              double t = pnorm(v, m, q);

              /* test first with the previous value of tmax */
              if (t < tmax)
                {
                  /* test whether we can jettison this cube */
                  if (t < (tmax * ratio * (1 + eps))) continue;
                  /* note that the global ts->tmax >= tmax so it would
                     be pointless (and cost a mutex access) to test
                     for that here */
                }
              else
                {
                  if (pthread_mutex_lock(&(ts->maxlock)) < 0)
                    {
                      td->status = OPNORM_THREAD;
                      return NULL;
                    }

                  /* update local tmax from global */
                  tmax = ts->tmax;

                  /* if we have found a new global maximum then we
                     update it (and the global maximising vector) as
                     well as the local copy */
                  if (t > tmax)
                    {
                      ts->tmax = tmax = t;
                      if (ts->vmax)
                        memcpy(ts->vmax, pcent, n*sizeof(double));
                    }

                  if (pthread_mutex_unlock(&(ts->maxlock)) < 0)
                    {
                      td->status = OPNORM_THREAD;
                      return NULL;
                    }

                  /* test whether we can jettison this cube but now
                     with the updated value of tmax */
                  if (t < (tmax * ratio * (1 + eps))) continue;
                }
            }

          /* we will enqueue this cube, so we need to allocate and
             copy its temporary centres set */
          if (! (cube1.centres = malloc(n*sizeof(double))))
            {
              td->status = OPNORM_ALLOC;
              return NULL;
            }
          memcpy(cube1.centres, centres, n*sizeof(double));

          if (pthread_mutex_lock(&(ts->fifolock)) < 0)
            {
              free(cube1.centres);
              td->status = OPNORM_THREAD;
              return NULL;
            }
          fifoerr = fifo_enqueue(fifo, &cube1);
          if (pthread_mutex_unlock(&(ts->fifolock)) < 0)
            {
              td->status = OPNORM_THREAD;
              return NULL;
            }

          /* NOTE(review): on the error returns below cube1.centres is
             not freed (nor is cube0.centres) -- acceptable only if the
             process exits on error; confirm intended */
          switch(fifoerr)
            {
            case FIFO_OK:
              break;
            case FIFO_USERMAX:
              td->status = OPNORM_FIFOMAX;
              return NULL;
            default:
              td->status = OPNORM_FIFO;
              return NULL;
            }
        }

      /* the parent cube's heap-allocated centres are no longer needed */
      free(cube0.centres);
    }

  /* we should not arrive here */
  td->status = OPNORM_BUG;
  return NULL;
}