// Called on a worker thread to get an item. Will wait if no items are
// available: busy waits for up to 2ms, then blocks on the semaphore.
qitem* queue_pop(queue *queue)
{
	qitem *r = qpop(&queue->q);
	if (r)
		return r;

	TIME start;
	GETTIME(start);
	INITTIME;
	while (1)
	{
		u64 diff;
		TIME stop;
		sched_yield();
		GETTIME(stop);
		NANODIFF(stop, start, diff);
		r = qpop(&queue->q);
		if (r)
			return r;
		if (diff > 2000000) // 2ms max busy wait
		{
			// Past the busy-wait budget: block until a producer posts.
			SEM_WAIT(queue->sem);
		}
	}
}
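/*
 * TIME, GETTIME, INITTIME, NANODIFF, and u64 above are platform glue that
 * is not defined in this section. A minimal POSIX sketch of what they might
 * look like (an assumption, not the original definitions), built on
 * clock_gettime with a monotonic clock:
 */
#include <stdint.h>
#include <time.h>

typedef uint64_t u64;

#define TIME struct timespec
#define INITTIME            /* no extra setup needed on this platform */
#define GETTIME(t) clock_gettime(CLOCK_MONOTONIC, &(t))
#define NANODIFF(stop, start, diff)                                  \
	(diff) = (u64)(((stop).tv_sec - (start).tv_sec) * 1000000000LL + \
	               ((stop).tv_nsec - (start).tv_nsec))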
// Get an item, or wait at most `milliseconds` for one.
qitem* queue_timepop(queue *queue, uint32_t milliseconds)
{
	qitem *r = qpop(&queue->q);
	if (r)
		return r;
	if (SEM_TIMEDWAIT(queue->sem, milliseconds) != 0)
		return NULL;
	return qpop(&queue->q);
}
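/*
 * SEM_TIMEDWAIT is likewise platform glue that is not shown here. A minimal
 * POSIX sketch (assumed, not the original macro) converts the relative
 * millisecond timeout into the absolute deadline that sem_timedwait expects:
 */
#include <errno.h>
#include <semaphore.h>
#include <stdint.h>
#include <time.h>

// Returns 0 if the semaphore was acquired, nonzero on timeout or error.
static int sem_timedwait_ms(sem_t *sem, uint32_t ms)
{
	struct timespec ts;
	clock_gettime(CLOCK_REALTIME, &ts);   // sem_timedwait measures against CLOCK_REALTIME
	ts.tv_sec  += ms / 1000;
	ts.tv_nsec += (long)(ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L)        // carry nanoseconds into seconds
	{
		ts.tv_sec  += 1;
		ts.tv_nsec -= 1000000000L;
	}
	while (sem_timedwait(sem, &ts) != 0)
	{
		if (errno != EINTR)               // retry only if interrupted by a signal
			return -1;
	}
	return 0;
}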
// The scheduler thread is the single consumer of tls_reuseq;
// producers are worker threads or the scheduler thread itself.
// We have a fixed number of events that are populated on the first call.
// If this returns NULL, the caller should busy wait, go do something else,
// or sleep.
qitem* queue_get_item(void)
{
	if (tls_reuseq == NULL)
	{
		tls_reuseq = calloc(1, sizeof(intq));
		initq(tls_reuseq);
		populate(tls_reuseq);
	}
	return qpop(tls_reuseq);
}
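/*
 * initq and populate are not shown here. Judging from the comments above and
 * the blockStart checks in queue_intq_destroy below, a plausible sketch of
 * populate (the pool size, the qpush signature, and the blockStart flag
 * semantics are all assumptions) allocates the fixed set of items as one
 * contiguous block, with only the first item marked as the owner of the
 * allocation:
 */
#include <stdlib.h>

#define REUSEQ_ITEMS 128            /* assumed size of the fixed event pool */

static __thread intq *tls_reuseq;   /* assumed thread-local declaration */

static void populate(intq *q)
{
	int i;
	qitem *block = calloc(REUSEQ_ITEMS, sizeof(qitem));
	if (block == NULL)
		return;
	block[0].blockStart = 1;        /* one free(block) releases the whole pool */
	for (i = 0; i < REUSEQ_ITEMS; i++)
		qpush(q, &block[i]);        /* assumed intq push primitive */
}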
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct queue q;
	int i;

	q.front = 0;
	q.rear = 0;
	/* Push one item and immediately pop it back, 1000 times */
	for (i = 0; i < 1000; i++)
	{
		qpush(&q, i);
		int a = qpop(&q);
		printf("%d\n", a);
	}
	exit(0);
}
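/*
 * struct queue, qpush, and qpop are not defined alongside this test driver.
 * A minimal array-backed ring buffer that would make it compile (placed
 * above main in a real file; the capacity and layout are assumptions) could
 * look like this:
 */
#define QCAP 16                     /* the driver holds at most one item at a time */

struct queue
{
	int items[QCAP];
	int front;                      /* index of the next item to pop */
	int rear;                       /* index of the next free slot */
};

static void qpush(struct queue *q, int v)
{
	q->items[q->rear] = v;
	q->rear = (q->rear + 1) % QCAP; /* no overflow check in this sketch */
}

static int qpop(struct queue *q)
{
	int v = q->items[q->front];
	q->front = (q->front + 1) % QCAP;
	return v;                       /* caller must ensure the queue is nonempty */
}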
void queue_intq_destroy(intq *q)
{
	qitem *it;
	if (q == NULL)
		return;

	/* Drain the queue; free only items that start an allocation block */
	while ((it = qpop(q)))
	{
#ifndef _TESTAPP_
		if (it->env)
			enif_free_env(it->env);
#endif
		if (it->blockStart)
			free(it);
	}
	/* The remaining head item may itself be the start of a block */
	if (q->head && q->head->blockStart)
		free(q->head);
	free(q);
}
/* Multi-source relaxation (SPFA-style) over (row, col, direction) states:
   matrix[i][j][k] holds the minimum number of turns needed to reach cell
   (i, j) while moving in direction k, starting from (tn, tm). */
int process(void)
{
	int i, j, k;
	int cn, cm, cd;   /* current row, column, direction */
	int tmp;

	/* Initialize all states to "unreachable" */
	for (i = 0; i < n + 2; i++)
		for (j = 0; j < m + 2; j++)
			for (k = 0; k < 4; k++)
				matrix[i][j][k] = INF;

	/* Seed the queue with the start cell in all four directions */
	qfront = qrear = 0;
	matrix[tn][tm][0] = matrix[tn][tm][1] = matrix[tn][tm][2] = matrix[tn][tm][3] = 0;
	qpush(tn, tm, 0);
	qpush(tn, tm, 1);
	qpush(tn, tm, 2);
	qpush(tn, tm, 3);

	while (qfront != qrear)
	{
		qpop(&cn, &cm, &cd);
		for (k = 0; k < 4; k++)
		{
			/* Step to a walkable neighbor; reversing direction is forbidden */
			if (table[cn + dy[k]][cm + dx[k]] && k != neg[cd])
			{
				/* Going straight is free; turning costs 1 */
				tmp = matrix[cn][cm][cd] + (cd == k ? 0 : 1);
				if (matrix[cn + dy[k]][cm + dx[k]][k] > tmp)
				{
					matrix[cn + dy[k]][cm + dx[k]][k] = tmp;
					qpush(cn + dy[k], cm + dx[k], k);
				}
			}
		}
	}

	/* For each cell, record the cheapest incoming direction in trace */
	for (i = 0; i < n + 2; i++)
	{
		for (j = 0; j < m + 2; j++)
		{
			trace[i][j] = 0;
			for (k = 1; k < 4; k++)
				if (matrix[i][j][trace[i][j]] > matrix[i][j][k])
					trace[i][j] = k;
		}
	}
	return 0;
}
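/*
 * qfront, qrear, qpush, and qpop above operate on a shared global queue
 * whose definition is not shown. A sketch consistent with the loop above
 * (the array names and capacity are assumptions) stores the (row, col,
 * direction) triples in parallel circular arrays:
 */
#define QMAX 100000                 /* assumed capacity; must exceed live entries */

static int qn[QMAX], qm[QMAX], qd[QMAX];
int qfront, qrear;

void qpush(int n, int m, int d)
{
	qn[qrear] = n;
	qm[qrear] = m;
	qd[qrear] = d;
	qrear = (qrear + 1) % QMAX;     /* queue is empty when qfront == qrear */
}

void qpop(int *n, int *m, int *d)
{
	*n = qn[qfront];
	*m = qm[qfront];
	*d = qd[qfront];
	qfront = (qfront + 1) % QMAX;
}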
// Return an item if one is available, otherwise NULL.
qitem* queue_trypop(queue *queue)
{
	return qpop(&queue->q);
}
int doBoards(const node* dictionary, const boggleBoard* boards, int boardCount,
             int rank, int size, int* pStart, int timing)
{
	assert(size > 1);

	/* Use tag 0 for completion messages, 1 for steal requests, and 2 for work given */
	int i, j, total, boardSize, target, completed, someoneWants, who, remaining, work;
	int gotWork, donorFinished;
	point pt;
	queue* queue = qinit();
	int done[size-1];
	int want[size-1];
	int notDone[size-1];
	MPI_Request doneRequests[size-1];
	MPI_Request wantRequests[size-1];
	MPI_Request workRequest;

	/* We don't want everyone choosing the same increments randomly,
	   so add the rank to the seed */
	struct mt19937p state;
	sgenrand(10302011UL + rank, &state);

	/* Index the requests such that the current thread is -1 (i.e. not present),
	   and all the other threads follow round robin, wrapping around at 'size'.
	   We're not explicitly interested in the contents of 'done' or 'want',
	   only the signals. */
	for (i = 1; i < size; ++i)
	{
		want[i-1] = done[i-1] = 0;
		target = (rank + i) % size;
		MPI_Irecv(done + i - 1, 1, MPI_INT, target, 0, MPI_COMM_WORLD, doneRequests + i - 1);
		MPI_Irecv(want + i - 1, 1, MPI_INT, target, 1, MPI_COMM_WORLD, wantRequests + i - 1);
	}

	completed = 0;
	total = 0;
	boardSize = boards[0].n;

	/* Maintain a local work queue with the assigned board indices */
	for (i = pStart[rank]; i < pStart[rank+1]; ++i)
		qpush(queue, i);

	/* Do assigned work and listen for requests for extra work */
	while (!qempty(queue))
	{
		i = qpop(queue);

		/* Try to find someone who's asking for work */
		MPI_Testany(size-1, wantRequests, &who, &someoneWants, MPI_STATUS_IGNORE);
		if (someoneWants)
		{
			/* who indexes wantRequests; map it back to the requesting rank */
			int whoRank = (rank + 1 + who) % size;
			/* printf("Who let thread %d steal my (thread %d) work (#%d)?!\n", whoRank, rank, i); */
			MPI_Send(&i, 1, MPI_INT, whoRank, 2, MPI_COMM_WORLD);
			/* Reopen the asynchronous receive from that worker; the buffer
			   index must match the request index */
			MPI_Irecv(want + who, 1, MPI_INT, whoRank, 1, MPI_COMM_WORLD, wantRequests + who);
		}
		else
		{
			for (j = 0; j < boardSize * boardSize; ++j)
			{
				pt.x = j / boardSize;
				pt.y = j % boardSize;
				total += exploreOne(dictionary, boards + i, pt, timing);
			}
		}
	}

	/* Broadcast that we are done (MPI_Scatter is a little annoying) */
	completed = 1;
	for (i = 1; i < size; ++i)
		MPI_Send(&completed, 1, MPI_INT, (rank + i) % size, 0, MPI_COMM_WORLD);

	/* Loop until everyone is done */
	while (!allDone(doneRequests, size))
	{
		remaining = 0;
		/* Find all of the workers that have not completed */
		for (i = 0; i < size - 1; ++i)
		{
			MPI_Test(doneRequests + i, &donorFinished, MPI_STATUS_IGNORE);
			if (!donorFinished)
				notDone[remaining++] = i;
		}

		if (remaining > 0)
		{
			/* Pick one of the remaining workers at random and send a steal request */
			i = notDone[genrand(&state) % remaining];
			target = (rank + i + 1) % size;
			MPI_Send(&target, 1, MPI_INT, target, 1, MPI_COMM_WORLD);
			MPI_Irecv(&work, 1, MPI_INT, target, 2, MPI_COMM_WORLD, &workRequest);

			gotWork = donorFinished = 0;
			/* Check whether the donor replied or finished in the meantime */
			while (!gotWork && !donorFinished)
			{
				MPI_Test(&workRequest, &gotWork, MPI_STATUS_IGNORE);
				MPI_Test(doneRequests + i, &donorFinished, MPI_STATUS_IGNORE);
				if (gotWork)
				{
					for (j = 0; j < boardSize * boardSize; ++j)
					{
						pt.x = j / boardSize;
						pt.y = j % boardSize;
						total += exploreOne(dictionary, boards + work, pt, timing);
					}
				}
			}
		}
	}

	qdest(queue);
	return total;
}
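/*
 * allDone is called above but not defined in this section. A plausible
 * implementation (an assumption) simply asks MPI whether every pending
 * "done" receive has completed:
 */
#include <mpi.h>

static int allDone(MPI_Request *doneRequests, int size)
{
	int flag;
	/* flag becomes true only once all size-1 completion messages have arrived */
	MPI_Testall(size - 1, doneRequests, &flag, MPI_STATUSES_IGNORE);
	return flag;
}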