int main(int argc, char *argv[]) { int ntasks, rank; int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS]; int printbuf[2 * NTASKS * NTASKS]; int offsets[NTASKS] = { 0, 1, 2, 4 }; int counts[NTASKS] = { 1, 1, 2, 4 }; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &ntasks); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (ntasks != NTASKS) { if (rank == 0) fprintf(stderr, "Run this program with %i tasks.\n", NTASKS); MPI_Abort(MPI_COMM_WORLD, -1); } init_buffers(sendbuf, recvbuf, 2 * NTASKS); print_buffers(printbuf, sendbuf, 2 * NTASKS); /* TODO start: perform collective communication */ /* TODO end */ print_buffers(printbuf, recvbuf, 2 * NTASKS); MPI_Finalize(); return 0; }
/*
 * Destroy a per-thread print buffer: flush any pending output, unlink
 * the buffer from the global list under __buffer_lock, then release
 * its ring storage and the buffer itself.
 */
static void cleanup_buffer(struct print_buffer *buffer)
{
	struct print_buffer *before, *after;

	/* Make the buffer unreachable through thread-specific data first. */
	pthread_setspecific(__buffer_key, NULL);

	pthread_mutex_lock(&__buffer_lock);

	/* Drain whatever output is still queued before tearing down. */
	print_buffers();

	/* Unlink from the doubly-linked global buffer list. */
	before = buffer->prev;
	after = buffer->next;
	if (before)
		before->next = after;
	else
		__first_buffer = after;
	if (after)
		after->prev = before;

	pthread_mutex_unlock(&__buffer_lock);

	free(buffer->ring);
	free(buffer);
}
/*
 * Tear down a thread's print buffer after flushing its pending output.
 * With CONFIG_XENO_FASTSYNCH, buffers carved out of the preallocated
 * pool are returned by setting their bit in the pool bitmap instead of
 * being freed; heap-allocated buffers are unlinked from the global
 * list and freed.  Must run in non-real-time context (assert_nrt).
 */
static void cleanup_buffer(struct print_buffer *buffer)
{
	struct print_buffer *prev, *next;

	assert_nrt();

	/* The buffer is no longer reachable via thread-specific data. */
	pthread_setspecific(buffer_key, NULL);

	pthread_mutex_lock(&buffer_lock);

	/* Flush anything still queued before releasing the buffer. */
	print_buffers();

	pthread_mutex_unlock(&buffer_lock);

#ifdef CONFIG_XENO_FASTSYNCH
	/* Return the buffer to the pool */
	{
		unsigned long old_bitmap, bitmap;
		unsigned i, j;

		/* Not from the pool?  Fall through to the heap-free path. */
		if ((unsigned long)buffer - pool_start >= pool_len)
			goto dofree;

		/* Slot number within the pool, split into word/bit indices. */
		j = ((unsigned long)buffer - pool_start) / pool_buf_size;
		i = j / BITS_PER_LONG;
		j = j % BITS_PER_LONG;

		/* Lock-free CAS loop: set the slot's "free" bit. */
		old_bitmap = xnarch_atomic_get(&pool_bitmap[i]);
		do {
			bitmap = old_bitmap;
			old_bitmap = xnarch_atomic_cmpxchg(&pool_bitmap[i],
							   bitmap,
							   bitmap | (1UL << j));
		} while (old_bitmap != bitmap);

		return;
	}
  dofree:
#endif /* CONFIG_XENO_FASTSYNCH */

	pthread_mutex_lock(&buffer_lock);

	/* Unlink from the doubly-linked global buffer list. */
	prev = buffer->prev;
	next = buffer->next;

	if (prev)
		prev->next = next;
	else
		first_buffer = next;
	if (next)
		next->prev = prev;

	/* One fewer live buffer for the printer thread to consider. */
	buffers--;

	pthread_mutex_unlock(&buffer_lock);

	free(buffer->ring);
	free(buffer);
}
/*
 * Tear down a thread's print buffer after flushing its pending output.
 * Buffers carved out of the preallocated pool are returned by setting
 * their bit in the pool bitmap; heap-allocated buffers are unlinked
 * from the global list and freed.  Must run in non-real-time context
 * (assert_nrt).
 */
static void cleanup_buffer(struct print_buffer *buffer)
{
	struct print_buffer *prev, *next;

	assert_nrt();

	/* The buffer is no longer reachable via thread-specific data. */
	pthread_setspecific(buffer_key, NULL);

	pthread_mutex_lock(&buffer_lock);

	/* Flush anything still queued before releasing the buffer. */
	print_buffers();

	pthread_mutex_unlock(&buffer_lock);

	/* Return the buffer to the pool */
	{
		unsigned long old_bitmap, bitmap;
		unsigned i, j;

		/* Not from the pool?  Fall through to the heap-free path. */
		if ((unsigned long)buffer - pool_start >= pool_len)
			goto dofree;

		/* Slot number within the pool, split into word/bit indices. */
		j = ((unsigned long)buffer - pool_start) / pool_buf_size;
		i = j / __WORDSIZE;
		j = j % __WORDSIZE;

		/* Lock-free CAS loop: set the slot's "free" bit. */
		old_bitmap = atomic_long_read(&pool_bitmap[i]);
		do {
			bitmap = old_bitmap;
			old_bitmap = atomic_long_cmpxchg(&pool_bitmap[i],
							 bitmap,
							 bitmap | (1UL << j));
		} while (old_bitmap != bitmap);

		return;
	}
  dofree:

	pthread_mutex_lock(&buffer_lock);

	/* Unlink from the doubly-linked global buffer list. */
	prev = buffer->prev;
	next = buffer->next;

	if (prev)
		prev->next = next;
	else
		first_buffer = next;
	if (next)
		next->prev = prev;

	/* One fewer live buffer for the printer thread to consider. */
	buffers--;

	pthread_mutex_unlock(&buffer_lock);

	free(buffer->ring);
	free(buffer);
}
/* *** Deferred Output Management *** */

/*
 * Flush all pending deferred output from every registered print
 * buffer.  Non-real-time context only (assert_nrt).
 */
void rt_print_flush_buffers(void)
{
	assert_nrt();

	/* print_buffers() walks the global list; serialize with it. */
	pthread_mutex_lock(&buffer_lock);
	print_buffers();
	pthread_mutex_unlock(&buffer_lock);
}
/*
 * Relay a frame addressed to another node: consult the routing table
 * for the link that leads towards the final destination, then hand the
 * frame back down to the data link layer for retransmission.
 */
static void forward_frame(FRAME frame, int inLink)
{
    // Which link moves this frame closer to its destination node?
    int outLink = get_route(frame.destNode);
    frame.link = outLink;

    printf("\t\t\t\t\tFORWARDING FRAME VIA LINK %d\n", outLink);

    // Push the frame back to the data link layer on the routed link.
    datalink_down(frame, inLink, outLink);

    print_buffers(outLink);
}
/*
 * Background printer thread: once per print period, flush all buffered
 * output under the global buffer lock.  Never returns.
 */
static void *printer_loop(void *arg)
{
	for (;;) {
		nanosleep(&__print_period, NULL);

		pthread_mutex_lock(&__buffer_lock);
		print_buffers();
		pthread_mutex_unlock(&__buffer_lock);
	}
}
struct page * get_seg_page(struct segment *segp) { int index = segp->offset / BUF_IN_PAGE; struct page *page; //dprintk("get_seg_page:segnum=%d,segp->start=%Lu,segp->offset=%d,index=%d\n", //segp->segnum,segp->start, segp->offset, index); assert(index < LFS_SEGSIZE); page = segp->pages[index]; print_buffers(page, segp->start + index * BUF_IN_PAGE); // dprintk("returning page with index %d that is mapped to %Lu\n", index, page_buffers(page)->b_blocknr); return page; }
/*
 * Timeout handler for link 4: the timer's user data carries the
 * sequence number of the frame held in this link's window; fetch that
 * frame and retransmit it.
 */
static void timeout_link_4(CnetEvent ev, CnetTimerID timer, CnetData data)
{
    int seq = (int)data;
    FRAME frame;

    frame.link = 4;
    printf("TIMEOUT:\nOUT LINK: %d\nSEQ NO: %d\n\n", frame.link, seq);

    // Pull the timed-out frame back out of the window and resend it.
    frame = window[frame.link - 1][seq];
    transmit_frame(frame);

    print_buffers(frame.link);
}
void rt_print_cleanup(void) { struct print_buffer *buffer = pthread_getspecific(buffer_key); if (buffer) cleanup_buffer(buffer); else { pthread_mutex_lock(&buffer_lock); print_buffers(); pthread_mutex_unlock(&buffer_lock); } pthread_cancel(printer_thread); }
/*
 * Printer thread main loop: blocks on the wakeup condition while no
 * buffers exist, otherwise flushes all pending output and then sleeps
 * for one print period before checking again.
 */
static void *printer_loop(void *arg)
{
	for (;;) {
		pthread_mutex_lock(&buffer_lock);

		/* Nothing to print until at least one buffer is registered. */
		while (buffers == 0)
			pthread_cond_wait(&printer_wakeup, &buffer_lock);

		print_buffers();
		pthread_mutex_unlock(&buffer_lock);

		nanosleep(&print_period, NULL);
	}

	return NULL;
}
static void network_down( char* data, size_t dataLength, int destination ) { FRAME frame; // READ MESSAGE FROM APPLICATION LAYER AND ENCAPSULATE INTO FRAME frame.len = dataLength; memcpy( frame.data, data, frame.len ); frame.srcNode = nodeinfo.nodenumber; frame.destNode = destination; frame.kind = DL_DATA; free(data); // DATA IS MEM COPIED SO CAN FREE ORIGINAL // DETERMINE THE ROUTE OF WHERE TO SEND TO VIA ROUTING TABLE frame.link = get_route(frame.destNode); // THROW FRAME DOWN TO DATA LINK LAYER datalink_down(frame, frame.link, 0); // PRINT CONTENT OF WINDOW AND BUFFER print_buffers(frame.link); }
/*
 * Handle an acknowledgement frame on `link` (sliding-window protocol).
 * Cumulatively releases every window slot up to and including the
 * acked sequence number, refills the window from the per-link buffer,
 * and re-enables the application layer once every link's buffer is
 * empty and every window has room.
 */
static void ack_received(FRAME frame, int link)
{
    FRAME tempFrame;
    int first, second, third, fourth;

    // PRINT ACKNOWLEDGEMENT MESSAGE
    printf("\n\t\t\t\t\tACK RECEIVED\n");
    printf("\t\t\t\t\tIN LINK:%d\n", link);
    printf("\t\t\t\t\tSEQ NO:\t%d\n", frame.seq);

    // ENSURE ACK NUMBER IS BETWEEN ACK EXPECTED AND NEXT FRAME TO SEND
    if (between(ackExpected[link - 1], frame.seq, nextFrameToSend[link - 1])) {
        // Cumulative ack: release every outstanding slot up to frame.seq.
        // LOOP UNTIL ACKEXPECTED IS ONE MORE THAN THE SEQNUM OF THE ACK
        while (between(ackExpected[link - 1], frame.seq, nextFrameToSend[link - 1])) {
            // STOP THE TIMER FOR THAT FRAME TO PREVENT A TIMEOUT
            CNET_stop_timer(timers[link - 1][ackExpected[link - 1]]);
            // INCREMENT ACKEXPECTED AND DECREASE NUMBER IN WINDOW
            inc(&ackExpected[link - 1]);
            numInWindow[link - 1] -= 1;
        }
    } else {
        // ERRORS SHOULD ALL BE CAUGHT BEFORE THIS
        // STILL CHECK REGARDLESS, AS A FAILSAFE
        printf("\n\t\t\t\t\tERROR: OUTSIDE WINDOW BOUNDS\n");
    }

    // ENSURE WINDOW SIZE IS VALID AND BUFFER IS NOT EMPTY
    while (numInWindow[link - 1] < MAX_SEQ && numInBuffer[link - 1] > 0) {
        // ADD FRAMES FROM THE BUFFER TO THE WINDOW
        printf("\t\t\t\t\tSENDING FRAME FROM BUFFER\n");

        // REMOVE FRAME FROM THE FRONT OF THE BUFFER
        // (bufferBounds[..][0] is the head index of the circular buffer)
        tempFrame = buffer[link - 1][bufferBounds[link - 1][0]];
        inc(&bufferBounds[link - 1][0]);
        numInBuffer[link - 1] -= 1;

        // STORE THE FRAME FROM THE BUFFER IN THE WINDOW
        tempFrame.seq = nextFrameToSend[link - 1];
        window[link - 1][nextFrameToSend[link - 1]] = tempFrame;
        numInWindow[link - 1] += 1;

        // TRANSMIT THE FRAME FROM THE BUFFER (NOW IN THE WINDOW)
        tempFrame.link = get_route(tempFrame.destNode);
        transmit_frame(tempFrame);
        inc(&nextFrameToSend[link - 1]);
    }

    // IF ALL LINK WINDOWS NOT FULL AND ALL BUFFERS EMPTY
    // THIS KEEPS EFFICIENCY AS HIGH AS POSSIBLE
    first = ( numInBuffer[0] == 0 ) && ( numInWindow[0] < MAX_SEQ );
    second = ( numInBuffer[1] == 0 ) && ( numInWindow[1] < MAX_SEQ );
    third = ( numInBuffer[2] == 0 ) && ( numInWindow[2] < MAX_SEQ );
    fourth = ( numInBuffer[3] == 0 ) && ( numInWindow[3] < MAX_SEQ );

    // REENABLE APPLICATION LAYER TO GENERATE MESSAGES AGAIN
    if ( first && second && third && fourth ) {
        CHECK(CNET_enable_application(ALLNODES));
        for ( int ii = 0; ii < nodeinfo.nlinks; ii++ )
            CNET_set_LED(ii, "green" );
    }

    print_buffers(link);
}