void func_worker_1(void* arg) {
  int worker_1_param = *((int*)arg);
  // For each of the messages that are received
  for (int i = 0; i < 5; ++i) {
    mp_recv(&chan1);
    for (int j = 0; j < chan1.buf_size; j++) {
      // Copy the received data to the send buffer
      int to_offset = i * chan1.buf_size + j;
      volatile char _SPM * copy_to;
      volatile char _SPM * copy_from;
      copy_to = (volatile char _SPM *)chan2.write_buf + to_offset;
      copy_from = (volatile char _SPM *)chan1.read_buf + j;
      // Like a Caesar cipher, shifting the ASCII alphabet
      *copy_to = (*copy_from) + (char)worker_1_param;
    }
    // Acknowledge the received data
    mp_ack(&chan1);
  }
  mp_barrier(&comm);
  mp_send(&chan2);
  int ret = 0;
  corethread_exit(&ret);
  return;
}
int latency_master(int cnt, int bytes) {
  int i;
  unsigned long long total = 0;
  TIMER_START;
  for (i = 0; i < cnt; i++) {
    if (flush & FLUSH_BETWEEN_ITERATIONS) {
      inval_dcache();
      inval_mcache();
    }
    mp_send_size(&m2s, bytes);
  }
  TIMER_STOP;
  mp_recv(&s2m);
  mp_ack(&s2m);
  total = TIMER_ELAPSED;
  total -= calibrate_cache_flush(cnt);
  return (total/cnt);
}
// The main function for the other thread on another core
void work(void* arg) {
  // Measure execution time with the clock cycle timer
  volatile _IODEV int *timer_ptr = (volatile _IODEV int *) (PATMOS_IO_TIMER+4);
  int val, data;
  qpd_t *channel = mp_create_qport(1, SINK, BUF_SIZE, NUM_BUF);
  mp_init_ports();
  for (;;) {
    mp_recv(channel, 0);
    val = *timer_ptr;
    data = *(volatile int _SPM *) channel->read_buf;
    mp_ack(channel, 0);
    // Return the time stamp and change the value in the shared variable
    end_time = val;
    field = data + 1;
  }
}
int bandwidth_master(int cnt, int bytes) {
  int i;
  unsigned long long total = 0;
  TIMER_START;
  for (i = 0; i < cnt; i++) {
    if (flush & FLUSH_BETWEEN_ITERATIONS) {
      inval_dcache();
      inval_mcache();
    }
    mp_send_size(&m2s, bytes);
  }
  mp_recv(&s2m);
  mp_ack(&s2m);
  TIMER_STOP;
  total = TIMER_ELAPSED;
  total -= calibrate_cache_flush(cnt);
  return (((unsigned long long)cnt*bytes*1000000)/(total*1024)); /* KB/sec */
}
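/*
 * For reference, a hedged sketch of the mp_send_size() helper that
 * latency_master() and bandwidth_master() above call, assuming it simply
 * mirrors mp_recv_size() below: the requested number of bytes is chopped
 * into buffer-sized chunks and one mp_send() is issued per chunk. This is
 * not the benchmark's actual definition and may differ from it.
 */
void mp_send_size(mpd_t* mpd_ptr, int bytes) {
  int k = 0;
  while (k < bytes) {
    int chunk = 0;
    if (bytes-k >= mpd_ptr->buf_size) {
      // If the remaining data is more than the size of the buffer
      chunk = mpd_ptr->buf_size;
    } else {
      // The remaining data all fits in a buffer
      chunk = bytes-k;
    }
    // The benchmark ignores the payload, so nothing is copied into
    // mpd_ptr->write_buf before the buffer is sent
    mp_send(mpd_ptr);
    k += chunk;
  }
}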
void mp_recv_size(mpd_t* mpd_ptr, int bytes) {
  int k = 0;
  while (k < bytes) {
    mp_recv(mpd_ptr);
    int chunk = 0;
    if (bytes-k >= mpd_ptr->buf_size) {
      // If the remaining data is more than the size of the buffer
      chunk = mpd_ptr->buf_size;
    } else {
      // The remaining data all fits in a buffer
      chunk = bytes-k;
    }
    // The payload is ignored by the benchmark; copying the chunk out of the
    // read buffer into a destination array 'dst' would look like this:
    // for (int j = 0; j < chunk; ++j) {
    //   dst[k+j] = *((volatile char _SPM *)mpd_ptr->read_buf+j);
    // }
    mp_ack(mpd_ptr);
    k += chunk;
  }
}
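/*
 * A minimal sketch (not part of the original listings) of the slave-side
 * loop that the master benchmark functions above assume: the slave drains
 * the cnt messages arriving on the master-to-slave channel m2s and then
 * reports completion with a single message on s2m, which the master
 * receives and acknowledges after stopping the timer. The function name
 * bench_slave is hypothetical; the real slave code may differ.
 */
void bench_slave(int cnt, int bytes) {
  for (int i = 0; i < cnt; i++) {
    // Receive and acknowledge one 'bytes'-sized transfer from the master
    mp_recv_size(&m2s, bytes);
  }
  // Tell the master that all messages have been consumed
  mp_send(&s2m);
}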
int main() {
  puts("Master");
  corethread_t worker_1 = 2; // For now the core ID
  int worker_1_param = 1;
  char send_data[] = "Hello World!, Sending messages is cool!";
  char recv_data[40];
  // Initialization of message passing buffers
  // mp_chan_init() returns false if the local and remote
  // addresses are not aligned to words
  if (!mp_chan_init(&chan1, get_cpuid(), worker_1,
                    MP_CHAN_1_BUF_SIZE, MP_CHAN_1_NUM_BUF)) {
    abort();
  }
  if (!mp_chan_init(&chan2, worker_1, get_cpuid(),
                    MP_CHAN_2_BUF_SIZE, MP_CHAN_2_NUM_BUF)) {
    abort();
  }
  puts("Initialized buffers");
  if (!mp_communicator_init(&comm, 2, cores, 0)) {
    abort();
  }
  puts("Initialized barrier");
  corethread_create(&worker_1, &func_worker_1, (void*)&worker_1_param);
  int i = 0;
  // While there is still data to be sent
  while (i < sizeof(send_data)) {
    int chunk = 0;
    if (sizeof(send_data)-i >= chan1.buf_size) {
      // If the remaining data is more than the size of the buffer
      chunk = chan1.buf_size;
    } else {
      // The remaining data all fits in a buffer
      chunk = sizeof(send_data)-i;
    }
    // Copy the chunk of data to the write buffer
    for (int j = 0; j < chunk; ++j) {
      *((volatile char _SPM *)chan1.write_buf+j) = send_data[i+j];
    }
    // Send the chunk of data
    mp_send(&chan1);
    i += chunk;
  }
  puts("Messages sent");
  mp_barrier(&comm);
  puts("Barrier reached");
  mp_recv(&chan2);
  puts("Message recv");
  // Copy the received data to the recv_data array
  for (int i = 0; i < sizeof(recv_data)-1; i++) {
    recv_data[i] = (*((volatile char _SPM *)chan2.read_buf+i)) - worker_1_param;
  }
  // Acknowledge the received data
  mp_ack(&chan2);
  recv_data[39] = '\0';
  puts(recv_data);
  int* res;
  corethread_join(worker_1, &res);
  return *res;
}
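/*
 * A sketch (assumed, not from the original listings) of the global
 * declarations that func_worker_1() and main() above rely on. The buffer
 * sizes are chosen only to be consistent with the loops above (five 8-byte
 * messages forwarded back as one 40-byte message); the original program may
 * use different constants, and the include paths and type names are taken
 * from the Patmos C library as an assumption.
 */
#include <machine/patmos.h>
#include <machine/spm.h>
#include "libcorethread/corethread.h"
#include "libmp/mp.h"

#define MP_CHAN_1_BUF_SIZE 8   // master -> worker chunk size
#define MP_CHAN_1_NUM_BUF  2
#define MP_CHAN_2_BUF_SIZE 40  // worker -> master reply fits in one buffer
#define MP_CHAN_2_NUM_BUF  1

mpd_t chan1;                 // master -> worker channel
mpd_t chan2;                 // worker -> master channel
communicator_t comm;         // barrier over the two participating cores
const int cores[2] = {0, 2}; // core IDs of the master and the worker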