/* Attach a GtkTextView to the navigator (property setter helper).
 *
 * Takes a reference on the view and its buffer, sets up the navigation
 * boundary marks, pre-computes the word count between them, and notifies
 * the "view" property. May only be called once: the asserts require that
 * no view/buffer has been set yet.
 */
static void
set_view (GbpSpellNavigator *self,
          GtkTextView       *view)
{
  GtkTextIter begin_iter;
  GtkTextIter end_iter;

  g_assert (GBP_IS_SPELL_NAVIGATOR (self));
  g_assert (self->view == NULL);
  g_assert (self->buffer == NULL);

  /* Nothing to do if the view is unchanged (always NULL here per the
   * asserts above, so this only rejects a NULL view argument). */
  if (view == self->view)
    return;

  self->view = g_object_ref (view);
  self->buffer = g_object_ref (gtk_text_view_get_buffer (view));

  init_boundaries (self);

  /* Count the words spanned by the freshly-created boundary marks. */
  gtk_text_buffer_get_iter_at_mark (self->buffer, &begin_iter, self->start_boundary);
  gtk_text_buffer_get_iter_at_mark (self->buffer, &end_iter, self->end_boundary);
  self->words_count = gbp_spell_navigator_count_words (self, &begin_iter, &end_iter);

  g_object_notify_by_pspec (G_OBJECT (self), properties [PROP_VIEW]);
}
/*
 * Early prototype of the MPI relaxation driver: allocate the per-rank
 * slabs, build and print the full unrelaxed grid on rank 0, then exit.
 *
 * Fixes vs. original: main takes char ** (MPI_Init needs char ***, which
 * a const argv cannot provide), MPI_Finalize is called before returning
 * (required by the MPI standard after MPI_Init), all buffers are freed
 * (p/p_new are NULL-initialized so free() is a safe no-op on ranks != 0),
 * and the unused MPI_Status local is removed.
 */
int main(int argc, char **argv)
{
    int size, rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int subROWS = ROWS / size;          /* interior rows owned by each rank */

    double *local, *local_new;          /* per-rank slab plus two halo rows */
    double *p = NULL, *p_new = NULL;    /* full grid: rank 0 only */

    init_grid(&local, &local_new, COLS, subROWS + 2);

    if (0 == rank) {
        init_grid(&p, &p_new, COLS, ROWS + 2);
        init_boundaries(p, COLS, ROWS + 2);
        memmove(p_new, p, COLS * (ROWS + 2) * sizeof(double));
        printf_buffer(p_new, COLS, ROWS + 2);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    free(local);
    free(local_new);
    free(p);        /* NULL on ranks != 0 */
    free(p_new);

    MPI_Finalize();
    return 0;
}
int main(int argc, char const *argv[]) { int size, rank; //MPI_Init(&argc, &argv); MPI_Init(NULL,NULL); MPI_Comm_rank(MPI_COMM_WORLD,&rank); MPI_Comm_size(MPI_COMM_WORLD,&size); MPI_Status status; int subROWS=ROWS/size; double *local,*local_new; double *p,*p_new; init_grid(&local,&local_new,COLS,subROWS+2); if (0==rank) { printf("Size: %d\nSubrow size is:%d\nThis is the unrelaxed data we have:\n",size,subROWS); init_grid(&p,&p_new,COLS,ROWS+2); init_boundaries(p,COLS,ROWS+2); memmove(p_new,p,COLS*(ROWS+2)*sizeof(double)); printf_buffer(p_new,COLS,ROWS+2); printf("data ended\n"); } MPI_Barrier(MPI_COMM_WORLD); /*free(local); free(local_new); free(p); free(p_new);*/ //return 0; //MPI_Barrier(MPI_COMM_WORLD) if(0==rank) for(int j=0;j<size;j++) MPI_Send(p+j*subROWS*COLS,(subROWS+2)*COLS,MPI_DOUBLE,j,0,MPI_COMM_WORLD); MPI_Recv(local,(subROWS+2)*COLS,MPI_DOUBLE,0,0,MPI_COMM_WORLD,NULL); register int count=50; int up_nbr,dn_nbr; double *firstbuf, *lastbuf; init_grid(&firstbuf,&lastbuf,COLS,1); while(count>=0) { ///* //printf("Rank:%d\n",rank); memmove(local_new,local,COLS*(subROWS+2)*sizeof(double)); relax(local_new,local,COLS,subROWS+2); //printf("After relax\n"); //printf_buffer(local_new,COLS,subROWS+2); memmove(lastbuf,local_new,COLS*sizeof(double)); memmove(firstbuf,local_new+(subROWS+1)*COLS,COLS*sizeof(double)); up_nbr=(rank==size-1) ? MPI_PROC_NULL : rank+1; dn_nbr=(rank==0) ? 
MPI_PROC_NULL : rank-1; //refresh these two buffers here MPI_Sendrecv(local_new+(1)*COLS,COLS,MPI_DOUBLE,dn_nbr,0,firstbuf,COLS,MPI_DOUBLE,up_nbr,MPI_ANY_TAG,MPI_COMM_WORLD,&status); MPI_Sendrecv(local_new+(subROWS)*COLS,COLS,MPI_DOUBLE,up_nbr,0,lastbuf,COLS,MPI_DOUBLE,dn_nbr,MPI_ANY_TAG,MPI_COMM_WORLD,&status); MPI_Barrier(MPI_COMM_WORLD); memmove(local_new,lastbuf,COLS*sizeof(double)); memmove(local_new+(subROWS+1)*COLS,firstbuf,COLS*sizeof(double)); //maybe things here memmove(local,local_new,COLS*(subROWS+2)*sizeof(double)); //printf("After send receive\n"); //printf_buffer(local_new,COLS,subROWS+2); //printf("Rank:%d over\n",rank); // count=count-1; } MPI_Send(local_new+1*COLS,(subROWS)*COLS,MPI_DOUBLE,0,(subROWS+2),MPI_COMM_WORLD); //receive relaxed data if (0==rank) for(int i=0;i<size;i++) { MPI_Probe(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status); //MPI_Recv(p_new+(status,MPI_SOURCE*subROWS+1)*COLS,(subROWS)*COLS,MPI_DOUBLE,status,MPI_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status); MPI_Recv(p_new+(status.MPI_SOURCE*subROWS+1)*COLS,(subROWS)*COLS,MPI_DOUBLE,status.MPI_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status); } if(0==rank) { printf("Final result:\n"); printf_buffer(p_new,COLS,ROWS+2); } free(local); free(local_new); free(p); free(p_new); free(firstbuf); free(lastbuf); MPI_Finalize(); return 0; }
int main(int argc, char **argv) { // char hostname[HOST_NAME_MAX]; // gethostname(hostname,HOST_NAME_MAX); // printf("Hostname: %s\n",hostname); int size, rank; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD,&rank); MPI_Comm_size(MPI_COMM_WORLD,&size); MPI_Status status; if (size > ROWS ) {puts("Too many tasks for size!"); exit(10);} int subROWS = ROWS/size; double *local,*local_new; double *p,*p_new; init_grid(&local,&local_new,COLS,subROWS+2); if (0 == rank) { init_grid(&p,&p_new,COLS,ROWS+2); init_boundaries(p,COLS,ROWS+2); memmove(p_new,p, COLS*(ROWS+2) * sizeof(double) ); print_buffer(p_new,COLS,ROWS+2); } /* Wait for everyone to get here. */ MPI_Barrier(MPI_COMM_WORLD); /* Distribute data */ if (0 == rank ) for (int j=0; j < size; j++) MPI_Send(p+j*subROWS*COLS, (subROWS+2)*COLS,MPI_DOUBLE,j,0,MPI_COMM_WORLD); MPI_Recv(local,(subROWS+2)*COLS,MPI_DOUBLE,0,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE); /* Do some work */ register int count = 1000; int up_nbr, dn_nbr ; double *firstbuf, *lastbuf; init_grid(&firstbuf,&lastbuf,COLS,1); while (count-- > 0) { relax(local_new,local,COLS,subROWS+2); memmove(lastbuf,local_new ,COLS*sizeof(double)); memmove(firstbuf,local_new + (subROWS+1)*COLS,COLS*sizeof(double)); up_nbr = (rank == size - 1) ? MPI_PROC_NULL : rank + 1; dn_nbr = (rank == 0) ? MPI_PROC_NULL : rank - 1; MPI_Sendrecv(local_new + (1)*COLS ,COLS,MPI_DOUBLE, dn_nbr,0,firstbuf,COLS,MPI_DOUBLE,up_nbr,MPI_ANY_TAG,MPI_COMM_WORLD,&status); MPI_Sendrecv(local_new+(subROWS)*COLS,COLS,MPI_DOUBLE,up_nbr,0,lastbuf,COLS,MPI_DOUBLE,dn_nbr,MPI_ANY_TAG,MPI_COMM_WORLD,&status); MPI_Barrier(MPI_COMM_WORLD); memmove(local_new ,lastbuf,COLS*sizeof(double)); memmove(local_new + (subROWS+1)*COLS,firstbuf,COLS*sizeof(double)); // /*For Debugging*/ memmove(local_new,local, COLS*(subROWS+2) * sizeof(double) ); // Copy updated grid into "present-time" grid. memmove(local,local_new,COLS*(subROWS+2)*sizeof(double)); } /* Send local buffers back to main process. 
*/ MPI_Send(local_new+1*COLS,(subROWS)*COLS,MPI_DOUBLE,0,(subROWS+2),MPI_COMM_WORLD); /* Main process receives local buffers. */ if ( 0 == rank ) for ( int i =0; i < size; i++) { MPI_Probe(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status); MPI_Recv(p_new+(status.MPI_SOURCE*subROWS+1)*COLS,(subROWS)*COLS,MPI_DOUBLE,status.MPI_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status); } // Print out the results. if ( 0 == rank ){ // /* Uncomment for debugging. */ print_buffer(p,COLS,ROWS+2); print_buffer(p_new,COLS,ROWS+2); if ( 0 == rank ) { free(p_new); free(p); } } free(local); free(local_new); MPI_Finalize(); return 0; }