self& operator()(int32_t& val)
{
    boundary_check(4);
    val = ntohl(*reinterpret_cast<const int32_t*>(cursor_));
    std::advance(cursor_, 4);
    return *this;
}
self& operator()(uint64_t& val)
{
    boundary_check(8);
    val = ntohll(*reinterpret_cast<const uint64_t*>(cursor_));
    std::advance(cursor_, 8);
    return *this;
}
template <typename t, size_t count>
self& operator()(std::array<t, count>& val)
{
    boundary_check(count * sizeof(t));
    for (size_t i(0); i < count; ++i)
        (*this)(val[i]);
    return *this;
}
self& operator()(double& val)
{
    boundary_check(8);
    uint64_t temp(ntohll(*reinterpret_cast<const uint64_t*>(cursor_)));
    // Reinterpret the byte-swapped bits as a double
    val = *reinterpret_cast<double*>(&temp);
    std::advance(cursor_, 8);
    return *this;
}
self& operator()(float& val)
{
    boundary_check(4);
    conversion c;
    c.integer = ntohl(*reinterpret_cast<const uint32_t*>(cursor_));
    val = c.real;
    std::advance(cursor_, 4);
    return *this;
}
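// Assumed elsewhere in the original source (sketch only): the float overload
// above relies on a type-punning union roughly of this shape.
//
//   union conversion { uint32_t integer; float real; };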
self& operator()(vector3<int8_t>& val)
{
    boundary_check(2);
    // Offsets within a chunk are stored in a more compact form
    uint16_t temp;
    (*this)(temp);
    val.x = temp % chunk_size;
    val.y = (temp / chunk_size) % chunk_size;
    val.z = (temp / chunk_area) % chunk_size;
    return *this;
}
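// Sketch only (not part of the original source): the decode above implies the
// encode side packs a chunk-local position into a single uint16_t, assuming
// chunk_area == chunk_size * chunk_size:
//
//   uint16_t temp = static_cast<uint16_t>(val.x
//                                         + val.y * chunk_size
//                                         + val.z * chunk_area);
//
// The three modulo/divide steps then recover x, y and z in that order.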
int main(int argc, char *argv[])
{
    int numtasks, taskid;
    int n = atoi(argv[1]);
    const int num_item = 2;
    int blocklengths[2] = {1, 1};
    int seed = clock();
    int i, j;
    clock_t endt, start;
    srand(seed);

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    if (n % numtasks != 0) {
        printf("number of points not divisible by number of processes\n");
        MPI_Finalize();
        return -1;
    }

    /* Describe the point layout to MPI so points can be scattered/gathered directly */
    MPI_Status status;
    MPI_Datatype types[2] = {MPI_INT, MPI_INT};
    MPI_Datatype mpi_point_type;
    MPI_Aint offsets[2];
    offsets[0] = offsetof(point, x);
    offsets[1] = offsetof(point, y);
    MPI_Type_create_struct(num_item, blocklengths, offsets, types, &mpi_point_type);
    MPI_Type_commit(&mpi_point_type);

    point s[n];
    struct point_struct *p_x = (struct point_struct*)malloc(n * sizeof(struct point_struct));
    double *dist_closest_pair = (double*)malloc(numtasks * sizeof(double));
    int offset[numtasks], share_len = (n / numtasks);
    offset[taskid] = taskid * share_len;

    if (taskid == MASTER) {
        /* Generate the random input points and pre-process them with b_s_x before distributing */
        for (i = 0; i < n; i++) {
            s[i].x = rand() % 1000;
            s[i].y = rand() % 1000;
        }
        for (i = 0; i < n; i++) {
            p_x[i].x = s[i].x;
            p_x[i].y = s[i].y;
        }
        start = clock();
        b_s_x(n, p_x);
    } // MASTER

    /* Distribute a contiguous share of the points to each process */
    MPI_Scatter(&p_x[0], share_len, mpi_point_type,
                &p_x[offset[taskid]], share_len, mpi_point_type,
                MASTER, MPI_COMM_WORLD);

    /* Each process solves the closest-pair problem on its own share */
    for (i = 0; i < numtasks; i++) {
        if (taskid == i) {
            dist_closest_pair[taskid] = Closest_Pair(taskid, offset[taskid],
                                                     offset[taskid] + share_len - 1,
                                                     share_len, p_x);
        }
    }

    MPI_Gather(&dist_closest_pair[taskid], 1, MPI_DOUBLE,
               &dist_closest_pair[taskid], 1, MPI_DOUBLE,
               MASTER, MPI_COMM_WORLD);

    if (taskid == MASTER) {
        point p_y[2 * share_len];
        int x[numtasks - 1];
        for (i = 0; i < numtasks - 1; i++) {
            x[i] = (i * share_len) + share_len;
        }
        double d_boundary[numtasks - 1], d_min_proc = dist_closest_pair[0];
        for (i = 1; i < numtasks; i++) {
            if (d_min_proc > dist_closest_pair[i])
                d_min_proc = dist_closest_pair[i];
        }
        /* Also check pairs that straddle the boundary between adjacent shares */
        for (i = 0; i < numtasks - 1; i++) {
            for (j = x[i] - share_len; j < x[i] + share_len; j++) {
                p_y[j] = p_x[j];
            }
            b_s_y(2 * share_len, p_y);
            d_boundary[i] = boundary_check(x[i] - share_len, 2 * share_len, p_y, x[i], d_min_proc);
        }
        double D_min = d_min_proc;
        for (i = 0; i < numtasks - 1; i++) {
            if (d_boundary[i] < D_min)
                D_min = d_boundary[i];
        }
        printf("\n minimum distance is : %f.\n", D_min);
    }

    MPI_Finalize();
    return 0;
}
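/*
 * Assumed context for the listing above (not shown here): the point type, the
 * MASTER rank and the helper routines are declared elsewhere. A rough sketch,
 * inferred from the call sites, might look like this; names and signatures are
 * assumptions, not the original declarations.
 */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <time.h>

#define MASTER 0

typedef struct point_struct { int x; int y; } point;   /* matches offsetof(point, x) / offsetof(point, y) */

double Closest_Pair(int taskid, int lo, int hi, int len, point *p);     /* per-share closest pair */
double boundary_check(int lo, int len, point *p, int mid, double d);    /* strip check around a boundary */
void b_s_x(int n, point *p);                                            /* order the points (by x) */
void b_s_y(int n, point *p);                                            /* order the strip points (by y) */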
template <typename t>
self& operator()(vector3<t>& val)
{
    boundary_check(3 * sizeof(t));
    return (*this)(val.x)(val.y)(val.z);
}
self& operator()(direction_type& val)
{
    boundary_check(1);
    val = static_cast<direction_type>(*cursor_++);
    return *this;
}
self& operator()(unsigned char& val)
{
    boundary_check(1);
    val = *cursor_++;
    return *this;
}
self& operator()(signed char& val) // Looks dumb, but it is needed.
{
    boundary_check(1);
    val = *cursor_++;
    return *this;
}
self& operator()(bool& val)
{
    boundary_check(1);
    val = ((*cursor_++) != 0);
    return *this;
}
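// Illustrative sketch only, not the original class: a stripped-down reader
// demonstrating the same chained operator() style and big-endian decoding as
// the overloads above. Assumes a POSIX environment providing <arpa/inet.h>.
#include <arpa/inet.h>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <stdexcept>

struct mini_reader
{
    const unsigned char* cursor_;
    const unsigned char* end_;

    // Throw if fewer than n bytes remain in the buffer
    void boundary_check(size_t n) const
    {
        if (static_cast<size_t>(end_ - cursor_) < n)
            throw std::out_of_range("read past end of buffer");
    }

    mini_reader& operator()(uint32_t& val)
    {
        boundary_check(4);
        uint32_t raw;
        std::memcpy(&raw, cursor_, 4);   // memcpy avoids unaligned reinterpret_cast reads
        val = ntohl(raw);                // network byte order -> host byte order
        cursor_ += 4;
        return *this;
    }

    mini_reader& operator()(uint8_t& val)
    {
        boundary_check(1);
        val = *cursor_++;
        return *this;
    }
};

int main()
{
    const unsigned char buf[] = {0x00, 0x00, 0x00, 0x2A, 0x07};
    mini_reader r{buf, buf + sizeof(buf)};
    uint32_t a;
    uint8_t b;
    r(a)(b);                                                 // chained reads, as in the class above
    std::cout << a << " " << static_cast<int>(b) << "\n";    // prints "42 7"
}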