Example #1
void myMxM(Matrix A, Matrix v, Matrix u)
{
  Matrix temp = createMatrix(A->rows, v->cols);
#pragma omp parallel
  {
    // each thread works on its own block of columns of v
    int* displ, *cols;
    splitVector(v->cols, num_threads(), &cols, &displ);
    MxM2(A, v, temp, displ[get_thread()], cols[get_thread()],
         displ[get_thread()], 1.0, 0.0);
    free(cols);
    free(displ);
  }
#ifdef HAVE_MPI
  // sum the partial products onto the rank that owns each row block
  for (int i=0;i<v->as_vec->comm_size;++i) {
    Matrix t = subMatrix(temp, v->as_vec->displ[i]/v->cols,
                         v->as_vec->sizes[i]/v->cols, 0, v->cols);
    MPI_Reduce(t->data[0], u->data[0], v->as_vec->sizes[i],
               MPI_DOUBLE, MPI_SUM, i, *v->as_vec->comm);
    freeMatrix(t);
  }
#else
  memcpy(u->data[0], temp->data[0], u->as_vec->len*sizeof(double));
#endif
  freeMatrix(temp);
}
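The MPI/OpenMP examples on this page all depend on the same splitVector(globLen, parts, &sizes, &displ) helper, whose definition is not shown. A minimal sketch, assuming it performs an even block decomposition (sizes[p] is owner p's local length, displ[p] its global offset), which is consistent with every call site here:

/* assumed helper: even block split of globLen items over parts owners;
   the first globLen % parts owners get one extra item */
void splitVector(int globLen, int parts, int** sizes, int** displ)
{
  *sizes = (int*)malloc(parts*sizeof(int));
  *displ = (int*)malloc(parts*sizeof(int));
  int offset = 0;
  for (int p = 0; p < parts; ++p) {
    (*sizes)[p] = globLen/parts + (p < globLen % parts ? 1 : 0);
    (*displ)[p] = offset;
    offset += (*sizes)[p];
  }
}

The callers free the two arrays themselves, so the helper only allocates.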
Example #2
int main(int argc, char** argv)
{
  int i, j, N;
  Matrix b;
  int rank, size;
  int mpi_top_coords[2];
  int mpi_top_sizes[2];

  init_app(argc, argv, &rank, &size);

  if (argc < 2) {
    printf("need one parameter, the matrix size\n");
    close_app();
    return 1;
  }
  N=atoi(argv[1]);

  // setup topology
  mpi_top_sizes[0] = mpi_top_sizes[1] = 0;
  MPI_Dims_create(size, 2, mpi_top_sizes);
  int periodic[2] = {0, 0};
  MPI_Comm comm;
  MPI_Cart_create(MPI_COMM_WORLD, 2, mpi_top_sizes, periodic, 0, &comm);
  MPI_Cart_coords(comm, rank, 2, mpi_top_coords);

  int* size1;
  int* displ1;
  int* size2;
  int* displ2;
  splitVector(N, mpi_top_sizes[0], &size1, &displ1);
  splitVector(N, mpi_top_sizes[1], &size2, &displ2);

  b = createMatrix(size1[mpi_top_coords[0]], size2[mpi_top_coords[1]]);
  for (j=0;j<b->cols;++j)
    for(i=0;i<b->rows;++i)
      b->data[j][i] = (j+displ2[mpi_top_coords[1]])*N+1+(i+displ1[mpi_top_coords[0]]);
  b->glob_rows = N;
  b->glob_cols = N;
  b->as_vec->comm = &comm;

  saveMatrixMPI(b, "meh.asc");

  freeMatrix(b);
  free(size1);
  free(displ1);
  free(size2);
  free(displ2);
  MPI_Comm_free(&comm);

  close_app();
  return 0;
}
Example #3
ColumnMatrix createColumnMatrixMPI(int matrixSize, MPI_Comm* comm)
{
	ColumnMatrix result = (ColumnMatrix)malloc(sizeof(columnMatrix_t));
	result->comm = comm;
	MPI_Comm_size(*comm,&result->commSize);
	MPI_Comm_rank(*comm,&result->commRank);
	splitVector(matrixSize, result->commSize,&result->blockSize,&result->displacement);
	result->localSize = result->blockSize[result->commRank];
	result->data = malloc(result->localSize*matrixSize*sizeof(double));
	result->globalSize = matrixSize;
	
	return result;
}
Example #4
Vector createVectorMPI(int length, MPI_Comm* comm)
{
	Vector result = (Vector)malloc(sizeof(vector_t));
	result->comm = comm;
	MPI_Comm_size(*comm,&result->commSize);
	MPI_Comm_rank(*comm,&result->commRank);
	splitVector(length, result->commSize,&result->blockSize,&result->displacement);
	result->localSize = result->blockSize[result->commRank];
	result->data = malloc(result->localSize*sizeof(double));
	result->globalSize = length;
	
	return result;
}
Example #5
int main(int argc, char** argv)
{
  int rank, size;
  init_app(argc, argv, &rank, &size);

  if (argc < 3) {
    printf("need two parameters, the matrix size and the number of vectors\n");
    close_app();
    return 1;
  }
  int N=atoi(argv[1]);
  int K=atoi(argv[2]);

  Matrix A = createMatrix(N,N);
  // identity matrix
  for (int i=0;i<N;++i)
    A->data[i][i] = 1.0;
  
  int *displ, *cols;
  splitVector(K, size, &cols, &displ);
  Matrix v = createMatrix(N,cols[rank]);
  // fill with column number
  for (int i=0;i<cols[rank];++i)
    for (int j=0;j<N;++j)
      v->data[i][j] = i+displ[rank];

  double time = WallTime();
  double sum = dosum(A,v);

  if (rank == 0) {
    printf("sum: %f\n", sum);
    printf("elapsed: %f\n", WallTime()-time);
  }

  char s[128];
  sprintf(s,"vec-%i.asc", rank);
  saveVectorSerial(s, v->as_vec);
  sprintf(s,"mat-%i.asc", rank);
  saveMatrixSerial(s, v);

  sprintf(s,"vec.asc");
  saveVectorMPI(s, v->as_vec);

  freeMatrix(v);
  freeMatrix(A);
  free(displ);
  free(cols);

  close_app();
  return 0;
}
Example #6
Vector createVectorMPI(int globLen, int allocdata, MPI_Comm* comm)
{
  Vector result = (Vector)calloc(1, sizeof(vector_t));
  result->comm = comm;
  MPI_Comm_size(*comm, &result->comm_size);
  MPI_Comm_rank(*comm, &result->comm_rank);
  splitVector(globLen, result->comm_size, &result->sizes, &result->displ);
  result->len = result->sizes[result->comm_rank];
  if (allocdata)
    result->data = calloc(result->len, sizeof(double));
  else
    result->data = NULL;
  result->globLen = globLen;

  return result;
}
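A hypothetical usage sketch for this constructor (field names follow the code above; the MPI calls are standard, and freeVector is assumed to release the vector and its index arrays):

MPI_Comm comm = MPI_COMM_WORLD;
Vector v = createVectorMPI(1000, 1, &comm);

/* fill the local block with its global indices */
for (int i = 0; i < v->len; ++i)
  v->data[i] = (double)(v->displ[v->comm_rank] + i);

/* global sum of all entries */
double local = 0.0, total = 0.0;
for (int i = 0; i < v->len; ++i)
  local += v->data[i];
MPI_Allreduce(&local, &total, 1, MPI_DOUBLE, MPI_SUM, *v->comm);

freeVector(v);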
Example #7
int main(int argc, char** argv)
{
  int i, j, N, K;
  Matrix A, v;
  double time, sum;
  int rank, size;
  int *displ, *cols;

  init_app(argc, argv, &rank, &size);

  if (argc < 3) {
    printf("need two parameters, the matrix size and the number of vectors\n");
    close_app();
    return 1;
  }
  N=atoi(argv[1]);
  K=atoi(argv[2]);

  A = createMatrix(N,N);
  // identity matrix
  for (i=0;i<N;++i)
    A->data[i][i] = 1.0;
  
  splitVector(K, size, &cols, &displ);
  v = createMatrix(N,cols[rank]);
  // fill with column number
  for (i=0;i<cols[rank];++i)
    for (j=0;j<N;++j)
      v->data[i][j] = i+displ[rank];

  time = WallTime();
  sum = dosum(A,v);

  if (rank == 0) {
    printf("sum: %f\n", sum);
    printf("elapsed: %f\n", WallTime()-time);
  }

  freeMatrix(v);
  freeMatrix(A);
  free(displ);
  free(cols);

  close_app();
  return 0;
}
Example #8
int main(int argc, char** argv)
{
  int size = 1, rank = 0; // defaults for a build without MPI
  #ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  #endif

  if ((size & (size-1)) != 0) {
    printf("Number of processes must be a power of two\n");
    #ifdef HAVE_MPI
    MPI_Finalize();
    #endif
    return 1;
  }

  double time = WallTime();
  double Sn=(M_PI*M_PI)/6;
  double sum=0;

  for (int i = 4; i < 15; ++i)
  {
    int n = 1 << i; // 2^i; avoids the rounding of pow()
    int *startIndex, *len;
    splitVector(n, size, &len, &startIndex);
    Vector vec = genVector(startIndex[rank],startIndex[rank]+len[rank]);
    sum = doSum(vec);
    freeVector(vec);
    free(startIndex);
    free(len);

    #ifdef HAVE_MPI
    double s2=sum;
    MPI_Reduce(&s2, &sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    #endif

    if (rank == 0)
    {
      printf("Diff (n=%d) = %f,",n, sum-Sn);
      printf(" Elapsed: %fs\n", WallTime()-time);
    }
  }

  #ifdef HAVE_MPI
  MPI_Finalize();
  #endif
  return 0;
}
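The helpers here are assumed to build the Basel-problem terms that the loop compares against Sn=(M_PI*M_PI)/6. A sketch under that assumption (genVector fills the half-open global index range [start, end) with 1/k^2; both bodies are this page's guess, not the original source):

Vector genVector(int start, int end)
{
  Vector v = createVector(end - start);
  for (int i = 0; i < end - start; ++i) {
    double k = (double)(start + i + 1); /* 1-based term index */
    v->data[i] = 1.0/(k*k);
  }
  return v;
}

double doSum(Vector v)
{
  double sum = 0.0;
  for (int i = 0; i < v->len; ++i)
    sum += v->data[i];
  return sum;
}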
Example #9
void SortEngine::startChildren()
{
  //Split the vector between the two children
  QVector<unsigned int> childOneVector, childTwoVector;
  splitVector(childOneVector, childTwoVector);
  
  //Calling first child
  _createPipe(_fdReadSon1);
  _createPipe(_fdWriteSon1);
  _saveQVectorToPipe(_fdReadSon1[1], childOneVector);
  callChild(_fdReadSon1,_fdWriteSon1);

  //Calling second child
  _createPipe(_fdReadSon2);
  _createPipe(_fdWriteSon2);
  _saveQVectorToPipe(_fdReadSon2[1], childTwoVector);
  callChild(_fdReadSon2,_fdWriteSon2);
}
Example #10
File: main.cpp Project: yuriks/Pong
void collideBallWithPaddle(Gem& ball, const Paddle& paddle) {
	SpriteMatrix matrix = paddle.getSpriteMatrix();

	// Left sphere
	vec2 left = {-24, 0};
	// Right sphere
	vec2 right = {24, 0};

	matrix.transform(&left.x, &left.y);
	matrix.transform(&right.x, &right.y);

	static const int PADDLE_RADIUS = 8;

	fixed24_8 rel_ball_x = ball.pos_x - paddle.pos_x;
	fixed24_8 rel_ball_y = ball.pos_y - paddle.pos_y;
	vec2 rel_ball = {rel_ball_x.toFloat(), rel_ball_y.toFloat()};

	vec2 nearest_point = pointLineSegmentNearestPoint(rel_ball, left, right);
	vec2 penetration = rel_ball - nearest_point;
	float d_sqr = length_sqr(penetration);
	float r = PADDLE_RADIUS + Gem::RADIUS;
	if (d_sqr < r*r) {
		vec2 vel = {ball.vel_x.toFloat(), ball.vel_y.toFloat()};
		int score_addition = static_cast<int>(ball.score_value * (ball.vel_y.toFloat() / 128.f));
		ball.score_value = std::min(ball.score_value + std::max(score_addition, 0), Gem::MAX_VALUE);

		float d = std::sqrt(d_sqr);
		float sz = r - d;

		vec2 normal = penetration / d;
		fixed24_8 push_back_x(sz * normal.x);
		fixed24_8 push_back_y(sz * normal.y);

		ball.pos_x += push_back_x;
		ball.pos_y += push_back_y;

		vec2 par, perp;
		splitVector(vel, normal, &par, &perp);
		vel = perp - par;

		ball.vel_x = fixed16_16(vel.x);
		ball.vel_y = fixed16_16(vel.y);
	}
}
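This example and Example #14 call a different splitVector overload that decomposes a velocity into components parallel and perpendicular to a collision normal. A minimal sketch of that decomposition, assuming vec2 is a plain {x, y} pair and the normal has unit length (the project's actual definition may differ):

typedef struct { float x, y; } vec2;

void splitVector(vec2 v, vec2 normal, vec2* par, vec2* perp)
{
	/* signed projection of v onto the unit normal */
	float along = v.x*normal.x + v.y*normal.y;
	par->x = normal.x * along;
	par->y = normal.y * along;
	/* the remainder is perpendicular to the normal */
	perp->x = v.x - par->x;
	perp->y = v.y - par->y;
}

Negating the parallel part (vel = perp - par, as above) then gives the bounce off the paddle surface.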
Example #11
void GS(Matrix u, double tolerance, int maxit)
{
  int it=0;
  Matrix b = cloneMatrix(u);
  Matrix e = cloneMatrix(u);
  Matrix v = cloneMatrix(u);
  int* sizes, *displ;
  splitVector(u->rows-2, 2*max_threads(), &sizes, &displ);
  copyVector(b->as_vec, u->as_vec);
  fillVector(u->as_vec, 0.0);
  double max = tolerance+1;
  while (max > tolerance && ++it < maxit) {
    copyVector(e->as_vec, u->as_vec);
    copyVector(u->as_vec, b->as_vec);
    for (int color=0;color<2;++color) { // red-black (two-color) sweep
      for (int i=1;i<u->cols-1;++i) {
#pragma omp parallel
        {
          int cnt=displ[get_thread()*2+color]+1;
          for (int j=0;j<sizes[get_thread()*2+color];++j, ++cnt) {
            u->data[i][cnt] += v->data[i][cnt-1];
            u->data[i][cnt] += v->data[i][cnt+1];
            u->data[i][cnt] += v->data[i-1][cnt];
            u->data[i][cnt] += v->data[i+1][cnt];
            u->data[i][cnt] /= 4.0;
            v->data[i][cnt] = u->data[i][cnt];
          }
        }
      }
    }
    axpy(e->as_vec, u->as_vec, -1.0);
    max = sqrt(innerproduct(e->as_vec, e->as_vec));
  }
  printf("number of iterations %i %f\n", it, max);
  freeMatrix(b);
  freeMatrix(e);
  freeMatrix(v);
  free(sizes);
  free(displ);
}
Example #12
int main(int argc, char** argv)
{
  int rank, size;
  init_app(argc, argv, &rank, &size);

  if (argc < 3) {
    printf("need two parameters, the matrix size and the number of vectors\n");
    close_app();
    return 1;
  }
  int N=atoi(argv[1]);
  int K=atoi(argv[2]);

  Matrix A = createMatrixMPI(N, -1, N, N, &WorldComm);
  // identity matrix
  for (int i=0;i<A->cols;++i)
    A->data[i][i] = 1.0;
  
  Matrix v = createMatrixMPI(-1, K, N, K, &WorldComm);
  // fill with column number
  for (int i=0;i<v->rows;++i)
    for (int j=0;j<v->cols;++j)
      v->data[j][i] = j;

  double time = WallTime();
  double sum = dosum(A,v);

  if (rank == 0) {
    printf("sum: %f\n", sum);
    printf("elapsed: %f\n", WallTime()-time);
  }

  freeMatrix(v);
  freeMatrix(A);
  close_app();
  return 0;
}
Example #13
void runPoisson(int rank, int size, int n){
  double time=MPI_Wtime();
  Real **b, *diag, *RecvBuf = NULL, h;
  int i, j, m, nn, *len, *disp;

  m  = n-1;
  nn = 4*n;
  splitVector(m, size, &len, &disp);
  diag = createRealArray (m);
  b    = createReal2DArray (len[rank],m);
  h    = 1./(Real)n;

  #pragma omp parallel for schedule(static)
  for (i=0; i < m; i++) {
    diag[i] = 2.*(1.-cos((i+1)*M_PI/(Real)n));
  }

  #pragma omp parallel for schedule(static) private(i)
  for (j=0; j < len[rank]; j++) {
    Real x = (Real)(j+1+disp[rank])/n;
    for (i=0; i < m; i++) {
      Real y = (Real)(i+1)/n;
      b[j][i] = h*h * funcf(x,y);
    }
  }

  #pragma omp parallel for schedule(static)
  for (j=0; j < len[rank]; j++) {
    Real* zt = createRealArray (nn);
    fst_(b[j], &n, zt, &nn);
    free(zt);
  }

  transpose(b, size, len, disp, rank, m);

  #pragma omp parallel for schedule(static)
  for (i=0; i < len[rank]; i++) {
    Real* zt  = createRealArray (nn);
    fstinv_(b[i], &n, zt, &nn);
    free(zt);
  }

  #pragma omp parallel for schedule(static) private(i)
  for (j=0; j < len[rank]; j++) {
    for (i=0; i < m; i++) {
      b[j][i] = b[j][i]/(diag[i]+diag[j+disp[rank]]);
    }
  }

  #pragma omp parallel for schedule(static)
  for (i=0; i < len[rank]; i++) {
    Real* zt  = createRealArray (nn);
    fst_(b[i], &n, zt, &nn);
    free(zt);
  }

  transpose(b, size, len, disp, rank, m);

  #pragma omp parallel for schedule(static)
  for (j=0; j < len[rank]; j++) {
    Real* zt  = createRealArray (nn);
    fstinv_(b[j], &n, zt, &nn);
    free(zt);
  }

  if (rank==0)
  {
    RecvBuf = createRealArray (m*m);
  }
  gatherMatrix(b, m, RecvBuf, len, disp,0);

  if (rank==0)
  {
    for (int j=0; j < m; j++) {
      for (int i=0; i < m; i++) {
        printf("%e %e %e \n",(Real)i/m,(Real)j/m,RecvBuf[j*m+i] );
      }
    }
    printf("elapsed: %f\n", MPI_Wtime()-time);
    free(RecvBuf);
  }
  free(diag);
  free(len);
  free(disp);
}
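The transpose(b, size, len, disp, rank, m) call above is the only communication-heavy step, and its body is not shown. A hedged sketch of one common implementation, an MPI_Alltoallv-based distributed transpose, assuming Real is double, each rank owns len[rank] consecutive rows of the m-by-m matrix, and the communicator is MPI_COMM_WORLD (the packing layout is this sketch's assumption, not the project's actual code):

void transpose(Real** b, int size, int* len, int* disp, int rank, int m)
{
  int i, j, p, ofs;
  int nlocal = len[rank];
  Real* sendbuf = malloc(nlocal*m*sizeof(Real));
  Real* recvbuf = malloc(nlocal*m*sizeof(Real));
  int* count  = malloc(size*sizeof(int));
  int* displs = malloc(size*sizeof(int));

  /* block (rank,p) and block (p,rank) have the same size, so send
     and receive counts/displacements coincide */
  ofs = 0;
  for (p = 0; p < size; ++p) {
    count[p]  = nlocal*len[p];
    displs[p] = ofs;
    ofs += count[p];
  }

  /* pack: for each destination p, our rows crossed with p's row range */
  ofs = 0;
  for (p = 0; p < size; ++p)
    for (j = 0; j < nlocal; ++j)
      for (i = 0; i < len[p]; ++i)
        sendbuf[ofs++] = b[j][disp[p]+i];

  MPI_Alltoallv(sendbuf, count, displs, MPI_DOUBLE,
                recvbuf, count, displs, MPI_DOUBLE, MPI_COMM_WORLD);

  /* unpack: the element from p's row disp[p]+j and our column
     disp[rank]+i lands, transposed, in our row i, column disp[p]+j */
  ofs = 0;
  for (p = 0; p < size; ++p)
    for (j = 0; j < len[p]; ++j)
      for (i = 0; i < nlocal; ++i)
        b[i][disp[p]+j] = recvbuf[ofs++];

  free(sendbuf); free(recvbuf); free(count); free(displs);
}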
Example #14
File: main.cpp Project: yuriks/Pong
void collideBallWithBall(Gem& a, Gem& b) {
	vec2 dv = {(a.pos_x - b.pos_x).toFloat(), (a.pos_y - b.pos_y).toFloat()};
	float d_sqr = length_sqr(dv);

	if (d_sqr < (2*Gem::RADIUS)*(2*Gem::RADIUS)) {
		fixed16_16 rel_vel_x = a.vel_x - b.vel_x;
		fixed16_16 rel_vel_y = a.vel_y - b.vel_y;
		vec2 rel_vel = {rel_vel_x.toFloat(), rel_vel_y.toFloat()};
		float rel_speed_sqr = length_sqr(rel_vel);
		
		if (rel_speed_sqr >= Gem::MERGE_SPEED*Gem::MERGE_SPEED) {
			fixed32_0 two(2);
			a.pos_x = (a.pos_x + b.pos_x) / two;
			a.pos_y = (a.pos_y + b.pos_y) / two;

			a.vel_x = a.vel_x + b.vel_x;
			a.vel_y = a.vel_y + b.vel_y;

			a.score_value += b.score_value;

			b.pos_x = -9999;
			b.pos_y = 9999;
			b.vel_x = b.vel_y = 0;
			b.score_value = 0;
		} else {
			float d = std::sqrt(d_sqr);
			float sz = Gem::RADIUS - d / 2.0f;

			vec2 normal = dv / d;
			fixed24_8 push_back_x(sz * normal.x);
			fixed24_8 push_back_y(sz * normal.y);

			a.pos_x += push_back_x;
			a.pos_y += push_back_y;
			b.pos_x -= push_back_x;
			b.pos_y -= push_back_y;

			vec2 a_par, a_perp;
			vec2 b_par, b_perp;

			vec2 a_vel = {a.vel_x.toFloat(), a.vel_y.toFloat()};
			vec2 b_vel = {b.vel_x.toFloat(), b.vel_y.toFloat()};
			splitVector(a_vel, normal, &a_par, &a_perp);
			splitVector(b_vel, -normal, &b_par, &b_perp);

			static const float friction = 1.0f;
			static const float bounce = 0.9f;

			float A = (1.0f + bounce) / 2.0f;
			float B = (1.0f - bounce) / 2.0f;

			a_vel = A*b_par + B*a_par + friction*a_perp;
			b_vel = A*a_par + B*b_par + friction*b_perp;

			a.vel_x = fixed16_16(a_vel.x);
			a.vel_y = fixed16_16(a_vel.y);

			b.vel_x = fixed16_16(b_vel.x);
			b.vel_y = fixed16_16(b_vel.y);
		}
	}
}
Example #15
File: main.c Project: raymondt8/ex4
int main(int argc,char** argv)
{
	/////////////////////////////
	// Configurable settings for the program
	bool enableOpenMP = 0;
	bool enableMPI = 0;
	int vectorSize = 256;
	/////////////////////////////
	if (argc < 2) {
		printf("usage: %s <k>, where the vector length is 2^k\n", argv[0]);
		return 1;
	}
	int k=atoi(argv[1]);
	//for(int k=3;k<=14;k++){
	vectorSize = 1 << k;	// shift, not 2^k: ^ is XOR in C
	if(enableOpenMP) omp_set_num_threads(4);

	if(enableMPI)
	{
		int rank , mpiSize;
		MPI_Init (&argc , &argv);
		MPI_Comm_size(MPI_COMM_WORLD , &mpiSize);
		MPI_Comm_rank(MPI_COMM_WORLD , &rank);
		
		struct Vector* globalVector = newVector();
		struct Vector* localVector = newVector();
		double globalSum =0.0, localSum =0.0;	
		double endTime = 0.0, startTime = 0.0;
		int *localSize, *displ;
		
		splitVector(vectorSize,mpiSize,&localSize,&displ);	
		allocVector(localSize[rank], localVector);
		
		if(rank ==0)
		{
			startTime = MPI_Wtime();
			initVector(vectorSize,globalVector,enableOpenMP);
		}

		MPI_Scatterv(globalVector->data,localSize,displ,MPI_DOUBLE,localVector->data,localSize[rank],MPI_DOUBLE, 0,MPI_COMM_WORLD);

		localSum = vectorSum(localVector,enableOpenMP);
		
		MPI_Reduce(&localSum,&globalSum,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
	
		if(rank==0)
		{
			double diff = fabs((PI*PI)/6-globalSum);
			endTime = MPI_Wtime();
			
			printf("Difference MPI |S-Sn| = %f \n",diff);
			printf("Walltime MPI = %f \n",endTime - startTime);
			
			freeVector(globalVector);
		}

		if(rank!=0) free(globalVector);

		freeVector(localVector);
		free(localSize);
		free(displ);

		MPI_Finalize();
	
	}else
	{
		struct timeval* startTime = malloc(sizeof(struct timeval));
		struct timeval* endTime = malloc(sizeof(struct timeval));
		struct Vector* globalVector = newVector();
		
		gettimeofday(startTime,0);

		initVector(vectorSize,globalVector,enableOpenMP);
	
		double sumSn = vectorSum(globalVector, enableOpenMP);
		double diff = fabs((PI*PI)/6-sumSn);
	
		gettimeofday(endTime,0);
	
		printf("Difference serial |S-Sn| = %f \n",diff);
		printf("Walltime serial = %f\n",(endTime->tv_sec+endTime->tv_usec)/1.0e6 -(startTime->tv_sec+startTime->tv_usec)/1.0e6);
		
		freeVector(globalVector);
		free(startTime); free(endTime);
	}
	//}
	return 0;
}
Example #16
void ChunkSplitter::_runAutosplit(const NamespaceString& nss,
                                  const BSONObj& min,
                                  const BSONObj& max,
                                  long dataWritten) {
    if (!_isPrimary) {
        return;
    }

    try {
        const auto opCtx = cc().makeOperationContext();
        const auto routingInfo = uassertStatusOK(
            Grid::get(opCtx.get())->catalogCache()->getCollectionRoutingInfo(opCtx.get(), nss));

        uassert(ErrorCodes::NamespaceNotSharded,
                "Could not split chunk. Collection is no longer sharded",
                routingInfo.cm());

        const auto cm = routingInfo.cm();
        const auto chunk = cm->findIntersectingChunkWithSimpleCollation(min);

        // Stop if chunk's range differs from the range we were expecting to split.
        if ((0 != chunk.getMin().woCompare(min)) || (0 != chunk.getMax().woCompare(max)) ||
            (chunk.getShardId() != ShardingState::get(opCtx.get())->getShardName())) {
            LOG(1) << "Cannot auto-split chunk with range '"
                   << redact(ChunkRange(min, max).toString()) << "' for nss '" << nss
                   << "' on shard '" << ShardingState::get(opCtx.get())->getShardName()
                   << "' because since scheduling auto-split the chunk has been changed to '"
                   << redact(chunk.toString()) << "'";
            return;
        }

        const ChunkRange chunkRange(chunk.getMin(), chunk.getMax());

        const auto balancerConfig = Grid::get(opCtx.get())->getBalancerConfiguration();
        // Ensure we have the most up-to-date balancer configuration
        uassertStatusOK(balancerConfig->refreshAndCheck(opCtx.get()));

        if (!balancerConfig->getShouldAutoSplit()) {
            return;
        }

        const uint64_t maxChunkSizeBytes = balancerConfig->getMaxChunkSizeBytes();

        LOG(1) << "about to initiate autosplit: " << redact(chunk.toString())
               << " dataWritten since last check: " << dataWritten
               << " maxChunkSizeBytes: " << maxChunkSizeBytes;

        auto splitPoints = uassertStatusOK(splitVector(opCtx.get(),
                                                       nss,
                                                       cm->getShardKeyPattern().toBSON(),
                                                       chunk.getMin(),
                                                       chunk.getMax(),
                                                       false,
                                                       boost::none,
                                                       boost::none,
                                                       boost::none,
                                                       maxChunkSizeBytes));

        if (splitPoints.size() <= 1) {
            // No split points means there isn't enough data to split on; 1 split point means we
            // have between half the chunk size to full chunk size so there is no need to split yet
            return;
        }

        // We assume that if the chunk being split is the first (or last) one on the collection,
        // this chunk is likely to see more insertions. Instead of splitting mid-chunk, we use the
        // very first (or last) key as a split point.
        //
        // This heuristic is skipped for "special" shard key patterns that are not likely to produce
        // monotonically increasing or decreasing values (e.g. hashed shard keys).

        // Keeps track of the minKey of the top chunk after the split so we can migrate the chunk.
        BSONObj topChunkMinKey;

        if (KeyPattern::isOrderedKeyPattern(cm->getShardKeyPattern().toBSON())) {
            if (0 ==
                cm->getShardKeyPattern().getKeyPattern().globalMin().woCompare(chunk.getMin())) {
                // MinKey is infinity (This is the first chunk on the collection)
                BSONObj key =
                    findExtremeKeyForShard(opCtx.get(), nss, cm->getShardKeyPattern(), true);
                if (!key.isEmpty()) {
                    splitPoints.front() = key.getOwned();
                    topChunkMinKey = cm->getShardKeyPattern().getKeyPattern().globalMin();
                }
            } else if (0 ==
                       cm->getShardKeyPattern().getKeyPattern().globalMax().woCompare(
                           chunk.getMax())) {
                // MaxKey is infinity (This is the last chunk on the collection)
                BSONObj key =
                    findExtremeKeyForShard(opCtx.get(), nss, cm->getShardKeyPattern(), false);
                if (!key.isEmpty()) {
                    splitPoints.back() = key.getOwned();
                    topChunkMinKey = key.getOwned();
                }
            }
        }

        uassertStatusOK(splitChunkAtMultiplePoints(opCtx.get(),
                                                   chunk.getShardId(),
                                                   nss,
                                                   cm->getShardKeyPattern(),
                                                   cm->getVersion(),
                                                   chunkRange,
                                                   splitPoints));

        const bool shouldBalance = isAutoBalanceEnabled(opCtx.get(), nss, balancerConfig);

        log() << "autosplitted " << nss << " chunk: " << redact(chunk.toString()) << " into "
              << (splitPoints.size() + 1) << " parts (maxChunkSizeBytes " << maxChunkSizeBytes
              << ")"
              << (topChunkMinKey.isEmpty() ? "" : " (top chunk migration suggested" +
                          (std::string)(shouldBalance ? ")" : ", but no migrations allowed)"));

        // Balance the resulting chunks if the autobalance option is enabled and if we split at the
        // first or last chunk on the collection as part of top chunk optimization.

        if (!shouldBalance || topChunkMinKey.isEmpty()) {
            return;
        }

        // Tries to move the top chunk out of the shard to prevent the hot spot from staying on a
        // single shard. This is based on the assumption that succeeding inserts will fall on the
        // top chunk.
        moveChunk(opCtx.get(), nss, topChunkMinKey);
    } catch (const DBException& ex) {
        log() << "Unable to auto-split chunk " << redact(ChunkRange(min, max).toString())
              << " in nss " << nss << causedBy(redact(ex.toStatus()));
    } catch (const std::exception& e) {
        log() << "caught exception while splitting chunk: " << redact(e.what());
    }
}
Example #17
int main(int argc, char** argv)
{
  int rank, size;
  init_app(argc, argv, &rank, &size);

  if (argc < 2) {
    printf("usage: %s <N> [L]\n",argv[0]);
    close_app();
    return 1;
  }

  /* the total number of grid points in each spatial direction is (N+1) */
  /* the total number of degrees-of-freedom in each spatial direction is (N-1) */
  int N  = atoi(argv[1]);
  int M  = N-1;
  double L=1.0;
  if (argc > 2)
    L = atof(argv[2]);

  double h = L/N;

  Vector grid = createVector(M);
  for (int i=0;i<M;++i)
    grid->data[i] = (i+1)*h;

  int coords[2] = {0, 0};
  int sizes[2] = {1, 1}; /* serial default: a single 1x1 process grid */
#ifdef HAVE_MPI
  sizes[0] = sizes[1] = 0;
  MPI_Dims_create(size,2,sizes);
  int periodic[2];
  periodic[0] = periodic[1] = 0;
  MPI_Comm comm;
  MPI_Cart_create(MPI_COMM_WORLD,2,sizes,periodic,0,&comm);
  MPI_Cart_coords(comm,rank,2,coords);
#endif

  int* len[2];
  int* displ[2];
  splitVector(M, sizes[0], &len[0], &displ[0]);
  splitVector(M, sizes[1], &len[1], &displ[1]);

#ifdef HAVE_MPI
  Matrix u = createMatrixMPI(len[0][coords[0]]+2, len[1][coords[1]]+2, M, M, &comm);
#else
  Matrix u = createMatrix(M+2, M+2);
#endif
  evalMeshDispl(u, grid, grid, poisson_source,
                displ[0][coords[0]], displ[1][coords[1]]);
  scaleVector(u->as_vec, h*h);

  double time = WallTime();
  GS(u, 1e-6, 5000);

  evalMesh2Displ(u, grid, grid, exact_solution, -1.0,
                 displ[0][coords[0]], displ[1][coords[1]]);
  double max = maxNorm(u->as_vec);

  if (rank == 0) {
    printf("elapsed: %f\n", WallTime()-time);
    printf("max: %f\n", max);
  }

  freeMatrix(u);
  freeVector(grid);
  for (int i=0;i<2;++i) {
    free(len[i]);
    free(displ[i]);
  }

  close_app();
  return 0;
}