Example no. 1
int main(int argc, char *argv[]){
    //--Declare variables
    int *a, *b, *c, i, m, n, p, range;

    // --Check no. of arguments
    if (argc < 5) {
      cout<< endl << endl << "\t....OOOps, INVALID No of Arguments,\n";
      exit(1);
    }// end if

	
    //--Get input data
    m = atoi(argv[1]);	// rows of matrix a
    n = atoi(argv[2]);	// cols of matrix a
    p = atoi(argv[3]);	// cols of matrix c
    range = atoi(argv[4]);// range of matrices a and b

    //--Allocate Space for  Matrix a
    a = (int *) malloc (m * n * sizeof (int));

    //--Allocate Space for  Matrix b
    b = (int *) malloc (n * p * sizeof (int));

    //--Allocate Space for  Matrix c
    c = (int *) malloc (m * p * sizeof (int));
    int * local_c = (int *) malloc(p * sizeof (int));
    int pc,id;	
    MPI::Init(argc,argv);
    pc = MPI::COMM_WORLD.Get_size ();
    id = MPI::COMM_WORLD.Get_rank ();
    if(id == 0){

        cout << "Siavash Katebzadeh"
        << endl << "Parallel program to randomly generate two matrices a(m by n) and b(n by p)"
        << endl << "and then compute c = a * b"
        << endl << "To compile: mpic++ fname.cpp,"
        << endl << "To run: mpirun -n <cores> a.out m n p r (all integer)"
        << endl << "where: m is no. of rows of matrix a, n is no. of cols of matrix a, "
        << endl << "p is no. of cols of matrix c, and r is range of matrices a and b." << endl;
    //--Generate and print Matrix a
        genMatrix(a, m, n, range);

    //--Generate and print Matrix b
        genMatrix(b, n, p, range);
    }
    MPI::COMM_WORLD.Bcast(a,n*m,MPI::INT,0);
    MPI::COMM_WORLD.Bcast(b,n*p,MPI::INT,0);
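    // NOTE: local_c holds p results (one row of c) per rank and the Gather below
    // collects p ints from each rank, so this version appears to assume that the
    // number of MPI processes equals m; Example no. 9 is the chunked variant.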
    mpyMatrix(a, b, local_c, m, n, p,id);
    MPI::COMM_WORLD.Gather(local_c, p, MPI::INT, c + id * p, p, MPI::INT, 0);
    //--Print Matrices a, b and c
    if( id == 0)
        printMatrices(a, b, c, m, n, p);

    MPI::Finalize ();
    // --free allocated spaces
    free (a);//free allocated space for matrix a
    free (b);//free allocated space for matrix b
    free (c);//free allocated space for matrix c
    free (local_c);//free allocated space for the local result buffer
    return 0;
} // end main
Example no. 2
int main()
{
	srand(time(0));

	printf("#Alg\tN\tcost\titerations\n");

	for(int i = 20; i<1000; i+=2)
	{

		// the matrix that contains the compatibilities
		int * D = (int*) malloc( sizeof(int)*i*i );
		// the array that contains a solution
		int * a = (int*) malloc( sizeof(int)*i );
		initArray(a, -1, i);

		//initialize the matrix
		genMatrix(D, i);

		// generate a solution
		genSolution(a, i);
		
		// calculate its cost
		//printf("Initial cost: %d\n", cost(D, a, i ) );
		
		// run the algorithms
		alg1(i, D, a);
		alg2(i, D, a);

		// free() the malloc()
		free(D);
		free(a);
	}

	return 0;
}
Example no. 3
int main(int argc,char * argv[])
{
  if (argc != 3) // quit unless called with "filename" and "size"
  {
    std::cerr<<"Usage: "<<argv[0]<<" fullnetwork "<<" nodeSize "<<std::endl;
    exit(EXIT_FAILURE);
  }
  int nodeNum = atoi(argv[2]);
  std::clock_t c_start = std::clock();
  
  int *network = genMatrix(argv[1],nodeNum,nodeNum);

  genBW(network,nodeNum);

  std::clock_t c_end = std::clock();
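  // std::clock() measures CPU time in clock ticks; divide by CLOCKS_PER_SEC
  // (and scale by 1000) to report milliseconds.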
  std::cout<<"It costs : "<<1000.0*(c_end-c_start)/CLOCKS_PER_SEC<<" ms."<<std::endl;
}
Example no. 4
int main(int argc, char* argv[]) 
{
  double *A, *B, *b, *y;
  int n;
  
  int my_rank, p;
  int i;
  
  /* Obtain number of rows and columns. We do not check for erroneous
     input. */
  n = atoi(argv[1]);

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &p);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  
  /* Find how many rows per process. */
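  /* Malloc() and calcNumsPerProcess() are project helpers that are not shown
     here; Malloc() presumably wraps malloc() with an error check that aborts
     the communicator on failure (assumption). */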
  int *rows;
  rows = (int*)Malloc(p, sizeof(int), MPI_COMM_WORLD, my_rank);
  calcNumsPerProcess(n, p, rows);
  
  /* Allocate memory. */
  b = Malloc(n, sizeof(double), MPI_COMM_WORLD, my_rank);
  if (my_rank == 0) 
    {
      A = (double*)Malloc(n*n, sizeof(double), MPI_COMM_WORLD, my_rank);
      y = (double*)Malloc(n, sizeof(double), MPI_COMM_WORLD, my_rank);
    } 
  B = (double*)Malloc(rows[my_rank]*n, sizeof(double), MPI_COMM_WORLD, my_rank);
  
  /* Generate matrix and vector */
  if (my_rank == 0) 
    { 
      genMatrix(n, n, A);
      genVector(n, b);
    }
  
  /* Distribute A */
  int *displs;
  int *sendcounts;
  if (my_rank == 0)
    {
      displs = malloc(sizeof(int)*p);
      sendcounts = malloc(sizeof(int)*p);
      for (i=0; i<p; i++)
	sendcounts[i] = rows[i]*n;
      displs[0] = 0;
      for (i=1; i<p; i++)
	displs[i] = displs[i-1] + sendcounts[i-1];
    }
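  /* sendcounts and displs are only significant at the root, so leaving them
     unset on the other ranks is valid for MPI_Scatterv. */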

  MPI_Scatterv(A, sendcounts, displs, MPI_DOUBLE, 
	       B, rows[my_rank]*n, MPI_DOUBLE, 0, MPI_COMM_WORLD);
  /* Distribute b */
  MPI_Bcast(b, n, MPI_DOUBLE, 0, MPI_COMM_WORLD); 
  
  double time = MPI_Wtime();
  parallelMatrixTimesVector(rows[my_rank], n, B, b, y, 0, my_rank, p, MPI_COMM_WORLD);
  time = MPI_Wtime()-time;
  
  /* Collect the max time from all processes. */
  double timerecv;

  MPI_Reduce(&time,&timerecv, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
  if (my_rank==0) {
    printf("%d %d % .2e\n", p, n, timerecv);
  }
  
  if (my_rank==0){
    free(sendcounts);
    free(displs);
    free(y);
    free(A);   /* A is only allocated on rank 0 */
  }
  free(B);
  free(b);
  free(rows);
  
  MPI_Finalize();
  return 0;
}  
Example no. 5
int main(int argc, char * argv[])
{
    int rank, np;
    int * D;
    int * a;
    int i;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);

    int res=-1;
    int * results;

    srand(rank + time(0));

    for(i = 20; i<100; i+=2)
    {

        // the matrix that contains the compatibilities
        D = (int*) malloc( sizeof(int)*i*i );
        // the array that contains a solution
        a = (int*) malloc( sizeof(int)*i );

        initArray(a, -1, i);

        if(rank==0)
        {
            //initialize the matrix
            genMatrix(D, i);

            // allocate the array to receive the gold
            results = (int*) malloc( sizeof(int)*np );
        }

        // generate a solution
        genSolution(a, i);

        //send compatibily matrix and initial solution to other processes
        MPI_Bcast(D, sizeof(int)*i*i, MPI_BYTE, 0, MPI_COMM_WORLD);
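        // broadcasting the matrix as raw bytes; equivalent to a count of i*i
        // elements of type MPI_INT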
        //MPI_Bcast(a, sizeof(int)*i, MPI_BYTE, 0, MPI_COMM_WORLD);

        res = alg2(i, D, a, rank);

        //MPI_Barrier(MPI_COMM_WORLD);

        MPI_Gather(&res, 1, MPI_INT, results, 1, MPI_INT, 0, MPI_COMM_WORLD);
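        // the receive buffer (results) is only significant at the root, so
        // allocating it on rank 0 alone is valid for MPI_Gather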

        if(rank==0)
        {
            printf("%d\t%d\n", i, getMin(results, np) );

            // clean
            free(results);
        }

        free(D);
        free(a);

    }

    MPI_Finalize();

    return 0;
}
Example no. 6
double *LeastSquare(double *x,double *y,int n,int m){
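    // Least-squares fit of degree m to the n samples in x and y:
    // genMatrix presumably builds the normal-equation system and solve()
    // returns its m+1 coefficients (assumption; both helpers are not shown).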
    double **result;
    result = genMatrix(x,y,n,m);
    double *r = solve(result,m+1);
    return r;
}
Example no. 7
int main(int argc, char* argv[]) 
{
  double *A, *B, *b, *y;
  int num_rows, num_cols;
  
  int my_rank, p;
  int i;
  
  /* Obtain number of rows and columns. We do not check for erroneous
     input. */
  num_rows = atoi(argv[1]);
  num_cols= atoi(argv[2]);
  
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &p);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  
  /* number of rows on my_rank */
  int local_num_rows = NUM_ROWS(my_rank, p, num_rows);
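  /* NUM_ROWS is a project macro that is not shown here; presumably it returns
     the number of block rows assigned to a given rank (assumption). */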
  
  /* Allocate memory */
  b = Malloc(num_cols, sizeof(double), MPI_COMM_WORLD, my_rank);
  if (my_rank == 0) 
    {
      A = (double*)Malloc(num_rows*num_cols, sizeof(double), MPI_COMM_WORLD, my_rank);
      y = (double*)Malloc(num_rows, sizeof(double), MPI_COMM_WORLD, my_rank);
    } 
  B = (double*)Malloc(local_num_rows*num_cols, sizeof(double), MPI_COMM_WORLD, my_rank);
  
  /* Generate matrix and vector */
  if (my_rank == 0) 
    { 
      genMatrix(num_rows, num_cols, A);
      genVector(num_cols, b);
    }
  
  /* Distribute A */
  int *displs;
  int *sendcounts;
  if (my_rank == 0)
    {
      displs = malloc(sizeof(int)*p);
      sendcounts = malloc(sizeof(int)*p);
      sendcounts[0] = NUM_ROWS(0,p,num_rows)*num_cols;
      displs[0] = 0;
      for (i=1; i<p; i++)
	{
	  displs[i] = displs[i-1] + sendcounts[i-1];
	  sendcounts[i] = NUM_ROWS(i,p,num_rows)*num_cols;
	}
    }
  MPI_Scatterv(A, sendcounts, displs, MPI_DOUBLE, 
	       B, local_num_rows*num_cols, MPI_DOUBLE, 0, MPI_COMM_WORLD);
  
  /* Distribute b */
  MPI_Bcast(b, num_cols, MPI_DOUBLE, 0, MPI_COMM_WORLD); 
  
  /* Multiply */
  double time = MPI_Wtime();
  parallelMatrixTimesVector(local_num_rows, num_cols, B, b, y, 0, my_rank, p, MPI_COMM_WORLD);
  time = MPI_Wtime()-time;
  
  /* Collect the max time from all processes. */
  double timerecv;
  MPI_Reduce(&time,&timerecv, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
  if (my_rank==0)
    printf("Computed with p = %d, m = %d, n = %d in % .2e seconds\n", p, num_rows, num_cols, timerecv);
  
  
  if (my_rank == 0)
    getResult(num_rows, num_cols, A, b, y);

  if (my_rank==0){
    free(sendcounts);
    free(displs);
    free(y);
    free(A);
  }
  free(b);   /* b is allocated on every rank */
  free(B);

  MPI_Finalize();
  return 0;
}  
Example no. 8
int main(int argc, char** argv) {

    std::cout << "**************************************************\n"
            << "*** Introduction to Visual Computing, SS 2015  ***\n"
            << "*** Exercise 03: OpenCV Stub                   ***\n"
            << "**************************************************\n"
            << "*** Author: Dipl.-Inf. Sven Sickert            ***\n"
            << "**************************************************\n\n";

    // create a new window
    cv::namedWindow("My Image Window", CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO);
    //cv::Mat img(480, 640, CV_8UC3);
    cv::Mat * img = genMatrix();
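    // genMatrix() presumably returns a freshly allocated image to draw into
    // (assumption); it is deleted and re-created on every loop iteration below.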

    Object3DListType objects3D;
    create3DObject(objects3D);

    //Load the initial parameters
    std::vector<double> position;
    position.push_back(posZ);
    position.push_back(posY);
    position.push_back(posX);
    std::vector<double> direction;
    direction.push_back(vZ);
    direction.push_back(vY);
    direction.push_back(vX);
    double focalDistance = FOCAL_DISTANCE;
    char key;
    bool quit = false;


    //Move the objects into a visible region
    for (Object3DListType::iterator objIt = objects3D.begin(); objIt != objects3D.end(); objIt++) {
        for (Point3DListType::iterator pointIt = (*objIt).first.begin(); pointIt != (*objIt).first.end(); pointIt++) {
            (*pointIt)[0] += 30;
            (*pointIt)[1] += 30;
            (*pointIt)[2] += 30;
        }
    }

    do {
        key = (char) cv::waitKey();
        delete img;
        //std::cout << key << " " << std::endl;
        std::cout << "--------Position--------" << std::endl;
        std::cout << "posX = " << position[0] << std::endl;
        std::cout << "posY = " << position[1] << std::endl;
        std::cout << "posZ = " << position[2] << std::endl;
        std::cout << "vX = " << direction[0] << std::endl;
        std::cout << "vY = " << direction[1] << std::endl;
        std::cout << "vZ = " << direction[2] << std::endl;

        switch (key) {
            case 'q':
                quit = true;
                break;

                //--------Position    
            case 'w':
                position[1]++;
                break;
            case 's':
                position[1]--;
                break;
            case 'a':
                position[0]--;
                break;
            case 'd':
                position[0]++;
                break;
            case 'x':
                position[2]++;
                break;
            case 'y':
                position[2]--;
                break;
                //--------View direction
            case 'R':
                direction[0]++;
                break;
            case 'Q':
                direction[1]--;
                break;
            case 'T':
                direction[0]--;
                break;
            case 'S':
                direction[1]++;
                break;
            case ',':
                direction[2]--;
                break;
            case '.':
                direction[2]++;
                break;


        }
        img = genMatrix();
        Object2DListType objects2D = project2D(objects3D, position, direction, focalDistance);
        draw2DObjects(*img, objects2D);
        cv::imshow("My Image Window", *img);

    } while (!quit);

    return 0;
}
Example no. 9
int main(int argc, char *argv[]){
    //--Declare variables
    int *a, *b, *c, i, m, n, p, range, chunk;

    if (argc < 5) {
      cout<< endl << endl << "\t....OOOps, INVALID No of Arguments,\n";
      exit(1);
    }// end if

    int pc,id;	
    MPI::Init(argc,argv);
    pc = MPI::COMM_WORLD.Get_size ();
    id = MPI::COMM_WORLD.Get_rank ();
    if (id == 0){
	cout << "-- Siavash Katebzadeh" 
        << endl <<"-- Parallel program to generate randomly two matrices a(m by n) and b(n by p)i" 
        << endl <<"-- and then computing c = a * b"
        << endl <<"-- To compile make parallel-2, To run  mpirun -n core ./bin/parallel-2 m n p r (all integer)" 
        << endl <<"-- where: core is no. of processors, m is no. of rows of matrix a, n is no. of cols of matrix a, ....." 
        << endl <<"-- p is no. of cols of matrix c and r is range of matrices a and b" << endl ;
    }
	
    //--Get input data
    m = atoi(argv[1]);	// rows of matrix a
    n = atoi(argv[2]);	// cols of matrix a
    p = atoi(argv[3]);	// cols of matrix c
    range = atoi(argv[4]);// range of matrices a and b
    chunk = m / pc + (m % pc == 0 ? 0 : 1);
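    // chunk = ceil(m / pc) rows per process; a and c are over-allocated below so
    // that pc * chunk rows fit when m is not a multiple of pc (assumes m >= pc).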
    //chunk = m / pc;
    //--Allocate Space for  Matrix a
    a = (int *) malloc ((m % pc == 0 ? m : m * 2 - m % pc ) * n * sizeof (int));
    //a = (int *) malloc (m * n * sizeof (int));

    //--Allocate Space for  Matrix b
    b = (int *) malloc (n * p * sizeof (int));

    //--Allocate Space for  Matrix c
    //c = (int *) malloc (m * p * sizeof (int));
    c = (int *) malloc ((m % pc == 0 ? m : m * 2 - m % pc) * p * sizeof (int));
    int * local_c = (int *) malloc(chunk * p * sizeof (int));

    if(id == 0){
    //--Generate and print Matrix a
        genMatrix(a, m, n, range);

    //--Generate and print Matrix b
        genMatrix(b, n, p, range);
    }
    MPI::COMM_WORLD.Bcast(a,n*m,MPI::INT,0);
    MPI::COMM_WORLD.Bcast(b,n*p,MPI::INT,0);
    mpyMatrix(a, b, local_c, m, n, p, chunk, id);
    MPI::COMM_WORLD.Gather(local_c, chunk * p, MPI::INT, c + (id * chunk * p), chunk * p, MPI::INT, 0);
    //--Print Matrices a, b and c
    if( id == 0)
    	printMatrices(a, b, c, m, n, p);

    MPI::Finalize ();
    // --free allocated spaces
    free (a);//free allocated space for matrix a
    free (b);//free allocated space for matrix b
    free (c);//free allocated space for matrix c
    free (local_c);//free allocated space for the local result buffer
    return 0;
} // end main
Example no. 10
int main(int argc, char* argv[]) 
{
  double *A, *b, *y;
  int m, n;
  
  int my_rank, p;
  int i, dest, source;
  int offset;
  
  MPI_Status status;
  
  m = atoi(argv[1]);
  n = atoi(argv[2]);
  
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &p);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  

  // Find how many rows per process.
  int *rows;
  rows = (int*)malloc(sizeof(int)*p);
  for (i=0; i < p; i++)
    rows[i] = m/p;
  for (i=0; i < m % p; i++)
    rows[i]++;
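  // block distribution: every process gets m/p rows and the first m%p
  // processes get one extra row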
  
  // Allocate memory.
  b = malloc( n*sizeof(double));
  if (my_rank == 0) 
    {
      A = (double*)malloc(m * n * sizeof(double));
      y = (double*)malloc(m * sizeof(double));
    } 
  else 
    {
      A = (double*)malloc(rows[my_rank] * n * sizeof(double));
      y = (double*)malloc(rows[my_rank] * sizeof(double));
    }	  

  // Generate matrix and vector
  if (my_rank == 0) 
    { 
      genMatrix(m, n, A);
      genVector(n, b);
    }

  
  // PUT THIS INTO SEPARATE FUNCTION parallelMatrixVectorProduct and in a file parallmatvec.c


  int tag = 0;
  if (my_rank ==0)
    {
      offset = 0;   /* reuse the offset declared above instead of shadowing it */
      for (dest = 1; dest < p; dest++) 
	{
	  offset += rows[dest-1];
	  MPI_Send(A + offset*n, rows[dest]*n, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
	}
    }
  else
    MPI_Recv(A, rows[my_rank]*n, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD, &status);
  
  MPI_Bcast(b, n, MPI_DOUBLE, 0, MPI_COMM_WORLD); 
  
  compDotProduct(rows[my_rank], n, A, b, y);
  
  // Get the data
  if (my_rank !=0)
    MPI_Send(y, rows[my_rank], MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
  else
    {
      offset = 0;
      for (source = 1; source < p; source++)
	{
	  offset += rows[source-1];
	  MPI_Recv(y+offset, rows[source], MPI_DOUBLE, source, tag, MPI_COMM_WORLD, &status);
	}
    }
  // END OF parallMatrixVectorProduct
  

  if (my_rank == 0)
    {
      getResult(m, n, A, b, y);
    }
    
  free(A);
  free(y);
  free(b);
  free(rows);

  MPI_Finalize();
  return 0;
}
Example no. 11
int main(int argc, char* argv[])
{
  int         my_rank;       /* rank of process      */
  int         p;             /* number of processes  */
  int         source=0;        /* rank of sender       */
  int         destination;
  int         tag = 0;       /* tag for messages     */
  MPI_Status  status;        /* status for receive   */

  int rowsPerProcess;
  int processesInUse;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &p);

  int m = atoi(argv[1]);
  int n = atoi(argv[2]);

  //allocate b
  double *b = (double *)malloc(m*sizeof(double));


  //assign number of rows process is responsible for
    if (my_rank > n%p - 1)
      rowsPerProcess = n/p;
    else 
      rowsPerProcess = n/p+1;

  double *rowResult;
  if (my_rank == 0)
     rowResult = (double *) malloc(n*sizeof(double));
  else
     rowResult = (double *) malloc(rowsPerProcess*sizeof(double));


  //Clear memory contents of final vector
  if(my_rank ==0){
    for ( int i = 0; i < n; i ++)
      rowResult[i] = 0;
  }
  else{
    for ( int i = 0; i < rowsPerProcess; i ++)
      rowResult[i] = 0;
  }

  //assign number of processes that will be used
  if (p>n){
    processesInUse = n;
  }
  else
    processesInUse = p;


  //allocate mem for rows
  double *rowofA;
  if (my_rank == 0){
     rowofA = (double *) malloc(n*m*sizeof(double));
  }
  else{
     rowofA = (double *) malloc(rowsPerProcess*m*sizeof(double)); 
  }    


  //Enter process 0 main
  if (my_rank == 0){
    int *dest = (int *) malloc((processesInUse)*sizeof(int));  
    double *totalResult;   // = (double *)malloc(n*sizeof(double));
    int *numjobs = (int *)malloc(processesInUse*sizeof(int));

    //building destination list
    for (int i = 0; i < processesInUse; i++){
      *dest = i;
      //printf("dest:%d\n", *dest);
      dest = dest + 1;
    }
    dest = dest - (processesInUse);

    //initialize numjobs[]
    for (int i = 0; i < processesInUse; i++){
      if ( *dest > n%processesInUse - 1){
	*numjobs = n/processesInUse;
      }
      else{
	*numjobs = n/processesInUse+1;
      }
      dest++;
      numjobs++;
    }
    dest = dest - processesInUse;
    numjobs = numjobs - (processesInUse);
  
    //gen matrix
    genMatrix(m,n, rowofA);
    //gen vector
    genVector(m, b);


    //split and send A and b
    split_send(rowofA, b, rowofA, processesInUse, m, n, numjobs, my_rank, dest, tag);

    //calculate: result[] = matrix row * vector
    calc_res(rowofA, b, m, n, processesInUse, rowsPerProcess, rowResult);
    totalResult = rowResult;
    rowResult = rowResult + rowsPerProcess;
    numjobs++;

    //receive results from other processes -- load into final array
    for (int i = 1; i < processesInUse; i ++){
      MPI_Recv(rowResult, *numjobs, MPI_DOUBLE, i, tag, MPI_COMM_WORLD, &status);
      //printf("0 Received from %f from %d\n", *rowResult, i);
      rowResult = rowResult + *numjobs;
      numjobs++;
    } 
    rowResult = totalResult;

    //print final array
    printf("Total Result: [ ");
    for (int i = 0; i < n; i++){
      printf("%f ", *totalResult);
      totalResult++;
    }
    printf("]\n");
    totalResult = rowResult;

    //Call getResult
    getResult(m,n, rowofA,b,totalResult);

  }
  //All processes other than 0 with rank < n
  else if (my_rank < n){

    //Receive rowsofA
    MPI_Recv(rowofA, m*rowsPerProcess, MPI_DOUBLE, source, tag, MPI_COMM_WORLD, &status);
    
    //get b
    MPI_Bcast(b, m, MPI_DOUBLE, 0, MPI_COMM_WORLD);
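    // NOTE: MPI_Bcast is collective, so rank 0 must make a matching call;
    // presumably split_send() broadcasts b on the root side (assumption).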

    //calculate: result[] = matrix row * vector
    calc_res(rowofA, b, m, n,processesInUse,rowsPerProcess, rowResult);

    //send results to 0
    destination = 0;
    MPI_Send(rowResult, rowsPerProcess, MPI_DOUBLE, destination, tag, MPI_COMM_WORLD);
      
  }


  MPI_Finalize();

  return 0;
}