Example no. 1
int main(int argc, char *argv[]) {
  
  if (argc < 4) printUse(); //printUse() is defined elsewhere; it is assumed to print usage and exit
  
  int pipeIndex = 0; //argument index of the pipe symbol ':'
  for (int i = 1; i < argc; i++) {
    if (strcmp(argv[i], ":") == 0) {
      pipeIndex = i;
      break;
    }
  }
  
  if (pipeIndex == 0) printUse(); //no colon found
  
  if (pipeIndex == 1) printUse(); //colon cannot be first argument
  
  if (pipeIndex == argc - 1) printUse(); //colon cannot be last argument
  
  char **write = calloc(pipeIndex, sizeof(char *)); //initialize write and read arrays
  char **read = calloc((argc - pipeIndex) + 2, sizeof(char *)); //while setting every index to NULL
  
  for (int i = 1; i < pipeIndex; i++) { //assign everything before the colon to write array
    write[i - 1] = argv[i]; 
  }
  
  for (int i = pipeIndex + 1; i < argc; i++) { //assign everything after the colon to read array
    read[i - pipeIndex - 1] = argv[i];
  }
  
  int fd[2];
  pipe(fd);
  
  if (fork()) {
    close(fd[0]); // parent: close unused read end
    dup2(fd[1], STDOUT_FILENO); // send stdout into the pipe
    close(fd[1]);
    execvp(write[0], write); // replaces this process with the command before ':'; returns only on failure
    perror("execvp");
  } else {
    close(fd[1]); // child: close unused write end
    dup2(fd[0], STDIN_FILENO); // read stdin from the pipe
    close(fd[0]);
    execvp(read[0], read); // replaces this process with the command after ':'; returns only on failure
    perror("execvp");
  }
  
  return 0;
}
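Usage note (the binary name is hypothetical): an invocation such as ./pipe ls -l : wc -l makes the parent exec ls -l with its standard output redirected into the pipe, while the child execs wc -l reading from the pipe; that is, the arguments before the colon form the writing command and those after it form the reading command.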
Example no. 2
int main(int argc, char **argv) {
	struct sockaddr_in addr;
	socklen_t addrlen = sizeof(addr); // accept() expects a socklen_t*
	int sd;

	if (argc != 2) {
		printUse(argv[0]);
		return 0;
	}

	if ( (sd = socket(PF_INET, SOCK_STREAM, 0)) < 0 )
		PERROR("Socket");

	addr.sin_family = AF_INET;
	addr.sin_port = htons(MY_PORT);
	addr.sin_addr.s_addr = INADDR_ANY;

	if ( bind(sd, (struct sockaddr*)&addr, addrlen) < 0 )
		PERROR("Bind");

	if ( listen(sd, 20) < 0 )
		PERROR("Listen");

	printf ("Server listening on port: %d\n", MY_PORT);

	char *rootDir = argv[1];
	int lenRootDir = strlen(rootDir)-1;
	while (lenRootDir > 0 && rootDir[lenRootDir] == '/') { // strip trailing slashes, but keep a lone "/"
		rootDir[lenRootDir] = '\0';
		lenRootDir--;
	}

	while (1) {
		int len;
		int client = accept(sd, (struct sockaddr*)&addr, &addrlen);
		printf("Connected: %s:%d\n", inet_ntoa(addr.sin_addr), ntohs(addr.sin_port));

		if ( (len = recv(client, buffer, MAXBUF - 1, 0)) > 0 ) {
			buffer[len] = '\0'; // NUL-terminate before parsing the request line
			FILE* ClientFP = fdopen(client, "w");
			if ( ClientFP == NULL ) perror("fpopen");
			else {
				char Req[MAXPATH] = { 0 };
				sscanf(buffer, "GET %s HTTP", Req);
				printf("Request: \"%s\"\n", Req);
				ServeStatic(ClientFP, Req, rootDir);
				fclose(ClientFP);
			}
		}
		close(client);
	}
	return 0;
}
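Usage note: the server is started with the document root as its only argument, e.g. ./server /var/www (the binary name is hypothetical), and can then be tested with a plain GET request such as curl http://localhost:<MY_PORT>/index.html, where MY_PORT is the port macro defined elsewhere in the project.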
Example no. 3
int main(int argc, char *argv[])
{
  double minTime        = 99999.0;
  double testTime       = 0.0;
  double maxBidirBw     = 0.0;
  double maxUnidirBw    = 0.0;
  int    maxBidirSize   = 0;
  int    maxUnidirSize  = 0;
  char*  procFile       = NULL;
  int*   procList       = NULL;
  int    procListSize;
  int    rank, wsize, iters, procs;
  int    messStart, messStop, messFactor, messSize;
  int    procsPerNode, totalops, procIdx;
  int    useBarrier, printPairs, useNearestRank, dummy;
  double currBw;
  char   allocPattern;
  MPI_Comm activeComm;

  command = argv[0];

  MPI_Init(&argc, &argv);

  MPI_Comm_size(MPI_COMM_WORLD, &wsize);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);


  if (rank == 0) {
    //sleep(5);
    //while (!__sync_bool_compare_and_swap(&r_sync, 59, 0)){ sleep(2); printf("sync value %d \n", r_sync); };
    int k = 0;
    #pragma omp parallel
    #pragma omp atomic
    k++;
    printf("Done Sync. Number of Threads counted = %i\n", k);

    int N = 1048576;
    int i = 0;
    int j = 0;
    double start, diff;

    for (j = 0; j < 50; j++) {
      #pragma omp parallel for
      for (i = 0; i < N; i++) {
        a[i] = 1.0;
        b[i] = 2.0;
      }

      start = MPI_Wtime();
      #pragma omp parallel for
      for (i = 0; i < N; i++) {
        a[i] = b[i];
      }
      diff = MPI_Wtime() - start;
      printf("ELAPSED time to copy : %11.6f N: %d \n", diff, N);
    }
  } else {
    //__sync_fetch_and_add(&r_sync, 1);
    printf("OK other ranks  \n");
    //sleep(100);
  }

  MPI_Barrier(MPI_COMM_WORLD);

  MPI_Finalize();

  return 0; /* NOTE: everything below this return is unreachable */



  if ( !processArgs(argc, argv, rank, wsize, &iters, &dummy,
                    &messStart, &messStop, &messFactor, 
                    &procFile, &procsPerNode, &allocPattern, 
                    &printPairs, &useBarrier, &useNearestRank) )
  {
    if ( rank == 0 )
      printUse();

    MPI_Finalize();
    exit(-1);
  }

  if ( ! getProcList(procFile, wsize, &procList, &procListSize,
                     procsPerNode, allocPattern) )
  {
    if ( procFile )
      printf("Failed to get process list from file %s.\n", procFile);
    else
      printf("Failed to allocate process list.\n");

    exit(-1);
  }

  if ( rank == 0 )
    printReportHeader();


  //Set up intra-node shared memory structures.
  if(rank == 0) {
        //One rank per node allocates shared send request lists.
        req_array = malloc(wsize*sizeof(ReqInfo));
        /*if(!IS_SM_BUF((void*)req_array)){
            printf("Failed to allocate from SM region :%p \n", req_array);
            exit(-1);
        }*/
  }
  //broadcast address of the pointer variable so that each rank has access to it
  MPI_Bcast(&req_array , 1, MPI_LONG, 0, MPI_COMM_WORLD);
  //printf("Broadcasting of shared mem region complete! rank : %d  region_addr : %lu region_ptr : %lu \n", rank, &req_array, req_array);
  //printf("Access shm region ! rank : %d  region for sync : %lu  \n", rank, req_array[rank].sync);
 /* 
  if(rank == 0){
	char* comBuf  = (char*)malloc(10);
	memset(comBuf, 1, 10);
	req_array[30].buffer = comBuf ;
	comBuf[0] = 'n';
        req_array[30].sync = malloc(sizeof(int));
        req_array[30].sync[0] = 12;
	printf("rank : %d done sending buff ptr : %p  sync ptr : %p \n",rank, comBuf, req_array[30].sync );
        printf("sleeping ! pid() %d \n", getpid());
	sleep(40000);
  }else if(rank == 30){
	while(req_array[30].sync == NULL){
	
	}

	while(req_array[30].sync[0] != 12){
	
	}
	printf("rank : %d  buffer value: %c sync value : %d  buff ptr : %p  sync ptr : %p  \n",
		rank, req_array[30].buffer[0], req_array[30].sync[0], req_array[30].buffer, req_array[30].sync );
        printf("sleeping ! pid() %d \n", getpid());
		sleep(40000);
  } 
  MPI_Barrier(MPI_COMM_WORLD);
  return 0;*/

  printPairs = 1 ;
  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( rank == 0 && printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    /*  Create Communicator of all active processes  */
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);
//    messStart = 8388608;
//    messStop = 4096 ;
//    messStop = 8388608 ;
    for ( messSize = messStart; messSize <= messStop;
          messSize *= messFactor )
    {
      testTime = runUnicomTest(procs, messSize, iters, rank, 
                               wsize, procsPerNode, allocPattern, 
                               useBarrier, useNearestRank, &activeComm);

      if ( rank == 0 && testTime > 0 )
      {
        totalops   = iters * (procs/2);
        currBw = ((double)totalops*(double)messSize/testTime)/1000000;

        if ( currBw > maxUnidirBw )
        {
          maxUnidirBw   = currBw;
          maxUnidirSize = messSize;
        }

        if ( testTime < minTime )
          minTime = testTime;
      }
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    if ( rank == 0 )
      printf("\n");
  }
  
  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( rank == 0 && printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    // Create Communicator of all active processes
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);

    for ( messSize = messStart; messSize <= messStop; 
          messSize *= messFactor )
    {
      testTime = runBicomTest(procs, messSize, iters, rank,
                              wsize, procsPerNode, allocPattern,
                              useBarrier, useNearestRank, &activeComm);

      if ( rank == 0 && testTime > 0 )
      {
        totalops   = iters * procs;
        currBw = (((double)totalops*(double)messSize)/testTime)/1000000;

        if ( currBw > maxBidirBw )
        {
          maxBidirBw   = currBw;
          maxBidirSize = messSize;
        }

        if ( testTime < minTime )
          minTime = testTime;
      }
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    if ( rank == 0 )
      printf("\n");
  }


  if ( rank == 0 )
  {
    printf("Max Unidirectional Bandwith :  %13.2f for message size of %7d bytes\n",
           maxUnidirBw, maxUnidirSize);
    printf("Max  Bidirectional Bandwith :  %13.2f for message size of %7d bytes\n",
           maxBidirBw, maxBidirSize);

    printParameters(iters, messStart, messStop, messFactor, 
                    procFile, procsPerNode, allocPattern, useBarrier);
    free(req_array);
  }

  printReportFooter(minTime, rank, wsize, procsPerNode, useNearestRank);
  
  free(procList);

  MPI_Finalize();

  exit(0);
}
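For reference, the bandwidth printed above is just transfers × message size ÷ elapsed time, scaled to MB/s. A minimal standalone sketch of that arithmetic, with made-up values for iters, procs, messSize and testTime:

#include <stdio.h>

int main(void)
{
  /* Hypothetical sample values, not taken from any real run */
  int    iters    = 1000;
  int    procs    = 2;
  int    messSize = 1048576;             /* bytes per message */
  double testTime = 0.5;                 /* seconds for the whole timed loop */

  int    totalops = iters * (procs / 2); /* one-way transfers, as in the unidirectional loop above */
  double currBw   = ((double)totalops * (double)messSize / testTime) / 1000000;

  printf("%.2f MB/s\n", currBw);         /* prints 2097.15 for these numbers */
  return 0;
}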
Example no. 4
int main(int argc, char** argv)
{
  int rank, wsize, iters, i, procs, currtarg, dummy;
  double diff = 0.0;
  double start, max, mintime = 9999;
  MPI_Status stat;
  char comBuf;
  MPI_Comm activeComm;
  char*  procFile       = NULL;
  int*   procList       = NULL;
  int    procListSize;
  int    messStart, messStop, messFactor;
  int    procsPerNode, procIdx, useBarrier, printPairs, useNearestRank;
  char   allocPattern;

  command = argv[0];

  MPI_Init(&argc, &argv);

  MPI_Comm_size(MPI_COMM_WORLD, &wsize);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if ( !processArgs(argc, argv, rank, wsize, &iters, 
                    &dummy, &messStart, &messStop, &messFactor, 
                    &procFile, &procsPerNode, &allocPattern, 
                    &printPairs, &useBarrier, &useNearestRank) )
  {
    if ( rank == 0 )
      printUse();

    MPI_Finalize();
    exit(-1);
  }

  if ( ! getProcList(procFile, wsize, &procList, &procListSize,
                     procsPerNode, allocPattern) )
  {
    if ( procFile )
      printf("Failed to get process list from file %s.\n", procFile);
    else
      printf("Failed to allocate process list.\n");

    exit(-1);
  }

  if ( rank == 0 )
    printReportHeader();

  currtarg = getTargetRank(rank, wsize, procsPerNode, useNearestRank);

  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    /*  Create Communicator of all active processes  */
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);
    

    if ( isActiveProc(rank, wsize, procsPerNode, procs, 
                      allocPattern, useNearestRank) )
    {
      if ( rank < currtarg )
      {
        /*  Ensure pair communication has been initialized  */
        MPI_Recv(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD, &stat);
        MPI_Send(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD);
      }
      else 
      {
        /*  Ensure pair communication has been initialized  */
        MPI_Send(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD);
        MPI_Recv(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD, &stat);
      }
    
      //generic_barrier(activeComm);
      MPI_Barrier(activeComm);
      //generic_barrier(activeComm);
      MPI_Barrier(activeComm);

      if ( rank < currtarg )
      {
        /*  Time operation loop  */
        start = MPI_Wtime();

        for ( i = 0; i < iters; i++ )
        {
          MPI_Send(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD);
          MPI_Recv(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD, &stat);
        }
      }
      else 
      {
        /*  Time operation loop  */
        start = MPI_Wtime();

        for ( i = 0; i < iters; i++ )
        {
          MPI_Recv(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD, &stat);
          MPI_Send(&comBuf, 0, MPI_INT, currtarg, 0, MPI_COMM_WORLD);
        }
      }

      if ( useBarrier )
        MPI_Barrier(activeComm);
        //generic_barrier(activeComm);

      diff = MPI_Wtime() - start;
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    /*  Get maximum sample length  */
    MPI_Reduce(&diff, &max, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

    if ( rank == 0 )
    {
      if ( max < mintime )
        mintime = max;

      printf(outputFormat, procs, max/iters/2*1000000);
    }
  }
  
  if ( rank == 0 )
  {
    printParameters(iters, procFile, procsPerNode, 
                    allocPattern, useBarrier);
  }

  printReportFooter(mintime, rank, wsize, procsPerNode, useNearestRank);

  MPI_Finalize();

  exit(0);
}
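The figure printed per process count above is the one-way latency in microseconds: each loop iteration is a full ping-pong round trip, so the elapsed time is divided by iters and by 2, then converted from seconds. A minimal sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
  /* Hypothetical sample values, not taken from any real run */
  int    iters = 10000;
  double max   = 0.02;                   /* seconds for the full ping-pong loop */

  /* each iteration is one round trip: divide by iters and by 2, convert to microseconds */
  double latency_us = max / iters / 2 * 1000000;

  printf("%.2f us\n", latency_us);       /* prints 1.00 for these numbers */
  return 0;
}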
Example no. 5
File: com.c Project: 8l/insieme
int
main ( int argc, char *argv[] )
{
  int *messList = NULL;
  int testIdx, doTestLoop;
  int i;

  executableName = "com";

  MPI_Init ( &argc, &argv );
  MPI_Get_processor_name ( hostName, &i );

  /* Set global wsize and rank values */
  MPI_Comm_size ( MPI_COMM_WORLD, &wsize );
  MPI_Comm_rank ( MPI_COMM_WORLD, &rank );

  if ( !initAllTestTypeParams ( &testParams ) )
  {
    MPI_Finalize (  );
    exit ( 1 );
  }

  argStruct.testList = "Bidirectional, BidirAsync";

  if ( !processArgs ( argc, argv ) )
  {
    if ( rank == 0 )
      printUse (  );

    MPI_Finalize (  );
    exit ( 1 );
  }

  /* If using a source directory of process rank target files,
   * get the next appropriate file.
   */
  if ( targetDirectory != NULL && getNextTargetFile (  ) == 0 )
  {
    prestaAbort ( "Failed to open target file in target directory %s\n",
                  targetDirectory );
  }

  doTestLoop = 1;
  while ( doTestLoop )
  {
    if ( !setupTestListParams (  ) || !initAllTestTypeParams ( &testParams ) )
    {
      if ( rank == 0 )
        printUse (  );

      MPI_Finalize (  );
      exit ( 1 );
    }

#ifdef PRINT_ENV
    if ( rank == 0 )
      printEnv();
#endif

    printReportHeader (  );

    for ( testIdx = 0; testIdx < TYPETOT; testIdx++ )
    {
      if ( argStruct.testList == NULL
           || ( argStruct.testList != NULL
                && strstr ( argStruct.testList,
                            testParams[testIdx].name ) != NULL ) )
      {
        prestaRankDebug ( 0, "running test index %d\n", testIdx );
        runTest ( &testParams[testIdx] );
      }
    }

    if ( presta_check_data == 1 )
    {
      MPI_Reduce ( &presta_data_err_total, &presta_global_data_err_total,
                   1, MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD );
    }

    if ( targetDirectory == NULL || getNextTargetFile (  ) == 0 )
    {
      doTestLoop = 0;
    }
  }

  printSeparator (  );

  freeBuffers ( &testParams );
  free ( messList );

  MPI_Finalize (  );

  exit ( 0 );
}
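The test-selection logic above is a plain substring match: a test runs when its name occurs in the comma-separated testList, or when no list was given. A standalone sketch of that check; the names Bidirectional and BidirAsync come from argStruct.testList above, while Unidirectional is a hypothetical placeholder:

#include <stdio.h>
#include <string.h>

int main(void)
{
  const char *testList = "Bidirectional, BidirAsync";  /* same value as argStruct.testList above */
  const char *names[]  = { "Unidirectional", "Bidirectional", "BidirAsync" };

  for (int i = 0; i < 3; i++)
    if (testList == NULL || strstr(testList, names[i]) != NULL)
      printf("would run: %s\n", names[i]);  /* prints Bidirectional and BidirAsync */

  return 0;
}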
Example no. 6
int main(int argc, char *argv[])
{
  double minTime        = 99999.0;
  double testTime       = 0.0;
  double maxBidirBw     = 0.0;
  double maxUnidirBw    = 0.0;
  int    maxBidirSize   = 0;
  int    maxUnidirSize  = 0;
  char*  procFile       = NULL;
  int*   procList       = NULL;
  int    procListSize;
  int    rank, wsize, iters, procs;
  int    messStart, messStop, messFactor, messSize;
  int    procsPerNode, totalops, procIdx;
  int    useBarrier, printPairs, useNearestRank, dummy;
  double currBw;
  char   allocPattern;
  MPI_Comm activeComm;

  command = argv[0];

  MPI_Init(&argc, &argv);

  MPI_Comm_size(MPI_COMM_WORLD, &wsize);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if ( !processArgs(argc, argv, rank, wsize, &iters, &dummy,
                    &messStart, &messStop, &messFactor, 
                    &procFile, &procsPerNode, &allocPattern, 
                    &printPairs, &useBarrier, &useNearestRank) )
  {
    if ( rank == 0 )
      printUse();

    MPI_Finalize();
    exit(-1);
  }

  if ( ! getProcList(procFile, wsize, &procList, &procListSize,
                     procsPerNode, allocPattern) )
  {
    if ( procFile )
      printf("Failed to get process list from file %s.\n", procFile);
    else
      printf("Failed to allocate process list.\n");

    exit(-1);
  }

  if ( rank == 0 )
    printReportHeader();

  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( rank == 0 && printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    /*  Create Communicator of all active processes  */
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);

    for ( messSize = messStart; messSize <= messStop; 
          messSize *= messFactor )
    {
      testTime = runUnicomTest(procs, messSize, iters, rank, 
                               wsize, procsPerNode, allocPattern, 
                               useBarrier, useNearestRank, &activeComm);

      if ( rank == 0 && testTime > 0 )
      {
        totalops   = iters * (procs/2);
        currBw = ((double)totalops*(double)messSize/testTime)/1000000;

        if ( currBw > maxUnidirBw )
        {
          maxUnidirBw   = currBw;
          maxUnidirSize = messSize;
        }

        if ( testTime < minTime )
          minTime = testTime;
      }
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    if ( rank == 0 )
      printf("\n");
  }
  
  for ( procIdx = 0; procIdx < procListSize; procIdx++ )
  {
    procs = procList[procIdx];

    if ( rank == 0 && printPairs )
    {
      printActivePairs(procs, rank, wsize, procsPerNode, 
                       allocPattern, useNearestRank);
    }

    /*  Create Communicator of all active processes  */
    createActiveComm(procs, rank, wsize, procsPerNode, 
                     allocPattern, printPairs, useNearestRank, &activeComm);

    for ( messSize = messStart; messSize <= messStop; 
          messSize *= messFactor )
    {
      testTime = runBicomTest(procs, messSize, iters, rank, 
                              wsize, procsPerNode, allocPattern, 
                              useBarrier, useNearestRank, &activeComm);

      if ( rank == 0 && testTime > 0 )
      {
        totalops   = iters * procs;
        currBw = (((double)totalops*(double)messSize)/testTime)/1000000;

        if ( currBw > maxBidirBw )
        {
          maxBidirBw   = currBw;
          maxBidirSize = messSize;
        }

        if ( testTime < minTime )
          minTime = testTime;
      }
    }

    if ( activeComm != MPI_COMM_NULL )
      MPI_Comm_free(&activeComm);

    if ( rank == 0 )
      printf("\n");
  }

  if ( rank == 0 )
  {
    printf("Max Unidirectional Bandwith :  %13.2f for message size of %7d bytes\n",
           maxUnidirBw, maxUnidirSize);
    printf("Max  Bidirectional Bandwith :  %13.2f for message size of %7d bytes\n",
           maxBidirBw, maxBidirSize);

    printParameters(iters, messStart, messStop, messFactor, 
                    procFile, procsPerNode, allocPattern, useBarrier);
  }

  printReportFooter(minTime, rank, wsize, procsPerNode, useNearestRank);
  
  free(procList);

  MPI_Finalize();

  exit(0);
}