Example #1
FORTRAN_API void FORT_CALL mpi_file_sync_(MPI_Fint *fh, int *ierr )
{
    MPI_File fh_c;
    
    fh_c = MPI_File_f2c(*fh);
    *ierr = MPI_File_sync(fh_c);
}
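The binding above only converts the Fortran INTEGER handle to a C handle and forwards the call. A minimal self-contained sketch of that handle-conversion pair, using an illustrative file name that is not taken from any of the examples:

#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_File fh_c;
    MPI_Init(&argc, &argv);
    MPI_File_open(MPI_COMM_SELF, "sync_demo.dat",
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh_c);
    MPI_Fint fh_f = MPI_File_c2f(fh_c);   /* C handle -> Fortran INTEGER handle */
    MPI_File back = MPI_File_f2c(fh_f);   /* Fortran INTEGER handle -> C handle */
    MPI_File_sync(back);                  /* the call the binding forwards to */
    MPI_File_close(&fh_c);
    MPI_Finalize();
    return 0;
}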
Example #2
int lemonWriteRecordDataNonBlocking(void *source, MPI_Offset const *nbytes, LemonWriter* writer)
{
  int err;

  if (source == NULL || nbytes == NULL || writer == NULL)
  {
    fprintf(stderr, "[LEMON] Node %d reports in lemonWriteRecordDataNonBlocking:\n"
                    "        NULL pointer or uninitialized writer provided.\n",
            writer != NULL ? writer->my_rank : -1);
    return LEMON_ERR_PARAM;
  }

  if (writer->is_busy)
    lemonFinishWriting(writer);

  if (writer->my_rank == 0)
    err = MPI_File_iwrite_at(*writer->fp, writer->off + writer->pos, source, *nbytes, MPI_BYTE, &writer->request);

  writer->is_busy = 1;
  writer->is_collective = 0;
  writer->buffer = source;
  writer->bytes_wanted = *nbytes;

  MPI_File_sync(*writer->fp);
  MPI_Bcast(&err, 1, MPI_INT, 0, writer->cartesian);

  if (err != MPI_SUCCESS)
  {
    fprintf(stderr, "[LEMON] Node %d reports in lemonWriteRecordDataNonBlocking:\n"
                    "        MPI_File_iwrite_at returned error %d.\n", writer->my_rank, err);
    return LEMON_ERR_WRITE;
  }
  return LEMON_SUCCESS;
}
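Stripped of the Lemon bookkeeping, the example above follows the generic MPI pattern of starting a non-blocking write, completing it later, and then flushing to storage. A minimal sketch of that pattern with an illustrative file name and buffer:

#include <mpi.h>
#include <string.h>

int main(int argc, char **argv)
{
    MPI_File fh;
    MPI_Request req;
    MPI_Status st;
    char buf[64];

    memset(buf, 'x', sizeof(buf));
    MPI_Init(&argc, &argv);
    MPI_File_open(MPI_COMM_SELF, "iwrite_demo.dat",
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    MPI_File_iwrite_at(fh, 0, buf, (int)sizeof(buf), MPI_BYTE, &req); /* start the write */
    /* ... overlap computation here ... */
    MPI_Wait(&req, &st);   /* complete the write before reusing buf */
    MPI_File_sync(fh);     /* force the data out to the storage device */
    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}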
Example #3
void cache_flush_ind(int myid,
		     int numprocs,
		     int size,
		     char *filename)
{
    char *buf;
    MPI_File fh;
    double time;
    int64_t comp = 0;

    assert(size != 0);

    if ((buf = (char *) malloc(MAX_BUFFER_SIZE * sizeof(char))) == NULL)
    {
	fprintf(stderr, "cache_flush_all: malloc buf of size %d failed\n",
		    MAX_BUFFER_SIZE);
    }
    
    MPI_File_open(MPI_COMM_SELF, filename,
		  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_BYTE, MPI_BYTE, 
		      "native", MPI_INFO_NULL);
    MPI_File_seek(fh, 0, MPI_SEEK_SET);

    time = MPI_Wtime();

    while (comp != size)
    {
	if (size - comp > MAX_BUFFER_SIZE)
	{
	    comp += MAX_BUFFER_SIZE;
	    MPI_File_write(fh, buf, MAX_BUFFER_SIZE, 
			   MPI_BYTE, MPI_STATUS_IGNORE);
	}
	else
	{
	    int tmp_bytes = size - comp;
	    comp += size - comp;
	    MPI_File_write(fh, buf, tmp_bytes,
                           MPI_BYTE, MPI_STATUS_IGNORE);

	}
	
    }
    
    free(buf);

    MPI_File_sync(fh);
    time = MPI_Wtime() - time;
    MPI_File_close(&fh);
    MPI_File_delete(filename, MPI_INFO_NULL);
    fprintf(stderr, 
	    "proc %d:cache_flush_ind: File %s written/deleted of "
	    "size %.1f MBytes\n"
	    "Time: %f secs Bandwidth: %f MBytes / sec\n\n",
	    myid,
	    filename, comp*numprocs/1024.0/1024.0,
	    time, comp*numprocs/1024.0/1024.0 / time);
}
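The write loop above ignores how much each call actually transferred. A short fragment (not a complete program) showing how that could be checked, assuming the same open handle fh and buffer buf:

    MPI_Status st;
    int written = 0;
    MPI_File_write(fh, buf, MAX_BUFFER_SIZE, MPI_BYTE, &st);
    MPI_Get_count(&st, MPI_BYTE, &written);   /* bytes transferred by this call */
    if (written != MAX_BUFFER_SIZE)
        fprintf(stderr, "short write: %d of %d bytes\n", written, MAX_BUFFER_SIZE);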
Example #4
int lemonWriteLatticeParallelMapped(LemonWriter *writer, void *data, MPI_Offset siteSize, int const *latticeDims, int const *mapping)
{
  int        written;
  int        error;
  MPI_Status status;
  LemonSetup setup;

  error = lemonClearWriterState(writer);
  if (error != LEMON_SUCCESS)
    return error;

  lemonSetupIOTypes(&setup, writer->cartesian, siteSize, latticeDims, mapping);

  /* Install the data organization we worked out above on the file as a view */
  MPI_Barrier(writer->cartesian);
  MPI_File_set_view(*writer->fp, writer->off + writer->pos, setup.etype, setup.ftype, "native", MPI_INFO_NULL);

  /* Blast away! */
  MPI_File_write_at_all(*writer->fp, writer->pos, data, setup.localVol, setup.etype, &status);
  MPI_File_sync(*writer->fp);

  MPI_Barrier(writer->cartesian);

  writer->pos += setup.totalVol * siteSize;

  /* We should reset the shared file pointer, in an MPI_BYTE based view... */
  MPI_Barrier(writer->cartesian);
  MPI_File_set_view(*writer->fp, 0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL);

  /* Free up the resources we claimed for this operation. */
  lemonFreeIOTypes(&setup);

  MPI_Get_count(&status, MPI_BYTE, &written);
  if (written != siteSize * setup.localVol)
  {
    fprintf(stderr, "[LEMON] Node %d reports in lemonWriteLatticeParallel:\n"
                    "        Could not write the required amount of data.\n", writer->my_rank);
    return LEMON_ERR_WRITE;
  }

  return LEMON_SUCCESS;
}
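The example above hides the view setup inside lemonSetupIOTypes. A minimal self-contained sketch of the same set_view / write_at_all / sync sequence, where each rank writes one int at its own element offset (illustrative file name):

#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_File fh;
    MPI_Status st;
    int rank, val;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    val = rank;
    MPI_File_open(MPI_COMM_WORLD, "view_demo.dat",
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    /* displacement is in bytes; with etype MPI_INT the write offset counts ints */
    MPI_File_set_view(fh, 0, MPI_INT, MPI_INT, "native", MPI_INFO_NULL);
    MPI_File_write_at_all(fh, rank, &val, 1, MPI_INT, &st);  /* collective write */
    MPI_File_sync(fh);                                       /* collective flush */
    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}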
Example #5
JNIEXPORT void JNICALL Java_mpi_File_sync(
        JNIEnv *env, jobject jthis, jlong fh)
{
    int rc = MPI_File_sync((MPI_File)fh);
    ompi_java_exceptionCheck(env, rc);
}
Example #6
int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  setup_globals();

  /* Parse arguments. */
  int SCALE = 16;
  int edgefactor = 16; /* nedges / nvertices, i.e., 2*avg. degree */
  // if (argc >= 2) SCALE = atoi(argv[1]);
  // if (argc >= 3) edgefactor = atoi(argv[2]);
  char* name = argv[1];
  if (argc >= 3) SCALE = atoi(argv[2]);
  if (argc >= 4) edgefactor = atoi(argv[3]);
  // if (argc <= 1 || argc >= 4 || SCALE == 0 || edgefactor == 0) {
  //   if (rank == 0) {
  //     fprintf(stderr, "Usage: %s SCALE edgefactor\n  SCALE = log_2(# vertices) [integer, required]\n  edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]);
  //   }
  if (argc <= 2 || argc >= 5 || SCALE == 0 || edgefactor == 0) {
    if (rank == 0) {
      fprintf(stderr, "Usage: %s filename SCALE edgefactor\n  SCALE = log_2(# vertices) [integer, required]\n  edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]);
    }
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  uint64_t seed1 = 2, seed2 = 3;

  // const char* filename = getenv("TMPFILE");
  const char* filename = name;

  /* If filename is NULL, store data in memory */

  tuple_graph tg;
  tg.nglobaledges = (int64_t)(edgefactor) << SCALE;
  int64_t nglobalverts = (int64_t)(1) << SCALE;

  tg.data_in_file = (filename != NULL);

  if (tg.data_in_file) {
      printf("data in file \n");

    MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_ARE_FATAL);
    // MPI_File_open(MPI_COMM_WORLD, (char*)filename, MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL | MPI_MODE_DELETE_ON_CLOSE | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &tg.edgefile);
    MPI_File_open(MPI_COMM_WORLD, (char*)filename, MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &tg.edgefile);
    MPI_File_set_size(tg.edgefile, tg.nglobaledges * sizeof(packed_edge));
    MPI_File_set_view(tg.edgefile, 0, packed_edge_mpi_type, packed_edge_mpi_type, "native", MPI_INFO_NULL);
    MPI_File_set_atomicity(tg.edgefile, 0);
  }

  /* Make the raw graph edges. */
  /* Get roots for BFS runs, plus maximum vertex with non-zero degree (used by
   * validator). */
  int num_bfs_roots = 64;
  int64_t* bfs_roots = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t));
  int64_t max_used_vertex = 0;

  double make_graph_start = MPI_Wtime();
  {
    /* Spread the two 64-bit numbers into five nonzero values in the correct
     * range. */
    uint_fast32_t seed[5];
    make_mrg_seed(seed1, seed2, seed);

    /* As the graph is being generated, also keep a bitmap of vertices with
     * incident edges.  We keep a grid of processes, each row of which has a
     * separate copy of the bitmap (distributed among the processes in the
     * row), and then do an allreduce at the end.  This scheme is used to avoid
     * non-local communication and reading the file separately just to find BFS
     * roots. */
    MPI_Offset nchunks_in_file = (tg.nglobaledges + FILE_CHUNKSIZE - 1) / FILE_CHUNKSIZE;
    int64_t bitmap_size_in_bytes = int64_min(BITMAPSIZE, (nglobalverts + CHAR_BIT - 1) / CHAR_BIT);
    if (bitmap_size_in_bytes * size * CHAR_BIT < nglobalverts) {
      bitmap_size_in_bytes = (nglobalverts + size * CHAR_BIT - 1) / (size * CHAR_BIT);
    }
    int ranks_per_row = ((nglobalverts + CHAR_BIT - 1) / CHAR_BIT + bitmap_size_in_bytes - 1) / bitmap_size_in_bytes;
    int nrows = size / ranks_per_row;
    int my_row = -1, my_col = -1;
    unsigned char* restrict has_edge = NULL;
    MPI_Comm cart_comm;
    {
      int dims[2] = {size / ranks_per_row, ranks_per_row};
      int periods[2] = {0, 0};
      MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm);
    }
    int in_generating_rectangle = 0;
    if (cart_comm != MPI_COMM_NULL) {
      in_generating_rectangle = 1;
      {
        int dims[2], periods[2], coords[2];
        MPI_Cart_get(cart_comm, 2, dims, periods, coords);
        my_row = coords[0];
        my_col = coords[1];
      }
      MPI_Comm this_col;
      MPI_Comm_split(cart_comm, my_col, my_row, &this_col);
      MPI_Comm_free(&cart_comm);
      has_edge = (unsigned char*)xMPI_Alloc_mem(bitmap_size_in_bytes);
      memset(has_edge, 0, bitmap_size_in_bytes);
      /* Every rank in a given row creates the same vertices (for updating the
       * bitmap); only one writes them to the file (or final memory buffer). */
      packed_edge* buf = (packed_edge*)xmalloc(FILE_CHUNKSIZE * sizeof(packed_edge));
      MPI_Offset block_limit = (nchunks_in_file + nrows - 1) / nrows;
      // fprintf(stderr, "%d: nchunks_in_file = %" PRId64 ", block_limit = %" PRId64 " in grid of %d rows, %d cols\n", rank, (int64_t)nchunks_in_file, (int64_t)block_limit, nrows, ranks_per_row);
      if (tg.data_in_file) {
        tg.edgememory_size = 0;
        tg.edgememory = NULL;
      } else {
        int my_pos = my_row + my_col * nrows;
        int last_pos = (tg.nglobaledges % ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row) != 0) ?
                       (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row) :
                       -1;
        int64_t edges_left = tg.nglobaledges % FILE_CHUNKSIZE;
        int64_t nedges = FILE_CHUNKSIZE * (tg.nglobaledges / ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row)) +
                         FILE_CHUNKSIZE * (my_pos < (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row)) +
                         (my_pos == last_pos ? edges_left : 0);
        /* fprintf(stderr, "%d: nedges = %" PRId64 " of %" PRId64 "\n", rank, (int64_t)nedges, (int64_t)tg.nglobaledges); */
        tg.edgememory_size = nedges;
        tg.edgememory = (packed_edge*)xmalloc(nedges * sizeof(packed_edge));
      }
      MPI_Offset block_idx;
      for (block_idx = 0; block_idx < block_limit; ++block_idx) {
        /* fprintf(stderr, "%d: On block %d of %d\n", rank, (int)block_idx, (int)block_limit); */
        MPI_Offset start_edge_index = int64_min(FILE_CHUNKSIZE * (block_idx * nrows + my_row), tg.nglobaledges);
        MPI_Offset edge_count = int64_min(tg.nglobaledges - start_edge_index, FILE_CHUNKSIZE);
        packed_edge* actual_buf = (!tg.data_in_file && block_idx % ranks_per_row == my_col) ?
                                  tg.edgememory + FILE_CHUNKSIZE * (block_idx / ranks_per_row) :
                                  buf;
        /* fprintf(stderr, "%d: My range is [%" PRId64 ", %" PRId64 ") %swriting into index %" PRId64 "\n", rank, (int64_t)start_edge_index, (int64_t)(start_edge_index + edge_count), (my_col == (block_idx % ranks_per_row)) ? "" : "not ", (int64_t)(FILE_CHUNKSIZE * (block_idx / ranks_per_row))); */
        if (!tg.data_in_file && block_idx % ranks_per_row == my_col) {
          assert (FILE_CHUNKSIZE * (block_idx / ranks_per_row) + edge_count <= tg.edgememory_size);
        }

        generate_kronecker_range(seed, SCALE, start_edge_index, start_edge_index + edge_count, actual_buf);
        if (tg.data_in_file && my_col == (block_idx % ranks_per_row)) { /* Try to spread writes among ranks */
	    /* debug output: offsets and counts are MPI_Offset, so print them as long long */
	    printf("%d: %lld, %lld\n", rank, (long long)start_edge_index, (long long)edge_count);

          MPI_File_write_at(tg.edgefile, start_edge_index, actual_buf, edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE);
        }
        ptrdiff_t i;
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (i = 0; i < edge_count; ++i) {
          int64_t src = get_v0_from_edge(&actual_buf[i]);
          int64_t tgt = get_v1_from_edge(&actual_buf[i]);
          if (src == tgt) continue;
          if (src / bitmap_size_in_bytes / CHAR_BIT == my_col) {
#ifdef _OPENMP
#pragma omp atomic
#endif
            has_edge[(src / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (src % CHAR_BIT));
          }
          if (tgt / bitmap_size_in_bytes / CHAR_BIT == my_col) {
#ifdef _OPENMP
#pragma omp atomic
#endif
            has_edge[(tgt / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (tgt % CHAR_BIT));
          }
        }
      }
      free(buf);
#if 0
      /* The allreduce for each root acts like we did this: */
      MPI_Allreduce(MPI_IN_PLACE, has_edge, bitmap_size_in_bytes, MPI_UNSIGNED_CHAR, MPI_BOR, this_col);
#endif
      MPI_Comm_free(&this_col);
    } else {
      tg.edgememory = NULL;
      tg.edgememory_size = 0;
    }
    MPI_Allreduce(&tg.edgememory_size, &tg.max_edgememory_size, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD);

#ifndef GEN_ONLY
    /* Find roots and max used vertex */
    {
      uint64_t counter = 0;
      int bfs_root_idx;
      for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
        int64_t root;
        while (1) {
          double d[2];
          make_random_numbers(2, seed1, seed2, counter, d);
          root = (int64_t)((d[0] + d[1]) * nglobalverts) % nglobalverts;
          counter += 2;
          if (counter > 2 * nglobalverts) break;
          int is_duplicate = 0;
          int i;
          for (i = 0; i < bfs_root_idx; ++i) {
            if (root == bfs_roots[i]) {
              is_duplicate = 1;
              break;
            }
          }
          if (is_duplicate) continue; /* Everyone takes the same path here */
          int root_ok = 0;
          if (in_generating_rectangle && (root / CHAR_BIT / bitmap_size_in_bytes) == my_col) {
            root_ok = (has_edge[(root / CHAR_BIT) % bitmap_size_in_bytes] & (1 << (root % CHAR_BIT))) != 0;
          }
          MPI_Allreduce(MPI_IN_PLACE, &root_ok, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
          if (root_ok) break;
        }
        bfs_roots[bfs_root_idx] = root;
      }
      num_bfs_roots = bfs_root_idx;

      /* Find maximum non-zero-degree vertex. */
      {
        int64_t i;
        max_used_vertex = 0;
        if (in_generating_rectangle) {
          for (i = bitmap_size_in_bytes * CHAR_BIT; i > 0; --i) {
            if (i > nglobalverts) continue;
            if (has_edge[(i - 1) / CHAR_BIT] & (1 << ((i - 1) % CHAR_BIT))) {
              max_used_vertex = (i - 1) + my_col * CHAR_BIT * bitmap_size_in_bytes;
              break;
            }
          }
        }
        MPI_Allreduce(MPI_IN_PLACE, &max_used_vertex, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD);
      }
    }
#endif

    if (in_generating_rectangle) {
      MPI_Free_mem(has_edge);
    }
    if (tg.data_in_file) {
      MPI_File_sync(tg.edgefile);
    }
  }

  double make_graph_stop = MPI_Wtime();
  double make_graph_time = make_graph_stop - make_graph_start;
  if (rank == 0) { /* Not an official part of the results */
    fprintf(stderr, "graph_generation:               %f s\n", make_graph_time);
  }


  //debug
#ifndef GEN_ONLY //!GEN_ONLY

  /* Make user's graph data structure. */
  double data_struct_start = MPI_Wtime();
  make_graph_data_structure(&tg);
  double data_struct_stop = MPI_Wtime();
  double data_struct_time = data_struct_stop - data_struct_start;
  if (rank == 0) { /* Not an official part of the results */
    fprintf(stderr, "construction_time:              %f s\n", data_struct_time);
  }

  /* Number of edges visited in each BFS; a double so get_statistics can be
   * used directly. */
  double* edge_counts = (double*)xmalloc(num_bfs_roots * sizeof(double));

  /* Run BFS. */
  int validation_passed = 1;
  double* bfs_times = (double*)xmalloc(num_bfs_roots * sizeof(double));
  double* validate_times = (double*)xmalloc(num_bfs_roots * sizeof(double));
  uint64_t nlocalverts = get_nlocalverts_for_pred();
  int64_t* pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t));

  int bfs_root_idx;
  for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
    int64_t root = bfs_roots[bfs_root_idx];

    if (rank == 0) fprintf(stderr, "Running BFS %d\n", bfs_root_idx);

    /* Clear the pred array. */
    memset(pred, 0, nlocalverts * sizeof(int64_t));

    /* Do the actual BFS. */
    double bfs_start = MPI_Wtime();
    run_bfs(root, &pred[0]);
    double bfs_stop = MPI_Wtime();
    bfs_times[bfs_root_idx] = bfs_stop - bfs_start;
    if (rank == 0) fprintf(stderr, "Time for BFS %d is %f\n", bfs_root_idx, bfs_times[bfs_root_idx]);

    /* Validate result. */
    if (rank == 0) fprintf(stderr, "Validating BFS %d\n", bfs_root_idx);

    double validate_start = MPI_Wtime();
    int64_t edge_visit_count;
    int validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count);
    double validate_stop = MPI_Wtime();
    validate_times[bfs_root_idx] = validate_stop - validate_start;
    if (rank == 0) fprintf(stderr, "Validate time for BFS %d is %f\n", bfs_root_idx, validate_times[bfs_root_idx]);
    edge_counts[bfs_root_idx] = (double)edge_visit_count;
    if (rank == 0) fprintf(stderr, "TEPS for BFS %d is %g\n", bfs_root_idx, edge_visit_count / bfs_times[bfs_root_idx]);

    if (!validation_passed_one) {
      validation_passed = 0;
      if (rank == 0) fprintf(stderr, "Validation failed for this BFS root; skipping rest.\n");
      break;
    }
  }

  MPI_Free_mem(pred);
  free(bfs_roots);
  free_graph_data_structure();

#endif //!GEN_ONLY

  if (tg.data_in_file) {
    MPI_File_close(&tg.edgefile);
  } else {
    free(tg.edgememory); tg.edgememory = NULL;
  }

#ifndef GEN_ONLY
  /* Print results. */
  if (rank == 0) {
    if (!validation_passed) {
      fprintf(stdout, "No results printed for invalid run.\n");
    } else {
      int i;
      fprintf(stdout, "SCALE:                          %d\n", SCALE);
      fprintf(stdout, "edgefactor:                     %d\n", edgefactor);
      fprintf(stdout, "NBFS:                           %d\n", num_bfs_roots);
      fprintf(stdout, "graph_generation:               %g\n", make_graph_time);
      fprintf(stdout, "num_mpi_processes:              %d\n", size);
      fprintf(stdout, "construction_time:              %g\n", data_struct_time);
      double stats[s_LAST];
      get_statistics(bfs_times, num_bfs_roots, stats);
      fprintf(stdout, "min_time:                       %g\n", stats[s_minimum]);
      fprintf(stdout, "firstquartile_time:             %g\n", stats[s_firstquartile]);
      fprintf(stdout, "median_time:                    %g\n", stats[s_median]);
      fprintf(stdout, "thirdquartile_time:             %g\n", stats[s_thirdquartile]);
      fprintf(stdout, "max_time:                       %g\n", stats[s_maximum]);
      fprintf(stdout, "mean_time:                      %g\n", stats[s_mean]);
      fprintf(stdout, "stddev_time:                    %g\n", stats[s_std]);
      get_statistics(edge_counts, num_bfs_roots, stats);
      fprintf(stdout, "min_nedge:                      %.11g\n", stats[s_minimum]);
      fprintf(stdout, "firstquartile_nedge:            %.11g\n", stats[s_firstquartile]);
      fprintf(stdout, "median_nedge:                   %.11g\n", stats[s_median]);
      fprintf(stdout, "thirdquartile_nedge:            %.11g\n", stats[s_thirdquartile]);
      fprintf(stdout, "max_nedge:                      %.11g\n", stats[s_maximum]);
      fprintf(stdout, "mean_nedge:                     %.11g\n", stats[s_mean]);
      fprintf(stdout, "stddev_nedge:                   %.11g\n", stats[s_std]);
      double* secs_per_edge = (double*)xmalloc(num_bfs_roots * sizeof(double));
      for (i = 0; i < num_bfs_roots; ++i) secs_per_edge[i] = bfs_times[i] / edge_counts[i];
      get_statistics(secs_per_edge, num_bfs_roots, stats);
      fprintf(stdout, "min_TEPS:                       %g\n", 1. / stats[s_maximum]);
      fprintf(stdout, "firstquartile_TEPS:             %g\n", 1. / stats[s_thirdquartile]);
      fprintf(stdout, "median_TEPS:                    %g\n", 1. / stats[s_median]);
      fprintf(stdout, "thirdquartile_TEPS:             %g\n", 1. / stats[s_firstquartile]);
      fprintf(stdout, "max_TEPS:                       %g\n", 1. / stats[s_minimum]);
      fprintf(stdout, "harmonic_mean_TEPS:             %g\n", 1. / stats[s_mean]);
      /* Formula from:
       * Title: The Standard Errors of the Geometric and Harmonic Means and
       *        Their Application to Index Numbers
       * Author(s): Nilan Norris
       * Source: The Annals of Mathematical Statistics, Vol. 11, No. 4 (Dec., 1940), pp. 445-448
       * Publisher(s): Institute of Mathematical Statistics
       * Stable URL: http://www.jstor.org/stable/2235723
       * (same source as in specification). */
      fprintf(stdout, "harmonic_stddev_TEPS:           %g\n", stats[s_std] / (stats[s_mean] * stats[s_mean] * sqrt(num_bfs_roots - 1)));
      free(secs_per_edge); secs_per_edge = NULL;
      free(edge_counts); edge_counts = NULL;
      get_statistics(validate_times, num_bfs_roots, stats);
      fprintf(stdout, "min_validate:                   %g\n", stats[s_minimum]);
      fprintf(stdout, "firstquartile_validate:         %g\n", stats[s_firstquartile]);
      fprintf(stdout, "median_validate:                %g\n", stats[s_median]);
      fprintf(stdout, "thirdquartile_validate:         %g\n", stats[s_thirdquartile]);
      fprintf(stdout, "max_validate:                   %g\n", stats[s_maximum]);
      fprintf(stdout, "mean_validate:                  %g\n", stats[s_mean]);
      fprintf(stdout, "stddev_validate:                %g\n", stats[s_std]);
#if 0
      for (i = 0; i < num_bfs_roots; ++i) {
        fprintf(stdout, "Run %3d:                        %g s, validation %g s\n", i + 1, bfs_times[i], validate_times[i]);
      }
#endif
    }
  }
  free(bfs_times);
  free(validate_times);

#endif
  cleanup_globals();
  MPI_Finalize();
  return 0;
}
Example #7
int main(int argc, char **argv)
{
    int buf[1024], amode, flag, mynod, len, i;
    MPI_File fh;
    MPI_Status status;
    MPI_Datatype newtype;
    MPI_Offset disp, offset;
    MPI_Group group;
    MPI_Datatype etype, filetype;
    char datarep[25], *filename;

    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mynod);

/* process 0 takes the file name as a command-line argument and 
   broadcasts it to other processes */
    if (!mynod) {
	i = 1;
	while ((i < argc) && strcmp("-fname", *argv)) {
	    i++;
	    argv++;
	}
	if (i >= argc) {
	    printf("\n*#  Usage: misc  <mpiparameter> -- -fname filename\n\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}
	argv++;
	len = strlen(*argv);
	filename = (char *) malloc(len+1);
	strcpy(filename, *argv);
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }
    else {
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	filename = (char *) malloc(len+1);
	MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }


    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                  MPI_INFO_NULL, &fh);

    MPI_File_write(fh, buf, 1024, MPI_INT, &status);

    MPI_File_sync(fh);

    MPI_File_get_amode(fh, &amode);
    if (!mynod) printf("testing MPI_File_get_amode\n");
    if (amode != (MPI_MODE_CREATE | MPI_MODE_RDWR))
	printf("amode is %d, should be %d\n\n", amode, MPI_MODE_CREATE |
                      MPI_MODE_RDWR);

    MPI_File_get_atomicity(fh, &flag);
    if (flag) printf("atomicity is %d, should be 0\n", flag);
    if (!mynod) printf("setting atomic mode\n");
    MPI_File_set_atomicity(fh, 1);
    MPI_File_get_atomicity(fh, &flag);
    if (!flag) printf("atomicity is %d, should be 1\n", flag);
    MPI_File_set_atomicity(fh, 0);
    if (!mynod) printf("reverting back to nonatomic mode\n");

    MPI_Type_vector(10, 10, 20, MPI_INT, &newtype);
    MPI_Type_commit(&newtype);

    MPI_File_set_view(fh, 1000, MPI_INT, newtype, "native", MPI_INFO_NULL);
    if (!mynod) printf("testing MPI_File_get_view\n");
    MPI_File_get_view(fh, &disp, &etype, &filetype, datarep);
    if ((disp != 1000) || strcmp(datarep, "native"))
	printf("disp = %I64, datarep = %s, should be 1000, native\n\n", disp, datarep);

    if (!mynod) printf("testing MPI_File_get_byte_offset\n");
    MPI_File_get_byte_offset(fh, 10, &disp);
    if (disp != (1000+20*sizeof(int))) printf("byte offset = %lld, should be %d\n\n", (long long) disp, (int) (1000+20*sizeof(int)));

    MPI_File_get_group(fh, &group);

    if (!mynod) printf("testing MPI_File_set_size\n");
    MPI_File_set_size(fh, 1000+15*sizeof(int));
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_File_sync(fh);
    MPI_File_get_size(fh, &disp);
    if (disp != 1000+15*sizeof(int)) printf("file size = %lld, should be %d\n\n", (long long) disp, (int) (1000+15*sizeof(int)));
 
    if (!mynod) printf("seeking to eof and testing MPI_File_get_position\n");
    MPI_File_seek(fh, 0, MPI_SEEK_END);
    MPI_File_get_position(fh, &disp);
    if (disp != 10) printf("file pointer posn = %lld, should be 10\n\n", (long long) disp);

    if (!mynod) printf("testing MPI_File_get_byte_offset\n");
    MPI_File_get_byte_offset(fh, disp, &offset);
    if (offset != (1000+20*sizeof(int))) printf("byte offset = %lld, should be %d\n\n", (long long) offset, (int) (1000+20*sizeof(int)));
    MPI_Barrier(MPI_COMM_WORLD);

    if (!mynod) printf("testing MPI_File_seek with MPI_SEEK_CUR\n");
    MPI_File_seek(fh, -10, MPI_SEEK_CUR);
    MPI_File_get_position(fh, &disp);
    MPI_File_get_byte_offset(fh, disp, &offset);
    if (offset != 1000)
	printf("file pointer posn in bytes = %I64, should be 1000\n\n", offset);

    if (!mynod) printf("preallocating disk space up to 8192 bytes\n");
    MPI_File_preallocate(fh, 8192);

    if (!mynod) printf("closing the file and deleting it\n");
    MPI_File_close(&fh);
    
    MPI_Barrier(MPI_COMM_WORLD);
    if (!mynod) MPI_File_delete(filename, MPI_INFO_NULL);

    MPI_Type_free(&newtype);
    MPI_Type_free(&filetype);
    MPI_Group_free(&group);
    free(filename);
    MPI_Finalize(); 
    return 0;
}
Example #8
void do_ffApply(SEXP ans,
                double *data,
                SEXP margin,
                SEXP function,
                int my_start,
                int my_end,
                int nrows,
                int ncols,
                int worldRank,
                char *out_filename) {

  MPI_Status stat;
  MPI_Comm comm = MPI_COMM_WORLD;
  MPI_File fh;

  SEXP data_chunk, R_fcall, parsedCmd = R_NilValue;  
  double *rchunk;
  int i,k, offset=0, count=0;
  int no_of_protects = 0;
  
  /* Open the file handler */
  MPI_File_open(comm, out_filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

  /* The MPI_FILE_SET_VIEW routine changes the process's view of the data in the file */
  MPI_File_set_view(fh, 0, MPI_DOUBLE, MPI_DOUBLE, "native", MPI_INFO_NULL);

  /* Parse the command, returns a function object */
  DEBUG("\n**PROTECTING 2 : Worker process %3d \n", worldRank);
  PROTECT(parsedCmd = parseExpression(function));
  no_of_protects++;

   /* Create R LANGSXP Vector, R function holder
     length of the vector is 1 + number of arguments */
  PROTECT(R_fcall = lang2(VECTOR_ELT(parsedCmd, 0), R_NilValue));
  no_of_protects++;


  if (INTEGER(margin)[0] == 1) {

	  DEBUG("\n**PROTECTING 1 : Worker process %3d \n", worldRank);
    PROTECT(data_chunk = allocVector(REALSXP, ncols));
    no_of_protects++;
    rchunk = REAL(data_chunk);

    for(i=my_start, k=0; i<my_end; i++, k++) {
      
      for(int j=0; j<ncols; j++) {
        rchunk[j] = data[j*nrows+i];
      }

      SETCADR(R_fcall, data_chunk);
      SET_VECTOR_ELT(ans, 0, eval(R_fcall, R_GlobalEnv));

      count = length(VECTOR_ELT(ans, 0));
      offset = i*count;
      DEBUG("\n**MPI_File_write_at** : Worker process %3d \n", worldRank);
      MPI_File_write_at(fh, offset, REAL(VECTOR_ELT(ans, 0)), count, MPI_DOUBLE, &stat); 
    }
  }

  if (INTEGER(margin)[0] == 2) {

	  DEBUG("\n**PROTECTING 1 : Worker process %3d \n", worldRank);
    PROTECT(data_chunk = allocVector(REALSXP, nrows));
    no_of_protects++;
    rchunk = REAL(data_chunk);

    for(i=my_start, k=0; i<my_end; i++, k++) {
      
      for(int j=0; j<nrows; j++) {
        rchunk[j] = data[j+nrows*i];
      }

      SETCADR(R_fcall, data_chunk);
      SET_VECTOR_ELT(ans, 0, eval(R_fcall, R_GlobalEnv));

      count = length(VECTOR_ELT(ans, 0));
      offset = i*count;

      DEBUG("\n**MPI_File_write_at** : Worker process %3d \n", worldRank);
      MPI_File_write_at(fh, offset, REAL(VECTOR_ELT(ans, 0)), count, MPI_DOUBLE, &stat);

    }

  }

  DEBUG("\n**count is %3d \n", count);
  DEBUG("\n**length(data_chunk) is %3d \n", length(data_chunk)) ;
  //TODO ET
  /* If count and length(data_chunk) are the same, then a matrix is written to the ff file,
   * if count is length ==1, then a list is written to the ff file (the current ff read code works for this).
   * Need to return a list/matix flag to the R code, so that it knows how to open the ff file.*/

  MPI_File_sync( fh ) ; 			// Causes all previous writes to be transferred to the storage device
  MPI_Barrier( MPI_COMM_WORLD ) ; 	// Blocks until all processes in the communicator have reached this routine.

  DEBUG("\n**No of things that are protected %d : Worker process %3d \n", no_of_protects, worldRank);
  DEBUG("\n**UNPROTECTING 3 : Worker process %3d \n", worldRank);
  UNPROTECT(3);
  
  /* Close file handler */
  MPI_File_close(&fh);

  MPI_Barrier( MPI_COMM_WORLD ) ; 	// Blocks until all processes in the communicator have reached this routine.
  return;
}
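The sync-then-barrier at the end of the example matches the usual MPI recipe for making one process's writes visible to other processes without atomic mode: sync, barrier, sync. A short fragment of that sequence, assuming a handle fh opened on MPI_COMM_WORLD (illustrative names):

    MPI_File_sync(fh);              /* writer: push data to storage */
    MPI_Barrier(MPI_COMM_WORLD);    /* order the writes before later reads */
    MPI_File_sync(fh);              /* readers: discard any stale cached data */
    /* ... MPI_File_read_at(...) on the reading ranks ... */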
Example #9
void cache_flush_ind_all(int myid,
			 int numprocs,
			 int size,
			 char *filename)
{
    char *buf;
    MPI_File fh;
    double time;
    /* Calculate how much each processor must write */
    int64_t ind_size = (size + numprocs - 1) / numprocs; /* round up; ceil() on an integer quotient would truncate first */
    int64_t comp = 0;
    char *ind_filename = NULL;
    int ind_filename_size = 0;

    /* We will assume that we are using less than 1,000,000 processors 
     * therefore add 1 for NULL char and 6 for each individual processor 
     * for 7 total */
    ind_filename_size += strlen(filename) + 7;
    
    if ((ind_filename = (char *) malloc(ind_filename_size)) == NULL)
    {
	fprintf(stderr, "cache_flush_ind_all: malloc ind_filename of size"
		"%d failed\n", ind_filename_size);
    }
    sprintf(ind_filename, "%s%d", filename, myid);

    ind_size = ind_size * 1024 * 1024; /* convert ind_size from MBytes to bytes */
    assert(ind_size != 0);

    if ((buf = (char *) malloc(MAX_BUFFER_SIZE * sizeof(char))) == NULL)
    {
	fprintf(stderr, "cache_flush_all: malloc buf of size %d failed\n",
		    MAX_BUFFER_SIZE);
    }
    
    MPI_Barrier(MPI_COMM_WORLD);
    
    MPI_File_open(MPI_COMM_SELF, ind_filename,
		  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_BYTE, MPI_BYTE, 
		      "native", MPI_INFO_NULL);
    MPI_File_seek(fh, 0, MPI_SEEK_SET);

    time = MPI_Wtime();

    while (comp != ind_size)
    {
	if (ind_size - comp > MAX_BUFFER_SIZE)
	{
	    comp += MAX_BUFFER_SIZE;
	    MPI_File_write(fh, buf, MAX_BUFFER_SIZE, 
			   MPI_BYTE, MPI_STATUS_IGNORE);
	}
	else
	{
	    int tmp_bytes = ind_size - comp;
	    comp += ind_size - comp;
	    MPI_File_write(fh, buf, tmp_bytes, 
			   MPI_BYTE, MPI_STATUS_IGNORE);
	}
	
    }

    MPI_File_sync(fh);
    time = MPI_Wtime() - time;
    MPI_File_close(&fh);
    MPI_Barrier(MPI_COMM_WORLD);
#if 0
    MPI_File_delete(ind_filename, MPI_INFO_NULL);
#endif
    if (myid == 0)
    {
	fprintf(stderr, 
		"cache_flush_ind_all: File(s) written of "
		"size %.1f MBytes\n"
		"Time: %f secs Bandwidth: %f MBytes / sec\n\n",
		comp*numprocs/1024.0/1024.0,
		time, comp*numprocs/1024.0/1024.0/time);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    free(ind_filename);
    free(buf);
}
Example #10
void cache_flush_all(int myid,
		     int numprocs,
		     int size,
		     char *filename)
{
    char *buf;
    MPI_File fh;
    double time;
    /* Calculate how much each processor must write */
    int64_t ind_size = (size + numprocs - 1) / numprocs; /* round up; ceil() on an integer quotient would truncate first */
    int64_t comp = 0;

    ind_size = ind_size * 1024 * 1024; /* convert ind_size from MBytes to bytes */
    assert(ind_size != 0);

    if ((buf = (char *) malloc(MAX_BUFFER_SIZE * sizeof(char))) == NULL)
    {
	fprintf(stderr, "cache_flush_all: malloc buf of size %d failed\n",
		    MAX_BUFFER_SIZE);
    }
    
    MPI_Barrier(MPI_COMM_WORLD);
    
    MPI_File_open(MPI_COMM_WORLD, filename,
		  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, ind_size * myid, MPI_BYTE, MPI_BYTE, 
		      "native", MPI_INFO_NULL);
    MPI_File_seek(fh, 0, MPI_SEEK_SET);

    time = MPI_Wtime();

    while (comp != ind_size)
    {
	if (ind_size - comp > MAX_BUFFER_SIZE)
	{
	    comp += MAX_BUFFER_SIZE;
	    MPI_File_write(fh, buf, MAX_BUFFER_SIZE, 
			   MPI_BYTE, MPI_STATUS_IGNORE);
	}
	else
	{
	    int tmp_bytes = ind_size - comp;
	    comp += ind_size - comp;
	    MPI_File_write(fh, buf, tmp_bytes,
			   MPI_BYTE, MPI_STATUS_IGNORE);
	}
	
    }

    free(buf);

    MPI_File_sync(fh);
    time = MPI_Wtime() - time;
    MPI_File_close(&fh);
    MPI_Barrier(MPI_COMM_WORLD);
#if 0
    if (myid == 0)
    {
	MPI_File_delete(filename, MPI_INFO_NULL);
	fprintf(stderr, 
		"cache_flush_all: File %s written/deleted of "
		"size %.1f MBytes\n"
		"Time: %f secs Bandwidth: %f MBytes / sec\n\n",
		filename, comp*numprocs/1024.0/1024.0,
		time, comp*numprocs/1024.0/1024.0/time);
    }
    MPI_Barrier(MPI_COMM_WORLD);
#endif
}
Example #11
static int
test_mpio_1wMr(char *filename, int special_request)
{
    char hostname[128];
    int  mpi_size, mpi_rank;
    MPI_File fh;
    char mpi_err_str[MPI_MAX_ERROR_STRING];
    int  mpi_err_strlen;
    int  mpi_err;
    unsigned char writedata[DIMSIZE], readdata[DIMSIZE];
    unsigned char expect_val;
    int  i, irank;
    int  nerrs = 0;    /* number of errors */
    int  atomicity;
    MPI_Offset  mpi_off;
    MPI_Status  mpi_stat;

    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    if (MAINPROCESS && VERBOSE_MED){
        printf("Testing one process writes, all processes read.\n");
  printf("Using %d processes accessing file %s\n", mpi_size, filename);
        printf("    (Filename can be specified via program argument)\n");
    }

    /* show the hostname so that we can tell where the processes are running */
    if (VERBOSE_DEF){
  if (gethostname(hostname, 128) < 0){
      PRINTID;
      printf("gethostname failed\n");
      return 1;
  }
  PRINTID;
  printf("hostname=%s\n", hostname);
    }

    /* Delete any old file in order to start anew. */
    /* Must delete because MPI_File_open does not have a Truncate mode. */
    /* Don't care if it has error. */
    MPI_File_delete(filename, MPI_INFO_NULL);
    MPI_Barrier(MPI_COMM_WORLD);  /* prevent race condition */

    if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
      MPI_MODE_RDWR | MPI_MODE_CREATE ,
      MPI_INFO_NULL, &fh))
      != MPI_SUCCESS){
  MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
  PRINTID;
  printf("MPI_File_open failed (%s)\n", mpi_err_str);
  return 1;
    }

if (special_request & USEATOM){
    /* ==================================================
     * Set atomicity to true (1).  A POSIX compliant filesystem
     * should not need this.
     * ==================================================*/
    if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS){
  MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
  PRINTID;
  printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
    }
    if (VERBOSE_HI)
  printf("Initial atomicity = %d\n", atomicity);
    if ((mpi_err = MPI_File_set_atomicity(fh, 1)) != MPI_SUCCESS){
  MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
  PRINTID;
  printf("MPI_File_set_atomicity failed (%s)\n", mpi_err_str);
    }
    if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS){
  MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
  PRINTID;
  printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
    }
    if (VERBOSE_HI)
  printf("After set_atomicity atomicity = %d\n", atomicity);
}

    /* This barrier is not necessary but do it anyway. */
    MPI_Barrier(MPI_COMM_WORLD);
    if (VERBOSE_HI){
  PRINTID;
  printf("between MPI_Barrier and MPI_File_write_at\n");
    }

    /* ==================================================
     * Each process calculates what to write but
     * only process irank(0) writes.
     * ==================================================*/
    irank=0;
    for (i=0; i < DIMSIZE; i++)
  writedata[i] = irank*DIMSIZE + i;
    mpi_off = irank*DIMSIZE;

    /* Only one process writes */
    if (mpi_rank==irank){
  if (VERBOSE_HI){
      PRINTID; printf("wrote %d bytes at %ld\n", DIMSIZE, (long)mpi_off);
  }
  if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE,
      MPI_BYTE, &mpi_stat))
    != MPI_SUCCESS){
      MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
      PRINTID;
      printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
        (long) mpi_off, DIMSIZE, mpi_err_str);
      return 1;
  };
    };

    /* Bcast the return code and */
    /* make sure all writing are done before reading. */
    MPI_Bcast(&mpi_err, 1, MPI_INT, irank, MPI_COMM_WORLD);
    if (VERBOSE_HI){
  PRINTID;
  printf("MPI_Bcast: mpi_err = %d\n", mpi_err);
    }

if (special_request & USEFSYNC){
    /* ==================================================
     * Do a file sync.  A POSIX compliant filesystem
     * should not need this.
     * ==================================================*/
    if (VERBOSE_HI)
  printf("Apply MPI_File_sync\n");
    /* call file_sync to force the write out */
    if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS){
  MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
  PRINTID;
  printf("MPI_File_sync failed (%s)\n", mpi_err_str);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    /* call file_sync to force the write out */
    if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS){
  MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
  PRINTID;
  printf("MPI_File_sync failed (%s)\n", mpi_err_str);
    }
}

    /* This barrier is not necessary because the Bcast or File_sync above */
    /* should take care of it.  Do it anyway. */
    MPI_Barrier(MPI_COMM_WORLD);
    if (VERBOSE_HI){
  PRINTID;
  printf("after MPI_Barrier\n");
    }

    /* ==================================================
     * Each process reads what process 0 wrote and verify.
     * ==================================================*/
    irank=0;
    mpi_off = irank*DIMSIZE;
    if ((mpi_err = MPI_File_read_at(fh, mpi_off, readdata, DIMSIZE, MPI_BYTE,
      &mpi_stat))
      != MPI_SUCCESS){
  MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
  PRINTID;
  printf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n",
    (long) mpi_off, DIMSIZE, mpi_err_str);
  return 1;
    };
    for (i=0; i < DIMSIZE; i++){
  expect_val = irank*DIMSIZE + i;
  if (readdata[i] != expect_val){
      PRINTID;
      printf("read data[%d:%d] got %02x, expect %02x\n", irank, i,
        readdata[i], expect_val);
      nerrs++;
  }
    }

    MPI_File_close(&fh);

    if (VERBOSE_HI){
  PRINTID;
  printf("%d data errors detected\n", nerrs);
    }

    mpi_err = MPI_Barrier(MPI_COMM_WORLD);
    return nerrs;
}
Example #12
/**
 * \brief Measures the time to write once to a file.
 *
 * Only one process is active. It writes once to a file.
 *
 * Remark:<br>
 * With the <tt>O_DIRECT</tt> flag set, cache effects are minimized, because I/O
 * is done directly to/from user space buffers. The operation system's page
 * cache is bypassed. Under Linux 2.6 alignment to 512-byte boundaries is
 * required for buffer and file offset. Thus the following parameters should be
 * set in a SKaMPI input file:
 * - <tt>set_send_buffert_alignment (512)</tt>
 * - <tt>set_recv_buffert_alignment (512)</tt>
 * - <tt>switch_buffer_cycling_off ()</tt><br>
 *
 * <tt>O_DIRECT</tt> is only relevant if the POSIX-API is used for I/O.
 * 
 * For more information please refer to the <tt>open ()</tt> man pages.
 *
 * \param[in] size        size of memory buffer, i.e. number of <tt>MPI_BYTE</tt>s
 * \param[in] api         POSIX-API or MPI-API for I/O accesses
 * \param[in] create_flag write into existing file (FALSE) or create it (TRUE)
 * \param[in] directio_flag open file with <tt>O_DIRECT</tt> flag to minimize
 *                          cache effects
 *
 * \return    measured time 
 */
double measure_MPI_IO_write_file_once (int size, char *api, int create_flag, int directio_flag){
  double     start_time = 1.0, end_time = 0.0;
  int        open_flags;
  char       *error_string;
  
  if (get_measurement_rank () == 0){
    if (strcmp (api, POSIX_API) == 0){

      if (directio_flag != 0)
	open_flags = O_WRONLY | O_DIRECT;
      else
	open_flags = O_WRONLY;

      errno = 0;

      if (create_flag == 0){	/* open existing file */

	if ((io_fd = open (io_filename, open_flags)) < 0){
	  error_string = strerror (errno);
	  error_with_abort (errno,
			    "\nmeasure_MPI_IO_write_file_once (int %d, char * %s, int %d, int %d) failed."
			    "\nCannot open local file (write only mode)."
			    "\nError: %s\n",
			    size, api, create_flag, directio_flag, error_string);
	}
      }
      else {			/* open nonexisting file and create it */
	
	if ((io_fd = open (io_filename, open_flags|O_CREAT, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0){
	  error_string = strerror (errno);
	  error_with_abort (errno,
			   "\nmeasure_MPI_IO_write_file_once (int %d, char * %s, int %d, int %d) failed."
			   "\nCannot open local file (write only mode)."
			   "\nError: %s\n",
			    size, api, create_flag, directio_flag, error_string);
	}
      }

      start_time = start_synchronization ();
      write (io_fd, get_send_buffer (), size);
      fsync (io_fd);
      end_time = MPI_Wtime ();

      close (io_fd);

    }
    else{ 			/* if strcmp (api, POSIX_API) != 0 */

      if (create_flag == 0){

	MPI_File_open (MPI_COMM_SELF, io_filename, MPI_MODE_WRONLY, MPI_INFO_NULL, &io_fh);
      }
      else{ 			/* if create_flag != 0*/

	MPI_File_open (MPI_COMM_SELF, io_filename, MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &io_fh);
      }
      
      MPI_File_set_view (io_fh, (MPI_Offset)0, 
			MPI_BYTE, MPI_BYTE,
			"native", MPI_INFO_NULL);

      start_time = start_synchronization ();
      MPI_File_write (io_fh, get_send_buffer (), size, MPI_BYTE, MPI_STATUS_IGNORE);
      MPI_File_sync (io_fh);
      end_time = MPI_Wtime ();
      
      MPI_File_close (&io_fh);
    }
  }
  else if (get_measurement_rank () != 0) {
    start_synchronization ();
  }
  stop_synchronization ();

  if (get_measurement_rank () == 0)
    return end_time - start_time;
  else
    return -1.0;
}
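The remark above notes that O_DIRECT needs 512-byte alignment of the user buffer under Linux 2.6. A small sketch of obtaining such a buffer with posix_memalign (the buffer size is illustrative, not a SKaMPI default):

#define _POSIX_C_SOURCE 200112L   /* expose posix_memalign */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void  *buf = NULL;
    size_t nbytes = 1 << 20;                      /* illustrative size */
    if (posix_memalign(&buf, 512, nbytes) != 0) { /* 512-byte aligned for O_DIRECT */
        fprintf(stderr, "posix_memalign failed\n");
        return 1;
    }
    /* ... pass buf to write()/read() on a descriptor opened with O_DIRECT ... */
    free(buf);
    return 0;
}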
Example #13
/* *************************** *
 *  Main computational kernel  *
 * *************************** */
int correlationKernel(int rank,
                      int size,
                      double* dataMatrixX,
                      double* dataMatrixY,
                      int columns,
                      int rows,
                      char *out_filename,
                      int distance_flag) {

    int local_check = 0, global_check = 0;
    int i = 0, j, taskNo;
    int err, count = 0;
    unsigned long long fair_chunk = 0, coeff_count = 0;
    unsigned int init_and_cleanup_loop_iter=0;
    unsigned long long cor_cur_size = 0;
    
    double start_time, end_time;

    // Variables needed by the Indexed Datatype
    MPI_Datatype coeff_index_dt;
    MPI_File fh;
    int *blocklens, *indices;

    MPI_Status stat;
    MPI_Comm comm = MPI_COMM_WORLD;

    // Master processor keeps track of tasks
    if (rank == 0) {

        // Make sure everything will work fine even if there are
        // less genes than available workers (there are size-1 workers
        // master does not count)
        if ( (size-1) > rows )
            init_and_cleanup_loop_iter = rows+1;
        else
            init_and_cleanup_loop_iter = size;

        // Start timer
        start_time = MPI_Wtime();

        // Send out initial tasks (remember you have size-1 workers, master does not count)
        for (i=1; i<init_and_cleanup_loop_iter; i++) {
            taskNo = i-1;
            err = MPI_Send(&taskNo, 1, MPI_INT, i, 0, comm);
        }        

        // Terminate any processes that were not working due to the fact
        // that the number of rows was less than the actual available workers
        for(i=init_and_cleanup_loop_iter; i < size; i++) {
            PROF(rank, "\nPROF_idle : Worker %d terminated due to insufficient work load", i);
            err = -1;
            err = MPI_Send(&err, 1, MPI_INT, i, 0, comm);
        }

        // Wait for workers to finish their work assignment and ask for more
        for (i=init_and_cleanup_loop_iter-1; i<rows; i++) {
            err = MPI_Recv(&taskNo, 1, MPI_INT, MPI_ANY_SOURCE, 0, comm, &stat);

            // Check taskNo to make sure everything is ok. Negative means there is problem
            // thus terminate gracefully all remaining working workers
            if ( taskNo < 0 ) {
                // Reduce by one because one worker is already terminated
                init_and_cleanup_loop_iter--;
                // Break and cleanup
                break;
            }

            // The sending processor is ready to work:
            // Its ID is in stat.MPI_SOURCE
            // Send it the current task (i)
            err = MPI_Send(&i, 1, MPI_INT, stat.MPI_SOURCE, 0, comm);
        }

        // Clean up processors
        for (i=1; i<init_and_cleanup_loop_iter; i++) {
            // All tasks complete - shutdown workers
            err = MPI_Recv(&taskNo, 1, MPI_INT, MPI_ANY_SOURCE, 0, comm, &stat);
            // If process failed then it will not be waiting to receive anything
            // We have to ignore the send because it will deadlock
            if ( taskNo < 0 )
                continue;
            err = -1;
            err = MPI_Send(&err, 1, MPI_INT, stat.MPI_SOURCE, 0, comm);
        }

        // Master is *always* OK
        local_check = 0;
        MPI_Allreduce(&local_check, &global_check, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

        // Check failed, abort
        if ( global_check != 0 ) {
            return -1;
        }
        
        // Stop timer
        end_time = MPI_Wtime();
        PROF(rank, "\nPROF_comp (workers=%d) : Time taken by correlation coefficients computations : %g\n", size-1, end_time - start_time);

        // Start timer
        start_time = MPI_Wtime();

        // Master process must call MPI_File_set_view as well, it's a collective call
        // Open the file handler
        MPI_File_open(comm, out_filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

        // Create the file view
        MPI_File_set_view(fh, 0, MPI_DOUBLE, MPI_DOUBLE, "native", MPI_INFO_NULL);

        // Write data to disk
        MPI_File_write_all(fh, &cor[0], 0, MPI_DOUBLE, &stat);

        // Stop timer
        end_time = MPI_Wtime();
        PROF(rank, "\nPROF_write (workers=%d) : Time taken for global write-file : %g\n",  size-1, end_time - start_time);

    } else {

        // Compute how many workers will share the work load
        // Two scenarios exist:
        // (1) more OR equal number of workers and rows exist
        // (2) more rows than workers
        if ( (size-1) > rows ) {
            // For this scenario each worker will get exactly one work assignment.
            // There is not going to be any other work so it only compute "rows" number
            // of coefficients
            fair_chunk = rows;
            cor_cur_size = fair_chunk;
        } else {
            // For this scenario we are going to allocate space equal to a fair
            // distribution of work assignments *plus* an extra amount of space to
            // cover any load imbalancing. This amount is expressed as a percentage
            // of the fair work distribution (see on top, 20% for now)

            // Plus 1 to round it up or just add some extra space, both are fine
            fair_chunk = (rows / (size-1)) + 1;
            DEBUG("fair_chunk %d \n", fair_chunk);

            // We can use "j" as temporary variable.
            // Plus 1 to avoid getting 0 from the multiplication.
            j = (fair_chunk * MEM_PERC) + 1;

            cor_cur_size = (fair_chunk + j) * rows;
            DEBUG("cor_cur_size %lld \n", cor_cur_size);
        }

        // Allocate memory
        DEBUG("cor_cur_size %lld \n", cor_cur_size);
        long long double_size = sizeof(double);
        DEBUG("malloc size %lld \n", (double_size * cor_cur_size));
        cor = (double *)malloc(double_size * cor_cur_size);

        blocklens = (int *)malloc(sizeof(int) * rows);
        indices = (int *)malloc(sizeof(int) * rows);

        mean_value_vectorX = (double *)malloc(sizeof(double) * rows);
        Sxx_vector = (double *)malloc(sizeof(double) * rows);
        mean_value_vectorY = (double *)malloc(sizeof(double) * rows);
        Syy_vector = (double *)malloc(sizeof(double) * rows);

        // Check that all memory is successfully allocated
        if ( ( cor == NULL ) || ( blocklens == NULL ) || ( indices == NULL ) || 
             ( mean_value_vectorX == NULL ) || ( Sxx_vector == NULL ) ||
             ( mean_value_vectorY == NULL ) || ( Syy_vector == NULL ) ) {
            ERR("**ERROR** : Memory allocation failed on worker process %d. Aborting.\n", rank);

            // Free allocated memory
            free_all(cor, blocklens, indices, mean_value_vectorX, Sxx_vector, mean_value_vectorY, Syy_vector);

            // Let the master process know it's aborting in order to terminate
            // the rest of the working workers
            // We have to receive a work assignment first and then terminate
            // otherwise the master will deadlock trying to give work to this worker
            err = MPI_Recv(&taskNo, 1, MPI_INT, 0, 0, comm, &stat);
            taskNo = -1;
            err = MPI_Send(&taskNo, 1, MPI_INT, 0, 0, comm);

            // This worker failed
            local_check = 1;
            MPI_Allreduce(&local_check, &global_check, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

            return -1;
        }

        // Compute necessary parameters for Pearson method
        // (this will transform the values of the input array to more meaningful data
        //  and save us from a lot of redundant computations)
        compute_parameters(dataMatrixX, dataMatrixY, rows, columns);

        // Main loop for workers. They get work from master, compute coefficients,
        // save them to their *local* vector and ask for more work
        for(;;) {
            // Get work
            err = 0;
            err = MPI_Recv(&taskNo, 1, MPI_INT, 0, 0, comm, &stat);

            // If received task is -1, function is terminated
            if ( taskNo == -1 )  break;

            // Check if there is enough memory to store the new coefficients, if not reallocate
            // the current memory and expand it by MEM_PERC of the approximated size
            if ( cor_cur_size < (coeff_count + rows) ) {
                PROF(0, "\n**WARNING** : Worker process %3d run out of memory and reallocates. Potential work imbalancing\n", rank);
                DEBUG("\n**WARNING** : Worker process %3d run out of memory and reallocates. Potential work imbalancing\n", rank);

                // Use j as temporary again. Add two (or any other value) to avoid 0.
                // (two is just a random value, you can put any value really...)
                j = (fair_chunk * MEM_PERC) + 2;
                cor_cur_size += (j * rows);

                // Reallocate and check
                cor = (double *)realloc(cor, sizeof(double) * cor_cur_size);
                if ( cor == NULL ) {
                    ERR("**ERROR** : Memory re-allocation failed on worker process %d. Aborting.\n", rank);

                    // Let the master process know it's aborting in order to terminate
                    // the rest of the working workers
                    taskNo = -1;
                    err = MPI_Send(&taskNo, 1, MPI_INT, 0, 0, comm);

                    // This worker failed
                    local_check = 1;
                    MPI_Allreduce(&local_check, &global_check, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

                    // Free all allocated memory
                    free_all(cor, blocklens, indices, mean_value_vectorX, Sxx_vector, mean_value_vectorY, Syy_vector);

                    return -1;
                }
            }

            // Compute the correlation coefficients
            if(dataMatrixY != NULL) {
              for (j=0; j < rows; j++) {
                cor[coeff_count] = pearson_XY(dataMatrixX, dataMatrixY, j, taskNo, columns);
                coeff_count++;
              }

            } else {
              for (j=0; j < rows; j++) {
                // Set main diagonal to 1
                if ( j == taskNo ) {
                  cor[coeff_count] = 1.0;
                  coeff_count++;
                  continue;
                }
                cor[coeff_count] = pearson(dataMatrixX, taskNo, j, columns);
                coeff_count++;
              }
            }

            // The value of blocklens[] represents the number of coefficients on each
            // row of the correlation array
            blocklens[count] = rows;

            // The value of indices[] represents the offset of each row in the data file
            indices[count] = (taskNo * rows);
            count++;

            // Give the master the taskID
            err = MPI_Send(&taskNo, 1, MPI_INT, 0, 0, comm);
        }

        // There are two possibilities
        //   (a) everything went well and all workers finished ok
        //   (b) some processes finished ok but one or more of the remaining working workers failed
        // To make sure all is well an all-reduce will be performed to sync all workers and guarantee success
        // before moving on to write the output file
        // This worker is OK
        local_check = 0;
        MPI_Allreduce(&local_check, &global_check, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

        // Check failed
        if ( global_check != 0 ) {
            // Free all allocated memory
          free_all(cor, blocklens, indices, mean_value_vectorX, Sxx_vector, mean_value_vectorY, Syy_vector);
            return -1;
        }

        PROF(0, "\nPROF_stats (thread %3d) : Fair chunk of work : %d \t\t Allocated : %d \t\t Computed : %d\n",
                rank, fair_chunk, cor_cur_size, coeff_count);

        // If the distance_flag is set, then transform all correlation coefficients to distances
        if ( distance_flag == 1 ) {
            for(j=0; j < coeff_count; j++) {
                cor[j] = 1 - cor[j];
            }
        }

        // Create and commit the Indexed datatype *ONLY* if there are data available
        if ( coeff_count != 0 ) {
            MPI_Type_indexed(count, blocklens, indices, MPI_DOUBLE, &coeff_index_dt);
            MPI_Type_commit(&coeff_index_dt);
        }

        // Open the file handler
        MPI_File_open(comm, out_filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

        // Create the file view
        if ( coeff_count != 0 ) {
            MPI_File_set_view(fh, 0, MPI_DOUBLE, coeff_index_dt, "native", MPI_INFO_NULL);
        } else {
            MPI_File_set_view(fh, 0, MPI_DOUBLE, MPI_DOUBLE, "native", MPI_INFO_NULL);
        }

        // Write data to disk
        // TODO coeff_count cannot be greater than max int (for use in the MPI_File_write_all call). 
        // A better fix should be possible, for now throw error.
        
        DEBUG("\ncoeff_count is %lld\n", coeff_count);
        DEBUG("\INT_MAX is %d\n", INT_MAX);
        if(coeff_count>INT_MAX)
        {
            ERR("**ERROR** : Could not run as the chunks of data are too large. Try running again with more MPI processes.\n");

            // Free allocated memory
            free_all(cor, blocklens, indices, mean_value_vectorX, Sxx_vector, mean_value_vectorY, Syy_vector);

            // Let the master process know it's aborting in order to terminate
            // the rest of the working workers
            // We have to receive a work assignment first and then terminate
            // otherwise the master will deadlock trying to give work to this worker
            err = MPI_Recv(&taskNo, 1, MPI_INT, 0, 0, comm, &stat);
            taskNo = -1;
            err = MPI_Send(&taskNo, 1, MPI_INT, 0, 0, comm);

            // This worker failed
            local_check = 1;
            MPI_Allreduce(&local_check, &global_check, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

            return -1;
        }

        
        
        DEBUG("\nWriting %d to disk\n", coeff_count);

        MPI_File_write_all(fh, &cor[0], coeff_count, MPI_DOUBLE, &stat);

        if (coeff_count != 0 )
            MPI_Type_free(&coeff_index_dt);

        // Free all allocated memory
        free_all(cor, blocklens, indices, mean_value_vectorX, Sxx_vector, mean_value_vectorY, Syy_vector);
    }

         DEBUG("\nAbout to write to disk %d\n", rank);
    MPI_File_sync( fh ) ;   		// Causes all previous writes to be transferred to the storage device
         DEBUG("\nWritten to disk %d\n",rank);
  //  MPI_Barrier( MPI_COMM_WORLD ) ; 	// Blocks until all processes in the communicator have reached this routine.
         DEBUG("\nAfter barrier \n", rank);

    // Close file handler
    MPI_File_close(&fh);
  DEBUG("\nAfter file closed /n");
   // MPI_Barrier( MPI_COMM_WORLD ) ; 	// Blocks until all processes in the communicator have reached this routine.
      DEBUG("\nAbout to return from kernel /n");
      return 0;
}
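The kernel above builds its file view from blocklens[] and indices[] via MPI_Type_indexed. A minimal self-contained sketch of that indexed-datatype pattern, with two illustrative blocks of doubles:

#include <mpi.h>

int main(int argc, char **argv)
{
    int blocklens[2] = { 4, 4 };   /* 4 doubles per block (illustrative) */
    int indices[2]   = { 0, 8 };   /* element offsets of the two blocks  */
    double vals[8]   = { 0 };
    MPI_Datatype idx_t;
    MPI_File fh;
    MPI_Status st;

    MPI_Init(&argc, &argv);
    MPI_Type_indexed(2, blocklens, indices, MPI_DOUBLE, &idx_t);
    MPI_Type_commit(&idx_t);
    MPI_File_open(MPI_COMM_SELF, "indexed_demo.dat",
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_DOUBLE, idx_t, "native", MPI_INFO_NULL);
    MPI_File_write_all(fh, vals, 8, MPI_DOUBLE, &st); /* the 8 doubles fill the two blocks */
    MPI_File_sync(fh);
    MPI_File_close(&fh);
    MPI_Type_free(&idx_t);
    MPI_Finalize();
    return 0;
}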