Code Example #1
void zoltanInit() {
  // Call Zoltan_Initialize to make sure MPI_Init is called (in MPI or siMPI).
  int argc = 0;
  char **argv = NULL;
  float ver;
  Zoltan_Initialize(argc, argv, &ver);
}
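
All of the snippets below follow the same Zoltan lifecycle: initialize, create, set parameters, register query callbacks, partition, free the returned lists, destroy. A minimal sketch of that sequence (Zoltan C API only; the callback registrations and real error handling are elided here):

#include <mpi.h>
#include <zoltan.h>

/* Minimal Zoltan lifecycle sketch: initialize, create, configure,
   partition, free the Zoltan-allocated lists, destroy. */
int partition_sketch(MPI_Comm comm)
{
  float ver;
  struct Zoltan_Struct *zz;
  int changes, num_gid, num_lid, num_import, num_export;
  ZOLTAN_ID_PTR import_gids, import_lids, export_gids, export_lids;
  int *import_procs, *import_parts, *export_procs, *export_parts;

  /* Safe to call before MPI_Init: Zoltan_Initialize calls it if needed. */
  if (Zoltan_Initialize(0, NULL, &ver) != ZOLTAN_OK)
    return 1;

  zz = Zoltan_Create(comm);
  Zoltan_Set_Param(zz, "LB_METHOD", "RCB");
  /* ... register the query callbacks here; see the examples below ... */

  if (Zoltan_LB_Partition(zz, &changes, &num_gid, &num_lid,
                          &num_import, &import_gids, &import_lids,
                          &import_procs, &import_parts,
                          &num_export, &export_gids, &export_lids,
                          &export_procs, &export_parts) == ZOLTAN_OK) {
    /* ... move objects according to the import/export lists ... */
    Zoltan_LB_Free_Part(&import_gids, &import_lids, &import_procs, &import_parts);
    Zoltan_LB_Free_Part(&export_gids, &export_lids, &export_procs, &export_parts);
  }

  Zoltan_Destroy(&zz);
  return 0;
}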
Code Example #2
void lb_zoltan(PSTopology top, LBMethod method, unsigned int dimen, list_type& pl)
{
        const par::communicator& comm = par::comm_world();

        float ver;
        Zoltan_Initialize(0, 0, &ver);

        struct Zoltan_Struct *zz;

        // Create ZData (moves pl into zd)
        ZData zd(par::comm_world(), std::move(pl));

        // Allocate the Zoltan data
        zz = Zoltan_Create(zd.comm.raw());

        // Set some default sane parameters
        if (method == LBMethod::RCB)
                Zoltan_Set_Param(zz, "LB_METHOD", "RCB");
        else
                throw std::runtime_error("Unknown load balancing method");

        // Zoltan_Set_Param(zz, "KEEP_CUTS", "1");
        // Zoltan_Set_Param(zz, "LB_APPROACH", "REPARTITION");
        // Zoltan_Set_Param(zz, "MIGRATE_ONLY_PROC_CHANGES", "1");
        Zoltan_Set_Param(zz, "AUTO_MIGRATE", "TRUE");
        // Set higher for more debugging output
        Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0");

        // Set partition query methods
        Zoltan_Set_Num_Obj_Fn(zz, pl_num_obj,
                              static_cast<void *>(&zd));
        Zoltan_Set_Obj_List_Fn(zz, pl_obj_list,
                               static_cast<void *>(&zd));
        Zoltan_Set_Num_Geom_Fn(zz, pl_num_geom,
                               static_cast<void *>(&zd));
        Zoltan_Set_Geom_Multi_Fn(zz, pl_geom_multi,
                                 static_cast<void *>(&zd));

        // Migration query methods
        Zoltan_Set_Mid_Migrate_PP_Fn(zz, pl_mid_migrate_pp,
                                     static_cast<void *>(&zd));
        Zoltan_Set_Obj_Size_Multi_Fn(zz, pl_obj_size_multi,
                                     static_cast<void *>(&zd));
        Zoltan_Set_Pack_Obj_Multi_Fn(zz, pl_pack_obj_multi,
                                     static_cast<void *>(&zd));
        Zoltan_Set_Unpack_Obj_Multi_Fn(zz, pl_unpack_obj_multi,
                                       static_cast<void *>(&zd));

        int
                zerr,
                changes,
                num_gid_entries, num_lid_entries,
                num_import,
                *import_procs,
                *import_to_part,
                num_export,
                *export_procs,
                *export_to_part;
        ZOLTAN_ID_PTR
                import_global_ids,
                import_local_ids,
                export_global_ids,
                export_local_ids;

        zerr = Zoltan_LB_Partition(zz,
                                   &changes,
                                   &num_gid_entries,
                                   &num_lid_entries,
                                   &num_import,
                                   &import_global_ids,
                                   &import_local_ids,
                                   &import_procs,
                                   &import_to_part,
                                   &num_export,
                                   &export_global_ids,
                                   &export_local_ids,
                                   &export_procs,
                                   &export_to_part);

        if (zerr != ZOLTAN_OK) comm.abort("Zoltan error", 1);

        // Free the import/export lists allocated by Zoltan_LB_Partition
        Zoltan_LB_Free_Part(&import_global_ids, &import_local_ids,
                            &import_procs, &import_to_part);
        Zoltan_LB_Free_Part(&export_global_ids, &export_local_ids,
                            &export_procs, &export_to_part);

        // Move the data back again out of the struct
        pl = std::move(zd.list);

        Zoltan_Destroy(&zz);
}
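
Because AUTO_MIGRATE is set to TRUE above, Zoltan_LB_Partition itself invokes the registered size/pack/unpack callbacks to move the particle data, so no explicit Zoltan_Migrate call is needed. A rough sketch of the callback shapes behind pl_obj_size_multi, pl_pack_obj_multi and pl_unpack_obj_multi (signatures per the Zoltan User's Guide; the bodies are placeholders for the ZData particle-list logic):

/* Report the buffer size in bytes needed to ship each listed object. */
static void pl_obj_size_multi_sketch(void *data, int num_gid_entries,
        int num_lid_entries, int num_ids, ZOLTAN_ID_PTR global_ids,
        ZOLTAN_ID_PTR local_ids, int *sizes, int *ierr)
{
        for (int i = 0; i < num_ids; ++i)
                sizes[i] = 0; /* placeholder: byte size of object i */
        *ierr = ZOLTAN_OK;
}

/* Serialize each listed object into buf at its assigned offset idx[i];
   dest[i] is the destination process. */
static void pl_pack_obj_multi_sketch(void *data, int num_gid_entries,
        int num_lid_entries, int num_ids, ZOLTAN_ID_PTR global_ids,
        ZOLTAN_ID_PTR local_ids, int *dest, int *sizes, int *idx,
        char *buf, int *ierr)
{
        /* placeholder: copy object i into buf + idx[i] */
        *ierr = ZOLTAN_OK;
}

/* Reconstruct each received object from buf at offset idx[i]. */
static void pl_unpack_obj_multi_sketch(void *data, int num_gid_entries,
        int num_ids, ZOLTAN_ID_PTR global_ids, int *sizes, int *idx,
        char *buf, int *ierr)
{
        /* placeholder: rebuild the object from buf + idx[i] */
        *ierr = ZOLTAN_OK;
}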
Code Example #3
int main(int argc, char* argv[])
{
	if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
		std::cerr << "Couldn't initialize MPI." << std::endl;
		abort();
	}

	MPI_Comm comm = MPI_COMM_WORLD;

	int rank = 0, comm_size = 0;
	if (MPI_Comm_rank(comm, &rank) != MPI_SUCCESS) {
		std::cerr << "Couldn't obtain MPI rank." << std::endl;
		abort();
	}
	if (MPI_Comm_size(comm, &comm_size) != MPI_SUCCESS) {
		std::cerr << "Couldn't obtain size of MPI communicator." << std::endl;
		abort();
	}

	float zoltan_version;
	if (Zoltan_Initialize(argc, argv, &zoltan_version) != ZOLTAN_OK) {
		std::cerr << "Zoltan_Initialize failed." << std::endl;
		abort();
	}


	constexpr size_t
		nr_of_values = 100,
		max_nr_of_cells = 512;

	const unsigned int neighborhood_size = 1;
	const std::array<bool, 3> periodic = {{true, true, true}};

	dccrg::Cartesian_Geometry::Parameters geom_params;
	geom_params.start = {{-3,  -5,  -7}};
	geom_params.level_0_cell_length = {{7,   5,   3}};

	for (size_t nr_cells = 1; nr_cells <= max_nr_of_cells; nr_cells *= 2) {
		Grid grid;
		const std::array<uint64_t, 3> nr_of_cells{{nr_cells, 1, 1}};
		if (
			not grid.initialize(
				nr_of_cells, comm, "RANDOM", neighborhood_size, 0,
				periodic[0], periodic[1], periodic[2]
			)
		) {
			std::cerr << __FILE__ << ":" << __LINE__
				<< ": Couldn't initialize grids."
				<< std::endl;
			abort();
		}

		if (not grid.set_geometry(geom_params)) {
			std::cerr << __FILE__ << "(" << __LINE__ << "): "
				<< "Couldn't set geometry of grids."
				<< std::endl;
			abort();
		}

		grid.balance_load();
		grid.update_copies_of_remote_neighbors();

		const auto cell_ids = grid.get_cells();

		create_particles(nr_of_values, cell_ids, grid);

		for (const auto& cell_id: cell_ids) {
			auto* const cell_data = grid[cell_id];
			if (cell_data == nullptr) {abort();}
			bulk_value_getter(*cell_data) = 0;
		}
		pamhd::particle::accumulate(
			cell_ids,
			grid,
			[](Cell& cell)->pamhd::particle::Particles_Internal::data_type&{
				return cell[pamhd::particle::Particles_Internal()];
			},
			[](pamhd::particle::Particle_Internal& particle)
				->pamhd::particle::Position::data_type&
			{
				return particle[pamhd::particle::Position()];
			},
			[](Cell&, pamhd::particle::Particle_Internal& particle)
				->pamhd::particle::Mass::data_type&
			{
				return particle[pamhd::particle::Mass()];
			},
			bulk_value_getter,
			list_bulk_value_getter,
			list_target_getter,
			accumulation_list_length_getter,
			accumulation_list_getter
		);

		Cell::set_transfer_all(true, pamhd::particle::Nr_Accumulated_To_Cells());
		grid.update_copies_of_remote_neighbors();
		Cell::set_transfer_all(false, pamhd::particle::Nr_Accumulated_To_Cells());

		allocate_accumulation_lists(grid);

		// transfer accumulated values between processes
		Cell::set_transfer_all(true, Accumulated_To_Cells());
		grid.update_copies_of_remote_neighbors();
		Cell::set_transfer_all(false, Accumulated_To_Cells());

		accumulate_from_remote_neighbors(grid);

		// transform accumulated mass to mass density
		for (const auto& cell_id: cell_ids) {
			const auto cell_length = grid.geometry.get_length(cell_id);
			const auto volume = cell_length[0] * cell_length[1] * cell_length[2];

			auto* const cell_data = grid[cell_id];
			if (cell_data == nullptr) {
				std::cerr << __FILE__ << "(" << __LINE__ << ")" << std::endl;
				abort();
			}
			(*cell_data)[Mass_Density()] /= volume;
		}

		const double norm = get_norm(cell_ids, grid, comm);

		if (norm > 1e-10) {
			if (rank == 0) {
				std::cerr << __FILE__ << ":" << __LINE__
					<< ": Norm is too large: " << norm
					<< " with " << nr_cells << " cell(s)"
					<< std::endl;
			}
			MPI_Finalize();
			return EXIT_FAILURE;
		}
	}

	MPI_Finalize();
	return EXIT_SUCCESS;
}
Code Example #4
File: simpleRCB.c  Project: haripandey/trilinos
int main(int argc, char *argv[])
{
  int rc, i, myRank, numProcs;
  float ver;
  struct Zoltan_Struct *zz;
  int changes, numGidEntries, numLidEntries, numImport, numExport;
  ZOLTAN_ID_PTR importGlobalGids, importLocalGids, exportGlobalGids, exportLocalGids; 
  int *importProcs, *importToPart, *exportProcs, *exportToPart;
  int *parts;
  FILE *fp;
  MESH_DATA myMesh;
  /* Input file name; an assumed value here, since the original defines
     fname elsewhere in simpleRCB.c. */
  const char *fname = "mesh.txt";

  /******************************************************************
  ** Initialize MPI and Zoltan
  ******************************************************************/

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);

  rc = Zoltan_Initialize(argc, argv, &ver);

  if (rc != ZOLTAN_OK){
    printf("sorry...\n");
    MPI_Finalize();
    exit(0);
  }

  /******************************************************************
  ** Read geometry from input file and distribute it unevenly
  ******************************************************************/

  fp = fopen(fname, "r");
  if (!fp){
    if (myRank == 0) fprintf(stderr,"ERROR: Cannot open %s\n",fname);
    MPI_Finalize();
    exit(1);
  }
  fclose(fp);

  read_input_objects(myRank, numProcs, fname, &myMesh);

  /******************************************************************
  ** Create a Zoltan library structure for this instance of load
  ** balancing.  Set the parameters and query functions that will
  ** govern the library's calculation.  See the Zoltan User's
  ** Guide for the definition of these and many other parameters.
  ******************************************************************/

  zz = Zoltan_Create(MPI_COMM_WORLD);

  /* General parameters */

  Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0");
  Zoltan_Set_Param(zz, "LB_METHOD", "RCB");
  Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1"); 
  Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "1");
  Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "0");
  Zoltan_Set_Param(zz, "RETURN_LISTS", "ALL");

  /* RCB parameters */

  Zoltan_Set_Param(zz, "RCB_OUTPUT_LEVEL", "0");
  Zoltan_Set_Param(zz, "RCB_RECTILINEAR_BLOCKS", "1"); 
  /*Zoltan_Set_Param(zz, "RCB_RECTILINEAR_BLOCKS", "0"); */

  /* Query functions, to provide geometry to Zoltan */

  Zoltan_Set_Num_Obj_Fn(zz, get_number_of_objects, &myMesh);
  Zoltan_Set_Obj_List_Fn(zz, get_object_list, &myMesh);
  Zoltan_Set_Num_Geom_Fn(zz, get_num_geometry, &myMesh);
  Zoltan_Set_Geom_Multi_Fn(zz, get_geometry_list, &myMesh);

  /******************************************************************
  ** Zoltan can now partition the vertices in the simple mesh.
  ** In this simple example, we assume the number of partitions is
  ** equal to the number of processes.  Process rank 0 will own
  ** partition 0, process rank 1 will own partition 1, and so on.
  ******************************************************************/

  rc = Zoltan_LB_Partition(zz, /* input (all remaining fields are output) */
        &changes,        /* 1 if partitioning was changed, 0 otherwise */ 
        &numGidEntries,  /* Number of integers used for a global ID */
        &numLidEntries,  /* Number of integers used for a local ID */
        &numImport,      /* Number of vertices to be sent to me */
        &importGlobalGids,  /* Global IDs of vertices to be sent to me */
        &importLocalGids,   /* Local IDs of vertices to be sent to me */
        &importProcs,    /* Process rank for source of each incoming vertex */
        &importToPart,   /* New partition for each incoming vertex */
        &numExport,      /* Number of vertices I must send to other processes*/
        &exportGlobalGids,  /* Global IDs of the vertices I must send */
        &exportLocalGids,   /* Local IDs of the vertices I must send */
        &exportProcs,    /* Process to which I send each of the vertices */
        &exportToPart);  /* Partition to which each vertex will belong */

  if (rc != ZOLTAN_OK){
    printf("sorry...\n");
    /* Destroy the Zoltan structure (which frees an MPI communicator)
       before finalizing MPI. */
    Zoltan_Destroy(&zz);
    MPI_Finalize();
    exit(0);
  }

  /******************************************************************
  ** Visualize the mesh partitioning before and after calling Zoltan.
  ******************************************************************/

  parts = (int *)malloc(sizeof(int) * myMesh.numMyPoints);

  for (i=0; i < myMesh.numMyPoints; i++){
    parts[i] = myRank;
  }

  if (myRank== 0){
    printf("\nMesh partition assignments before calling Zoltan\n");
  }

  showSimpleMeshPartitions(myRank, myMesh.numMyPoints, myMesh.myGlobalIDs, parts);

  for (i=0; i < numExport; i++){
    parts[exportLocalGids[i]] = exportToPart[i];
  }

  if (myRank == 0){
    printf("Mesh partition assignments after calling Zoltan\n");
  }

  showSimpleMeshPartitions(myRank, myMesh.numMyPoints, myMesh.myGlobalIDs, parts);

  free(parts);

  /******************************************************************
  ** Free the arrays allocated by Zoltan_LB_Partition, and free
  ** the storage allocated for the Zoltan structure.
  ******************************************************************/

  Zoltan_LB_Free_Part(&importGlobalGids, &importLocalGids, 
                      &importProcs, &importToPart);
  Zoltan_LB_Free_Part(&exportGlobalGids, &exportLocalGids, 
                      &exportProcs, &exportToPart);

  Zoltan_Destroy(&zz);

  /**********************
  ** all done ***********
  **********************/

  MPI_Finalize();

  if (myMesh.numMyPoints > 0){
    free(myMesh.myGlobalIDs);
    free(myMesh.x);
    free(myMesh.y);
  }

  return 0;
}
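
For reference, the four geometry queries registered above have roughly this shape in Zoltan's simple examples (a sketch assuming MESH_DATA carries the numMyPoints, myGlobalIDs, x and y fields used elsewhere in this file):

static int get_number_of_objects(void *data, int *ierr)
{
  MESH_DATA *mesh = (MESH_DATA *)data;
  *ierr = ZOLTAN_OK;
  return mesh->numMyPoints;       /* objects owned by this process */
}

static void get_object_list(void *data, int sizeGID, int sizeLID,
                            ZOLTAN_ID_PTR globalID, ZOLTAN_ID_PTR localID,
                            int wgt_dim, float *obj_wgts, int *ierr)
{
  MESH_DATA *mesh = (MESH_DATA *)data;
  int i;
  for (i = 0; i < mesh->numMyPoints; i++){
    globalID[i] = mesh->myGlobalIDs[i];
    localID[i] = i;               /* local ID is just the array index */
  }
  *ierr = ZOLTAN_OK;
}

static int get_num_geometry(void *data, int *ierr)
{
  *ierr = ZOLTAN_OK;
  return 2;                       /* 2D points: x and y */
}

static void get_geometry_list(void *data, int sizeGID, int sizeLID,
                              int num_obj, ZOLTAN_ID_PTR globalID,
                              ZOLTAN_ID_PTR localID, int num_dim,
                              double *geom_vec, int *ierr)
{
  MESH_DATA *mesh = (MESH_DATA *)data;
  int i;
  for (i = 0; i < num_obj; i++){
    geom_vec[2*i]     = (double)mesh->x[i];
    geom_vec[2*i + 1] = (double)mesh->y[i];
  }
  *ierr = ZOLTAN_OK;
}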
Code Example #5
std::vector<int> zoltanGraphPartitionGridOnRoot(const CpGrid& cpgrid,
                                          const CollectiveCommunication<MPI_Comm>& cc,
                                          int root)
{
    int rc;
    float ver;
    struct Zoltan_Struct *zz;
    int changes, numGidEntries, numLidEntries, numImport, numExport;
    ZOLTAN_ID_PTR importGlobalGids, importLocalGids, exportGlobalGids, exportLocalGids;
    int *importProcs, *importToPart, *exportProcs, *exportToPart;
    int argc=0;
    char** argv = nullptr;
    rc = Zoltan_Initialize(argc, argv, &ver);
    if ( rc != ZOLTAN_OK )
    {
        OPM_THROW(std::runtime_error, "Could not initialize Zoltan!");
    }
    zz = Zoltan_Create(cc);

    Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0");
    Zoltan_Set_Param(zz, "LB_METHOD", "GRAPH");
    Zoltan_Set_Param(zz, "LB_APPROACH", "PARTITION");
    Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1");
    Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "1");
    Zoltan_Set_Param(zz, "RETURN_LISTS", "ALL");
    Zoltan_Set_Param(zz, "DEBUG_LEVEL", "3");
    Zoltan_Set_Param(zz, "CHECK_GRAPH", "2");
    Zoltan_Set_Param(zz, "PHG_EDGE_SIZE_THRESHOLD", ".35");  /* 0-remove all, 1-remove none */

    bool pretendEmptyGrid = cc.rank()!=root;

    Dune::cpgrid::setCpGridZoltanGraphFunctions(zz, cpgrid, pretendEmptyGrid);

    rc = Zoltan_LB_Partition(zz, /* input (all remaining fields are output) */
                             &changes,        /* 1 if partitioning was changed, 0 otherwise */
                             &numGidEntries,  /* Number of integers used for a global ID */
                             &numLidEntries,  /* Number of integers used for a local ID */
                             &numImport,      /* Number of vertices to be sent to me */
                             &importGlobalGids,  /* Global IDs of vertices to be sent to me */
                             &importLocalGids,   /* Local IDs of vertices to be sent to me */
                             &importProcs,    /* Process rank for source of each incoming vertex */
                             &importToPart,   /* New partition for each incoming vertex */
                             &numExport,      /* Number of vertices I must send to other processes*/
                             &exportGlobalGids,  /* Global IDs of the vertices I must send */
                             &exportLocalGids,   /* Local IDs of the vertices I must send */
                             &exportProcs,    /* Process to which I send each of the vertices */
                             &exportToPart);  /* Partition to which each vertex will belong */
    if ( rc != ZOLTAN_OK )
    {
        OPM_THROW(std::runtime_error, "Zoltan partitioning failed!");
    }

    int size = cpgrid.numCells();
    int rank = cc.rank();
    std::vector<int> parts(size, rank);

    for ( int i=0; i < numExport; ++i )
    {
        parts[exportLocalGids[i]] = exportProcs[i];
    }
    cc.broadcast(&parts[0], parts.size(), root);
    Zoltan_LB_Free_Part(&exportGlobalGids, &exportLocalGids, &exportProcs, &exportToPart);
    Zoltan_LB_Free_Part(&importGlobalGids, &importLocalGids, &importProcs, &importToPart);
    Zoltan_Destroy(&zz);
    return parts;
}
Code Example #6
int ZoltanLibClass::precompute()
{
  std::string str1("Isorropia::ZoltanLibClass::precompute ");
  MPI_Comm mpicomm = MPI_COMM_WORLD;
#ifdef HAVE_MPI
  MPI_Comm default_mpicomm = MPI_COMM_WORLD;
#endif // HAVE_MPI
  int itype;

  Library::precompute(); // assumes input_type_ is set

  if (input_graph_.get() || input_matrix_.get())
  {
    if (input_type_ != hgraph2d_finegrain_input_){
      computeCost();     // graph or hypergraph weights
    }
  }

  if (input_type_ == graph_input_)
    itype = ZoltanLib::QueryObject::graph_input_;
  else if (input_type_ == hgraph_input_)
    itype = ZoltanLib::QueryObject::hgraph_input_;
  else if (input_type_ == hgraph2d_finegrain_input_)
    itype = ZoltanLib::QueryObject::hgraph2d_finegrain_input_;
  else if (input_type_ == geometric_input_)
    itype = ZoltanLib::QueryObject::geometric_input_;
  else if (input_type_ == simple_input_)
    itype = ZoltanLib::QueryObject::simple_input_;
  else if (input_type_ == hgraph_graph_input_)                 // hierarchical partitioning
    itype = ZoltanLib::QueryObject::hgraph_graph_input_;
  else if (input_type_ == hgraph_geometric_input_)             // hierarchical partitioning
    itype = ZoltanLib::QueryObject::hgraph_geometric_input_;
  else if (input_type_ == graph_geometric_input_)              // hierarchical partitioning
    itype = ZoltanLib::QueryObject::graph_geometric_input_;
  else if (input_type_ == hgraph_graph_geometric_input_)       // hierarchical partitioning
    itype = ZoltanLib::QueryObject::hgraph_graph_geometric_input_;
  else
    itype = ZoltanLib::QueryObject::unspecified_input_;


  if (input_graph_.get() !=0 && input_coords_.get()!=0) //geometric and graph inputs
  {
    queryObject_ =  Teuchos::rcp(new ZoltanLib::QueryObject(input_graph_, costs_, input_coords_, weights_, itype));
#ifdef HAVE_MPI
    const  Epetra_Comm &ecomm = input_graph_->RowMap().Comm();
    try
    {
      const Epetra_MpiComm &empicomm = dynamic_cast<const Epetra_MpiComm &>(ecomm);
      mpicomm = empicomm.Comm();
    }
    catch (std::exception& e)
    {
      // Serial Comm with MPI
      MPI_Comm_split(default_mpicomm, ecomm.MyPID(), 0, &mpicomm);
    }
#endif
  }
  else if (input_matrix_.get() !=0 && input_coords_.get()!=0) //geometric and matrix inputs
  {
    queryObject_ =  Teuchos::rcp(new ZoltanLib::QueryObject(input_matrix_, costs_, input_coords_, weights_, itype));
#ifdef HAVE_MPI
    const Epetra_Comm &ecomm = input_matrix_->RowMatrixRowMap().Comm();
    try
    {
      const Epetra_MpiComm &empicomm = dynamic_cast<const Epetra_MpiComm &>(ecomm);
      mpicomm = empicomm.Comm();
    }
    catch (std::exception& e)
    {
      // Serial Comm with MPI
      MPI_Comm_split(default_mpicomm, ecomm.MyPID(), 0, &mpicomm);
    }
#endif
  }
  else if (input_graph_.get() != 0) //graph inputs
  {
    queryObject_ =  Teuchos::rcp(new ZoltanLib::QueryObject(input_graph_, costs_, itype));
#ifdef HAVE_MPI
    const  Epetra_Comm &ecomm = input_graph_->RowMap().Comm();
    try
    {
      const Epetra_MpiComm &empicomm = dynamic_cast<const Epetra_MpiComm &>(ecomm);
      mpicomm = empicomm.Comm();
    }
    catch (std::exception& e)
    {
      // Serial Comm with MPI
      MPI_Comm_split(default_mpicomm, ecomm.MyPID(), 0, &mpicomm);
    }
#endif
  }
  else if (input_matrix_.get() != 0) //matrix inputs
  {
    queryObject_ =  Teuchos::rcp(new ZoltanLib::QueryObject(input_matrix_, costs_, itype));
#ifdef HAVE_MPI
    const Epetra_Comm &ecomm = input_matrix_->RowMatrixRowMap().Comm();
    try
    {
      const Epetra_MpiComm &empicomm = dynamic_cast<const Epetra_MpiComm &>(ecomm);
      mpicomm = empicomm.Comm();
    }
    catch (std::exception& e)
    {
      // Serial Comm with MPI
      MPI_Comm_split(default_mpicomm, ecomm.MyPID(), 0, &mpicomm);
    }
#endif
  }
  else if (input_coords_.get() != 0) // coord inputs
  {
    queryObject_ =  Teuchos::rcp(new ZoltanLib::QueryObject(input_coords_, weights_));
#ifdef HAVE_MPI
    const Epetra_Comm &ecomm = input_coords_->Map().Comm();
    try
    {
      const Epetra_MpiComm &empicomm = dynamic_cast<const Epetra_MpiComm &>(ecomm);
      mpicomm = empicomm.Comm();
    }
    catch (std::exception& e)
    {
      // Serial Comm with MPI
      MPI_Comm_split(default_mpicomm, ecomm.MyPID(), 0, &mpicomm);
    }
#endif
  }
  else // BlockMap inputs
  {
    queryObject_ =  Teuchos::rcp(new ZoltanLib::QueryObject(input_map_, itype));
#ifdef HAVE_MPI
    const  Epetra_Comm &ecomm = input_map_->Comm();
    try
    {
      const Epetra_MpiComm &empicomm = dynamic_cast<const Epetra_MpiComm &>(ecomm);
      mpicomm = empicomm.Comm();
    }
    catch (std::exception& e)
    {
      // Serial Comm with MPI
      MPI_Comm_split(default_mpicomm, ecomm.MyPID(), 0, &mpicomm);
    }
#endif
  }



  float version;
  int argcTmp=0;
  char *argvTmp[1];
  std::string lb_method_str("LB_METHOD");

  // create a Zoltan problem

  argvTmp[0] = NULL;
  Zoltan_Initialize(argcTmp, argvTmp, &version);

  zz_ = new Zoltan(mpicomm);

  if (zz_ == NULL){
    throw Isorropia::Exception("Error creating Zoltan object");
    return (-1);
  }

  //////////////////////////
  // set problem parameters
  //////////////////////////

  std::string dbg_level_str("DEBUG_LEVEL");
  if (!zoltanParamList_.isParameter(dbg_level_str))
  {
    zoltanParamList_.set(dbg_level_str, "0");
  }

  if (!zoltanParamList_.isParameter(lb_method_str)) //set default parameters
  {
    if (input_type_ == graph_input_)
    {
      zoltanParamList_.set(lb_method_str, "GRAPH");
    }
    else if (input_type_ == geometric_input_)
    {
      if (!zoltanParamList_.isParameter(lb_method_str))  //MMW: Don't think this if is needed
        zoltanParamList_.set(lb_method_str, "RCB");
    }
    else if (input_type_ == simple_input_) //not sure this is needed
    {
      zoltanParamList_.set(lb_method_str, "BLOCK");
    }
    else if (input_type_ == hgraph_graph_input_    || input_type_ == hgraph_geometric_input_ ||
             input_type_ == graph_geometric_input_ || input_type_ == hgraph_graph_geometric_input_ )
    {
      zoltanParamList_.set(lb_method_str, "HIER");
    }
    else
    {
      zoltanParamList_.set(lb_method_str, "HYPERGRAPH");
    }
  }

  // Make LB_APPROACH = PARTITION the default in Isorropia
  std::string lb_approach_str("LB_APPROACH");
  if (!zoltanParamList_.isParameter(lb_approach_str)) {
    zoltanParamList_.set(lb_approach_str, "PARTITION");
  }

  // For fine-grain hypergraph, we don't want obj or (hyper)edge weights
  if (input_type_ == hgraph2d_finegrain_input_)
  {
    zoltanParamList_.set("OBJ_WEIGHT_DIM", "0");
    zoltanParamList_.set("EDGE_WEIGHT_DIM", "0");
  }
  else if (input_type_ == geometric_input_)
  {
    // We always overwrite user choice.
    // if (!zoltanParamList_.isParameter("OBJ_WEIGHT_DIM")) {
      if (weights_.get())
      {
        zoltanParamList_.set("OBJ_WEIGHT_DIM", "1");
      }
      else
      {
        zoltanParamList_.set("OBJ_WEIGHT_DIM", "0");
      }
    //}
  }
  else if(input_type_ != simple_input_) //graph or hypergraph
  {
    if (queryObject_->haveVertexWeights())
    {
      if (!zoltanParamList_.isParameter("OBJ_WEIGHT_DIM"))
      {
        zoltanParamList_.set("OBJ_WEIGHT_DIM", "1");
      }
    }

    if (queryObject_->haveGraphEdgeWeights() ||
        queryObject_->haveHypergraphEdgeWeights())
    {
      if (!zoltanParamList_.isParameter("EDGE_WEIGHT_DIM"))
      {
        zoltanParamList_.set("EDGE_WEIGHT_DIM", "1");
      }
    }
  }

  // For fine-grain hypergraph, we will use (row, col) of nz for
  // vertex GIDs.  Don't need LIDs.

  if (input_type_ == hgraph2d_finegrain_input_)
  {
    zoltanParamList_.set("NUM_GID_ENTRIES", "2");
    zoltanParamList_.set("NUM_LID_ENTRIES", "1");
  }

  Teuchos::ParameterList::ConstIterator
    iter = zoltanParamList_.begin(),
    iter_end = zoltanParamList_.end();

  for(; iter != iter_end; ++iter)
  {
    const std::string& name = iter->first;
    const std::string& value = Teuchos::getValue<std::string>(iter->second);
    zz_->Set_Param(name, value);
  }

  // Set the query functions

  zz_->Set_Num_Obj_Fn(ZoltanLib::QueryObject::Number_Objects, (void *)queryObject_.get());
  zz_->Set_Obj_List_Fn(ZoltanLib::QueryObject::Object_List, (void *)queryObject_.get());

  int ierr;
  num_obj_ = ZoltanLib::QueryObject::Number_Objects((void *)queryObject_.get(), &ierr);


  if (input_type_ == hgraph2d_finegrain_input_)
  {
    zz_->Set_HG_Size_CS_Fn(ZoltanLib::QueryObject::HG_Size_CS, (void *)queryObject_.get());
    zz_->Set_HG_CS_Fn(ZoltanLib::QueryObject::HG_CS, (void *)queryObject_.get());
  }
  if (input_type_ == hgraph_input_           || input_type_ == hgraph_graph_input_ ||
      input_type_ == hgraph_geometric_input_ || input_type_ == hgraph_graph_geometric_input_)
  {
    zz_->Set_HG_Size_CS_Fn(ZoltanLib::QueryObject::HG_Size_CS, (void *)queryObject_.get());
    zz_->Set_HG_CS_Fn(ZoltanLib::QueryObject::HG_CS, (void *)queryObject_.get());
    zz_->Set_HG_Size_Edge_Wts_Fn(ZoltanLib::QueryObject::HG_Size_Edge_Weights,
                                 (void *)queryObject_.get());
    zz_->Set_HG_Edge_Wts_Fn(ZoltanLib::QueryObject::HG_Edge_Weights, (void *)queryObject_.get());
  }
  if (input_type_ == graph_input_ || input_type_ == hgraph_graph_input_ ||
      input_type_ == graph_geometric_input_ || input_type_ == hgraph_graph_geometric_input_)
  {
    zz_->Set_Num_Edges_Multi_Fn(ZoltanLib::QueryObject::Number_Edges_Multi, (void *)queryObject_.get());
    zz_->Set_Edge_List_Multi_Fn(ZoltanLib::QueryObject::Edge_List_Multi, (void *)queryObject_.get());
  }
  if (input_type_ == geometric_input_ || input_type_ == hgraph_geometric_input_ ||
      input_type_ == graph_geometric_input_ || input_type_ == hgraph_graph_geometric_input_)
  {
    zz_->Set_Num_Geom_Fn(ZoltanLib::QueryObject::Number_Geom, (void *)queryObject_.get());
    zz_->Set_Geom_Multi_Fn(ZoltanLib::QueryObject::Geom_Multi, (void *)queryObject_.get());
  }

  return (ierr);
}
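
The Set_HG_Size_CS_Fn / Set_HG_CS_Fn registrations above expect a pair of callbacks describing the hypergraph in compressed storage. Sketched roughly (signatures per the Zoltan User's Guide; the bodies are placeholders for QueryObject's actual logic):

/* Report list/pin counts and the compressed-storage format used. */
static void hg_size_cs_sketch(void *data, int *num_lists, int *num_pins,
                              int *format, int *ierr)
{
  *num_lists = 0;                       /* placeholder: vertices (or edges) listed */
  *num_pins = 0;                        /* placeholder: total pins */
  *format = ZOLTAN_COMPRESSED_VERTEX;   /* each list is one vertex's hyperedges */
  *ierr = ZOLTAN_OK;
}

/* Fill vtxedge_GID with the listed IDs, vtxedge_ptr with offsets into
   pin_GID, and pin_GID with the pins themselves. */
static void hg_cs_sketch(void *data, int num_gid_entries, int num_vtx_edge,
                         int num_pins, int format, ZOLTAN_ID_PTR vtxedge_GID,
                         int *vtxedge_ptr, ZOLTAN_ID_PTR pin_GID, int *ierr)
{
  /* placeholder: write the compressed hypergraph here */
  *ierr = ZOLTAN_OK;
}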
Code Example #7
File: stressTestGRAPH.c  Project: agrippa/Trilinos
int main(int argc, char *argv[])
{
  int rc, do_hier, status;
  float ver;
  struct Zoltan_Struct *zz;
  int changes, numGidEntries, numLidEntries, numImport, numExport;
  int generate_files = 0;
  char *platform=NULL, *topology=NULL;
  char *graph_package=NULL;
  ZOLTAN_ID_PTR importGlobalGids, importLocalGids, exportGlobalGids, exportLocalGids;
  int *importProcs, *importToPart, *exportProcs, *exportToPart;
  struct option opts[10];
  double comm_time[10];
  float cut_weight[3] = {0., 0., 0.};
  long nvert=0;
  char *debug_level=NULL;

  status = 0;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);

  Zoltan_Initialize(argc, argv, &ver);
  zz = Zoltan_Create(MPI_COMM_WORLD);

  /******************************************************************
  ** Check that this test makes sense.
  ******************************************************************/

  if (sizeof(long) < sizeof(ZOLTAN_ID_TYPE)){
    if (myRank == 0){
      printf("ERROR: This code assumes that a long is at least %d bytes\n",(int)sizeof(ZOLTAN_ID_TYPE));
    }
    status = 1;
  }

  check_error_status(status, "configuration error");

  /******************************************************************
  ** Initialize zoltan
  ******************************************************************/

  /* options */

  opts[0].name = "platform";
  opts[0].has_arg = 1;
  opts[0].flag = NULL;
  opts[0].val = 1;

  opts[1].name = "topology";
  opts[1].has_arg = 1;
  opts[1].flag = NULL;
  opts[1].val = 2;

  opts[2].name = "size";
  opts[2].has_arg = 1;
  opts[2].flag = NULL;
  opts[2].val = 4;

  opts[3].name = "verbose";
  opts[3].has_arg = 0;
  opts[3].flag = NULL;
  opts[3].val = 5;

  opts[4].name = "help";
  opts[4].has_arg = 0;
  opts[4].flag = NULL;
  opts[4].val = 6;

  opts[5].name = "graph_package";
  opts[5].has_arg = 1;
  opts[5].flag = NULL;
  opts[5].val = 7;

  opts[6].name = "generate_files";
  opts[6].has_arg = 0;
  opts[6].flag = NULL;
  opts[6].val = 8;

  opts[7].name = "debug_level";
  opts[7].has_arg = 1;
  opts[7].flag = NULL;
  opts[7].val = 9;

  opts[8].name = 0;
  opts[8].has_arg = 0;
  opts[8].flag = NULL;
  opts[8].val = 0;

  status = 0;

  while (1){
    rc = getopt_long_only(argc, argv, "",  opts, NULL);

    if (rc == '?'){
      MPI_Barrier(MPI_COMM_WORLD);
      if (myRank == 0) usage();
      MPI_Finalize();
      exit(0);
    }
    else if (rc == 1){
      platform = optarg;
      if (myRank == 0)
        printf( "For platform %s\n",optarg );
    }
    else if (rc == 2){
      topology = optarg;
      if (myRank == 0)
        printf( "For topology %s\n",optarg);
    }
    else if (rc == 7){
      graph_package = optarg;
      if (myRank == 0)
        printf( "Zoltan parameter GRAPH_PACKAGE = %s\n",graph_package);
    }
    else if (rc == 8){
      generate_files = 1;
      if (myRank == 0)
        printf( "Zoltan_Generate_Files will be called for each level.\n");
    }
    else if (rc == 4){
      nvert = atol(optarg);
      if (nvert < 1) status = 1;
      check_error_status(status, "--size={approximate number of vertices}");
      if (myRank == 0){
        printf( "Graph will have approximately %ld vertices.\n",nvert);
      }
    }
    else if (rc == 5){
      verbose = 1;
    }
    else if (rc == 6){
      if (myRank == 0) usage();
      MPI_Finalize();
      exit(0);
    }
    else if (rc == 9){
      debug_level = optarg;
    }
    else if (rc <= 0){
      break;
    }
  }

  if ((platform==NULL) && (topology==NULL)){
    if (myRank == 0)
      fprintf(stdout,"No platform or topology, so we'll skip hierarchical partitioning\n");
    do_hier = 0;
  }
  else if (graph_package == NULL){
    if (myRank == 0)
      fprintf(stdout,"No graph package, so we'll skip hierarchical partitioning\n");
    do_hier = 0;
  }
  else{
    do_hier = 1;
  }

  /* start */

  Zoltan_Memory_Debug(0);

  if (nvert > 0)
    numGlobalVertices = nvert;
  else
    numGlobalVertices = NUM_GLOBAL_VERTICES;

  status = create_a_graph();
  check_error_status(status, "creating the graph");

  Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0");
  Zoltan_Set_Param(zz, "REMAP", "0");
  Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1");
  Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "1");
  Zoltan_Set_Param(zz, "RETURN_LISTS", "ALL"); /* export AND import lists */
  Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "1"); /* number of weights per vertex */
  Zoltan_Set_Param(zz, "EDGE_WEIGHT_DIM", "1");/* number of weights per hyperedge */

  Zoltan_Set_Num_Obj_Fn(zz, get_number_of_vertices, NULL);
  Zoltan_Set_Obj_List_Fn(zz, get_vertex_list, NULL);
  Zoltan_Set_Num_Edges_Multi_Fn(zz, get_num_edges_list,  NULL);
  Zoltan_Set_Edge_List_Multi_Fn(zz, get_edge_list,  NULL);

  /* GRAPH PARTITION */

  Zoltan_Set_Param(zz, "LB_METHOD", "GRAPH");
  Zoltan_Set_Param(zz, "LB_APPROACH", "PARTITION");

  if (graph_package)
    Zoltan_Set_Param(zz, "GRAPH_PACKAGE", graph_package);

  if (verbose){
    debug(zz, "Initial graph", 0);
  }

  if (generate_files){
    rc = Zoltan_Generate_Files(zz, "flat", myRank, 0, 1, 0);
    if (rc != ZOLTAN_OK) status = 1;
    check_error_status(status, "Zoltan_Generate_Files");
  }

  /* Performance before partitioning */
  time_communication(comm_time+0);
  cut_weight[0] = get_edge_cut_weight(zz);

  if (cut_weight[0] < 0.0) status = 1;
  check_error_status(status, "First call to get_edge_cut_weight");

  rc = Zoltan_LB_Partition(zz, /* input (all remaining fields are output) */
        &changes,        /* 1 if partitioning was changed, 0 otherwise */
        &numGidEntries,  /* Number of integers used for a global ID */
        &numLidEntries,  /* Number of integers used for a local ID */
        &numImport,      /* Number of vertices to be sent to me */
        &importGlobalGids,  /* Global IDs of vertices to be sent to me */
        &importLocalGids,   /* Local IDs of vertices to be sent to me */
        &importProcs,    /* Process rank for source of each incoming vertex */
        &importToPart,   /* New partition for each incoming vertex */
        &numExport,      /* Number of vertices I must send to other processes*/
        &exportGlobalGids,  /* Global IDs of the vertices I must send */
        &exportLocalGids,   /* Local IDs of the vertices I must send */
        &exportProcs,    /* Process to which I send each of the vertices */
        &exportToPart);  /* Partition to which each vertex will belong */

  if (rc != ZOLTAN_OK) status = 1;
  check_error_status(status, "First call to LB_Partition");

  status = migrate_graph(numExport, numImport, exportLocalGids, importGlobalGids);
  check_error_status(status, "migration");

  if (verbose){
    debug(zz, "After flat partitioning and migration", 0);
  }

  time_communication(comm_time+1);      /* With graph partitioning */
  cut_weight[1] = get_edge_cut_weight(zz);

  if (cut_weight[1] < 0.0) status = 1;
  check_error_status(status, "Second call to get_edge_cut_weight");

  Zoltan_LB_Free_Part(&importGlobalGids, &importLocalGids,
                      &importProcs, &importToPart);
  Zoltan_LB_Free_Part(&exportGlobalGids, &exportLocalGids,
                      &exportProcs, &exportToPart);

  if (do_hier){

    /* HIERARCHICAL PARTITION */

    free_graph();
    status = create_a_graph();
    check_error_status(status, "create graph for hierarchical partitioning");

    Zoltan_Set_Param(zz, "LB_METHOD", "HIER");
    Zoltan_Set_Param(zz, "HIER_ASSIST", "1");
    if (generate_files){
      Zoltan_Set_Param(zz, "HIER_GENERATE_FILES", "1");
    }

    if (debug_level)   /* 1, 2 or 3 */
      Zoltan_Set_Param(zz, "HIER_DEBUG_LEVEL", debug_level);
    else
      Zoltan_Set_Param(zz, "HIER_DEBUG_LEVEL", "0");

    /* TODO: Suppose graph is not symmetric, and we request SYMMETRIZE.  Do we still get
     *  a "good" answer when each sub-graph in the hierarchy is symmetrized?
     */

    if (topology)
      Zoltan_Set_Param(zz, "TOPOLOGY", topology);
    else if (platform)
      Zoltan_Set_Param(zz, "PLATFORM", platform);

    rc = Zoltan_LB_Partition(zz, /* input (all remaining fields are output) */
          &changes,        /* 1 if partitioning was changed, 0 otherwise */
          &numGidEntries,  /* Number of integers used for a global ID */
          &numLidEntries,  /* Number of integers used for a local ID */
          &numImport,      /* Number of vertices to be sent to me */
          &importGlobalGids,  /* Global IDs of vertices to be sent to me */
          &importLocalGids,   /* Local IDs of vertices to be sent to me */
          &importProcs,    /* Process rank for source of each incoming vertex */
          &importToPart,   /* New partition for each incoming vertex */
          &numExport,      /* Number of vertices I must send to other processes*/
          &exportGlobalGids,  /* Global IDs of the vertices I must send */
          &exportLocalGids,   /* Local IDs of the vertices I must send */
          &exportProcs,    /* Process to which I send each of the vertices */
          &exportToPart);  /* Partition to which each vertex will belong */

    if (rc != ZOLTAN_OK) status = 1;
    check_error_status(status, "Second call to LB_Partition");

    status = migrate_graph(numExport, numImport, exportLocalGids, importGlobalGids);
    check_error_status(status, "second migration");

    if (verbose){
      debug(zz, "After hierarchical partitioning and migration", 0);
    }

    time_communication(comm_time+2);      /* With hierarchical graph partitioning */
    cut_weight[2] = get_edge_cut_weight(zz);

    if (cut_weight[2] < 0.0) status = 1;
    check_error_status(status, "Third call to get_edge_cut_weight");

    Zoltan_LB_Free_Part(&importGlobalGids, &importLocalGids,
                        &importProcs, &importToPart);
    Zoltan_LB_Free_Part(&exportGlobalGids, &exportLocalGids,
                        &exportProcs, &exportToPart);
  }

  Zoltan_Destroy(&zz);

  free_graph();

  if (myRank == 0){
    fprintf(stdout,"Graph cut weight before partitioning: %f\n",cut_weight[0]);
    fprintf(stdout,"             after flat partitioning: %f\n",cut_weight[1]);
    if (do_hier)
      fprintf(stdout,"     after hierarchical partitioning: %f\n",cut_weight[2]);
    fflush(stdout);
  }

  if (cut_weight[1] >= cut_weight[0]){
    status = 1;
    if (zz->Proc == 0){
      fprintf(stderr,"FAILED: No improvement shown in flat partitioning");
    }
  }

  if (do_hier && (cut_weight[2] > cut_weight[0])){
    status = 1;
    if (zz->Proc == 0){
      fprintf(stderr,"FAILED: No improvement shown in hierarchical partitioning");
    }
  }


  MPI_Finalize();

  return status;
}
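
The graph queries registered above (get_num_edges_list, get_edge_list) follow Zoltan's multi-object graph signatures. A sketch of their shapes (signatures per the Zoltan User's Guide; the bodies are placeholders for the generated graph):

/* Report the degree of each listed vertex. */
static void get_num_edges_list_sketch(void *data, int num_gid_entries,
        int num_lid_entries, int num_obj, ZOLTAN_ID_PTR global_ids,
        ZOLTAN_ID_PTR local_ids, int *num_edges, int *ierr)
{
  int i;
  for (i = 0; i < num_obj; i++)
    num_edges[i] = 0;   /* placeholder: degree of vertex i */
  *ierr = ZOLTAN_OK;
}

/* For each vertex, list neighbor global IDs, the owner of each neighbor,
   and (since EDGE_WEIGHT_DIM is 1 above) one weight per edge. */
static void get_edge_list_sketch(void *data, int num_gid_entries,
        int num_lid_entries, int num_obj, ZOLTAN_ID_PTR global_ids,
        ZOLTAN_ID_PTR local_ids, int *num_edges,
        ZOLTAN_ID_PTR nbor_global_ids, int *nbor_procs,
        int wgt_dim, float *ewgts, int *ierr)
{
  /* placeholder: fill nbor_global_ids, nbor_procs and ewgts per vertex */
  *ierr = ZOLTAN_OK;
}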
Code Example #8
File: remove2d.cpp  Project: SeanChu/pamhd
int main(int argc, char* argv[])
{
	if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
		std::cerr << "Couldn't initialize MPI." << std::endl;
		abort();
	}

	MPI_Comm comm = MPI_COMM_WORLD;

	int rank = 0, comm_size = 0;
	if (MPI_Comm_rank(comm, &rank) != MPI_SUCCESS) {
		std::cerr << "Couldn't obtain MPI rank." << std::endl;
		abort();
	}
	if (MPI_Comm_size(comm, &comm_size) != MPI_SUCCESS) {
		std::cerr << "Couldn't obtain size of MPI communicator." << std::endl;
		abort();
	}


	// initialize Zoltan
	float zoltan_version;
	if (Zoltan_Initialize(argc, argv, &zoltan_version) != ZOLTAN_OK) {
		std::cerr << "Zoltan_Initialize failed." << std::endl;
		abort();
	}

	const unsigned int neighborhood_size = 0;
	const int max_refinement_level = 0;

	double old_div = std::numeric_limits<double>::max();
	size_t old_nr_of_cells = 0;
	for (size_t nr_of_cells = 8; nr_of_cells <= 128; nr_of_cells *= 2) {

		dccrg::Dccrg<Cell, dccrg::Cartesian_Geometry> grid;

		const std::array<uint64_t, 3> grid_size{{1, nr_of_cells + 2, nr_of_cells + 2}};

		if (
			not grid.initialize(
				grid_size,
				comm,
				"RANDOM",
				neighborhood_size,
				max_refinement_level,
				false,
				false,
				false
			)
		) {
			std::cerr << __FILE__ << ":" << __LINE__
				<< ": Couldn't initialize grid."
				<< std::endl;
			abort();
		}

		const std::array<double, 3>
			cell_length{{
				1,
				2 * M_PI / (grid_size[1] - 2),
				2 * M_PI / (grid_size[2] - 2)
			}},
			grid_start{{
				0, -cell_length[1], -cell_length[2]
			}};

		dccrg::Cartesian_Geometry::Parameters geom_params;
		geom_params.start = grid_start;
		geom_params.level_0_cell_length = cell_length;

		if (not grid.set_geometry(geom_params)) {
			std::cerr << __FILE__ << ":" << __LINE__
				<< ": Couldn't set grid geometry."
				<< std::endl;
			abort();
		}

		grid.balance_load();

		const auto all_cells = grid.get_cells();
		for (const auto& cell: all_cells) {
			auto* const cell_data = grid[cell];
			if (cell_data == NULL) {
				std::cerr << __FILE__ << ":" << __LINE__
					<< ": No data for cell " << cell
					<< std::endl;
				abort();
			}

			const auto center = grid.geometry.get_center(cell);
			(*cell_data)[Vector_Field()] = function(center);
		}
		grid.update_copies_of_remote_neighbors();

		// classify cells
		std::vector<uint64_t>
			solve_cells,
			boundary_cells;
		for (const auto& cell: all_cells) {
			const auto index = grid.mapping.get_indices(cell);
			if (
				index[1] > 0
				and index[1] < grid_size[1] - 1
				and index[2] > 0
				and index[2] < grid_size[2] - 1
			) {
				solve_cells.push_back(cell);
			} else {
				boundary_cells.push_back(cell);
			}
		}

		// apply copy boundaries
		for (const auto& cell: boundary_cells) {
			const auto index = grid.mapping.get_indices(cell);
			auto neighbor_index = index;

			if (index[1] == 0) {
				neighbor_index[1] = index[1] + 1;
			} else if (index[1] == grid_size[1] - 1) {
				neighbor_index[1] = index[1] - 1;
			} else if (index[2] == 0) {
				neighbor_index[2] = index[2] + 1;
			} else if (index[2] == grid_size[2] - 1) {
				neighbor_index[2] = index[2] - 1;
			}
			const auto neighbor = grid.mapping.get_cell_from_indices(neighbor_index, 0);

			const auto* const neighbor_data = grid[neighbor];
			auto* const cell_data = grid[cell];
			if (cell_data == NULL or neighbor_data == NULL) {
				std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
				abort();
			}

			(*cell_data)[Vector_Field()] = (*neighbor_data)[Vector_Field()];
		}
		grid.update_copies_of_remote_neighbors();

		auto Vector_Getter = [](Cell& cell_data) -> Vector_Field::data_type& {
			return cell_data[Vector_Field()];
		};
		auto Div_After_Getter = [](Cell& cell_data) -> Divergence_After::data_type& {
			return cell_data[Divergence_After()];
		};
		const double div_before = pamhd::divergence::get_divergence(
			solve_cells,
			grid,
			Vector_Getter,
			[](Cell& cell_data) -> Divergence_Before::data_type& {
				return cell_data[Divergence_Before()];
			}
		);

		pamhd::divergence::remove(
			solve_cells,
			boundary_cells,
			{},
			grid,
			Vector_Getter,
			Div_After_Getter,
			[](Cell& cell_data) -> Gradient::data_type& {
				return cell_data[Gradient()];
			},
			2000, 0, 1e-15, 2, 100, 0, false, false
		);
		grid.update_copies_of_remote_neighbors();

		// update copy boundaries to correspond to removed divergence
		for (const auto& cell: boundary_cells) {
			const auto index = grid.mapping.get_indices(cell);
			auto neighbor_index = index;

			if (index[1] == 0) {
				neighbor_index[1] = index[1] + 1;
			} else if (index[1] == grid_size[1] - 1) {
				neighbor_index[1] = index[1] - 1;
			} else if (index[2] == 0) {
				neighbor_index[2] = index[2] + 1;
			} else if (index[2] == grid_size[2] - 1) {
				neighbor_index[2] = index[2] - 1;
			}
			const auto neighbor = grid.mapping.get_cell_from_indices(neighbor_index, 0);

			const auto* const neighbor_data = grid[neighbor];
			auto* const cell_data = grid[cell];
			if (cell_data == NULL or neighbor_data == NULL) {
				std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
				abort();
			}

			(*cell_data)[Vector_Field()] = (*neighbor_data)[Vector_Field()];
		}
		grid.update_copies_of_remote_neighbors();

		const double div_after = pamhd::divergence::get_divergence(
			solve_cells,
			grid,
			Vector_Getter,
			Div_After_Getter
		);

		if (div_after > div_before) {
			if (grid.get_rank() == 0) {
				std::cerr << __FILE__ << ":" << __LINE__
					<< ": Divergence after removal " << div_after
					<< " is larger than before " << div_before
					<< " with " << nr_of_cells << " cells."
					<< std::endl;
			}
			abort();
		}

		if (old_nr_of_cells > 0) {
			const double
				order_of_accuracy
					= -log(div_after / old_div)
					/ log(double(nr_of_cells) / old_nr_of_cells);

			if (order_of_accuracy < 0.9) {
				if (grid.get_rank() == 0) {
					std::cerr << __FILE__ << ":" << __LINE__
						<< ": Order of accuracy from "
						<< old_nr_of_cells << " to " << nr_of_cells
						<< " is too low: " << order_of_accuracy
						<< std::endl;
				}
				abort();
			}
		}

		old_nr_of_cells = nr_of_cells;
		old_div = div_after;
	}

	MPI_Finalize();

	return EXIT_SUCCESS;
}
Code Example #9
File: parallel.cpp  Project: nasailja/gensimcell
int main(int argc, char* argv[])
{
	// the cell type used by this program
	using Cell = combined::Cell;

	/*
	Set up MPI
	*/
	if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
		std::cerr << "Coudln't initialize MPI." << std::endl;
		abort();
	}

	MPI_Comm comm = MPI_COMM_WORLD;

	int rank = 0, comm_size = 0;
	MPI_Comm_rank(comm, &rank);
	MPI_Comm_size(comm, &comm_size);


	// initialize Zoltan
	float zoltan_version;
	if (Zoltan_Initialize(argc, argv, &zoltan_version) != ZOLTAN_OK) {
		std::cerr << "Zoltan_Initialize failed." << std::endl;
		abort();
	}


	/*
	Set up the grid in which the simulation will run
	*/
	dccrg::Dccrg<Cell, dccrg::Cartesian_Geometry> grid;

	// initialize the grid
	std::array<uint64_t, 3> grid_length = {{20, 20, 1}};
	const unsigned int neighborhood_size = 1;
	if (not grid.initialize(
		grid_length,
		comm,
		"RANDOM",
		neighborhood_size,
		0,
		false, false, false
	)) {
		std::cerr << __FILE__ << ":" << __LINE__
			<< ": Couldn't initialize grid."
			<< std::endl;
		abort();
	}

	// set the grid's geometry
	dccrg::Cartesian_Geometry::Parameters geom_params;
	geom_params.start[0] =
	geom_params.start[1] = -1;
	geom_params.start[2] = -1.0 / grid_length[0];
	geom_params.level_0_cell_length[0] =
	geom_params.level_0_cell_length[1] =
	geom_params.level_0_cell_length[2] = 2.0 / grid_length[0];
	if (not grid.set_geometry(geom_params)) {
		std::cerr << __FILE__ << ":" << __LINE__
			<< ": Couldn't set grid geometry."
			<< std::endl;
		abort();
	}

	grid.balance_load();

	/*
	Simulate
	*/

	gol::initialize<
		Cell,
		gol::Is_Alive,
		gol::Live_Neighbors
	>(grid);

	advection::initialize<
		Cell,
		advection::Density,
		advection::Density_Flux,
		advection::Velocity
	>(grid);

	particle::initialize<
		Cell,
		particle::Number_Of_Internal_Particles,
		particle::Number_Of_External_Particles,
		particle::Velocity,
		particle::Internal_Particles,
		particle::External_Particles
	>(grid);

	const std::vector<uint64_t>
		inner_cells = grid.get_local_cells_not_on_process_boundary(),
		outer_cells = grid.get_local_cells_on_process_boundary();

	const double advection_save_interval = 0.1;
	const double particle_save_interval = 0.1;

	double advection_next_save = 0;
	double particle_next_save = 0;

	double
		simulation_time = 0,
		time_step = 0;
	while (simulation_time <= M_PI) {

		double next_time_step = std::numeric_limits<double>::max();

		/*
		Save simulations
		*/

		// let the saving functions decide what to "transfer"
		Cell::set_transfer_all(
			false,
			gol::Is_Alive(),
			gol::Live_Neighbors()
		);

		Cell::set_transfer_all(
			false,
			advection::Density(),
			advection::Density_Flux(),
			advection::Velocity()
		);

		Cell::set_transfer_all(
			false,
			particle::Number_Of_Internal_Particles(),
			particle::Number_Of_External_Particles(),
			particle::Velocity(),
			particle::Internal_Particles(),
			particle::External_Particles()
		);


		gol::save<Cell, gol::Is_Alive>(grid, simulation_time);

		if (advection_next_save <= simulation_time) {
			advection_next_save += advection_save_interval;

			advection::save<
				Cell,
				advection::Density,
				advection::Velocity
			>(grid, simulation_time);
		}

		if (particle_next_save <= simulation_time) {
			particle_next_save += particle_save_interval;

			particle::save<
				Cell,
				particle::Number_Of_Internal_Particles,
				particle::Velocity,
				particle::Internal_Particles
			>(grid, simulation_time);
		}


		if (simulation_time >= M_PI) {
			break;
		}


		/*
		Solve
		*/

		next_time_step
			= std::min(
				next_time_step,
				particle::solve<
					Cell,
					particle::Number_Of_Internal_Particles,
					particle::Number_Of_External_Particles,
					particle::Velocity,
					particle::Internal_Particles,
					particle::External_Particles
				>(time_step, outer_cells, grid)
			);

		Cell::set_transfer_all(true, particle::Number_Of_External_Particles());
		grid.start_remote_neighbor_copy_updates();

		next_time_step
			= std::min(
				next_time_step,
				particle::solve<
					Cell,
					particle::Number_Of_Internal_Particles,
					particle::Number_Of_External_Particles,
					particle::Velocity,
					particle::Internal_Particles,
					particle::External_Particles
				>(time_step, inner_cells, grid)
			);


		grid.wait_remote_neighbor_copy_update_receives();
		particle::resize_receiving_containers<
			Cell,
			particle::Number_Of_External_Particles,
			particle::External_Particles
		>(grid);

		grid.wait_remote_neighbor_copy_update_sends();


		Cell::set_transfer_all(true, gol::Is_Alive());
		Cell::set_transfer_all(
			true,
			advection::Density(),
			advection::Velocity()
		);
		Cell::set_transfer_all(false, particle::Number_Of_External_Particles());
		Cell::set_transfer_all(
			true,
			particle::Velocity(),
			particle::External_Particles()
		);
		grid.start_remote_neighbor_copy_updates();


		gol::solve<
			Cell,
			gol::Is_Alive,
			gol::Live_Neighbors
		>(inner_cells, grid);

		next_time_step
			= std::min(
				next_time_step,
				advection::solve<
					Cell,
					advection::Density,
					advection::Density_Flux,
					advection::Velocity
				>(time_step, inner_cells, grid)
			);

		particle::incorporate_external_particles<
			Cell,
			particle::Number_Of_Internal_Particles,
			particle::Internal_Particles,
			particle::External_Particles
		>(inner_cells, grid);

		grid.wait_remote_neighbor_copy_update_receives();


		gol::solve<
			Cell,
			gol::Is_Alive,
			gol::Live_Neighbors
		>(outer_cells, grid);

		next_time_step
			= std::min(
				next_time_step,
				advection::solve<
					Cell,
					advection::Density,
					advection::Density_Flux,
					advection::Velocity
				>(time_step, outer_cells, grid)
			);

		gol::apply_solution<
			Cell,
			gol::Is_Alive,
			gol::Live_Neighbors
		>(inner_cells, grid);

		advection::apply_solution<
			Cell,
			advection::Density,
			advection::Density_Flux
		>(inner_cells, grid);

		particle::incorporate_external_particles<
			Cell,
			particle::Number_Of_Internal_Particles,
			particle::Internal_Particles,
			particle::External_Particles
		>(outer_cells, grid);

		particle::remove_external_particles<
			Cell,
			particle::Number_Of_External_Particles,
			particle::External_Particles
		>(inner_cells, grid);

		grid.wait_remote_neighbor_copy_update_sends();


		gol::apply_solution<
			Cell,
			gol::Is_Alive,
			gol::Live_Neighbors
		>(outer_cells, grid);

		advection::apply_solution<
			Cell,
			advection::Density,
			advection::Density_Flux
		>(outer_cells, grid);

		particle::remove_external_particles<
			Cell,
			particle::Number_Of_External_Particles,
			particle::External_Particles
		>(outer_cells, grid);

		simulation_time += time_step;

		MPI_Allreduce(&next_time_step, &time_step, 1, MPI_DOUBLE, MPI_MIN, comm);
		const double CFL = 0.5;
		time_step *= CFL;
	}

	MPI_Finalize();

	return EXIT_SUCCESS;
}
Code Example #10
File: dr_main.c  Project: uppatispr/trilinos-official
int main(int argc, char *argv[])
{
/* Local declarations. */
  struct Zoltan_Struct *zz = NULL;

  char  *cmd_file;
  char   cmesg[256]; /* for error messages */

  float  version;

  int    Proc, Num_Proc;
  int    iteration;
  int    error, gerror;
  int    print_output = 1;

  MESH_INFO  mesh;             /* mesh information struct */
  PARIO_INFO pio_info;
  PROB_INFO  prob;

/***************************** BEGIN EXECUTION ******************************/

  /* initialize MPI */
  MPI_Init(&argc, &argv);

#ifdef VAMPIR
  VT_initialize(&argc, &argv);
#endif

  /* get some machine information */
  MPI_Comm_rank(MPI_COMM_WORLD, &Proc);
  MPI_Comm_size(MPI_COMM_WORLD, &Num_Proc);

  my_rank = Proc;

#ifdef HOST_LINUX
  signal(SIGSEGV, meminfo_signal_handler);
  signal(SIGINT, meminfo_signal_handler);
  signal(SIGTERM, meminfo_signal_handler);
  signal(SIGABRT, meminfo_signal_handler);
  signal(SIGFPE, meminfo_signal_handler);
#endif

#ifdef ZOLTAN_PURIFY
  printf("%d of %d ZDRIVE LAUNCH pid = %d file = %s\n", 
         Proc, Num_Proc, getpid(), argv[1]);
#endif

  /* Initialize flags */
  Test.DDirectory = 0;
  Test.Local_Parts = 0;
  Test.Fixed_Objects = 0;
  Test.Drops = 0;
  Test.RCB_Box = 0;
  Test.Multi_Callbacks = 0;
  Test.Graph_Callbacks = 1;
  Test.Hypergraph_Callbacks = 1;
  Test.Gen_Files = 0;
  Test.Null_Lists = NO_NULL_LISTS;
  Test.Dynamic_Weights = .0;
  Test.Dynamic_Graph = .0;
  Test.Vtx_Inc = 0;

  Output.Text = 1;
  Output.Gnuplot = 0;
  Output.Nemesis = 0;
  Output.Plot_Partition = 0;
  Output.Mesh_Info_File = 0;

  /* Interpret the command line */
  switch(argc)
  {
  case 1:
    cmd_file = "zdrive.inp";
    break;

  case 2:
    cmd_file = argv[1];
    break;

  default:
    fprintf(stderr, "MAIN: ERROR in command line,");
    if(Proc == 0)
    {
      fprintf(stderr, " usage:\n");
      fprintf(stderr, "\t%s [command file]", DRIVER_NAME);
    }
    exit(1);
    break;
  }

  /* initialize Zoltan */
  if ((error = Zoltan_Initialize(argc, argv, &version)) != ZOLTAN_OK) {
    sprintf(cmesg, "fatal: Zoltan_Initialize returned error code, %d", error);
    Gen_Error(0, cmesg);
    error_report(Proc);
    print_output = 0;
    goto End;
  }

  /* initialize some variables */
  initialize_mesh(&mesh, Proc);

  pio_info.dsk_list_cnt		= -1;
  pio_info.file_comp            = STANDARD;
  pio_info.num_dsk_ctrlrs	= -1;
  pio_info.pdsk_add_fact	= -1;
  pio_info.zeros		= -1;
  pio_info.file_type		= -1;
  pio_info.chunk_reader         = 0;
  pio_info.init_dist_type	= -1;
  pio_info.init_size		= ZOLTAN_ID_INVALID;
  pio_info.init_dim 		= -1;
  pio_info.init_vwgt_dim 	= -1;
  pio_info.init_dist_pins       = -1;
  pio_info.pdsk_root[0]		= '\0';
  pio_info.pdsk_subdir[0]	= '\0';
  pio_info.pexo_fname[0]	= '\0';

  prob.method[0]		= '\0';
  prob.num_params		= 0;
  prob.params			= NULL;

  /* Read in the ascii input file */
  error = gerror = 0;
  if (Proc == 0) {
    printf("\n\nReading the command file, %s\n", cmd_file);
    if (!read_cmd_file(cmd_file, &prob, &pio_info, NULL)) {
      sprintf(cmesg,"fatal: Could not read in the command file"
              " \"%s\"!\n", cmd_file);
      Gen_Error(0, cmesg);
      error_report(Proc);
      print_output = 0;
      error = 1;
    }

    if (!check_inp(&prob, &pio_info)) {
      Gen_Error(0, "fatal: Error in user specified parameters.\n");
      error_report(Proc);
      print_output = 0;
      error = 1;
    }

    print_input_info(stdout, Num_Proc, &prob, &pio_info, version);
  }

  MPI_Allreduce(&error, &gerror, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
  if (gerror) goto End;

  /* broadcast the command info to all of the processors */
  brdcst_cmd_info(Proc, &prob, &pio_info, &mesh);

  Zoltan_Set_Param(NULL, "DEBUG_MEMORY", "1");
  print_output = Output.Text;

  /*
   *  Create a Zoltan structure.
   */
  if ((zz = Zoltan_Create(MPI_COMM_WORLD)) == NULL) {
    Gen_Error(0, "fatal:  NULL returned from Zoltan_Create()\n");
    return 0;
  }

  if (!setup_zoltan(zz, Proc, &prob, &mesh, &pio_info)) {
    Gen_Error(0, "fatal: Error returned from setup_zoltan\n");
    error_report(Proc);
    print_output = 0;
    goto End;
  }

  /* srand(Proc); Different seeds on different procs. */
  srand(1);  /* Same seed everywhere. */

  if (Test.Dynamic_Weights){
    /* Set obj weight dim to 1; can be overridden by user parameter */
    Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "1");
  }

  /* Loop over read and balance for a number of iterations */
  /* (Useful for testing REUSE parameters in Zoltan.) */
  for (iteration = 1; iteration <= Number_Iterations; iteration++) {

    if (Proc == 0) {
      printf("Starting iteration %d\n", iteration); 
      fflush(stdout);
    }

    /*
     * now read in the mesh and element information.
     * This is the only function call to do this. Upon return,
     * the mesh struct and the elements array should be filled.
     */
    if (iteration == 1) {
      if (!read_mesh(Proc, Num_Proc, &prob, &pio_info, &mesh)) {
        Gen_Error(0, "fatal: Error returned from read_mesh\n");
        error_report(Proc);
        print_output = 0;
        goto End;
      }
      /*
       *  Create a Zoltan DD for tracking elements during repartitioning.
       */

      if (mesh.data_type == ZOLTAN_HYPERGRAPH && !build_elem_dd(&mesh)) {
        Gen_Error(0, "fatal: Error returned from build_elem_dd\n");
        error_report(Proc);
        print_output = 0;
        goto End;
      }
    }


#ifdef KDDKDD_COOL_TEST
/* KDD Cool test of changing number of partitions  */
    sprintf(cmesg, "%d", Num_Proc * iteration);
    Zoltan_Set_Param(zz, "NUM_GLOBAL_PARTS", cmesg);
#endif

    /*
     * Produce files to verify input.
     */
    if (iteration == 1) {
      if (Debug_Driver > 2) {
        if (!output_results(cmd_file,"in",Proc,Num_Proc,&prob,&pio_info,&mesh)){
          Gen_Error(0, "fatal: Error returned from output_results\n");
          error_report(Proc);
        }
        if (Output.Gnuplot)
          if (!output_gnu(cmd_file,"in",Proc,Num_Proc,&prob,&pio_info,&mesh)) {
            Gen_Error(0, "warning: Error returned from output_gnu\n");
            error_report(Proc);
          }
      }
      if (Test.Vtx_Inc<0){
        /* Read Citeseer data from file */
        FILE *fp;
        int i=0;
        if (Proc==0){
          fp = fopen("months.txt", "r");
          if (!fp)
            printf("ERROR: Couldn't open file months.txt\n");
          while (fscanf(fp, "%d", &CITESEER[i])==1){
            ++i;
          }
          fclose(fp);
        }
        MPI_Bcast (CITESEER, 200, MPI_INT, 0, MPI_COMM_WORLD);
      }
    }

    if (Test.Dynamic_Graph > 0.0){
      if (mesh.data_type == ZOLTAN_GRAPH) {
        remove_random_vertices(&mesh, iteration, Test.Dynamic_Graph); 
      }
      else{
        Gen_Error(0, "fatal: \"test dynamic graph\" only works on graphs, not hypergraphs\n");
        error_report(Proc);
        print_output = 0;
        goto End;
      }
    }

    if (Test.Vtx_Inc){
      if (mesh.data_type == ZOLTAN_HYPERGRAPH ) {
        if (Test.Vtx_Inc>0)
          mesh.visible_nvtx += Test.Vtx_Inc; /* Increment uniformly */
        else
          mesh.visible_nvtx = CITESEER[iteration-1]; /* Citeseer document matrix. */
      }
      else{
        Gen_Error(0, "fatal: \"vertex increment\" only works on hypergraphs\n");
        error_report(Proc);
        print_output = 0;
        goto End;
      }
    }

    /*
     * now run Zoltan to get a new load balance and perform
     * the migration
     */
  
#ifdef IGNORE_FIRST_ITERATION_STATS
if (iteration == 1) {
  /* Exercise partitioner once on Tbird because first run is slow. */
  /* Lee Ann suspects Tbird is loading shared libraries. */
  struct Zoltan_Struct *zzcopy;
  zzcopy = Zoltan_Copy(zz);
  /* Don't do any migration or accumulate any stats. */
  if (Proc == 0) printf("%d KDDKDD IGNORING FIRST ITERATION STATS\n", Proc);
  Zoltan_Set_Param(zzcopy, "RETURN_LISTS", "NONE");
  Zoltan_Set_Param(zzcopy, "FINAL_OUTPUT", "0");
  Zoltan_Set_Param(zzcopy, "USE_TIMERS", "0");
  if (!run_zoltan(zzcopy, Proc, &prob, &mesh, &pio_info)) {
    Gen_Error(0, "fatal: Error returned from run_zoltan\n");
    error_report(Proc);
    print_output = 0;
    goto End;
  }
  Zoltan_Destroy(&zzcopy);
}
#endif /* IGNORE_FIRST_ITERATION_STATS */
#ifdef RANDOM_DIST
 if (iteration % 2 == 0) {
   char LB_METHOD[1024];

  if (Proc == 0) printf("%d CCCC Randomizing the input\n", Proc);
   strcpy(LB_METHOD, prob.method);
   strcpy(prob.method, "RANDOM");
   Zoltan_Set_Param(zz, "LB_METHOD", "RANDOM");
   Zoltan_Set_Param(zz, "RETURN_LISTS", "ALL");
    if (!run_zoltan(zz, Proc, &prob, &mesh, &pio_info)) {
      Gen_Error(0, "fatal: Error returned from run_zoltan\n");
      error_report(Proc);
      print_output = 0;
      goto End;
    }
   Zoltan_Set_Param(zz, "RETURN_LISTS", "NONE");
   Zoltan_Set_Param(zz, "LB_METHOD", LB_METHOD);
   strcpy(prob.method, LB_METHOD);
  if (Proc == 0) printf("%d CCCC Randomizing the input -- END\n", Proc);
 }
#endif /* RANDOM_DIST */
    if (!run_zoltan(zz, Proc, &prob, &mesh, &pio_info)) {
      Gen_Error(0, "fatal: Error returned from run_zoltan\n");
      error_report(Proc);
      print_output = 0;
      goto End;
    }

    /* Reset the mesh data structure for next iteration. */
    if (iteration < Number_Iterations) {
      int i, j;
      float tmp;
      float twiddle = 0.01;
      char str[16];  /* room for any int iteration count */
      /* Perturb coordinates of mesh */
      if (mesh.data_type == ZOLTAN_GRAPH){
        for (i = 0; i < mesh.num_elems; i++) {
          for (j = 0; j < mesh.num_dims; j++) {
            /* tmp = ((float) rand())/RAND_MAX; *//* Equiv. to sjplimp's test */
            tmp = (float) (i % 10) / 10.;
            mesh.elements[i].coord[0][j] += twiddle * (2.0*tmp-1.0);
            mesh.elements[i].avg_coord[j] = mesh.elements[i].coord[0][j];
          }
        }
        /* Increase weights in some parts */
        if (Test.Dynamic_Weights){
          /* Randomly pick 10% of parts to "refine" */
          /* Note:  Assumes at least 10 parts!  */
          /* Increase vertex weight, and also edge weights? TODO */
          j = (int) ((10.0*rand())/RAND_MAX + .5);
          for (i = 0; i < mesh.num_elems; i++) {
            if ((mesh.elements[i].my_part%10) == j){
                mesh.elements[i].cpu_wgt[0] = Test.Dynamic_Weights*(1+rand()%5);
            }
          }
        }
      }
      /* change the ParMETIS Seed */
      sprintf(str, "%d", iteration);
#ifdef ZOLTAN_PARMETIS      
      Zoltan_Set_Param(zz, "PARMETIS_SEED", str);
#endif
    }

  } /* End of loop over read and balance */

  if (Proc == 0) {
    printf("FILE %s:  Total:    %e seconds in Partitioning\n", 
           cmd_file, Total_Partition_Time);
    printf("FILE %s:  Average:  %e seconds per Iteration\n", 
           cmd_file, Total_Partition_Time/Number_Iterations);
  }

End:
  Zoltan_Destroy(&zz);
  if (mesh.dd) Zoltan_DD_Destroy(&(mesh.dd));

  Zoltan_Memory_Stats();

  /*
   * output the results
   */
  if (print_output) {
    if (!output_results(cmd_file,"out",Proc,Num_Proc,&prob,&pio_info,&mesh)) {
      Gen_Error(0, "fatal: Error returned from output_results\n");
      error_report(Proc);
    }

    if (Output.Gnuplot) {
      if (!output_gnu(cmd_file,"out",Proc,Num_Proc,&prob,&pio_info,&mesh)) {
        Gen_Error(0, "warning: Error returned from output_gnu\n");
        error_report(Proc);
      }
    }
  }

  free_mesh_arrays(&mesh);
  if (prob.params != NULL) free(prob.params);
  MPI_Finalize();
  
#ifdef VAMPIR
  VT_finalize();
#endif

  return 0;
}
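
The driver above enables Zoltan's allocation tracking with DEBUG_MEMORY before any structure exists and prints the balance with Zoltan_Memory_Stats at shutdown. A minimal sketch of that lifecycle, assuming only MPI and Zoltan are installed (parameters, query registration, and error handling elided):

#include <mpi.h>
#include <zoltan.h>

int main(int argc, char **argv)
{
  float ver;
  struct Zoltan_Struct *zz;

  MPI_Init(&argc, &argv);
  Zoltan_Initialize(argc, argv, &ver);

  /* Set before Zoltan_Create, as the driver above does, so allocations
     made by the structure are tracked from the start. */
  Zoltan_Set_Param(NULL, "DEBUG_MEMORY", "1");

  zz = Zoltan_Create(MPI_COMM_WORLD);
  /* ... register query functions and call Zoltan_LB_Partition here ... */
  Zoltan_Destroy(&zz);

  /* Report allocation totals and anything still outstanding. */
  Zoltan_Memory_Stats();

  MPI_Finalize();
  return 0;
}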
Code example #11
File: remove1d.cpp Project: esimerkkitutkija/pamhd
int main(int argc, char* argv[])
{
	if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
		std::cerr << "Couldn't initialize MPI." << std::endl;
		abort();
	}

	MPI_Comm comm = MPI_COMM_WORLD;

	int rank = 0, comm_size = 0;
	if (MPI_Comm_rank(comm, &rank) != MPI_SUCCESS) {
		std::cerr << "Couldn't obtain MPI rank." << std::endl;
		abort();
	}
	if (MPI_Comm_size(comm, &comm_size) != MPI_SUCCESS) {
		std::cerr << "Couldn't obtain size of MPI communicator." << std::endl;
		abort();
	}


	// initialize Zoltan
	float zoltan_version;
	if (Zoltan_Initialize(argc, argv, &zoltan_version) != ZOLTAN_OK) {
		std::cerr << "Zoltan_Initialize failed." << std::endl;
		abort();
	}

	double
		old_norm_x = std::numeric_limits<double>::max(),
		old_norm_y = std::numeric_limits<double>::max(),
		old_norm_z = std::numeric_limits<double>::max();
	size_t old_nr_of_cells = 0;
	for (size_t nr_of_cells = 8; nr_of_cells <= 2048; nr_of_cells *= 2) {

		dccrg::Dccrg<Cell, dccrg::Cartesian_Geometry> grid_x, grid_y, grid_z;

		const std::array<uint64_t, 3>
			grid_size_x{{nr_of_cells + 2, 1, 1}},
			grid_size_y{{1, nr_of_cells + 2, 1}},
			grid_size_z{{1, 1, nr_of_cells + 2}};

		if (not grid_x.initialize(grid_size_x,comm,"RANDOM",0,0,false,false,false)) {
			std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
			abort();
		}
		if (not grid_y.initialize(grid_size_y,comm,"RANDOM",0,0,false,false,false)) {
			std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
			abort();
		}
		if (not grid_z.initialize(grid_size_z,comm,"RANDOM",0,0,false,false,false)) {
			std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
			abort();
		}

		const std::array<double, 3>
			cell_length_x{{2 * M_PI / (grid_size_x[0] - 2), 1, 1}},
			cell_length_y{{1, 2 * M_PI / (grid_size_y[1] - 2), 1}},
			cell_length_z{{1, 1, 2 * M_PI / (grid_size_z[2] - 2)}},
			grid_start_x{{-cell_length_x[0], 0, 0}},
			grid_start_y{{0, -cell_length_y[1], 0}},
			grid_start_z{{0, 0, -cell_length_z[2]}};

		const double cell_volume
			= cell_length_x[0] * cell_length_x[1] * cell_length_x[2];

		dccrg::Cartesian_Geometry::Parameters geom_params_x,geom_params_y,geom_params_z;
		geom_params_x.start = grid_start_x;
		geom_params_x.level_0_cell_length = cell_length_x;
		geom_params_y.start = grid_start_y;
		geom_params_y.level_0_cell_length = cell_length_y;
		geom_params_z.start = grid_start_z;
		geom_params_z.level_0_cell_length = cell_length_z;

		if (not grid_x.set_geometry(geom_params_x)) {
			std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
			abort();
		}
		if (not grid_y.set_geometry(geom_params_y)) {
			std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
			abort();
		}
		if (not grid_z.set_geometry(geom_params_z)) {
			std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
			abort();
		}

		const auto all_cells = grid_x.get_cells();

		std::vector<uint64_t>
			solve_cells,
			boundary_cells;
		for (const auto& cell: all_cells) {
			auto
				*const cell_data_x = grid_x[cell],
				*const cell_data_y = grid_y[cell],
				*const cell_data_z = grid_z[cell];
			if (cell_data_x == NULL or cell_data_y == NULL or cell_data_z == NULL) {
				std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
				abort();
			}

			const auto index = grid_x.mapping.get_indices(cell);
			if (index[0] > 0 and index[0] < grid_size_x[0] - 1) {
				solve_cells.push_back(cell);
			} else {
				boundary_cells.push_back(cell);
			}

			const auto x = grid_x.geometry.get_center(cell)[0];
			auto
				&vec_x = (*cell_data_x)[Vector_Field()],
				&vec_y = (*cell_data_y)[Vector_Field()],
				&vec_z = (*cell_data_z)[Vector_Field()];
			vec_x[0] =
			vec_y[1] =
			vec_z[2] = function(x);
			vec_x[1] =
			vec_x[2] =
			vec_y[0] =
			vec_y[2] =
			vec_z[0] =
			vec_z[1] = 0;
		}
		grid_x.update_copies_of_remote_neighbors();
		grid_y.update_copies_of_remote_neighbors();
		grid_z.update_copies_of_remote_neighbors();

		// use copy boundaries
		const uint64_t
			neg_bdy_cell = 1,
			pos_bdy_cell = nr_of_cells + 2;
		if (grid_x.is_local(neg_bdy_cell)) {
			const auto
				*const neighbor_data_x = grid_x[neg_bdy_cell + 1],
				*const neighbor_data_y = grid_y[neg_bdy_cell + 1],
				*const neighbor_data_z = grid_z[neg_bdy_cell + 1];
			auto
				*const cell_data_x = grid_x[neg_bdy_cell],
				*const cell_data_y = grid_y[neg_bdy_cell],
				*const cell_data_z = grid_z[neg_bdy_cell];

			if (
				cell_data_x == NULL or neighbor_data_x == NULL
				or cell_data_y == NULL or neighbor_data_y == NULL
				or cell_data_z == NULL or neighbor_data_z == NULL
			) {
				std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
				abort();
			}

			(*cell_data_x)[Vector_Field()] = (*neighbor_data_x)[Vector_Field()];
			(*cell_data_y)[Vector_Field()] = (*neighbor_data_y)[Vector_Field()];
			(*cell_data_z)[Vector_Field()] = (*neighbor_data_z)[Vector_Field()];
		}

		if (grid_x.is_local(pos_bdy_cell)) {
			const auto
				*const neighbor_data_x = grid_x[pos_bdy_cell - 1],
				*const neighbor_data_y = grid_y[pos_bdy_cell - 1],
				*const neighbor_data_z = grid_z[pos_bdy_cell - 1];
			auto
				*const cell_data_x = grid_x[pos_bdy_cell],
				*const cell_data_y = grid_y[pos_bdy_cell],
				*const cell_data_z = grid_z[pos_bdy_cell];

			if (
				cell_data_x == NULL or neighbor_data_x == NULL
				or cell_data_y == NULL or neighbor_data_y == NULL
				or cell_data_z == NULL or neighbor_data_z == NULL
			) {
				std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
				abort();
			}

			(*cell_data_x)[Vector_Field()] = (*neighbor_data_x)[Vector_Field()];
			(*cell_data_y)[Vector_Field()] = (*neighbor_data_y)[Vector_Field()];
			(*cell_data_z)[Vector_Field()] = (*neighbor_data_z)[Vector_Field()];
		}

		auto Vector_Getter = [](Cell& cell_data) -> Vector_Field::data_type& {
			return cell_data[Vector_Field()];
		};
		auto Divergence_Getter = [](Cell& cell_data) -> Divergence::data_type& {
			return cell_data[Divergence()];
		};
		auto Gradient_Getter = [](Cell& cell_data) -> Gradient::data_type& {
			return cell_data[Gradient()];
		};
		pamhd::divergence::remove(
			solve_cells,
			boundary_cells,
			{},
			grid_x,
			Vector_Getter,
			Divergence_Getter,
			Gradient_Getter,
			2000, 0, 1e-15, 2, 100, false
		);
		pamhd::divergence::remove(
			solve_cells,
			boundary_cells,
			{},
			grid_y,
			Vector_Getter,
			Divergence_Getter,
			Gradient_Getter,
			2000, 0, 1e-15, 2, 100, false
		);
		pamhd::divergence::remove(
			solve_cells,
			boundary_cells,
			{},
			grid_z,
			Vector_Getter,
			Divergence_Getter,
			Gradient_Getter,
			2000, 0, 1e-15, 2, 100, false
		);

		const double
			p_of_norm = 2,
			norm_x = get_diff_lp_norm(solve_cells, grid_x, p_of_norm, cell_volume, 0),
			norm_y = get_diff_lp_norm(solve_cells, grid_y, p_of_norm, cell_volume, 1),
			norm_z = get_diff_lp_norm(solve_cells, grid_z, p_of_norm, cell_volume, 2);

		if (norm_x > old_norm_x) {
			if (grid_x.get_rank() == 0) {
				std::cerr << __FILE__ << ":" << __LINE__
					<< ": X norm with " << nr_of_cells
					<< " cells " << norm_x
					<< " is larger than with " << nr_of_cells / 2
					<< " cells " << old_norm_x
					<< std::endl;
			}
			abort();
		}
		if (norm_y > old_norm_y) {
			if (grid_y.get_rank() == 0) {
				std::cerr << __FILE__ << ":" << __LINE__
					<< ": Y norm with " << nr_of_cells
					<< " cells " << norm_y
					<< " is larger than with " << nr_of_cells / 2
					<< " cells " << old_norm_y
					<< std::endl;
			}
			abort();
		}
		if (norm_z > old_norm_z) {
			if (grid_z.get_rank() == 0) {
				std::cerr << __FILE__ << ":" << __LINE__
					<< ": Z norm with " << nr_of_cells
					<< " cells " << norm_z
					<< " is larger than with " << nr_of_cells / 2
					<< " cells " << old_norm_z
					<< std::endl;
			}
			abort();
		}

		if (old_nr_of_cells > 0) {
			const double
				order_of_accuracy_x
					= -log(norm_x / old_norm_x)
					/ log(double(nr_of_cells) / old_nr_of_cells),
				order_of_accuracy_y
					= -log(norm_y / old_norm_y)
					/ log(double(nr_of_cells) / old_nr_of_cells),
				order_of_accuracy_z
					= -log(norm_z / old_norm_z)
					/ log(double(nr_of_cells) / old_nr_of_cells);

			if (order_of_accuracy_x < 1.5) {
				if (grid_x.get_rank() == 0) {
					std::cerr << __FILE__ << ":" << __LINE__
						<< ": X order of accuracy from "
						<< old_nr_of_cells << " to " << nr_of_cells
						<< " is too low: " << order_of_accuracy_x
						<< std::endl;
				}
				abort();
			}
			if (order_of_accuracy_y < 1.5) {
				if (grid_y.get_rank() == 0) {
					std::cerr << __FILE__ << ":" << __LINE__
						<< ": Y order of accuracy from "
						<< old_nr_of_cells << " to " << nr_of_cells
						<< " is too low: " << order_of_accuracy_y
						<< std::endl;
				}
				abort();
			}
			if (order_of_accuracy_z < 1.5) {
				if (grid_z.get_rank() == 0) {
					std::cerr << __FILE__ << ":" << __LINE__
						<< ": Z order of accuracy from "
						<< old_nr_of_cells << " to " << nr_of_cells
						<< " is too low: " << order_of_accuracy_z
						<< std::endl;
				}
				abort();
			}
		}

		old_nr_of_cells = nr_of_cells;
		old_norm_x = norm_x;
		old_norm_y = norm_y;
		old_norm_z = norm_z;
	}

	MPI_Finalize();

	return EXIT_SUCCESS;
}
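
The test above accepts a finer grid only if the error norm shrinks and the estimated order of accuracy stays above 1.5. A small self-contained sketch of that estimate; the function name is illustrative, not from the test:

#include <cmath>
#include <cstdio>

// Estimate the order of accuracy p from two error norms measured on grids
// with different cell counts: error ~ h^p and h ~ 1/cells, hence
// p = -log(norm / old_norm) / log(cells / old_cells).
double order_of_accuracy(double norm, double old_norm,
                         double cells, double old_cells)
{
	return -std::log(norm / old_norm) / std::log(cells / old_cells);
}

int main()
{
	// Doubling the resolution while quartering the error gives p = 2.
	std::printf("%g\n", order_of_accuracy(0.25, 1.0, 16, 8));
	return 0;
}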
Code example #12
  int MESH_PartitionWithZoltan(Mesh_ptr mesh, int nparts, int **part, int noptions, 
                               char **options, MSTK_Comm comm) { 

  MEdge_ptr fedge;
  MFace_ptr mf, oppf, rface;
  MRegion_ptr mr, oppr;
  List_ptr fedges, efaces, rfaces, fregions;
  int  i, j, k, id;
  int  nv, ne, nf, nr=0, nfe, nef, nfr, nrf, idx, idx2;
  int  numflag, nedgecut, ipos;
  int  wtflag;

  int rc;
  float ver;
  struct Zoltan_Struct *zz;
  GRAPH_DATA graph;
  int changes, numGidEntries, numLidEntries, numImport, numExport;
  ZOLTAN_ID_PTR importGlobalGids, importLocalGids, exportGlobalGids, exportLocalGids;
  int *importProcs, *importToPart, *exportProcs, *exportToPart;

  int rank;
  MPI_Comm_rank(comm,&rank);
 
  rc = Zoltan_Initialize(0, NULL, &ver);

  if (rc != ZOLTAN_OK){
    MSTK_Report("MESH_PartitionWithZoltan","Could not initialize Zoltan",MSTK_FATAL);
    MPI_Finalize();
    exit(0);
  }

  /******************************************************************
  ** Create a Zoltan library structure for this instance of partitioning
  ********************************************************************/
  zz = Zoltan_Create(comm);

  /*****************************************************************
   ** Figure out partitioning method
   *****************************************************************/
  
  char partition_method_str[32];
  strcpy(partition_method_str,"RCB");  /* Default - Recursive Coordinate Bisection */
  if (noptions) {
    for (i = 0; i < noptions; i++) {
      if (strncmp(options[i],"LB_PARTITION",12) == 0) {
        char *result = NULL, instring[256];
        strcpy(instring,options[i]);
        result = strtok(instring,"=");
        result = strtok(NULL," ");
        strcpy(partition_method_str,result);
      }
    }
  }
  
  if (rank == 0) {
    char mesg[256];
    sprintf(mesg,"Using partitioning method %s for ZOLTAN\n",partition_method_str);
    MSTK_Report("MESH_PartitionWithZoltan",mesg,MSTK_MESG);
  }

  /* General parameters for Zoltan */
  Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0");
  Zoltan_Set_Param(zz, "LB_METHOD", partition_method_str);
  Zoltan_Set_Param(zz, "LB_APPROACH", "PARTITION");
  Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1");
  Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "1");
  Zoltan_Set_Param(zz, "RETURN_LISTS", "ALL");


  graph.numMyNodes = 0;
  graph.numAllNbors = 0;
  graph.nodeGID = NULL;
  graph.nodeCoords = NULL;
  graph.nborIndex = NULL;
  graph.nborGID = NULL;
  graph.nborProc = NULL;

  if (strcmp(partition_method_str,"RCB") == 0) {
    if (rank == 0) {
      nr = MESH_Num_Regions(mesh);
      nf = MESH_Num_Faces(mesh);

      if (!nf && !nr)
        MSTK_Report("MESH_PartitionWithZoltan","Cannot partition wire meshes",
                    MSTK_FATAL);

      if (nr == 0) { /* Surface or planar mesh */

        int ndim = 2;       /* assume mesh is planar */
        idx = 0;
        MVertex_ptr mv;
        while ((mv = MESH_Next_Vertex(mesh,&idx))) {
          double vxyz[3];
          MV_Coords(mv,vxyz);
          if (vxyz[2] != 0.0) {
            ndim = 3;  /* non-planar or planar with non-zero z */
            break;
          }
        }
        NDIM_4_ZOLTAN = ndim-1;  /* ignore last dimension to avoid partitioning in that dimension */

        graph.numMyNodes = nf;

        graph.nodeGID = (ZOLTAN_ID_TYPE *) malloc(sizeof(ZOLTAN_ID_TYPE) * nf);
        graph.nodeCoords = (double *) malloc(sizeof(double) * NDIM_4_ZOLTAN * nf);

        idx = 0;
        while ((mf = MESH_Next_Face(mesh,&idx))) {
          double fxyz[MAXPV2][3], cen[3];
          int nfv;

          MF_Coords(mf,&nfv,fxyz);
          cen[0] = cen[1] = cen[2] = 0.0;
          for (j = 0; j < nfv; j++)
            for (k = 0; k < NDIM_4_ZOLTAN; k++) 
              cen[k] += fxyz[j][k];              
          for (k = 0; k < NDIM_4_ZOLTAN; k++) cen[k] /= nfv;

          id = MF_ID(mf);
          graph.nodeGID[id-1] = id;
          memcpy(&(graph.nodeCoords[NDIM_4_ZOLTAN*(id-1)]),cen,NDIM_4_ZOLTAN*sizeof(double));
        }

      }
      else { /* Volume mesh */

        int ndim = 3;
        NDIM_4_ZOLTAN = ndim-1;  /* ignore last dimension  to avoid partitioning in that dimension */
        graph.numMyNodes = nr;

        graph.nodeGID = (ZOLTAN_ID_TYPE *) malloc(sizeof(ZOLTAN_ID_TYPE) * nr);
        graph.nodeCoords = (double *) malloc(sizeof(double) * NDIM_4_ZOLTAN * nr);

        idx = 0;
        while ((mr = MESH_Next_Region(mesh,&idx))) {
          double rxyz[MAXPV3][3], cen[3];
          int nrv;
          
          MR_Coords(mr,&nrv,rxyz);
          cen[0] = cen[1] = cen[2] = 0.0;
          for (j = 0; j < nrv; j++)
            for (k = 0; k < NDIM_4_ZOLTAN; k++)
              cen[k] += rxyz[j][k];
          for (k = 0; k < NDIM_4_ZOLTAN; k++) cen[k] /= nrv;
          for (k = 0; k < NDIM_4_ZOLTAN; k++) 
            if (fabs(cen[k]) < 1.0e-10) cen[k] = 0.0; 

          id = MR_ID(mr);
          graph.nodeGID[id-1] = id;
          memcpy(&(graph.nodeCoords[NDIM_4_ZOLTAN*(id-1)]),cen,NDIM_4_ZOLTAN*sizeof(double));
        }

      }
    }

    MPI_Bcast(&NDIM_4_ZOLTAN,1,MPI_INT,0,comm);

    /* Set some default values */
    Zoltan_Set_Param(zz, "RCB_RECTILINEAR_BLOCKS","1");
    //    Zoltan_Set_Param(zz, "AVERAGE_CUTS", "1");

    if (noptions > 1) {
      for (i = 1; i < noptions; i++) {
        char *paramstr = NULL, *valuestr = NULL, instring[256];
        strcpy(instring,options[i]);
        paramstr = strtok(instring,"=");
        valuestr = strtok(NULL," ");
        Zoltan_Set_Param(zz,paramstr,valuestr);
      }
    }

    /* Query functions - defined in simpleQueries.h */

    Zoltan_Set_Num_Obj_Fn(zz, get_number_of_nodes, &graph);
    Zoltan_Set_Obj_List_Fn(zz, get_node_list, &graph);
    Zoltan_Set_Num_Geom_Fn(zz, get_num_dimensions_reduced, &graph);    /* reduced dimensions */
    Zoltan_Set_Geom_Multi_Fn(zz, get_element_centers_reduced, &graph); /* reduced dimension centers */

  }
  else if (strcmp(partition_method_str,"GRAPH") == 0) {

    if(rank == 0) {
      nv = MESH_Num_Vertices(mesh);
      ne = MESH_Num_Edges(mesh);
      nf = MESH_Num_Faces(mesh);
      nr = MESH_Num_Regions(mesh);
      
      ipos = 0;
      
      /* build node and neighbor lists, much as in the METIS-based partitioner.
         Assign the whole mesh to processor 0 and a NULL mesh to the others */
  
      if (nr == 0) {
        if (nf == 0) {
          MSTK_Report("MESH_PartitionWithZoltan",
                      "Cannot partition wire meshes with Zoltan",MSTK_FATAL);
          exit(-1);
        }

        graph.nodeGID = (ZOLTAN_ID_TYPE *)malloc(sizeof(ZOLTAN_ID_TYPE) * nf);
        graph.nborIndex = (int *)malloc(sizeof(int) * (nf + 1));
        graph.nborGID = (ZOLTAN_ID_TYPE *)malloc(sizeof(ZOLTAN_ID_TYPE) * 2*ne);
        graph.nborProc = (int *)malloc(sizeof(int) * 2*ne);
      
        graph.nborIndex[0] = 0;
      
        /* Surface mesh */
        idx = 0; i = 0;
        while ((mf = MESH_Next_Face(mesh,&idx))) {
          graph.nodeGID[i] = MF_ID(mf);
          fedges = MF_Edges(mf,1,0);
          nfe = List_Num_Entries(fedges);
	
          idx2 = 0;
          while ((fedge = List_Next_Entry(fedges,&idx2))) {
	  
            efaces = ME_Faces(fedge);
            nef = List_Num_Entries(efaces);
	  
            if (nef == 1) {
              continue;          /* boundary edge; nothing to do */
            } else {
              int j;
              for (j = 0; j < nef; j++) {
                oppf = List_Entry(efaces,j);
                if (oppf != mf) {
                  graph.nborGID[ipos] = MF_ID(oppf);
                  /* initially set all nodes on processor 0 */
                  graph.nborProc[ipos] = 0;
                  ipos++;
                }
              }
            }
	  
            List_Delete(efaces);
	  
          }
	
          List_Delete(fedges);
          i++;
          graph.nborIndex[i] = ipos;
        }
        graph.numMyNodes = i;
        graph.numAllNbors = ipos;
      }
      else {
        graph.nodeGID = (ZOLTAN_ID_TYPE *)malloc(sizeof(ZOLTAN_ID_TYPE) * nr);
        graph.nborIndex = (int *)malloc(sizeof(int) * (nr + 1));
        graph.nborGID = (ZOLTAN_ID_TYPE *)malloc(sizeof(ZOLTAN_ID_TYPE) * 2*nf);
        graph.nborProc = (int *)malloc(sizeof(int) * 2*nf);
      
        graph.nborIndex[0] = 0;
      
        /* Volume mesh */
      
        idx = 0; i = 0;
        while ((mr = MESH_Next_Region(mesh,&idx))) {
          graph.nodeGID[i] = MR_ID(mr);
          rfaces = MR_Faces(mr);
          nrf = List_Num_Entries(rfaces);
      
          idx2 = 0;
          while ((rface = List_Next_Entry(rfaces,&idx2))) {
	  
            fregions = MF_Regions(rface);
            nfr = List_Num_Entries(fregions);
	  
            if (nfr > 1) {
              oppr = List_Entry(fregions,0);
              if (oppr == mr)
                oppr = List_Entry(fregions,1);
	    
              graph.nborGID[ipos] = MR_ID(oppr);
              /* initially set all nodes on processor 0 */
              graph.nborProc[ipos] = 0;
              ipos++;
            }
	  
            List_Delete(fregions);
	  
          }
	
          List_Delete(rfaces);
	
          i++;
          graph.nborIndex[i] = ipos;
        }
        graph.numMyNodes = i;
        graph.numAllNbors = ipos;
      }
    }

    /* Graph parameters */

    /* Zoltan_Set_Param(zz, "CHECK_GRAPH", "2"); */
    Zoltan_Set_Param(zz, "PHG_EDGE_SIZE_THRESHOLD", ".35");  /* 0-remove all, 1-remove none */

    /* Query functions - defined in simpleQueries.h */

    Zoltan_Set_Num_Obj_Fn(zz, get_number_of_nodes, &graph);
    Zoltan_Set_Obj_List_Fn(zz, get_node_list, &graph);
    Zoltan_Set_Num_Edges_Multi_Fn(zz, get_num_edges_list, &graph);
    Zoltan_Set_Edge_List_Multi_Fn(zz, get_edge_list, &graph);    
  }

  /* Partition the graph */
  /******************************************************************
   ** Zoltan can now partition the graph.  We assume the number of
   ** partitions is equal to the number of processes.  Process rank 0
   ** will own partition 0, process rank 1 will own partition 1, and
   ** so on.
   ******************************************************************/
  rc = Zoltan_LB_Partition(zz, /* input (all remaining fields are output) */
			   &changes,        /* 1 if partitioning was changed, 0 otherwise */
			   &numGidEntries,  /* Number of integers used for a global ID */
			   &numLidEntries,  /* Number of integers used for a local ID */
			   &numImport,      /* Number of nodes to be sent to me */
			   &importGlobalGids,  /* Global IDs of nodes to be sent to me */
			   &importLocalGids,   /* Local IDs of nodes to be sent to me */
			   &importProcs,    /* Process rank for source of each incoming node */
			   &importToPart,   /* New partition for each incoming node */
			   &numExport,      /* Number of nodes I must send to other processes*/
			   &exportGlobalGids,  /* Global IDs of the nodes I must send */
			   &exportLocalGids,   /* Local IDs of the nodes I must send */
			   &exportProcs,    /* Process to which I send each of the nodes */
			   &exportToPart);  /* Partition to which each node will belong */

  if (rc != ZOLTAN_OK){
    if (rank == 0)
      MSTK_Report("MESH_PartitionWithZoltan","Could not partition mesh with ZOLTAN",
                  MSTK_ERROR);
    Zoltan_Destroy(&zz);
    MPI_Finalize();
    return 0;
  }

  if(rank == 0) {
    *part = (int *) calloc(graph.numMyNodes,sizeof(int));
    for ( i = 0; i < numExport; i++ ) {
      (*part)[exportGlobalGids[i]-1] = exportToPart[i];
    }
    if (graph.nodeGID) free(graph.nodeGID);
    if (graph.nodeCoords) free(graph.nodeCoords);
    if (graph.nborIndex) free(graph.nborIndex);
    if (graph.nborGID) free(graph.nborGID);
    if (graph.nborProc) free(graph.nborProc);
  }
  else { 
    *part = NULL;
  }


  Zoltan_LB_Free_Part(&exportGlobalGids, &exportLocalGids, &exportProcs, &exportToPart);
  Zoltan_LB_Free_Part(&importGlobalGids, &importLocalGids, &importProcs, &importToPart);
  Zoltan_Destroy(&zz);                

  return 1;
}
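
A hypothetical call site for MESH_PartitionWithZoltan above, assuming MSTK's headers and an initialized MPI environment; option strings use the NAME=VALUE form the routine splits with strtok:

#include <stdlib.h>

/* Hypothetical usage sketch; "mesh" is assumed to be a valid MSTK mesh
 * already loaded on rank 0, nparts the desired partition count. */
void example_partition(Mesh_ptr mesh, int nparts)
{
  int *part = NULL;
  char *options[2] = { "LB_PARTITION=RCB",     /* method, parsed first */
                       "RCB_OUTPUT_LEVEL=1" }; /* forwarded to Zoltan_Set_Param */

  if (MESH_PartitionWithZoltan(mesh, nparts, &part, 2, options,
                               MPI_COMM_WORLD)) {
    /* On rank 0, part[id-1] holds the target partition of the entity
       with global ID id; on the other ranks part is NULL. */
    free(part);
  }
}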
Code example #13
File: get3d_grad_amr.cpp Project: SeanChu/pamhd
int main(int argc, char* argv[])
{
	if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
		std::cerr << "Couldn't initialize MPI." << std::endl;
		abort();
	}

	MPI_Comm comm = MPI_COMM_WORLD;

	int rank = 0, comm_size = 0;
	if (MPI_Comm_rank(comm, &rank) != MPI_SUCCESS) {
		std::cerr << "Couldn't obtain MPI rank." << std::endl;
		abort();
	}
	if (MPI_Comm_size(comm, &comm_size) != MPI_SUCCESS) {
		std::cerr << "Couldn't obtain size of MPI communicator." << std::endl;
		abort();
	}


	// initialize Zoltan
	float zoltan_version;
	if (Zoltan_Initialize(argc, argv, &zoltan_version) != ZOLTAN_OK) {
		std::cerr << "Zoltan_Initialize failed." << std::endl;
		abort();
	}

	const unsigned int neighborhood_size = 0;
	const int max_refinement_level = 1;

	std::array<double, 3> old_norm{{
		std::numeric_limits<double>::max(),
		std::numeric_limits<double>::max(),
		std::numeric_limits<double>::max()
	}};
	size_t old_nr_of_cells = 0;
	for (size_t nr_of_cells = 4; nr_of_cells <= 16; nr_of_cells *= 2) {

		dccrg::Dccrg<Cell, dccrg::Cartesian_Geometry> grid;

		const std::array<uint64_t, 3> grid_size{{
			nr_of_cells + 2,
			nr_of_cells + 2,
			nr_of_cells + 2
		}};

		if (
			not grid.initialize(
				grid_size,
				comm,
				"RANDOM",
				neighborhood_size,
				max_refinement_level,
				false, false, false
			)
		) {
			std::cerr << __FILE__ << ":" << __LINE__
				<< ": Couldn't initialize grid."
				<< std::endl;
			abort();
		}

		const std::array<double, 3>
			cell_length{{
				double(5) / (grid_size[0] - 2),
				double(5) / (grid_size[1] - 2),
				double(1) / (grid_size[2] - 2),
			}},
			grid_start{{1, 1, 0}};

		dccrg::Cartesian_Geometry::Parameters geom_params;
		geom_params.start = grid_start;
		geom_params.level_0_cell_length = cell_length;

		if (not grid.set_geometry(geom_params)) {
			std::cerr << __FILE__ << ":" << __LINE__
				<< ": Couldn't set grid geometry."
				<< std::endl;
			abort();
		}

		grid.balance_load();

		for (int i = 0; i < max_refinement_level; i++) {
			for (const auto& cell: grid.get_cells()) {
				const auto center = grid.geometry.get_center(cell);
				if (
					    center[0] > grid_start[0] + cell_length[0] + 5.0*2/4
					and center[0] < grid_start[0] + cell_length[0] + 5.0*4/4
					and center[1] > grid_start[1] + cell_length[1] + 5.0*1/4
					and center[1] < grid_start[1] + cell_length[1] + 5.0*3/4
					and center[2] > grid_start[2] + cell_length[2] + 2.0/4
					and center[2] < grid_start[2] + cell_length[2] + 4.0/4
				) {
					grid.refine_completely(cell);
				}
			}
			grid.stop_refining();
		}

		std::vector<uint64_t> solve_cells;
		for (const auto& cell: grid.get_cells()) {
			auto* const cell_data = grid[cell];
			if (cell_data == nullptr) {
				std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
				abort();
			}

			const auto center = grid.geometry.get_center(cell);
			(*cell_data)[Scalar()] = function(center);

			if (
				    center[0] > grid_start[0] + cell_length[0]
				and center[0] < grid_start[0] + cell_length[0] + 5
				and center[1] > grid_start[1] + cell_length[1]
				and center[1] < grid_start[1] + cell_length[1] + 5
				and center[2] > grid_start[2] + cell_length[2]
				and center[2] < grid_start[2] + cell_length[2] + 1
			) {
				solve_cells.push_back(cell);
			}
		}
		grid.update_copies_of_remote_neighbors();

		pamhd::divergence::get_gradient(
			solve_cells,
			grid,
			[](Cell& cell_data) -> Scalar::data_type& {
				return cell_data[Scalar()];
			},
			[](Cell& cell_data) -> Gradient::data_type& {
				return cell_data[Gradient()];
			}
		);

		const auto norm = get_max_norm(solve_cells, grid);

		for (size_t dim = 0; dim < 3; dim++) {
			if (norm[dim] > old_norm[dim]) {
				if (grid.get_rank() == 0) {
					std::cerr << __FILE__ << ":" << __LINE__
						<< " dim " << dim
						<< ": Norm with " << solve_cells.size()
						<< " cells " << norm[dim]
						<< " is larger than with " << old_nr_of_cells
						<< " cells " << old_norm[dim]
						<< std::endl;
				}
				abort();
			}

			if (old_nr_of_cells > 0) {
				const double order_of_accuracy
					= -log(norm[dim] / old_norm[dim])
					/ log(double(solve_cells.size()) / old_nr_of_cells);

				if (order_of_accuracy < 0.15) {
					if (grid.get_rank() == 0) {
						std::cerr << __FILE__ << ":" << __LINE__
							<< " dim " << dim
							<< ": Order of accuracy from "
							<< old_nr_of_cells << " to " << solve_cells.size()
							<< " is too low: " << order_of_accuracy
							<< std::endl;
					}
					abort();
				}
			}
		}

		old_nr_of_cells = solve_cells.size();
		old_norm = norm;
	}

	MPI_Finalize();

	return EXIT_SUCCESS;
}
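
pamhd::divergence::get_gradient above is handed lambdas that map a cell's data to the variable being read or written, so the solver never depends on the cell layout. A self-contained sketch of that getter pattern with toy types (none of these names come from pamhd):

#include <iostream>
#include <vector>

struct CellData {
	double scalar;
	double gradient[3];
};

// An algorithm written against a getter callable instead of a concrete
// member: the same function works for any cell layout.
template<class Getter>
double sum_field(std::vector<CellData>& cells, Getter&& get)
{
	double sum = 0;
	for (auto& cell: cells) {
		sum += get(cell);
	}
	return sum;
}

int main()
{
	std::vector<CellData> cells(4, CellData{1.5, {0, 0, 0}});
	auto scalar_getter = [](CellData& cell) -> double& {
		return cell.scalar;
	};
	std::cout << sum_field(cells, scalar_getter) << std::endl; // prints 6
	return 0;
}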
Code example #14
  void ZoltanInterface<LocalOrdinal, GlobalOrdinal, Node, LocalMatOps>::Build(Level& level) const {
    FactoryMonitor m(*this, "Build", level);

    RCP<Matrix>      A        = Get< RCP<Matrix> >     (level, "A");
    RCP<const Map>   rowMap   = A->getRowMap();

    RCP<MultiVector> Coords   = Get< RCP<MultiVector> >(level, "Coordinates");
    size_t           dim      = Coords->getNumVectors();

    GO               numParts = level.Get<GO>("number of partitions");

    if (numParts == 1) {
      // Running on one processor, so decomposition is the trivial one, all zeros.
      RCP<Xpetra::Vector<GO, LO, GO, NO> > decomposition = Xpetra::VectorFactory<GO, LO, GO, NO>::Build(rowMap, true);
      Set(level, "Partition", decomposition);
      return;
    }

    float zoltanVersion_;
    Zoltan_Initialize(0, NULL, &zoltanVersion_);

    RCP<const Teuchos::MpiComm<int> >            dupMpiComm = rcp_dynamic_cast<const Teuchos::MpiComm<int> >(rowMap->getComm()->duplicate());
    RCP<const Teuchos::OpaqueWrapper<MPI_Comm> > zoltanComm = dupMpiComm->getRawMpiComm();

    RCP<Zoltan> zoltanObj_ = rcp(new Zoltan((*zoltanComm)()));  //extract the underlying MPI_Comm handle and create a Zoltan object
    if (zoltanObj_ == Teuchos::null)
      throw Exceptions::RuntimeError("MueLu::Zoltan : Unable to create Zoltan data structure");

    // Tell Zoltan what kind of local/global IDs we will use.
    // In our case, each GID is a single int and there are no local ids,
    // matching the num_gid_entries/num_lid_entries parameters set below.
    int rv;
    if ((rv = zoltanObj_->Set_Param("num_gid_entries", "1")) != ZOLTAN_OK)
      throw Exceptions::RuntimeError("MueLu::Zoltan::Setup : setting parameter 'num_gid_entries' returned error code " + Teuchos::toString(rv));
    if ((rv = zoltanObj_->Set_Param("num_lid_entries", "0") ) != ZOLTAN_OK)
      throw Exceptions::RuntimeError("MueLu::Zoltan::Setup : setting parameter 'num_lid_entries' returned error code " + Teuchos::toString(rv));
    if ((rv = zoltanObj_->Set_Param("obj_weight_dim", "1") ) != ZOLTAN_OK)
      throw Exceptions::RuntimeError("MueLu::Zoltan::Setup : setting parameter 'obj_weight_dim' returned error code "  + Teuchos::toString(rv));

    if (GetVerbLevel() & Statistics1) zoltanObj_->Set_Param("debug_level", "1");
    else                              zoltanObj_->Set_Param("debug_level", "0");

    zoltanObj_->Set_Param("num_global_partitions", toString(numParts));

    zoltanObj_->Set_Num_Obj_Fn(GetLocalNumberOfRows,      (void *) &*A);
    zoltanObj_->Set_Obj_List_Fn(GetLocalNumberOfNonzeros, (void *) &*A);
    zoltanObj_->Set_Num_Geom_Fn(GetProblemDimension,      (void *) &dim);
    zoltanObj_->Set_Geom_Multi_Fn(GetProblemGeometry,     (void *) Coords.get());

    // Data pointers that Zoltan requires.
    ZOLTAN_ID_PTR import_gids = NULL;  // Global nums of objs to be imported
    ZOLTAN_ID_PTR import_lids = NULL;  // Local indices to objs to be imported
    int   *import_procs       = NULL;  // Proc IDs of procs owning objs to be imported.
    int   *import_to_part     = NULL;  // Partition #s to which imported objs should be assigned.
    ZOLTAN_ID_PTR export_gids = NULL;  // Global nums of objs to be exported
    ZOLTAN_ID_PTR export_lids = NULL;  // local indices to objs to be exported
    int   *export_procs       = NULL;  // Proc IDs of destination procs for objs to be exported.
    int   *export_to_part     = NULL;  // Partition #s for objs to be exported.
    int   num_imported;                // Number of objs to be imported.
    int   num_exported;                // Number of objs to be exported.
    int   newDecomp;                   // Flag indicating whether the decomposition has changed
    int   num_gid_entries;             // Number of array entries in a global ID.
    int   num_lid_entries;

    {
      SubFactoryMonitor m1(*this, "Zoltan RCB", level);
      rv = zoltanObj_->LB_Partition(newDecomp, num_gid_entries, num_lid_entries,
                                    num_imported, import_gids, import_lids, import_procs, import_to_part,
                                    num_exported, export_gids, export_lids, export_procs, export_to_part);
      if (rv == ZOLTAN_FATAL)
        throw Exceptions::RuntimeError("Zoltan::LB_Partition() returned error code");
    }

    // TODO check that A's row map is 1-1.  Zoltan requires this.

    RCP<Xpetra::Vector<GO, LO, GO, NO> > decomposition;
    if (newDecomp) {
      decomposition = Xpetra::VectorFactory<GO, LO, GO, NO>::Build(rowMap, false); // Don't initialize, will be overwritten
      ArrayRCP<GO> decompEntries = decomposition->getDataNonConst(0);

      int mypid = rowMap->getComm()->getRank();
      for (typename ArrayRCP<GO>::iterator i = decompEntries.begin(); i != decompEntries.end(); ++i)
        *i = mypid;

      LO blockSize = A->GetFixedBlockSize();
      for (int i = 0; i < num_exported; ++i) {
        // We have assigned Zoltan gids to first row GID in the block
        // NOTE: Zoltan GIDs are different from GIDs in the Coordinates vector
        LO  localEl = rowMap->getLocalElement(export_gids[i]);
        int partNum = export_to_part[i];
        for (LO j = 0; j < blockSize; ++j)
          decompEntries[localEl + j] = partNum;
      }
    }

    Set(level, "Partition", decomposition);

    zoltanObj_->LB_Free_Part(&import_gids, &import_lids, &import_procs, &import_to_part);
    zoltanObj_->LB_Free_Part(&export_gids, &export_lids, &export_procs, &export_to_part);

  } //Build()
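
The Set_Num_Obj_Fn registration above must match Zoltan's ZOLTAN_NUM_OBJ_FN prototype. A hypothetical callback with an illustrative payload; the real GetLocalNumberOfRows reads its count from the matrix passed as the data pointer:

#include <zoltan.h>

// Hypothetical payload; Zoltan hands back the void* it was registered
// with, so it can be any application type.
struct ObjCount {
  int local_rows;
};

// Matches ZOLTAN_NUM_OBJ_FN: int fn(void *data, int *ierr).
static int exampleNumObjFn(void *data, int *ierr)
{
  ObjCount *payload = static_cast<ObjCount *>(data);
  *ierr = ZOLTAN_OK;
  return payload->local_rows;
}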
Code example #15
File: problemGRAPH.c Project: gitter-badger/quinoa
int main(int argc, char *argv[])
{
  int i, rc;
  float ver;
  struct Zoltan_Struct *zz;
  int changes, numGidEntries, numLidEntries, numImport, numExport;
  int myRank, numProcs;
  ZOLTAN_ID_PTR importGlobalGids, importLocalGids, exportGlobalGids, exportLocalGids;
  int *importProcs, *importToPart, *exportProcs, *exportToPart;
  int *parts;
  FILE *fp;
  GRAPH_DATA myGraph;

  /******************************************************************
  ** Initialize MPI and Zoltan
  ******************************************************************/

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);

  rc = Zoltan_Initialize(argc, argv, &ver);

  if (rc != ZOLTAN_OK){
    printf("sorry...\n");
    MPI_Finalize();
    exit(0);
  }

  /******************************************************************
  ** Read graph from input file and distribute it 
  ******************************************************************/

  fp = fopen(fname, "r");
  if (!fp){
    if (myRank == 0) fprintf(stderr,"ERROR: Cannot open %s\n",fname);
    MPI_Finalize();
    exit(1);
  }
  fclose(fp);

  read_input_file(myRank, numProcs, fname, &myGraph);

  /******************************************************************
  ** Create a Zoltan library structure for this instance of load
  ** balancing.  Set the parameters and query functions that will
  ** govern the library's calculation.  See the Zoltan User's
  ** Guide for the definition of these and many other parameters.
  ******************************************************************/

  zz = Zoltan_Create(MPI_COMM_WORLD);

  /* General parameters */

/*
  Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0");
  Zoltan_Set_Param(zz, "LB_METHOD", "GRAPH");
  Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1"); 
  Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "1");
  Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "0");
  Zoltan_Set_Param(zz, "RETURN_LISTS", "ALL");
*/

     Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1"); 
     Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "1");
     Zoltan_Set_Param(zz,"LB_METHOD","GRAPH");
#ifdef HAVE_PARMETIS
     Zoltan_Set_Param(zz,"GRAPH_PACKAGE","PARMETIS");
#else
  #ifdef HAVE_SCOTCH
     Zoltan_Set_Param(zz,"GRAPH_PACKAGE","SCOTCH");
  #endif
#endif
     Zoltan_Set_Param(zz,"EDGE_WEIGHT_DIM","1");
     Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "0");
     Zoltan_Set_Param(zz,"LB_APPROACH","REPARTITION");
     Zoltan_Set_Param(zz,"GRAPH_SYMMETRIZE","TRANSPOSE");
     Zoltan_Set_Param(zz,"GRAPH_SYM_WEIGHT","ADD");

  /* Graph parameters */

  Zoltan_Set_Param(zz, "CHECK_GRAPH", "2"); 
  Zoltan_Set_Param(zz, "PHG_EDGE_SIZE_THRESHOLD", ".35");  /* 0-remove all, 1-remove none */

  /* Query functions - defined in simpleQueries.h */

  Zoltan_Set_Num_Obj_Fn(zz, get_number_of_vertices, &myGraph);
  Zoltan_Set_Obj_List_Fn(zz, get_vertex_list, &myGraph);
  Zoltan_Set_Num_Edges_Multi_Fn(zz, get_num_edges_list, &myGraph);
  Zoltan_Set_Edge_List_Multi_Fn(zz, get_edge_list, &myGraph);

  /******************************************************************
  ** Zoltan can now partition the simple graph.
  ** In this simple example, we assume the number of partitions is
  ** equal to the number of processes.  Process rank 0 will own
  ** partition 0, process rank 1 will own partition 1, and so on.
  ******************************************************************/

  rc = Zoltan_LB_Partition(zz, /* input (all remaining fields are output) */
        &changes,        /* 1 if partitioning was changed, 0 otherwise */ 
        &numGidEntries,  /* Number of integers used for a global ID */
        &numLidEntries,  /* Number of integers used for a local ID */
        &numImport,      /* Number of vertices to be sent to me */
        &importGlobalGids,  /* Global IDs of vertices to be sent to me */
        &importLocalGids,   /* Local IDs of vertices to be sent to me */
        &importProcs,    /* Process rank for source of each incoming vertex */
        &importToPart,   /* New partition for each incoming vertex */
        &numExport,      /* Number of vertices I must send to other processes*/
        &exportGlobalGids,  /* Global IDs of the vertices I must send */
        &exportLocalGids,   /* Local IDs of the vertices I must send */
        &exportProcs,    /* Process to which I send each of the vertices */
        &exportToPart);  /* Partition to which each vertex will belong */

  if (rc != ZOLTAN_OK){
    printf("sorry...\n");
    MPI_Finalize();
    Zoltan_Destroy(&zz);
    exit(0);
  }

  /******************************************************************
  ** Visualize the graph partitioning before and after calling Zoltan.
  ******************************************************************/

  parts = (int *)malloc(sizeof(int) * myGraph.numMyVertices);

  for (i=0; i < myGraph.numMyVertices; i++){
    parts[i] = myRank;
  }

  if (myRank== 0){
    printf("\nGraph partition before calling Zoltan\n");
  }

  showGraphPartitions(myRank, myGraph.numMyVertices, myGraph.vertexGID, parts, numProcs);

  for (i=0; i < numExport; i++){
    parts[exportLocalGids[i]] = exportToPart[i];
  }

  if (myRank == 0){
    printf("Graph partition after calling Zoltan\n");
  }

  showGraphPartitions(myRank, myGraph.numMyVertices, myGraph.vertexGID, parts, numProcs);

  free(parts);

  /******************************************************************
  ** Free the arrays allocated by Zoltan_LB_Partition, and free
  ** the storage allocated for the Zoltan structure.
  ******************************************************************/

  Zoltan_LB_Free_Part(&importGlobalGids, &importLocalGids, 
                      &importProcs, &importToPart);
  Zoltan_LB_Free_Part(&exportGlobalGids, &exportLocalGids, 
                      &exportProcs, &exportToPart);

  Zoltan_Destroy(&zz);

  /**********************
  ** all done ***********
  **********************/

  MPI_Finalize();

  if (myGraph.numMyVertices > 0){
    free(myGraph.vertexGID);
    free(myGraph.nborIndex);
    if (myGraph.numAllNbors > 0){
      free(myGraph.nborGID);
      free(myGraph.nborProc);
    }
  }

  return 0;
}
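
A hedged reconstruction of the GRAPH_DATA layout implied by the fields the example frees at the end (a CSR-style adjacency list); the real definition ships with the example's query functions:

#include <zoltan.h>

typedef struct {
  int numMyVertices;          /* vertices owned by this rank */
  int numAllNbors;            /* total length of the neighbor arrays */
  ZOLTAN_ID_TYPE *vertexGID;  /* global ID of each owned vertex */
  int *nborIndex;             /* CSR offsets: neighbors of vertex i are
                                 nborGID[nborIndex[i] .. nborIndex[i+1]-1] */
  ZOLTAN_ID_TYPE *nborGID;    /* global IDs of those neighbors */
  int *nborProc;              /* rank that owns each neighbor */
} GRAPH_DATA;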
Code example #16
File: simpleBLOCK.c Project: haripandey/trilinos
int main(int argc, char *argv[])
{
  int rc, i;
  int myRank, numProcs;
  float ver;
  struct Zoltan_Struct *zz;
  int changes, numGidEntries, numLidEntries, numImport, numExport;
  ZOLTAN_ID_PTR importGlobalGids, importLocalGids;
  ZOLTAN_ID_PTR exportGlobalGids, exportLocalGids; 
  int *importProcs, *importToPart, *exportProcs, *exportToPart;
  int *parts = NULL;

  FILE *fp;
  OBJECT_DATA myData;

  /******************************************************************
  ** Initialize MPI and Zoltan
  ******************************************************************/

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);

  rc = Zoltan_Initialize(argc, argv, &ver);

  if (rc != ZOLTAN_OK){
    printf("Error initializing Zoltan\n");
    MPI_Finalize();
    exit(0);
  }

  /******************************************************************
  ** Read objects from input file and distribute them unevenly
  ******************************************************************/

  fp = fopen(fname, "r");
  if (!fp){
    if (myRank == 0) fprintf(stderr,"ERROR: Cannot open %s\n",fname);
    MPI_Finalize();
    exit(1);
  }
  fclose(fp);

  read_input_objects(myRank, numProcs, fname, &myData);

  /******************************************************************
  ** Create a Zoltan library structure for this instance of load
  ** balancing.  Set the parameters and query functions.
  ******************************************************************/

  zz = Zoltan_Create(MPI_COMM_WORLD);

  /* General parameters */

  Zoltan_Set_Param(zz, "LB_METHOD", "BLOCK");  /* Zoltan method: "BLOCK" */
  Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1"); /* global ID is 1 integer */
  Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "1"); /* local ID is 1 integer */
  Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "0"); /* we omit object weights */

  /* Query functions */

  Zoltan_Set_Num_Obj_Fn(zz, get_number_of_objects, &myData);
  Zoltan_Set_Obj_List_Fn(zz, get_object_list, &myData);

  /******************************************************************
  ** Call Zoltan to partition the objects.
  ******************************************************************/

  rc = Zoltan_LB_Partition(zz, /* input (all remaining fields are output) */
        &changes,        /* 1 if partitioning was changed, 0 otherwise */ 
        &numGidEntries,  /* Number of integers used for a global ID */
        &numLidEntries,  /* Number of integers used for a local ID */
        &numImport,      /* Number of objects to be sent to me */
        &importGlobalGids,  /* Global IDs of objects to be sent to me */
        &importLocalGids,   /* Local IDs of objects to be sent to me */
        &importProcs,    /* Process rank for source of each incoming object */
        &importToPart,   /* New partition for each incoming object */
        &numExport,      /* Number of objects I must send to other processes*/
        &exportGlobalGids,  /* Global IDs of the objects I must send */
        &exportLocalGids,   /* Local IDs of the objects I must send */
        &exportProcs,    /* Process to which I send each of the objects */
        &exportToPart);  /* Partition to which each object will belong */

  if (rc != ZOLTAN_OK){
    printf("Error in Zoltan library\n");
    MPI_Finalize();
    Zoltan_Destroy(&zz);
    exit(0);
  }

  /******************************************************************
  ** Visualize the object partitioning before and after calling Zoltan.
  **
  ** In this example, partition number equals process rank.
  ******************************************************************/

  parts = (int *)malloc(sizeof(int) * myData.numMyObjects);

  for (i=0; i < myData.numMyObjects; i++){
    parts[i] = myRank;
  }

  if (myRank== 0){
    printf("\nObject partition assignments before calling Zoltan\n");
  }

  showSimpleMeshPartitions(myRank, myData.numMyObjects, myData.myGlobalIDs, parts);

  for (i=0; i < numExport; i++){
    parts[exportLocalGids[i]] = exportToPart[i];
  }

  if (myRank == 0){
    printf("Object partition assignments after calling Zoltan\n");
  }

  showSimpleMeshPartitions(myRank, myData.numMyObjects, myData.myGlobalIDs, parts);

  /******************************************************************
  ** Free the arrays allocated by Zoltan_LB_Partition, and free
  ** the storage allocated for the Zoltan structure.
  ******************************************************************/

  Zoltan_LB_Free_Part(&importGlobalGids, &importLocalGids, 
                      &importProcs, &importToPart);
  Zoltan_LB_Free_Part(&exportGlobalGids, &exportLocalGids, 
                      &exportProcs, &exportToPart);

  Zoltan_Destroy(&zz);

  MPI_Finalize();

  return 0;
}
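
A hedged sketch of the two query callbacks the BLOCK example registers, assuming OBJECT_DATA carries only the numMyObjects and myGlobalIDs fields the example itself touches:

#include <zoltan.h>

typedef struct {
  int numMyObjects;            /* objects owned by this rank */
  ZOLTAN_ID_TYPE *myGlobalIDs; /* their global IDs */
} OBJECT_DATA;

/* ZOLTAN_NUM_OBJ_FN: how many objects this rank currently owns. */
static int get_number_of_objects(void *data, int *ierr)
{
  OBJECT_DATA *objs = (OBJECT_DATA *)data;
  *ierr = ZOLTAN_OK;
  return objs->numMyObjects;
}

/* ZOLTAN_OBJ_LIST_FN: fill in global and local IDs (1 integer each, as the
 * NUM_GID_ENTRIES/NUM_LID_ENTRIES parameters above declare). */
static void get_object_list(void *data, int sizeGID, int sizeLID,
                            ZOLTAN_ID_PTR globalID, ZOLTAN_ID_PTR localID,
                            int wgt_dim, float *obj_wgts, int *ierr)
{
  int i;
  OBJECT_DATA *objs = (OBJECT_DATA *)data;
  *ierr = ZOLTAN_OK;
  for (i = 0; i < objs->numMyObjects; i++){
    globalID[i] = objs->myGlobalIDs[i];
    localID[i] = (ZOLTAN_ID_TYPE)i;  /* local ID is just the array index */
  }
}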
Code example #17
File: zcol.c Project: haripandey/trilinos
int main(int argc, char *argv[])
{
    int rc, i, ngids, maxcol, ncolors;
    float ver;
    struct Zoltan_Struct *zz=NULL;
#ifdef ZOLTANV31
    int numGidEntries, numLidEntries;
#else
    ZOLTAN_GRAPH_EVAL graph;
#endif
    int *color;
    ZOLTAN_ID_PTR gid_list;
    UZData guz, *uz=&guz;
    int msg_tag = 9999;
    int nlvtx, next, maxdeg=0;
    double times[9]={0.,0.,0.,0.,0.,0.,0.,0.}; /* Used for timing measurements */
    double gtimes[9]={0.,0.,0.,0.,0.,0.,0.,0.}; /* Used for timing measurements */
    
    /******************************************************************
     ** Initialize MPI and Zoltan
     ******************************************************************/

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &uz->myRank);
    MPI_Comm_size(MPI_COMM_WORLD, &uz->numProcs);

    MPI_Barrier(MPI_COMM_WORLD);
    times[0] = u_wseconds();
    
    rc = Zoltan_Initialize(argc, argv, &ver);
    if (rc != ZOLTAN_OK){
        fprintf(stderr, "Sorry Zoltan initialize failed...\n");
        goto End;
    }
    zz = Zoltan_Create(MPI_COMM_WORLD);

    if (argc<3 && !uz->myRank) {
        fprintf(stderr, "usage: %s [meshR] [meshC] [X-point stencil] [procR] [procC] [ws-beta] [<ZoltanParam>=<Val>] ...\n\n", argv[0]);
        fprintf(stderr, "ws-beta: is the probablity of adding an edge to a vertex to generate Watts-Strogatz graphs\n");
        fprintf(stderr, "Valid values for Stencil are 5, 7 and 9\n");
        fprintf(stderr, "Zoltan Coloring Parameters and values are\n");
        fprintf(stderr, "\tDISTANCE        : 1 or 2\n");
        fprintf(stderr, "\tSUPERSTEP_SIZE  : suggested >= 100\n"); 
        fprintf(stderr, "\tCOMM_PATTERN    : S or A\n");
        fprintf(stderr, "\tCOLOR_ORDER     : I, B, U\n");
        fprintf(stderr, "\tCOLORING_METHOD : F (for now)\n");
        fprintf(stderr, "\n");
    }

    uz->procR = uz->procC = 0;
    uz->meshR = uz->meshC = 1024;
    uz->stencil = 9;

    if (argc>1)
        uz->meshR = atoi(argv[1]);
    if (argc>2)
        uz->meshC = atoi(argv[2]);
    if (argc>3)
        uz->stencil = atoi(argv[3]);
    if (uz->stencil!=5 && uz->stencil!=7 && uz->stencil!=9) {
        fprintf(stderr, "\t invalid stencil value. Valid values are 5, 7 and 9. Assumed 9.\n");
        uz->stencil = 9;
    }
    --uz->stencil;

    if (argc>4)
        uz->procR = atoi(argv[4]);
    if (argc>5)
        uz->procC = atoi(argv[5]);
    if (uz->procR <= 0 || uz->procC <= 0)
        computeProcMesh(uz);
    
    if (uz->procR*uz->procC!=uz->numProcs) {
        fprintf(stderr, "#Procs=%d but requested %dx%d Proc Mesh Partitioning...\n", uz->numProcs, uz->procR, uz->procC);
        goto End;
    }

    if (argc>6)
        uz->beta = atof(argv[6]);
    else
        uz->beta = 0.0;
    
    /* compute which part of mesh I will compute */
    uz->myR = uz->myRank / uz->procC;
    uz->myC = uz->myRank % uz->procC;

    uz->_sr = uz->myR * (uz->meshR / uz->procR);
    uz->_er = (uz->myR+1) * (uz->meshR / uz->procR);
    if (uz->_er>uz->meshR)
        uz->_er = uz->meshR;
    uz->_sc = uz->myC * (uz->meshC / uz->procC);
    uz->_ec = (uz->myC+1) * (uz->meshC / uz->procC);
    if (uz->_ec>uz->meshC)
        uz->_ec = uz->meshC;


    if ( (uz->meshR % uz->procR) !=0 || (uz->meshC % uz->procC)!=0) {
        printf("Mesh dimensions are not divisible with proc mesh.\nRequested mesh is %dx%d and proc mesh is %d x %d\n", uz->meshR, uz->meshC, uz->procR, uz->procC);
        exit(1);
    }
    nlvtx= (uz->_er-uz->_sr) * (uz->_ec-uz->_sc);

    if (uz->myRank==0)
        printf("Running %s on %d x %d processor mesh, generating %d-point %d x %d mesh with beta=%.3lf\n", argv[0], uz->procR, uz->procC, uz->stencil+1, uz->meshR, uz->meshC, uz->beta);

    times[1] = u_wseconds();    
    uz->numredge = 0;
    uz->redgeto = NULL;
    if (uz->beta>0) { /* create random edges for WS graph */
        int ngvtx=uz->meshC*uz->meshR, trsh=(int) (uz->beta*100.0);
        int ierr=0;
        int *edges=NULL, *redges=NULL, *proclist=NULL, nedge;
        ZOLTAN_COMM_OBJ *plan;
            
        uz->redgeto = (int *) malloc(nlvtx*sizeof(int));
        for (i=0; i<nlvtx; ++i) {
            int rv = Zoltan_Rand_InRange(NULL, 100);
            if ( rv < trsh) {
                if ((uz->redgeto[i] = Zoltan_Rand_InRange(NULL,  ngvtx))==gIDfLID(i)) /* is it a self edge */
                    uz->redgeto[i] = -1;
                else 
                    ++uz->numredge;
            } else
                uz->redgeto[i] = -1;
        }

        edges = (int *) malloc(sizeof(int)*2*uz->numredge);
        proclist = (int *) malloc(sizeof(int)*uz->numredge);
        next = 0;
        for (i=0; i<nlvtx; ++i)
            if (uz->redgeto[i]>0) {
                edges[2*next] = uz->redgeto[i];
                edges[2*next+1] = gIDfLID(i);
                proclist[next] = pIDfGID(uz->redgeto[i]);
                ++next;
            }

        ierr = Zoltan_Comm_Create(&plan, uz->numredge, proclist, MPI_COMM_WORLD,
                                  msg_tag, &nedge);
        
        redges = (int *) malloc(sizeof(int)*2*nedge);
        
        --msg_tag;
        ierr |= Zoltan_Comm_Do(plan, msg_tag, (char *) edges, 2*sizeof(int),
                               (char *) redges);
        ierr |= Zoltan_Comm_Destroy(&plan);
        free(proclist);
        free(edges);
                
        if (ierr) {
            printf("error while communicating edges!\n");
            exit(1);
        }
        xadj = (int *) calloc(1+nlvtx, sizeof(int));
        adj = (int *) malloc(sizeof(int)*nedge);
        for (i=0; i<nedge; ++i)  {
            if (redges[2*i] < gID(uz->_sr, uz->_sc) || redges[2*i] >= gID(uz->_er, uz->_ec)) {
                printf("[%d/%d] received gid=%d doesn't blong to processor range [%d, %d) should go to proc %d\n", uz->myRank, uz->numProcs, redges[2*i],  gID(uz->_sr, uz->_sc), gID(uz->_er, uz->_ec), pIDfGID(redges[2*i]));
            }
                
            ++xadj[lIDfGID(redges[2*i])];
        }
        xadj[nlvtx] = nedge;
        maxdeg = xadj[0];
        for (i=1; i<nlvtx; ++i) {
            maxdeg = xadj[i]>maxdeg ? xadj[i] : maxdeg;
            xadj[i] += xadj[i-1];
        }
        
        for (i=0; i<nedge; ++i) {
            int u = lIDfGID(redges[2*i]);
            int v = redges[2*i+1];
            adj[--xadj[u]] = v;
        }
        free(redges);                    
    }
    maxdeg += uz->stencil+1;
    adjTemp = (int *) malloc(sizeof(int)*2*maxdeg);
    times[2] = u_wseconds();
    
    /*
      printf("My rank %d/%d at proc-mesh loc (%d, %d) generating [%d, %d) x [%d, %d) + %d random edges TotEdge=%d\n", uz->myRank, uz->numProcs, uz->myR, uz->myC, uz->_sr, uz->_er, uz->_sc, uz->_ec, uz->numredge, xadj[nlvtx]);  */
    printStats("Number of Vertices  ", nlvtx, uz->myRank, uz->numProcs);
    if (xadj)
        printStats("Number of Rand Edges", xadj[nlvtx], uz->myRank, uz->numProcs);

    
    /* General parameters */
#ifndef ZOLTANV31
    Zoltan_Set_Param(zz, "GRAPH_BUILD_TYPE", "FAST_NO_DUP");
#endif

    /* General parameters */
    Zoltan_Set_Param(zz, "DEBUG_LEVEL", "3");
    Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1"); 
    Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "1");
    Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "0");


    /* coloring parameters */
    Zoltan_Set_Param(zz, "SUPERSTEP_SIZE", "500"); /* let's make S=500 default */
    for (i=7; i<argc; ++i) {
        char param[256], *eq;

        if (!uz->myRank)
            printf("processing argv[%d]='%s'\n", i, argv[i]);
        strncpy(param, argv[i], sizeof(param)-1);
        param[sizeof(param)-1] = '\0';  /* strncpy may not terminate */
        eq = strchr(param, '=');
        if (!eq) {
            fprintf(stderr, "invalid argument '%s', Zoltan Paramters should be in the format <ZoltanParam>=<Val>\n", param);
            goto End;
        }
        *eq = 0;
        Zoltan_Set_Param(zz, param, eq+1);
    }


#if 0    
    /* Graph parameters */
    Zoltan_Set_Param(zz, "CHECK_GRAPH", "2");
#endif

    /* set call backs */
    Zoltan_Set_Num_Obj_Fn(zz, get_number_of_objects, uz);
    Zoltan_Set_Obj_List_Fn(zz, get_object_list, uz);
    Zoltan_Set_Num_Edges_Multi_Fn(zz, get_num_edges_list, uz);
    Zoltan_Set_Edge_List_Multi_Fn(zz, get_edge_list, uz);

#if 0    
#ifndef ZOLTANV31
    Zoltan_LB_Eval_Graph(zz, 0, &graph);

    if (!uz->myRank) {
        printf("EdgeCut   Min=%8.0f  Max=%8.0f  Sum=%8.0f\n", graph.cuts[EVAL_GLOBAL_MIN], graph.cuts[EVAL_GLOBAL_MAX], graph.cuts[EVAL_GLOBAL_SUM]);
        printf("#Vertices Min=%8.0f  Max=%8.0f  Sum=%8.0f imbal=%.2f\n", graph.nobj[EVAL_GLOBAL_MIN], graph.nobj[EVAL_GLOBAL_MAX], graph.nobj[EVAL_GLOBAL_SUM], graph.obj_imbalance);        
    }
#endif
#endif

    /* now color */
    ngids = get_number_of_objects(uz, &rc);

    gid_list = (ZOLTAN_ID_PTR) malloc(sizeof(ZOLTAN_ID_TYPE) * ngids);
#ifndef ZOLTANV31
    next = 0;
    for (i=uz->_sr; i<uz->_er; ++i) {
        int j;
        for (j=uz->_sc; j<uz->_ec; ++j) {
            gid_list[next++] = i*uz->meshC + j;
        }
    }    
#endif
    color = (int *) malloc(sizeof(int) * ngids);    

    MPI_Barrier(MPI_COMM_WORLD);
    times[3] = u_wseconds();    
#ifdef ZOLTANV31
    rc = Zoltan_Color(zz, /* input (all remaining fields are output) */
                      &numGidEntries,  /* Number of integers used for a global ID */
                      &numLidEntries,  /* Number of integers used for a local ID */
                      ngids,           /* #objects to color in this proc */
                      gid_list,        /* global ids of colored vertices */
                      NULL,            /* we ignore local ids */
                      color);          /* result color */    
#else    
    rc = Zoltan_Color(zz, /* input (all remaining fields are output) */
                      1,  /* Number of integers used for a global ID */
                      ngids,           /* #objects to color in this proc */
                      gid_list,        /* global ids of colored vertices */
                      color);          /* result color */
#endif
    MPI_Barrier(MPI_COMM_WORLD);
    times[4] = u_wseconds();
    MPI_Reduce(times, gtimes, 5, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);


    if (rc != ZOLTAN_OK) 
        fprintf(stderr, "Zoltan_Color failed with return code %d...\n", rc);

    for (maxcol=i=0; i<ngids; ++i)
        if (color[i] > maxcol)
            maxcol = color[i];
    MPI_Reduce(&maxcol, &ncolors, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
    if (uz->myRank==0) {
        struct rusage usage;
                
        printf("%s setup             Proc-0: %8.2lf   Max: %8.2lf\n", argv[0], times[1]-times[0], gtimes[1]-gtimes[0]);
        printf("%s gen rand edges    Proc-0: %8.2lf   Max: %8.2lf\n", argv[0], times[2]-times[1], gtimes[2]-gtimes[1]);
        printf("%s set gids          Proc-0: %8.2lf   Max: %8.2lf\n", argv[0], times[3]-times[2], gtimes[3]-gtimes[2]);
        printf("%s Zoltan_Color call Proc-0: %8.2lf   Max: %8.2lf\n", argv[0], times[4]-times[3], gtimes[4]-gtimes[3]);
        printf("%s Coloring Time    : %.2lf   # Colors used : %d\n", argv[0], gtimes[4]-gtimes[0], ncolors);
        getrusage(RUSAGE_SELF, &usage);
        printf("%s maxrss=%ld minflt=%ld majflt=%ld nswap=%ld\n", argv[0], usage.ru_maxrss, usage.ru_minflt, usage.ru_majflt, usage.ru_nswap);                
    }
    
#ifdef _DEBUG
    saveColor(argv[0], uz, (int *) gid_list, color, ngids);
#endif

    /******************************************************************
     ** Clean up
     ******************************************************************/

    if (gid_list)
        free(gid_list);
    if (color)
        free(color);
    if (xadj)
        free(xadj);
    if (adj)
        free(adj);
    if (adjTemp)
        free(adjTemp);
    if (uz->redgeto)
        free(uz->redgeto);

End:    
    Zoltan_Destroy(&zz);
    MPI_Finalize();

    return 0;
}
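The argument loop above accepts extra Zoltan settings as <ZoltanParam>=<Val> pairs. Below is a minimal, self-contained sketch of that pattern; it assumes only the documented Zoltan_Set_Param call, and the helper name and fixed buffer size are illustrative, not part of the example above.

#include <stdio.h>
#include <string.h>
#include <zoltan.h>

static int set_zoltan_params_from_args(struct Zoltan_Struct *zz,
                                       int argc, char **argv, int first)
{
    int i;
    for (i = first; i < argc; ++i) {
        char param[256], *eq;
        strncpy(param, argv[i], sizeof(param) - 1);
        param[sizeof(param) - 1] = '\0';  /* strncpy may not NUL-terminate */
        eq = strchr(param, '=');
        if (!eq) {
            fprintf(stderr, "expected <ZoltanParam>=<Val>, got '%s'\n", param);
            return -1;
        }
        *eq = '\0';                       /* split name and value in place */
        if (Zoltan_Set_Param(zz, param, eq + 1) != ZOLTAN_OK)
            return -1;
    }
    return 0;
}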
Code example #18
0
File: migrateGRAPH.c  Project: gitter-badger/quinoa
int main(int argc, char *argv[])
{
  int i, rc;
  int myRank, numProcs;
  float ver;
  struct Zoltan_Struct *zz;
  int changes, numGidEntries, numLidEntries, numImport, numExport, start_gid, num_nbors;
  ZOLTAN_ID_PTR importGlobalGids, importLocalGids, exportGlobalGids, exportLocalGids;
  int *importProcs, *importToPart, *exportProcs, *exportToPart;
  int *parts=NULL;
  ZOLTAN_ID_PTR lids=NULL;
  FILE *fp;
  struct Zoltan_DD_Struct *dd;
  GRAPH_DATA myGraph;
  int gid_length = 1;   /* our global IDs consist of 1 integer */
  int lid_length = 1;   /* our local IDs consist of 1 integer */

  /******************************************************************
  ** Initialize MPI and Zoltan
  ******************************************************************/

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);

  rc = Zoltan_Initialize(argc, argv, &ver);

  if (rc != ZOLTAN_OK){
    printf("sorry...\n");
    MPI_Finalize();
    exit(1);
  }

  /******************************************************************
  ** Read graph from input file and distribute it 
  ******************************************************************/

  fp = fopen(fname, "r");
  if (!fp){
    if (myRank == 0) fprintf(stderr,"ERROR: Cannot open %s\n",fname);
    MPI_Finalize();
    exit(1);
  }
  fclose(fp);

  read_input_file(myRank, numProcs, fname, &myGraph);

  /*fprintf(stderr,"%d have %d objects\n",myRank,myGraph.numMyVertices);*/

  /******************************************************************
  ** Create a distributed data directory which maps vertex
  ** global IDs to their current partition number.  We'll use this
  ** after migrating vertices, to look up the new partitions of
  ** our vertices' neighbors.
  **
  ** Our local IDs (array "lids") are of type ZOLTAN_ID_TYPE because
  ** we are using Zoltan's distributed data directory.  It assumes
  ** that a global ID is a sequence of "gid_length" ZOLTAN_ID_TYPEs.
  ** It assumes that a local ID is a sequence of "lid_length"
  ** ZOLTAN_ID_TYPEs.
  ******************************************************************/

  rc = Zoltan_DD_Create(&dd, MPI_COMM_WORLD, 
                           gid_length,    /* length of a global ID */
                           lid_length,    /* length of a local ID */
                           0,             /* length of user data  */
                           myGraph.numMyVertices,  /* hash table size */
                           0);                     /* debug level */

  parts = malloc(myGraph.numMyVertices * sizeof(int));
  lids = malloc(myGraph.numMyVertices * sizeof(ZOLTAN_ID_TYPE));
  
  for (i=0; i < myGraph.numMyVertices; i++){
    parts[i] = myRank;   /* part number of this vertex */
    lids[i] = (ZOLTAN_ID_TYPE)i;         /* local ID on my process for this vertex */
  }
  
  rc = Zoltan_DD_Update(dd, 
                        myGraph.vertexGID, 
                        lids,
                        NULL,
                        parts,
                        myGraph.numMyVertices);
  

  myGraph.dd = dd;

  /******************************************************************
  ** Create a Zoltan library structure for this instance of load
  ** balancing.  Set the parameters and query functions that will
  ** govern the library's calculation.  See the Zoltan User's
  ** Guide for the definition of these and many other parameters.
  ******************************************************************/

  zz = Zoltan_Create(MPI_COMM_WORLD);

  /* General parameters */

  Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0");
  Zoltan_Set_Param(zz, "LB_METHOD", "GRAPH");
  Zoltan_Set_Param(zz, "LB_APPROACH", "PARTITION");
  Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1"); 
  Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "1");
  Zoltan_Set_Param(zz, "RETURN_LISTS", "ALL");

  /* Graph parameters */

  Zoltan_Set_Param(zz, "CHECK_GRAPH", "2"); 
  Zoltan_Set_Param(zz, "PHG_EDGE_SIZE_THRESHOLD", ".35");  /* 0-remove all, 1-remove none */

  /* Query functions, defined in this source file */

  Zoltan_Set_Num_Obj_Fn(zz, get_number_of_vertices, &myGraph);
  Zoltan_Set_Obj_List_Fn(zz, get_vertex_list, &myGraph);
  Zoltan_Set_Num_Edges_Multi_Fn(zz, get_num_edges_list, &myGraph);
  Zoltan_Set_Edge_List_Multi_Fn(zz, get_edge_list, &myGraph);

  Zoltan_Set_Obj_Size_Multi_Fn(zz, get_message_sizes,&myGraph);
  Zoltan_Set_Pack_Obj_Multi_Fn(zz, pack_object_messages,&myGraph);
  Zoltan_Set_Unpack_Obj_Multi_Fn(zz, unpack_object_messages,&myGraph);
  Zoltan_Set_Mid_Migrate_PP_Fn(zz, mid_migrate,&myGraph);

  /******************************************************************
  ** Visualize the graph partitioning before calling Zoltan.
  ******************************************************************/

  if (myRank == 0){
    printf("\nGraph partition before calling Zoltan\n");
  }

  showGraphPartitions(myRank, myGraph.dd);

  /******************************************************************
  ** Zoltan can now partition the simple graph.
  ** In this simple example, we assume the number of partitions is
  ** equal to the number of processes.  Process rank 0 will own
  ** partition 0, process rank 1 will own partition 1, and so on.
  ******************************************************************/

  rc = Zoltan_LB_Partition(zz, /* input (all remaining fields are output) */
        &changes,        /* 1 if partitioning was changed, 0 otherwise */ 
        &numGidEntries,  /* Number of integers used for a global ID */
        &numLidEntries,  /* Number of integers used for a local ID */
        &numImport,      /* Number of vertices to be sent to me */
        &importGlobalGids,  /* Global IDs of vertices to be sent to me */
        &importLocalGids,   /* Local IDs of vertices to be sent to me */
        &importProcs,    /* Process rank for source of each incoming vertex */
        &importToPart,   /* New partition for each incoming vertex */
        &numExport,      /* Number of vertices I must send to other processes*/
        &exportGlobalGids,  /* Global IDs of the vertices I must send */
        &exportLocalGids,   /* Local IDs of the vertices I must send */
        &exportProcs,    /* Process to which I send each of the vertices */
        &exportToPart);  /* Partition to which each vertex will belong */

  if (rc != ZOLTAN_OK){
    printf("sorry...\n");
    Zoltan_Destroy(&zz);
    MPI_Finalize();
    exit(1);
  }

  /*fprintf(stderr,"%d export %d import %d\n",myRank,numExport,numImport);*/

  /******************************************************************
  ** Update the data directory with the new partition numbers
  ******************************************************************/

  for (i=0; i < numExport; i++){
    parts[exportLocalGids[i]] = exportToPart[i];
  }

  rc = Zoltan_DD_Update(dd, 
                        myGraph.vertexGID, 
                        lids,
                        NULL,
                        parts,
                        myGraph.numMyVertices);

  /******************************************************************
  ** Migrate vertices to new partitions
  ******************************************************************/

  rc = Zoltan_Migrate(zz, 
                      numImport, importGlobalGids, importLocalGids,
                      importProcs, importToPart,
                      numExport, exportGlobalGids, exportLocalGids,
                      exportProcs, exportToPart);


  /******************************************************************
  ** Use the data directory to find neighbors' partitions
  ******************************************************************/

  start_gid = myGraph.numMyVertices - numImport;
  num_nbors = myGraph.nborIndex[myGraph.numMyVertices] - myGraph.nborIndex[start_gid];

  /* nborGID and nborPart are edge-indexed, so offset by nborIndex[start_gid] */
  rc = Zoltan_DD_Find(dd,
             (ZOLTAN_ID_PTR)(myGraph.nborGID + myGraph.nborIndex[start_gid]), NULL, NULL,
              myGraph.nborPart + myGraph.nborIndex[start_gid], num_nbors, NULL);

  /******************************************************************
  ** Visualize the graph partitioning after calling Zoltan.
  ******************************************************************/

  if (myRank == 0){
    printf("Graph partition after calling Zoltan\n");
  }
  showGraphPartitions(myRank, myGraph.dd);

  /******************************************************************
  ** Free the arrays allocated by Zoltan_LB_Partition, and free
  ** the storage allocated for the Zoltan structure.
  ******************************************************************/

  Zoltan_LB_Free_Part(&importGlobalGids, &importLocalGids, 
                      &importProcs, &importToPart);
  Zoltan_LB_Free_Part(&exportGlobalGids, &exportLocalGids, 
                      &exportProcs, &exportToPart);

  Zoltan_Destroy(&zz);

  /**********************
  ** all done ***********
  **********************/

  MPI_Finalize();

  if (myGraph.vertex_capacity > 0){
    free(myGraph.vertexGID);
    free(myGraph.nborIndex);
    if (myGraph.nbor_capacity > 0){
      free(myGraph.nborGID);
      free(myGraph.nborPart);
    }
  }

  if (parts) free(parts);
  if (lids) free(lids);

  return 0;
}
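The heart of this example is Zoltan's distributed data directory: partition lookups stay correct across migration because each rank re-publishes its GID-to-partition mapping with Zoltan_DD_Update and then resolves neighbor GIDs with Zoltan_DD_Find. A stripped-down sketch of that lifecycle, assuming only the documented Zoltan_DD_* calls; the GIDs and hash table size here are invented for illustration.

#include <mpi.h>
#include <zoltan_dd.h>

static void dd_lifecycle_sketch(MPI_Comm comm, int myRank)
{
  struct Zoltan_DD_Struct *dd;
  ZOLTAN_ID_TYPE gids[2];
  int parts[2], found[2];

  gids[0] = 2 * (ZOLTAN_ID_TYPE)myRank;  /* two GIDs owned by this rank */
  gids[1] = gids[0] + 1;
  parts[0] = parts[1] = myRank;          /* both start in my partition */

  /* gid length 1, no local IDs, no user data, small hash table, no debug */
  Zoltan_DD_Create(&dd, comm, 1, 0, 0, 16, 0);

  /* publish my GID -> partition mapping */
  Zoltan_DD_Update(dd, gids, NULL, NULL, parts, 2);

  /* any rank can now resolve the current partition of any GID */
  Zoltan_DD_Find(dd, gids, NULL, NULL, found, 2, NULL);

  Zoltan_DD_Destroy(&dd);
}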
Code example #19
0
File: stressTestRIB.c  Project: agrippa/Trilinos
int main(int argc, char *argv[])
{
  int rc, i; 
  ZOLTAN_GNO_TYPE numGlobalVertices;
  float ver;
  char dimstring[16];
  double min, max, avg, local;

  struct Zoltan_Struct *zz;
  int changes, numGidEntries, numLidEntries, numImport, numExport;
  ZOLTAN_ID_PTR importGlobalGids, importLocalGids, exportGlobalGids, exportLocalGids; 
  int *importProcs, *importToPart, *exportProcs, *exportToPart;

#ifdef HOST_LINUX
  signal(SIGSEGV, meminfo_signal_handler);
  signal(SIGINT, meminfo_signal_handler);
  signal(SIGTERM, meminfo_signal_handler);
  signal(SIGABRT, meminfo_signal_handler);
  signal(SIGFPE, meminfo_signal_handler);
#endif

  /******************************************************************
  ** Problem size
  ******************************************************************/

  numGlobalVertices = NUM_GLOBAL_VERTICES;
  vertexWeightDim = VERTEX_WEIGHT_DIMENSION;
  vertexDim = VERTEX_DIMENSION;

  if (argc > 1){
    sscanf(argv[1], "%zd", &numGlobalVertices);
    if (argc > 2){
      vertexWeightDim = atoi(argv[2]);
      if (argc > 3){
        vertexDim = atoi(argv[3]);
      }
    }
  }

  sprintf(dimstring,"%d",vertexWeightDim);

  /******************************************************************
  ** Initialize MPI and Zoltan
  ******************************************************************/

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);

  rc = Zoltan_Initialize(argc, argv, &ver);

  if (rc != ZOLTAN_OK){
    printf("sorry...\n");
    MPI_Finalize();
    exit(1);
  }

  Zoltan_Memory_Debug(2);

  /******************************************************************
  ** Create vertices
  ******************************************************************/

  rc = create_vertices(numGlobalVertices, vertexDim, vertexWeightDim, numProcs, myRank);

  if (rc){
    fprintf(stderr,"Process rank %d: insufficient memory\n",myRank);
    MPI_Finalize();
    exit(1);
  }

  first_gid = vertex_gid[myRank];

  /******************************************************************
  ** Create a Zoltan library structure for this instance of load
  ** balancing.  Set the parameters and query functions that will
  ** govern the library's calculation.  See the Zoltan User's
  ** Guide for the definition of these and many other parameters.
  ******************************************************************/

  zz = Zoltan_Create(MPI_COMM_WORLD);

  /* General parameters */

  Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0");
  Zoltan_Set_Param(zz, "LB_METHOD", "RIB");
  Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1"); 
  Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "1");
  Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", dimstring);
  Zoltan_Set_Param(zz, "RETURN_LISTS", "ALL");

  /* RIB parameters */

  Zoltan_Set_Param(zz, "RIB_OUTPUT_LEVEL", "0");

  /* Query functions, to provide geometry to Zoltan */

  Zoltan_Set_Num_Obj_Fn(zz, get_number_of_objects, NULL);
  Zoltan_Set_Obj_List_Fn(zz, get_object_list, NULL);
  Zoltan_Set_Num_Geom_Fn(zz, get_num_geometry, NULL);
  Zoltan_Set_Geom_Multi_Fn(zz, get_geometry_list, NULL);
  Zoltan_Set_Part_Multi_Fn(zz, get_partition_list, NULL);

  /******************************************************************
  ** Zoltan can now partition the vertices in the simple mesh.
  ** In this simple example, we assume the number of partitions is
  ** equal to the number of processes.  Process rank 0 will own
  ** partition 0, process rank 1 will own partition 1, and so on.
  ******************************************************************/

  if (myRank == 0){
    printf("Run Zoltan\n");
  }

  rc = Zoltan_LB_Partition(zz, /* input (all remaining fields are output) */
        &changes,        /* 1 if partitioning was changed, 0 otherwise */ 
        &numGidEntries,  /* Number of integers used for a global ID */
        &numLidEntries,  /* Number of integers used for a local ID */
        &numImport,      /* Number of vertices to be sent to me */
        &importGlobalGids,  /* Global IDs of vertices to be sent to me */
        &importLocalGids,   /* Local IDs of vertices to be sent to me */
        &importProcs,    /* Process rank for source of each incoming vertex */
        &importToPart,   /* New partition for each incoming vertex */
        &numExport,      /* Number of vertices I must send to other processes*/
        &exportGlobalGids,  /* Global IDs of the vertices I must send */
        &exportLocalGids,   /* Local IDs of the vertices I must send */
        &exportProcs,    /* Process to which I send each of the vertices */
        &exportToPart);  /* Partition to which each vertex will belong */

  if (rc != ZOLTAN_OK){
    if (myRank == 0) printf("sorry...\n");
    Zoltan_Destroy(&zz);
    MPI_Finalize();
    exit(1);
  }

  /******************************************************************
  ** Check the balance of the partitions before running Zoltan.
  ** The query function get_partition_list() will give the 
  ** partitions of the vertices before we called Zoltan.
  ******************************************************************/

  if (myRank == 0){
    printf("\nBALANCE before running Zoltan\n");
  }

  rc = Zoltan_LB_Eval_Balance(zz, 1, NULL);

  if (rc != ZOLTAN_OK){
    printf("sorry first LB_Eval_Balance...\n");
    Zoltan_Destroy(&zz);
    MPI_Finalize();
    exit(1);
  }

  /******************************************************************
  ** Print out the balance of the new partitions.
  ******************************************************************/
 
  vertex_part = (int *)malloc(sizeof(int) * numLocalVertices);

  if (!vertex_part){
    printf("sorry memory error...\n");
    Zoltan_Destroy(&zz);
    MPI_Finalize();
    exit(1);
  }

  for (i=0; i < numLocalVertices; i++){
    vertex_part[i] = myRank;
  }

  if (numExport > 0){
    for (i=0; i < numExport; i++){
      vertex_part[exportLocalGids[i]] = exportToPart[i];
    }
  }

  if (myRank == 0){
    printf("\nBALANCE after running Zoltan\n");
  }

  rc = Zoltan_LB_Eval_Balance(zz, 1, NULL);

  if (rc != ZOLTAN_OK){
    printf("sorry second LB_Eval_Balance...\n");
    Zoltan_Destroy(&zz);
    MPI_Finalize();
    exit(1);
  }

  /******************************************************************
  ** Free the arrays allocated by Zoltan_LB_Partition, and free
  ** the storage allocated for the Zoltan structure.
  ******************************************************************/

  if (myRank == 0){
    printf("Free structures\n");
  }

  Zoltan_LB_Free_Part(&importGlobalGids, &importLocalGids, 
                      &importProcs, &importToPart);
  Zoltan_LB_Free_Part(&exportGlobalGids, &exportLocalGids, 
                      &exportProcs, &exportToPart);

  Zoltan_Destroy(&zz);

  if (vertex_part) free(vertex_part);
  if (v_x) free(v_x);
  if (v_y) free(v_y);
  if (v_z) free(v_z);
  if (vertex_weight) free(vertex_weight);
  if (vertex_gid) free(vertex_gid);

  /**********************
  ** all done ***********
  **********************/

  local= (double)Zoltan_Memory_Usage(ZOLTAN_MEM_STAT_MAXIMUM)/(1024.0*1024);
  MPI_Reduce(&local, &avg, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  avg /= (double)numProcs;
  MPI_Reduce(&local, &max, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
  MPI_Reduce(&local, &min, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);

  if (myRank == 0){
    printf("Total MBytes in use by test while Zoltan is running: %12.3lf\n",
             mbytes/(1024.0*1024));
    printf("Min/Avg/Max of maximum MBytes in use by Zoltan:    %12.3lf / %12.3lf / %12.3lf\n",
             min, avg, max);
  }

  MPI_Finalize();

  return 0;
}
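The memory report at the end condenses a per-rank value into min/avg/max on rank 0 with three reductions. That pattern is worth keeping as a small helper; a sketch follows (the function name is invented):

#include <mpi.h>

static void reduce_min_avg_max(double local, int numProcs, MPI_Comm comm,
                               double *min, double *avg, double *max)
{
  MPI_Reduce(&local, avg, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
  MPI_Reduce(&local, min, 1, MPI_DOUBLE, MPI_MIN, 0, comm);
  MPI_Reduce(&local, max, 1, MPI_DOUBLE, MPI_MAX, 0, comm);
  *avg /= (double)numProcs;  /* sum -> average; meaningful on rank 0 only */
}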
Code example #20
0
File: get3d_curl.cpp  Project: esimerkkitutkija/pamhd
int main(int argc, char* argv[])
{
    if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
        std::cerr << "Couldn't initialize MPI." << std::endl;
        abort();
    }

    MPI_Comm comm = MPI_COMM_WORLD;

    int rank = 0, comm_size = 0;
    if (MPI_Comm_rank(comm, &rank) != MPI_SUCCESS) {
        std::cerr << "Couldn't obtain MPI rank." << std::endl;
        abort();
    }
    if (MPI_Comm_size(comm, &comm_size) != MPI_SUCCESS) {
        std::cerr << "Couldn't obtain size of MPI communicator." << std::endl;
        abort();
    }


    // initialize Zoltan
    float zoltan_version;
    if (Zoltan_Initialize(argc, argv, &zoltan_version) != ZOLTAN_OK) {
        std::cerr << "Zoltan_Initialize failed." << std::endl;
        abort();
    }

    const unsigned int neighborhood_size = 0;
    const int max_refinement_level = 0;

    double
    old_norm_x = std::numeric_limits<double>::max(),
    old_norm_y = std::numeric_limits<double>::max(),
    old_norm_z = std::numeric_limits<double>::max();
    size_t old_nr_of_cells = 0;
    for (size_t nr_of_cells = 8; nr_of_cells <= 64; nr_of_cells *= 2) {

        dccrg::Dccrg<Cell, dccrg::Cartesian_Geometry> grid;

        const std::array<uint64_t, 3> grid_size{{nr_of_cells + 2, nr_of_cells + 2, nr_of_cells + 2}};

        if (
            not grid.initialize(
                grid_size,
                comm,
                "RANDOM",
                neighborhood_size,
                max_refinement_level,
                false,
                false,
                false
            )
        ) {
            std::cerr << __FILE__ << ":" << __LINE__
                      << ": Couldn't initialize grid."
                      << std::endl;
            abort();
        }

        const std::array<double, 3>
        cell_length{{
                double(3) / (grid_size[0] - 2),
                1.5 / (grid_size[1] - 2),
                double(4) / (grid_size[2] - 2)
            }},
        grid_start{{
                -1 - cell_length[0],
                -M_PI / 4 - cell_length[1],
                -2 - cell_length[2]
            }};

        const double cell_volume
            = cell_length[0] * cell_length[1] * cell_length[2];

        dccrg::Cartesian_Geometry::Parameters geom_params;
        geom_params.start = grid_start;
        geom_params.level_0_cell_length = cell_length;

        if (not grid.set_geometry(geom_params)) {
            std::cerr << __FILE__ << ":" << __LINE__
                      << ": Couldn't set grid geometry."
                      << std::endl;
            abort();
        }

        grid.balance_load();

        const auto all_cells = grid.get_cells();
        for (const auto& cell: all_cells) {
            auto* const cell_data = grid[cell];
            if (cell_data == NULL) {
                std::cerr << __FILE__ << ":" << __LINE__
                          << ": No data for cell " << cell
                          << std::endl;
                abort();
            }

            (*cell_data)[Vector()] = function(grid.geometry.get_center(cell));
        }
        grid.update_copies_of_remote_neighbors();

        std::vector<uint64_t> solve_cells;
        for (const auto& cell: all_cells) {
            const auto index = grid.mapping.get_indices(cell);
            if (
                index[0] > 0
                and index[0] < grid_size[0] - 1
                and index[1] > 0
                and index[1] < grid_size[1] - 1
                and index[2] > 0
                and index[2] < grid_size[2] - 1
            ) {
                solve_cells.push_back(cell);
            }
        }

        pamhd::divergence::get_curl(
            solve_cells,
            grid,
            [](Cell& cell_data) -> Vector::data_type& {
                return cell_data[Vector()];
            },
            [](Cell& cell_data) -> Curl::data_type& {
                return cell_data[Curl()];
            }
        );

        const double
        p_of_norm = 2,
        norm_x = get_diff_lp_norm(solve_cells, grid, p_of_norm, cell_volume, 0),
        norm_y = get_diff_lp_norm(solve_cells, grid, p_of_norm, cell_volume, 1),
        norm_z = get_diff_lp_norm(solve_cells, grid, p_of_norm, cell_volume, 2);

        if (norm_x > old_norm_x) {
            if (grid.get_rank() == 0) {
                std::cerr << __FILE__ << ":" << __LINE__
                          << ": X norm with " << nr_of_cells
                          << " cells " << norm_x
                          << " is larger than with " << nr_of_cells / 2
                          << " cells " << old_norm_x
                          << std::endl;
            }
            abort();
        }
        if (norm_y > old_norm_y) {
            if (grid.get_rank() == 0) {
                std::cerr << __FILE__ << ":" << __LINE__
                          << ": Y norm with " << nr_of_cells
                          << " cells " << norm_y
                          << " is larger than with " << nr_of_cells / 2
                          << " cells " << old_norm_y
                          << std::endl;
            }
            abort();
        }
        if (norm_z > old_norm_z) {
            if (grid.get_rank() == 0) {
                std::cerr << __FILE__ << ":" << __LINE__
                          << ": Z norm with " << nr_of_cells
                          << " cells " << norm_z
                          << " is larger than with " << nr_of_cells / 2
                          << " cells " << old_norm_z
                          << std::endl;
            }
            abort();
        }

        if (old_nr_of_cells > 0) {
            const double
            order_of_accuracy_x
                = -log(norm_x / old_norm_x)
                  / log(double(nr_of_cells) / old_nr_of_cells),
            order_of_accuracy_y
                = -log(norm_y / old_norm_y)
                  / log(double(nr_of_cells) / old_nr_of_cells),
            order_of_accuracy_z
                = -log(norm_z / old_norm_z)
                  / log(double(nr_of_cells) / old_nr_of_cells);

            if (order_of_accuracy_x < 1.95) {
                if (grid.get_rank() == 0) {
                    std::cerr << __FILE__ << ":" << __LINE__
                              << ": Order of accuracy from "
                              << old_nr_of_cells << " to " << nr_of_cells
                              << " is too low for x: " << order_of_accuracy_x
                              << std::endl;
                }
                abort();
            }
            if (order_of_accuracy_y < 1.95) {
                if (grid.get_rank() == 0) {
                    std::cerr << __FILE__ << ":" << __LINE__
                              << ": Order of accuracy from "
                              << old_nr_of_cells << " to " << nr_of_cells
                              << " is too low for y: " << order_of_accuracy_y
                              << std::endl;
                }
                abort();
            }
            if (order_of_accuracy_z < 1.95) {
                if (grid.get_rank() == 0) {
                    std::cerr << __FILE__ << ":" << __LINE__
                              << ": Order of accuracy from "
                              << old_nr_of_cells << " to " << nr_of_cells
                              << " is too low for z: " << order_of_accuracy_z
                              << std::endl;
                }
                abort();
            }
        }

        old_nr_of_cells = nr_of_cells;
        old_norm_x = norm_x;
        old_norm_y = norm_y;
        old_norm_z = norm_z;
    }

    MPI_Finalize();

    return EXIT_SUCCESS;
}
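The convergence test above uses the standard empirical order-of-accuracy estimate: given error norms at two grid resolutions, order = -log(e_new/e_old) / log(n_new/n_old), and a second-order scheme should score close to 2, which is what the 1.95 thresholds check. A one-function sketch, with an invented helper name:

#include <math.h>

static double order_of_accuracy(double old_norm, double new_norm,
                                double old_cells, double new_cells)
{
    /* e.g. halving the error when the resolution doubles gives order 1 */
    return -log(new_norm / old_norm) / log(new_cells / old_cells);
}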