Example #1
int
pl_split_by_plane(Polygon * pl,
                  Plane * pln,
                  PolyList * fparts,
                  PolyList * bparts)
{
	SplitGraph * sg = build_split_graph(pl, pln);

	switch (sg->r) {
		case ON:
			/* polygon lies entirely on the plane: nothing to collect */
			break;
		case BK:
		case ONBK:
			/* polygon lies entirely behind the plane */
			pll_append(&bparts, pl);
			break;
		case FR:
		case ONFR:
			/* polygon lies entirely in front of the plane */
			pll_append(&fparts, pl);
			break;
		default:
			/* polygon spans the plane: split it into front and back parts */
			split_graph(sg, fparts, bparts);
	}

	return sg->r;
}
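For context, a minimal usage sketch of pl_split_by_plane follows. It is an assumption, not part of the original example: the helper name classify_polygon, the way the Polygon and Plane arguments are obtained, and the zero-initialization of PolyList as an empty list are all hypothetical; only pl_split_by_plane and the ON/BK/ONBK/FR/ONFR result codes come from the code above.

/* Hypothetical sketch: classify one polygon against a plane and collect any
 * split pieces. PolyList is assumed to be zero-initializable as an empty
 * list, which the example above does not show. */
static void
classify_polygon(Polygon * pl, Plane * pln)
{
	PolyList fparts = {0};
	PolyList bparts = {0};
	int r = pl_split_by_plane(pl, pln, &fparts, &bparts);

	if (r == FR || r == ONFR) {
		/* whole polygon is in front of (or on and in front of) the plane */
	} else if (r == BK || r == ONBK) {
		/* whole polygon is behind (or on and behind) the plane */
	} else if (r != ON) {
		/* polygon spans the plane: pieces were appended to fparts and bparts */
	}
}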
Example #2
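// Driver program: reads the input graph, builds the dspac split graph,
// partitions it with ParHIP, and evaluates (and optionally saves) the
// resulting edge partition.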
int main(int argn, char **argv) {
    MPI_Init(&argn, &argv);

    int rank, size;
    MPI_Comm communicator = MPI_COMM_WORLD;
    MPI_Comm_rank(communicator, &rank);
    MPI_Comm_size(communicator, &size);

    PPartitionConfig partition_config;
    DspacConfig dspac_config;
    std::string graph_filename;

    int ret_code = parse_dspac_parameters(argn, argv, partition_config, dspac_config, graph_filename);

    if (ret_code) {
        MPI_Finalize();
        return 0;
    }
    if (rank == ROOT) {
        std::cout << "graph: " << graph_filename << "\n"
                  << "infinity edge weight: " << dspac_config.infinity << "\n"
                  << "seed: " << partition_config.seed << "\n"
                  << "k: " << partition_config.k << "\n"
                  << "ncores: " << size << std::endl;
    }

    timer t;
    MPI_Barrier(MPI_COMM_WORLD);
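    // run collective dummy operations and report how long they took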
    {
        t.restart();
        if (rank == ROOT) std::cout << "running collective dummy operations ";
        dummy_operations dop;
        dop.run_collective_dummy_operations();
    }
    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == ROOT) {
        std::cout << "took " << t.elapsed() << std::endl;
    }

    // load input graph
    t.restart();
    parallel_graph_access input_graph(communicator);
    edge_balanced_graph_io::read_binary_graph_edge_balanced(input_graph, graph_filename, partition_config, rank, size);
    if (rank == ROOT) {
        std::cout << "input IO took " << t.elapsed() << "\n"
                  << "n(input): " << input_graph.number_of_global_nodes() << "\n"
                  << "m(input): " << input_graph.number_of_global_edges() << std::endl;
    }
    MPI_Barrier(communicator);

    // construct split graph
    t.restart();
    parallel_graph_access split_graph(communicator);
    dspac splitter(input_graph, communicator, dspac_config.infinity);
    splitter.construct(split_graph);
    if (rank == ROOT) {
        std::cout << "split graph construction took " << t.elapsed() << "\n"
                  << "n(split): " << split_graph.number_of_global_nodes() << "\n"
                  << "m(split): " << split_graph.number_of_global_edges() << std::endl;
    }

    // partition split graph
    t.restart();
    executeParhip(split_graph, partition_config);
    if (rank == ROOT) {
        std::cout << "parhip took " << t.elapsed() << std::endl;
    }

    // evaluate edge partition
    t.restart();
    splitter.fix_cut_dominant_edges(split_graph);
    std::vector<PartitionID> edge_partition = splitter.project_partition(split_graph);
    EdgeWeight vertex_cut = splitter.calculate_vertex_cut(partition_config.k, edge_partition);
    if (rank == ROOT) {
        std::cout << "evaluation took " << t.elapsed() << "\n"
                  << "vertex cut: " << vertex_cut << std::endl;
    }

    // optionally write the computed edge partition to disk
    if (partition_config.save_partition) {
        parallel_vector_io pvio;
        std::string filename("tmpedgepartition.txtp");
        pvio.writePartitionSimpleParallel(split_graph, filename);
    }

    if (partition_config.save_partition_binary) {
        parallel_vector_io pvio;
        std::string filename("tmpedgepartition.binp");
        pvio.writePartitionBinaryParallelPosix(partition_config, split_graph, filename);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
}