Code Example #1
File: d8.cpp  Project: kornholi/TauDEM
//Opens files, initializes grid memory, calls functions to set flowDir and slope and to resolve flats, and writes output files
int setdird8(char* demfile, char* pointfile, char *slopefile, char *flowfile, int useflowfile)
{
    MPI_Init(NULL,NULL);

    int rank,size;
    MPI_Comm_rank(MCW,&rank);
    MPI_Comm_size(MCW,&size);

    if (rank==0) {
        printf("D8FlowDir version %s\n",TDVERSION);

        if (strlen(pointfile) == 0) {
            printf("WARNING: no output p file specified\n");
        }

        if (strlen(slopefile) == 0) {
            printf("WARNING: no output sd8 file specified\n");
        }

        fflush(stdout);
    }

    MPITimer t;

    t.start("Total");
    t.start("Header read");

    //Read DEM from file
    tiffIO dem(demfile, FLOAT_TYPE);

    long totalX = dem.getTotalX();
    long totalY = dem.getTotalY();
    double dx = dem.getdxA();
    double dy = dem.getdyA();

    linearpart<float> elevDEM(totalX, totalY, dx, dy, MPI_FLOAT, *(float*) dem.getNodata());

    int xstart, ystart;
    int nx = elevDEM.getnx();
    int ny = elevDEM.getny();
    elevDEM.localToGlobal(0, 0, xstart, ystart);
    elevDEM.savedxdyc(dem);

    t.end("Header read");

    if (rank==0) {
        float timeestimate=(2.8e-9*pow((double)totalX*totalY,1.55)/pow((double) size,0.65))/60+1;  // Time estimate in minutes; cast before multiplying to avoid integer overflow on large grids
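        // Worked example: a 10000 x 10000 DEM on 8 processes gives
        // 2.8e-9 * (1e8)^1.55 / 8^0.65 / 60 + 1, i.e. about 31 minutes.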
        //fprintf(stderr,"%d %d %d\n",totalX,totalY,size);
        fprintf(stderr,"This run may take on the order of %.0f minutes to complete.\n",timeestimate);
        fprintf(stderr,"This estimate is very approximate. \nRun time is highly uncertain as it depends on the complexity of the input data \nand speed and memory of the computer. This estimate is based on our testing on \na dual quad core Dell Xeon E5405 2.0GHz PC with 16GB RAM.\n");
        fflush(stderr);
    }

    uint64_t bytes_to_read = (uint64_t) nx * ny * sizeof(float);
    if (rank == 0) { 
        fprintf(stderr, "Reading input data (%s)... ", humanReadableSize(bytes_to_read).c_str());
    }

    t.start("Data read");

    dem.read(xstart, ystart, ny, nx, elevDEM.getGridPointer(), elevDEM.getGridPointerStride());
    elevDEM.share();
    double data_read_time = t.end("Data read");
   
    if (rank == 0) {
        fprintf(stderr, "done (%s/s).\n", humanReadableSize(bytes_to_read / data_read_time).c_str());
    }

    //Creates empty partition to store new flow direction
    short flowDirNodata = MISSINGSHORT;

    linearpart<short> flowDir(totalX, totalY, dx, dy, MPI_SHORT, flowDirNodata);

    //If using a flowfile, read it in
    if (useflowfile == 1) {
        tiffIO flow(flowfile, SHORT_TYPE);

        linearpart<short> imposedflow(flow.getTotalX(), flow.getTotalY(),
                flow.getdxA(), flow.getdyA(), MPI_SHORT, *(short*) flow.getNodata());

        if (!dem.compareTiff(flow)) {
            printf("Error: imposed flow file does not match the DEM raster.\n");
            MPI_Finalize();
            return 1;
        }

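        // Copy valid imposed flow directions (1..8) into flowDir, skipping
        // nodata cells and cells whose 4-neighbors fall outside this
        // partition's accessible halo range.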
        for (int j=0; j < elevDEM.getny(); j++) {
            for (int i=0; i < elevDEM.getnx(); i++ ) {
                short data = imposedflow.getData(i,j);

                if (imposedflow.isNodata(i,j) || !imposedflow.hasAccess(i-1,j) || !imposedflow.hasAccess(i+1,j) ||
                        !imposedflow.hasAccess(i,j-1) || !imposedflow.hasAccess(i,j+1)) {
                    //Do nothing
                } else if (data > 0 && data <= 8) {
                    flowDir.setData(i,j,data);
                }
            }
        }
    }

    if (rank == 0) fprintf(stderr, "Calculating flow directions... ");
    t.start("Calculate flow directions");
    uint64_t numFlat = setPosDir(elevDEM, flowDir);
    t.end("Calculate flow directions");
  
    flowDir.share();

    if (strlen(slopefile) > 0)
    {
        t.start("Calculate slope");
        
        //Creates empty partition to store new slopes
        float slopeNodata = -1.0f;
        linearpart<float> slope(totalX, totalY, dx, dy, MPI_FLOAT, slopeNodata);

        calcSlope(flowDir, elevDEM, slope);

        t.end("Calculate slope");

        t.start("Write slope");
        tiffIO slopeIO(slopefile, FLOAT_TYPE, &slopeNodata, dem);
        slopeIO.write(xstart, ystart, ny, nx, slope.getGridPointer(), slope.getGridPointerStride());
        t.end("Write slope");
    }

    uint64_t totalNumFlat = 0;
    MPI_Allreduce(&numFlat, &totalNumFlat, 1, MPI_UINT64_T, MPI_SUM, MCW);
   
    if (rank == 0) {
        fprintf(stderr, "done. %" PRIu64 " flats to resolve.\n", totalNumFlat);
        fflush(stderr);
    }

    t.start("Resolve flats");

    if (totalNumFlat > 0) {
        if (rank == 0) {
            fprintf(stderr, "Finding flat islands...\n");
        }

        std::vector<std::vector<node>> islands;
        std::vector<std::vector<node>> borderingIslands;
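        // islands holds flats entirely inside this partition (resolved locally
        // below); borderingIslands holds flats touching a partition edge, which
        // need the cross-processor resolution pass.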

        t.start("Find islands");
        findIslands<D8>(flowDir, islands, borderingIslands);
        t.end("Find islands");

        uint64_t localSharedFlats = 0, sharedFlats = 0;
        for (auto& island : borderingIslands) {
            localSharedFlats += island.size();
        }

        t.start("Resolve shared flats");
        MPI_Allreduce(&localSharedFlats, &sharedFlats, 1, MPI_UINT64_T, MPI_SUM, MCW);

        if (rank == 0 && size > 1) {
            fprintf(stderr, "Processing partial flats\n");

            printf("PRL: %" PRIu64 " flats shared across processors (%" PRIu64 " local -> %.2f%% shared)\n",
                    sharedFlats, totalNumFlat - sharedFlats, 100. * sharedFlats / totalNumFlat);
        }

        if (sharedFlats > 0) {
            SparsePartition<int> inc(totalX, totalY, 0);
            size_t lastNumFlat = resolveFlats_parallel_async<D8>(elevDEM, inc, flowDir, borderingIslands);

            if (rank==0) {
                fprintf(stderr, "PRL: Iteration complete. Number of flats remaining: %zu\n", lastNumFlat);
                fflush(stderr);
            }

            // Repeatedly call resolve flats until there is no change across all processors
            while (lastNumFlat > 0) {
                SparsePartition<int> newInc(totalX, totalY, 0);

                lastNumFlat = resolveFlats_parallel_async<D8>(inc, newInc, flowDir, borderingIslands);
                inc = std::move(newInc);

                if (rank==0) {
                    fprintf(stderr, "PRL: Iteration complete. Number of flats remaining: %zu\n", lastNumFlat);
                    fflush(stderr);
                }
            }
        }
        t.end("Resolve shared flats");

        //printf("rank %d: Done, %d islands. Took %.2f seconds\n", rank, numIslands, MPI_Wtime() - flatFindStart);
        //printf("rank %d: %lu bordering islands with %d flats\n", rank, bordering_islands.size(), localSharedFlats);

        t.start("Resolve local flats");
        if (!islands.empty()) {
            SparsePartition<int> inc(totalX, totalY, 0);
            size_t lastNumFlat = resolveFlats<D8>(elevDEM, inc, flowDir, islands);

            if (rank==0) {
                fprintf(stderr, "Iteration complete. Number of flats remaining: %zu\n\n", lastNumFlat);
                fflush(stderr);
            }

            // Repeatedly call resolve flats until there is no change
            while (lastNumFlat > 0)
            {
                SparsePartition<int> newInc(totalX, totalY, 0);

                lastNumFlat = resolveFlats<D8>(inc, newInc, flowDir, islands); 
                inc = std::move(newInc);

                if (rank==0) {
                    fprintf(stderr, "Iteration complete. Number of flats remaining: %zu\n\n", lastNumFlat);
                    fflush(stderr);
                }
            } 
        }
        t.end("Resolve local flats");
    }

    t.end("Resolve flats");

    if (strlen(pointfile) > 0) {
        t.start("Write directions");
        tiffIO pointIO(pointfile, SHORT_TYPE, &flowDirNodata, dem);
        pointIO.write(xstart, ystart, ny, nx, flowDir.getGridPointer(), flowDir.getGridPointerStride());
        t.end("Write directions");
    }

    t.end("Total");
    t.stop();
    //t.save("timing_info");

    MPI_Finalize();
    return 0;
}
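
For reference, here is a minimal driver sketch showing how setdird8 might be invoked. This is a hypothetical frontend (the real TauDEM command-line tool parses many more options), and the forward declaration below is an assumption rather than a copy of a TauDEM header.

#include <cstdio>

// Hypothetical driver for setdird8 -- not TauDEM's actual frontend.
int setdird8(char* demfile, char* pointfile, char* slopefile,
             char* flowfile, int useflowfile);  // assumed declaration

int main(int argc, char** argv)
{
    if (argc < 4) {
        fprintf(stderr, "Usage: %s dem.tif p.tif sd8.tif [imposedflow.tif]\n", argv[0]);
        return 1;
    }

    char noflow[] = "";
    char* flowfile = (argc > 4) ? argv[4] : noflow;  // optional imposed flow grid
    int useflowfile = (argc > 4) ? 1 : 0;

    // setdird8 performs MPI_Init and MPI_Finalize itself,
    // so the driver needs no MPI setup of its own.
    return setdird8(argv[1], argv[2], argv[3], flowfile, useflowfile);
}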
Code Example #2
//Opens files, initializes grid memory, calls functions to set flowDir and slope and to resolve flats, and writes output files
int setdir(char* demfile, char* pointfile, char *slopefile, char *flowfile, int useflowfile)
{
    MPI_Init(NULL,NULL);

    int rank,size;
    MPI_Comm_rank(MCW,&rank);
    MPI_Comm_size(MCW,&size);

    if (rank==0) {
        printf("D8FlowDir version %s\n",TDVERSION);
        fflush(stdout);
    }

    MPITimer t;

    t.start("Total");
    t.start("Header read");

    //Read DEM from file
    tiffIO dem(demfile, FLOAT_TYPE);

    long totalX = dem.getTotalX();
    long totalY = dem.getTotalY();
    double dx = dem.getdxA();
    double dy = dem.getdyA();

    linearpart<float> elevDEM(totalX, totalY, dx, dy, MPI_FLOAT, *(float*) dem.getNodata());

    int xstart, ystart;
    int nx = elevDEM.getnx();
    int ny = elevDEM.getny();
    elevDEM.localToGlobal(0, 0, xstart, ystart);
    elevDEM.savedxdyc(dem);

    t.end("Header read");

    if (rank==0) {
        float timeestimate=(2.8e-9*pow((double)totalX*totalY,1.55)/pow((double) size,0.65))/60+1;  // Time estimate in minutes; cast before multiplying to avoid integer overflow on large grids
        //fprintf(stderr,"%d %d %d\n",totalX,totalY,size);
        fprintf(stderr,"This run may take on the order of %.0f minutes to complete.\n",timeestimate);
        fprintf(stderr,"This estimate is very approximate. \nRun time is highly uncertain as it depends on the complexity of the input data \nand speed and memory of the computer. This estimate is based on our testing on \na dual quad core Dell Xeon E5405 2.0GHz PC with 16GB RAM.\n");
        fflush(stderr);
    }

    uint64_t bytes_to_read = (uint64_t) nx * ny * sizeof(float);
    if (rank == 0) { 
        fprintf(stderr, "Reading input data (%s)... ", humanReadableSize(bytes_to_read).c_str());
    }

    t.start("Data read");

    dem.read(xstart, ystart, ny, nx, elevDEM.getGridPointer());
    elevDEM.share();
    double data_read_time = t.end("Data read");
   
    if (rank == 0) {
        fprintf(stderr, "done (%s/s).\n", humanReadableSize(bytes_to_read / data_read_time).c_str());
    }

    //Creates empty partition to store new flow direction
    float flowDirNodata = MISSINGFLOAT;

    linearpart<float> flowDir(totalX, totalY, dx, dy, MPI_FLOAT, flowDirNodata);

    if (rank == 0) fprintf(stderr, "Calculating flow directions... ");
    t.start("Calculate flow directions");
    uint64_t numFlat;
    {
        float slopeNodata = -1.0f;
        linearpart<float> slope(totalX, totalY, dx, dy, MPI_FLOAT, slopeNodata);

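        // Unlike the D8 version above, the Dinf routine computes slope while
        // setting flow directions, so both come out of this single pass.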
        numFlat = setPosDirDinf(elevDEM, flowDir, slope, useflowfile);
        t.end("Calculate flow directions");

        t.start("Write slope");
        tiffIO slopeIO(slopefile, FLOAT_TYPE, &slopeNodata, dem);
        slopeIO.write(xstart, ystart, ny, nx, slope.getGridPointer());
        t.end("Write slope");
    }

    flowDir.share();

    uint64_t totalNumFlat = 0;
    MPI_Allreduce(&numFlat, &totalNumFlat, 1, MPI_UINT64_T, MPI_SUM, MCW);
   
    if (rank == 0) {
        fprintf(stderr, "done. %" PRIu64 " flats to resolve.\n", totalNumFlat);
        fflush(stderr);
    }

    t.start("Resolve flats");

    if (totalNumFlat > 0) {
        std::vector<node> flats;

        t.start("Add flats");

        // FIXME: Should do this during slope calculation
        for (int j=0; j<ny; j++) {
            for (int i=0; i<nx; i++) {
                if (flowDir.getData(i, j) == -1) {
                    flats.push_back(node(i, j));
                }
            }
        }

        t.end("Add flats");

        if (rank == 0) {
            fprintf(stderr, "Finding flat islands...\n");
        }

        double flatFindStart = MPI_Wtime();
        int numIslands = 0;

        std::vector<std::vector<node>> islands;
        std::set<int> bordering_island_labels;

        t.start("Find islands");
        {
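            // Flood-fill labeling of flat "islands": pop a flat cell, stamp it
            // with its island's label, and push its unlabeled flat neighbors.
            // A neighbor that lands in the halo rows (jn == -1 or jn == ny) but
            // is still accessible means the island touches a partition boundary,
            // so its label is recorded in bordering_island_labels.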
            SparsePartition<int> island_marker(nx, ny, 0);
            std::vector<node> q;

            for(node flat : flats)
            {
                if (island_marker.getData(flat.x, flat.y) != 0) {
                    continue;
                }

                q.push_back(flat);

                int label = ++numIslands;
                islands.push_back(std::vector<node>());

                while(!q.empty()) {
                    node cur = q.back();
                    q.pop_back();

                    if (island_marker.getData(cur.x, cur.y) != 0) {
                        continue;
                    }

                    island_marker.setData(cur.x, cur.y, label);
                    islands[label - 1].push_back(cur);

                    for (int k=1; k<=8; k++) {
                        //if neighbor is in flat
                        int in = cur.x + d1[k];
                        int jn = cur.y + d2[k];

                        if ((jn == -1 || jn == ny) && flowDir.hasAccess(in, jn)) {
                            if (flowDir.getData(in, jn) == -1) {
                                bordering_island_labels.insert(label);
                            }
                        }

                        if (!flowDir.isInPartition(in, jn))
                            continue;

                        if (flowDir.getData(in, jn) == -1)
                            q.push_back(node(in, jn));
                    }
                }
            }
        }
        t.end("Find islands");

        std::vector<std::vector<node>> borderingIslands;
        uint64_t localSharedFlats = 0, sharedFlats = 0;

        for (auto& label : bordering_island_labels) {
            std::vector<node> island = std::move(islands[label - 1]);

            localSharedFlats += island.size(); 
            borderingIslands.push_back(island);
        }

        t.start("Resolve shared flats");
        MPI_Allreduce(&localSharedFlats, &sharedFlats, 1, MPI_UINT64_T, MPI_SUM, MCW);

        if (rank == 0 && size > 1) {
            fprintf(stderr, "Processing partial flats\n");
            printf("PRL: %" PRIu64 " flats shared across processors (%" PRIu64 " local -> %.2f%% shared)\n", sharedFlats, totalNumFlat - sharedFlats, 100. * sharedFlats / totalNumFlat);
        }

        if (sharedFlats > 0) {
            SparsePartition<short> inc(nx, ny, 0);
            size_t lastNumFlat = resolveFlats_parallel(elevDEM, inc, flowDir, borderingIslands, elevDEM);

            if (rank==0) {
                fprintf(stderr, "PRL: Iteration complete. Number of flats remaining: %zu\n", lastNumFlat);
                fflush(stderr);
            }

            // Repeatedly call resolve flats until there is no change across all processors
            while (lastNumFlat > 0) {
                SparsePartition<short> newInc(nx, ny, 0);

                lastNumFlat = resolveFlats_parallel(inc, newInc, flowDir, borderingIslands, elevDEM);
                inc = std::move(newInc);

                if (rank==0) {
                    fprintf(stderr, "PRL: Iteration complete. Number of flats remaining: %zu\n", lastNumFlat);
                    fflush(stderr);
                }
            }
        }
        t.end("Resolve shared flats");

        //printf("rank %d: Done, %d islands. Took %.2f seconds\n", rank, numIslands, MPI_Wtime() - flatFindStart);
        //printf("rank %d: %lu bordering islands with %d flats\n", rank, bordering_islands.size(), localSharedFlats);

        t.start("Resolve local flats");
        if (!islands.empty()) {
            SparsePartition<short> inc(nx, ny, 0);
            size_t lastNumFlat = resolveFlats(elevDEM, inc, flowDir, islands, elevDEM);

            if (rank==0) {
                fprintf(stderr, "Iteration complete. Number of flats remaining: %zu\n\n", lastNumFlat);
                fflush(stderr);
            }

            // Repeatedly call resolve flats until there is no change
            while (lastNumFlat > 0)
            {
                SparsePartition<short> newInc(nx, ny, 0);

                lastNumFlat = resolveFlats(inc, newInc, flowDir, islands, elevDEM);
                inc = std::move(newInc);

                if (rank==0) {
                    fprintf(stderr, "Iteration complete. Number of flats remaining: %zu\n\n", lastNumFlat);
                    fflush(stderr);
                }
            } 
        }
        t.end("Resolve local flats");
    }

    t.end("Resolve flats");

    t.start("Write directions");
    tiffIO pointIO(pointfile, FLOAT_TYPE, &flowDirNodata, dem);
    pointIO.write(xstart, ystart, ny, nx, flowDir.getGridPointer());
    t.end("Write directions");

    t.end("Total");
    t.stop();
    //t.save("timing_info");

    MPI_Finalize();
    return 0;
}
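
Both examples drive flat resolution with the same convergence loop: each resolveFlats call returns the number of flats still unresolved, and the next pass starts from the increments the previous pass produced. Below is a self-contained sketch of that pattern; Increments and resolveOnce are hypothetical stand-ins for SparsePartition and resolveFlats / resolveFlats_parallel, with toy state in place of real grid data.

#include <cstdio>
#include <cstddef>
#include <utility>

// Hypothetical stand-in for SparsePartition<int>/<short>.
struct Increments {
    int pending = 0;  // toy state: how many flats remain unresolved
};

// Hypothetical stand-in for resolveFlats: consumes the previous pass's
// increments, fills the next pass's, and returns the flats still unresolved.
static size_t resolveOnce(const Increments& prev, Increments& next)
{
    next.pending = (prev.pending > 0) ? prev.pending - 1 : 0;
    return next.pending;
}

int main()
{
    Increments inc;
    inc.pending = 3;  // pretend the first pass left 3 flats unresolved
    size_t remaining = inc.pending;

    // Same shape as the loops above: iterate until no flats remain,
    // carrying the increments forward between passes.
    while (remaining > 0) {
        Increments next;
        remaining = resolveOnce(inc, next);
        inc = std::move(next);
        printf("Iteration complete. Number of flats remaining: %zu\n", remaining);
    }
    return 0;
}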