/* Example #1 */
/**
 * Read an initial temperature field from a text file.
 *
 * File format: a header line "# nx ny" followed by nx * ny
 * whitespace-separated values. Both fields are allocated with one
 * ghost layer on every side ((nx+2) x (ny+2)); the interior is
 * filled from the file and the ghost layers are initialized by
 * replicating the nearest interior values.
 *
 * temperature1: field that receives the data read from the file
 * temperature2: field initialized as a copy of temperature1
 * filename:     path of the input file
 *
 * On I/O error an error message is printed and the function returns
 * early; the fields are then not (fully) initialized.
 */
void read_input(field * temperature1, field * temperature2, char *filename)
{
    FILE *fp;
    int nx, ny, i, j;

    fp = fopen(filename, "r");
    if (fp == NULL) {
        fprintf(stderr, "Error: could not open input file %s\n", filename);
        return;
    }
    // Read the header
    if (fscanf(fp, "# %d %d \n", &nx, &ny) != 2) {
        fprintf(stderr, "Error: invalid header in %s\n", filename);
        fclose(fp);
        return;
    }

    initialize_field_metadata(temperature1, nx, ny);
    initialize_field_metadata(temperature2, nx, ny);

    // Allocate arrays (including ghost layers)
    temperature1->data = malloc_2d(nx + 2, ny + 2);
    temperature2->data = malloc_2d(nx + 2, ny + 2);

    // Read the actual data into the interior (indices 1..nx, 1..ny)
    for (i = 1; i < nx + 1; i++) {
        for (j = 1; j < ny + 1; j++) {
            if (fscanf(fp, "%lf", &temperature1->data[i][j]) != 1) {
                fprintf(stderr, "Error: unexpected end of data in %s\n",
                        filename);
                fclose(fp);
                return;
            }
        }
    }

    // Set the boundary values: replicate the nearest interior column
    // into the left/right ghost columns
    for (i = 1; i < nx + 1; i++) {
        temperature1->data[i][0] = temperature1->data[i][1];
        temperature1->data[i][ny + 1] = temperature1->data[i][ny];
    }
    // Replicate first/last rows (including ghost columns) into the
    // top/bottom ghost rows
    for (j = 0; j < ny + 2; j++) {
        temperature1->data[0][j] = temperature1->data[1][j];
        temperature1->data[nx + 1][j] = temperature1->data[nx][j];
    }

    copy_field(temperature1, temperature2);

    fclose(fp);
}
/* Example #2 */
/**
 * Restore the field data and iteration count from a checkpoint file
 * (CHECKPOINT) using MPI parallel I/O.
 *
 * File layout: three ints (full_nx, full_ny, iteration) followed by the
 * grid data as doubles, decomposed over the tasks along one dimension;
 * the first and last tasks additionally own the outermost ghost layer.
 *
 * temperature: field to allocate and fill with this task's portion
 * parallel:    MPI metadata; (re)initialized from the stored dimensions
 * iter:        output: iteration number stored in the checkpoint
 */
void read_restart(field *temperature, parallel_data *parallel, int *iter)
{
	MPI_File fp;
	int full_nx, full_ny;
	// MPI file displacements must be MPI_Offset: a plain int overflows
	// for large grids and silently truncates the size_t products below
	MPI_Offset disp;
	int size;

	// initialise MPI metadata with bogus dimensions
	parallel_initialize(parallel, 0, 0);
	// open file for reading
	MPI_File_open(parallel->comm, CHECKPOINT, MPI_MODE_RDONLY,
			MPI_INFO_NULL, &fp);

	// read grid size and current iteration
	MPI_File_read_all(fp, &full_nx, 1, MPI_INT, MPI_STATUS_IGNORE);
	MPI_File_read_all(fp, &full_ny, 1, MPI_INT, MPI_STATUS_IGNORE);
	MPI_File_read_all(fp, iter, 1, MPI_INT, MPI_STATUS_IGNORE);
	// set correct dimensions to MPI metadata
	parallel_set_dimensions(parallel, full_nx, full_ny);
	// set local dimensions and allocate memory for the data
	initialize_field_metadata(temperature, full_nx, full_ny, parallel);
	allocate_field(temperature);

	// size of the local data including the outermost ghost row if at the
	// top or the bottom of the full grid
	if ((parallel->rank == 0) || (parallel->rank == parallel->size - 1)) {
		size = (temperature->nx + 2) * (temperature->ny + 1);
	} else {
		size = (temperature->nx + 2) * temperature->ny;
	}

	// point each MPI task to the correct part of the file;
	// widen before multiplying so the offset cannot overflow
	disp = 3 * sizeof(int);
	if (parallel->rank > 0) {
		disp += (1 + (MPI_Offset) parallel->rank * temperature->ny) *
			(temperature->nx + 2) * sizeof(double);
	}

	// read data simultaneously to all processes
	MPI_File_read_at_all(fp, disp, &temperature->data[0][0],
			size, MPI_DOUBLE, MPI_STATUS_IGNORE);

	// close up shop
	MPI_File_close(&fp);
}
/* Example #3 */
/**
 * Heat equation solver, serial driver.
 *
 * Supported command line combinations:
 *   No arguments:    use default field dimensions and number of time steps
 *   One argument:    read initial field from the given file
 *   Two arguments:   initial field from file and number of time steps
 *   Three arguments: field dimensions (rows, cols) and number of time steps
 *
 * Returns 0 on success, -1 on unsupported argument count.
 */
int main(int argc, char **argv)
{
    double a = 0.5;             //!< Diffusion constant
    field current, previous;    //!< Current and previous temperature fields

    double dt;                  //!< Time step
    int nsteps = 500;           //!< Number of time steps

    int rows = 200;             //!< Field dimensions with default values
    int cols = 200;

    char input_file[64];        //!< Name of the optional input file

    int image_interval = 10;    //!< Image output interval

    int iter;

    switch (argc) {
    case 1:
        // use defaults
        initialize_field_metadata(&current, rows, cols);
        initialize_field_metadata(&previous, rows, cols);
        initialize(&current, &previous);
        break;
    case 2:
        // Initial field from a file; snprintf always NUL-terminates,
        // unlike strncpy which can leave the buffer unterminated
        snprintf(input_file, sizeof input_file, "%s", argv[1]);
        read_input(&current, &previous, input_file);
        break;
    case 3:
        // Initial field from a file
        snprintf(input_file, sizeof input_file, "%s", argv[1]);
        read_input(&current, &previous, input_file);
        // Number of time steps
        nsteps = atoi(argv[2]);
        break;
    case 4:
        // Field dimensions
        rows = atoi(argv[1]);
        cols = atoi(argv[2]);
        initialize_field_metadata(&current, rows, cols);
        initialize_field_metadata(&previous, rows, cols);
        initialize(&current, &previous);
        // Number of time steps
        nsteps = atoi(argv[3]);
        break;
    default:
        printf("Unsupported number of command line arguments\n");
        return -1;
    }

    // Output the initial field
    output(&current, 0);

    // Largest stable time step for the explicit scheme
    dt = current.dx2 * current.dy2 /
        (2.0 * a * (current.dx2 + current.dy2));

    // Time evolve
    for (iter = 1; iter < nsteps; iter++) {
        evolve(&current, &previous, a, dt);
        // output every image_interval iterations
        if (iter % image_interval == 0)
            output(&current, iter);
        // make current field to be previous for next iteration step
        swap_fields(&current, &previous);
    }

    finalize(&current, &previous);
    return 0;
}
/* Example #4 */
/**
 * Read an initial temperature field from a text file and distribute it
 * over the MPI tasks with a 1D row-block decomposition (MPI_Scatter).
 *
 * temperature1: field that receives this task's portion of the data
 * temperature2: field initialized as a copy of temperature1
 * filename:     path of the input file (header "# nx ny", then nx*ny values)
 * parallel:     MPI metadata; initialized here from the grid dimensions
 *
 * NOTE(review): fopen/fscanf results are unchecked -- a missing or
 * malformed file leads to undefined behavior.
 */
void read_input(field * temperature1, field * temperature2, char *filename,
                parallel_data * parallel)
{
    FILE *fp;
    int nx, ny, i, j;

    double **full_data;     // full grid, significant only on rank 0
    double **inner_data;    // local block without ghost layers

    int nx_local;           // number of local rows (excluding ghosts)

    // Every rank opens the file and parses the header to learn nx, ny;
    // presumably the file is reachable from all ranks -- verify
    fp = fopen(filename, "r");
    // Read the header
    fscanf(fp, "# %d %d \n", &nx, &ny);

    parallel_initialize(parallel, nx, ny);
    initialize_field_metadata(temperature1, nx, ny, parallel);
    initialize_field_metadata(temperature2, nx, ny, parallel);

    // Allocate arrays (including ghost layers)
    temperature1->data =
        malloc_2d(temperature1->nx + 2, temperature1->ny + 2);
    temperature2->data =
        malloc_2d(temperature2->nx + 2, temperature2->ny + 2);

    inner_data = malloc_2d(temperature1->nx, temperature1->ny);

    if (parallel->rank == 0) {
        // Full array
        full_data = malloc_2d(nx, ny);

        // Read the actual data
        for (i = 0; i < nx; i++) {
            for (j = 0; j < ny; j++) {
                fscanf(fp, "%lf", &full_data[i][j]);
            }
        }
    } else
        // dummy array for full data: MPI_Scatter ignores the send buffer
        // on non-root ranks, but the pointer must be valid for free_2d
        full_data = malloc_2d(1, 1);

    nx_local = temperature1->nx;

    // Scatter contiguous row blocks of nx_local * ny values; assumes nx
    // divides evenly over the tasks (set up by parallel_initialize) and
    // that malloc_2d storage is contiguous -- TODO confirm
    MPI_Scatter(full_data[0], nx_local * ny, MPI_DOUBLE, inner_data[0],
                nx_local * ny, MPI_DOUBLE, 0, parallel->comm);

    // Copy to the array containing also boundaries
    for (i = 0; i < nx_local; i++)
        memcpy(&temperature1->data[i + 1][1], &inner_data[i][0],
               ny * sizeof(double));

    // Set the boundary values: replicate the nearest interior column
    // into the left/right ghost columns (rows 0..nx_local)
    for (i = 0; i < nx_local + 1; i++) {
        temperature1->data[i][0] = temperature1->data[i][1];
        temperature1->data[i][ny + 1] = temperature1->data[i][ny];
    }
    // Replicate the first/last data rows (including ghost columns) into
    // the top/bottom ghost rows; this also fills the ghost columns of
    // row nx_local + 1, skipped by the loop above
    for (j = 0; j < ny + 2; j++) {
        temperature1->data[0][j] = temperature1->data[1][j];
        temperature1->data[nx_local + 1][j] =
            temperature1->data[nx_local][j];
    }

    copy_field(temperature1, temperature2);

    free_2d(full_data);
    free_2d(inner_data);
    fclose(fp);
}
/* Example #5 */
/**
 * Read an initial temperature field from a text file and distribute it
 * over the MPI tasks of a 2D Cartesian decomposition, using
 * point-to-point sends of a derived subarray datatype.
 *
 * temperature1: field that receives this task's portion of the data
 * temperature2: field initialized as a copy of temperature1
 * filename:     path of the input file (header "# nx ny", then nx*ny values)
 * parallel:     MPI metadata (Cartesian communicator, subarraytype)
 *
 * NOTE(review): fopen/fscanf results are unchecked -- a missing or
 * malformed file leads to undefined behavior.
 */
void read_input(field * temperature1, field * temperature2, char *filename,
                parallel_data * parallel)
{
    FILE *fp;
    int nx, ny, i, j;

    double **full_data;     // full grid, allocated only on rank 0

    int coords[2];          // Cartesian coordinates of the receiving task
    int ix, jy, p;          // block origin in the full grid; task index

    // Every rank opens the file and parses the header to learn nx, ny;
    // presumably the file is reachable from all ranks -- verify
    fp = fopen(filename, "r");
    // Read the header
    fscanf(fp, "# %d %d \n", &nx, &ny);

    parallel_initialize(parallel, nx, ny);
    initialize_field_metadata(temperature1, nx, ny, parallel);
    initialize_field_metadata(temperature2, nx, ny, parallel);

    // Allocate arrays (including ghost layers)
    temperature1->data =
        malloc_2d(temperature1->nx + 2, temperature1->ny + 2);
    temperature2->data =
        malloc_2d(temperature2->nx + 2, temperature2->ny + 2);

    if (parallel->rank == 0) {
        // Full array
        full_data = malloc_2d(nx, ny);

        // Read the actual data
        for (i = 0; i < nx; i++) {
            for (j = 0; j < ny; j++) {
                fscanf(fp, "%lf", &full_data[i][j]);
            }
        }
        // Copy to own local array (rank 0 keeps the top-left block)
        for (i = 0; i < temperature1->nx; i++)
            memcpy(&temperature1->data[i + 1][1], full_data[i],
                   temperature1->ny * sizeof(double));
        // Send each remaining task its block; the block origin is derived
        // from the task's Cartesian coordinates. Assumes every task has
        // identical local nx/ny (uniform decomposition) -- TODO confirm
        for (p = 1; p < parallel->size; p++) {
            MPI_Cart_coords(parallel->comm, p, 2, coords);
            ix = coords[0] * temperature1->nx;
            jy = coords[1] * temperature1->ny;
            MPI_Send(&full_data[ix][jy], 1, parallel->subarraytype, p, 44,
                     parallel->comm);
        }
    } else
        // Receive data into the interior of the ghosted local array.
        // NOTE(review): the same subarraytype is used for the strided send
        // from full_data and this receive; its definition (not visible
        // here) must match both layouts -- verify against parallel setup
        MPI_Recv(&temperature1->data[1][1], 1, parallel->subarraytype, 0,
                 44, parallel->comm, MPI_STATUS_IGNORE);

    // Set the boundary values: replicate the nearest interior column
    // into the left/right ghost columns (rows 0..nx)
    for (i = 0; i < temperature1->nx + 1; i++) {
        temperature1->data[i][0] = temperature1->data[i][1];
        temperature1->data[i][temperature1->ny + 1] =
            temperature1->data[i][temperature1->ny];
    }
    // Replicate the first/last data rows (including ghost columns) into
    // the top/bottom ghost rows; this also fills the ghost columns of
    // row nx + 1, skipped by the loop above
    for (j = 0; j < temperature1->ny + 2; j++) {
        temperature1->data[0][j] = temperature1->data[1][j];
        temperature1->data[temperature1->nx + 1][j] =
            temperature1->data[temperature1->nx][j];
    }

    copy_field(temperature1, temperature2);

    if (parallel->rank == 0)
        free_2d(full_data);

    fclose(fp);
}
}