Code example #1
File: domain.cpp Project: ttnghia/Peridynamics
//------------------------------------------------------------------------------
int Domain::gridPointBelongsTo(int i, int j, int k)
{
    // Flatten (i, j, k) to a linear grid id, with k varying fastest
    int id = k + nGrid[2]*j + nGrid[2]*nGrid[1]*i;
    // Return the MPI rank that owns this grid point in the block decomposition
    return BLOCK_OWNER(id, nNodes, globalNumberOfGridpoints);
}
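
All three examples use the BLOCK_OWNER / BLOCK_LOW / BLOCK_SIZE block-decomposition macros without showing their definitions. Neither project's header is shown here, but these macros are conventionally written as in Quinn's "Parallel Programming in C with MPI and OpenMP"; the following is a sketch of that common form, assumed rather than taken from either repository:

/* Block decomposition of n elements over p processes (Quinn-style macros).
 * BLOCK_LOW:   first global index owned by process id
 * BLOCK_HIGH:  last global index owned by process id
 * BLOCK_SIZE:  number of elements owned by process id
 * BLOCK_OWNER: rank of the process that owns a given global index */
#define BLOCK_LOW(id, p, n)      ((id) * (n) / (p))
#define BLOCK_HIGH(id, p, n)     (BLOCK_LOW((id) + 1, (p), (n)) - 1)
#define BLOCK_SIZE(id, p, n)     (BLOCK_HIGH(id, p, n) - BLOCK_LOW(id, p, n) + 1)
#define BLOCK_OWNER(index, p, n) (((p) * ((index) + 1) - 1) / (n))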
Code example #2
//------------------------------------------------------------------------------
void InitialFromImage::initializeLocal()
{

    double rScale = 0.02;   // amplitude of the random position jitter
    VEC3 shift;             // domain extent per dimension, used for periodic wrapping
    for(int d=0; d<dim; d++)
        shift[d] = domain.dom[d][1] - domain.dom[d][0];

    VEC3 v;
    VEC3 rx;
    VEC3 rz;

    vector<VEC3> _r;
    _r.push_back( VEC3() );

    int id = 0;
    switch(dim){
    case 3:

        if(gType == gridType::FCC)
        {
            // FCC lattice: two additional basis sites per cell, offset by rx and rz
            _r.push_back( VEC3() );
            _r.push_back( VEC3() );
            rx = {0.5*spacing, 0.5*spacing, 0};
            rz = {0, 0.5*spacing, 0.5*spacing};
        }

        for(int i=0; i<nXYZ[0]; i++)
        {
            for(int j=0; j<nXYZ[1]; j++)
            {
                for(int k=0; k<nXYZ[2]; k++)
                {
                    // Random perturbations of the stretch s0 and the lattice position
                    arma::vec deviation = deviationS0*arma::randn(dim + 1);
                    double s = s0*(1 + deviation(0));
                    v = {0, 0, 0};
                    _r[0] = { spacing*(i + 0.25 + rScale * deviation(1))
                           , spacing*(j + 0.25 + rScale * deviation(2))
                           , spacing*(k + 0.25 + rScale * deviation(3)) };

                    if(gType == gridType::FCC)
                    {
                        _r[1] = _r[0] + rx;
                        _r[2] = _r[0] + rz;
                    }


                    for(VEC3 r:_r)
                    {
                        r = periodicBoundary( r, shift );

                        // Adding particles to the correct domain
#ifdef USE_MPI
                        if( BLOCK_OWNER(getGridId(r), nNodes, totalNumberOfgridpoints) == myRank )
                        {
                            Particle *P = new Particle(id, rho, s, volume, r, r, v);
                            particles.push_back(P);
                        }
#else
                        Particle *P = new Particle(id, rho, s, volume, r, r, v);
                        particles.push_back(P);
#endif
                        id++;
                    }
                }
            }
        }
        break;
    default:
        cerr << "dim = " << dim << " not implemented for configuration 'Box'" << endl;
        exit(EXIT_FAILURE);
        break;
    }

    domain.setParticles(particles);
    domain.update();
}
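
The loop above calls a periodicBoundary helper that the excerpt does not include. A minimal sketch of the behavior it plausibly implements, assuming VEC3 supports operator[], the run is 3D, and the domain starts at the origin (all assumptions, not taken from the Peridynamics source):

// Hypothetical sketch: wrap a position back into [0, shift[d]) in each dimension.
// Assumes a domain origin of 0; a real implementation would offset by dom[d][0].
VEC3 periodicBoundary(VEC3 r, const VEC3 &shift)
{
    for(int d = 0; d < 3; d++)
    {
        if(r[d] < 0)              r[d] += shift[d];
        else if(r[d] >= shift[d]) r[d] -= shift[d];
    }
    return r;
}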
Code example #3
File: backup-floyd-parallel.c Project: liuhuac/MPI
int main (int argc, char *argv[]) 
{
	int opt;
	char *ifile = NULL;
	char *ofile = NULL;
	while ((opt = getopt(argc, argv, "i:o:")) != -1) {
		switch (opt) {
		case 'i':
			ifile = strdup(optarg);
			break;
		case 'o':
			ofile = strdup(optarg);
			break;
		case '?':
		case ':':
		default:
			usage();
			break;
		}
	}

	if (optind != argc) {
		printf("Unknown argument '%s'\n", argv[optind]);
		usage();
	} else if (strcmp(argv[optind-1], "--") == 0) {
		printf("Unknown argument '%s'\n", argv[optind-1]);
		usage();
	}

	if (ifile == NULL) {
		ifile = strdup("default-make-graph-file.dat");
	}
	if (ofile == NULL) {
		ofile = strdup("default-make-graph-file.seq");
	}

	int rank, size; /* rank is this process's id, starting at 0;
	                   size is the number of processes the program
	                   was started with. Never make MPI calls before
	                   MPI_Init, and never touch argc/argv before it. */
	MPI_Init (&argc, &argv);
	/* get current process id */
	MPI_Comm_rank (MPI_COMM_WORLD, &rank); 
	/* get number of processes */
	MPI_Comm_size (MPI_COMM_WORLD, &size); 

	MPI_Barrier(MPI_COMM_WORLD);
	double entire_start=MPI_Wtime();

	int n;           /* matrix dimension (the matrix is n x n) */
	void *subvector; /* local subvector */
	void **subs;     /* local 2D block of the matrix */
	void *storage;   /* contiguous storage for the block's elements */

	int dim[2], period[2], reorder;
	dim[0]=size;
	dim[1]=1;
	period[0]=0;
	period[1]=0;
	reorder=1;
	MPI_Comm comm;
	MPI_Comm rowcomm;
	MPI_Comm colcomm;
	int remain_dims[2];

	MPI_Cart_create(MPI_COMM_WORLD, 2, dim, period, reorder, &comm);
	/* Split the Cartesian grid into row and column communicators */
	remain_dims[0]=1;
	remain_dims[1]=0;
	MPI_Cart_sub(comm, remain_dims, &rowcomm);
	remain_dims[0]=0;
	remain_dims[1]=1;
	MPI_Cart_sub(comm, remain_dims, &colcomm);

	read_checkerboard_matrix_square (
		ifile,		/* IN - File name */
		&subs,         	/* OUT - 2D array */
		&storage,       /* OUT - Array elements */
		MPI_INT,   	/* IN - Element type */
		&n,	        /* OUT - Array dimension */
		comm);		/* IN - Communicator */

	print_checkerboard_matrix (
		subs,           /* IN -2D matrix */
		MPI_INT,        /* IN -Matrix element type */
		n,            	/* IN -Matrix rows */
		n,            	/* IN -Matrix columns */
		comm);    	/* IN - Communicator */
	

	MPI_Barrier(MPI_COMM_WORLD);
	double comp_start=MPI_Wtime();

	int coord[2];
	MPI_Cart_coords(comm, rank, 2, coord);
	int local_rows = BLOCK_SIZE(coord[0], dim[0], n);
	int local_cols = BLOCK_SIZE(coord[1], dim[1], n);

	int i, j, k;
	int ii;
	int xk_coord[2], ky_coord[2];
	int xk_rank, ky_rank;
	int row_offset, col_offset;
	int xk_rows;
	/* Buffer for one column segment; xk_rows never exceeds local_rows
	   since xk_coord[0] == coord[0]. Requires <stdlib.h>. */
	int *xk_storage = malloc(local_rows * sizeof(int));
	for(k=0;k<n;k++){

		xk_coord[0]=coord[0];
		xk_coord[1]=BLOCK_OWNER(k,dim[1],n);
		ky_coord[0]=BLOCK_OWNER(k,dim[0],n);
		ky_coord[1]=coord[1];
		MPI_Cart_rank(comm, xk_coord, &xk_rank);
		MPI_Cart_rank(comm, ky_coord, &ky_rank);
		row_offset=BLOCK_LOW(k,dim[0],n);
		col_offset=BLOCK_LOW(k,dim[1],n);

		if (rank == xk_rank) {
			xk_rows = BLOCK_SIZE(xk_coord[0], dim[0], n);
			for (ii = 0; ii < xk_rows; ii++) {
				/* subs is void**, so cast to the actual element type */
				xk_storage[ii] = ((int **)subs)[ii][col_offset];
			}

			MPI_Send(xk_storage, xk_rows, MPI_INT, dest, tag, comm);
		} else {