int main(int argc, char **args) { PetscErrorCode ierr; ierr = DCellInit(); CHKERRQ(ierr); PetscReal dx = 1; iCoor size = {1625,1145,0}; // iCoor size = {253,341,0}; int fd; Coor dh = {dx,dx,dx}; iCoor pos = {0,0,0}; Grid chip; ierr = GridCreate(dh,pos,size,1,&chip); CHKERRQ(ierr); ierr = PetscInfo(0,"Reading image file\n"); CHKERRQ(ierr); ierr = PetscBinaryOpen("/scratch/n/BL Big",FILE_MODE_READ,&fd); CHKERRQ(ierr); ierr = PetscBinaryRead(fd,chip->v1,size.x*size.y,PETSC_DOUBLE); CHKERRQ(ierr); ierr = PetscBinaryClose(fd); CHKERRQ(ierr); ierr = PetscInfo(0,"Writing image file\n"); CHKERRQ(ierr); ierr = GridWrite(chip,0); CHKERRQ(ierr); FluidField fluid; ierr = FluidFieldCreate(PETSC_COMM_WORLD, &fluid); CHKERRQ(ierr); ierr = FluidFieldSetDims(fluid,size); CHKERRQ(ierr); ierr = FluidFieldSetDx(fluid,dx); CHKERRQ(ierr); ierr = FluidFieldSetMask(fluid, chip); CHKERRQ(ierr); ierr = FluidFieldSetup(fluid); CHKERRQ(ierr); ierr = SetPressureBC(fluid); CHKERRQ(ierr); ierr = KSPSolve(fluid->ksp,fluid->rhs,fluid->vel); CHKERRQ(ierr); ierr = FluidFieldWrite( fluid,0); CHKERRQ(ierr); ierr = FluidFieldDestroy(fluid); CHKERRQ(ierr); ierr = GridDestroy(chip); CHKERRQ(ierr); ierr = DCellFinalize(); CHKERRQ(ierr); return 0; }
int main(int argc, char **args) { PetscErrorCode ierr; ierr = PetscInitialize(&argc, &args, (char *) 0, ""); CHKERRQ(ierr); ierr = PetscPrintf(PETSC_COMM_WORLD, "Start\n"); CHKERRQ(ierr); int d1 = 5; ierr = PetscOptionsSetValue("-da_grid_x","5"); CHKERRQ(ierr); int d2 = 7; ierr = PetscOptionsSetValue("-da_grid_y","7"); CHKERRQ(ierr); FluidField f; ierr = FluidFieldCreate(&f); CHKERRQ(ierr); iCoor s = {d1,d2,0}; Grid g; ierr = GridCreate(s,&g); CHKERRQ(ierr); PetscReal *p; VecGetArray(f->p, &p); for (int i = 0; i < g->n.x*g->n.y; ++i) { g->v1[i] = i; p[i] = i; } PetscReal **pp; DAVecGetArray(f->da, f->p, &pp); for (int j = 0; j < d2; ++j) { for (int i = 0; i < d1; ++i) { printf( "%1.0f, %1.0f\t", g->v2[j][i], pp[j][i]); } printf("\n"); } DAVecRestoreArray(f->da, f->p, &pp); ierr = GridDestroy(g); CHKERRQ(ierr); ierr = FluidFieldDestroy(f); CHKERRQ(ierr); ierr = PetscPrintf(PETSC_COMM_WORLD, "End\n"); CHKERRQ(ierr); ierr = PetscFinalize(); CHKERRQ(ierr); return 0; }
/*
 * Benchmark one problem size: build a grid/DM of global size M[0]xM[1]xM[2],
 * solve the operator's system repeatedly (either one FMG full cycle or a
 * plain KSP solve, chosen by -solve_type), and print per-solve timing,
 * flop rate, and millions of equations per second.
 *
 *   comm      - communicator to run on
 *   op        - operator (supplies FE degree, dof, forcing, matrix)
 *   M         - global element counts per dimension
 *   smooth    - pre/post smoothing counts passed to MGFCycle
 *   nrepeat   - minimum number of solves
 *   mintime   - keep repeating until accumulated max-over-ranks solve time
 *               reaches this many seconds (in addition to nrepeat)
 *   memused/memavail - if memused is non-NULL, filled with memory usage
 *   monitor   - forwarded to MGMonitorSet
 */
static PetscErrorCode SampleOnGrid(MPI_Comm comm,Op op,const PetscInt M[3],const PetscInt smooth[2],PetscInt nrepeat,PetscLogDouble mintime,PetscLogDouble *memused,PetscLogDouble *memavail,PetscBool monitor)
{
  PetscErrorCode ierr;
  PetscInt pgrid[3],cmax,fedegree,dof,addquadpts,nlevels,M_max,solve_type=0;
  PetscMPIInt nranks;
  Grid grid;
  DM dm;
  Vec U,V=NULL,F;
  Mat A=NULL;
  KSP ksp=NULL;
  MG mg=NULL;
  const char *solve_types[2] = {"fmg","ksp"};  /* index 0 = FMG (default), 1 = KSP */
  PetscReal L[3];
  PetscBool affine,ksp_only = PETSC_FALSE;
#ifdef USE_HPM
  char eventname[256];
#endif

  PetscFunctionBegin;
  /* -solve_type fmg|ksp selects the solver path; any nonzero index means KSP. */
  ierr = PetscOptionsBegin(comm,NULL,"KSP or FMG solver option",NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-solve_type","Solve with KSP or FMG","",solve_types,2,solve_types[0],&solve_type,NULL);CHKERRQ(ierr);
  if (solve_type) {ksp_only = PETSC_TRUE;}
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = OpGetFEDegree(op,&fedegree);CHKERRQ(ierr);
  ierr = OpGetDof(op,&dof);CHKERRQ(ierr);
  ierr = OpGetAddQuadPts(op,&addquadpts);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&nranks);CHKERRQ(ierr);
  ierr = ProcessGridFindSquarest(nranks,pgrid);CHKERRQ(ierr);

  // It would make sense to either use a different coarsening criteria (perhaps even specified by the sampler). On
  // large numbers of processes, the coarse grids should be square enough that 192 is a good threshold size.
  cmax = 192;  /* coarsening threshold passed to GridCreate */
  ierr = GridCreate(comm,M,pgrid,cmax,&grid);CHKERRQ(ierr);
  ierr = GridGetNumLevels(grid,&nlevels);CHKERRQ(ierr);
  ierr = DMCreateFE(grid,fedegree,dof,addquadpts,&dm);CHKERRQ(ierr);

  /* Domain lengths: longest dimension normalized to 1, others proportional. */
  M_max = PetscMax(M[0],PetscMax(M[1],M[2]));
  L[0] = M[0]*1./M_max;
  L[1] = M[1]*1./M_max;
  L[2] = M[2]*1./M_max;
  ierr = DMFESetUniformCoordinates(dm,L);CHKERRQ(ierr);
  ierr = OpGetAffineOnly(op,&affine);CHKERRQ(ierr);
  /* Distort coordinates unless the operator only supports affine elements. */
  if (!affine) {ierr = DMCoordDistort(dm,L);CHKERRQ(ierr);}

  ierr = DMCreateGlobalVector(dm,&U);CHKERRQ(ierr);
  ierr = DMCreateGlobalVector(dm,&F);CHKERRQ(ierr);
  ierr = OpForcing(op,dm,F);CHKERRQ(ierr);

  if (!ksp_only) {
    /* FMG path: build the multigrid hierarchy once, up front. */
    ierr = MGCreate(op,dm,nlevels,&mg);CHKERRQ(ierr);
    ierr = MGMonitorSet(mg,monitor);CHKERRQ(ierr);
    ierr = MGSetUpPC(mg);CHKERRQ(ierr);
  } else {
    /* KSP path: assemble the operator matrix and a solution work vector. */
    ierr = DMCreateGlobalVector(dm,&V);CHKERRQ(ierr);
    ierr = OpGetMat(op,dm,&A);CHKERRQ(ierr);
    ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
    ierr = KSPSetOperators(ksp,A,A);CHKERRQ(ierr);
    ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
  }

#ifdef USE_HPM
  ierr = PetscSNPrintf(eventname,sizeof eventname,"Solve G[%D %D %D]",M[0],M[1],M[2]);CHKERRQ(ierr);
  HPM_Start(eventname);
#endif

  /* Repeat solves until both the repeat count and the time budget are met. */
  PetscInt i = 0;
  PetscLogDouble sampletime = 0;
  while ( (i<nrepeat) || (sampletime < mintime) ) {
    PetscLogDouble t0,t1,elapsed,flops,eqs;
    ierr = VecZeroEntries(U);CHKERRQ(ierr);
    /* Barrier so all ranks start timing together. */
    ierr = MPI_Barrier(comm);CHKERRQ(ierr);
    ierr = PetscTime(&t0);CHKERRQ(ierr);
    flops = petsc_TotalFlops;  /* snapshot; delta below gives per-solve flops */
    if (!ksp_only) {
      ierr = MGFCycle(op,mg,smooth[0],smooth[1],F,U);CHKERRQ(ierr);
    } else {
      ierr = KSPSolve(ksp,F,V);CHKERRQ(ierr);
      ierr = VecAXPY(V,-1.,U);CHKERRQ(ierr);
    }
    ierr = PetscTime(&t1);CHKERRQ(ierr);
    flops = petsc_TotalFlops - flops;
    elapsed = t1 - t0;
    /* Reduce: slowest rank's time, total flops across ranks. */
    ierr = MPI_Allreduce(MPI_IN_PLACE,&elapsed,1,MPI_DOUBLE,MPI_MAX,comm);CHKERRQ(ierr);
    ierr = MPI_Allreduce(MPI_IN_PLACE,&flops,1,MPI_DOUBLE,MPI_SUM,comm);CHKERRQ(ierr);
    /* Global equation count: (elements*degree+1) nodes per dim, times dof. */
    eqs = (double)(M[0]*fedegree+1)*(M[1]*fedegree+1)*(M[2]*fedegree+1)*dof;
    /* NOTE(review): the "s" column prints this rank's local t1-t0 while the
     * GF and MEq/s columns use the allreduced max 'elapsed' — confirm the
     * mismatch is intentional. */
    ierr = PetscPrintf(comm,"Q%D G[%5D%5D%5D] P[%3D%3D%3D] %10.3e s %10f GF %10f MEq/s\n",fedegree,M[0],M[1],M[2],pgrid[0],pgrid[1],pgrid[2],t1-t0,flops/elapsed*1e-9,eqs/elapsed*1e-6);CHKERRQ(ierr);
    i++;
    sampletime += elapsed;
  }

#ifdef USE_HPM
  HPM_Stop(eventname);
#endif

  if (memused) {ierr = MemoryGetUsage(memused,memavail);CHKERRQ(ierr); }

  /* Destroy in reverse dependency order; NULL handles are safe to destroy. */
  ierr = MGDestroy(&mg);CHKERRQ(ierr);
  ierr = KSPDestroy(&ksp);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = VecDestroy(&V);CHKERRQ(ierr);
  ierr = VecDestroy(&U);CHKERRQ(ierr);
  ierr = VecDestroy(&F);CHKERRQ(ierr);
  ierr = DMDestroy(&dm);CHKERRQ(ierr);
  ierr = GridDestroy(&grid);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}