static void
do_test(MAP *rmap, MAP *cmap)
{
    MAT *A;
    VEC *x, *y;
    INT i, j, m0, n0;
    INT *cols = phgAlloc(cmap->nglobal * sizeof(*cols));
    FLOAT *data = phgAlloc(cmap->nglobal * sizeof(*data));

    A = phgMapCreateMat(rmap, cmap);
    m0 = A->rmap->partition[A->rmap->rank];
    n0 = A->cmap->partition[A->cmap->rank];

    /* Matrix entries: A(I,J) = 1 + (I-1) + (J-1) */
    for (i = m0; i < m0 + A->rmap->nlocal; i++) {
#if 0
	/* Test MatAddEntry */
	for (j = 0; j < A->cmap->nglobal; j++)
	    phgMatAddGlobalEntry(A, i, j, 1.0 + i + j);
#else
	/* Test MatAddEntries */
	for (j = 0; j < A->cmap->nglobal; j++) {
	    cols[j] = j;
	    data[j] = 1.0 + i + j;
	}
	phgMatAddGlobalEntries(A, 1, &i, A->cmap->nglobal, cols, data);
#endif
    }
    phgFree(cols);
    phgFree(data);
    phgMatAssemble(A);

    phgInfo(-1, "y = A * x\n");
    x = phgMapCreateVec(A->cmap, 1);
    for (i = 0; i < x->map->nlocal; i++)
	x->data[i] = 1.0 + i + n0;
    phgVecAssemble(x);
    y = phgMatVec(MAT_OP_N, 1.0, A, x, 0.0, NULL);
    for (i = 0; i < y->map->nlocal; i++)
	phgInfo(-1, " y->data[%d] = %lg\n", i + m0, (double)y->data[i]);
    phgVecDestroy(&x);
    phgVecDestroy(&y);

    phgInfo(-1, "y = A' * x\n");
    x = phgMapCreateVec(A->rmap, 1);
    for (i = 0; i < x->map->nlocal; i++)
	x->data[i] = 1.0 + i + m0;
    phgVecAssemble(x);
    y = phgMatVec(MAT_OP_T, 1.0, A, x, 0.0, NULL);
    for (i = 0; i < y->map->nlocal; i++)
	phgInfo(-1, " y->data[%d] = %lg\n", i + n0, (double)y->data[i]);
    phgVecDestroy(&x);
    phgVecDestroy(&y);

    phgMatDestroy(&A);
}
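/*
 * Reading aid (not part of the original test): with A(I,J) = I + J - 1 and
 * x_J = J (1-based global indices), the expected product has the closed form
 *
 *     y_I = \sum_{J=1}^{N} (I + J - 1) * J
 *         = (I - 1) * N*(N+1)/2 + N*(N+1)*(2N+1)/6,    N = cmap->nglobal,
 *
 * and the analogous formula (with N = rmap->nglobal and the column index in
 * place of I) holds for y = A' * x.  The helper below is only an illustrative
 * sketch for checking the printed values; "expected_y" is a hypothetical name.
 */
static double
expected_y(int I, int N)	/* I is the 1-based global row index */
{
    double s1 = (double)N * (N + 1) / 2.0;			/* \sum_J J   */
    double s2 = (double)N * (N + 1) * (2.0 * N + 1) / 6.0;	/* \sum_J J^2 */

    return (I - 1) * s1 + s2;
}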
/*
 * Jacobi smoother (variant 2):
 * implemented directly on the packed matrix data;
 * ref: matvec in matvec.c
 */
void
mg_Jacobi2(MAT *A, VEC *x, VEC *b, int nsmooth, void *ctx)
{
    INT i, j, k, n, *pc, *pc_offp;
    FLOAT *pd, *pd_offp, *vx, *vx0, *vb;
    FLOAT sum, omega = _p->smooth_damp;
    VEC *x0;
#if USE_MPI
    FLOAT *offp_data = NULL;
#endif /* USE_MPI */

    MagicCheck(VEC, x);
    MagicCheck(VEC, b);
    assert(x == NULL || x->nvec == 1);
    assert(b == NULL || b->nvec == 1);
    if (x != NULL && !x->assembled)
	phgVecAssemble(x);
    if (b != NULL && !b->assembled)
	phgVecAssemble(b);
    x0 = phgMapCreateVec(x->map, 1);

    assert(A->type != PHG_DESTROYED);
    if (!A->assembled)
	phgMatAssemble(A);
    assert(A->type != PHG_MATRIX_FREE);
    phgMatPack(A);

#if USE_MPI
    if (A->cmap->nprocs > 1) {
	offp_data = phgAlloc(A->cinfo->rsize * sizeof(*offp_data));
    }
#endif /* USE_MPI */

    if (A->cmap->nlocal != A->rmap->nlocal ||
	A->cmap->nlocal != x->map->nlocal ||
	A->cmap->nlocal != b->map->nlocal)
	phgError(1, "%s:%d: inconsistent matrix-vector.", __FILE__, __LINE__);

#if USE_MPI
    if (A->cmap->nprocs > 1) {
	phgMapScatterBegin(A->cinfo, x->nvec, x->data, offp_data);
	phgMapScatterEnd(A->cinfo, x->nvec, x->data, offp_data);
    }
#endif /* USE_MPI */

    /* iteration */
    for (k = 0; k < nsmooth; k++) {
	phgVecCopy(x, &x0);

	/* multiply with local data */
	vx = x->data;
	vx0 = x0->data;
	vb = b->data;
	pc = A->packed_cols;
	pd = A->packed_data;
	if (A->cmap->nprocs > 1) {
	    pc_offp = A->packed_cols + A->rmap->nlocal + A->nnz_d;
	    pd_offp = A->packed_data + A->nnz_d;
	} else {
	    pc_offp = NULL;
	    pd_offp = NULL;
	}

	for (i = 0; i < A->rmap->nlocal; i++) {
	    INT jcol;
	    FLOAT aa = 0., dx;

	    /* x_i = (b_i - \sum_{j != i} a_ij * x_j) / a_ii */
	    sum = vb[i];

	    /* local data */
	    if ((n = *(pc++)) != 0) {
		for (j = 0; j < n; j++) {
		    jcol = *(pc++);
		    if (jcol != i) {
			sum -= *(pd++) * vx0[jcol];
		    } else {
			aa = *(pd++);
			assert(fabs(aa) > 1e-14);
		    }
		}
	    }

	    /* remote data */
	    if (pc_offp != NULL && (n = *(pc_offp++)) != 0) {
		for (j = 0; j < n; j++) {
		    jcol = *(pc_offp++);
		    sum -= *(pd_offp++) * offp_data[jcol];
		}
	    }

	    dx = sum / aa - vx[i];
	    vx[i] += omega * dx;
	}

#if USE_MPI
	if (A->cmap->nprocs > 1) {
	    phgMapScatterBegin(A->cinfo, x->nvec, x->data, offp_data);
	    phgMapScatterEnd(A->cinfo, x->nvec, x->data, offp_data);
	}
#endif /* USE_MPI */
    }

    phgVecDestroy(&x0);
#if USE_MPI
    phgFree(offp_data);
#endif /* USE_MPI */
    return;
}
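/*
 * Reading aid (not from the original source): a minimal serial sketch of the
 * damped Jacobi sweep performed above, written for a dense n x n matrix
 * instead of PHG's packed storage.  All names (jacobi_sweep, a, x_old, ...)
 * are illustrative only.
 */
static void
jacobi_sweep(int n, const double *a, const double *b, double *x,
	     double *x_old, double omega)
{
    int i, j;

    for (i = 0; i < n; i++)		/* Jacobi uses the previous iterate */
	x_old[i] = x[i];
    for (i = 0; i < n; i++) {
	double sum = b[i], aii = a[i * n + i];

	for (j = 0; j < n; j++)
	    if (j != i)
		sum -= a[i * n + j] * x_old[j];
	/* x_i += omega * ((b_i - \sum_{j != i} a_ij x^{old}_j) / a_ii - x_i) */
	x[i] += omega * (sum / aii - x[i]);
    }
}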
/*
 * Gauss-Seidel smoother for vector unknowns:
 *
 * As GS:
 * 1. exchange off-process data
 * 2. smooth local dof
 *
 * Assumption:
 * 1. the unknown dof is a vector of dim 3
 * 2. the matrix block coupling one vector unknown is
 *        | a1 -b  0 |
 *        |  b a2  0 |
 *        |  0  0 a3 |
 */
void
mg_GaussSidel_vec(MAT *A, VEC *x, VEC *b, int nsmooth, void *ctx)
{
    INT i, j, k, l, n, *pc, *pc0, *pc_offp, nlocal;
    FLOAT *pd, *pd0, *pd_offp, *vx, *vb;
    size_t *ps;
    FLOAT sum[Dim], omega = _p->smooth_damp;
#if USE_MPI
    FLOAT *offp_data = NULL;
#endif /* USE_MPI */

    MagicCheck(VEC, x);
    MagicCheck(VEC, b);
    assert(x == NULL || x->nvec == 1);
    assert(b == NULL || b->nvec == 1);
    if (x != NULL && !x->assembled)
	phgVecAssemble(x);
    if (b != NULL && !b->assembled)
	phgVecAssemble(b);

    assert(A->type != PHG_DESTROYED);
    if (!A->assembled)
	phgMatAssemble(A);
    assert(A->type != PHG_MATRIX_FREE);
    phgMatPack(A);

#if USE_MPI
    if (A->cmap->nprocs > 1) {
	offp_data = phgAlloc(A->cinfo->rsize * sizeof(*offp_data));
    }
#endif /* USE_MPI */

    if (A->cmap->nlocal != A->rmap->nlocal ||
	A->cmap->nlocal != x->map->nlocal ||
	A->cmap->nlocal != b->map->nlocal)
	phgError(1, "%s:%d: inconsistent matrix-vector.", __FILE__, __LINE__);
    if (A->cmap->nlocal % Dim != 0)
	phgError(1, "%s: assume vector dof of dim 3!\n", __FUNCTION__);

#if USE_MPI
    if (A->cmap->nprocs > 1) {
	phgMapScatterBegin(A->cinfo, x->nvec, x->data, offp_data);
	phgMapScatterEnd(A->cinfo, x->nvec, x->data, offp_data);
    }
#endif /* USE_MPI */

    /* iteration */
    for (l = 0; l < nsmooth; l++) {
	INT i_start, i_add;

	/* multiply with local data */
	vx = x->data;
	vb = b->data;
	pc0 = A->packed_cols;
	pd0 = A->packed_data;
	ps = A->packed_ind;
	nlocal = A->rmap->nlocal;

	/*
	 * lexicographic order: low to high.
	 * Note: alternating low->high and high->low does not help.
	 */
	if (TRUE || l % 2 == 0) {
	    i_start = 0;
	    i_add = Dim;
	} else {
	    i_start = nlocal - Dim;
	    i_add = -Dim;
	}

	//for (i = i_start; i < A->rmap->nlocal && i >= 0; i += i_add) {
	for (i = 0; i < nlocal; i += Dim) {
	    INT jcol;
	    FLOAT aa[Dim][Dim], det, dx[Dim];

	    memset(aa, 0, sizeof(aa));
	    sum[0] = vb[i];
	    sum[1] = vb[i + 1];
	    sum[2] = vb[i + 2];

	    /* local data */
	    pc = pc0 + PACK_COL(ps, i);
	    pd = pd0 + PACK_DAT(ps, i);
	    for (k = 0; k < Dim; k++) {
		if ((n = *(pc++)) != 0) {
		    for (j = 0; j < n; j++) {
			jcol = *(pc++);
			if (jcol < i || jcol > i + 2) {
			    sum[k] -= *(pd++) * vx[jcol];
			} else {
			    /* diagonal block: jcol = i, i+1, i+2 */
			    aa[k][jcol - i] = *(pd++);
			}
		    }
		}
	    }

	    /* remote data */
	    if (A->cmap->nprocs > 1) {
		pc_offp = pc0 + PACK_COL_OFFP(ps, i, nlocal);
		pd_offp = pd0 + PACK_DAT_OFFP(ps, i, nlocal);
		for (k = 0; k < Dim; k++) {
		    if ((n = *(pc_offp++)) != 0) {
			for (j = 0; j < n; j++) {
			    jcol = *(pc_offp++);
			    sum[k] -= *(pd_offp++) * offp_data[jcol];
			}
		    }
		}
	    }

	    /* solve the 3x3 diagonal block: the first two components by
	     * Cramer's rule on the 2x2 block, the third one directly */
	    det = (aa[0][0] * aa[1][1] - aa[0][1] * aa[1][0]);
	    assert(Fabs(det) > 1e-12);
	    det = 1. / det;
	    dx[0] = (aa[1][1] * sum[0] - aa[0][1] * sum[1]) * det - vx[i];
	    dx[1] = (aa[0][0] * sum[1] - aa[1][0] * sum[0]) * det - vx[i + 1];
	    dx[2] = (1. / aa[2][2] * sum[2]) - vx[i + 2];
	    vx[i] += omega * dx[0];
	    vx[i + 1] += omega * dx[1];
	    vx[i + 2] += omega * dx[2];
	}

#if USE_MPI
	if (A->cmap->nprocs > 1) {
	    phgMapScatterBegin(A->cinfo, x->nvec, x->data, offp_data);
	    phgMapScatterEnd(A->cinfo, x->nvec, x->data, offp_data);
	}
#endif /* USE_MPI */
    }

#if USE_MPI
    phgFree(offp_data);
#endif /* USE_MPI */
    return;
}
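/*
 * Reading aid (not from the original source): the per-node block solve used
 * above, shown in isolation.  The 3x3 diagonal block couples only the first
 * two components, so a 2x2 Cramer solve plus one scalar division suffices;
 * solve_block3 is an illustrative name.
 */
static void
solve_block3(FLOAT aa[3][3], FLOAT sum[3], FLOAT vx[3], FLOAT dx[3])
{
    FLOAT det = aa[0][0] * aa[1][1] - aa[0][1] * aa[1][0];

    det = 1.0 / det;
    dx[0] = (aa[1][1] * sum[0] - aa[0][1] * sum[1]) * det - vx[0];
    dx[1] = (aa[0][0] * sum[1] - aa[1][0] * sum[0]) * det - vx[1];
    dx[2] = sum[2] / aa[2][2] - vx[2];
}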
/*
 * Gauss-Seidel smoother (variant 2):
 *
 * 1. interior dof (!REMOTE) are smoothed, then the off-process data are updated;
 * 2. processor boundary dof (REMOTE) are smoothed, then the off-process data
 *    are updated.
 *
 * Note: needs types_vec.
 */
void
mg_GaussSidel2(MAT *A, VEC *x, VEC *b, int nsmooth, void *ctx)
{
    MG_LEVEL *ml = (MG_LEVEL *)ctx;
    INT i, j, k, n, *pc, *pc_offp;
    FLOAT *pd, *pd_offp, *vx, *vb;
    FLOAT sum, omega = _p->smooth_damp;
#if USE_MPI
    FLOAT *offp_data = NULL;
#endif /* USE_MPI */

    MagicCheck(VEC, x);
    MagicCheck(VEC, b);
    assert(x == NULL || x->nvec == 1);
    assert(b == NULL || b->nvec == 1);
    if (x != NULL && !x->assembled)
	phgVecAssemble(x);
    if (b != NULL && !b->assembled)
	phgVecAssemble(b);

    assert(A->type != PHG_DESTROYED);
    if (!A->assembled)
	phgMatAssemble(A);
    assert(A->type != PHG_MATRIX_FREE);
    phgMatPack(A);

#if USE_MPI
    if (A->cmap->nprocs > 1) {
	offp_data = phgAlloc(A->cinfo->rsize * sizeof(*offp_data));
    }
#endif /* USE_MPI */

    if (A->cmap->nlocal != A->rmap->nlocal ||
	A->cmap->nlocal != x->map->nlocal ||
	A->cmap->nlocal != b->map->nlocal)
	phgError(1, "%s:%d: inconsistent matrix-vector.", __FILE__, __LINE__);

#if USE_MPI
    if (A->cmap->nprocs > 1) {
	phgMapScatterBegin(A->cinfo, x->nvec, x->data, offp_data);
	phgMapScatterEnd(A->cinfo, x->nvec, x->data, offp_data);
    }
#endif /* USE_MPI */

    /* iteration */
    for (k = 0; k < nsmooth; k++) {
	/* multiply with local data */
	vx = x->data;
	vb = b->data;

	/* First, interior dof (!REMOTE) are smoothed, then the off-process
	 * data are updated */
	pc = A->packed_cols;
	pd = A->packed_data;
	if (A->cmap->nprocs > 1) {
	    pc_offp = A->packed_cols + A->rmap->nlocal + A->nnz_d;
	    pd_offp = A->packed_data + A->nnz_d;
	} else {
	    pc_offp = NULL;
	    pd_offp = NULL;
	}

	for (i = 0; i < A->rmap->nlocal; i++) {
	    INT jcol;
	    FLOAT aa = 0., dx;

	    if (ml->types_vec[i] & REMOTE) {
		/* skip the row, but advance the packed pointers */
		if ((n = *(pc++)) != 0) {
		    pc += n;
		    pd += n;
		}
		if (pc_offp != NULL && (n = *(pc_offp++)) != 0) {
		    pc_offp += n;
		    pd_offp += n;
		}
		continue;
	    }

	    sum = vb[i];

	    /* local data */
	    if ((n = *(pc++)) != 0) {
		for (j = 0; j < n; j++) {
		    jcol = *(pc++);
		    if (jcol != i) {
			sum -= *(pd++) * vx[jcol];
		    } else {
			aa = *(pd++);
			assert(fabs(aa) > 1e-14);
		    }
		}
	    }

	    /* remote data */
	    if (pc_offp != NULL && (n = *(pc_offp++)) != 0) {
		for (j = 0; j < n; j++) {
		    jcol = *(pc_offp++);
		    sum -= *(pd_offp++) * offp_data[jcol];
		}
	    }

	    dx = sum / aa - vx[i];
	    vx[i] += omega * dx;
	}

#if USE_MPI
	if (A->cmap->nprocs > 1) {
	    phgMapScatterBegin(A->cinfo, x->nvec, x->data, offp_data);
	    phgMapScatterEnd(A->cinfo, x->nvec, x->data, offp_data);
	}
#endif /* USE_MPI */

	/* Second, processor boundary dof (REMOTE) are smoothed, then the
	 * off-process data are updated */
	pc = A->packed_cols;
	pd = A->packed_data;
	if (A->cmap->nprocs > 1) {
	    pc_offp = A->packed_cols + A->rmap->nlocal + A->nnz_d;
	    pd_offp = A->packed_data + A->nnz_d;
	} else {
	    pc_offp = NULL;
	    pd_offp = NULL;
	}

	for (i = 0; i < A->rmap->nlocal; i++) {
	    INT jcol;
	    FLOAT aa = 0., dx;

	    if (!(ml->types_vec[i] & REMOTE)) {
		/* skip the row, but advance the packed pointers */
		if ((n = *(pc++)) != 0) {
		    pc += n;
		    pd += n;
		}
		if (pc_offp != NULL && (n = *(pc_offp++)) != 0) {
		    pc_offp += n;
		    pd_offp += n;
		}
		continue;
	    }

	    sum = vb[i];

	    /* local data */
	    if ((n = *(pc++)) != 0) {
		for (j = 0; j < n; j++) {
		    jcol = *(pc++);
		    if (jcol != i) {
			sum -= *(pd++) * vx[jcol];
		    } else {
			aa = *(pd++);
			assert(fabs(aa) > 1e-14);
		    }
		}
	    }

	    /* remote data */
	    if (pc_offp != NULL && (n = *(pc_offp++)) != 0) {
		for (j = 0; j < n; j++) {
		    jcol = *(pc_offp++);
		    sum -= *(pd_offp++) * offp_data[jcol];
		}
	    }

	    dx = sum / aa - vx[i];
	    vx[i] += omega * dx;
	}

#if USE_MPI
	if (A->cmap->nprocs > 1) {
	    phgMapScatterBegin(A->cinfo, x->nvec, x->data, offp_data);
	    phgMapScatterEnd(A->cinfo, x->nvec, x->data, offp_data);
	}
#endif /* USE_MPI */
    }

#if USE_MPI
    phgFree(offp_data);
#endif /* USE_MPI */
    return;
}
int
main(int argc, char *argv[])
{
    MAT *A, *B;
    VEC *U = NULL, *x;
    FLOAT *C;
    SOLVER *solver, *solver1 = NULL;
    INT i, j, k, n, *pvt, N = 1000, K = 2;
    char *main_opts = NULL, *sub_opts = NULL;

    phgOptionsRegisterInt("-n", "N value", &N);
    phgOptionsRegisterInt("-k", "K value", &K);
    phgOptionsRegisterString("-main_solver_opts", "Options for the main solver",
			     &main_opts);
    phgOptionsRegisterString("-sub_solver_opts", "Options for the subsolver",
			     &sub_opts);

    /* a direct solver is preferable for the sparse matrix */
    phgOptionsPreset("-solver mumps");

    phgInit(&argc, &argv);

    phgPrintf(
"----------------------------------------------------------------------------\n"
"This code solves (A+UU^t)x=b using the Sherman-Morrison-Woodbury formula.\n"
"Note: the following options disable the Sherman-Morrison-Woodbury algorithm\n"
"and switch to the default solver instead:\n"
" -preonly_pc_type solver -preonly_pc_opts \"-solver_maxit 2000\"\n"
"----------------------------------------------------------------------------\n"
    );

    phgPrintf("Generating the linear system: N = %"dFMT", K = %"dFMT"\n", N, K);

    /* A is a distributed NxN SPD tridiagonal matrix (A = [-1, 2, -1]) */
    n = N / phgNProcs + (phgRank < (N % phgNProcs) ? 1 : 0);
    A = phgMatCreate(phgComm, n, N);
    phgPrintf(" Generating matrix A.\n");
    for (i = 0; i < n; i++) {
	/* diagonal */
	phgMatAddEntry(A, i, i, 2.0);
	/* diagonal - 1 */
	if (i > 0)
	    phgMatAddEntry(A, i, i - 1, -1.0);
	else if (phgRank > 0)
	    phgMatAddLGEntry(A, i, A->rmap->partition[phgRank] - 1, -1.0);
	/* diagonal + 1 */
	if (i < n - 1)
	    phgMatAddEntry(A, i, i + 1, -1.0);
	else if (phgRank < phgNProcs - 1)
	    phgMatAddLGEntry(A, i, A->rmap->partition[phgRank] + n, -1.0);
    }
    phgMatAssemble(A);

    /* U is a K-component vector */
    U = phgMapCreateVec(A->rmap, K);
    phgVecRandomize(U, 123);

    /* solver1 is the solver for A */
    phgOptionsPush();
    phgOptionsSetOptions(sub_opts);
    solver1 = phgMat2Solver(SOLVER_DEFAULT, A);
    phgOptionsPop();

    /* x is a scratch vector */
    x = phgMapCreateVec(A->rmap, 1);

    /* C is a KxK dense matrix, pvt is an integer array; they store the LU
     * factorization of (I + U^t*inv(A)*U) */
    phgPrintf(" Generating the dense matrix I+U^t*inv(A)*U.\n");
    C = phgCalloc(K * K, sizeof(*C));
    pvt = phgAlloc(K * sizeof(*pvt));
    for (i = 0; i < K; i++) {
	for (j = 0; j < n; j++) {
	    solver1->rhs->data[j] = U->data[i * n + j];
	    x->data[j] = 0.0;
	}
	solver1->rhs->assembled = TRUE;
	phgSolverVecSolve(solver1, FALSE, x);
	for (j = 0; j < K; j++)
	    for (k = 0; k < n; k++)
		C[i * K + j] += U->data[j * n + k] * x->data[k];
    }
#if USE_MPI
    if (U->map->nprocs > 1) {
	FLOAT *tmp = phgAlloc(K * K * sizeof(*tmp));
	MPI_Allreduce(C, tmp, K * K, PHG_MPI_FLOAT, MPI_SUM, U->map->comm);
	phgFree(C);
	C = tmp;
    }
#endif /* USE_MPI */
    for (i = 0; i < K; i++)
	C[i * K + i] += 1.0;
    phgPrintf(" Factorizing the dense matrix I+U^t*inv(A)*U.\n");
    phgSolverDenseLU(K, C, pvt);

    /* B is a matrix-free matrix representing A + U*U^t; B->mv_data is used
     * to pass A, U, solver1, C and pvt to the callback functions */
    B = phgMapCreateMatrixFreeMat(A->rmap, A->cmap, funcB,
		/* arguments carried over to CB functions */
		A, U, solver1, C, pvt, NULL);

    /* solver is a PreOnly solver for B whose pc_proc is set to sherman().
     *
     * Note: pcg, gmres, or petsc can also be used for this solver, in which
     * case the solution obtained with the Sherman-Morrison formula is
     * iteratively refined. */
    phgOptionsPush();
    phgOptionsSetOptions("-solver preonly");
    phgOptionsSetOptions(main_opts);
    solver = phgMat2Solver(SOLVER_DEFAULT, B);
    phgSolverSetPC(solver, solver, sherman);
    phgOptionsPop();

    for (i = 0; i < n; i++)
	x->data[i] = 1.0;
    phgMatVec(MAT_OP_N, 1.0, B, x, 0.0, &solver->rhs);

    phgPrintf("Solving the linear system.\n");
    /* reset initial solution to zero */
    memset(x->data, 0, n * sizeof(*x->data));
    phgSolverVecSolve(solver, TRUE, x);

    for (i = 0; i < n; i++)
	solver->rhs->data[i] = 1.0;
    phgVecAXPBY(-1.0, solver->rhs, 1.0, &x);
    phgPrintf("Checking the result: |x - x_exact| / |x_exact| = %lg\n",
	      (double)phgVecNorm2(x, 0, NULL) / sqrt((double)N));

    phgSolverDestroy(&solver);
    phgSolverDestroy(&solver1);
    phgMatDestroy(&A);
    phgMatDestroy(&B);
    phgVecDestroy(&U);
    phgVecDestroy(&x);
    phgFree(C);
    phgFree(pvt);
    phgFinalize();

    return 0;
}
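/*
 * Reading aid (not part of the original example): the identity behind
 * funcB/sherman() above is the standard Sherman-Morrison-Woodbury formula
 *
 *     (A + U U^t)^{-1} b = A^{-1} b
 *                        - A^{-1} U (I + U^t A^{-1} U)^{-1} U^t A^{-1} b,
 *
 * so once the K x K matrix C = I + U^t*inv(A)*U has been LU-factorized (as
 * done above), applying the preconditioner typically costs two solves with A
 * plus one small dense solve with C.
 */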
static void
build_matrices(MAT *matA, MAT *matM, MAT *matC, MAT *S, FLOAT s,
	       DOF *u_h, DOF *p_h)
/* S is used to store s*diag(M(p_h))^(-1) */
{
    int N = u_h->type->nbas;	/* number of basis functions in an element */
    int M = p_h->type->nbas;
    int i, j;
    GRID *g = u_h->g;
    ELEMENT *e;
    FLOAT A[N][N], B[N][N], C[N][M];
    INT I[N], Ip[M];
    INT k, n0;
    VEC *V = phgMapCreateVec(S->rmap, 1);

    phgVecDisassemble(V);	/* for phgVecAddEntry */
    ForAllElements(g, e) {
	for (i = 0; i < N; i++) {
	    I[i] = phgMapE2L(matA->rmap, 0, e, i);
	    for (k = 0; k < M; k++) {
		/* \int \grad\psi_k\cdot\phi_i */
		C[i][k] = phgQuadGradBasDotBas(e, p_h, k, u_h, i, QUAD_DEFAULT);
	    }
	    for (j = 0; j <= i; j++) {
		/* \int \phi_i\cdot\phi_j */
		B[j][i] = B[i][j] =
		    phgQuadBasDotBas(e, u_h, j, u_h, i, QUAD_DEFAULT);
		/* \int \curl\phi_i\cdot\curl\phi_j */
		A[j][i] = A[i][j] =
		    phgQuadCurlBasDotCurlBas(e, u_h, j, u_h, i, QUAD_DEFAULT);
	    }
	}
	for (i = 0; i < M; i++) {
	    Ip[i] = phgMapE2L(matC->cmap, 0, e, i);
	    if (Ip[i] < 0)	/* boundary entry */
		continue;
	    phgVecAddEntry(V, 0, Ip[i],
			   phgQuadBasDotBas(e, p_h, i, p_h, i, QUAD_DEFAULT));
	}
	/* loop on basis functions */
	for (i = 0; i < N; i++) {
	    if (phgDofDirichletBC(u_h, e, i, NULL, NULL, NULL, DOF_PROJ_CROSS))
		continue;
	    phgMatAddEntries(matA, 1, I + i, N, I, A[i]);
	    phgMatAddEntries(matM, 1, I + i, N, I, B[i]);
	    phgMatAddEntries(matC, 1, I + i, M, Ip, C[i]);
	}
    }
    phgVecAssemble(V);
    n0 = V->map->partition[V->map->rank];
    for (k = 0; k < V->map->nlocal; k++)
	phgMatAddGlobalEntry(S, k + n0, k + n0, s / V->data[k]);
    phgVecDestroy(&V);
    phgMatAssemble(S);
    phgMatSetupDiagonal(S);

    phgMatAssemble(matA);
    phgMatAssemble(matM);
    phgMatAssemble(matC);
}
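/*
 * Summary of what build_matrices() assembles per element, added as a reading
 * aid only (\phi_i: basis functions of u_h, \psi_k: basis functions of p_h):
 *
 *     matA_{ij} = \int \curl\phi_i \cdot \curl\phi_j
 *     matM_{ij} = \int \phi_i \cdot \phi_j
 *     matC_{ik} = \int \grad\psi_k \cdot \phi_i
 *     S         = s * diag(\int \psi_k \psi_k)^{-1}
 */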
int
main(int argc, char *argv[])
{
    GRID *g;
    DOF *u_h;
    MAT *A, *A0, *B;
    MAP *map;
    INT i;
    size_t nnz, mem, mem_peak;
    VEC *x, *y0, *y1, *y2;
    double t0, t1, dnz, dnz1, mflops, mop;
    char *fn = "../test/cube.dat";
    FLOAT mem_max = 300;
    INT refine = 0;

    phgOptionsRegisterFilename("-mesh_file", "Mesh file", (char **)&fn);
    phgOptionsRegisterInt("-loop_count", "Loop count", &loop_count);
    phgOptionsRegisterInt("-refine", "Refinement level", &refine);
    phgOptionsRegisterFloat("-mem_max", "Maximum memory (MB)", &mem_max);

    phgInit(&argc, &argv);

    g = phgNewGrid(-1);
    if (!phgImport(g, fn, FALSE))
	phgError(1, "can't read file \"%s\".\n", fn);
    phgRefineAllElements(g, refine);
    u_h = phgDofNew(g, DOF_DEFAULT, 1, "u_h", DofNoAction);

    while (TRUE) {
	phgPrintf("\n");
	if (phgBalanceGrid(g, 1.2, 1, NULL, 0.))
	    phgPrintf("Repartition mesh, %d submeshes, load imbalance: %lg\n",
		      g->nprocs, (double)g->lif);
	map = phgMapCreate(u_h, NULL);
	A = phgMapCreateMat(map, map);
	A->handle_bdry_eqns = TRUE;
	build_matrix(A, u_h);
	phgMatAssemble(A);
	/* Note: A is unsymmetric (A' != A) if boundary entries are not removed */
	phgMatRemoveBoundaryEntries(A);
#if 0
	/* test block matrix operation */
	A0 = phgMatCreateBlockMatrix(g->comm, 1, 1, &A, NULL);
#else
	A0 = A;
#endif
	phgPrintf("%d DOF, %d elems, %d submeshes, matrix size: %d, LIF: %lg\n",
		  DofGetDataCountGlobal(u_h), g->nleaf_global, g->nprocs,
		  A->rmap->nglobal, (double)g->lif);

	/* test PHG mat-vec multiply */
	x = phgMapCreateVec(A->cmap, 1);
	y1 = phgMapCreateVec(A->rmap, 1);
	phgVecRandomize(x, 123);
	phgMatVec(MAT_OP_N, 1.0, A0, x, 0.0, &y1);

	phgPerfGetMflops(g, NULL, NULL);	/* reset flops counter */
	t0 = phgGetTime(NULL);
	for (i = 0; i < loop_count; i++) {
	    phgMatVec(MAT_OP_N, 1.0, A0, x, 0.0, &y1);
	}
	t1 = phgGetTime(NULL);
	mflops = phgPerfGetMflops(g, NULL, NULL);
	y0 = phgVecCopy(y1, NULL);
	nnz = A->nnz_d + A->nnz_o;
#if USE_MPI
	dnz1 = nnz;
	MPI_Reduce(&dnz1, &dnz, 1, MPI_DOUBLE, MPI_SUM, 0, g->comm);
#else
	dnz = nnz;
#endif
	mop = loop_count * (dnz + dnz - A->rmap->nlocal) * 1e-6;
	phgPrintf("\n");
	t1 -= t0;
	phgPrintf(" PHG: time %0.4lf, nnz %0.16lg, %0.2lfMF (%0.2lfMF)\n",
		  t1, dnz, mop / (t1 == 0 ? 1. : t1), mflops);

	/* test trans(A)*x */
	phgPerfGetMflops(g, NULL, NULL);	/* reset flops counter */
	t0 = phgGetTime(NULL);
	for (i = 0; i < loop_count; i++) {
	    phgMatVec(MAT_OP_T, 1.0, A0, x, 0.0, &y1);
	}
	t1 = phgGetTime(NULL);
	mflops = phgPerfGetMflops(g, NULL, NULL);
	t1 -= t0;
	phgPrintf(" A'*x: time %0.4lf, nnz %0.16lg, %0.2lfMF (%0.2lfMF), "
		  "err: %le\n", t1, dnz, mop / (t1 == 0 ? 1. : t1), mflops,
		  (double)phgVecNorm2(phgVecAXPBY(-1.0, y0, 1.0, &y1), 0, NULL));

	/* time B = A*A */
	phgPerfGetMflops(g, NULL, NULL);	/* reset flops counter */
	t0 = phgGetTime(NULL);
	B = phgMatMat(MAT_OP_N, MAT_OP_N, 1.0, A, A, 0.0, NULL);
	t1 = phgGetTime(NULL);
	mflops = phgPerfGetMflops(g, NULL, NULL);
	nnz = B->nnz_d + B->nnz_o;
#if USE_MPI
	dnz1 = nnz;
	MPI_Reduce(&dnz1, &dnz, 1, MPI_DOUBLE, MPI_SUM, 0, g->comm);
#else
	dnz = nnz;
#endif
	/* compare B*x <--> A*A*x */
	y2 = phgMatVec(MAT_OP_N, 1.0, B, x, 0.0, NULL);
	phgMatVec(MAT_OP_N, 1.0, A0, y0, 0.0, &y1);
	phgMatDestroy(&B);
	t1 -= t0;
	phgPrintf(" A*A: time %0.4lf, nnz %0.16lg, %0.2lfMF, err: %le\n",
		  t1, dnz, mflops,
		  (double)phgVecNorm2(phgVecAXPBY(-1.0, y1, 1.0, &y2), 0, NULL));

#if USE_PETSC
	{
	    Mat ma, mb;
	    MatInfo info;
	    Vec va, vb, vc;
	    PetscScalar *vec;

	    ma = phgPetscCreateMatAIJ(A);
	    MatGetVecs(ma, PETSC_NULL, &va);
	    VecDuplicate(va, &vb);
	    VecGetArray(va, &vec);
	    memcpy(vec, x->data, x->map->nlocal * sizeof(*vec));
	    VecRestoreArray(va, &vec);
	    MatMult(ma, va, vb);

	    phgPerfGetMflops(g, NULL, NULL);	/* reset flops counter */
	    t0 = phgGetTime(NULL);
	    for (i = 0; i < loop_count; i++) {
		MatMult(ma, va, vb);
	    }
	    t1 = phgGetTime(NULL);
	    mflops = phgPerfGetMflops(g, NULL, NULL);
	    VecGetArray(vb, &vec);
	    memcpy(y1->data, vec, x->map->nlocal * sizeof(*vec));
	    VecRestoreArray(vb, &vec);
	    MatGetInfo(ma, MAT_GLOBAL_SUM, &info);
	    /*phgPrintf(" --------------------------------------------"
		      "-------------------------\n");*/
	    phgPrintf("\n");
	    t1 -= t0;
	    dnz = info.nz_used;
	    phgPrintf(" PETSc: time %0.4lf, nnz %0.16lg, %0.2lfMF (%0.2lfMF), "
		      "err: %le\n", t1, dnz, mop / (t1 == 0 ? 1. : t1), mflops,
		      (double)phgVecNorm2(phgVecAXPBY(-1.0, y0, 1.0, &y1),
					  0, NULL));

	    phgPerfGetMflops(g, NULL, NULL);	/* reset flops counter */
	    t0 = phgGetTime(NULL);
	    for (i = 0; i < loop_count; i++) {
		MatMultTranspose(ma, va, vb);
	    }
	    t1 = phgGetTime(NULL);
	    mflops = phgPerfGetMflops(g, NULL, NULL);
	    VecGetArray(vb, &vec);
	    memcpy(y1->data, vec, x->map->nlocal * sizeof(*vec));
	    VecRestoreArray(vb, &vec);
	    t1 -= t0;
	    phgPrintf(" A'*x: time %0.4lf, nnz %0.16lg, %0.2lfMF (%0.2lfMF), "
		      "err: %le\n", t1, dnz, mop / (t1 == 0 ? 1. : t1), mflops,
		      (double)phgVecNorm2(phgVecAXPBY(-1.0, y0, 1.0, &y1),
					  0, NULL));

	    phgPerfGetMflops(g, NULL, NULL);	/* reset flops counter */
	    t0 = phgGetTime(NULL);
	    MatMatMult(ma, ma, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &mb);
	    t1 = phgGetTime(NULL);
	    mflops = phgPerfGetMflops(g, NULL, NULL);
	    t1 -= t0;
	    MatGetInfo(mb, MAT_GLOBAL_SUM, &info);
	    dnz = info.nz_used;
	    VecDuplicate(va, &vc);
	    /* compare B*x <--> A*A*x */
	    MatMult(ma, vb, vc);
	    MatMult(mb, va, vb);
	    VecGetArray(vb, &vec);
	    memcpy(y1->data, vec, x->map->nlocal * sizeof(*vec));
	    VecRestoreArray(vb, &vec);
	    VecGetArray(vc, &vec);
	    memcpy(y2->data, vec, x->map->nlocal * sizeof(*vec));
	    VecRestoreArray(vc, &vec);
	    phgPrintf(" A*A: time %0.4lf, nnz %0.16lg, %0.2lfMF, err: %le\n",
		      t1, dnz, mflops,
		      (double)phgVecNorm2(phgVecAXPBY(-1.0, y1, 1.0, &y2),
					  0, NULL));
	    phgPetscMatDestroy(&mb);
	    phgPetscMatDestroy(&ma);
	    phgPetscVecDestroy(&va);
	    phgPetscVecDestroy(&vb);
	    phgPetscVecDestroy(&vc);
	}
#endif /* USE_PETSC */

#if USE_HYPRE
	{
	    HYPRE_IJMatrix ma;
	    HYPRE_IJVector va, vb, vc;
	    HYPRE_ParCSRMatrix par_ma;
	    hypre_ParCSRMatrix *par_mb;
	    HYPRE_ParVector par_va, par_vb, par_vc;
	    HYPRE_Int offset, *ni, start, end;

	    assert(sizeof(INT) == sizeof(int) && sizeof(FLOAT) == sizeof(double));
	    setup_hypre_mat(A, &ma);
	    ni = phgAlloc(2 * A->rmap->nlocal * sizeof(*ni));
	    offset = A->cmap->partition[A->cmap->rank];
	    for (i = 0; i < A->rmap->nlocal; i++)
		ni[i] = i + offset;
	    HYPRE_IJVectorCreate(g->comm, offset, offset + A->rmap->nlocal - 1, &va);
	    HYPRE_IJVectorCreate(g->comm, offset, offset + A->rmap->nlocal - 1, &vb);
	    HYPRE_IJVectorCreate(g->comm, offset, offset + A->rmap->nlocal - 1, &vc);
	    HYPRE_IJVectorSetObjectType(va, HYPRE_PARCSR);
	    HYPRE_IJVectorSetObjectType(vb, HYPRE_PARCSR);
	    HYPRE_IJVectorSetObjectType(vc, HYPRE_PARCSR);
	    HYPRE_IJVectorSetMaxOffProcElmts(va, 0);
	    HYPRE_IJVectorSetMaxOffProcElmts(vb, 0);
	    HYPRE_IJVectorSetMaxOffProcElmts(vc, 0);
	    HYPRE_IJVectorInitialize(va);
	    HYPRE_IJVectorInitialize(vb);
	    HYPRE_IJVectorInitialize(vc);
	    HYPRE_IJMatrixGetObject(ma, (void **)(void *)&par_ma);
	    HYPRE_IJVectorGetObject(va, (void **)(void *)&par_va);
	    HYPRE_IJVectorGetObject(vb, (void **)(void *)&par_vb);
	    HYPRE_IJVectorGetObject(vc, (void **)(void *)&par_vc);
	    HYPRE_IJVectorSetValues(va, A->cmap->nlocal, ni, (double *)x->data);
	    HYPRE_IJVectorAssemble(va);
	    HYPRE_IJVectorAssemble(vb);
	    HYPRE_IJVectorAssemble(vc);
	    HYPRE_IJMatrixGetRowCounts(ma, A->cmap->nlocal, ni, ni + A->rmap->nlocal);
	    for (i = 0, nnz = 0; i < A->rmap->nlocal; i++)
		nnz += ni[A->rmap->nlocal + i];
#if USE_MPI
	    dnz1 = nnz;
	    MPI_Reduce(&dnz1, &dnz, 1, MPI_DOUBLE, MPI_SUM, 0, g->comm);
#else
	    dnz = nnz;
#endif
	    HYPRE_ParCSRMatrixMatvec(1.0, par_ma, par_va, 0.0, par_vb);

	    phgPerfGetMflops(g, NULL, NULL);	/* reset flops counter */
	    t0 = phgGetTime(NULL);
	    for (i = 0; i < loop_count; i++) {
		HYPRE_ParCSRMatrixMatvec(1.0, par_ma, par_va, 0.0, par_vb);
	    }
	    t1 = phgGetTime(NULL);
	    mflops = phgPerfGetMflops(g, NULL, NULL);
	    HYPRE_IJVectorGetValues(vb, A->rmap->nlocal, ni, (double *)y1->data);
	    /*phgPrintf(" --------------------------------------------"
		      "-------------------------\n");*/
	    phgPrintf("\n");
	    t1 -= t0;
	    phgPrintf(" HYPRE: time %0.4lf, nnz %0.16lg, %0.2lfMF (%0.2lfMF), "
		      "err: %le\n", t1, dnz, mop / (t1 == 0 ? 1. : t1), mflops,
		      (double)phgVecNorm2(phgVecAXPBY(-1.0, y0, 1.0, &y1),
					  0, NULL));

	    phgPerfGetMflops(g, NULL, NULL);	/* reset flops counter */
	    t0 = phgGetTime(NULL);
	    for (i = 0; i < loop_count; i++) {
		HYPRE_ParCSRMatrixMatvecT(1.0, par_ma, par_va, 0.0, par_vb);
	    }
	    t1 = phgGetTime(NULL);
	    mflops = phgPerfGetMflops(g, NULL, NULL);
	    HYPRE_IJVectorGetValues(vb, A->rmap->nlocal, ni, (double *)y1->data);
	    t1 -= t0;
	    phgPrintf(" A'*x: time %0.4lf, nnz %0.16lg, %0.2lfMF (%0.2lfMF), "
		      "err: %le\n", t1, dnz, mop / (t1 == 0 ? 1. : t1), mflops,
		      (double)phgVecNorm2(phgVecAXPBY(-1.0, y0, 1.0, &y1),
					  0, NULL));

	    phgPerfGetMflops(g, NULL, NULL);	/* reset flops counter */
	    t0 = phgGetTime(NULL);
	    /* Note: 'HYPRE_ParCSRMatrix' is currently typedef'ed to
	     * 'hypre_ParCSRMatrix *' */
	    par_mb = hypre_ParMatmul((hypre_ParCSRMatrix *)par_ma,
				     (hypre_ParCSRMatrix *)par_ma);
	    t1 = phgGetTime(NULL);
	    mflops = phgPerfGetMflops(g, NULL, NULL);
	    start = hypre_ParCSRMatrixFirstRowIndex(par_mb);
	    end = hypre_ParCSRMatrixLastRowIndex(par_mb) + 1;
	    for (i = start, nnz = 0; i < end; i++) {
		HYPRE_Int ncols;
		hypre_ParCSRMatrixGetRow(par_mb, i, &ncols, NULL, NULL);
		hypre_ParCSRMatrixRestoreRow(par_mb, i, &ncols, NULL, NULL);
		nnz += ncols;
	    }
#if USE_MPI
	    dnz1 = nnz;
	    MPI_Reduce(&dnz1, &dnz, 1, MPI_DOUBLE, MPI_SUM, 0, g->comm);
#else
	    dnz = nnz;
#endif
	    /* compare B*x <--> A*A*x */
	    HYPRE_ParCSRMatrixMatvec(1.0, par_ma, par_vb, 0.0, par_vc);
	    HYPRE_ParCSRMatrixMatvec(1.0, (void *)par_mb, par_va, 0.0, par_vb);
	    HYPRE_IJVectorGetValues(vb, A->rmap->nlocal, ni, (double *)y1->data);
	    HYPRE_IJVectorGetValues(vc, A->rmap->nlocal, ni, (double *)y2->data);
	    hypre_ParCSRMatrixDestroy((par_mb));
	    t1 -= t0;
	    phgPrintf(" A*A: time %0.4lf, nnz %0.16lg, %0.2lfMF, err: %le\n",
		      t1, dnz, mflops,
		      (double)phgVecNorm2(phgVecAXPBY(-1.0, y1, 1.0, &y2),
					  0, NULL));
	    phgFree(ni);
	    HYPRE_IJMatrixDestroy(ma);
	    HYPRE_IJVectorDestroy(va);
	    HYPRE_IJVectorDestroy(vb);
	    HYPRE_IJVectorDestroy(vc);
	}
#endif /* USE_HYPRE */

	if (A0 != A)
	    phgMatDestroy(&A0);
#if 0
	if (A->rmap->nglobal > 1000) {
	    VEC *v = phgMapCreateVec(A->rmap, 3);
	    for (i = 0; i < v->map->nlocal; i++) {
		v->data[i + 0 * v->map->nlocal] = 1 * (i + v->map->partition[g->rank]);
		v->data[i + 1 * v->map->nlocal] = 2 * (i + v->map->partition[g->rank]);
		v->data[i + 2 * v->map->nlocal] = 3 * (i + v->map->partition[g->rank]);
	    }
	    phgMatDumpMATLAB(A, "A", "A.m");
	    phgVecDumpMATLAB(v, "v", "v.m");
	    phgFinalize();
	    exit(0);
	}
#endif
	phgMatDestroy(&A);
	phgVecDestroy(&x);
	phgVecDestroy(&y0);
	phgVecDestroy(&y1);
	phgVecDestroy(&y2);
	phgMapDestroy(&map);
	mem = phgMemoryUsage(g, &mem_peak);
	dnz = mem / (1024.0 * 1024.0);
	dnz1 = mem_peak / (1024.0 * 1024.0);
	/*phgPrintf(" --------------------------------------------"
		  "-------------------------\n");*/
	phgPrintf("\n");
	phgPrintf(" Memory: current %0.4lgMB, peak %0.4lgMB\n", dnz, dnz1);
#if 0
	{
	    static int loop_count = 0;
	    if (++loop_count == 4)
		break;
	}
#endif
	if (mem_peak > 1024 * (size_t)1024 * mem_max)
	    break;
	phgRefineAllElements(g, 1);
    }

    phgDofFree(&u_h);
    phgFreeGrid(&g);
    phgFinalize();

    return 0;
}
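/*
 * Note on the reported MF numbers (reading aid only, not part of the original
 * test program): "mop" above counts one multiplication and one addition per
 * stored nonzero, i.e. roughly 2*nnz - nrows flops per matrix-vector product,
 * times loop_count, in units of 10^6 operations; the value in parentheses is
 * the rate reported by phgPerfGetMflops().
 */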