idx_t vnbrpoolGetNext(ctrl_t *ctrl, idx_t nnbrs)
{
  ctrl->nbrpoolcpos += nnbrs;

  if (ctrl->nbrpoolcpos > ctrl->nbrpoolsize) {
    ctrl->nbrpoolsize += gk_max(10*nnbrs, ctrl->nbrpoolsize/2);

    ctrl->vnbrpool = (vnbr_t *)gk_realloc(ctrl->vnbrpool,
                         ctrl->nbrpoolsize*sizeof(vnbr_t), "vnbrpoolGet: vnbrpool");
    ctrl->nbrpoolreallocs++;
  }

  return ctrl->nbrpoolcpos - nnbrs;
}
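/* Illustrative note (added for exposition; not part of the original source):
   the pool grows in an amortized fashion. On each overflow, nbrpoolsize is
   extended by 10*nnbrs or by half its current size, whichever is larger, so
   the number of reallocations grows only logarithmically with the pool size.
   A caller typically uses the returned offset as follows (variable names here
   are hypothetical):

     idx_t start  = vnbrpoolGetNext(ctrl, nnbrs);
     vnbr_t *nbrs = ctrl->vnbrpool + start;   // valid slots: nbrs[0..nnbrs-1]

   Returning an offset rather than a pointer remains valid even if a later
   call gk_realloc()s and moves ctrl->vnbrpool. */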
void MlevelNodeBisectionL2(ctrl_t *ctrl, graph_t *graph, idx_t niparts)
{
  idx_t i, mincut, nruns=5;
  graph_t *cgraph;
  idx_t *bestwhere;

  /* if the graph is small, just find a single vertex separator */
  if (graph->nvtxs < 5000) {
    MlevelNodeBisectionL1(ctrl, graph, niparts);
    return;
  }

  WCOREPUSH;

  ctrl->CoarsenTo = gk_max(100, graph->nvtxs/30);

  cgraph = CoarsenGraphNlevels(ctrl, graph, 4);

  bestwhere = iwspacemalloc(ctrl, cgraph->nvtxs);

  mincut = graph->tvwgt[0];
  for (i=0; i<nruns; i++) {
    MlevelNodeBisectionL1(ctrl, cgraph, 0.7*niparts);

    if (i == 0 || cgraph->mincut < mincut) {
      mincut = cgraph->mincut;
      if (i < nruns-1)
        icopy(cgraph->nvtxs, cgraph->where, bestwhere);
    }

    if (mincut == 0)
      break;

    if (i < nruns-1)
      FreeRData(cgraph);
  }

  if (mincut != cgraph->mincut)
    icopy(cgraph->nvtxs, bestwhere, cgraph->where);

  WCOREPOP;

  Refine2WayNode(ctrl, graph, cgraph);
}
void MlevelNodeBisectionL1(ctrl_t *ctrl, graph_t *graph, idx_t niparts)
{
  graph_t *cgraph;

  ctrl->CoarsenTo = graph->nvtxs/8;
  if (ctrl->CoarsenTo > 100)
    ctrl->CoarsenTo = 100;
  else if (ctrl->CoarsenTo < 40)
    ctrl->CoarsenTo = 40;

  cgraph = CoarsenGraph(ctrl, graph);

  niparts = gk_max(1, (cgraph->nvtxs <= ctrl->CoarsenTo ? niparts/2 : niparts));
  /*niparts = (cgraph->nvtxs <= ctrl->CoarsenTo ? SMALLNIPARTS : LARGENIPARTS);*/
  InitSeparator(ctrl, cgraph, niparts);

  Refine2WayNode(ctrl, graph, cgraph);
}
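/* Worked example (added for exposition; not part of the original source):
   for a graph with nvtxs = 2000, CoarsenTo starts at 2000/8 = 250 and is
   clamped down to 100; for nvtxs = 200 it starts at 25 and is clamped up to
   40.  The number of initial separators tried is then halved (but kept >= 1)
   whenever coarsening stopped early, i.e. when the coarsest graph is no
   larger than CoarsenTo. */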
/*************************************************************************
* This function is the entry point of the initial partition algorithm
* that does recursive bisection.
* This algorithm assembles the graph to all the processors and proceeds
* by parallelizing the recursive bisection step.
**************************************************************************/
void InitPartition(ctrl_t *ctrl, graph_t *graph)
{
  idx_t i, j, ncon, mype, npes, gnvtxs, ngroups;
  idx_t *xadj, *adjncy, *adjwgt, *vwgt;
  idx_t *part, *gwhere0, *gwhere1;
  idx_t *tmpwhere, *tmpvwgt, *tmpxadj, *tmpadjncy, *tmpadjwgt;
  graph_t *agraph;
  idx_t lnparts, fpart, fpe, lnpes;
  idx_t twoparts=2, moptions[METIS_NOPTIONS], edgecut, max_cut;
  real_t *tpwgts, *tpwgts2, *lbvec, lbsum, min_lbsum, wsum;
  MPI_Comm ipcomm;
  struct {
    double sum;
    int rank;
  } lpesum, gpesum;

  WCOREPUSH;

  ncon = graph->ncon;

  ngroups = gk_max(gk_min(RIP_SPLIT_FACTOR, ctrl->npes), 1);

  IFSET(ctrl->dbglvl, DBG_TIME, gkMPI_Barrier(ctrl->comm));
  IFSET(ctrl->dbglvl, DBG_TIME, starttimer(ctrl->InitPartTmr));

  lbvec = rwspacemalloc(ctrl, ncon);

  /* assemble the graph to all the processors */
  agraph = AssembleAdaptiveGraph(ctrl, graph);
  gnvtxs = agraph->nvtxs;

  /* make a copy of the graph's structure for later */
  xadj   = icopy(gnvtxs+1, agraph->xadj, iwspacemalloc(ctrl, gnvtxs+1));
  vwgt   = icopy(gnvtxs*ncon, agraph->vwgt, iwspacemalloc(ctrl, gnvtxs*ncon));
  adjncy = icopy(agraph->nedges, agraph->adjncy, iwspacemalloc(ctrl, agraph->nedges));
  adjwgt = icopy(agraph->nedges, agraph->adjwgt, iwspacemalloc(ctrl, agraph->nedges));
  part   = iwspacemalloc(ctrl, gnvtxs);

  /* create different processor groups */
  gkMPI_Comm_split(ctrl->gcomm, ctrl->mype % ngroups, 0, &ipcomm);
  gkMPI_Comm_rank(ipcomm, &mype);
  gkMPI_Comm_size(ipcomm, &npes);

  /* Go into the recursive bisection */
  METIS_SetDefaultOptions(moptions);
  moptions[METIS_OPTION_SEED] = ctrl->sync + (ctrl->mype % ngroups) + 1;

  tpwgts  = ctrl->tpwgts;
  tpwgts2 = rwspacemalloc(ctrl, 2*ncon);

  lnparts = ctrl->nparts;
  fpart = fpe = 0;
  lnpes = npes;
  while (lnpes > 1 && lnparts > 1) {
    /* determine the weights of the two partitions as a function of the
       weight of the target partition weights */
    for (j=(lnparts>>1), i=0; i<ncon; i++) {
      tpwgts2[i]      = rsum(j, tpwgts+fpart*ncon+i, ncon);
      tpwgts2[ncon+i] = rsum(lnparts-j, tpwgts+(fpart+j)*ncon+i, ncon);
      wsum            = 1.0/(tpwgts2[i] + tpwgts2[ncon+i]);
      tpwgts2[i]      *= wsum;
      tpwgts2[ncon+i] *= wsum;
    }

    METIS_PartGraphRecursive(&agraph->nvtxs, &ncon, agraph->xadj, agraph->adjncy,
          agraph->vwgt, NULL, agraph->adjwgt, &twoparts, tpwgts2, NULL, moptions,
          &edgecut, part);

    /* pick one of the branches */
    if (mype < fpe+lnpes/2) {
      KeepPart(ctrl, agraph, part, 0);
      lnpes   = lnpes/2;
      lnparts = lnparts/2;
    }
    else {
      KeepPart(ctrl, agraph, part, 1);
      fpart   = fpart + lnparts/2;
      fpe     = fpe + lnpes/2;
      lnpes   = lnpes - lnpes/2;
      lnparts = lnparts - lnparts/2;
    }
  }

  gwhere0 = iset(gnvtxs, 0, iwspacemalloc(ctrl, gnvtxs));
  gwhere1 = iwspacemalloc(ctrl, gnvtxs);

  if (lnparts == 1) { /* Case in which npes is greater than or equal to nparts */
    /* Only the first process will assign labels (for the reduction to work) */
    if (mype == fpe) {
      for (i=0; i<agraph->nvtxs; i++)
        gwhere0[agraph->label[i]] = fpart;
    }
  }
  else { /* Case in which npes is smaller than nparts */
    /* create the normalized tpwgts for the lnparts from ctrl->tpwgts */
    tpwgts = rwspacemalloc(ctrl, lnparts*ncon);
    for (j=0; j<ncon; j++) {
      for (wsum=0.0, i=0; i<lnparts; i++) {
        tpwgts[i*ncon+j] = ctrl->tpwgts[(fpart+i)*ncon+j];
        wsum += tpwgts[i*ncon+j];
      }
      for (wsum=1.0/wsum, i=0; i<lnparts; i++)
        tpwgts[i*ncon+j] *= wsum;
    }

    METIS_PartGraphKway(&agraph->nvtxs, &ncon, agraph->xadj, agraph->adjncy,
          agraph->vwgt, NULL, agraph->adjwgt, &lnparts, tpwgts, NULL, moptions,
          &edgecut, part);

    for (i=0; i<agraph->nvtxs; i++)
      gwhere0[agraph->label[i]] = fpart + part[i];
  }

  gkMPI_Allreduce((void *)gwhere0, (void *)gwhere1, gnvtxs, IDX_T, MPI_SUM, ipcomm);

  if (ngroups > 1) {
    tmpxadj   = agraph->xadj;
    tmpadjncy = agraph->adjncy;
    tmpadjwgt = agraph->adjwgt;
    tmpvwgt   = agraph->vwgt;
    tmpwhere  = agraph->where;

    agraph->xadj   = xadj;
    agraph->adjncy = adjncy;
    agraph->adjwgt = adjwgt;
    agraph->vwgt   = vwgt;
    agraph->where  = gwhere1;
    agraph->nvtxs  = gnvtxs;

    edgecut = ComputeSerialEdgeCut(agraph);
    ComputeSerialBalance(ctrl, agraph, gwhere1, lbvec);
    lbsum = rsum(ncon, lbvec, 1);

    gkMPI_Allreduce((void *)&edgecut, (void *)&max_cut, 1, IDX_T, MPI_MAX, ctrl->gcomm);
    gkMPI_Allreduce((void *)&lbsum, (void *)&min_lbsum, 1, REAL_T, MPI_MIN, ctrl->gcomm);

    lpesum.sum = lbsum;
    if (min_lbsum < UNBALANCE_FRACTION*ncon) {
      if (lbsum < UNBALANCE_FRACTION*ncon)
        lpesum.sum = edgecut;
      else
        lpesum.sum = max_cut;
    }
    lpesum.rank = ctrl->mype;

    gkMPI_Allreduce((void *)&lpesum, (void *)&gpesum, 1, MPI_DOUBLE_INT,
        MPI_MINLOC, ctrl->gcomm);
    gkMPI_Bcast((void *)gwhere1, gnvtxs, IDX_T, gpesum.rank, ctrl->gcomm);

    agraph->xadj   = tmpxadj;
    agraph->adjncy = tmpadjncy;
    agraph->adjwgt = tmpadjwgt;
    agraph->vwgt   = tmpvwgt;
    agraph->where  = tmpwhere;
  }

  icopy(graph->nvtxs, gwhere1+graph->vtxdist[ctrl->mype], graph->where);

  FreeGraph(agraph);
  gkMPI_Comm_free(&ipcomm);

  IFSET(ctrl->dbglvl, DBG_TIME, gkMPI_Barrier(ctrl->comm));
  IFSET(ctrl->dbglvl, DBG_TIME, stoptimer(ctrl->InitPartTmr));

  WCOREPOP;
}
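/* Added for exposition (not part of the original source): in the bisection
   loop above, the 2*ncon target weights of each split are obtained by summing
   the final target part weights that fall on each side and renormalizing each
   constraint so the two sides add up to 1.  A hypothetical standalone version
   of that step, for a single constraint, is sketched below. */
#if 0
static void split_tpwgts(idx_t lnparts, idx_t fpart, real_t *tpwgts,
                         real_t tpwgts2[2])
{
  idx_t i, j = lnparts/2;
  real_t wsum;

  tpwgts2[0] = tpwgts2[1] = 0.0;
  for (i=0; i<j; i++)        tpwgts2[0] += tpwgts[fpart+i];  /* left half   */
  for (i=j; i<lnparts; i++)  tpwgts2[1] += tpwgts[fpart+i];  /* right half  */

  wsum = 1.0/(tpwgts2[0] + tpwgts2[1]);   /* normalize so the pair sums to 1 */
  tpwgts2[0] *= wsum;
  tpwgts2[1] *= wsum;
}
#endif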
int METIS_PartGraphKway(idx_t *nvtxs, idx_t *ncon, idx_t *xadj, idx_t *adjncy,
          idx_t *vwgt, idx_t *vsize, idx_t *adjwgt, idx_t *nparts,
          real_t *tpwgts, real_t *ubvec, idx_t *options, idx_t *objval,
          idx_t *part)
{
  int sigrval=0, renumber=0;
  graph_t *graph;
  ctrl_t *ctrl;

  /* set up malloc cleaning code and signal catchers */
  if (!gk_malloc_init())
    return METIS_ERROR_MEMORY;

  gk_sigtrap();

  if ((sigrval = gk_sigcatch()) != 0)
    goto SIGTHROW;

  /* set up the run parameters */
  ctrl = SetupCtrl(METIS_OP_KMETIS, options, *ncon, *nparts, tpwgts, ubvec);
  if (!ctrl) {
    gk_siguntrap();
    return METIS_ERROR_INPUT;
  }

  /* if required, change the numbering to 0 */
  if (ctrl->numflag == 1) {
    Change2CNumbering(*nvtxs, xadj, adjncy);
    renumber = 1;
  }

  /* set up the graph */
  graph = SetupGraph(ctrl, *nvtxs, *ncon, xadj, adjncy, vwgt, vsize, adjwgt);

  /* set up multipliers for making balance computations easier */
  SetupKWayBalMultipliers(ctrl, graph);

  /* set various run parameters that depend on the graph */
  if (ctrl->iptype == METIS_IPTYPE_METISRB) {
    ctrl->CoarsenTo = gk_max((*nvtxs)/(40*gk_log2(*nparts)), 30*(*nparts));
    ctrl->CoarsenTo = 10*(*nparts);
    ctrl->nIparts   = (ctrl->CoarsenTo == 30*(*nparts) ? 4 : 5);
  }
  else {
    ctrl->CoarsenTo = 10*(*nparts);
    ctrl->nIparts   = 10;
  }

  /* take care of contiguity requests for disconnected graphs */
  if (ctrl->contig && !IsConnected(graph, 0))
    gk_errexit(SIGERR, "METIS Error: A contiguous partition is requested for a non-contiguous input graph.\n");

  /* allocate workspace memory */
  AllocateWorkSpace(ctrl, graph);

  /* start the partitioning */
  IFSET(ctrl->dbglvl, METIS_DBG_TIME, InitTimers(ctrl));
  IFSET(ctrl->dbglvl, METIS_DBG_TIME, gk_startwctimer(ctrl->TotalTmr));

  *objval = MlevelKWayPartitioning(ctrl, graph, part);

  IFSET(ctrl->dbglvl, METIS_DBG_TIME, gk_stopwctimer(ctrl->TotalTmr));
  IFSET(ctrl->dbglvl, METIS_DBG_TIME, PrintTimers(ctrl));

  /* clean up */
  FreeCtrl(&ctrl);

SIGTHROW:
  /* if required, change the numbering back to 1 */
  if (renumber)
    Change2FNumbering(*nvtxs, xadj, adjncy, part);

  gk_siguntrap();
  gk_malloc_cleanup(0);

  return metis_rcode(sigrval);
}
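/* Illustrative usage sketch (added for exposition; not part of the library
   source): a minimal caller of METIS_PartGraphKway using the signature shown
   above.  The 6-vertex ring graph below is made up purely for demonstration;
   NULL is passed for vwgt/vsize/adjwgt/tpwgts/ubvec to request unit weights
   and uniform targets. */
#if 0
#include <stdio.h>
#include <metis.h>

int example_partgraphkway(void)
{
  idx_t nvtxs = 6, ncon = 1, nparts = 2, objval;
  /* CSR of a 6-cycle: vertex i is adjacent to i-1 and i+1 (mod 6) */
  idx_t xadj[]   = {0, 2, 4, 6, 8, 10, 12};
  idx_t adjncy[] = {1, 5, 0, 2, 1, 3, 2, 4, 3, 5, 4, 0};
  idx_t part[6];
  idx_t options[METIS_NOPTIONS];

  METIS_SetDefaultOptions(options);

  if (METIS_PartGraphKway(&nvtxs, &ncon, xadj, adjncy, NULL, NULL, NULL,
          &nparts, NULL, NULL, options, &objval, part) != METIS_OK)
    return 1;

  printf("edgecut: %d\n", (int)objval);
  return 0;
}
#endif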
void FM_2WayCutRefine(ctrl_t *ctrl, graph_t *graph, real_t *ntpwgts, idx_t niter) { idx_t i, ii, j, k, kwgt, nvtxs, nbnd, nswaps, from, to, pass, me, limit, tmp; idx_t *xadj, *vwgt, *adjncy, *adjwgt, *where, *id, *ed, *bndptr, *bndind, *pwgts; idx_t *moved, *swaps, *perm; rpq_t *queues[2]; idx_t higain, mincut, mindiff, origdiff, initcut, newcut, mincutorder, avgvwgt; idx_t tpwgts[2]; WCOREPUSH; nvtxs = graph->nvtxs; xadj = graph->xadj; vwgt = graph->vwgt; adjncy = graph->adjncy; adjwgt = graph->adjwgt; where = graph->where; id = graph->id; ed = graph->ed; pwgts = graph->pwgts; bndptr = graph->bndptr; bndind = graph->bndind; moved = iwspacemalloc(ctrl, nvtxs); swaps = iwspacemalloc(ctrl, nvtxs); perm = iwspacemalloc(ctrl, nvtxs); tpwgts[0] = graph->tvwgt[0]*ntpwgts[0]; tpwgts[1] = graph->tvwgt[0]-tpwgts[0]; limit = gk_min(gk_max(0.01*nvtxs, 15), 100); avgvwgt = gk_min((pwgts[0]+pwgts[1])/20, 2*(pwgts[0]+pwgts[1])/nvtxs); queues[0] = rpqCreate(nvtxs); queues[1] = rpqCreate(nvtxs); IFSET(ctrl->dbglvl, METIS_DBG_REFINE, Print2WayRefineStats(ctrl, graph, ntpwgts, 0, -2)); origdiff = iabs(tpwgts[0]-pwgts[0]); iset(nvtxs, -1, moved); for (pass=0; pass<niter; pass++) { /* Do a number of passes */ rpqReset(queues[0]); rpqReset(queues[1]); mincutorder = -1; newcut = mincut = initcut = graph->mincut; mindiff = iabs(tpwgts[0]-pwgts[0]); ASSERT(ComputeCut(graph, where) == graph->mincut); ASSERT(CheckBnd(graph)); /* Insert boundary nodes in the priority queues */ nbnd = graph->nbnd; irandArrayPermute(nbnd, perm, nbnd, 1); for (ii=0; ii<nbnd; ii++) { i = perm[ii]; ASSERT(ed[bndind[i]] > 0 || id[bndind[i]] == 0); ASSERT(bndptr[bndind[i]] != -1); rpqInsert(queues[where[bndind[i]]], bndind[i], ed[bndind[i]]-id[bndind[i]]); } for (nswaps=0; nswaps<nvtxs; nswaps++) { from = (tpwgts[0]-pwgts[0] < tpwgts[1]-pwgts[1] ? 0 : 1); to = (from+1)%2; if ((higain = rpqGetTop(queues[from])) == -1) break; ASSERT(bndptr[higain] != -1); newcut -= (ed[higain]-id[higain]); INC_DEC(pwgts[to], pwgts[from], vwgt[higain]); if ((newcut < mincut && iabs(tpwgts[0]-pwgts[0]) <= origdiff+avgvwgt) || (newcut == mincut && iabs(tpwgts[0]-pwgts[0]) < mindiff)) { mincut = newcut; mindiff = iabs(tpwgts[0]-pwgts[0]); mincutorder = nswaps; } else if (nswaps-mincutorder > limit) { /* We hit the limit, undo last move */ newcut += (ed[higain]-id[higain]); INC_DEC(pwgts[from], pwgts[to], vwgt[higain]); break; } where[higain] = to; moved[higain] = nswaps; swaps[nswaps] = higain; IFSET(ctrl->dbglvl, METIS_DBG_MOVEINFO, printf("Moved %6"PRIDX" from %"PRIDX". [%3"PRIDX" %3"PRIDX"] %5"PRIDX" [%4"PRIDX" %4"PRIDX"]\n", higain, from, ed[higain]-id[higain], vwgt[higain], newcut, pwgts[0], pwgts[1])); /************************************************************** * Update the id[i]/ed[i] values of the affected nodes ***************************************************************/ SWAP(id[higain], ed[higain], tmp); if (ed[higain] == 0 && xadj[higain] < xadj[higain+1]) BNDDelete(nbnd, bndind, bndptr, higain); for (j=xadj[higain]; j<xadj[higain+1]; j++) { k = adjncy[j]; kwgt = (to == where[k] ? 
adjwgt[j] : -adjwgt[j]); INC_DEC(id[k], ed[k], kwgt); /* Update its boundary information and queue position */ if (bndptr[k] != -1) { /* If k was a boundary vertex */ if (ed[k] == 0) { /* Not a boundary vertex any more */ BNDDelete(nbnd, bndind, bndptr, k); if (moved[k] == -1) /* Remove it if in the queues */ rpqDelete(queues[where[k]], k); } else { /* If it has not been moved, update its position in the queue */ if (moved[k] == -1) rpqUpdate(queues[where[k]], k, ed[k]-id[k]); } } else { if (ed[k] > 0) { /* It will now become a boundary vertex */ BNDInsert(nbnd, bndind, bndptr, k); if (moved[k] == -1) rpqInsert(queues[where[k]], k, ed[k]-id[k]); } } } } /**************************************************************** * Roll back computations *****************************************************************/ for (i=0; i<nswaps; i++) moved[swaps[i]] = -1; /* reset moved array */ for (nswaps--; nswaps>mincutorder; nswaps--) { higain = swaps[nswaps]; to = where[higain] = (where[higain]+1)%2; SWAP(id[higain], ed[higain], tmp); if (ed[higain] == 0 && bndptr[higain] != -1 && xadj[higain] < xadj[higain+1]) BNDDelete(nbnd, bndind, bndptr, higain); else if (ed[higain] > 0 && bndptr[higain] == -1) BNDInsert(nbnd, bndind, bndptr, higain); INC_DEC(pwgts[to], pwgts[(to+1)%2], vwgt[higain]); for (j=xadj[higain]; j<xadj[higain+1]; j++) { k = adjncy[j]; kwgt = (to == where[k] ? adjwgt[j] : -adjwgt[j]); INC_DEC(id[k], ed[k], kwgt); if (bndptr[k] != -1 && ed[k] == 0) BNDDelete(nbnd, bndind, bndptr, k); if (bndptr[k] == -1 && ed[k] > 0) BNDInsert(nbnd, bndind, bndptr, k); } } graph->mincut = mincut; graph->nbnd = nbnd; IFSET(ctrl->dbglvl, METIS_DBG_REFINE, Print2WayRefineStats(ctrl, graph, ntpwgts, 0, mincutorder)); if (mincutorder <= 0 || mincut == initcut) break; } rpqDestroy(queues[0]); rpqDestroy(queues[1]); WCOREPOP; }
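/* Added explanatory note (not in the original source): for a vertex v, id[v]
   is the total weight of edges to v's own part and ed[v] the weight of edges
   to the other part, so moving v changes the cut by id[v]-id[v]'s complement,
   i.e. the cut decreases by ed[v]-id[v].  Example: with id[v]=2 and ed[v]=5,
   moving v reduces the cut by 3 (newcut -= ed[v]-id[v] above), and after the
   move the two degrees are simply swapped (SWAP(id[v], ed[v])) because every
   edge that was internal becomes external and vice versa.  The sketch below
   (hypothetical helper, illustration only) recomputes id[]/ed[] from scratch,
   matching the incremental bookkeeping used by the refinement loop. */
#if 0
static void compute_id_ed(idx_t nvtxs, idx_t *xadj, idx_t *adjncy, idx_t *adjwgt,
                          idx_t *where, idx_t *id, idx_t *ed)
{
  idx_t i, j;

  for (i=0; i<nvtxs; i++) {
    id[i] = ed[i] = 0;
    for (j=xadj[i]; j<xadj[i+1]; j++) {
      if (where[adjncy[j]] == where[i])
        id[i] += adjwgt[j];   /* internal degree */
      else
        ed[i] += adjwgt[j];   /* external degree */
    }
  }
}
#endif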
void FM_Mc2WayCutRefine(ctrl_t *ctrl, graph_t *graph, real_t *ntpwgts, idx_t niter) { idx_t i, ii, j, k, l, kwgt, nvtxs, ncon, nbnd, nswaps, from, to, pass, me, limit, tmp, cnum; idx_t *xadj, *adjncy, *vwgt, *adjwgt, *pwgts, *where, *id, *ed, *bndptr, *bndind; idx_t *moved, *swaps, *perm, *qnum; idx_t higain, mincut, initcut, newcut, mincutorder; real_t *invtvwgt, *ubfactors, *minbalv, *newbalv; real_t origbal, minbal, newbal, rgain, ffactor; rpq_t **queues; WCOREPUSH; nvtxs = graph->nvtxs; ncon = graph->ncon; xadj = graph->xadj; vwgt = graph->vwgt; adjncy = graph->adjncy; adjwgt = graph->adjwgt; invtvwgt = graph->invtvwgt; where = graph->where; id = graph->id; ed = graph->ed; pwgts = graph->pwgts; bndptr = graph->bndptr; bndind = graph->bndind; moved = iwspacemalloc(ctrl, nvtxs); swaps = iwspacemalloc(ctrl, nvtxs); perm = iwspacemalloc(ctrl, nvtxs); qnum = iwspacemalloc(ctrl, nvtxs); ubfactors = rwspacemalloc(ctrl, ncon); newbalv = rwspacemalloc(ctrl, ncon); minbalv = rwspacemalloc(ctrl, ncon); limit = gk_min(gk_max(0.01*nvtxs, 25), 150); /* Determine a fudge factor to allow the refinement routines to get out of tight balancing constraints. */ ffactor = .5/gk_max(20, nvtxs); /* Initialize the queues */ queues = (rpq_t **)wspacemalloc(ctrl, 2*ncon*sizeof(rpq_t *)); for (i=0; i<2*ncon; i++) queues[i] = rpqCreate(nvtxs); for (i=0; i<nvtxs; i++) qnum[i] = iargmax_nrm(ncon, vwgt+i*ncon, invtvwgt); /* Determine the unbalance tolerance for each constraint. The tolerance is equal to the maximum of the original load imbalance and the user-supplied allowed tolerance. The rationale behind this approach is to allow the refinement routine to improve the cut, without having to worry about fixing load imbalance problems. The load imbalance is addressed by the balancing routines. */ origbal = ComputeLoadImbalanceDiffVec(graph, 2, ctrl->pijbm, ctrl->ubfactors, ubfactors); for (i=0; i<ncon; i++) ubfactors[i] = (ubfactors[i] > 0 ? ctrl->ubfactors[i]+ubfactors[i] : ctrl->ubfactors[i]); IFSET(ctrl->dbglvl, METIS_DBG_REFINE, Print2WayRefineStats(ctrl, graph, ntpwgts, origbal, -2)); iset(nvtxs, -1, moved); for (pass=0; pass<niter; pass++) { /* Do a number of passes */ for (i=0; i<2*ncon; i++) rpqReset(queues[i]); mincutorder = -1; newcut = mincut = initcut = graph->mincut; minbal = ComputeLoadImbalanceDiffVec(graph, 2, ctrl->pijbm, ubfactors, minbalv); ASSERT(ComputeCut(graph, where) == graph->mincut); ASSERT(CheckBnd(graph)); /* Insert boundary nodes in the priority queues */ nbnd = graph->nbnd; irandArrayPermute(nbnd, perm, nbnd/5, 1); for (ii=0; ii<nbnd; ii++) { i = bndind[perm[ii]]; ASSERT(ed[i] > 0 || id[i] == 0); ASSERT(bndptr[i] != -1); //rgain = 1.0*(ed[i]-id[i])/sqrt(vwgt[i*ncon+qnum[i]]+1); //rgain = (ed[i]-id[i] > 0 ? 
1.0*(ed[i]-id[i])/sqrt(vwgt[i*ncon+qnum[i]]+1) : ed[i]-id[i]); rgain = ed[i]-id[i]; rpqInsert(queues[2*qnum[i]+where[i]], i, rgain); } for (nswaps=0; nswaps<nvtxs; nswaps++) { SelectQueue(graph, ctrl->pijbm, ubfactors, queues, &from, &cnum); to = (from+1)%2; if (from == -1 || (higain = rpqGetTop(queues[2*cnum+from])) == -1) break; ASSERT(bndptr[higain] != -1); newcut -= (ed[higain]-id[higain]); iaxpy(ncon, 1, vwgt+higain*ncon, 1, pwgts+to*ncon, 1); iaxpy(ncon, -1, vwgt+higain*ncon, 1, pwgts+from*ncon, 1); newbal = ComputeLoadImbalanceDiffVec(graph, 2, ctrl->pijbm, ubfactors, newbalv); if ((newcut < mincut && newbal <= ffactor) || (newcut == mincut && (newbal < minbal || (newbal == minbal && BetterBalance2Way(ncon, minbalv, newbalv))))) { mincut = newcut; minbal = newbal; mincutorder = nswaps; rcopy(ncon, newbalv, minbalv); } else if (nswaps-mincutorder > limit) { /* We hit the limit, undo last move */ newcut += (ed[higain]-id[higain]); iaxpy(ncon, 1, vwgt+higain*ncon, 1, pwgts+from*ncon, 1); iaxpy(ncon, -1, vwgt+higain*ncon, 1, pwgts+to*ncon, 1); break; } where[higain] = to; moved[higain] = nswaps; swaps[nswaps] = higain; if (ctrl->dbglvl&METIS_DBG_MOVEINFO) { printf("Moved%6"PRIDX" from %"PRIDX"(%"PRIDX") Gain:%5"PRIDX", " "Cut:%5"PRIDX", NPwgts:", higain, from, cnum, ed[higain]-id[higain], newcut); for (l=0; l<ncon; l++) printf("(%.3"PRREAL" %.3"PRREAL")", pwgts[l]*invtvwgt[l], pwgts[ncon+l]*invtvwgt[l]); printf(" %+.3"PRREAL" LB: %.3"PRREAL"(%+.3"PRREAL")\n", minbal, ComputeLoadImbalance(graph, 2, ctrl->pijbm), newbal); } /************************************************************** * Update the id[i]/ed[i] values of the affected nodes ***************************************************************/ SWAP(id[higain], ed[higain], tmp); if (ed[higain] == 0 && xadj[higain] < xadj[higain+1]) BNDDelete(nbnd, bndind, bndptr, higain); for (j=xadj[higain]; j<xadj[higain+1]; j++) { k = adjncy[j]; kwgt = (to == where[k] ? adjwgt[j] : -adjwgt[j]); INC_DEC(id[k], ed[k], kwgt); /* Update its boundary information and queue position */ if (bndptr[k] != -1) { /* If k was a boundary vertex */ if (ed[k] == 0) { /* Not a boundary vertex any more */ BNDDelete(nbnd, bndind, bndptr, k); if (moved[k] == -1) /* Remove it if in the queues */ rpqDelete(queues[2*qnum[k]+where[k]], k); } else { /* If it has not been moved, update its position in the queue */ if (moved[k] == -1) { //rgain = 1.0*(ed[k]-id[k])/sqrt(vwgt[k*ncon+qnum[k]]+1); //rgain = (ed[k]-id[k] > 0 ? // 1.0*(ed[k]-id[k])/sqrt(vwgt[k*ncon+qnum[k]]+1) : ed[k]-id[k]); rgain = ed[k]-id[k]; rpqUpdate(queues[2*qnum[k]+where[k]], k, rgain); } } } else { if (ed[k] > 0) { /* It will now become a boundary vertex */ BNDInsert(nbnd, bndind, bndptr, k); if (moved[k] == -1) { //rgain = 1.0*(ed[k]-id[k])/sqrt(vwgt[k*ncon+qnum[k]]+1); //rgain = (ed[k]-id[k] > 0 ? 
// 1.0*(ed[k]-id[k])/sqrt(vwgt[k*ncon+qnum[k]]+1) : ed[k]-id[k]); rgain = ed[k]-id[k]; rpqInsert(queues[2*qnum[k]+where[k]], k, rgain); } } } } } /**************************************************************** * Roll back computations *****************************************************************/ for (i=0; i<nswaps; i++) moved[swaps[i]] = -1; /* reset moved array */ for (nswaps--; nswaps>mincutorder; nswaps--) { higain = swaps[nswaps]; to = where[higain] = (where[higain]+1)%2; SWAP(id[higain], ed[higain], tmp); if (ed[higain] == 0 && bndptr[higain] != -1 && xadj[higain] < xadj[higain+1]) BNDDelete(nbnd, bndind, bndptr, higain); else if (ed[higain] > 0 && bndptr[higain] == -1) BNDInsert(nbnd, bndind, bndptr, higain); iaxpy(ncon, 1, vwgt+higain*ncon, 1, pwgts+to*ncon, 1); iaxpy(ncon, -1, vwgt+higain*ncon, 1, pwgts+((to+1)%2)*ncon, 1); for (j=xadj[higain]; j<xadj[higain+1]; j++) { k = adjncy[j]; kwgt = (to == where[k] ? adjwgt[j] : -adjwgt[j]); INC_DEC(id[k], ed[k], kwgt); if (bndptr[k] != -1 && ed[k] == 0) BNDDelete(nbnd, bndind, bndptr, k); if (bndptr[k] == -1 && ed[k] > 0) BNDInsert(nbnd, bndind, bndptr, k); } } graph->mincut = mincut; graph->nbnd = nbnd; IFSET(ctrl->dbglvl, METIS_DBG_REFINE, Print2WayRefineStats(ctrl, graph, ntpwgts, minbal, mincutorder)); if (mincutorder <= 0 || mincut == initcut) break; } for (i=0; i<2*ncon; i++) rpqDestroy(queues[i]); WCOREPOP; }
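/* Added note (not in the original source): the multi-constraint variant keeps
   2*ncon priority queues, one per (dominant constraint, side) pair.  Vertex i
   is filed under qnum[i] = iargmax_nrm(ncon, vwgt+i*ncon, invtvwgt), i.e. the
   constraint in which its normalized weight is largest, and under its current
   side where[i].  SelectQueue() then chooses which (constraint, side) queue to
   pop from next, taking the current balance vector into account, so that cut
   improvement does not come at the expense of any single constraint. */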
/************************************************************************* * This function performs a k-way directed diffusion **************************************************************************/ real_t WavefrontDiffusion(ctrl_t *ctrl, graph_t *graph, idx_t *home) { idx_t ii, i, j, k, l, nvtxs, nedges, nparts; idx_t from, to, edge, done, nswaps, noswaps, totalv, wsize; idx_t npasses, first, second, third, mind, maxd; idx_t *xadj, *adjncy, *adjwgt, *where, *perm; idx_t *rowptr, *colind, *ed, *psize; real_t *transfer, *tmpvec; real_t balance = -1.0, *load, *solution, *workspace; real_t *nvwgt, *npwgts, flowFactor, cost, ubfactor; matrix_t matrix; ikv_t *cand; idx_t ndirty, nclean, dptr, clean; nvtxs = graph->nvtxs; nedges = graph->nedges; xadj = graph->xadj; nvwgt = graph->nvwgt; adjncy = graph->adjncy; adjwgt = graph->adjwgt; where = graph->where; nparts = ctrl->nparts; ubfactor = ctrl->ubvec[0]; matrix.nrows = nparts; flowFactor = 0.35; flowFactor = (ctrl->mype == 2) ? 0.50 : flowFactor; flowFactor = (ctrl->mype == 3) ? 0.75 : flowFactor; flowFactor = (ctrl->mype == 4) ? 1.00 : flowFactor; /* allocate memory */ solution = rmalloc(4*nparts+2*nedges, "WavefrontDiffusion: solution"); tmpvec = solution + nparts; npwgts = solution + 2*nparts; load = solution + 3*nparts; matrix.values = solution + 4*nparts; transfer = matrix.transfer = solution + 4*nparts + nedges; perm = imalloc(2*nvtxs+2*nparts+nedges+1, "WavefrontDiffusion: perm"); ed = perm + nvtxs; psize = perm + 2*nvtxs; rowptr = matrix.rowptr = perm + 2*nvtxs + nparts; colind = matrix.colind = perm + 2*nvtxs + 2*nparts + 1; /*GKTODO - Potential problem with this malloc */ wsize = gk_max(sizeof(real_t)*nparts*6, sizeof(idx_t)*(nvtxs+nparts*2+1)); workspace = (real_t *)gk_malloc(wsize, "WavefrontDiffusion: workspace"); cand = ikvmalloc(nvtxs, "WavefrontDiffusion: cand"); /*****************************/ /* Populate empty subdomains */ /*****************************/ iset(nparts, 0, psize); for (i=0; i<nvtxs; i++) psize[where[i]]++; mind = iargmin(nparts, psize); maxd = iargmax(nparts, psize); if (psize[mind] == 0) { for (i=0; i<nvtxs; i++) { k = (RandomInRange(nvtxs)+i)%nvtxs; if (where[k] == maxd) { where[k] = mind; psize[mind]++; psize[maxd]--; break; } } } iset(nvtxs, 0, ed); rset(nparts, 0.0, npwgts); for (i=0; i<nvtxs; i++) { npwgts[where[i]] += nvwgt[i]; for (j=xadj[i]; j<xadj[i+1]; j++) ed[i] += (where[i] != where[adjncy[j]] ? 
adjwgt[j] : 0); } ComputeLoad(graph, nparts, load, ctrl->tpwgts, 0); done = 0; /* zero out the tmpvec array */ rset(nparts, 0.0, tmpvec); npasses = gk_min(nparts/2, NGD_PASSES); for (l=0; l<npasses; l++) { /* Set-up and solve the diffusion equation */ nswaps = 0; /************************/ /* Solve flow equations */ /************************/ SetUpConnectGraph(graph, &matrix, (idx_t *)workspace); /* check for disconnected subdomains */ for(i=0; i<matrix.nrows; i++) { if (matrix.rowptr[i]+1 == matrix.rowptr[i+1]) { cost = (real_t)(ctrl->mype); goto CleanUpAndExit; } } ConjGrad2(&matrix, load, solution, 0.001, workspace); ComputeTransferVector(1, &matrix, solution, transfer, 0); GetThreeMax(nparts, load, &first, &second, &third); if (l%3 == 0) { FastRandomPermute(nvtxs, perm, 1); } else { /*****************************/ /* move dirty vertices first */ /*****************************/ ndirty = 0; for (i=0; i<nvtxs; i++) { if (where[i] != home[i]) ndirty++; } dptr = 0; for (i=0; i<nvtxs; i++) { if (where[i] != home[i]) perm[dptr++] = i; else perm[ndirty++] = i; } PASSERT(ctrl, ndirty == nvtxs); ndirty = dptr; nclean = nvtxs-dptr; FastRandomPermute(ndirty, perm, 0); FastRandomPermute(nclean, perm+ndirty, 0); } if (ctrl->mype == 0) { for (j=nvtxs, k=0, ii=0; ii<nvtxs; ii++) { i = perm[ii]; if (ed[i] != 0) { cand[k].key = -ed[i]; cand[k++].val = i; } else { cand[--j].key = 0; cand[j].val = i; } } ikvsorti(k, cand); } for (ii=0; ii<nvtxs/3; ii++) { i = (ctrl->mype == 0) ? cand[ii].val : perm[ii]; from = where[i]; /* don't move out the last vertex in a subdomain */ if (psize[from] == 1) continue; clean = (from == home[i]) ? 1 : 0; /* only move from top three or dirty vertices */ if (from != first && from != second && from != third && clean) continue; /* Scatter the sparse transfer row into the dense tmpvec row */ for (j=rowptr[from]+1; j<rowptr[from+1]; j++) tmpvec[colind[j]] = transfer[j]; for (j=xadj[i]; j<xadj[i+1]; j++) { to = where[adjncy[j]]; if (from != to) { if (tmpvec[to] > (flowFactor * nvwgt[i])) { tmpvec[to] -= nvwgt[i]; INC_DEC(psize[to], psize[from], 1); INC_DEC(npwgts[to], npwgts[from], nvwgt[i]); INC_DEC(load[to], load[from], nvwgt[i]); where[i] = to; nswaps++; /* Update external degrees */ ed[i] = 0; for (k=xadj[i]; k<xadj[i+1]; k++) { edge = adjncy[k]; ed[i] += (to != where[edge] ? adjwgt[k] : 0); if (where[edge] == from) ed[edge] += adjwgt[k]; if (where[edge] == to) ed[edge] -= adjwgt[k]; } break; } } } /* Gather the dense tmpvec row into the sparse transfer row */ for (j=rowptr[from]+1; j<rowptr[from+1]; j++) { transfer[j] = tmpvec[colind[j]]; tmpvec[colind[j]] = 0.0; } ASSERT(fabs(rsum(nparts, tmpvec, 1)) < .0001) } if (l % 2 == 1) { balance = rmax(nparts, npwgts)*nparts; if (balance < ubfactor + 0.035) done = 1; if (GlobalSESum(ctrl, done) > 0) break; noswaps = (nswaps > 0) ? 0 : 1; if (GlobalSESum(ctrl, noswaps) > ctrl->npes/2) break; } } graph->mincut = ComputeSerialEdgeCut(graph); totalv = Mc_ComputeSerialTotalV(graph, home); cost = ctrl->ipc_factor * (real_t)graph->mincut + ctrl->redist_factor * (real_t)totalv; CleanUpAndExit: gk_free((void **)&solution, (void **)&perm, (void **)&workspace, (void **)&cand, LTERM); return cost; }
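/* Added note (not in the original source): a vertex is moved along a
   subdomain edge only while the flow still to be transferred on that edge
   exceeds flowFactor times the vertex's weight (tmpvec[to] > flowFactor*nvwgt[i]),
   so vertex moves roughly follow the computed flow solution without
   overshooting it.  The returned cost combines the resulting edge cut and the
   redistribution volume: cost = ipc_factor*mincut + redist_factor*totalv. */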
/***********************************************************************************
* This function is the entry point of the parallel multilevel local diffusion
* algorithm. It uses parallel undirected diffusion followed by adaptive k-way
* refinement. This function utilizes local coarsening.
************************************************************************************/
int ParMETIS_V3_RefineKway(idx_t *vtxdist, idx_t *xadj, idx_t *adjncy, idx_t *vwgt,
        idx_t *adjwgt, idx_t *wgtflag, idx_t *numflag, idx_t *ncon, idx_t *nparts,
        real_t *tpwgts, real_t *ubvec, idx_t *options, idx_t *edgecut, idx_t *part,
        MPI_Comm *comm)
{
  idx_t npes, mype, status;
  ctrl_t *ctrl=NULL;
  graph_t *graph=NULL;
  size_t curmem;

  /* Check the input parameters and return if an error is detected */
  status = CheckInputsPartKway(vtxdist, xadj, adjncy, vwgt, adjwgt, wgtflag,
               numflag, ncon, nparts, tpwgts, ubvec, options, edgecut, part, comm);
  if (GlobalSEMinComm(*comm, status) == 0)
    return METIS_ERROR;

  status = METIS_OK;
  gk_malloc_init();
  curmem = gk_GetCurMemoryUsed();

  /* Setup ctrl */
  ctrl = SetupCtrl(PARMETIS_OP_RMETIS, options, *ncon, *nparts, tpwgts, ubvec, *comm);
  npes = ctrl->npes;
  mype = ctrl->mype;

  /* Take care of the nparts == 1 case */
  if (*nparts == 1) {
    iset(vtxdist[mype+1]-vtxdist[mype], (*numflag == 0 ? 0 : 1), part);
    *edgecut = 0;
    goto DONE;
  }

  /* setup the graph */
  if (*numflag > 0)
    ChangeNumbering(vtxdist, xadj, adjncy, part, npes, mype, 1);

  graph = SetupGraph(ctrl, *ncon, vtxdist, xadj, vwgt, NULL, adjncy, adjwgt, *wgtflag);

  if (ctrl->ps_relation == PARMETIS_PSR_COUPLED)
    iset(graph->nvtxs, mype, graph->home);
  else
    icopy(graph->nvtxs, part, graph->home);

  /* Allocate workspace */
  AllocateWSpace(ctrl, 10*graph->nvtxs);

  /* Partition and Remap */
  STARTTIMER(ctrl, ctrl->TotalTmr);

  ctrl->CoarsenTo = gk_min(vtxdist[npes]+1, 50*(*ncon)*gk_max(npes, *nparts));

  Adaptive_Partition(ctrl, graph);
  ParallelReMapGraph(ctrl, graph);

  icopy(graph->nvtxs, graph->where, part);
  *edgecut = graph->mincut;

  STOPTIMER(ctrl, ctrl->TotalTmr);

  /* Take care of output */
  IFSET(ctrl->dbglvl, DBG_TIME, PrintTimingInfo(ctrl));
  IFSET(ctrl->dbglvl, DBG_TIME, gkMPI_Barrier(ctrl->gcomm));
  IFSET(ctrl->dbglvl, DBG_INFO, PrintPostPartInfo(ctrl, graph, 1));

  FreeInitialGraphAndRemap(graph);

  if (*numflag > 0)
    ChangeNumbering(vtxdist, xadj, adjncy, part, npes, mype, 0);

DONE:
  FreeCtrl(&ctrl);
  if (gk_GetCurMemoryUsed() - curmem > 0) {
    printf("ParMETIS appears to have a memory leak of %zd bytes. Report this.\n",
        (ssize_t)(gk_GetCurMemoryUsed() - curmem));
  }
  gk_malloc_cleanup(0);

  return (int)status;
}
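/* Added note (not in the original source): the home[] array records where each
   vertex currently lives for the purpose of measuring redistribution cost.
   With PARMETIS_PSR_COUPLED the current data distribution itself is taken as
   the existing partition (home[i] = mype); otherwise the user-supplied part[]
   array describes the existing partition and is copied into home[].  The
   adaptive repartitioner then trades off edge cut against the volume of
   vertices whose final part differs from home[], as reflected in the
   ipc_factor/redist_factor cost used by WavefrontDiffusion() above. */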
/*********************************************************************************** * This function is the entry point of the parallel kmetis algorithm that uses * coordinates to compute an initial graph distribution. ************************************************************************************/ int ParMETIS_V3_PartGeomKway(idx_t *vtxdist, idx_t *xadj, idx_t *adjncy, idx_t *vwgt, idx_t *adjwgt, idx_t *wgtflag, idx_t *numflag, idx_t *ndims, real_t *xyz, idx_t *ncon, idx_t *nparts, real_t *tpwgts, real_t *ubvec, idx_t *options, idx_t *edgecut, idx_t *part, MPI_Comm *comm) { idx_t h, i, j, npes, mype, status, nvtxs, seed, dbglvl; idx_t cut, gcut, maxnvtxs; idx_t moptions[METIS_NOPTIONS]; ctrl_t *ctrl; graph_t *graph, *mgraph; real_t balance; size_t curmem; /* Check the input parameters and return if an error */ status = CheckInputsPartGeomKway(vtxdist, xadj, adjncy, vwgt, adjwgt, wgtflag, numflag, ndims, xyz, ncon, nparts, tpwgts, ubvec, options, edgecut, part, comm); if (GlobalSEMinComm(*comm, status) == 0) return METIS_ERROR; status = METIS_OK; gk_malloc_init(); curmem = gk_GetCurMemoryUsed(); /* Setup the ctrl */ ctrl = SetupCtrl(PARMETIS_OP_GKMETIS, options, *ncon, *nparts, tpwgts, ubvec, *comm); npes = ctrl->npes; mype = ctrl->mype; /* Take care the nparts == 1 case */ if (*nparts == 1) { iset(vtxdist[mype+1]-vtxdist[mype], (*numflag == 0 ? 0 : 1), part); *edgecut = 0; goto DONE; } /* Take care of npes == 1 case */ if (npes == 1) { nvtxs = vtxdist[1] - vtxdist[0]; /* subtraction is required when numflag==1 */ METIS_SetDefaultOptions(moptions); moptions[METIS_OPTION_NUMBERING] = *numflag; status = METIS_PartGraphKway(&nvtxs, ncon, xadj, adjncy, vwgt, NULL, adjwgt, nparts, tpwgts, ubvec, moptions, edgecut, part); goto DONE; } /* Setup the graph */ if (*numflag > 0) ChangeNumbering(vtxdist, xadj, adjncy, part, npes, mype, 1); graph = SetupGraph(ctrl, *ncon, vtxdist, xadj, vwgt, NULL, adjncy, adjwgt, *wgtflag); gk_free((void **)&graph->nvwgt, LTERM); /* Allocate the workspace */ AllocateWSpace(ctrl, 10*graph->nvtxs); /* Compute the initial npes-way partitioning geometric partitioning */ STARTTIMER(ctrl, ctrl->TotalTmr); Coordinate_Partition(ctrl, graph, *ndims, xyz, 1); STOPTIMER(ctrl, ctrl->TotalTmr); /* Move the graph according to the partitioning */ STARTTIMER(ctrl, ctrl->MoveTmr); ctrl->nparts = npes; mgraph = MoveGraph(ctrl, graph); ctrl->nparts = *nparts; SetupGraph_nvwgts(ctrl, mgraph); /* compute nvwgts for the moved graph */ if (ctrl->dbglvl&DBG_INFO) { CommInterfaceData(ctrl, graph, graph->where, graph->where+graph->nvtxs); for (cut=0, i=0; i<graph->nvtxs; i++) { for (j=graph->xadj[i]; j<graph->xadj[i+1]; j++) { if (graph->where[i] != graph->where[graph->adjncy[j]]) cut += graph->adjwgt[j]; } } gcut = GlobalSESum(ctrl, cut)/2; maxnvtxs = GlobalSEMax(ctrl, mgraph->nvtxs); balance = (real_t)(maxnvtxs)/((real_t)(graph->gnvtxs)/(real_t)(npes)); rprintf(ctrl, "XYZ Cut: %6"PRIDX" \tBalance: %6.3"PRREAL" [%"PRIDX" %"PRIDX" %"PRIDX"]\n", gcut, balance, maxnvtxs, graph->gnvtxs, npes); } STOPTIMER(ctrl, ctrl->MoveTmr); /* Compute the partition of the moved graph */ STARTTIMER(ctrl, ctrl->TotalTmr); ctrl->CoarsenTo = gk_min(vtxdist[npes]+1, 25*(*ncon)*gk_max(npes, *nparts)); if (vtxdist[npes] < SMALLGRAPH || vtxdist[npes] < npes*20 || GlobalSESum(ctrl, mgraph->nedges) == 0) { /* serially */ IFSET(ctrl->dbglvl, DBG_INFO, rprintf(ctrl, "Partitioning a graph of size %"PRIDX" serially\n", vtxdist[npes])); PartitionSmallGraph(ctrl, mgraph); } else { /* in parallel */ Global_Partition(ctrl, 
mgraph); } ParallelReMapGraph(ctrl, mgraph); /* Invert the ordering back to the original graph */ ctrl->nparts = npes; ProjectInfoBack(ctrl, graph, part, mgraph->where); ctrl->nparts = *nparts; *edgecut = mgraph->mincut; STOPTIMER(ctrl, ctrl->TotalTmr); /* Print some stats */ IFSET(ctrl->dbglvl, DBG_TIME, PrintTimingInfo(ctrl)); IFSET(ctrl->dbglvl, DBG_TIME, gkMPI_Barrier(ctrl->gcomm)); IFSET(ctrl->dbglvl, DBG_INFO, PrintPostPartInfo(ctrl, mgraph, 0)); FreeGraph(mgraph); FreeInitialGraphAndRemap(graph); if (*numflag > 0) ChangeNumbering(vtxdist, xadj, adjncy, part, npes, mype, 0); DONE: FreeCtrl(&ctrl); if (gk_GetCurMemoryUsed() - curmem > 0) { printf("ParMETIS appears to have a memory leak of %zdbytes. Report this.\n", (ssize_t)(gk_GetCurMemoryUsed() - curmem)); } gk_malloc_cleanup(0); return (int)status; }
void FM_2WayNodeBalance(ctrl_t *ctrl, graph_t *graph) { idx_t i, ii, j, k, jj, kk, nvtxs, nbnd, nswaps, gain; idx_t badmaxpwgt, higain, oldgain, to, other; idx_t *xadj, *vwgt, *adjncy, *where, *pwgts, *edegrees, *bndind, *bndptr; idx_t *perm, *moved; rpq_t *queue; nrinfo_t *rinfo; real_t mult; nvtxs = graph->nvtxs; xadj = graph->xadj; adjncy = graph->adjncy; vwgt = graph->vwgt; bndind = graph->bndind; bndptr = graph->bndptr; where = graph->where; pwgts = graph->pwgts; rinfo = graph->nrinfo; mult = 0.5*ctrl->ubfactors[0]; badmaxpwgt = (idx_t)(mult*(pwgts[0]+pwgts[1])); if (gk_max(pwgts[0], pwgts[1]) < badmaxpwgt) return; if (iabs(pwgts[0]-pwgts[1]) < 3*graph->tvwgt[0]/nvtxs) return; WCOREPUSH; to = (pwgts[0] < pwgts[1] ? 0 : 1); other = (to+1)%2; queue = rpqCreate(nvtxs); perm = iwspacemalloc(ctrl, nvtxs); moved = iset(nvtxs, -1, iwspacemalloc(ctrl, nvtxs)); IFSET(ctrl->dbglvl, METIS_DBG_REFINE, printf("Partitions: [%6"PRIDX" %6"PRIDX"] Nv-Nb[%6"PRIDX" %6"PRIDX"]. ISep: %6"PRIDX" [B]\n", pwgts[0], pwgts[1], graph->nvtxs, graph->nbnd, graph->mincut)); nbnd = graph->nbnd; my_irandArrayPermute_r(nbnd, perm, nbnd, 1, &ctrl->curseed); for (ii=0; ii<nbnd; ii++) { i = bndind[perm[ii]]; ASSERT(where[i] == 2); rpqInsert(queue, i, vwgt[i]-rinfo[i].edegrees[other]); } ASSERT(CheckNodeBnd(graph, nbnd)); ASSERT(CheckNodePartitionParams(graph)); /****************************************************** * Get into the FM loop *******************************************************/ for (nswaps=0; nswaps<nvtxs; nswaps++) { if ((higain = rpqGetTop(queue)) == -1) break; moved[higain] = 1; gain = vwgt[higain]-rinfo[higain].edegrees[other]; badmaxpwgt = (idx_t)(mult*(pwgts[0]+pwgts[1])); /* break if other is now underwight */ if (pwgts[to] > pwgts[other]) break; /* break if balance is achieved and no +ve or zero gain */ if (gain < 0 && pwgts[other] < badmaxpwgt) break; /* skip this vertex if it will violate balance on the other side */ if (pwgts[to]+vwgt[higain] > badmaxpwgt) continue; ASSERT(bndptr[higain] != -1); pwgts[2] -= gain; BNDDelete(nbnd, bndind, bndptr, higain); pwgts[to] += vwgt[higain]; where[higain] = to; IFSET(ctrl->dbglvl, METIS_DBG_MOVEINFO, printf("Moved %6"PRIDX" to %3"PRIDX", Gain: %3"PRIDX", \t[%5"PRIDX" %5"PRIDX" %5"PRIDX"]\n", higain, to, vwgt[higain]-rinfo[higain].edegrees[other], pwgts[0], pwgts[1], pwgts[2])); /********************************************************** * Update the degrees of the affected nodes ***********************************************************/ for (j=xadj[higain]; j<xadj[higain+1]; j++) { k = adjncy[j]; if (where[k] == 2) { /* For the in-separator vertices modify their edegree[to] */ rinfo[k].edegrees[to] += vwgt[higain]; } else if (where[k] == other) { /* This vertex is pulled into the separator */ ASSERTP(bndptr[k] == -1, ("%"PRIDX" %"PRIDX" %"PRIDX"\n", k, bndptr[k], where[k])); BNDInsert(nbnd, bndind, bndptr, k); where[k] = 2; pwgts[other] -= vwgt[k]; edegrees = rinfo[k].edegrees; edegrees[0] = edegrees[1] = 0; for (jj=xadj[k]; jj<xadj[k+1]; jj++) { kk = adjncy[jj]; if (where[kk] != 2) edegrees[where[kk]] += vwgt[kk]; else { ASSERT(bndptr[kk] != -1); oldgain = vwgt[kk]-rinfo[kk].edegrees[other]; rinfo[kk].edegrees[other] -= vwgt[k]; if (moved[kk] == -1) rpqUpdate(queue, kk, oldgain+vwgt[k]); } } /* Insert the new vertex into the priority queue */ rpqInsert(queue, k, vwgt[k]-edegrees[other]); } } } IFSET(ctrl->dbglvl, METIS_DBG_REFINE, printf("\tBalanced sep: %6"PRIDX" at %4"PRIDX", PWGTS: [%6"PRIDX" %6"PRIDX"], NBND: %6"PRIDX"\n", pwgts[2], nswaps, 
pwgts[0], pwgts[1], nbnd)); graph->mincut = pwgts[2]; graph->nbnd = nbnd; rpqDestroy(queue); WCOREPOP; }
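/* Added note (not in the original source): in the node-separator routines the
   three "parts" are 0, 1 and the separator (2), and pwgts[2] is the separator
   weight, i.e. the quantity being minimized.  Moving a separator vertex v into
   side `to` removes vwgt[v] from the separator but pulls v's neighbors on the
   other side into it, so the separator shrinks by
   gain = vwgt[v] - rinfo[v].edegrees[other], which is exactly the priority
   used for the queue above and the amount subtracted from pwgts[2] after a
   move. */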
/************************************************************************* * This function is the entry point of the initial partitioning algorithm. * This algorithm assembles the graph to all the processors and preceed * serially. **************************************************************************/ idx_t Mc_Diffusion(ctrl_t *ctrl, graph_t *graph, idx_t *vtxdist, idx_t *where, idx_t *home, idx_t npasses) { idx_t h, i, j; idx_t nvtxs, nedges, ncon, pass, iter, domain, processor; idx_t nparts, mype, npes, nlinks, me, you, wsize; idx_t nvisited, nswaps = -1, tnswaps, done, alldone = -1; idx_t *rowptr, *colind, *diff_where, *sr_where, *ehome, *map, *rmap; idx_t *pack, *unpack, *match, *proc2sub, *sub2proc; idx_t *visited, *gvisited; real_t *transfer, *npwgts, maxdiff, minflow, maxflow; real_t lbavg, oldlbavg, ubavg, *lbvec; real_t *diff_flows, *sr_flows; real_t diff_lbavg, sr_lbavg, diff_cost, sr_cost; idx_t *rbuffer, *sbuffer; idx_t *rcount, *rdispl; real_t *solution, *load, *workspace; matrix_t matrix; graph_t *egraph; if (graph->ncon > 3) return 0; WCOREPUSH; nvtxs = graph->nvtxs; nedges = graph->nedges; ncon = graph->ncon; nparts = ctrl->nparts; mype = ctrl->mype; npes = ctrl->npes; ubavg = ravg(ncon, ctrl->ubvec); /* initialize variables and allocate memory */ lbvec = rwspacemalloc(ctrl, ncon); diff_flows = rwspacemalloc(ctrl, ncon); sr_flows = rwspacemalloc(ctrl, ncon); load = rwspacemalloc(ctrl, nparts); solution = rwspacemalloc(ctrl, nparts); npwgts = graph->gnpwgts = rwspacemalloc(ctrl, ncon*nparts); matrix.values = rwspacemalloc(ctrl, nedges); transfer = matrix.transfer = rwspacemalloc(ctrl, ncon*nedges); proc2sub = iwspacemalloc(ctrl, gk_max(nparts, npes*2)); sub2proc = iwspacemalloc(ctrl, nparts); match = iwspacemalloc(ctrl, nparts); rowptr = matrix.rowptr = iwspacemalloc(ctrl, nparts+1); colind = matrix.colind = iwspacemalloc(ctrl, nedges); rcount = iwspacemalloc(ctrl, npes); rdispl = iwspacemalloc(ctrl, npes+1); pack = iwspacemalloc(ctrl, nvtxs); unpack = iwspacemalloc(ctrl, nvtxs); rbuffer = iwspacemalloc(ctrl, nvtxs); sbuffer = iwspacemalloc(ctrl, nvtxs); map = iwspacemalloc(ctrl, nvtxs); rmap = iwspacemalloc(ctrl, nvtxs); diff_where = iwspacemalloc(ctrl, nvtxs); ehome = iwspacemalloc(ctrl, nvtxs); wsize = gk_max(sizeof(real_t)*nparts*6, sizeof(idx_t)*(nvtxs+nparts*2+1)); workspace = (real_t *)gk_malloc(wsize, "Mc_Diffusion: workspace"); graph->ckrinfo = (ckrinfo_t *)gk_malloc(nvtxs*sizeof(ckrinfo_t), "Mc_Diffusion: rinfo"); /* construct subdomain connectivity graph */ matrix.nrows = nparts; SetUpConnectGraph(graph, &matrix, (idx_t *)workspace); nlinks = (matrix.nnzs-nparts) / 2; visited = iwspacemalloc(ctrl, matrix.nnzs); gvisited = iwspacemalloc(ctrl, matrix.nnzs); for (pass=0; pass<npasses; pass++) { rset(matrix.nnzs*ncon, 0.0, transfer); iset(matrix.nnzs, 0, gvisited); iset(matrix.nnzs, 0, visited); iter = nvisited = 0; /* compute ncon flow solutions */ for (h=0; h<ncon; h++) { rset(nparts, 0.0, solution); ComputeLoad(graph, nparts, load, ctrl->tpwgts, h); lbvec[h] = (rmax(nparts, load)+1.0/nparts) * (real_t)nparts; ConjGrad2(&matrix, load, solution, 0.001, workspace); ComputeTransferVector(ncon, &matrix, solution, transfer, h); } oldlbavg = ravg(ncon, lbvec); tnswaps = 0; maxdiff = 0.0; for (i=0; i<nparts; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { maxflow = rmax(ncon, transfer+j*ncon); minflow = rmin(ncon, transfer+j*ncon); maxdiff = (maxflow - minflow > maxdiff) ? 
maxflow - minflow : maxdiff; } } while (nvisited < nlinks) { /* compute independent sets of subdomains */ iset(gk_max(nparts, npes*2), UNMATCHED, proc2sub); CSR_Match_SHEM(&matrix, match, proc2sub, gvisited, ncon); /* set up the packing arrays */ iset(nparts, UNMATCHED, sub2proc); for (i=0; i<npes*2; i++) { if (proc2sub[i] == UNMATCHED) break; sub2proc[proc2sub[i]] = i/2; } iset(npes, 0, rcount); for (i=0; i<nvtxs; i++) { domain = where[i]; processor = sub2proc[domain]; if (processor != UNMATCHED) rcount[processor]++; } rdispl[0] = 0; for (i=1; i<npes+1; i++) rdispl[i] = rdispl[i-1] + rcount[i-1]; iset(nvtxs, UNMATCHED, unpack); for (i=0; i<nvtxs; i++) { domain = where[i]; processor = sub2proc[domain]; if (processor != UNMATCHED) unpack[rdispl[processor]++] = i; } SHIFTCSR(i, npes, rdispl); iset(nvtxs, UNMATCHED, pack); for (i=0; i<rdispl[npes]; i++) { ASSERT(unpack[i] != UNMATCHED); domain = where[unpack[i]]; processor = sub2proc[domain]; if (processor != UNMATCHED) pack[unpack[i]] = i; } /* Compute the flows */ if (proc2sub[mype*2] != UNMATCHED) { me = proc2sub[2*mype]; you = proc2sub[2*mype+1]; ASSERT(me != you); for (j=rowptr[me]; j<rowptr[me+1]; j++) { if (colind[j] == you) { visited[j] = 1; rcopy(ncon, transfer+j*ncon, diff_flows); break; } } for (j=rowptr[you]; j<rowptr[you+1]; j++) { if (colind[j] == me) { visited[j] = 1; for (h=0; h<ncon; h++) { if (transfer[j*ncon+h] > 0.0) diff_flows[h] = -1.0 * transfer[j*ncon+h]; } break; } } nswaps = 1; rcopy(ncon, diff_flows, sr_flows); iset(nvtxs, 0, sbuffer); for (i=0; i<nvtxs; i++) { if (where[i] == me || where[i] == you) sbuffer[i] = 1; } egraph = ExtractGraph(ctrl, graph, sbuffer, map, rmap); if (egraph != NULL) { icopy(egraph->nvtxs, egraph->where, diff_where); for (j=0; j<egraph->nvtxs; j++) ehome[j] = home[map[j]]; RedoMyLink(ctrl, egraph, ehome, me, you, sr_flows, &sr_cost, &sr_lbavg); if (ncon <= 4) { sr_where = egraph->where; egraph->where = diff_where; nswaps = BalanceMyLink(ctrl, egraph, ehome, me, you, diff_flows, maxdiff, &diff_cost, &diff_lbavg, 1.0/(real_t)nvtxs); if ((sr_lbavg < diff_lbavg && (diff_lbavg >= ubavg-1.0 || sr_cost == diff_cost)) || (sr_lbavg < ubavg-1.0 && sr_cost < diff_cost)) { for (i=0; i<egraph->nvtxs; i++) where[map[i]] = sr_where[i]; } else { for (i=0; i<egraph->nvtxs; i++) where[map[i]] = diff_where[i]; } } else { for (i=0; i<egraph->nvtxs; i++) where[map[i]] = egraph->where[i]; } gk_free((void **)&egraph->xadj, &egraph->nvwgt, &egraph->adjncy, &egraph, LTERM); } /* Pack the flow data */ iset(nvtxs, UNMATCHED, sbuffer); for (i=0; i<nvtxs; i++) { domain = where[i]; if (domain == you || domain == me) sbuffer[pack[i]] = where[i]; } } /* Broadcast the flow data */ gkMPI_Allgatherv((void *)&sbuffer[rdispl[mype]], rcount[mype], IDX_T, (void *)rbuffer, rcount, rdispl, IDX_T, ctrl->comm); /* Unpack the flow data */ for (i=0; i<rdispl[npes]; i++) { if (rbuffer[i] != UNMATCHED) where[unpack[i]] = rbuffer[i]; } /* Do other stuff */ gkMPI_Allreduce((void *)visited, (void *)gvisited, matrix.nnzs, IDX_T, MPI_MAX, ctrl->comm); nvisited = isum(matrix.nnzs, gvisited, 1)/2; tnswaps += GlobalSESum(ctrl, nswaps); if (iter++ == NGD_PASSES) break; } /* perform serial refinement */ Mc_ComputeSerialPartitionParams(ctrl, graph, nparts); Mc_SerialKWayAdaptRefine(ctrl, graph, nparts, home, ctrl->ubvec, 10); /* check for early breakout */ for (h=0; h<ncon; h++) { lbvec[h] = (real_t)(nparts) * npwgts[rargmax_strd(nparts,npwgts+h,ncon)*ncon+h]; } lbavg = ravg(ncon, lbvec); done = 0; if (tnswaps == 0 || lbavg >= oldlbavg || lbavg <= 
ubavg + 0.035) done = 1; alldone = GlobalSEMax(ctrl, done); if (alldone == 1) break; } /* ensure that all subdomains have at least one vertex */ /* iset(nparts, 0, match); for (i=0; i<nvtxs; i++) match[where[i]]++; done = 0; while (done == 0) { done = 1; me = iargmin(nparts, match); if (match[me] == 0) { if (ctrl->mype == PE) printf("WARNING: empty subdomain %"PRIDX" in Mc_Diffusion\n", me); you = iargmax(nparts, match); for (i=0; i<nvtxs; i++) { if (where[i] == you) { where[i] = me; match[you]--; match[me]++; done = 0; break; } } } } */ /* now free memory and return */ gk_free((void **)&workspace, (void **)&graph->ckrinfo, LTERM); graph->gnpwgts = NULL; graph->ckrinfo = NULL; WCOREPOP; return 0; }
void Match_Global(ctrl_t *ctrl, graph_t *graph) { idx_t h, i, ii, j, k; idx_t nnbrs, nvtxs, ncon, cnvtxs, firstvtx, lastvtx, maxi, maxidx, nkept; idx_t otherlastvtx, nrequests, nchanged, pass, nmatched, wside; idx_t *xadj, *adjncy, *adjwgt, *vtxdist, *home, *myhome; idx_t *match; idx_t *peind, *sendptr, *recvptr; idx_t *perm, *iperm, *nperm, *changed; real_t *nvwgt, maxnvwgt; idx_t *nreqs_pe; ikv_t *match_requests, *match_granted, *pe_requests; idx_t last_unmatched; WCOREPUSH; maxnvwgt = 0.75/((real_t)(ctrl->CoarsenTo)); graph->match_type = PARMETIS_MTYPE_GLOBAL; IFSET(ctrl->dbglvl, DBG_TIME, gkMPI_Barrier(ctrl->comm)); IFSET(ctrl->dbglvl, DBG_TIME, starttimer(ctrl->MatchTmr)); nvtxs = graph->nvtxs; ncon = graph->ncon; xadj = graph->xadj; adjncy = graph->adjncy; adjwgt = graph->adjwgt; home = graph->home; nvwgt = graph->nvwgt; vtxdist = graph->vtxdist; firstvtx = vtxdist[ctrl->mype]; lastvtx = vtxdist[ctrl->mype+1]; nnbrs = graph->nnbrs; peind = graph->peind; sendptr = graph->sendptr; recvptr = graph->recvptr; match = graph->match = ismalloc(nvtxs+graph->nrecv, UNMATCHED, "GlobalMatch: match"); /* wspacemalloc'ed arrays */ myhome = iset(nvtxs+graph->nrecv, UNMATCHED, iwspacemalloc(ctrl, nvtxs+graph->nrecv)); nreqs_pe = iset(nnbrs, 0, iwspacemalloc(ctrl, nnbrs)); perm = iwspacemalloc(ctrl, nvtxs); iperm = iwspacemalloc(ctrl, nvtxs); nperm = iwspacemalloc(ctrl, nnbrs); changed = iwspacemalloc(ctrl, nvtxs); match_requests = ikvwspacemalloc(ctrl, graph->nsend); match_granted = ikvwspacemalloc(ctrl, graph->nrecv); /* create the traversal order */ FastRandomPermute(nvtxs, perm, 1); for (i=0; i<nvtxs; i++) iperm[perm[i]] = i; iincset(nnbrs, 0, nperm); /* if coasening for adaptive/repartition, exchange home information */ if (ctrl->partType == ADAPTIVE_PARTITION || ctrl->partType == REFINE_PARTITION) { PASSERT(ctrl, home != NULL); icopy(nvtxs, home, myhome); CommInterfaceData(ctrl, graph, myhome, myhome+nvtxs); } /* if coarsening for ordering, replace home with where information */ if (ctrl->partType == ORDER_PARTITION) { PASSERT(ctrl, graph->where != NULL); icopy(nvtxs, graph->where, myhome); CommInterfaceData(ctrl, graph, myhome, myhome+nvtxs); } /* mark all heavy vertices as TOO_HEAVY so they will not be matched */ for (nchanged=i=0; i<nvtxs; i++) { for (h=0; h<ncon; h++) { if (nvwgt[i*ncon+h] > maxnvwgt) { match[i] = TOO_HEAVY; nchanged++; break; } } } /* If no heavy vertices, pick one at random and treat it as such so that at the end of the matching each partition will still have one vertex. This is to eliminate the cases in which once a matching has been computed, a processor ends up having no vertices */ if (nchanged == 0) match[RandomInRange(nvtxs)] = TOO_HEAVY; CommInterfaceData(ctrl, graph, match, match+nvtxs); /* set initial value of nkept based on how over/under weight the partition is to begin with */ nkept = graph->gnvtxs/ctrl->npes - nvtxs; /* Find a matching by doing multiple iterations */ for (nmatched=pass=0; pass<NMATCH_PASSES; pass++) { wside = (graph->level+pass)%2; nchanged = nrequests = 0; for (last_unmatched=ii=nmatched; ii<nvtxs; ii++) { i = perm[ii]; if (match[i] == UNMATCHED) { /* Unmatched */ maxidx = i; maxi = -1; /* Deal with islands. Find another vertex and match it with */ if (xadj[i] == xadj[i+1]) { last_unmatched = gk_max(ii, last_unmatched)+1; for (; last_unmatched<nvtxs; last_unmatched++) { k = perm[last_unmatched]; if (match[k] == UNMATCHED && myhome[i] == myhome[k]) { match[i] = firstvtx+k + (i <= k ? KEEP_BIT : 0); match[k] = firstvtx+i + (i > k ? 
KEEP_BIT : 0); changed[nchanged++] = i; changed[nchanged++] = k; break; } } continue; } /* Find a heavy-edge matching. */ for (j=xadj[i]; j<xadj[i+1]; j++) { k = adjncy[j]; if (match[k] == UNMATCHED && myhome[k] == myhome[i]) { if (ncon == 1) { if (maxi == -1 || adjwgt[maxi] < adjwgt[j] || (adjwgt[maxi] == adjwgt[j] && RandomInRange(xadj[i+1]-xadj[i]) == 0)) { maxi = j; maxidx = k; } } else { if (maxi == -1 || adjwgt[maxi] < adjwgt[j] || (adjwgt[maxi] == adjwgt[j] && maxidx < nvtxs && k < nvtxs && BetterVBalance(ncon,nvwgt+i*ncon,nvwgt+maxidx*ncon,nvwgt+k*ncon) >= 0)) { maxi = j; maxidx = k; } } } } if (maxi != -1) { k = adjncy[maxi]; if (k < nvtxs) { /* Take care the local vertices first */ /* Here we give preference the local matching by granting it right away */ match[i] = firstvtx+k + (i <= k ? KEEP_BIT : 0); match[k] = firstvtx+i + (i > k ? KEEP_BIT : 0); changed[nchanged++] = i; changed[nchanged++] = k; } else { /* Take care any remote boundary vertices */ match[k] = MAYBE_MATCHED; /* Alternate among which vertices will issue the requests */ if ((wside == 0 && firstvtx+i < graph->imap[k]) || (wside == 1 && firstvtx+i > graph->imap[k])) { match[i] = MAYBE_MATCHED; match_requests[nrequests].key = graph->imap[k]; match_requests[nrequests].val = firstvtx+i; nrequests++; } } } } } #ifdef DEBUG_MATCH PrintVector2(ctrl, nvtxs, firstvtx, match, "Match1"); myprintf(ctrl, "[c: %2"PRIDX"] Nlocal: %"PRIDX", Nrequests: %"PRIDX"\n", c, nlocal, nrequests); #endif /*********************************************************** * Exchange the match_requests, requests for me are stored in * match_granted ************************************************************/ /* Issue the receives first. Note that from each PE can receive a maximum of the interface node that it needs to send it in the case of a mat-vec */ for (i=0; i<nnbrs; i++) { gkMPI_Irecv((void *)(match_granted+recvptr[i]), 2*(recvptr[i+1]-recvptr[i]), IDX_T, peind[i], 1, ctrl->comm, ctrl->rreq+i); } /* Issue the sends next. 
This needs some work */ ikvsorti(nrequests, match_requests); for (j=i=0; i<nnbrs; i++) { otherlastvtx = vtxdist[peind[i]+1]; for (k=j; k<nrequests && match_requests[k].key < otherlastvtx; k++); gkMPI_Isend((void *)(match_requests+j), 2*(k-j), IDX_T, peind[i], 1, ctrl->comm, ctrl->sreq+i); j = k; } /* OK, now get into the loop waiting for the operations to finish */ gkMPI_Waitall(nnbrs, ctrl->rreq, ctrl->statuses); for (i=0; i<nnbrs; i++) { gkMPI_Get_count(ctrl->statuses+i, IDX_T, nreqs_pe+i); nreqs_pe[i] = nreqs_pe[i]/2; /* Adjust for pairs of IDX_T */ } gkMPI_Waitall(nnbrs, ctrl->sreq, ctrl->statuses); /*********************************************************** * Now, go and service the requests that you received in * match_granted ************************************************************/ RandomPermute(nnbrs, nperm, 0); for (ii=0; ii<nnbrs; ii++) { i = nperm[ii]; pe_requests = match_granted+recvptr[i]; for (j=0; j<nreqs_pe[i]; j++) { k = pe_requests[j].key; PASSERTP(ctrl, k >= firstvtx && k < lastvtx, (ctrl, "%"PRIDX" %"PRIDX" %"PRIDX" %"PRIDX" %"PRIDX"\n", firstvtx, lastvtx, k, j, peind[i])); /* myprintf(ctrl, "Requesting a match %"PRIDX" %"PRIDX"\n", pe_requests[j].key, pe_requests[j].val); */ if (match[k-firstvtx] == UNMATCHED) { /* Bingo, lets grant this request */ changed[nchanged++] = k-firstvtx; if (nkept >= 0) { /* decide who to keep it based on local balance */ match[k-firstvtx] = pe_requests[j].val + KEEP_BIT; nkept--; } else { match[k-firstvtx] = pe_requests[j].val; pe_requests[j].key += KEEP_BIT; nkept++; } /* myprintf(ctrl, "Request from pe:%"PRIDX" (%"PRIDX" %"PRIDX") granted!\n", peind[i], pe_requests[j].val, pe_requests[j].key); */ } else { /* We are not granting the request */ /* myprintf(ctrl, "Request from pe:%"PRIDX" (%"PRIDX" %"PRIDX") not granted!\n", peind[i], pe_requests[j].val, pe_requests[j].key); */ pe_requests[j].key = UNMATCHED; } } } /*********************************************************** * Exchange the match_granted information. It is stored in * match_requests ************************************************************/ /* Issue the receives first. Note that from each PE can receive a maximum of the interface node that it needs to send during the case of a mat-vec */ for (i=0; i<nnbrs; i++) { gkMPI_Irecv((void *)(match_requests+sendptr[i]), 2*(sendptr[i+1]-sendptr[i]), IDX_T, peind[i], 1, ctrl->comm, ctrl->rreq+i); } /* Issue the sends next. */ for (i=0; i<nnbrs; i++) { gkMPI_Isend((void *)(match_granted+recvptr[i]), 2*nreqs_pe[i], IDX_T, peind[i], 1, ctrl->comm, ctrl->sreq+i); } /* OK, now get into the loop waiting for the operations to finish */ gkMPI_Waitall(nnbrs, ctrl->rreq, ctrl->statuses); for (i=0; i<nnbrs; i++) { gkMPI_Get_count(ctrl->statuses+i, IDX_T, nreqs_pe+i); nreqs_pe[i] = nreqs_pe[i]/2; /* Adjust for pairs of IDX_T */ } gkMPI_Waitall(nnbrs, ctrl->sreq, ctrl->statuses); /*********************************************************** * Now, go and through the match_requests and update local * match information for the matchings that were granted. 
************************************************************/ for (i=0; i<nnbrs; i++) { pe_requests = match_requests+sendptr[i]; for (j=0; j<nreqs_pe[i]; j++) { match[pe_requests[j].val-firstvtx] = pe_requests[j].key; if (pe_requests[j].key != UNMATCHED) changed[nchanged++] = pe_requests[j].val-firstvtx; } } for (i=0; i<nchanged; i++) { ii = iperm[changed[i]]; perm[ii] = perm[nmatched]; iperm[perm[nmatched]] = ii; nmatched++; } CommChangedInterfaceData(ctrl, graph, nchanged, changed, match, match_requests, match_granted); } /* Traverse the vertices and those that were unmatched, match them with themselves */ cnvtxs = 0; for (i=0; i<nvtxs; i++) { if (match[i] == UNMATCHED || match[i] == TOO_HEAVY) { match[i] = (firstvtx+i) + KEEP_BIT; cnvtxs++; } else if (match[i] >= KEEP_BIT) { /* A matched vertex which I get to keep */ cnvtxs++; } } if (ctrl->dbglvl&DBG_MATCHINFO) { PrintVector2(ctrl, nvtxs, firstvtx, match, "Match"); myprintf(ctrl, "Cnvtxs: %"PRIDX"\n", cnvtxs); rprintf(ctrl, "Done with matching...\n"); } WCOREPOP; IFSET(ctrl->dbglvl, DBG_TIME, gkMPI_Barrier(ctrl->comm)); IFSET(ctrl->dbglvl, DBG_TIME, stoptimer(ctrl->MatchTmr)); IFSET(ctrl->dbglvl, DBG_TIME, starttimer(ctrl->ContractTmr)); CreateCoarseGraph_Global(ctrl, graph, cnvtxs); IFSET(ctrl->dbglvl, DBG_TIME, gkMPI_Barrier(ctrl->comm)); IFSET(ctrl->dbglvl, DBG_TIME, stoptimer(ctrl->ContractTmr)); }
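/* Added note (not in the original source): the match[] array doubles as an
   encoding of ownership of the resulting coarse vertex.  The special values
   UNMATCHED, TOO_HEAVY and MAYBE_MATCHED mark vertices that are still free,
   excluded because they exceed maxnvwgt, or awaiting a reply to a remote
   request; otherwise match[i] holds the global index of i's partner, with
   KEEP_BIT added when this processor keeps the corresponding coarse vertex.
   That is why cnvtxs above ends up counting exactly the entries that carry
   KEEP_BIT, including the vertices that were matched with themselves. */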
/************************************************************************* * This function converts a mesh into a dual graph **************************************************************************/ int ParMETIS_V3_Mesh2Dual(idx_t *elmdist, idx_t *eptr, idx_t *eind, idx_t *numflag, idx_t *ncommon, idx_t **r_xadj, idx_t **r_adjncy, MPI_Comm *comm) { idx_t i, j, jj, k, kk, m; idx_t npes, mype, pe, count, mask, pass; idx_t nelms, lnns, my_nns, node; idx_t firstelm, firstnode, lnode, nrecv, nsend; idx_t *scounts, *rcounts, *sdispl, *rdispl; idx_t *nodedist, *nmap, *auxarray; idx_t *gnptr, *gnind, *nptr, *nind, *myxadj=NULL, *myadjncy = NULL; idx_t *sbuffer, *rbuffer, *htable; ikv_t *nodelist, *recvbuffer; idx_t maxcount, *ind, *wgt; idx_t gmaxnode, gminnode; size_t curmem; gk_malloc_init(); curmem = gk_GetCurMemoryUsed(); /* Get basic comm info */ gkMPI_Comm_size(*comm, &npes); gkMPI_Comm_rank(*comm, &mype); nelms = elmdist[mype+1]-elmdist[mype]; if (*numflag > 0) ChangeNumberingMesh(elmdist, eptr, eind, NULL, NULL, NULL, npes, mype, 1); mask = (1<<11)-1; /*****************************/ /* Determine number of nodes */ /*****************************/ gminnode = GlobalSEMinComm(*comm, imin(eptr[nelms], eind)); for (i=0; i<eptr[nelms]; i++) eind[i] -= gminnode; gmaxnode = GlobalSEMaxComm(*comm, imax(eptr[nelms], eind)); /**************************/ /* Check for input errors */ /**************************/ ASSERT(nelms > 0); /* construct node distribution array */ nodedist = ismalloc(npes+1, 0, "nodedist"); for (nodedist[0]=0, i=0,j=gmaxnode+1; i<npes; i++) { k = j/(npes-i); nodedist[i+1] = nodedist[i]+k; j -= k; } my_nns = nodedist[mype+1]-nodedist[mype]; firstnode = nodedist[mype]; nodelist = ikvmalloc(eptr[nelms], "nodelist"); auxarray = imalloc(eptr[nelms], "auxarray"); htable = ismalloc(gk_max(my_nns, mask+1), -1, "htable"); scounts = imalloc(npes, "scounts"); rcounts = imalloc(npes, "rcounts"); sdispl = imalloc(npes+1, "sdispl"); rdispl = imalloc(npes+1, "rdispl"); /*********************************************/ /* first find a local numbering of the nodes */ /*********************************************/ for (i=0; i<nelms; i++) { for (j=eptr[i]; j<eptr[i+1]; j++) { nodelist[j].key = eind[j]; nodelist[j].val = j; auxarray[j] = i; /* remember the local element ID that uses this node */ } } ikvsorti(eptr[nelms], nodelist); for (count=1, i=1; i<eptr[nelms]; i++) { if (nodelist[i].key > nodelist[i-1].key) count++; } lnns = count; nmap = imalloc(lnns, "nmap"); /* renumber the nodes of the elements array */ count = 1; nmap[0] = nodelist[0].key; eind[nodelist[0].val] = 0; nodelist[0].val = auxarray[nodelist[0].val]; /* Store the local element ID */ for (i=1; i<eptr[nelms]; i++) { if (nodelist[i].key > nodelist[i-1].key) { nmap[count] = nodelist[i].key; count++; } eind[nodelist[i].val] = count-1; nodelist[i].val = auxarray[nodelist[i].val]; /* Store the local element ID */ } gkMPI_Barrier(*comm); /**********************************************************/ /* perform comms necessary to construct node-element list */ /**********************************************************/ iset(npes, 0, scounts); for (pe=i=0; i<eptr[nelms]; i++) { while (nodelist[i].key >= nodedist[pe+1]) pe++; scounts[pe] += 2; } ASSERT(pe < npes); gkMPI_Alltoall((void *)scounts, 1, IDX_T, (void *)rcounts, 1, IDX_T, *comm); icopy(npes, scounts, sdispl); MAKECSR(i, npes, sdispl); icopy(npes, rcounts, rdispl); MAKECSR(i, npes, rdispl); ASSERT(sdispl[npes] == eptr[nelms]*2); nrecv = rdispl[npes]/2; recvbuffer = 
  recvbuffer = ikvmalloc(gk_max(1, nrecv), "recvbuffer");

  gkMPI_Alltoallv((void *)nodelist, scounts, sdispl, IDX_T,
      (void *)recvbuffer, rcounts, rdispl, IDX_T, *comm);

  /**************************************/
  /* construct global node-element list */
  /**************************************/
  gnptr = ismalloc(my_nns+1, 0, "gnptr");

  for (i=0; i<npes; i++) {
    for (j=rdispl[i]/2; j<rdispl[i+1]/2; j++) {
      lnode = recvbuffer[j].key-firstnode;
      ASSERT(lnode >= 0 && lnode < my_nns);

      gnptr[lnode]++;
    }
  }
  MAKECSR(i, my_nns, gnptr);

  gnind = imalloc(gk_max(1, gnptr[my_nns]), "gnind");
  for (pe=0; pe<npes; pe++) {
    firstelm = elmdist[pe];
    for (j=rdispl[pe]/2; j<rdispl[pe+1]/2; j++) {
      lnode = recvbuffer[j].key-firstnode;
      gnind[gnptr[lnode]++] = recvbuffer[j].val+firstelm;
    }
  }
  SHIFTCSR(i, my_nns, gnptr);

  /*********************************************************/
  /* send the node-element info to the relevant processors */
  /*********************************************************/
  iset(npes, 0, scounts);

  /* use a hash table to ensure that each node is sent to a proc only once */
  for (pe=0; pe<npes; pe++) {
    for (j=rdispl[pe]/2; j<rdispl[pe+1]/2; j++) {
      lnode = recvbuffer[j].key-firstnode;
      if (htable[lnode] == -1) {
        scounts[pe] += gnptr[lnode+1]-gnptr[lnode];
        htable[lnode] = 1;
      }
    }

    /* now reset the hash table */
    for (j=rdispl[pe]/2; j<rdispl[pe+1]/2; j++) {
      lnode = recvbuffer[j].key-firstnode;
      htable[lnode] = -1;
    }
  }

  gkMPI_Alltoall((void *)scounts, 1, IDX_T, (void *)rcounts, 1, IDX_T, *comm);

  icopy(npes, scounts, sdispl);
  MAKECSR(i, npes, sdispl);

  /* create the send buffer */
  nsend   = sdispl[npes];
  sbuffer = imalloc(gk_max(1, nsend), "sbuffer");

  count = 0;
  for (pe=0; pe<npes; pe++) {
    for (j=rdispl[pe]/2; j<rdispl[pe+1]/2; j++) {
      lnode = recvbuffer[j].key-firstnode;
      if (htable[lnode] == -1) {
        for (k=gnptr[lnode]; k<gnptr[lnode+1]; k++) {
          if (k == gnptr[lnode])
            sbuffer[count++] = -1*(gnind[k]+1);
          else
            sbuffer[count++] = gnind[k];
        }
        htable[lnode] = 1;
      }
    }
    ASSERT(count == sdispl[pe+1]);

    /* now reset the hash table */
    for (j=rdispl[pe]/2; j<rdispl[pe+1]/2; j++) {
      lnode = recvbuffer[j].key-firstnode;
      htable[lnode] = -1;
    }
  }

  icopy(npes, rcounts, rdispl);
  MAKECSR(i, npes, rdispl);

  nrecv   = rdispl[npes];
  rbuffer = imalloc(gk_max(1, nrecv), "rbuffer");

  gkMPI_Alltoallv((void *)sbuffer, scounts, sdispl, IDX_T,
      (void *)rbuffer, rcounts, rdispl, IDX_T, *comm);

  k = -1;
  nptr = ismalloc(lnns+1, 0, "nptr");
  nind = rbuffer;
  for (pe=0; pe<npes; pe++) {
    for (j=rdispl[pe]; j<rdispl[pe+1]; j++) {
      if (nind[j] < 0) {
        k++;
        nind[j] = (-1*nind[j])-1;
      }
      nptr[k]++;
    }
  }
  MAKECSR(i, lnns, nptr);

  ASSERT(k+1 == lnns);
  ASSERT(nptr[lnns] == nrecv);

  myxadj = *r_xadj = (idx_t *)malloc(sizeof(idx_t)*(nelms+1));
  if (myxadj == NULL)
    gk_errexit(SIGMEM, "Failed to allocate memory for the dual graph's xadj array.\n");
  iset(nelms+1, 0, myxadj);

  iset(mask+1, -1, htable);

  firstelm = elmdist[mype];

  /* Two passes -- in first pass, simply find out the memory requirements */
  maxcount = 200;
  ind = imalloc(maxcount, "ParMETIS_V3_Mesh2Dual: ind");
  wgt = imalloc(maxcount, "ParMETIS_V3_Mesh2Dual: wgt");
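  /* Sketch of the two-pass dual-graph construction that follows (an
     explanatory note): for every local element i, the node-element lists
     nptr/nind enumerate all elements that share at least one node with i.
     The ind[]/wgt[] arrays, indexed through the htable hash on the low
     bits (mask) of the candidate element id kk, accumulate how many nodes
     each candidate shares with i; hash collisions fall back to a linear
     scan of ind[].  A dual edge (i, kk) is kept only when that count
     reaches *ncommon -- e.g., with *ncommon == 2, two triangles become
     adjacent only if they share a full edge, not just a corner node.
     Pass 0 merely counts the kept edges to size myxadj/myadjncy; pass 1
     fills myadjncy in place. */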
  for (pass=0; pass<2; pass++) {
    for (i=0; i<nelms; i++) {
      for (count=0, j=eptr[i]; j<eptr[i+1]; j++) {
        node = eind[j];

        for (k=nptr[node]; k<nptr[node+1]; k++) {
          if ((kk=nind[k]) == firstelm+i)
            continue;

          m = htable[(kk&mask)];

          if (m == -1) {
            ind[count] = kk;
            wgt[count] = 1;
            htable[(kk&mask)] = count++;
          }
          else {
            if (ind[m] == kk) {
              wgt[m]++;
            }
            else {
              for (jj=0; jj<count; jj++) {
                if (ind[jj] == kk) {
                  wgt[jj]++;
                  break;
                }
              }
              if (jj == count) {
                ind[count]   = kk;
                wgt[count++] = 1;
              }
            }
          }

          /* Adjust the memory.
             This will be replaced by an idxrealloc() once GKlib is incorporated */
          if (count == maxcount-1) {
            maxcount *= 2;
            ind = irealloc(ind, maxcount, "ParMETIS_V3_Mesh2Dual: ind");
            wgt = irealloc(wgt, maxcount, "ParMETIS_V3_Mesh2Dual: wgt");
          }
        }
      }

      for (j=0; j<count; j++) {
        htable[(ind[j]&mask)] = -1;
        if (wgt[j] >= *ncommon) {
          if (pass == 0)
            myxadj[i]++;
          else
            myadjncy[myxadj[i]++] = ind[j];
        }
      }
    }

    if (pass == 0) {
      MAKECSR(i, nelms, myxadj);
      myadjncy = *r_adjncy = (idx_t *)malloc(sizeof(idx_t)*myxadj[nelms]);
      if (myadjncy == NULL)
        gk_errexit(SIGMEM, "Failed to allocate memory for dual graph's adjncy array.\n");
    }
    else {
      SHIFTCSR(i, nelms, myxadj);
    }
  }

  /*****************************************/
  /* correctly renumber the elements array */
  /*****************************************/
  for (i=0; i<eptr[nelms]; i++)
    eind[i] = nmap[eind[i]] + gminnode;

  if (*numflag == 1)
    ChangeNumberingMesh(elmdist, eptr, eind, myxadj, myadjncy, NULL, npes, mype, 0);

  gk_free((void **)&nodedist, &nodelist, &auxarray, &htable, &scounts, &rcounts,
      &sdispl, &rdispl, &nmap, &recvbuffer, &gnptr, &gnind, &sbuffer,
      &rbuffer, &nptr, &ind, &wgt, LTERM);

  if (gk_GetCurMemoryUsed() - curmem > 0) {
    printf("ParMETIS appears to have a memory leak of %zd bytes. Report this.\n",
        (ssize_t)(gk_GetCurMemoryUsed() - curmem));
  }
  gk_malloc_cleanup(0);

  return METIS_OK;
}
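/*
   A minimal usage sketch for ParMETIS_V3_Mesh2Dual (illustrative only; the
   array names and sizes are placeholders).  Every processor passes its local
   slice of the mesh in the distributed eptr/eind format together with the
   global element distribution elmdist, and receives the local part of the
   dual graph in xadj/adjncy arrays allocated with malloc() (see above) that
   the caller is responsible for releasing:

     idx_t numflag = 0, ncommon = 2;     // C-style numbering; >= 2 shared nodes
     idx_t *xadj = NULL, *adjncy = NULL;
     MPI_Comm comm = MPI_COMM_WORLD;

     // elmdist[npes+1]: element range of every PE (identical on all PEs)
     // eptr[nelms+1], eind[eptr[nelms]]: nodes of the local elements

     ParMETIS_V3_Mesh2Dual(elmdist, eptr, eind, &numflag, &ncommon,
                           &xadj, &adjncy, &comm);

     // ... use xadj/adjncy, e.g., as the graph input of ParMETIS_V3_PartKway ...

     free(xadj); free(adjncy);
*/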
/*************************************************************************
* This function is the entry point of the initial balancing algorithm.
* This algorithm assembles the graph to all the processors and proceeds
* with the balancing step.
**************************************************************************/
void Balance_Partition(ctrl_t *ctrl, graph_t *graph)
{
  idx_t i, j, nvtxs, nedges, ncon;
  idx_t mype, npes, srnpes, srmype;
  idx_t *vtxdist, *xadj, *adjncy, *adjwgt, *vwgt, *vsize;
  idx_t *part, *lwhere, *home;
  idx_t lnparts, fpart, fpe, lnpes, ngroups;
  idx_t *rcounts, *rdispls;
  idx_t twoparts=2, moptions[METIS_NOPTIONS], edgecut, max_cut;
  idx_t sr_pe, gd_pe, sr, gd, who_wins;
  real_t my_cut, my_totalv, my_cost = -1.0, my_balance = -1.0, wsum;
  real_t rating, max_rating, your_cost = -1.0, your_balance = -1.0;
  real_t lbsum, min_lbsum, *lbvec, *tpwgts, *tpwgts2, buffer[2];
  graph_t *agraph, cgraph;
  ctrl_t *myctrl;
  MPI_Status status;
  MPI_Comm ipcomm, srcomm;
  struct {
    double cost;
    int rank;
  } lpecost, gpecost;

  IFSET(ctrl->dbglvl, DBG_TIME, starttimer(ctrl->InitPartTmr));

  WCOREPUSH;

  vtxdist = graph->vtxdist;
  agraph  = AssembleAdaptiveGraph(ctrl, graph);
  nvtxs   = cgraph.nvtxs  = agraph->nvtxs;
  nedges  = cgraph.nedges = agraph->nedges;
  ncon    = cgraph.ncon   = agraph->ncon;

  xadj   = cgraph.xadj   = icopy(nvtxs+1, agraph->xadj, iwspacemalloc(ctrl, nvtxs+1));
  vwgt   = cgraph.vwgt   = icopy(nvtxs*ncon, agraph->vwgt, iwspacemalloc(ctrl, nvtxs*ncon));
  vsize  = cgraph.vsize  = icopy(nvtxs, agraph->vsize, iwspacemalloc(ctrl, nvtxs));
  adjncy = cgraph.adjncy = icopy(nedges, agraph->adjncy, iwspacemalloc(ctrl, nedges));
  adjwgt = cgraph.adjwgt = icopy(nedges, agraph->adjwgt, iwspacemalloc(ctrl, nedges));
  part   = cgraph.where  = agraph->where = iwspacemalloc(ctrl, nvtxs);

  lwhere = iwspacemalloc(ctrl, nvtxs);
  home   = iwspacemalloc(ctrl, nvtxs);
  lbvec  = rwspacemalloc(ctrl, graph->ncon);

  /****************************************/
  /****************************************/
  if (ctrl->ps_relation == PARMETIS_PSR_UNCOUPLED) {
    WCOREPUSH;
    rcounts = iwspacemalloc(ctrl, ctrl->npes);
    rdispls = iwspacemalloc(ctrl, ctrl->npes+1);

    for (i=0; i<ctrl->npes; i++)
      rdispls[i] = rcounts[i] = vtxdist[i+1]-vtxdist[i];
    MAKECSR(i, ctrl->npes, rdispls);

    gkMPI_Allgatherv((void *)graph->home, graph->nvtxs, IDX_T,
        (void *)part, rcounts, rdispls, IDX_T, ctrl->comm);

    for (i=0; i<agraph->nvtxs; i++)
      home[i] = part[i];

    WCOREPOP;  /* local frees */
  }
  else {
    for (i=0; i<ctrl->npes; i++) {
      for (j=vtxdist[i]; j<vtxdist[i+1]; j++)
        part[j] = home[j] = i;
    }
  }

  /* Ensure that the initial partitioning is legal */
  for (i=0; i<agraph->nvtxs; i++) {
    if (part[i] >= ctrl->nparts)
      part[i] = home[i] = part[i] % ctrl->nparts;
    if (part[i] < 0)
      part[i] = home[i] = (-1*part[i]) % ctrl->nparts;
  }

  /****************************************/
  /****************************************/
  IFSET(ctrl->dbglvl, DBG_REFINEINFO, ComputeSerialBalance(ctrl, agraph, agraph->where, lbvec));
  IFSET(ctrl->dbglvl, DBG_REFINEINFO, rprintf(ctrl, "input cut: %"PRIDX", balance: ",
      ComputeSerialEdgeCut(agraph)));
  for (i=0; i<agraph->ncon; i++)
    IFSET(ctrl->dbglvl, DBG_REFINEINFO, rprintf(ctrl, "%.3"PRREAL" ", lbvec[i]));
  IFSET(ctrl->dbglvl, DBG_REFINEINFO, rprintf(ctrl, "\n"));
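  /* Explanatory note on the split below: processors with even rank form the
     scratch-remap group (sr = 1) and odd-ranked ones the diffusion group
     (gd = 1); when the graph has more than MAX_NCON_FOR_DIFFUSION
     constraints, or only one processor is available, everyone falls back to
     scratch-remap.  Inside the sr group, up to RIP_SPLIT_FACTOR independent
     subgroups each compute a candidate partitioning, and the best one
     (smallest (cost, rank) pair under MPI_MINLOC) is sent to sr_pe and
     remapped against the home[] assignment. */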
  /****************************************/
  /* Split the processors into two groups */
  /****************************************/
  sr = (ctrl->mype % 2 == 0) ? 1 : 0;
  gd = (ctrl->mype % 2 == 1) ? 1 : 0;

  if (graph->ncon > MAX_NCON_FOR_DIFFUSION || ctrl->npes == 1) {
    sr = 1;
    gd = 0;
  }

  sr_pe = 0;
  gd_pe = 1;

  gkMPI_Comm_split(ctrl->gcomm, sr, 0, &ipcomm);
  gkMPI_Comm_rank(ipcomm, &mype);
  gkMPI_Comm_size(ipcomm, &npes);

  if (sr == 1) { /* Half of the processors do scratch-remap */
    ngroups = gk_max(gk_min(RIP_SPLIT_FACTOR, npes), 1);

    gkMPI_Comm_split(ipcomm, mype % ngroups, 0, &srcomm);
    gkMPI_Comm_rank(srcomm, &srmype);
    gkMPI_Comm_size(srcomm, &srnpes);

    METIS_SetDefaultOptions(moptions);
    moptions[METIS_OPTION_SEED] = ctrl->sync + (mype % ngroups) + 1;

    tpwgts  = ctrl->tpwgts;
    tpwgts2 = rwspacemalloc(ctrl, 2*ncon);

    iset(nvtxs, 0, lwhere);
    lnparts = ctrl->nparts;
    fpart = fpe = 0;
    lnpes = srnpes;
    while (lnpes > 1 && lnparts > 1) {
      PASSERT(ctrl, agraph->nvtxs > 1);

      /* determine the weights of the two partitions as a function of the
         weight of the target partition weights */
      for (j=(lnparts>>1), i=0; i<ncon; i++) {
        tpwgts2[i]      = rsum(j, tpwgts+fpart*ncon+i, ncon);
        tpwgts2[ncon+i] = rsum(lnparts-j, tpwgts+(fpart+j)*ncon+i, ncon);
        wsum            = 1.0/(tpwgts2[i] + tpwgts2[ncon+i]);
        tpwgts2[i]      *= wsum;
        tpwgts2[ncon+i] *= wsum;
      }

      METIS_PartGraphRecursive(&agraph->nvtxs, &ncon, agraph->xadj,
          agraph->adjncy, agraph->vwgt, NULL, agraph->adjwgt,
          &twoparts, tpwgts2, NULL, moptions, &edgecut, part);

      /* pick one of the branches */
      if (srmype < fpe+lnpes/2) {
        KeepPart(ctrl, agraph, part, 0);
        lnpes   = lnpes/2;
        lnparts = lnparts/2;
      }
      else {
        KeepPart(ctrl, agraph, part, 1);
        fpart   = fpart + lnparts/2;
        fpe     = fpe + lnpes/2;
        lnpes   = lnpes - lnpes/2;
        lnparts = lnparts - lnparts/2;
      }
    }

    if (lnparts == 1) { /* Case in which srnpes is greater than or equal to nparts */
      /* Only the first process will assign labels (for the reduction to work) */
      if (srmype == fpe) {
        for (i=0; i<agraph->nvtxs; i++)
          lwhere[agraph->label[i]] = fpart;
      }
    }
    else { /* Case in which srnpes is smaller than nparts */
      /* create the normalized tpwgts for the lnparts from ctrl->tpwgts */
      tpwgts = rwspacemalloc(ctrl, lnparts*ncon);
      for (j=0; j<ncon; j++) {
        for (wsum=0.0, i=0; i<lnparts; i++) {
          tpwgts[i*ncon+j] = ctrl->tpwgts[(fpart+i)*ncon+j];
          wsum += tpwgts[i*ncon+j];
        }
        for (wsum=1.0/wsum, i=0; i<lnparts; i++)
          tpwgts[i*ncon+j] *= wsum;
      }

      METIS_PartGraphKway(&agraph->nvtxs, &ncon, agraph->xadj, agraph->adjncy,
          agraph->vwgt, NULL, agraph->adjwgt, &lnparts, tpwgts, NULL, moptions,
          &edgecut, part);

      for (i=0; i<agraph->nvtxs; i++)
        lwhere[agraph->label[i]] = fpart + part[i];
    }

    gkMPI_Allreduce((void *)lwhere, (void *)part, nvtxs, IDX_T, MPI_SUM, srcomm);

    edgecut = ComputeSerialEdgeCut(&cgraph);
    ComputeSerialBalance(ctrl, &cgraph, part, lbvec);
    lbsum = rsum(ncon, lbvec, 1);

    gkMPI_Allreduce((void *)&edgecut, (void *)&max_cut, 1, IDX_T, MPI_MAX, ipcomm);
    gkMPI_Allreduce((void *)&lbsum, (void *)&min_lbsum, 1, REAL_T, MPI_MIN, ipcomm);

    lpecost.rank = ctrl->mype;
    lpecost.cost = lbsum;
    if (min_lbsum < UNBALANCE_FRACTION * (real_t)(ncon)) {
      if (lbsum < UNBALANCE_FRACTION * (real_t)(ncon))
        lpecost.cost = (double)edgecut;
      else
        lpecost.cost = (double)max_cut + lbsum;
    }
    gkMPI_Allreduce((void *)&lpecost, (void *)&gpecost, 1, MPI_DOUBLE_INT,
        MPI_MINLOC, ipcomm);

    if (ctrl->mype == gpecost.rank && ctrl->mype != sr_pe)
      gkMPI_Send((void *)part, nvtxs, IDX_T, sr_pe, 1, ctrl->comm);

    if (ctrl->mype != gpecost.rank && ctrl->mype == sr_pe)
      gkMPI_Recv((void *)part, nvtxs, IDX_T, gpecost.rank, 1, ctrl->comm, &status);

    if (ctrl->mype == sr_pe) {
      icopy(nvtxs, part, lwhere);
      SerialRemap(ctrl, &cgraph, ctrl->nparts, home, lwhere, part, ctrl->tpwgts);
    }

    gkMPI_Comm_free(&srcomm);
  }
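  /* Worked example for the recursive pre-partitioning loop in the
     scratch-remap branch above (illustrative): with ncon = 1, lnparts = 5,
     and uniform target weights tpwgts[] = 0.2 each, j = 5>>1 = 2, so
     tpwgts2[0] = 0.4 and tpwgts2[1] = 0.6 (already summing to 1, so the
     normalization leaves them unchanged), and METIS_PartGraphRecursive is
     asked for a 40/60 split; the "left" branch then continues with
     lnparts = 2 and the "right" one with lnparts = 3. */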
/*************************************************************************
* This function sorts the adjacency lists of the graph's row- or
* column-based view in increasing index order.
**************************************************************************/
void gk_graph_SortAdjacencies(gk_graph_t *graph, int what)
{
  int n, nn=0;
  ssize_t *ptr;
  int *ind;
  float *val;

  switch (what) {
    case GK_CSR_ROW:
      if (!graph->rowptr)
        gk_errexit(SIGERR, "Row-based view of the graph does not exist.\n");

      n   = graph->nrows;
      ptr = graph->rowptr;
      ind = graph->rowind;
      val = graph->rowval;
      break;

    case GK_CSR_COL:
      if (!graph->colptr)
        gk_errexit(SIGERR, "Column-based view of the graph does not exist.\n");

      n   = graph->ncols;
      ptr = graph->colptr;
      ind = graph->colind;
      val = graph->colval;
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  #pragma omp parallel if (n > 100)
  {
    ssize_t i, j, k;
    gk_ikv_t *cand;
    float *tval;

    #pragma omp single
    for (i=0; i<n; i++)
      nn = gk_max(nn, ptr[i+1]-ptr[i]);

    cand = gk_ikvmalloc(nn, "gk_graph_SortAdjacencies: cand");
    tval = gk_fmalloc(nn, "gk_graph_SortAdjacencies: tval");

    #pragma omp for schedule(static)
    for (i=0; i<n; i++) {
      for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
        if (j > ptr[i] && ind[j] < ind[j-1])
          k = 1;  /* an inversion */
        cand[j-ptr[i]].val = j-ptr[i];
        cand[j-ptr[i]].key = ind[j];
        tval[j-ptr[i]]     = val[j];
      }
      if (k) {
        gk_ikvsorti(ptr[i+1]-ptr[i], cand);
        for (j=ptr[i]; j<ptr[i+1]; j++) {
          ind[j] = cand[j-ptr[i]].key;
          val[j] = tval[cand[j-ptr[i]].val];
        }
      }
    }

    gk_free((void **)&cand, &tval, LTERM);
  }
}
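/*
   A minimal usage sketch for gk_graph_SortAdjacencies (illustrative only;
   it assumes a graph object that already carries the row-based view
   rowptr/rowind/rowval referenced above):

     gk_graph_t *graph = ...;                    // obtained/filled elsewhere
     gk_graph_SortAdjacencies(graph, GK_CSR_ROW);
     // every row's (index, value) pairs are now in increasing index order,
     // the layout that binary-search lookups and merge-style set operations
     // over adjacency lists generally rely on.
*/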