Example #1
/*************************************************************************
* This function performs a k-way directed diffusion
**************************************************************************/
real_t WavefrontDiffusion(ctrl_t *ctrl, graph_t *graph, idx_t *home)
{
  idx_t ii, i, j, k, l, nvtxs, nedges, nparts;
  idx_t from, to, edge, done, nswaps, noswaps, totalv, wsize;
  idx_t npasses, first, second, third, mind, maxd;
  idx_t *xadj, *adjncy, *adjwgt, *where, *perm;
  idx_t *rowptr, *colind, *ed, *psize;
  real_t *transfer, *tmpvec;
  real_t balance = -1.0, *load, *solution, *workspace;
  real_t *nvwgt, *npwgts, flowFactor, cost, ubfactor;
  matrix_t matrix;
  ikv_t *cand;
  idx_t ndirty, nclean, dptr, clean;

  nvtxs        = graph->nvtxs;
  nedges       = graph->nedges;
  xadj         = graph->xadj;
  nvwgt        = graph->nvwgt;
  adjncy       = graph->adjncy;
  adjwgt       = graph->adjwgt;
  where        = graph->where;
  nparts       = ctrl->nparts;
  ubfactor     = ctrl->ubvec[0];
  matrix.nrows = nparts;

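  /* each PE runs the diffusion with a slightly different flow factor,
     diversifying the solutions that are later compared across PEs */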
  flowFactor = 0.35;
  flowFactor = (ctrl->mype == 2) ? 0.50 : flowFactor;
  flowFactor = (ctrl->mype == 3) ? 0.75 : flowFactor;
  flowFactor = (ctrl->mype == 4) ? 1.00 : flowFactor;

  /* allocate memory */
  solution                   = rmalloc(4*nparts+2*nedges, "WavefrontDiffusion: solution");
  tmpvec                     = solution + nparts;
  npwgts                     = solution + 2*nparts;
  load                       = solution + 3*nparts;
  matrix.values              = solution + 4*nparts;
  transfer = matrix.transfer = solution + 4*nparts + nedges;

  perm                   = imalloc(2*nvtxs+2*nparts+nedges+1, "WavefrontDiffusion: perm");
  ed                     = perm + nvtxs;
  psize                  = perm + 2*nvtxs;
  rowptr = matrix.rowptr = perm + 2*nvtxs + nparts;
  colind = matrix.colind = perm + 2*nvtxs + 2*nparts + 1;

  /*GKTODO - Potential problem with this malloc */
  wsize     = gk_max(sizeof(real_t)*nparts*6, sizeof(idx_t)*(nvtxs+nparts*2+1));
  workspace = (real_t *)gk_malloc(wsize, "WavefrontDiffusion: workspace");
  cand      = ikvmalloc(nvtxs, "WavefrontDiffusion: cand");


  /*****************************/
  /* Populate empty subdomains */
  /*****************************/
  iset(nparts, 0, psize);
  for (i=0; i<nvtxs; i++) 
    psize[where[i]]++;

  mind = iargmin(nparts, psize);
  maxd = iargmax(nparts, psize);
  if (psize[mind] == 0) {
    for (i=0; i<nvtxs; i++) {
      k = (RandomInRange(nvtxs)+i)%nvtxs; 
      if (where[k] == maxd) {
        where[k] = mind;
        psize[mind]++;
        psize[maxd]--;
        break;
      }
    }
  }

  iset(nvtxs, 0, ed);
  rset(nparts, 0.0, npwgts);
  for (i=0; i<nvtxs; i++) {
    npwgts[where[i]] += nvwgt[i];
    for (j=xadj[i]; j<xadj[i+1]; j++)
      ed[i] += (where[i] != where[adjncy[j]] ? adjwgt[j] : 0);
  }

  ComputeLoad(graph, nparts, load, ctrl->tpwgts, 0);
  done = 0;


  /* zero out the tmpvec array */
  rset(nparts, 0.0, tmpvec);

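  /* perform up to NGD_PASSES diffusion passes (capped at nparts/2) */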
  npasses = gk_min(nparts/2, NGD_PASSES);
  for (l=0; l<npasses; l++) {
    /* Set-up and solve the diffusion equation */
    nswaps = 0;

    /************************/
    /* Solve flow equations */
    /************************/
    SetUpConnectGraph(graph, &matrix, (idx_t *)workspace);

    /* check for disconnected subdomains */
    for (i=0; i<matrix.nrows; i++) {
      if (matrix.rowptr[i]+1 == matrix.rowptr[i+1]) {
        cost = (real_t)(ctrl->mype);
        goto CleanUpAndExit;
      }
    }

    ConjGrad2(&matrix, load, solution, 0.001, workspace);
    ComputeTransferVector(1, &matrix, solution, transfer, 0);

    GetThreeMax(nparts, load, &first, &second, &third);

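    /* every third pass visit the vertices in a fully random order */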
    if (l%3 == 0) {
      FastRandomPermute(nvtxs, perm, 1);
    }
    else {
      /*****************************/
      /* move dirty vertices first */
      /*****************************/
      ndirty = 0;
      for (i=0; i<nvtxs; i++) {
        if (where[i] != home[i])
          ndirty++;
      }

      dptr = 0;
      for (i=0; i<nvtxs; i++) {
        if (where[i] != home[i])
          perm[dptr++] = i;
        else
          perm[ndirty++] = i;
      }

      PASSERT(ctrl, ndirty == nvtxs);
      ndirty = dptr;
      nclean = nvtxs-dptr;
      FastRandomPermute(ndirty, perm, 0);
      FastRandomPermute(nclean, perm+ndirty, 0);
    }

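    /* on PE 0, order the candidates by decreasing external degree (boundary vertices first) */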
    if (ctrl->mype == 0) {
      for (j=nvtxs, k=0, ii=0; ii<nvtxs; ii++) {
        i = perm[ii];
        if (ed[i] != 0) {
          cand[k].key = -ed[i];
          cand[k++].val = i;
        }
        else {
          cand[--j].key = 0;
          cand[j].val = i;
        }
      }
      ikvsorti(k, cand);
    }


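    /* consider at most a third of the vertices for movement in this pass */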
    for (ii=0; ii<nvtxs/3; ii++) {
      i = (ctrl->mype == 0) ? cand[ii].val : perm[ii];
      from = where[i];

      /* don't move out the last vertex in a subdomain */
      if (psize[from] == 1)
        continue;

      clean = (from == home[i]) ? 1 : 0;

      /* only move from top three or dirty vertices */
      if (from != first && from != second && from != third && clean)
        continue;

      /* Scatter the sparse transfer row into the dense tmpvec row */
      for (j=rowptr[from]+1; j<rowptr[from+1]; j++)
        tmpvec[colind[j]] = transfer[j];

      for (j=xadj[i]; j<xadj[i+1]; j++) {
        to = where[adjncy[j]];
        if (from != to) {
          if (tmpvec[to] > (flowFactor * nvwgt[i])) {
            tmpvec[to] -= nvwgt[i];
            INC_DEC(psize[to], psize[from], 1);
            INC_DEC(npwgts[to], npwgts[from], nvwgt[i]);
            INC_DEC(load[to], load[from], nvwgt[i]);
            where[i] = to;
            nswaps++;

            /* Update external degrees */
            ed[i] = 0;
            for (k=xadj[i]; k<xadj[i+1]; k++) {
              edge = adjncy[k];
              ed[i] += (to != where[edge] ? adjwgt[k] : 0);

              if (where[edge] == from)
                ed[edge] += adjwgt[k];
              if (where[edge] == to)
                ed[edge] -= adjwgt[k];
            }
            break;
          }
        }
      }

      /* Gather the dense tmpvec row into the sparse transfer row */
      for (j=rowptr[from]+1; j<rowptr[from+1]; j++) {
        transfer[j] = tmpvec[colind[j]];
        tmpvec[colind[j]] = 0.0;
      }
      ASSERT(fabs(rsum(nparts, tmpvec, 1)) < .0001)
    }

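    /* every other pass, stop if any PE met the balance target or if most PEs made no moves */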
    if (l % 2 == 1) {
      balance = rmax(nparts, npwgts)*nparts;
      if (balance < ubfactor + 0.035)
        done = 1;

      if (GlobalSESum(ctrl, done) > 0)
        break;

      noswaps = (nswaps > 0) ? 0 : 1;
      if (GlobalSESum(ctrl, noswaps) > ctrl->npes/2)
        break;

    }
  }

  graph->mincut = ComputeSerialEdgeCut(graph);
  totalv        = Mc_ComputeSerialTotalV(graph, home);
  cost          = ctrl->ipc_factor * (real_t)graph->mincut + ctrl->redist_factor * (real_t)totalv;


CleanUpAndExit:
  gk_free((void **)&solution, (void **)&perm, (void **)&workspace, (void **)&cand, LTERM);

  return cost;
}
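
The move loop above relies on a standard sparse/dense idiom: the transfer row of the source subdomain is scattered from its CSR storage into the dense work vector tmpvec, updated in O(1) by indexing with the destination subdomain, and then gathered back while the touched slots are re-zeroed so tmpvec stays clean between moves. The stand-alone sketch below illustrates only that idiom on made-up data; scatter_update_gather and its arguments are illustrative names, not part of the ParMETIS API.

#include <stdio.h>

/* Illustrative sketch (not ParMETIS code): scatter a sparse CSR row into a
 * dense work vector, update one entry by direct column index, then gather
 * the row back and re-zero the touched slots so the vector stays clean.
 * As in the code above, target is assumed to be one of the row's columns. */
static void scatter_update_gather(int row, const int *rowptr, const int *colind,
                                  double *values, double *dense,
                                  int target, double delta)
{
  int j;
  for (j = rowptr[row]; j < rowptr[row+1]; j++)      /* scatter */
    dense[colind[j]] = values[j];

  dense[target] -= delta;                            /* O(1) update by column id */

  for (j = rowptr[row]; j < rowptr[row+1]; j++) {    /* gather + reset */
    values[j] = dense[colind[j]];
    dense[colind[j]] = 0.0;
  }
}

int main(void)
{
  /* 2x3 matrix in CSR form; row 0 has nonzeros in columns 0 and 2 */
  int    rowptr[] = {0, 2, 3};
  int    colind[] = {0, 2, 1};
  double values[] = {1.0, 4.0, 2.5};
  double dense[3] = {0.0, 0.0, 0.0};

  scatter_update_gather(0, rowptr, colind, values, dense, 2, 1.5);
  printf("row 0 after the update: %.1f %.1f\n", values[0], values[1]); /* 1.0 2.5 */
  return 0;
}
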
Example #2
/*************************************************************************
* This function is the entry point of the initial balancing algorithm.
* This algorithm assembles the graph on all the processors and proceeds
* with the balancing step.
**************************************************************************/
void Balance_Partition(CtrlType *ctrl, GraphType *graph, WorkSpaceType *wspace)
{
    int i, j, mype, npes, nvtxs, nedges, ncon;
    idxtype *vtxdist, *xadj, *adjncy, *adjwgt, *vwgt, *vsize;
    idxtype *part, *lwhere, *home;
    GraphType *agraph, cgraph;
    CtrlType myctrl;
    int lnparts, fpart, fpe, lnpes, ngroups, srnpes, srmype;
    int twoparts=2, numflag = 0, wgtflag = 3, moptions[10], edgecut, max_cut;
    int sr_pe, gd_pe, sr, gd, who_wins, *rcounts, *rdispls;
    float my_cut, my_totalv, my_cost = -1.0, my_balance = -1.0, wsum;
    float rating, max_rating, your_cost = -1.0, your_balance = -1.0;
    float lbvec[MAXNCON], lbsum, min_lbsum, *mytpwgts, mytpwgts2[2], buffer[2];
    MPI_Status status;
    MPI_Comm ipcomm, srcomm;
    struct {
        float cost;
        int rank;
    } lpecost, gpecost;

    IFSET(ctrl->dbglvl, DBG_TIME, starttimer(ctrl->InitPartTmr));

    vtxdist = graph->vtxdist;
    agraph = Mc_AssembleAdaptiveGraph(ctrl, graph, wspace);
    nvtxs = cgraph.nvtxs = agraph->nvtxs;
    nedges = cgraph.nedges = agraph->nedges;
    ncon = cgraph.ncon = agraph->ncon;

    xadj = cgraph.xadj = idxmalloc(nvtxs*(5+ncon)+1+nedges*2, "U_IP: xadj");
    vwgt = cgraph.vwgt = xadj + nvtxs+1;
    vsize = cgraph.vsize = xadj + nvtxs*(1+ncon)+1;
    cgraph.where = agraph->where = part = xadj + nvtxs*(2+ncon)+1;
    lwhere = xadj + nvtxs*(3+ncon)+1;
    home = xadj + nvtxs*(4+ncon)+1;
    adjncy = cgraph.adjncy = xadj + nvtxs*(5+ncon)+1;
    adjwgt = cgraph.adjwgt = xadj + nvtxs*(5+ncon)+1 + nedges;

    /* ADD: this assumes that tpwgts for all constraints is the same */
    /* ADD: this is necessary because serial metis does not support the general case */
    mytpwgts = fsmalloc(ctrl->nparts, 0.0, "mytpwgts");
    for (i=0; i<ctrl->nparts; i++)
        for (j=0; j<ncon; j++)
            mytpwgts[i] += ctrl->tpwgts[i*ncon+j];
    for (i=0; i<ctrl->nparts; i++)
        mytpwgts[i] /= (float)ncon;

    idxcopy(nvtxs+1, agraph->xadj, xadj);
    idxcopy(nvtxs*ncon, agraph->vwgt, vwgt);
    idxcopy(nvtxs, agraph->vsize, vsize);
    idxcopy(nedges, agraph->adjncy, adjncy);
    idxcopy(nedges, agraph->adjwgt, adjwgt);

    /****************************************/
    /****************************************/
    if (ctrl->ps_relation == DISCOUPLED) {
        rcounts = imalloc(ctrl->npes, "rcounts");
        rdispls = imalloc(ctrl->npes+1, "rdispls");

        for (i=0; i<ctrl->npes; i++) {
            rdispls[i] = rcounts[i] = vtxdist[i+1]-vtxdist[i];
        }
        MAKECSR(i, ctrl->npes, rdispls);

        MPI_Allgatherv((void *)graph->home, graph->nvtxs, IDX_DATATYPE,
                       (void *)part, rcounts, rdispls, IDX_DATATYPE, ctrl->comm);

        for (i=0; i<agraph->nvtxs; i++)
            home[i] = part[i];

        GKfree((void **)&rcounts, (void **)&rdispls, LTERM);
    }
    else {
        for (i=0; i<ctrl->npes; i++)
            for (j=vtxdist[i]; j<vtxdist[i+1]; j++)
                part[j] = home[j] = i;
    }

    /* Ensure that the initial partitioning is legal */
    for (i=0; i<agraph->nvtxs; i++) {
        if (part[i] >= ctrl->nparts)
            part[i] = home[i] = part[i] % ctrl->nparts;
        if (part[i] < 0)
            part[i] = home[i] = (-1*part[i]) % ctrl->nparts;
    }
    /****************************************/
    /****************************************/

    IFSET(ctrl->dbglvl, DBG_REFINEINFO, Mc_ComputeSerialBalance(ctrl, agraph, agraph->where, lbvec));
    IFSET(ctrl->dbglvl, DBG_REFINEINFO, rprintf(ctrl, "input cut: %d, balance: ", ComputeSerialEdgeCut(agraph)));
    for (i=0; i<agraph->ncon; i++)
        IFSET(ctrl->dbglvl, DBG_REFINEINFO, rprintf(ctrl, "%.3f ", lbvec[i]));
    IFSET(ctrl->dbglvl, DBG_REFINEINFO, rprintf(ctrl, "\n"));

    /****************************************/
    /* Split the processors into two groups */
    /****************************************/
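    /* even-ranked PEs form the scratch-remap (sr) group, odd-ranked PEs the diffusion (gd) group */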
    sr = (ctrl->mype % 2 == 0) ? 1 : 0;
    gd = (ctrl->mype % 2 == 1) ? 1 : 0;

    if (graph->ncon > MAX_NCON_FOR_DIFFUSION || ctrl->npes == 1) {
        sr = 1;
        gd = 0;
    }

    sr_pe = 0;
    gd_pe = 1;

    MPI_Comm_split(ctrl->gcomm, sr, 0, &ipcomm);
    MPI_Comm_rank(ipcomm, &mype);
    MPI_Comm_size(ipcomm, &npes);

    myctrl.dbglvl = 0;
    myctrl.mype = mype;
    myctrl.npes = npes;
    myctrl.comm = ipcomm;
    myctrl.sync = ctrl->sync;
    myctrl.seed = ctrl->seed;
    myctrl.nparts = ctrl->nparts;
    myctrl.ipc_factor = ctrl->ipc_factor;
    myctrl.redist_factor = ctrl->redist_base;
    myctrl.partType = ADAPTIVE_PARTITION;
    myctrl.ps_relation = DISCOUPLED;
    myctrl.tpwgts = ctrl->tpwgts;
    icopy(ncon, ctrl->tvwgts, myctrl.tvwgts);
    icopy(ncon, ctrl->ubvec, myctrl.ubvec);

    if (sr == 1) {
        /*******************************************/
        /* Half of the processors do scratch-remap */
        /*******************************************/
        ngroups = amax(amin(RIP_SPLIT_FACTOR, npes), 1);
        MPI_Comm_split(ipcomm, mype % ngroups, 0, &srcomm);
        MPI_Comm_rank(srcomm, &srmype);
        MPI_Comm_size(srcomm, &srnpes);

        moptions[0] = 0;
        moptions[7] = ctrl->sync + (mype % ngroups) + 1;

        idxset(nvtxs, 0, lwhere);
        lnparts = ctrl->nparts;
        fpart = fpe = 0;
        lnpes = srnpes;
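
        /* recursive bisection: each PE keeps the branch assigned to its processor sub-group */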
        while (lnpes > 1 && lnparts > 1) {
            ASSERT(ctrl, agraph->nvtxs > 1);
            /* Determine the weights of the partitions */
            mytpwgts2[0] = ssum(lnparts/2, mytpwgts+fpart);
            mytpwgts2[1] = 1.0-mytpwgts2[0];


            if (agraph->ncon == 1) {
                METIS_WPartGraphKway2(&agraph->nvtxs, agraph->xadj, agraph->adjncy, agraph->vwgt,
                                      agraph->adjwgt, &wgtflag, &numflag, &twoparts, mytpwgts2, moptions, &edgecut,
                                      part);
            }
            else {
                METIS_mCPartGraphRecursive2(&agraph->nvtxs, &ncon, agraph->xadj, agraph->adjncy,
                                            agraph->vwgt, agraph->adjwgt, &wgtflag, &numflag, &twoparts, mytpwgts2,
                                            moptions, &edgecut, part);
            }

            wsum = ssum(lnparts/2, mytpwgts+fpart);
            sscale(lnparts/2, 1.0/wsum, mytpwgts+fpart);
            sscale(lnparts-lnparts/2, 1.0/(1.0-wsum), mytpwgts+fpart+lnparts/2);

            /* I'm picking the left branch */
            if (srmype < fpe+lnpes/2) {
                Mc_KeepPart(agraph, wspace, part, 0);
                lnpes = lnpes/2;
                lnparts = lnparts/2;
            }
            else {
                Mc_KeepPart(agraph, wspace, part, 1);
                fpart = fpart + lnparts/2;
                fpe = fpe + lnpes/2;
                lnpes = lnpes - lnpes/2;
                lnparts = lnparts - lnparts/2;
            }
        }

        /* In case srnpes is greater than or equal to nparts */
        if (lnparts == 1) {
            /* Only the first process will assign labels (for the reduction to work) */
            if (srmype == fpe) {
                for (i=0; i<agraph->nvtxs; i++)
                    lwhere[agraph->label[i]] = fpart;
            }
        }
        /* In case srnpes is smaller than nparts */
        else {
            if (ncon == 1)
                METIS_WPartGraphKway2(&agraph->nvtxs, agraph->xadj, agraph->adjncy, agraph->vwgt,
                                      agraph->adjwgt, &wgtflag, &numflag, &lnparts, mytpwgts+fpart, moptions,
                                      &edgecut, part);
            else
                METIS_mCPartGraphRecursive2(&agraph->nvtxs, &ncon, agraph->xadj, agraph->adjncy,
                                            agraph->vwgt, agraph->adjwgt, &wgtflag, &numflag, &lnparts, mytpwgts+fpart,
                                            moptions, &edgecut, part);

            for (i=0; i<agraph->nvtxs; i++)
                lwhere[agraph->label[i]] = fpart + part[i];
        }

        MPI_Allreduce((void *)lwhere, (void *)part, nvtxs, IDX_DATATYPE, MPI_SUM, srcomm);

        edgecut = ComputeSerialEdgeCut(&cgraph);
        Mc_ComputeSerialBalance(ctrl, &cgraph, part, lbvec);
        lbsum = ssum(ncon, lbvec);
        MPI_Allreduce((void *)&edgecut, (void *)&max_cut, 1, MPI_INT, MPI_MAX, ipcomm);
        MPI_Allreduce((void *)&lbsum, (void *)&min_lbsum, 1, MPI_FLOAT, MPI_MIN, ipcomm);
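
        /* rank this PE's solution: if some PE achieved acceptable balance, compare by edge cut
           (imbalanced PEs are penalized past max_cut); otherwise compare by balance */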
        lpecost.rank = ctrl->mype;
        lpecost.cost = lbsum;
        if (min_lbsum < UNBALANCE_FRACTION * (float)(ncon)) {
            if (lbsum < UNBALANCE_FRACTION * (float)(ncon))
                lpecost.cost = (float)edgecut;
            else
                lpecost.cost = (float)max_cut + lbsum;
        }
        MPI_Allreduce((void *)&lpecost, (void *)&gpecost, 1, MPI_FLOAT_INT, MPI_MINLOC, ipcomm);

        if (ctrl->mype == gpecost.rank && ctrl->mype != sr_pe) {
            MPI_Send((void *)part, nvtxs, IDX_DATATYPE, sr_pe, 1, ctrl->comm);
        }

        if (ctrl->mype != gpecost.rank && ctrl->mype == sr_pe) {
            MPI_Recv((void *)part, nvtxs, IDX_DATATYPE, gpecost.rank, 1, ctrl->comm, &status);
        }

        if (ctrl->mype == sr_pe) {
            idxcopy(nvtxs, part, lwhere);
            SerialRemap(&cgraph, ctrl->nparts, home, lwhere, part, ctrl->tpwgts);
        }

        MPI_Comm_free(&srcomm);
    }
    /**************************************/
    /* The other half do global diffusion */
    /**************************************/
    else {
        /******************************************************************/
        /* The next stmt is required to balance out the sr MPI_Comm_split */
        /******************************************************************/
        MPI_Comm_split(ipcomm, MPI_UNDEFINED, 0, &srcomm);

        if (ncon == 1) {
            rating = WavefrontDiffusion(&myctrl, agraph, home);
            Mc_ComputeSerialBalance(ctrl, &cgraph, part, lbvec);
            lbsum = ssum(ncon, lbvec);

            /* Determine which PE computed the best partitioning */
            MPI_Allreduce((void *)&rating, (void *)&max_rating, 1, MPI_FLOAT, MPI_MAX, ipcomm);
            MPI_Allreduce((void *)&lbsum, (void *)&min_lbsum, 1, MPI_FLOAT, MPI_MIN, ipcomm);

            lpecost.rank = ctrl->mype;
            lpecost.cost = lbsum;
            if (min_lbsum < UNBALANCE_FRACTION * (float)(ncon)) {
                if (lbsum < UNBALANCE_FRACTION * (float)(ncon))
                    lpecost.cost = rating;
                else
                    lpecost.cost = max_rating + lbsum;
            }

            MPI_Allreduce((void *)&lpecost, (void *)&gpecost, 1, MPI_FLOAT_INT, MPI_MINLOC, ipcomm);

            /* Now send this to the coordinating processor */
            if (ctrl->mype == gpecost.rank && ctrl->mype != gd_pe)
                MPI_Send((void *)part, nvtxs, IDX_DATATYPE, gd_pe, 1, ctrl->comm);

            if (ctrl->mype != gpecost.rank && ctrl->mype == gd_pe)
                MPI_Recv((void *)part, nvtxs, IDX_DATATYPE, gpecost.rank, 1, ctrl->comm, &status);

            if (ctrl->mype == gd_pe) {
                idxcopy(nvtxs, part, lwhere);
                SerialRemap(&cgraph, ctrl->nparts, home, lwhere, part, ctrl->tpwgts);
            }
        }
        else {
            Mc_Diffusion(&myctrl, agraph, graph->vtxdist, agraph->where, home, wspace, N_MOC_GD_PASSES);
        }
    }

    if (graph->ncon <= MAX_NCON_FOR_DIFFUSION) {
        if (ctrl->mype == sr_pe  || ctrl->mype == gd_pe) {
            /********************************************************************/
            /* The coordinators from each group decide on the best partitioning */
            /********************************************************************/
            my_cut = (float) ComputeSerialEdgeCut(&cgraph);
            my_totalv = (float) Mc_ComputeSerialTotalV(&cgraph, home);
            Mc_ComputeSerialBalance(ctrl, &cgraph, part, lbvec);
            my_balance = ssum(cgraph.ncon, lbvec);
            my_balance /= (float) cgraph.ncon;
            my_cost = ctrl->ipc_factor * my_cut + REDIST_WGT * ctrl->redist_base * my_totalv;

            IFSET(ctrl->dbglvl, DBG_REFINEINFO, printf("%s initial cut: %.1f, totalv: %.1f, balance: %.3f\n",
                    (ctrl->mype == sr_pe ? "scratch-remap" : "diffusion"), my_cut, my_totalv, my_balance));

            if (ctrl->mype == gd_pe) {
                buffer[0] = my_cost;
                buffer[1] = my_balance;
                MPI_Send((void *)buffer, 2, MPI_FLOAT, sr_pe, 1, ctrl->comm);
            }
            else {
                MPI_Recv((void *)buffer, 2, MPI_FLOAT, gd_pe, 1, ctrl->comm, &status);
                your_cost = buffer[0];
                your_balance = buffer[1];
            }
        }

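        /* scratch-remap wins if it alone meets the 1.1 balance bound, if both meet it and it is
           cheaper, or if neither meets it and it is better balanced */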
        if (ctrl->mype == sr_pe) {
            who_wins = gd_pe;
            if ((my_balance < 1.1 && your_balance > 1.1) ||
                    (my_balance < 1.1 && your_balance < 1.1 && my_cost < your_cost) ||
                    (my_balance > 1.1 && your_balance > 1.1 && my_balance < your_balance)) {
                who_wins = sr_pe;
            }
        }

        MPI_Bcast((void *)&who_wins, 1, MPI_INT, sr_pe, ctrl->comm);
    }
    else {
        who_wins = sr_pe;
    }

    MPI_Bcast((void *)part, nvtxs, IDX_DATATYPE, who_wins, ctrl->comm);
    idxcopy(graph->nvtxs, part+vtxdist[ctrl->mype], graph->where);

    MPI_Comm_free(&ipcomm);
    GKfree((void **)&xadj, (void **)&mytpwgts, LTERM);

    /* For whatever reason, FreeGraph crashes here...so explicitly free the memory.
      FreeGraph(agraph);
    */
    GKfree((void **)&agraph->xadj, (void **)&agraph->adjncy, (void **)&agraph->vwgt, (void **)&agraph->nvwgt, LTERM);
    GKfree((void **)&agraph->vsize, (void **)&agraph->adjwgt, (void **)&agraph->label, LTERM);
    GKfree((void **)&agraph, LTERM);

    IFSET(ctrl->dbglvl, DBG_TIME, stoptimer(ctrl->InitPartTmr));

}
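
Both groups above select a winning process with the standard MPI_MINLOC idiom: each rank packs its local cost and its rank into a {float, int} pair, and a single MPI_Allreduce over MPI_FLOAT_INT returns the smallest cost together with the rank that owns it. The sketch below shows only that idiom, detached from the ParMETIS data structures; the cost values are made up.

#include <mpi.h>
#include <stdio.h>

/* Stand-alone illustration of the MPI_MINLOC reduction used above to decide
 * which process produced the cheapest partitioning. */
int main(int argc, char **argv)
{
    int rank, npes;
    struct { float cost; int rank; } local, best;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &npes);

    /* every rank proposes a (made-up) cost together with its own rank */
    local.cost = (float)((rank * 7 + 3) % npes);
    local.rank = rank;

    /* MPI_FLOAT_INT + MPI_MINLOC: smallest cost wins, ties go to the lowest rank */
    MPI_Allreduce(&local, &best, 1, MPI_FLOAT_INT, MPI_MINLOC, MPI_COMM_WORLD);

    if (rank == 0)
        printf("winner: rank %d with cost %.1f\n", best.rank, best.cost);

    /* the winner could now send its partitioning to a coordinator or broadcast it */
    MPI_Finalize();
    return 0;
}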