Example No. 1
/*--------------------------------------------------------------------------
 * hypre_Maxwell_Grad.c
 *   Forms a node-to-edge gradient operator. The loop is over the edge
 *   grid so that each processor fills only its own rows. Each processor
 *   will have the nodal ranks of its processor interface.
 *   Loops over two types of boxes: the interior of grid boxes and the
 *   boundary layers of boxes. Algorithm:
 *       find all nodal and edge physical boundary points and set
 *       the appropriate flag to 0 at a boundary dof;
 *       set -1's in the value array;
 *       for each edge box,
 *       for the interior
 *       {
 *          connect edge ijk (row) to the nodes (cols) connected to this
 *          edge and change -1 to 1 where needed;
 *       }
 *       for the boundary layers
 *       {
 *          if the edge is not on the physical boundary, connect only the
 *          nodes that are not on the physical boundary;
 *       }
 *       set the parcsr matrix with the values.
 *
 * Note that a node on the processor interface can also lie on the
 * physical boundary, but the off-processor edges connected to such a
 * node will be physical boundary edges.
 *
 *--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *
hypre_Maxwell_Grad(hypre_SStructGrid    *grid)
{
   MPI_Comm               comm = (grid ->  comm);

   HYPRE_IJMatrix         T_grad;
   hypre_ParCSRMatrix    *parcsr_grad;
   HYPRE_Int              matrix_type= HYPRE_PARCSR;

   hypre_SStructGrid     *node_grid, *edge_grid;

   hypre_SStructPGrid    *pgrid;
   hypre_StructGrid      *var_grid;
   hypre_BoxArray        *boxes, *tmp_box_array1, *tmp_box_array2;
   hypre_BoxArray        *node_boxes, *edge_boxes, *cell_boxes;
   hypre_Box             *box, *cell_box;
   hypre_Box              layer, interior_box;
   hypre_Box             *box_piece;

   hypre_BoxManager      *boxman;
   hypre_BoxManEntry     *entry;

   HYPRE_Int             *inode, *jedge;
   HYPRE_Int              nrows, nnodes, *nflag, *eflag, *ncols;
   HYPRE_Real            *vals;

   hypre_Index            index;
   hypre_Index            loop_size, start, lindex;
   hypre_Index            shift, shift2;
   hypre_Index           *offsets, *varoffsets;

   HYPRE_Int              nparts= hypre_SStructGridNParts(grid);
   HYPRE_Int              ndim  = hypre_SStructGridNDim(grid);

   HYPRE_SStructVariable  vartype_node, *vartype_edges;
   HYPRE_SStructVariable *vartypes;

   HYPRE_Int              nvars, part;

   HYPRE_Int              i, j, k, m, n, d;
   HYPRE_Int             *direction, ndirection;

   HYPRE_Int              ilower, iupper;
   HYPRE_Int              jlower, jupper;

   HYPRE_Int              start_rank1, start_rank2, rank;

   HYPRE_Int              myproc;
   HYPRE_Int              ierr=0;

   hypre_BoxInit(&layer, ndim);
   hypre_BoxInit(&interior_box, ndim);

   hypre_MPI_Comm_rank(comm, &myproc);

   hypre_ClearIndex(shift);
   for (i= 0; i< ndim; i++)
   {
      hypre_IndexD(shift, i)= -1;
   }

   /* To get the correct ranks, separate node & edge grids must be formed.
      Note that the edge vars must be ordered the same way as in grid. */
   HYPRE_SStructGridCreate(comm, ndim, nparts, &node_grid);
   HYPRE_SStructGridCreate(comm, ndim, nparts, &edge_grid);

   vartype_node = HYPRE_SSTRUCT_VARIABLE_NODE;
   vartype_edges= hypre_TAlloc(HYPRE_SStructVariable, ndim);

   /* Assuming the same edge variable types on all parts */
   pgrid   = hypre_SStructGridPGrid(grid, 0);
   vartypes= hypre_SStructPGridVarTypes(pgrid);
   nvars   = hypre_SStructPGridNVars(pgrid);

   k= 0;
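   /* Collect the edge (2-D: face) variable types of grid, in the same order
      in which they appear in grid, so that edge_grid is built with them
      below. */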
   for (i= 0; i< nvars; i++)
   {
      j= vartypes[i];
      switch(j)
      {
         case 2:
         {
            vartype_edges[k]= HYPRE_SSTRUCT_VARIABLE_XFACE;
            k++;
            break;
         }

         case 3:
         {
            vartype_edges[k]= HYPRE_SSTRUCT_VARIABLE_YFACE;
            k++;
            break;
         }

         case 5:
         {
            vartype_edges[k]= HYPRE_SSTRUCT_VARIABLE_XEDGE;
            k++;
            break;
         }

         case 6:
         {
            vartype_edges[k]= HYPRE_SSTRUCT_VARIABLE_YEDGE;
            k++;
            break;
         }

         case 7:
         {
            vartype_edges[k]= HYPRE_SSTRUCT_VARIABLE_ZEDGE;
            k++;
            break;
         }

      }  /* switch(j) */
   }     /* for (i= 0; i< nvars; i++) */

   for (part= 0; part< nparts; part++)
   {
      pgrid= hypre_SStructGridPGrid(grid, part);  
      var_grid= hypre_SStructPGridCellSGrid(pgrid);

      boxes= hypre_StructGridBoxes(var_grid);
      hypre_ForBoxI(j, boxes)
      {
         box= hypre_BoxArrayBox(boxes, j);
         HYPRE_SStructGridSetExtents(node_grid, part,
                                     hypre_BoxIMin(box), hypre_BoxIMax(box));
         HYPRE_SStructGridSetExtents(edge_grid, part,
                                     hypre_BoxIMin(box), hypre_BoxIMax(box));
      }
      HYPRE_SStructGridSetVariables(node_grid, part, 1, &vartype_node);
      HYPRE_SStructGridSetVariables(edge_grid, part, ndim, vartype_edges);
   }
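
   /* What typically follows (a hedged sketch, not the verbatim remainder of
      hypre_Maxwell_Grad): assemble the two auxiliary grids and create the
      edge-by-node gradient matrix as an IJ matrix over this processor's row
      (edge) and column (node) ranges, which are assumed to be computed from
      the assembled grids' global ranks into ilower/iupper and jlower/jupper. */
   HYPRE_SStructGridAssemble(node_grid);
   HYPRE_SStructGridAssemble(edge_grid);

   /* ilower..iupper: local edge (row) range; jlower..jupper: local node
      (column) range (assumed already computed from the grid ranks) */
   HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &T_grad);
   HYPRE_IJMatrixSetObjectType(T_grad, matrix_type);
   HYPRE_IJMatrixInitialize(T_grad);

   /* ... fill each edge row with its +1/-1 node entries via
      HYPRE_IJMatrixSetValues, honoring the boundary flags ... */

   HYPRE_IJMatrixAssemble(T_grad);
   HYPRE_IJMatrixGetObject(T_grad, (void **) &parcsr_grad);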
Example No. 2
/*--------------------------------------------------------------------------
 * hypre_SStructSharedDOF_ParcsrMatRowsComm
 *   Given an sstruct_grid & a parcsr matrix with rows corresponding to the
 *   sstruct_grid, determine and extract the rows that must be communicated.
 *   These rows are for shared dofs that geometrically lie on processor
 *   boundaries but are stored internally on only one processor.
 *   Algorithm:
 *       for each cellbox
 *         RECVs:
 *          i)   stretch the cellbox to the variable box
 *          ii)  in the appropriate (dof-dependent) direction, take the
 *               boundary and boxman_intersect to extract the boxmanentries
 *               that contain these boundary edges
 *          iii) loop over the boxmanentries and see whether they belong
 *               to this proc or to another proc
 *                 a) if they belong to another proc, these are the recvs:
 *                    count and prepare the communication buffers and
 *                    values
 *
 *         SENDs:
 *          i)   form a layer of cells that is one layer off the cellbox
 *               (stretched in the appropriate direction)
 *          ii)  boxman_intersect with the cellgrid boxman
 *          iii) loop over the boxmanentries and see whether they belong
 *               to this proc or to another proc
 *                 a) if they belong to another proc, these are the sends:
 *                    count and prepare the communication buffers and
 *                    values
 *
 * Note: For the recv data, a dof can come from only one processor.
 *       For the send data, a dof can go to more than one processor
 *       (the same dof is on the boundary of several cells).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructSharedDOF_ParcsrMatRowsComm( hypre_SStructGrid    *grid,
                                          hypre_ParCSRMatrix   *A,
                                          HYPRE_Int            *num_offprocrows_ptr,
                                          hypre_MaxwellOffProcRow ***OffProcRows_ptr)
{
   MPI_Comm             A_comm= hypre_ParCSRMatrixComm(A);
   MPI_Comm          grid_comm= hypre_SStructGridComm(grid);

   HYPRE_Int       matrix_type= HYPRE_PARCSR;

   HYPRE_Int            nparts= hypre_SStructGridNParts(grid);
   HYPRE_Int            ndim  = hypre_SStructGridNDim(grid);

   hypre_SStructGrid     *cell_ssgrid;

   hypre_SStructPGrid    *pgrid;
   hypre_StructGrid      *cellgrid;
   hypre_BoxArray        *cellboxes;
   hypre_Box             *box, *cellbox, vbox, boxman_entry_box;

   hypre_Index            loop_size, start;
   HYPRE_Int              loopi, loopj, loopk;
   HYPRE_Int              start_rank, end_rank, rank; 

   HYPRE_Int              i, j, k, m, n, t, part, var, nvars;

   HYPRE_SStructVariable *vartypes;
   HYPRE_Int              nbdry_slabs;
   hypre_BoxArray        *recv_slabs, *send_slabs;
   hypre_Index            varoffset;

   hypre_BoxManager     **boxmans, *cell_boxman;
   hypre_BoxManEntry    **boxman_entries, *entry;
   HYPRE_Int              nboxman_entries;

   hypre_Index            ishift, jshift, kshift, zero_index;
   hypre_Index            ilower, iupper, index;

   HYPRE_Int              proc, nprocs, myproc;
   HYPRE_Int             *SendToProcs, *RecvFromProcs;
   HYPRE_Int            **send_RowsNcols;       /* buffer for rows & ncols */
   HYPRE_Int             *send_RowsNcols_alloc; 
   HYPRE_Int             *send_ColsData_alloc;
   HYPRE_Int             *tot_nsendRowsNcols, *tot_sendColsData;
   double               **vals;  /* buffer for cols & data */

   HYPRE_Int             *col_inds;
   double                *values;

   hypre_MPI_Request           *requests;
   hypre_MPI_Status            *status;
   HYPRE_Int            **rbuffer_RowsNcols;
   double               **rbuffer_ColsData;
   HYPRE_Int              num_sends, num_recvs;

   hypre_MaxwellOffProcRow **OffProcRows;
   HYPRE_Int                *starts;

   HYPRE_Int              ierr= 0;

   hypre_MPI_Comm_rank(A_comm, &myproc);
   hypre_MPI_Comm_size(grid_comm, &nprocs);

   start_rank= hypre_ParCSRMatrixFirstRowIndex(A);
   end_rank  = hypre_ParCSRMatrixLastRowIndex(A);

   hypre_SetIndex(ishift, 1, 0, 0);
   hypre_SetIndex(jshift, 0, 1, 0);
   hypre_SetIndex(kshift, 0, 0, 1);
   hypre_SetIndex(zero_index, 0, 0, 0);

   /* Need a cellgrid boxman to determine the send boxes -> only the cell dofs
      are unique, so a boxman intersect can be used to get the edges that
      must be sent. */
   HYPRE_SStructGridCreate(grid_comm, ndim, nparts, &cell_ssgrid);
   vartypes= hypre_CTAlloc(HYPRE_SStructVariable, 1);
   vartypes[0]= HYPRE_SSTRUCT_VARIABLE_CELL;

   for (i= 0; i< nparts; i++)
   {
      pgrid= hypre_SStructGridPGrid(grid, i);
      cellgrid= hypre_SStructPGridCellSGrid(pgrid);

      cellboxes= hypre_StructGridBoxes(cellgrid);
      hypre_ForBoxI(j, cellboxes)
      {
         box= hypre_BoxArrayBox(cellboxes, j);
         HYPRE_SStructGridSetExtents(cell_ssgrid, i,
                                     hypre_BoxIMin(box), hypre_BoxIMax(box));
      }
      HYPRE_SStructGridSetVariables(cell_ssgrid, i, 1, vartypes);
   }
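
   /* A hedged sketch of the SEND-side intersection step from the algorithm
      above (not the verbatim continuation): after assembling cell_ssgrid, the
      off-cellbox layers (assumed collected in send_slabs for a given part) are
      intersected with the cell grid's box manager, and each returned entry is
      classified as on-proc or off-proc.  hypre_SStructGridBoxManager() is
      assumed to return the cell variable's box manager, and SendToProcs is
      assumed to be allocated with nprocs entries. */
   HYPRE_SStructGridAssemble(cell_ssgrid);
   cell_boxman = hypre_SStructGridBoxManager(cell_ssgrid, part, 0);

   hypre_ForBoxI(j, send_slabs)
   {
      box = hypre_BoxArrayBox(send_slabs, j);
      hypre_BoxManIntersect(cell_boxman, hypre_BoxIMin(box), hypre_BoxIMax(box),
                            &boxman_entries, &nboxman_entries);

      for (m = 0; m < nboxman_entries; m++)
      {
         hypre_SStructBoxManEntryGetProcess(boxman_entries[m], &proc);
         if (proc != myproc)
         {
            /* off-proc owner: the rows of this slab must be sent to proc */
            SendToProcs[proc]++;
         }
      }
      hypre_TFree(boxman_entries);
   }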
Example No. 3
HYPRE_Int 
hypre_Maxwell_PhysBdy( hypre_SStructGrid      **grid_l,
                       HYPRE_Int                num_levels,
                       hypre_Index              rfactors,
                       HYPRE_Int             ***BdryRanksl_ptr, 
                       HYPRE_Int              **BdryRanksCntsl_ptr )
{

   MPI_Comm                comm= (grid_l[0]-> comm);

   HYPRE_Int             **BdryRanks_l;
   HYPRE_Int              *BdryRanksCnts_l;

   HYPRE_Int              *npts;
   HYPRE_Int              *ranks, *upper_rank, *lower_rank;
   hypre_BoxManEntry      *boxman_entry;

   hypre_SStructGrid      *grid;
   hypre_SStructPGrid     *pgrid;
   hypre_StructGrid       *cell_fgrid, *cell_cgrid, *sgrid;

   hypre_BoxArrayArray ****bdry;
   hypre_BoxArrayArray    *fbdry;
   hypre_BoxArrayArray    *cbdry;

   hypre_BoxArray         *box_array;
   hypre_BoxArray         *fboxes, *cboxes;

   hypre_Box              *fbox, *cbox;
   hypre_Box              *box, *contract_fbox, rbox;
   hypre_Box               intersect;

   HYPRE_Int             **cbox_mapping, **fbox_mapping;
   HYPRE_Int             **boxes_with_bdry;

   HYPRE_Int               ndim, nvars;
   HYPRE_Int               nboxes, nfboxes;
   HYPRE_Int               boxi;
   
   hypre_Index             zero_shift, upper_shift, lower_shift;
   hypre_Index             loop_size, start, index, lindex;

   HYPRE_Int               i, j, k, l, m, n, p;
   HYPRE_Int               d;
   HYPRE_Int               cnt;

   HYPRE_Int               part= 0;  /* NOTE, ASSUMING ONE PART */
   HYPRE_Int               matrix_type= HYPRE_PARCSR;
   HYPRE_Int               myproc;

   HYPRE_Int               ierr= 0;

   hypre_MPI_Comm_rank(comm, &myproc);

   ndim= hypre_SStructGridNDim(grid_l[0]);
   hypre_SetIndex3(zero_shift, 0, 0, 0);

   hypre_BoxInit(&intersect, ndim);

   /* Find the bounding global ranks of this processor & allocate the boundary
      box markers. */
   upper_rank= hypre_CTAlloc(HYPRE_Int, num_levels);
   lower_rank= hypre_CTAlloc(HYPRE_Int, num_levels);

   boxes_with_bdry= hypre_TAlloc(HYPRE_Int *, num_levels);
   for (i= 0; i< num_levels; i++)
   {
      grid = grid_l[i];
      lower_rank[i]= hypre_SStructGridStartRank(grid);

      /* note we are assuming only one part */
      pgrid= hypre_SStructGridPGrid(grid, part);
      nvars= hypre_SStructPGridNVars(pgrid);
      sgrid= hypre_SStructPGridSGrid(pgrid, nvars-1);
      box_array= hypre_StructGridBoxes(sgrid);
      box  = hypre_BoxArrayBox(box_array, hypre_BoxArraySize(box_array)-1);

      hypre_SStructGridBoxProcFindBoxManEntry(grid, part, nvars-1,
                                              hypre_BoxArraySize(box_array)-1, myproc, &boxman_entry);
      hypre_SStructBoxManEntryGetGlobalCSRank(boxman_entry, hypre_BoxIMax(box), 
                                              &upper_rank[i]);

      sgrid= hypre_SStructPGridCellSGrid(pgrid);
      box_array= hypre_StructGridBoxes(sgrid);
      boxes_with_bdry[i]= hypre_CTAlloc(HYPRE_Int, hypre_BoxArraySize(box_array));
   }
 
   /*-----------------------------------------------------------------------------
    * Construct the box_number mapping between levels, and the offset strides
    * due to projection coarsening. Note: from the way the coarse boxes are
    * created and numbered, to determine the coarse box that matches the fbox,
    * we only need to check the tail end of the list of cboxes. In fact, given
    * fbox_i, if its coarsened extents do not intersect with the first coarse
    * box of the tail end, then this fbox vanishes in the coarsening.
    *   c/fbox_mapping gives the fine/coarse box mapping between two consecutive
    *   levels of the multilevel hierarchy.
    *-----------------------------------------------------------------------------*/
   if (num_levels > 1)
   {
      cbox_mapping= hypre_CTAlloc(HYPRE_Int *, num_levels);
      fbox_mapping= hypre_CTAlloc(HYPRE_Int *, num_levels);
   }