Example 1
int_t pdgstrf
/************************************************************************/
(
 superlu_options_t *options, int m, int n, double anorm,
 LUstruct_t *LUstruct, gridinfo_t *grid, SuperLUStat_t *stat, int *info
 )
/* 
 * Purpose
 * =======
 *
 *  PDGSTRF performs the LU factorization in parallel.
 *
 * Arguments
 * =========
 * 
 * options (input) superlu_options_t*
 *         The structure defines the input parameters to control
 *         how the LU decomposition will be performed.
 *         The following field should be defined:
 *         o ReplaceTinyPivot (yes_no_t)
 *           Specifies whether to replace the tiny diagonals by
 *           sqrt(epsilon)*norm(A) during LU factorization.
 *
 * m      (input) int
 *        Number of rows in the matrix.
 *
 * n      (input) int
 *        Number of columns in the matrix.
 *
 * anorm  (input) double
 *        The norm of the original matrix A, or the scaled A if
 *        equilibration was done.
 *
 * LUstruct (input/output) LUstruct_t*
 *         The data structures to store the distributed L and U factors.
 *         The following fields should be defined:
 *
 *         o Glu_persist (input) Glu_persist_t*
 *           Global data structure (xsup, supno) replicated on all processes,
 *           describing the supernode partition in the factored matrices
 *           L and U:
 *	       xsup[s] is the leading column of the s-th supernode,
 *             supno[i] is the supernode number to which column i belongs.
 *
 *         o Llu (input/output) LocalLU_t*
 *           The distributed data structures to store L and U factors.
 *           See superlu_ddefs.h for the definition of 'LocalLU_t'.
 *
 * grid   (input) gridinfo_t*
 *        The 2D process mesh. It contains the MPI communicator, the number
 *        of process rows (NPROW), the number of process columns (NPCOL),
 *        and my process rank. It is an input argument to all the
 *        parallel routines.
 *        Grid can be initialized by subroutine SUPERLU_GRIDINIT.
 *        See superlu_ddefs.h for the definition of 'gridinfo_t'.
 *
 * stat   (output) SuperLUStat_t*
 *        Record the statistics on runtime and floating-point operation count.
 *        See util.h for the definition of 'SuperLUStat_t'.
 *
 * info   (output) int*
 *        = 0: successful exit
 *        < 0: if info = -i, the i-th argument had an illegal value
 *        > 0: if info = i, U(i,i) is exactly zero. The factorization has
 *             been completed, but the factor U is exactly singular,
 *             and division by zero will occur if it is used to solve a
 *             system of equations.
 *
 */
{
#ifdef _CRAY
    _fcd ftcs = _cptofcd("N", strlen("N"));
    _fcd ftcs1 = _cptofcd("L", strlen("L"));
    _fcd ftcs2 = _cptofcd("N", strlen("N"));
    _fcd ftcs3 = _cptofcd("U", strlen("U"));
#endif
    double alpha = 1.0, beta = 0.0;
    int_t *xsup;
    int_t *lsub, *lsub1, *usub, *Usub_buf,
          *Lsub_buf_2[2];  /* Need 2 buffers to implement Irecv. */
    double *lusup, *lusup1, *uval, *Uval_buf,
           *Lval_buf_2[2]; /* Need 2 buffers to implement Irecv. */
    int_t fnz, i, ib, ijb, ilst, it, iukp, jb, jj, klst, knsupc,
          lb, lib, ldv, ljb, lptr, lptr0, lptrj, luptr, luptr0, luptrj,
          nlb, nub, nsupc, rel, rukp;
    int_t Pc, Pr;
    int   iam, kcol, krow, mycol, myrow, pi, pj;
    int   j, k, lk, nsupers;
    int   nsupr, nbrow, segsize;
    int   msgcnt[4]; /* Count the size of the message xfer'd in each buffer:
		      *     0 : transferred in Lsub_buf[]
		      *     1 : transferred in Lval_buf[]
		      *     2 : transferred in Usub_buf[] 
		      *     3 : transferred in Uval_buf[]
		      */
    int_t  msg0, msg2;
    int_t  **Ufstnz_br_ptr, **Lrowind_bc_ptr;
    double **Unzval_br_ptr, **Lnzval_bc_ptr;
    int_t  *index;
    double *nzval;
    int_t  *iuip, *ruip;/* Pointers to U index/nzval; size ceil(NSUPERS/Pr). */
    double *ucol;
    int_t  *indirect;
    double *tempv, *tempv2d;
    int_t iinfo;
    int_t *ToRecv, *ToSendD, **ToSendR;
    Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
    LocalLU_t *Llu = LUstruct->Llu;
    superlu_scope_t *scp;
    float s_eps;
    double thresh;
    double *tempU2d, *tempu;
    int    full, ldt, ldu, lead_zero, ncols;
    MPI_Request recv_req[4], *send_req, *U_diag_blk_send_req = NULL;
    MPI_Status status;
#if ( DEBUGlevel>=2 ) 
    int_t num_copy=0, num_update=0;
#endif
#if ( PRNTlevel==3 )
    int_t  zero_msg = 0, total_msg = 0;
#endif
#if ( PROFlevel>=1 )
    double t1, t2;
    float msg_vol = 0, msg_cnt = 0;
    int_t iword = sizeof(int_t), dword = sizeof(double);
#endif

    /* Test the input parameters. */
    *info = 0;
    if ( m < 0 ) *info = -2;
    else if ( n < 0 ) *info = -3;
    if ( *info ) {
	pxerbla("pdgstrf", grid, -*info);
	return (-1);
    }

    /* Quick return if possible. */
    if ( m == 0 || n == 0 ) return 0;

    /*
     * Initialization.
     */
    iam = grid->iam;
    Pc = grid->npcol;
    Pr = grid->nprow;
    myrow = MYROW( iam, grid );
    mycol = MYCOL( iam, grid );
    nsupers = Glu_persist->supno[n-1] + 1;
    xsup = Glu_persist->xsup;
    s_eps = slamch_("Epsilon");
    thresh = s_eps * anorm;

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Enter pdgstrf()");
#endif

    stat->ops[FACT] = 0.0;

    if ( Pr*Pc > 1 ) {
	i = Llu->bufmax[0];
	if ( !(Llu->Lsub_buf_2[0] = intMalloc_dist(2 * ((size_t)i))) )
	    ABORT("Malloc fails for Lsub_buf.");
	Llu->Lsub_buf_2[1] = Llu->Lsub_buf_2[0] + i;
	i = Llu->bufmax[1];
	if ( !(Llu->Lval_buf_2[0] = doubleMalloc_dist(2 * ((size_t)i))) )
	    ABORT("Malloc fails for Lval_buf[].");
	Llu->Lval_buf_2[1] = Llu->Lval_buf_2[0] + i;
	if ( Llu->bufmax[2] != 0 ) 
	    if ( !(Llu->Usub_buf = intMalloc_dist(Llu->bufmax[2])) )
		ABORT("Malloc fails for Usub_buf[].");
	if ( Llu->bufmax[3] != 0 ) 
	    if ( !(Llu->Uval_buf = doubleMalloc_dist(Llu->bufmax[3])) )
		ABORT("Malloc fails for Uval_buf[].");
	if ( !(U_diag_blk_send_req =
	       (MPI_Request *) SUPERLU_MALLOC(Pr*sizeof(MPI_Request))))
	    ABORT("Malloc fails for U_diag_blk_send_req[].");
        U_diag_blk_send_req[myrow] = 0; /* flag no outstanding Isend */
	if ( !(send_req =
	       (MPI_Request *) SUPERLU_MALLOC(2*Pc*sizeof(MPI_Request))))
	    ABORT("Malloc fails for send_req[].");
    }
    k = sp_ienv_dist(3); /* max supernode size */
    if ( !(Llu->ujrow = doubleMalloc_dist(k*(k+1)/2)) )
	ABORT("Malloc fails for ujrow[].");

#if ( PRNTlevel>=1 )
    if ( !iam ) {
	printf(".. thresh = s_eps %e * anorm %e = %e\n", s_eps, anorm, thresh);
	printf(".. Buffer size: Lsub %d\tLval %d\tUsub %d\tUval %d\tLDA %d\n",
	       Llu->bufmax[0], Llu->bufmax[1], 
	       Llu->bufmax[2], Llu->bufmax[3], Llu->bufmax[4]);
    }
#endif

    Lsub_buf_2[0] = Llu->Lsub_buf_2[0];
    Lsub_buf_2[1] = Llu->Lsub_buf_2[1];
    Lval_buf_2[0] = Llu->Lval_buf_2[0];
    Lval_buf_2[1] = Llu->Lval_buf_2[1];
    Usub_buf = Llu->Usub_buf;
    Uval_buf = Llu->Uval_buf;
    Lrowind_bc_ptr = Llu->Lrowind_bc_ptr;
    Lnzval_bc_ptr = Llu->Lnzval_bc_ptr;
    Ufstnz_br_ptr = Llu->Ufstnz_br_ptr;
    Unzval_br_ptr = Llu->Unzval_br_ptr;
    ToRecv = Llu->ToRecv;
    ToSendD = Llu->ToSendD;
    ToSendR = Llu->ToSendR;

    ldt = sp_ienv_dist(3); /* Size of maximum supernode */
    if ( !(tempv2d = doubleCalloc_dist(2*((size_t)ldt)*ldt)) )
	ABORT("Calloc fails for tempv2d[].");
    tempU2d = tempv2d + ldt*ldt;
    if ( !(indirect = intMalloc_dist(ldt)) )
	ABORT("Malloc fails for indirect[].");
    k = CEILING( nsupers, Pr ); /* Number of local block rows */
    if ( !(iuip = intMalloc_dist(k)) )
	ABORT("Malloc fails for iuip[].");
    if ( !(ruip = intMalloc_dist(k)) )
	ABORT("Malloc fails for ruip[].");

#if ( VAMPIR>=1 )
    VT_symdef(1, "Send-L", "Comm");
    VT_symdef(2, "Recv-L", "Comm");
    VT_symdef(3, "Send-U", "Comm");
    VT_symdef(4, "Recv-U", "Comm");
    VT_symdef(5, "TRF2", "Factor");
    VT_symdef(100, "Factor", "Factor");
    VT_begin(100);
    VT_traceon();
#endif

    /* ---------------------------------------------------------------
       Handle the first block column separately to start the pipeline.
       --------------------------------------------------------------- */
    if ( mycol == 0 ) {

#if ( VAMPIR>=1 )
	VT_begin(5);
#endif
	pdgstrf2(options, 0, thresh, Glu_persist, grid, Llu, 
		 U_diag_blk_send_req, stat, info);

#if ( VAMPIR>=1 )
	VT_end(5);
#endif

	scp = &grid->rscp; /* The scope of process row. */

	/* Process column *kcol* multicasts numeric values of L(:,k) 
	   to process rows. */
	lsub = Lrowind_bc_ptr[0];
	lusup = Lnzval_bc_ptr[0];
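	/* In the Lrowind_bc_ptr index format, lsub[0] is the number of row
	   blocks in this block column and lsub[1] is the total number of
	   nonzero rows (the LDA of lusup); the two message lengths below
	   count the index entries and numerical values to broadcast. */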
	if ( lsub ) {
	    msgcnt[0] = lsub[1] + BC_HEADER + lsub[0]*LB_DESCRIPTOR;
	    msgcnt[1] = lsub[1] * SuperSize( 0 );
	} else {
	    msgcnt[0] = msgcnt[1] = 0;
	}
	
	for (pj = 0; pj < Pc; ++pj) {
	    if ( ToSendR[0][pj] != EMPTY ) {
#if ( PROFlevel>=1 )
		TIC(t1);
#endif
#if ( VAMPIR>=1 )
		VT_begin(1);
#endif
		MPI_Isend( lsub, msgcnt[0], mpi_int_t, pj, 0, scp->comm,
			  &send_req[pj] );
		MPI_Isend( lusup, msgcnt[1], MPI_DOUBLE, pj, 1, scp->comm,
			  &send_req[pj+Pc] );
#if ( DEBUGlevel>=2 )
		printf("(%d) Send L(:,%4d): lsub %4d, lusup %4d to Pc %2d\n",
		       iam, 0, msgcnt[0], msgcnt[1], pj);
#endif
#if ( VAMPIR>=1 )
		VT_end(1);
#endif
#if ( PROFlevel>=1 )
		TOC(t2, t1);
		stat->utime[COMM] += t2;
		msg_cnt += 2;
		msg_vol += msgcnt[0]*iword + msgcnt[1]*dword;
#endif
	    }
	} /* for pj ... */
    } else { /* Post immediate receives. */
	if ( ToRecv[0] >= 1 ) { /* Recv block column L(:,0). */
	    scp = &grid->rscp; /* The scope of process row. */
	    MPI_Irecv( Lsub_buf_2[0], Llu->bufmax[0], mpi_int_t, 0,
		      0, scp->comm, &recv_req[0] );
	    MPI_Irecv( Lval_buf_2[0], Llu->bufmax[1], MPI_DOUBLE, 0,
		      1, scp->comm, &recv_req[1] );
#if ( DEBUGlevel>=2 )
	    printf("(%d) Post Irecv L(:,%4d)\n", iam, 0);
#endif
	}
    } /* if mycol == 0 */

    /* ------------------------------------------
       MAIN LOOP: Loop through all block columns.
       ------------------------------------------ */
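    /* At step k: the process column owning block column k broadcasts
       L(:,k) across its process row, the process row owning block row k
       broadcasts U(k,:) down its process column, and every process updates
       the trailing blocks it owns.  The factorization of block column k+1
       and the posting of its sends/receives are issued inside the same
       iteration, so communication for step k+1 overlaps the updates of
       step k (look-ahead pipelining). */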
    for (k = 0; k < nsupers; ++k) {

	knsupc = SuperSize( k );
	krow = PROW( k, grid );
	kcol = PCOL( k, grid );

	if ( mycol == kcol ) {
	    lk = LBj( k, grid ); /* Local block number. */

	    for (pj = 0; pj < Pc; ++pj) {
                /* Wait for Isend to complete before using lsub/lusup. */
		if ( ToSendR[lk][pj] != EMPTY ) {
		    MPI_Wait( &send_req[pj], &status );
		    MPI_Wait( &send_req[pj+Pc], &status );
		}
	    }
	    lsub = Lrowind_bc_ptr[lk];
	    lusup = Lnzval_bc_ptr[lk];
	} else {
	    if ( ToRecv[k] >= 1 ) { /* Recv block column L(:,k). */
		scp = &grid->rscp; /* The scope of process row. */
#if ( PROFlevel>=1 )
		TIC(t1);
#endif
#if ( VAMPIR>=1 )
		VT_begin(2);
#endif
		/*probe_recv(iam, kcol, (4*k)%NTAGS, mpi_int_t, scp->comm, 
		  Llu->bufmax[0]);*/
		/*MPI_Recv( Lsub_buf, Llu->bufmax[0], mpi_int_t, kcol, 
			 (4*k)%NTAGS, scp->comm, &status );*/
		MPI_Wait( &recv_req[0], &status );
		MPI_Get_count( &status, mpi_int_t, &msgcnt[0] );
		/*probe_recv(iam, kcol, (4*k+1)%NTAGS, MPI_DOUBLE, scp->comm, 
		  Llu->bufmax[1]);*/
		/*MPI_Recv( Lval_buf, Llu->bufmax[1], MPI_DOUBLE, kcol, 
			 (4*k+1)%NTAGS, scp->comm, &status );*/
		MPI_Wait( &recv_req[1], &status );
		MPI_Get_count( &status, MPI_DOUBLE, &msgcnt[1] );
#if ( VAMPIR>=1 )
		VT_end(2);
#endif
#if ( PROFlevel>=1 )
		TOC(t2, t1);
		stat->utime[COMM] += t2;
#endif
#if ( DEBUGlevel>=2 )
		printf("(%d) Recv L(:,%4d): lsub %4d, lusup %4d from Pc %2d\n",
		       iam, k, msgcnt[0], msgcnt[1], kcol);
		fflush(stdout);
#endif
		lsub = Lsub_buf_2[k%2];
		lusup = Lval_buf_2[k%2];
#if ( PRNTlevel==3 )
		++total_msg;
		if ( !msgcnt[0] ) ++zero_msg;
#endif
	    } else msgcnt[0] = 0;
	} /* if mycol = Pc(k) */

	scp = &grid->cscp; /* The scope of process column. */

	if ( myrow == krow ) {
	    /* Parallel triangular solve across process row *krow* --
	       U(k,j) = L(k,k) \ A(k,j).  */
#ifdef _CRAY
	    pdgstrs2(n, k, Glu_persist, grid, Llu, stat, ftcs1, ftcs2, ftcs3);
#else
	    pdgstrs2(n, k, Glu_persist, grid, Llu, stat);
#endif

	    /* Multicasts U(k,:) to process columns. */
	    lk = LBi( k, grid );
	    usub = Ufstnz_br_ptr[lk];
	    uval = Unzval_br_ptr[lk];
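	    /* In the Ufstnz_br_ptr index format, usub[0] is the number of
	       column blocks in this block row, usub[1] the number of
	       numerical entries, and usub[2] the length of the index array,
	       which give the two message lengths below. */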
	    if ( usub )	{
		msgcnt[2] = usub[2];
		msgcnt[3] = usub[1];
	    } else {
		msgcnt[2] = msgcnt[3] = 0;
	    }

	    if ( ToSendD[lk] == YES ) {
		for (pi = 0; pi < Pr; ++pi) {
		    if ( pi != myrow ) {
#if ( PROFlevel>=1 )
			TIC(t1);
#endif
#if ( VAMPIR>=1 )
			VT_begin(3);
#endif
			MPI_Send( usub, msgcnt[2], mpi_int_t, pi,
				 (4*k+2)%NTAGS, scp->comm);
			MPI_Send( uval, msgcnt[3], MPI_DOUBLE, pi,
				 (4*k+3)%NTAGS, scp->comm);
#if ( VAMPIR>=1 )
			VT_end(3);
#endif
#if ( PROFlevel>=1 )
			TOC(t2, t1);
			stat->utime[COMM] += t2;
			msg_cnt += 2;
			msg_vol += msgcnt[2]*iword + msgcnt[3]*dword;
#endif
#if ( DEBUGlevel>=2 )
			printf("(%d) Send U(%4d,:) to Pr %2d\n", iam, k, pi);
#endif
		    } /* if pi ... */
		} /* for pi ... */
	    } /* if ToSendD ... */
	} else { /* myrow != krow */
	    if ( ToRecv[k] == 2 ) { /* Recv block row U(k,:). */
#if ( PROFlevel>=1 )
		TIC(t1);
#endif
#if ( VAMPIR>=1 )
		VT_begin(4);
#endif
		/*probe_recv(iam, krow, (4*k+2)%NTAGS, mpi_int_t, scp->comm, 
		  Llu->bufmax[2]);*/
		MPI_Recv( Usub_buf, Llu->bufmax[2], mpi_int_t, krow,
			 (4*k+2)%NTAGS, scp->comm, &status );
		MPI_Get_count( &status, mpi_int_t, &msgcnt[2] );
		/*probe_recv(iam, krow, (4*k+3)%NTAGS, MPI_DOUBLE, scp->comm, 
		  Llu->bufmax[3]);*/
		MPI_Recv( Uval_buf, Llu->bufmax[3], MPI_DOUBLE, krow, 
			 (4*k+3)%NTAGS, scp->comm, &status );
		MPI_Get_count( &status, MPI_DOUBLE, &msgcnt[3] );
#if ( VAMPIR>=1 )
		VT_end(4);
#endif
#if ( PROFlevel>=1 )
		TOC(t2, t1);
		stat->utime[COMM] += t2;
#endif
		usub = Usub_buf;
		uval = Uval_buf;
#if ( DEBUGlevel>=2 )
		printf("(%d) Recv U(%4d,:) from Pr %2d\n", iam, k, krow);
#endif
#if ( PRNTlevel==3 )
		++total_msg;
		if ( !msgcnt[2] ) ++zero_msg;
#endif
	    } else msgcnt[2] = 0;
	} /* if myrow == Pr(k) */
	  
	/* 
	 * Parallel rank-k update; pair up blocks L(i,k) and U(k,j).
 *  for (j = k+1; j < N; ++j) {
	 *     for (i = k+1; i < N; ++i) 
	 *         if ( myrow == PROW( i, grid ) && mycol == PCOL( j, grid )
	 *              && L(i,k) != 0 && U(k,j) != 0 )
	 *             A(i,j) = A(i,j) - L(i,k) * U(k,j);
	 */
	msg0 = msgcnt[0];
	msg2 = msgcnt[2];
	if ( msg0 && msg2 ) { /* L(:,k) and U(k,:) are not empty. */
	    nsupr = lsub[1]; /* LDA of lusup. */
	    if ( myrow == krow ) { /* Skip diagonal block L(k,k). */
		lptr0 = BC_HEADER + LB_DESCRIPTOR + lsub[BC_HEADER+1];
		luptr0 = knsupc;
		nlb = lsub[0] - 1;
	    } else {
		lptr0 = BC_HEADER;
		luptr0 = 0;
		nlb = lsub[0];
	    }
	    lptr = lptr0;
	    for (lb = 0; lb < nlb; ++lb) { /* Initialize block row pointers. */
		ib = lsub[lptr];
		lib = LBi( ib, grid );
		iuip[lib] = BR_HEADER;
		ruip[lib] = 0;
		lptr += LB_DESCRIPTOR + lsub[lptr+1];
	    }
	    nub = usub[0];    /* Number of blocks in the block row U(k,:) */
	    iukp = BR_HEADER; /* Skip header; Pointer to index[] of U(k,:) */
	    rukp = 0;         /* Pointer to nzval[] of U(k,:) */
	    klst = FstBlockC( k+1 );
	    
	    /* ---------------------------------------------------
	       Update the first block column A(:,k+1).
	       --------------------------------------------------- */
	    jb = usub[iukp];   /* Global block number of block U(k,j). */
	    if ( jb == k+1 ) { /* First update (k+1)-th block. */
		--nub;
		lptr = lptr0;
		luptr = luptr0;
		ljb = LBj( jb, grid ); /* Local block number of U(k,j). */
		nsupc = SuperSize( jb );
		iukp += UB_DESCRIPTOR; /* Start fstnz of block U(k,j). */

		/* Prepare to call DGEMM. */
		jj = iukp;
		while ( usub[jj] == klst ) ++jj;
		ldu = klst - usub[jj++];
		ncols = 1;
		full = 1;
		for (; jj < iukp+nsupc; ++jj) {
		    segsize = klst - usub[jj];
		    if ( segsize ) {
		        ++ncols;
			if ( segsize != ldu ) full = 0;
		        if ( segsize > ldu ) ldu = segsize;
		    }
		}
#if ( DEBUGlevel>=3 )
		++num_update;
#endif
		if ( full ) {
		    tempu = &uval[rukp];
		} else { /* Copy block U(k,j) into tempU2d. */
#if ( DEBUGlevel>=3 )
		  printf("(%d) full=%d,k=%d,jb=%d,ldu=%d,ncols=%d,nsupc=%d\n",
			 iam, full, k, jb, ldu, ncols, nsupc);
		  ++num_copy;
#endif
		    tempu = tempU2d;
		    for (jj = iukp; jj < iukp+nsupc; ++jj) {
		        segsize = klst - usub[jj];
			if ( segsize ) {
			    lead_zero = ldu - segsize;
			    for (i = 0; i < lead_zero; ++i) tempu[i] = 0.0;
			    tempu += lead_zero;
			    for (i = 0; i < segsize; ++i)
				tempu[i] = uval[rukp+i];
			    rukp += segsize;
			    tempu += segsize;
			}
		    }
		    tempu = tempU2d;
		    rukp -= usub[iukp - 1]; /* Return to start of U(k,j). */
		} /* if full ... */

		for (lb = 0; lb < nlb; ++lb) { 
		    ib = lsub[lptr]; /* Row block L(i,k). */
		    nbrow = lsub[lptr+1];  /* Number of full rows. */
		    lptr += LB_DESCRIPTOR; /* Skip descriptor. */
		    tempv = tempv2d;
#ifdef _CRAY
		    SGEMM(ftcs, ftcs, &nbrow, &ncols, &ldu, &alpha, 
			  &lusup[luptr+(knsupc-ldu)*nsupr], &nsupr, 
			  tempu, &ldu, &beta, tempv, &ldt);
#elif defined (USE_VENDOR_BLAS)
		    dgemm_("N", "N", &nbrow, &ncols, &ldu, &alpha, 
			   &lusup[luptr+(knsupc-ldu)*nsupr], &nsupr, 
			   tempu, &ldu, &beta, tempv, &ldt, 1, 1);
#else
		    dgemm_("N", "N", &nbrow, &ncols, &ldu, &alpha, 
			   &lusup[luptr+(knsupc-ldu)*nsupr], &nsupr, 
			   tempu, &ldu, &beta, tempv, &ldt);
#endif
		    stat->ops[FACT] += 2 * nbrow * ldu * ncols;

		    /* Now gather the result into the destination block. */
		    if ( ib < jb ) { /* A(i,j) is in U. */
			ilst = FstBlockC( ib+1 );
			lib = LBi( ib, grid );
			index = Ufstnz_br_ptr[lib];
			ijb = index[iuip[lib]];
			while ( ijb < jb ) { /* Search for dest block. */
			    ruip[lib] += index[iuip[lib]+1];
			    iuip[lib] += UB_DESCRIPTOR + SuperSize( ijb );
			    ijb = index[iuip[lib]];
			}
			iuip[lib] += UB_DESCRIPTOR; /* Skip descriptor. */

			tempv = tempv2d;
			for (jj = 0; jj < nsupc; ++jj) {
			    segsize = klst - usub[iukp + jj];
			    fnz = index[iuip[lib]++];
			    if ( segsize ) { /* Nonzero segment in U(k,j). */
				ucol = &Unzval_br_ptr[lib][ruip[lib]];
				for (i = 0, it = 0; i < nbrow; ++i) {
				    rel = lsub[lptr + i] - fnz;
				    ucol[rel] -= tempv[it++];
				}
				tempv += ldt;
			    }
			    ruip[lib] += ilst - fnz;
			}
		    } else { /* A(i,j) is in L. */
			index = Lrowind_bc_ptr[ljb];
			ldv = index[1];   /* LDA of the dest lusup. */
			lptrj = BC_HEADER;
			luptrj = 0;
			ijb = index[lptrj];
			while ( ijb != ib ) { /* Search for dest block -- 
						 blocks are not ordered! */
			    luptrj += index[lptrj+1];
			    lptrj += LB_DESCRIPTOR + index[lptrj+1];
			    ijb = index[lptrj];
			}
			/*
			 * Build indirect table. This is needed because the
			 * indices are not sorted.
			 */
			fnz = FstBlockC( ib );
			lptrj += LB_DESCRIPTOR;
			for (i = 0; i < index[lptrj-1]; ++i) {
			    rel = index[lptrj + i] - fnz;
			    indirect[rel] = i;
			}
			nzval = Lnzval_bc_ptr[ljb] + luptrj;
			tempv = tempv2d;
			for (jj = 0; jj < nsupc; ++jj) {
			    segsize = klst - usub[iukp + jj];
			    if ( segsize ) {
/*#pragma _CRI cache_bypass nzval,tempv*/
				for (it = 0, i = 0; i < nbrow; ++i) {
				    rel = lsub[lptr + i] - fnz;
				    nzval[indirect[rel]] -= tempv[it++];
				}
				tempv += ldt;
			    }
			    nzval += ldv;
			}
		    } /* if ib < jb ... */
		    lptr += nbrow;
		    luptr += nbrow;
		} /* for lb ... */
		rukp += usub[iukp - 1]; /* Move to block U(k,j+1) */
		iukp += nsupc;
	    }  /* if jb == k+1 */
	} /* if L(:,k) and U(k,:) not empty */


	if ( k+1 < nsupers ) {
	  kcol = PCOL( k+1, grid );
	  if ( mycol == kcol ) {
#if ( VAMPIR>=1 )
	    VT_begin(5);
#endif
	    /* Factor diagonal and subdiagonal blocks and test for exact
	       singularity.  */
	    pdgstrf2(options, k+1, thresh, Glu_persist, grid, Llu,
		     U_diag_blk_send_req, stat, info);

#if ( VAMPIR>=1 )
	    VT_end(5);
#endif

	    /* Process column *kcol* (which now refers to the owner of block
	       column k+1) multicasts numeric values of L(:,k+1) to process
	       rows. */
	    lk = LBj( k+1, grid ); /* Local block number. */
	    lsub1 = Lrowind_bc_ptr[lk];
 	    if ( lsub1 ) {
		msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0]*LB_DESCRIPTOR;
		msgcnt[1] = lsub1[1] * SuperSize( k+1 );
	    } else {
		msgcnt[0] = 0;
		msgcnt[1] = 0;
	    }
	    scp = &grid->rscp; /* The scope of process row. */
	    for (pj = 0; pj < Pc; ++pj) {
		if ( ToSendR[lk][pj] != EMPTY ) {
		    lusup1 = Lnzval_bc_ptr[lk];
#if ( PROFlevel>=1 )
		    TIC(t1);
#endif
#if ( VAMPIR>=1 )
		    VT_begin(1);
#endif
		    MPI_Isend( lsub1, msgcnt[0], mpi_int_t, pj,
			      (4*(k+1))%NTAGS, scp->comm, &send_req[pj] );
		    MPI_Isend( lusup1, msgcnt[1], MPI_DOUBLE, pj,
			     (4*(k+1)+1)%NTAGS, scp->comm, &send_req[pj+Pc] );
#if ( VAMPIR>=1 )
		    VT_end(1);
#endif
#if ( PROFlevel>=1 )
		    TOC(t2, t1);
		    stat->utime[COMM] += t2;
		    msg_cnt += 2;
		    msg_vol += msgcnt[0]*iword + msgcnt[1]*dword;
#endif
#if ( DEBUGlevel>=2 )
		    printf("(%d) Send L(:,%4d): lsub %4d, lusup %4d to Pc %2d\n",
			   iam, k+1, msgcnt[0], msgcnt[1], pj);
#endif
		}
	    } /* for pj ... */
	  } else { /* Post Recv of block column L(:,k+1). */
	    if ( ToRecv[k+1] >= 1 ) {
		scp = &grid->rscp; /* The scope of process row. */
		MPI_Irecv(Lsub_buf_2[(k+1)%2], Llu->bufmax[0], mpi_int_t, kcol,
			  (4*(k+1))%NTAGS, scp->comm, &recv_req[0]);
		MPI_Irecv(Lval_buf_2[(k+1)%2], Llu->bufmax[1], MPI_DOUBLE, kcol, 
			  (4*(k+1)+1)%NTAGS, scp->comm, &recv_req[1]);
#if ( DEBUGlevel>=2 )
		printf("(%d) Post Irecv L(:,%4d)\n", iam, k+1);
#endif
	    }
	  } /* if mycol == Pc(k+1) */
        } /* if k+1 < nsupers */

	if ( msg0 && msg2 ) { /* L(:,k) and U(k,:) are not empty. */
	    /* ---------------------------------------------------
	       Update all other blocks using block row U(k,:)
	       --------------------------------------------------- */
	    for (j = 0; j < nub; ++j) { 
		lptr = lptr0;
		luptr = luptr0;
		jb = usub[iukp];  /* Global block number of block U(k,j). */
		ljb = LBj( jb, grid ); /* Local block number of U(k,j). */
		nsupc = SuperSize( jb );
		iukp += UB_DESCRIPTOR; /* Start fstnz of block U(k,j). */

		/* Prepare to call DGEMM. */
		jj = iukp;
		while ( usub[jj] == klst ) ++jj;
		ldu = klst - usub[jj++];
		ncols = 1;
		full = 1;
		for (; jj < iukp+nsupc; ++jj) {
		    segsize = klst - usub[jj];
		    if ( segsize ) {
		        ++ncols;
			if ( segsize != ldu ) full = 0;
		        if ( segsize > ldu ) ldu = segsize;
		    }
		}
#if ( DEBUGlevel>=3 )
		printf("(%d) full=%d,k=%d,jb=%d,ldu=%d,ncols=%d,nsupc=%d\n",
		       iam, full, k, jb, ldu, ncols, nsupc);
		++num_update;
#endif
		if ( full ) {
		    tempu = &uval[rukp];
		} else { /* Copy block U(k,j) into tempU2d. */
#if ( DEBUGlevel>=3 )
		    ++num_copy;
#endif
		    tempu = tempU2d;
		    for (jj = iukp; jj < iukp+nsupc; ++jj) {
		        segsize = klst - usub[jj];
			if ( segsize ) {
			    lead_zero = ldu - segsize;
			    for (i = 0; i < lead_zero; ++i) tempu[i] = 0.0;
			    tempu += lead_zero;
			    for (i = 0; i < segsize; ++i)
			        tempu[i] = uval[rukp+i];
			    rukp += segsize;
			    tempu += segsize;
			}
		    }
		    tempu = tempU2d;
		    rukp -= usub[iukp - 1]; /* Return to start of U(k,j). */
		} /* if full ... */

		for (lb = 0; lb < nlb; ++lb) { 
		    ib = lsub[lptr];       /* Row block L(i,k). */
		    nbrow = lsub[lptr+1];  /* Number of full rows. */
		    lptr += LB_DESCRIPTOR; /* Skip descriptor. */
		    tempv = tempv2d;
#ifdef _CRAY
		    SGEMM(ftcs, ftcs, &nbrow, &ncols, &ldu, &alpha, 
			  &lusup[luptr+(knsupc-ldu)*nsupr], &nsupr, 
			  tempu, &ldu, &beta, tempv, &ldt);
#elif defined (USE_VENDOR_BLAS)
		    dgemm_("N", "N", &nbrow, &ncols, &ldu, &alpha, 
			   &lusup[luptr+(knsupc-ldu)*nsupr], &nsupr, 
			   tempu, &ldu, &beta, tempv, &ldt, 1, 1);
#else
		    dgemm_("N", "N", &nbrow, &ncols, &ldu, &alpha, 
			   &lusup[luptr+(knsupc-ldu)*nsupr], &nsupr, 
			   tempu, &ldu, &beta, tempv, &ldt);
#endif
		    stat->ops[FACT] += 2 * nbrow * ldu * ncols;

		    /* Now gather the result into the destination block. */
		    if ( ib < jb ) { /* A(i,j) is in U. */
			ilst = FstBlockC( ib+1 );
			lib = LBi( ib, grid );
			index = Ufstnz_br_ptr[lib];
			ijb = index[iuip[lib]];
			while ( ijb < jb ) { /* Search for dest block. */
			    ruip[lib] += index[iuip[lib]+1];
			    iuip[lib] += UB_DESCRIPTOR + SuperSize( ijb );
			    ijb = index[iuip[lib]];
			}
			/* Skip descriptor.  Now point to fstnz index of 
			   block U(i,j). */
			iuip[lib] += UB_DESCRIPTOR;

			tempv = tempv2d;
			for (jj = 0; jj < nsupc; ++jj) {
			    segsize = klst - usub[iukp + jj];
			    fnz = index[iuip[lib]++];
			    if ( segsize ) { /* Nonzero segment in U(k,j). */
				ucol = &Unzval_br_ptr[lib][ruip[lib]];
				for (i = 0 ; i < nbrow; ++i) {
				    rel = lsub[lptr + i] - fnz;
				    ucol[rel] -= tempv[i];
				}
				tempv += ldt;
			    }
			    ruip[lib] += ilst - fnz;
			}
		    } else { /* A(i,j) is in L. */
			index = Lrowind_bc_ptr[ljb];
			ldv = index[1];   /* LDA of the dest lusup. */
			lptrj = BC_HEADER;
			luptrj = 0;
			ijb = index[lptrj];
			while ( ijb != ib ) { /* Search for dest block -- 
						 blocks are not ordered! */
			    luptrj += index[lptrj+1];
			    lptrj += LB_DESCRIPTOR + index[lptrj+1];
			    ijb = index[lptrj];
			}
			/*
			 * Build indirect table. This is needed because the
			 * indices are not sorted for the L blocks.
			 */
			fnz = FstBlockC( ib );
			lptrj += LB_DESCRIPTOR;
			for (i = 0; i < index[lptrj-1]; ++i) {
			    rel = index[lptrj + i] - fnz;
			    indirect[rel] = i;
			}
			nzval = Lnzval_bc_ptr[ljb] + luptrj;
			tempv = tempv2d;
			for (jj = 0; jj < nsupc; ++jj) {
			    segsize = klst - usub[iukp + jj];
			    if ( segsize ) {
/*#pragma _CRI cache_bypass nzval,tempv*/
				for (i = 0; i < nbrow; ++i) {
				    rel = lsub[lptr + i] - fnz;
				    nzval[indirect[rel]] -= tempv[i];
				}
				tempv += ldt;
			    }
			    nzval += ldv;
			}
		    } /* if ib < jb ... */
		    lptr += nbrow;
		    luptr += nbrow;
		} /* for lb ... */
		rukp += usub[iukp - 1]; /* Move to block U(k,j+1) */
		iukp += nsupc;
	    } /* for j ... */
	} /* if  k L(:,k) and U(k,:) are not empty */

    } 
    /* ------------------------------------------
       END MAIN LOOP: for k = ...
       ------------------------------------------ */

#if ( VAMPIR>=1 )
    VT_end(100);
    VT_traceoff();
#endif

    if ( Pr*Pc > 1 ) {
	SUPERLU_FREE(Lsub_buf_2[0]); /* also free Lsub_buf_2[1] */
	SUPERLU_FREE(Lval_buf_2[0]); /* also free Lval_buf_2[1] */
	if ( Llu->bufmax[2] != 0 ) SUPERLU_FREE(Usub_buf);
	if ( Llu->bufmax[3] != 0 ) SUPERLU_FREE(Uval_buf);
	SUPERLU_FREE(send_req);
	if ( U_diag_blk_send_req[myrow] ) {
	    /* wait for last Isend requests to complete, deallocate objects */ 
	    for (krow = 0; krow < Pr; ++krow)
		if ( krow != myrow )
                    MPI_Wait(U_diag_blk_send_req + krow, &status);
	}
	SUPERLU_FREE(U_diag_blk_send_req);
    }

    SUPERLU_FREE(Llu->ujrow);
    SUPERLU_FREE(tempv2d);
    SUPERLU_FREE(indirect);
    SUPERLU_FREE(iuip);
    SUPERLU_FREE(ruip);

    /* Prepare error message. */
    if ( *info == 0 ) *info = n + 1;
#if ( PROFlevel>=1 )
    TIC(t1);
#endif
    MPI_Allreduce( info, &iinfo, 1, mpi_int_t, MPI_MIN, grid->comm );
#if ( PROFlevel>=1 )
    TOC(t2, t1);
    stat->utime[COMM] += t2;
    {
	float msg_vol_max, msg_vol_sum, msg_cnt_max, msg_cnt_sum;
	
	MPI_Reduce( &msg_cnt, &msg_cnt_sum,
		   1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
	MPI_Reduce( &msg_cnt, &msg_cnt_max,
		   1, MPI_FLOAT, MPI_MAX, 0, grid->comm );
	MPI_Reduce( &msg_vol, &msg_vol_sum,
		   1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
	MPI_Reduce( &msg_vol, &msg_vol_max,
		   1, MPI_FLOAT, MPI_MAX, 0, grid->comm );
	if ( !iam ) {
	    printf("\tPDGSTRF comm stat:"
		   "\tAvg\tMax\t\tAvg\tMax\n"
		   "\t\t\tCount:\t%.0f\t%.0f\tVol(MB)\t%.2f\t%.2f\n",
		   msg_cnt_sum/Pr/Pc, msg_cnt_max,
		   msg_vol_sum/Pr/Pc*1e-6, msg_vol_max*1e-6);
	}
    }
#endif
    if ( iinfo == n + 1 ) *info = 0;
    else *info = iinfo;


#if ( PRNTlevel==3 )
    MPI_Allreduce( &zero_msg, &iinfo, 1, mpi_int_t, MPI_SUM, grid->comm );
    if ( !iam ) printf(".. # msg of zero size\t%d\n", iinfo);
    MPI_Allreduce( &total_msg, &iinfo, 1, mpi_int_t, MPI_SUM, grid->comm );
    if ( !iam ) printf(".. # total msg\t%d\n", iinfo);
#endif

#if ( DEBUGlevel>=2 )
    for (i = 0; i < Pr * Pc; ++i) {
	if ( iam == i ) {
	    dPrintLblocks(iam, nsupers, grid, Glu_persist, Llu);
	    dPrintUblocks(iam, nsupers, grid, Glu_persist, Llu);
	    printf("(%d)\n", iam);
	    PrintInt10("Recv", nsupers, Llu->ToRecv);
	}
	MPI_Barrier( grid->comm );
    }
#endif

#if ( DEBUGlevel>=3 )
    printf("(%d) num_copy=%d, num_update=%d\n", iam, num_copy, num_update);
#endif
#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Exit pdgstrf()");
#endif
} /* PDGSTRF */
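A minimal calling sketch for pdgstrf (illustrative only, not taken from the library's drivers): the grid, option and statistics routines used here (superlu_gridinit, superlu_gridexit, set_default_options_dist, PStatInit, PStatFree) are standard SuperLU_DIST API, while m, n, anorm, nprow, npcol and a fully distributed LUstruct are assumed to have been prepared beforehand, e.g. by the symbolic factorization and distribution phase normally run inside pdgssvx.

    superlu_options_t options;
    gridinfo_t grid;
    SuperLUStat_t stat;
    int info;

    superlu_gridinit(MPI_COMM_WORLD, nprow, npcol, &grid); /* 2D process mesh */
    set_default_options_dist(&options);
    options.ReplaceTinyPivot = YES;  /* replace tiny pivots by sqrt(eps)*||A|| */
    PStatInit(&stat);

    pdgstrf(&options, m, n, anorm, &LUstruct, &grid, &stat, &info);
    if ( info > 0 )
        fprintf(stderr, "U(%d,%d) is exactly zero\n", info, info);

    PStatFree(&stat);
    superlu_gridexit(&grid);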
Example 2
/*! \brief
 *
 * <pre>
 * Purpose
 * =======
 *
 * PDGSRFS improves the computed solution to a system of linear
 * equations and provides error bounds and backward error estimates
 * for the solution.
 *
 * Arguments
 * =========
 *
 * n      (input) int (global)
 *        The order of the system of linear equations.
 *
 * A      (input) SuperMatrix*
 *	  The original matrix A, or the scaled A if equilibration was done.
 *        A is also permuted into diag(R)*A*diag(C)*Pc'. The type of A can be:
 *        Stype = SLU_NR_loc; Dtype = SLU_D; Mtype = SLU_GE.
 *
 * anorm  (input) double
 *        The norm of the original matrix A, or the scaled A if
 *        equilibration was done.
 *
 * LUstruct (input) LUstruct_t*
 *        The distributed data structures storing L and U factors.
 *        The L and U factors are obtained from pdgstrf for
 *        the possibly scaled and permuted matrix A.
 *        See superlu_ddefs.h for the definition of 'LUstruct_t'.
 *
 * ScalePermstruct (input) ScalePermstruct_t* (global)
 *         The data structure to store the scaling and permutation vectors
 *         describing the transformations performed to the matrix A.
 *
 * grid   (input) gridinfo_t*
 *        The 2D process mesh. It contains the MPI communicator, the number
 *        of process rows (NPROW), the number of process columns (NPCOL),
 *        and my process rank. It is an input argument to all the
 *        parallel routines.
 *        Grid can be initialized by subroutine SUPERLU_GRIDINIT.
 *        See superlu_defs.h for the definition of 'gridinfo_t'.
 *
 * B      (input) double* (local)
 *        The m_loc-by-NRHS right-hand side matrix of the possibly
 *        equilibrated system. That is, B may be overwritten by diag(R)*B.
 *
 * ldb    (input) int (local)
 *        Leading dimension of matrix B.
 *
 * X      (input/output) double* (local)
 *        On entry, the solution matrix Y, as computed by PDGSTRS, of the
 *            transformed system A1*Y = Pc*Pr*B, where
 *            A1 = Pc*Pr*diag(R)*A*diag(C)*Pc' and Y = Pc*diag(C)^(-1)*X.
 *        On exit, the improved solution matrix Y.
 *
 *        In order to obtain the solution X to the original system,
 *        Y should be permuted by Pc^T, and premultiplied by diag(C)
 *        if DiagScale = COL or BOTH.
 *        This must be done after this routine is called.
 *
 * ldx    (input) int (local)
 *        Leading dimension of matrix X.
 *
 * nrhs   (input) int
 *        Number of right-hand sides.
 *
 * SOLVEstruct (output) SOLVEstruct_t* (global)
 *        Contains the information for the communication during the
 *        solution phase.
 *
 * berr   (output) double*, dimension (nrhs)
 *         The componentwise relative backward error of each solution
 *         vector X(j) (i.e., the smallest relative change in
 *         any element of A or B that makes X(j) an exact solution).
 *
 * stat   (output) SuperLUStat_t*
 *        Record the statistics about the refinement steps.
 *        See util.h for the definition of SuperLUStat_t.
 *
 * info   (output) int*
 *        = 0: successful exit
 *        < 0: if info = -i, the i-th argument had an illegal value
 *
 * Internal Parameters
 * ===================
 *
 * ITMAX is the maximum number of steps of iterative refinement.
 * </pre>
 */
void
pdgsrfs(int_t n, SuperMatrix *A, double anorm, LUstruct_t *LUstruct,
        ScalePermstruct_t *ScalePermstruct, gridinfo_t *grid,
        double *B, int_t ldb, double *X, int_t ldx, int nrhs,
        SOLVEstruct_t *SOLVEstruct,
        double *berr, SuperLUStat_t *stat, int *info)
{
#define ITMAX 20

    Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
    LocalLU_t *Llu = LUstruct->Llu;
    double *ax, *R, *dx, *temp, *work, *B_col, *X_col;
    int_t count, i, j, lwork, nz;
    int   iam;
    double eps, lstres;
    double s, safmin, safe1, safe2;

    /* Data structures used by matrix-vector multiply routine. */
    pdgsmv_comm_t *gsmv_comm = SOLVEstruct->gsmv_comm;
    NRformat_loc *Astore;
    int_t        m_loc, fst_row;


    /* Initialization. */
    Astore = (NRformat_loc *) A->Store;
    m_loc = Astore->m_loc;
    fst_row = Astore->fst_row;
    iam = grid->iam;

    /* Test the input parameters. */
    *info = 0;
    if ( n < 0 ) *info = -1;
    else if ( A->nrow != A->ncol || A->nrow < 0 || A->Stype != SLU_NR_loc
              || A->Dtype != SLU_D || A->Mtype != SLU_GE )
        *info = -2;
    else if ( ldb < SUPERLU_MAX(0, m_loc) ) *info = -10;
    else if ( ldx < SUPERLU_MAX(0, m_loc) ) *info = -12;
    else if ( nrhs < 0 ) *info = -13;
    if (*info != 0) {
        i = -(*info);
        pxerbla("PDGSRFS", grid, i);
        return;
    }

    /* Quick return if possible. */
    if ( n == 0 || nrhs == 0 ) {
        return;
    }


#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Enter pdgsrfs()");
#endif

    lwork = 2 * m_loc;  /* For ax/R/dx and temp */
    if ( !(work = doubleMalloc_dist(lwork)) )
        ABORT("Malloc fails for work[]");
    ax = R = dx = work;
    temp = ax + m_loc;

    /* NZ = maximum number of nonzero elements in each row of A, plus 1 */
    nz     = A->ncol + 1;
    eps    = dmach("Epsilon");
    safmin = dmach("Safe minimum");

    /* Set SAFE1 essentially to be the underflow threshold times the
       number of additions in each row. */
    safe1  = nz * safmin;
    safe2  = safe1 / eps;

#if ( DEBUGlevel>=1 )
    if ( !iam ) printf(".. eps = %e\tanorm = %e\tsafe1 = %e\tsafe2 = %e\n",
                           eps, anorm, safe1, safe2);
#endif

    /* Do for each right-hand side ... */
    for (j = 0; j < nrhs; ++j) {
        count = 0;
        lstres = 3.;
        B_col = &B[j*ldb];
        X_col = &X[j*ldx];

        while (1) { /* Loop until stopping criterion is satisfied. */

            /* Compute residual R = B - op(A) * X,
               where op(A) = A, A**T, or A**H, depending on TRANS. */

            /* Matrix-vector multiply. */
            pdgsmv(0, A, grid, gsmv_comm, X_col, ax);

            /* Compute residual, stored in R[]. */
            for (i = 0; i < m_loc; ++i) R[i] = B_col[i] - ax[i];

            /* Compute abs(op(A))*abs(X) + abs(B), stored in temp[]. */
            pdgsmv(1, A, grid, gsmv_comm, X_col, temp);
            for (i = 0; i < m_loc; ++i) temp[i] += fabs(B_col[i]);
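            /* Componentwise backward error for this right-hand side:
                   berr[j] = max_i |R_i| / ( |A|*|X| + |B| )_i,
               computed locally in the loop below, then maximized over all
               processes with MPI_Allreduce. */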

            s = 0.0;
            for (i = 0; i < m_loc; ++i) {
                if ( temp[i] > safe2 ) {
                    s = SUPERLU_MAX(s, fabs(R[i]) / temp[i]);
                } else if ( temp[i] != 0.0 ) {
                    /* Adding SAFE1 to the numerator guards against
                       spuriously zero residuals (underflow). */
                    s = SUPERLU_MAX(s, (safe1 + fabs(R[i])) /temp[i]);
                }
                /* If temp[i] is exactly 0.0 (computed by PxGSMV), then
                   we know the true residual also must be exactly 0.0. */
            }
            MPI_Allreduce( &s, &berr[j], 1, MPI_DOUBLE, MPI_MAX, grid->comm );

#if ( PRNTlevel>= 1 )
            if ( !iam )
                printf("(%2d) .. Step " IFMT ": berr[j] = %e\n", iam, count, berr[j]);
#endif
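            /* Continue refining while the backward error is still above eps,
               has decreased by at least a factor of two since the previous
               step, and fewer than ITMAX steps have been taken. */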
            if ( berr[j] > eps && berr[j] * 2 <= lstres && count < ITMAX ) {
                /* Compute new dx. */
                pdgstrs(n, LUstruct, ScalePermstruct, grid,
                        dx, m_loc, fst_row, m_loc, 1,
                        SOLVEstruct, stat, info);

                /* Update solution. */
                for (i = 0; i < m_loc; ++i) X_col[i] += dx[i];

                lstres = berr[j];
                ++count;
            } else {
                break;
            }
        } /* end while */

        stat->RefineSteps = count;

    } /* for j ... */

    /* Deallocate storage. */
    SUPERLU_FREE(work);

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Exit pdgsrfs()");
#endif

} /* PDGSRFS */
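For reference, a hedged sketch of how this refinement is usually requested: the driver pdgssvx (Example 3 below) calls pdgsrfs internally when options->IterRefine is not NO, so a caller only sets the option and reads back berr and stat.RefineSteps. The enum spelling DOUBLE follows the documentation above; A, ScalePermstruct, grid, LUstruct, SOLVEstruct, B, ldb, nrhs and berr are assumed to be set up as described in Example 3.

    set_default_options_dist(&options);
    options.IterRefine = DOUBLE;   /* accumulate the residual in double precision */

    pdgssvx(&options, &A, &ScalePermstruct, B, ldb, nrhs, &grid,
            &LUstruct, &SOLVEstruct, berr, &stat, &info);

    if ( info == 0 && !iam )
        printf("Refinement steps = %d, berr[0] = %e\n",
               stat.RefineSteps, berr[0]);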
Example 3
void
pdgssvx(superlu_options_t *options, SuperMatrix *A, 
	ScalePermstruct_t *ScalePermstruct,
	double B[], int ldb, int nrhs, gridinfo_t *grid,
	LUstruct_t *LUstruct, SOLVEstruct_t *SOLVEstruct, double *berr,
	SuperLUStat_t *stat, int *info)
{
/* 
 * -- Distributed SuperLU routine (version 2.2) --
 * Lawrence Berkeley National Lab, Univ. of California Berkeley.
 * November 1, 2007
 * February 20, 2008
 *
 *
 * Purpose
 * =======
 *
 * PDGSSVX solves a system of linear equations A*X=B,
 * by using Gaussian elimination with "static pivoting" to
 * compute the LU factorization of A.
 *
 * Static pivoting is a technique that combines the numerical stability
 * of partial pivoting with the scalability of Cholesky (no pivoting),
 * to run accurately and efficiently on large numbers of processors.
 * See our paper at http://www.nersc.gov/~xiaoye/SuperLU/ for a detailed
 * description of the parallel algorithms.
 *
 * The input matrices A and B are distributed by block rows.
 * Here is a graphical illustration (0-based indexing):
 *
 *                        A                B
 *               0 ---------------       ------
 *                   |           |        |  |
 *                   |           |   P0   |  |
 *                   |           |        |  |
 *                 ---------------       ------
 *        - fst_row->|           |        |  |
 *        |          |           |        |  |
 *       m_loc       |           |   P1   |  |
 *        |          |           |        |  |
 *        -          |           |        |  |
 *                 ---------------       ------
 *                   |    .      |        |. |
 *                   |    .      |        |. |
 *                   |    .      |        |. |
 *                 ---------------       ------
 * 
 * where, fst_row is the row number of the first row,
 *        m_loc is the number of rows local to this processor
 * These are defined in the 'SuperMatrix' structure, see supermatrix.h.
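 *
 * For illustration only (a hedged sketch, not part of this routine): a
 * simple block-row partition of the m global rows over
 * nprocs = NPROW*NPCOL processes could set these fields as
 *
 *     m_loc   = m / nprocs;                   rows owned by this process
 *     fst_row = iam * m_loc;                  first global row it owns
 *     if (iam == nprocs-1)                    last process takes the rest
 *         m_loc = m - m_loc * (nprocs-1);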
 *
 *
 * Here are the options for using this code:
 *
 *   1. Independent of all the other options specified below, the
 *      user must supply
 *
 *      -  B, the matrix of right-hand sides, distributed by block rows,
 *            and its dimensions ldb (local) and nrhs (global)
 *      -  grid, a structure describing the 2D processor mesh
 *      -  options->IterRefine, which determines whether or not to
 *            improve the accuracy of the computed solution using 
 *            iterative refinement
 *
 *      On output, B is overwritten with the solution X.
 *
 *   2. Depending on options->Fact, the user has four options
 *      for solving A*X=B. The standard option is for factoring
 *      A "from scratch". (The other options, described below,
 *      are used when A is sufficiently similar to a previously 
 *      solved problem to save time by reusing part or all of 
 *      the previous factorization.)
 *
 *      -  options->Fact = DOFACT: A is factored "from scratch"
 *
 *      In this case the user must also supply
 *
 *        o  A, the input matrix
 *
 *        as well as the following options to determine what matrix to
 *        factorize.
 *
 *        o  options->Equil,   to specify how to scale the rows and columns
 *                             of A to "equilibrate" it (to try to reduce its
 *                             condition number and so improve the
 *                             accuracy of the computed solution)
 *
 *        o  options->RowPerm, to specify how to permute the rows of A
 *                             (typically to control numerical stability)
 *
 *        o  options->ColPerm, to specify how to permute the columns of A
 *                             (typically to control fill-in and enhance
 *                             parallelism during factorization)
 *
 *        o  options->ReplaceTinyPivot, to specify how to deal with tiny
 *                             pivots encountered during factorization
 *                             (to control numerical stability)
 *
 *      The outputs returned include
 *         
 *        o  ScalePermstruct,  modified to describe how the input matrix A
 *                             was equilibrated and permuted:
 *          .  ScalePermstruct->DiagScale, indicates whether the rows and/or
 *                                         columns of A were scaled
 *          .  ScalePermstruct->R, array of row scale factors
 *          .  ScalePermstruct->C, array of column scale factors
 *          .  ScalePermstruct->perm_r, row permutation vector
 *          .  ScalePermstruct->perm_c, column permutation vector
 *
 *          (part of ScalePermstruct may also need to be supplied on input,
 *           depending on options->RowPerm and options->ColPerm as described 
 *           later).
 *
 *        o  A, the input matrix A overwritten by the scaled and permuted
 *              matrix diag(R)*A*diag(C)*Pc^T, where 
 *              Pc is the column permutation matrix determined by
 *                  ScalePermstruct->perm_c
 *              diag(R) and diag(C) are diagonal scaling matrices determined
 *                  by ScalePermstruct->DiagScale, ScalePermstruct->R and 
 *                  ScalePermstruct->C
 *
 *        o  LUstruct, which contains the L and U factorization of A1 where
 *
 *                A1 = Pc*Pr*diag(R)*A*diag(C)*Pc^T = L*U
 *
 *               (Note that A1 = Pc*Pr*Aout, where Aout is the matrix stored
 *                in A on output.)
 *
 *   3. The second value of options->Fact assumes that a matrix with the same
 *      sparsity pattern as A has already been factored:
 *     
 *      -  options->Fact = SamePattern: A is factored, assuming that it has
 *            the same nonzero pattern as a previously factored matrix. In
 *            this case the algorithm saves time by reusing the previously
 *            computed column permutation vector stored in
 *            ScalePermstruct->perm_c and the "elimination tree" of A
 *            stored in LUstruct->etree
 *
 *      In this case the user must still specify the following options
 *      as before:
 *
 *        o  options->Equil
 *        o  options->RowPerm
 *        o  options->ReplaceTinyPivot
 *
 *      but not options->ColPerm, whose value is ignored. This is because the
 *      previous column permutation from ScalePermstruct->perm_c is used as
 *      input. The user must also supply 
 *
 *        o  A, the input matrix
 *        o  ScalePermstruct->perm_c, the column permutation
 *        o  LUstruct->etree, the elimination tree
 *
 *      The outputs returned include
 *         
 *        o  A, the input matrix A overwritten by the scaled and permuted
 *              matrix as described above
 *        o  ScalePermstruct, modified to describe how the input matrix A was
 *                            equilibrated and row permuted
 *        o  LUstruct, modified to contain the new L and U factors
 *
 *   4. The third value of options->Fact assumes that a matrix B with the same
 *      sparsity pattern as A has already been factored, and where the
 *      row permutation of B can be reused for A. This is useful when A and B
 *      have similar numerical values, so that the same row permutation
 *      will make both factorizations numerically stable. This lets us reuse
 *      all of the previously computed structure of L and U.
 *
 *      -  options->Fact = SamePattern_SameRowPerm: A is factored,
 *            assuming not only the same nonzero pattern as the previously
 *            factored matrix B, but reusing B's row permutation.
 *
 *      In this case the user must still specify the following options
 *      as before:
 *
 *        o  options->Equil
 *        o  options->ReplaceTinyPivot
 *
 *      but not options->RowPerm or options->ColPerm, whose values are
 *      ignored. This is because the permutations from ScalePermstruct->perm_r
 *      and ScalePermstruct->perm_c are used as input.
 *
 *      The user must also supply 
 *
 *        o  A, the input matrix
 *        o  ScalePermstruct->DiagScale, how the previous matrix was row
 *                                       and/or column scaled
 *        o  ScalePermstruct->R, the row scalings of the previous matrix,
 *                               if any
 *        o  ScalePermstruct->C, the column scalings of the previous matrix, 
 *                               if any
 *        o  ScalePermstruct->perm_r, the row permutation of the previous
 *                                    matrix
 *        o  ScalePermstruct->perm_c, the column permutation of the previous 
 *                                    matrix
 *        o  all of LUstruct, the previously computed information about
 *                            L and U (the actual numerical values of L and U
 *                            stored in LUstruct->Llu are ignored)
 *
 *      The outputs returned include
 *         
 *        o  A, the input matrix A overwritten by the scaled and permuted
 *              matrix as described above
 *        o  ScalePermstruct,  modified to describe how the input matrix A was
 *                             equilibrated (thus ScalePermstruct->DiagScale,
 *                             R and C may be modified)
 *        o  LUstruct, modified to contain the new L and U factors
 *
 *   5. The fourth and last value of options->Fact assumes that A is
 *      identical to a matrix that has already been factored on a previous 
 *      call, and reuses its entire LU factorization
 *
 *      -  options->Fact = Factored: A is identical to a previously
 *            factorized matrix, so the entire previous factorization
 *            can be reused.
 *
 *      In this case all the other options mentioned above are ignored
 *      (options->Equil, options->RowPerm, options->ColPerm, 
 *       options->ReplaceTinyPivot)
 *
 *      The user must also supply 
 *
 *        o  A, the unfactored matrix, only in the case that iterative
 *              refinement is to be done (specifically A must be the output
 *              A from the previous call, so that it has been scaled and permuted)
 *        o  all of ScalePermstruct
 *        o  all of LUstruct, including the actual numerical values of
 *           L and U
 *
 *      all of which are unmodified on output.
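 *
 *      As a hedged illustration of the reuse options above (the surrounding
 *      data structures are assumed to be set up as documented), a sequence
 *      of matrices sharing one sparsity pattern could be solved as
 *
 *          set_default_options_dist(&options);
 *          options.Fact = DOFACT;              first matrix: factor from scratch
 *          pdgssvx(&options, &A, &ScalePermstruct, B, ldb, nrhs, &grid,
 *                  &LUstruct, &SOLVEstruct, berr, &stat, &info);
 *
 *          options.Fact = SamePattern;         next matrix, same pattern:
 *          pdgssvx(&options, &A2, &ScalePermstruct, B2, ldb, nrhs, &grid,
 *                  &LUstruct, &SOLVEstruct, berr, &stat, &info);
 *                                              reuses perm_c and LUstruct->etree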
 *         
 * Arguments
 * =========
 *
 * options (input) superlu_options_t* (global)
 *         The structure defines the input parameters to control
 *         how the LU decomposition will be performed.
 *         The following fields should be defined for this structure:
 *         
 *         o Fact (fact_t)
 *           Specifies whether or not the factored form of the matrix
 *           A is supplied on entry, and if not, how the matrix A should
 *           be factorized based on the previous history.
 *
 *           = DOFACT: The matrix A will be factorized from scratch.
 *                 Inputs:  A
 *                          options->Equil, RowPerm, ColPerm, ReplaceTinyPivot
 *                 Outputs: modified A
 *                             (possibly row and/or column scaled and/or 
 *                              permuted)
 *                          all of ScalePermstruct
 *                          all of LUstruct
 *
 *           = SamePattern: the matrix A will be factorized assuming
 *             that a factorization of a matrix with the same sparsity
 *             pattern was performed prior to this one. Therefore, this
 *             factorization will reuse column permutation vector 
 *             ScalePermstruct->perm_c and the elimination tree
 *             LUstruct->etree
 *                 Inputs:  A
 *                          options->Equil, RowPerm, ReplaceTinyPivot
 *                          ScalePermstruct->perm_c
 *                          LUstruct->etree
 *                 Outputs: modified A
 *                             (possibly row and/or column scaled and/or 
 *                              permuted)
 *                          rest of ScalePermstruct (DiagScale, R, C, perm_r)
 *                          rest of LUstruct (GLU_persist, Llu)
 *
 *           = SamePattern_SameRowPerm: the matrix A will be factorized
 *             assuming that a factorization of a matrix with the same
 *             sparsity	pattern and similar numerical values was performed
 *             prior to this one. Therefore, this factorization will reuse
 *             both the row and column scaling factors R and C, the row
 *             and column permutation vectors perm_r and perm_c, and the
 *             distributed data structures set up from the previous symbolic
 *             factorization.
 *                 Inputs:  A
 *                          options->Equil, ReplaceTinyPivot
 *                          all of ScalePermstruct
 *                          all of LUstruct
 *                 Outputs: modified A
 *                             (possibly row and/or column scaled and/or 
 *                              permuted)
 *                          modified LUstruct->Llu
 *           = FACTORED: the matrix A is already factored.
 *                 Inputs:  all of ScalePermstruct
 *                          all of LUstruct
 *
 *         o Equil (yes_no_t)
 *           Specifies whether to equilibrate the system.
 *           = NO:  no equilibration.
 *           = YES: scaling factors are computed to equilibrate the system:
 *                      diag(R)*A*diag(C)*inv(diag(C))*X = diag(R)*B.
 *                  Whether or not the system will be equilibrated depends
 *                  on the scaling of the matrix A, but if equilibration is
 *                  used, A is overwritten by diag(R)*A*diag(C) and B by
 *                  diag(R)*B.
 *
 *         o RowPerm (rowperm_t)
 *           Specifies how to permute rows of the matrix A.
 *           = NATURAL:   use the natural ordering.
 *           = LargeDiag: use the Duff/Koster algorithm to permute rows of
 *                        the original matrix to make the diagonal large
 *                        relative to the off-diagonal.
 *           = MY_PERMR:  use the ordering given in ScalePermstruct->perm_r
 *                        input by the user.
 *           
 *         o ColPerm (colperm_t)
 *           Specifies what type of column permutation to use to reduce fill.
 *           = NATURAL:       natural ordering.
 *           = MMD_AT_PLUS_A: minimum degree ordering on structure of A'+A.
 *           = MMD_ATA:       minimum degree ordering on structure of A'*A.
 *           = MY_PERMC:      the ordering given in ScalePermstruct->perm_c.
 *         
 *         o ReplaceTinyPivot (yes_no_t)
 *           = NO:  do not modify pivots
 *           = YES: replace tiny pivots by sqrt(epsilon)*norm(A) during 
 *                  LU factorization.
 *
 *         o IterRefine (IterRefine_t)
 *           Specifies how to perform iterative refinement.
 *           = NO:     no iterative refinement.
 *           = DOUBLE: accumulate residual in double precision.
 *           = EXTRA:  accumulate residual in extra precision.
 *
 *         NOTE: all options must be identical on all processes when
 *               calling this routine.
 *
 * A (input/output) SuperMatrix* (local)
 *         On entry, matrix A in A*X=B, of dimension (A->nrow, A->ncol).
 *           The number of linear equations is A->nrow. The type of A must be:
 *           Stype = SLU_NR_loc; Dtype = SLU_D; Mtype = SLU_GE.
 *           That is, A is stored in distributed compressed row format.
 *           See supermatrix.h for the definition of 'SuperMatrix'.
 *           This routine only handles square A, however, the LU factorization
 *           routine PDGSTRF can factorize rectangular matrices.
 *         On exit, A may be overwritten by diag(R)*A*diag(C)*Pc^T,
 *           depending on ScalePermstruct->DiagScale and options->ColPerm:
 *             if ScalePermstruct->DiagScale != NOEQUIL, A is overwritten by
 *                diag(R)*A*diag(C).
 *             if options->ColPerm != NATURAL, A is further overwritten by
 *                diag(R)*A*diag(C)*Pc^T.
 *           If all the above conditions are true, the LU decomposition is
 *           performed on the matrix Pc*Pr*diag(R)*A*diag(C)*Pc^T.
 *
 * ScalePermstruct (input/output) ScalePermstruct_t* (global)
 *         The data structure to store the scaling and permutation vectors
 *         describing the transformations performed to the matrix A.
 *         It contains the following fields:
 *
 *         o DiagScale (DiagScale_t)
 *           Specifies the form of equilibration that was done.
 *           = NOEQUIL: no equilibration.
 *           = ROW:     row equilibration, i.e., A was premultiplied by
 *                      diag(R).
 *           = COL:     Column equilibration, i.e., A was postmultiplied
 *                      by diag(C).
 *           = BOTH:    both row and column equilibration, i.e., A was 
 *                      replaced by diag(R)*A*diag(C).
 *           If options->Fact = FACTORED or SamePattern_SameRowPerm,
 *           DiagScale is an input argument; otherwise it is an output
 *           argument.
 *
 *         o perm_r (int*)
 *           Row permutation vector, which defines the permutation matrix Pr;
 *           perm_r[i] = j means row i of A is in position j in Pr*A.
 *           If options->RowPerm = MY_PERMR, or
 *           options->Fact = SamePattern_SameRowPerm, perm_r is an
 *           input argument; otherwise it is an output argument.
 *
 *         o perm_c (int*)
 *           Column permutation vector, which defines the 
 *           permutation matrix Pc; perm_c[i] = j means column i of A is 
 *           in position j in A*Pc.
 *           If options->ColPerm = MY_PERMC or options->Fact = SamePattern
 *           or options->Fact = SamePattern_SameRowPerm, perm_c is an
 *           input argument; otherwise, it is an output argument.
 *           On exit, perm_c may be overwritten by the product of the input
 *           perm_c and a permutation that postorders the elimination tree
 *           of Pc*A'*A*Pc'; perm_c is not changed if the elimination tree
 *           is already in postorder.
 *
 *         o R (double*) dimension (A->nrow)
 *           The row scale factors for A.
 *           If DiagScale = ROW or BOTH, A is multiplied on the left by 
 *                          diag(R).
 *           If DiagScale = NOEQUIL or COL, R is not defined.
 *           If options->Fact = FACTORED or SamePattern_SameRowPerm, R is
 *           an input argument; otherwise, R is an output argument.
 *
 *         o C (double*) dimension (A->ncol)
 *           The column scale factors for A.
 *           If DiagScale = COL or BOTH, A is multiplied on the right by 
 *                          diag(C).
 *           If DiagScale = NOEQUIL or ROW, C is not defined.
 *           If options->Fact = FACTORED or SamePattern_SameRowPerm, C is
 *           an input argument; otherwise, C is an output argument.
 *         
 * B       (input/output) double* (local)
 *         On entry, the right-hand side matrix of dimension (m_loc, nrhs),
 *           where, m_loc is the number of rows stored locally on my
 *           process and is defined in the data structure of matrix A.
 *         On exit, the solution matrix if info = 0;
 *
 * ldb     (input) int (local)
 *         The leading dimension of matrix B.
 *
 * nrhs    (input) int (global)
 *         The number of right-hand sides.
 *         If nrhs = 0, only LU decomposition is performed, the forward
 *         and back substitutions are skipped.
 *
 * grid    (input) gridinfo_t* (global)
 *         The 2D process mesh. It contains the MPI communicator, the number
 *         of process rows (NPROW), the number of process columns (NPCOL),
 *         and my process rank. It is an input argument to all the
 *         parallel routines.
 *         Grid can be initialized by subroutine SUPERLU_GRIDINIT.
 *         See superlu_ddefs.h for the definition of 'gridinfo_t'.
 *
 * LUstruct (input/output) LUstruct_t*
 *         The data structures to store the distributed L and U factors.
 *         It contains the following fields:
 *
 *         o etree (int*) dimension (A->ncol) (global)
 *           Elimination tree of Pc*(A'+A)*Pc' or Pc*A'*A*Pc'.
 *           It is computed in sp_colorder() during the first factorization,
 *           and is reused in the subsequent factorizations of the matrices
 *           with the same nonzero pattern.
 *           On exit of sp_colorder(), the columns of A are permuted so that
 *           the etree is in a certain postorder. This postorder is reflected
 *           in ScalePermstruct->perm_c.
 *           NOTE:
 *           Etree is a vector of parent pointers for a forest whose vertices
 *           are the integers 0 to A->ncol-1; etree[root]==A->ncol.
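 *           (Small illustrative example: for a 3-column matrix whose column
 *            elimination order forms the chain 0 -> 1 -> 2, etree = {1, 2, 3},
 *            and etree[2] == A->ncol marks column 2 as a root.)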
 *
 *         o Glu_persist (Glu_persist_t*) (global)
 *           Global data structure (xsup, supno) replicated on all processes,
 *           describing the supernode partition in the factored matrices
 *           L and U:
 *	       xsup[s] is the leading column of the s-th supernode,
 *             supno[i] is the supernode number to which column i belongs.
 *
 *         o Llu (LocalLU_t*) (local)
 *           The distributed data structures to store L and U factors.
 *           See superlu_ddefs.h for the definition of 'LocalLU_t'.
 *
 * SOLVEstruct (input/output) SOLVEstruct_t*
 *         The data structure to hold the communication pattern used
 *         in the phases of triangular solution and iterative refinement.
 *         This pattern should be initialized only once for repeated solutions.
 *         If options->SolveInitialized = YES, it is an input argument.
 *         If options->SolveInitialized = NO and nrhs != 0, it is an output
 *         argument. See superlu_ddefs.h for the definition of 'SOLVEstruct_t'.
 *
 * berr    (output) double*, dimension (nrhs) (global)
 *         The componentwise relative backward error of each solution   
 *         vector X(j) (i.e., the smallest relative change in   
 *         any element of A or B that makes X(j) an exact solution).
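 *         (In the usual componentwise sense this is computed by the
 *          refinement routine as berr[j] = max_i |r_i| / (|A|*|X(j)| + |B(j)|)_i
 *          with r = B(j) - A*X(j); see pdgsrfs.)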
 *
 * stat   (output) SuperLUStat_t*
 *        Record the statistics on runtime and floating-point operation count.
 *        See util.h for the definition of 'SuperLUStat_t'.
 *
 * info    (output) int*
 *         = 0: successful exit
 *         > 0: if info = i, and i is
 *             <= A->ncol: U(i,i) is exactly zero. The factorization has
 *                been completed, but the factor U is exactly singular,
 *                so the solution could not be computed.
 *             > A->ncol: number of bytes allocated when memory allocation
 *                failure occurred, plus A->ncol.
 *
 * See superlu_ddefs.h for the definitions of various data types.
 *
 */
    NRformat_loc *Astore;
    SuperMatrix GA;      /* Global A in NC format */
    NCformat *GAstore;
    double   *a_GA;
    SuperMatrix GAC;      /* Global A in NCP format (add n end pointers) */
    NCPformat *GACstore;
    Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
    Glu_freeable_t *Glu_freeable;
            /* The nonzero structures of L and U factors, which are
	       replicated on all processors.
	           (lsub, xlsub) contains the compressed subscript of
		                 supernodes in L.
          	   (usub, xusub) contains the compressed subscript of
		                 nonzero segments in U.
	      If options->Fact != SamePattern_SameRowPerm, they are 
	      computed by SYMBFACT routine, and then used by PDDISTRIBUTE
	      routine. They will be freed after PDDISTRIBUTE routine.
	      If options->Fact == SamePattern_SameRowPerm, these
	      structures are not used.                                  */
    fact_t   Fact;
    double   *a;
    int_t    *colptr, *rowind;
    int_t    *perm_r; /* row permutations from partial pivoting */
    int_t    *perm_c; /* column permutation vector */
    int_t    *etree;  /* elimination tree */
    int_t    *rowptr, *colind;  /* Local A in NR*/
    int_t    *rowind_loc, *colptr_loc;
    int_t    colequ, Equil, factored, job, notran, rowequ, need_value;
    int_t    i, iinfo, j, irow, m, n, nnz, permc_spec, dist_mem_use;
    int_t    nnz_loc, m_loc, fst_row, icol;
    int      iam;
    int      ldx;  /* LDA for matrix X (local). */
    char     equed[1], norm[1];
    double   *C, *R, *C1, *R1, amax, anorm, colcnd, rowcnd;
    double   *X, *b_col, *b_work, *x_col;
    double   t;
    static mem_usage_t num_mem_usage, symb_mem_usage;
#if ( PRNTlevel>= 2 )
    double   dmin, dsum, dprod;
#endif
    int_t procs;

    /* Structures needed for parallel symbolic factorization */
    int_t *sizes, *fstVtxSep, parSymbFact;
    int   noDomains, nprocs_num;
    MPI_Comm symb_comm; /* communicator for symbolic factorization */
    int   col, key; /* parameters for creating a new communicator */
    Pslu_freeable_t Pslu_freeable;
    float  flinfo;

    /* Initialization. */
    m       = A->nrow;
    n       = A->ncol;
    Astore  = (NRformat_loc *) A->Store;
    nnz_loc = Astore->nnz_loc;
    m_loc   = Astore->m_loc;
    fst_row = Astore->fst_row;
    a       = (double *) Astore->nzval;
    rowptr  = Astore->rowptr;
    colind  = Astore->colind;
    sizes   = NULL;
    fstVtxSep = NULL;
    symb_comm = MPI_COMM_NULL;

    /* Test the input parameters. */
    *info = 0;
    Fact = options->Fact;
    if ( Fact < 0 || Fact > FACTORED )
	*info = -1;
    else if ( options->RowPerm < 0 || options->RowPerm > MY_PERMR )
	*info = -1;
    else if ( options->ColPerm < 0 || options->ColPerm > MY_PERMC )
	*info = -1;
    else if ( options->IterRefine < 0 || options->IterRefine > EXTRA )
	*info = -1;
    else if ( options->IterRefine == EXTRA ) {
	*info = -1;
	fprintf(stderr, "Extra precise iterative refinement yet to support.");
    } else if ( A->nrow != A->ncol || A->nrow < 0 || A->Stype != SLU_NR_loc
		|| A->Dtype != SLU_D || A->Mtype != SLU_GE )
	*info = -2;
    else if ( ldb < m_loc )
	*info = -5;
    else if ( nrhs < 0 )
	*info = -6;
    if ( *info ) {
	i = -(*info);
	pxerbla("pdgssvx", grid, -*info);
	return;
    }

    factored = (Fact == FACTORED);
    Equil = (!factored && options->Equil == YES);
    notran = (options->Trans == NOTRANS);
    iam = grid->iam;
    job = 5;
    if ( factored || (Fact == SamePattern_SameRowPerm && Equil) ) {
	rowequ = (ScalePermstruct->DiagScale == ROW) ||
	         (ScalePermstruct->DiagScale == BOTH);
	colequ = (ScalePermstruct->DiagScale == COL) ||
	         (ScalePermstruct->DiagScale == BOTH);
    } else rowequ = colequ = FALSE;

    /* The following arrays are replicated on all processes. */
    perm_r = ScalePermstruct->perm_r;
    perm_c = ScalePermstruct->perm_c;
    etree = LUstruct->etree;
    R = ScalePermstruct->R;
    C = ScalePermstruct->C;
    /********/

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Enter pdgssvx()");
#endif

    /* Not factored & ask for equilibration */
    if ( Equil && Fact != SamePattern_SameRowPerm ) { 
	/* Allocate storage if not done so before. */
	switch ( ScalePermstruct->DiagScale ) {
	    case NOEQUIL:
		if ( !(R = (double *) doubleMalloc_dist(m)) )
		    ABORT("Malloc fails for R[].");
	        if ( !(C = (double *) doubleMalloc_dist(n)) )
		    ABORT("Malloc fails for C[].");
		ScalePermstruct->R = R;
		ScalePermstruct->C = C;
		break;
	    case ROW: 
	        if ( !(C = (double *) doubleMalloc_dist(n)) )
		    ABORT("Malloc fails for C[].");
		ScalePermstruct->C = C;
		break;
	    case COL: 
		if ( !(R = (double *) doubleMalloc_dist(m)) )
		    ABORT("Malloc fails for R[].");
		ScalePermstruct->R = R;
		break;
	}
    }

    /* ------------------------------------------------------------
       Diagonal scaling to equilibrate the matrix.
       ------------------------------------------------------------*/
    if ( Equil ) {
#if ( DEBUGlevel>=1 )
	CHECK_MALLOC(iam, "Enter equil");
#endif
	t = SuperLU_timer_();

	if ( Fact == SamePattern_SameRowPerm ) {
	    /* Reuse R and C. */
	    switch ( ScalePermstruct->DiagScale ) {
	      case NOEQUIL:
		break;
	      case ROW:
		irow = fst_row;
		for (j = 0; j < m_loc; ++j) {
		    for (i = rowptr[j]; i < rowptr[j+1]; ++i) {
			a[i] *= R[irow];       /* Scale rows. */
		    }
		    ++irow;
		}
		break;
	      case COL:
		for (j = 0; j < m_loc; ++j)
		    for (i = rowptr[j]; i < rowptr[j+1]; ++i){
		        icol = colind[i];
			a[i] *= C[icol];          /* Scale columns. */
		    }
		break;
	      case BOTH:
		irow = fst_row;
		for (j = 0; j < m_loc; ++j) {
		    for (i = rowptr[j]; i < rowptr[j+1]; ++i) {
			icol = colind[i];
			a[i] *= R[irow] * C[icol]; /* Scale rows and cols. */
		    }
		    ++irow;
		}
	        break;
	    }
	} else { /* Compute R & C from scratch */
            /* Compute the row and column scalings. */
	    pdgsequ(A, R, C, &rowcnd, &colcnd, &amax, &iinfo, grid);

	    /* Equilibrate matrix A if it is badly-scaled. */
	    pdlaqgs(A, R, C, rowcnd, colcnd, amax, equed);

	    if ( lsame_(equed, "R") ) {
		ScalePermstruct->DiagScale = rowequ = ROW;
	    } else if ( lsame_(equed, "C") ) {
		ScalePermstruct->DiagScale = colequ = COL;
	    } else if ( lsame_(equed, "B") ) {
		ScalePermstruct->DiagScale = BOTH;
		rowequ = ROW;
		colequ = COL;
	    } else ScalePermstruct->DiagScale = NOEQUIL;

#if ( PRNTlevel>=1 )
	    if ( !iam ) {
		printf(".. equilibrated? *equed = %c\n", *equed);
		/*fflush(stdout);*/
	    }
#endif
	} /* if Fact ... */

	stat->utime[EQUIL] = SuperLU_timer_() - t;
#if ( DEBUGlevel>=1 )
	CHECK_MALLOC(iam, "Exit equil");
#endif
    } /* if Equil ... */

    if ( !factored ) { /* Skip this if already factored. */
        /*
         * Gather A from the distributed compressed row format to
         * global A in compressed column format.
         * Numerical values are gathered only when a row permutation
         * for large diagonal is sought after.
         */
	if ( Fact != SamePattern_SameRowPerm ) {
            need_value = (options->RowPerm == LargeDiag);
            pdCompRow_loc_to_CompCol_global(need_value, A, grid, &GA);
            GAstore = (NCformat *) GA.Store;
            colptr = GAstore->colptr;
            rowind = GAstore->rowind;
            nnz = GAstore->nnz;
            if ( need_value ) a_GA = (double *) GAstore->nzval;
            else assert(GAstore->nzval == NULL);
	}

        /* ------------------------------------------------------------
           Find the row permutation for A.
           ------------------------------------------------------------*/
        if ( options->RowPerm != NO ) {
	    t = SuperLU_timer_();
	    if ( Fact != SamePattern_SameRowPerm ) {
	        if ( options->RowPerm == MY_PERMR ) { /* Use user's perm_r. */
	            /* Permute the global matrix GA for symbfact() */
	            for (i = 0; i < colptr[n]; ++i) {
	            	irow = rowind[i]; 
		    	rowind[i] = perm_r[irow];
	            }
	        } else { /* options->RowPerm == LargeDiag */
	            /* Get a new perm_r[] */
	            if ( job == 5 ) {
		        /* Allocate storage for scaling factors. */
		        if ( !(R1 = doubleMalloc_dist(m)) )
		            ABORT("SUPERLU_MALLOC fails for R1[]");
		    	if ( !(C1 = doubleMalloc_dist(n)) )
		            ABORT("SUPERLU_MALLOC fails for C1[]");
	            }

	            if ( !iam ) {
		        /* Process 0 finds a row permutation */
		        dldperm(job, m, nnz, colptr, rowind, a_GA,
		                perm_r, R1, C1);
		
		        MPI_Bcast( perm_r, m, mpi_int_t, 0, grid->comm );
		        if ( job == 5 && Equil ) {
		            MPI_Bcast( R1, m, MPI_DOUBLE, 0, grid->comm );
		            MPI_Bcast( C1, n, MPI_DOUBLE, 0, grid->comm );
		        }
	            } else {
		        MPI_Bcast( perm_r, m, mpi_int_t, 0, grid->comm );
		        if ( job == 5 && Equil ) {
		            MPI_Bcast( R1, m, MPI_DOUBLE, 0, grid->comm );
		            MPI_Bcast( C1, n, MPI_DOUBLE, 0, grid->comm );
		        }
	            }

#if ( PRNTlevel>=2 )
	            dmin = dlamch_("Overflow");
	            dsum = 0.0;
	            dprod = 1.0;
#endif
	            if ( job == 5 ) {
		        if ( Equil ) {
		            for (i = 0; i < n; ++i) {
			        R1[i] = exp(R1[i]);
			        C1[i] = exp(C1[i]);
		            }

		            /* Scale the distributed matrix */
		            irow = fst_row;
		            for (j = 0; j < m_loc; ++j) {
			        for (i = rowptr[j]; i < rowptr[j+1]; ++i) {
			            icol = colind[i];
			            a[i] *= R1[irow] * C1[icol];
#if ( PRNTlevel>=2 )
			            if ( perm_r[irow] == icol ) { /* New diagonal */
			              if ( job == 2 || job == 3 )
				        dmin = SUPERLU_MIN(dmin, fabs(a[i]));
			              else if ( job == 4 )
				        dsum += fabs(a[i]);
			              else if ( job == 5 )
				        dprod *= fabs(a[i]);
			            }
#endif
			        }
			        ++irow;
		            }

		            /* Multiply together the scaling factors. */
		            if ( rowequ ) for (i = 0; i < m; ++i) R[i] *= R1[i];
		            else for (i = 0; i < m; ++i) R[i] = R1[i];
		            if ( colequ ) for (i = 0; i < n; ++i) C[i] *= C1[i];
		            else for (i = 0; i < n; ++i) C[i] = C1[i];
		    
		            ScalePermstruct->DiagScale = BOTH;
		            rowequ = colequ = 1;

		        } /* end Equil */

                        /* Now permute global A to prepare for symbfact() */
                        for (j = 0; j < n; ++j) {
		            for (i = colptr[j]; i < colptr[j+1]; ++i) {
	                        irow = rowind[i];
		                rowind[i] = perm_r[irow];
		            }
		        }
		        SUPERLU_FREE (R1);
		        SUPERLU_FREE (C1);
	            } else { /* job = 2,3,4 */
		        for (j = 0; j < n; ++j) {
		            for (i = colptr[j]; i < colptr[j+1]; ++i) {
			        irow = rowind[i];
			        rowind[i] = perm_r[irow];
		            } /* end for i ... */
		        } /* end for j ... */
	            } /* end else job ... */

#if ( PRNTlevel>=2 )
	            if ( job == 2 || job == 3 ) {
		        if ( !iam ) printf("\tsmallest diagonal %e\n", dmin);
	            } else if ( job == 4 ) {
		        if ( !iam ) printf("\tsum of diagonal %e\n", dsum);
	            } else if ( job == 5 ) {
		        if ( !iam ) printf("\t product of diagonal %e\n", dprod);
	            }
#endif
	    
                } /* end if options->RowPerm ... */

	        t = SuperLU_timer_() - t;
	        stat->utime[ROWPERM] = t;
#if ( PRNTlevel>=1 )
	        if ( !iam ) printf(".. LDPERM job %d\t time: %.2f\n", job, t);
#endif
            } /* end if Fact ... */
        } else { /* options->RowPerm == NOROWPERM */
            for (i = 0; i < m; ++i) perm_r[i] = i;
        }

#if ( DEBUGlevel>=2 )
        if ( !iam ) PrintInt10("perm_r",  m, perm_r);
#endif
    } /* end if (!factored) */

    if ( !factored || options->IterRefine ) {
	/* Compute norm(A), which will be used to adjust small diagonal. */
	if ( notran ) *(unsigned char *)norm = '1';
	else *(unsigned char *)norm = 'I';
	anorm = pdlangs(norm, A, grid);
#if ( PRNTlevel>=1 )
	if ( !iam ) printf(".. anorm %e\n", anorm);
#endif
    }

    /* ------------------------------------------------------------
       Perform the LU factorization.
       ------------------------------------------------------------*/
    if ( !factored ) {
	t = SuperLU_timer_();
	/*
	 * Get column permutation vector perm_c[], according to permc_spec:
	 *   permc_spec = NATURAL:  natural ordering 
	 *   permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A
	 *   permc_spec = MMD_ATA:  minimum degree on structure of A'*A
	 *   permc_spec = METIS_AT_PLUS_A: METIS on structure of A'+A
	 *   permc_spec = PARMETIS: parallel METIS on structure of A'+A
	 *   permc_spec = MY_PERMC: the ordering already supplied in perm_c[]
	 */
	permc_spec = options->ColPerm;
	parSymbFact = options->ParSymbFact;

#if ( PRNTlevel>=1 )
	if ( parSymbFact && permc_spec != PARMETIS )
	    if ( !iam ) printf(".. Parallel symbolic factorization"
			       " only works wth ParMetis!\n");
#endif

	if ( parSymbFact == YES || permc_spec == PARMETIS ) {	
	    nprocs_num = grid->nprow * grid->npcol;
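	    /* noDomains below is the largest power of two that does not
	       exceed the number of processes; the parallel symbolic
	       factorization runs on that many processes. */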
  	    noDomains = (int) ( pow(2, ((int) LOG2( nprocs_num ))));

	    /* create a new communicator for the first noDomains processors in
	       grid->comm */
	    key = iam;
    	    if (iam < noDomains) col = 0;
	    else col = MPI_UNDEFINED;
	    MPI_Comm_split (grid->comm, col, key, &symb_comm );

	    permc_spec = PARMETIS; /* only works with PARMETIS */
        }

	if ( permc_spec != MY_PERMC && Fact == DOFACT ) {
	  if ( permc_spec == PARMETIS ) {
	      /* Get column permutation vector in perm_c.                    *
	       * This routine takes as input the distributed input matrix A  *
	       * and does not modify it.  It also allocates memory for       *
	       * sizes[] and fstVtxSep[] arrays, that contain information    *
	       * on the separator tree computed by ParMETIS.                 */
	      flinfo = get_perm_c_parmetis(A, perm_r, perm_c, nprocs_num,
                                  	   noDomains, &sizes, &fstVtxSep,
                                           grid, &symb_comm);
	      if (flinfo > 0)
	          ABORT("ERROR in get perm_c parmetis.");
	  } else {
	      get_perm_c_dist(iam, permc_spec, &GA, perm_c);
          }
        }

	stat->utime[COLPERM] = SuperLU_timer_() - t;

	/* Compute the elimination tree of Pc*(A'+A)*Pc' or Pc*A'*A*Pc'
	   (a.k.a. column etree), depending on the choice of ColPerm.
	   Adjust perm_c[] to be consistent with a postorder of etree.
	   Permute columns of A to form A*Pc'. */
	if ( Fact != SamePattern_SameRowPerm ) {
	    if ( parSymbFact == NO ) {
	        int_t *GACcolbeg, *GACcolend, *GACrowind;

	        sp_colorder(options, &GA, perm_c, etree, &GAC); 

	        /* Form Pc*A*Pc' to preserve the diagonal of the matrix GAC. */
	        GACstore = (NCPformat *) GAC.Store;
	        GACcolbeg = GACstore->colbeg;
	        GACcolend = GACstore->colend;
	        GACrowind = GACstore->rowind;
	        for (j = 0; j < n; ++j) {
	            for (i = GACcolbeg[j]; i < GACcolend[j]; ++i) {
		        irow = GACrowind[i];
		        GACrowind[i] = perm_c[irow];
	            }
	        }

	        /* Perform a symbolic factorization on Pc*Pr*A*Pc' and set up
                   the nonzero data structures for L & U. */
#if ( PRNTlevel>=1 ) 
                if ( !iam )
		  printf(".. symbfact(): relax %4d, maxsuper %4d, fill %4d\n",
		          sp_ienv_dist(2), sp_ienv_dist(3), sp_ienv_dist(6));
#endif
  	        t = SuperLU_timer_();
	        if ( !(Glu_freeable = (Glu_freeable_t *)
		      SUPERLU_MALLOC(sizeof(Glu_freeable_t))) )
		    ABORT("Malloc fails for Glu_freeable.");

	    	/* Every process does this. */
	    	iinfo = symbfact(options, iam, &GAC, perm_c, etree, 
			     	 Glu_persist, Glu_freeable);

	    	stat->utime[SYMBFAC] = SuperLU_timer_() - t;
	    	if ( iinfo < 0 ) { /* Successful return */
		    QuerySpace_dist(n, -iinfo, Glu_freeable, &symb_mem_usage);
#if ( PRNTlevel>=1 )
		    if ( !iam ) {
		    	printf("\tNo of supers %ld\n", Glu_persist->supno[n-1]+1);
		    	printf("\tSize of G(L) %ld\n", Glu_freeable->xlsub[n]);
		    	printf("\tSize of G(U) %ld\n", Glu_freeable->xusub[n]);
		    	printf("\tint %d, short %d, float %d, double %d\n", 
			       sizeof(int_t), sizeof(short), sizeof(float),
			       sizeof(double));
		    	printf("\tSYMBfact (MB):\tL\\U %.2f\ttotal %.2f\texpansions %d\n",
			   	symb_mem_usage.for_lu*1e-6, 
			   	symb_mem_usage.total*1e-6,
			   	symb_mem_usage.expansions);
		    }
#endif
	    	} else {
		    if ( !iam ) {
		        fprintf(stderr,"symbfact() error returns %d\n",iinfo);
		    	exit(-1);
		    }
	        }
	    } /* end if serial symbolic factorization */
	    else {  /* parallel symbolic factorization */
	    	t = SuperLU_timer_();
	    	flinfo = symbfact_dist(nprocs_num, noDomains, A, perm_c, perm_r,
				       sizes, fstVtxSep, &Pslu_freeable, 
				       &(grid->comm), &symb_comm,
				       &symb_mem_usage); 
	    	stat->utime[SYMBFAC] = SuperLU_timer_() - t;
	    	if (flinfo > 0) 
	      	    ABORT("Insufficient memory for parallel symbolic factorization.");
	    }
	} /* end if Fact ... */

#if ( PRNTlevel>=1 )
	if (!iam) printf("\tSYMBfact time: %.2f\n", stat->utime[SYMBFAC]);
#endif
        if (sizes) SUPERLU_FREE (sizes);
        if (fstVtxSep) SUPERLU_FREE (fstVtxSep);
	if (symb_comm != MPI_COMM_NULL)
	  MPI_Comm_free (&symb_comm); 

	if (parSymbFact == NO || Fact == SamePattern_SameRowPerm) {
  	    /* Apply column permutation to the original distributed A */
	    for (j = 0; j < nnz_loc; ++j) colind[j] = perm_c[colind[j]];

	    /* Distribute Pc*Pr*diag(R)*A*diag(C)*Pc' into L and U storage. 
	       NOTE: the row permutation Pc*Pr is applied internally in the
  	       distribution routine. */
	    t = SuperLU_timer_();
	    dist_mem_use = pddistribute(Fact, n, A, ScalePermstruct,
                                      Glu_freeable, LUstruct, grid);
	    stat->utime[DIST] = SuperLU_timer_() - t;

  	    /* Deallocate storage used in symbolic factorization. */
	    if ( Fact != SamePattern_SameRowPerm ) {
	        iinfo = symbfact_SubFree(Glu_freeable);
	        SUPERLU_FREE(Glu_freeable);
	    }
	} else {
	    /* Distribute Pc*Pr*diag(R)*A*diag(C)*Pc' into L and U storage. 
	       NOTE: the row permutation Pc*Pr is applied internally in the
	       distribution routine. */
	    /* Apply column permutation to the original distributed A */
	    for (j = 0; j < nnz_loc; ++j) colind[j] = perm_c[colind[j]];

    	    t = SuperLU_timer_();
	    dist_mem_use = ddist_psymbtonum(Fact, n, A, ScalePermstruct,
		  			   &Pslu_freeable, LUstruct, grid);
	    if (dist_mem_use > 0)
	        ABORT ("Not enough memory available for dist_psymbtonum\n");
	    stat->utime[DIST] = SuperLU_timer_() - t;
	}

#if ( PRNTlevel>=1 )
	if (!iam) printf ("\tDISTRIBUTE time    %8.2f\n", stat->utime[DIST]);
#endif

	/* Perform numerical factorization in parallel. */
	t = SuperLU_timer_();
	pdgstrf(options, m, n, anorm, LUstruct, grid, stat, info);
	stat->utime[FACT] = SuperLU_timer_() - t;

#if ( PRNTlevel>=1 )
	{
	    int_t TinyPivots;
	    float for_lu, total, max, avg, temp;
	    dQuerySpace_dist(n, LUstruct, grid, &num_mem_usage);
	    MPI_Reduce( &num_mem_usage.for_lu, &for_lu,
		       1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
	    MPI_Reduce( &num_mem_usage.total, &total,
		       1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
	    temp = SUPERLU_MAX(symb_mem_usage.total,
			       symb_mem_usage.for_lu +
			       (float)dist_mem_use + num_mem_usage.for_lu);
	    if (parSymbFact == TRUE)
	      /* The memory used in the redistribution routine
		 includes the memory used for storing the symbolic
		 structure and the memory allocated for numerical
		 factorization */
	      temp = SUPERLU_MAX(symb_mem_usage.total,
				 (float)dist_mem_use);
	    temp = SUPERLU_MAX(temp, num_mem_usage.total);
	    MPI_Reduce( &temp, &max,
		       1, MPI_FLOAT, MPI_MAX, 0, grid->comm );
	    MPI_Reduce( &temp, &avg,
		       1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
	    MPI_Allreduce( &stat->TinyPivots, &TinyPivots, 1, mpi_int_t,
			  MPI_SUM, grid->comm );
	    stat->TinyPivots = TinyPivots;
	    if ( !iam ) {
		printf("\tNUMfact (MB) all PEs:\tL\\U\t%.2f\tall\t%.2f\n",
		       for_lu*1e-6, total*1e-6);
		printf("\tAll space (MB):"
		       "\t\ttotal\t%.2f\tAvg\t%.2f\tMax\t%.2f\n",
		       avg*1e-6, avg/grid->nprow/grid->npcol*1e-6, max*1e-6);
		printf("\tNumber of tiny pivots: %10d\n", stat->TinyPivots);
	    }
	}
#endif
    
        /* Destroy GA */
        if ( Fact != SamePattern_SameRowPerm )
            Destroy_CompCol_Matrix_dist(&GA);
    } /* end if (!factored) */
	
    /* ------------------------------------------------------------
       Compute the solution matrix X.
       ------------------------------------------------------------*/
    if ( nrhs ) {

	if ( !(b_work = doubleMalloc_dist(n)) )
	    ABORT("Malloc fails for b_work[]");

	/* ------------------------------------------------------------
	   Scale the right-hand side if equilibration was performed. 
	   ------------------------------------------------------------*/
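	/* The factored matrix is (a permutation of) diag(R)*A*diag(C), so for
	   the non-transposed system b is scaled by R here; the computed
	   solution is rescaled by C after the triangular solves (see the
	   transformation following pdgstrs/pdgsrfs below). For the transposed
	   system the roles of R and C are exchanged. */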
	if ( notran ) {
	    if ( rowequ ) {
		b_col = B;
		for (j = 0; j < nrhs; ++j) {
		    irow = fst_row;
		    for (i = 0; i < m_loc; ++i) {
		        b_col[i] *= R[irow];
		        ++irow;
		    }
		    b_col += ldb;
		}
	    }
	} else if ( colequ ) {
	    b_col = B;
	    for (j = 0; j < nrhs; ++j) {
	        irow = fst_row;
		for (i = 0; i < m_loc; ++i) {
		    b_col[i] *= C[irow];
		    ++irow;
		}
		b_col += ldb;
	    }
	}

	/* Save a copy of the right-hand side. */
	ldx = ldb;
	if ( !(X = doubleMalloc_dist(((size_t)ldx) * nrhs)) )
	    ABORT("Malloc fails for X[]");
	x_col = X;  b_col = B;
	for (j = 0; j < nrhs; ++j) {
	    for (i = 0; i < m_loc; ++i) x_col[i] = b_col[i];
	    x_col += ldx;  b_col += ldb;
	}

	/* ------------------------------------------------------------
	   Solve the linear system.
	   ------------------------------------------------------------*/
	if ( options->SolveInitialized == NO ) {
	    dSolveInit(options, A, perm_r, perm_c, nrhs, LUstruct, grid,
		       SOLVEstruct);
	}

	pdgstrs(n, LUstruct, ScalePermstruct, grid, X, m_loc, 
		fst_row, ldb, nrhs, SOLVEstruct, stat, info);

	/* ------------------------------------------------------------
	   Use iterative refinement to improve the computed solution and
	   compute error bounds and backward error estimates for it.
	   ------------------------------------------------------------*/
	if ( options->IterRefine ) {
	    /* Improve the solution by iterative refinement. */
	    int_t *it, *colind_gsmv = SOLVEstruct->A_colind_gsmv;
	    SOLVEstruct_t *SOLVEstruct1;  /* Used by refinement. */

	    t = SuperLU_timer_();
	    if ( options->RefineInitialized == NO || Fact == DOFACT ) {
	        /* All these cases need to re-initialize gsmv structure */
	        if ( options->RefineInitialized )
		    pdgsmv_finalize(SOLVEstruct->gsmv_comm);
	        pdgsmv_init(A, SOLVEstruct->row_to_proc, grid,
			    SOLVEstruct->gsmv_comm);
	       
                /* Save a copy of the transformed local col indices
		   in colind_gsmv[]. */
	        if ( colind_gsmv ) SUPERLU_FREE(colind_gsmv);
	        if ( !(it = intMalloc_dist(nnz_loc)) )
		    ABORT("Malloc fails for colind_gsmv[]");
	        colind_gsmv = SOLVEstruct->A_colind_gsmv = it;
	        for (i = 0; i < nnz_loc; ++i) colind_gsmv[i] = colind[i];
	        options->RefineInitialized = YES;
	    } else if ( Fact == SamePattern ||
			Fact == SamePattern_SameRowPerm ) {
	        double at;
	        int_t k, jcol, p;
	        /* Swap to beginning the part of A corresponding to the
		   local part of X, as was done in pdgsmv_init() */
	        for (i = 0; i < m_loc; ++i) { /* Loop through each row */
		    k = rowptr[i];
		    for (j = rowptr[i]; j < rowptr[i+1]; ++j) {
		        jcol = colind[j];
		        p = SOLVEstruct->row_to_proc[jcol];
		        if ( p == iam ) { /* Local */
		            at = a[k]; a[k] = a[j]; a[j] = at;
		            ++k;
		        }
		    }
	        }
	      
	        /* Re-use the local col indices of A obtained from the
		   previous call to pdgsmv_init() */
	        for (i = 0; i < nnz_loc; ++i) colind[i] = colind_gsmv[i];
	    }

	    if ( nrhs == 1 ) { /* Use the existing solve structure */
	        SOLVEstruct1 = SOLVEstruct;
	    } else { /* For nrhs > 1, since refinement is performed for RHS
			one at a time, the communication structure for pdgstrs
			is different than the solve with nrhs RHS. 
			So we use SOLVEstruct1 for the refinement step.
		     */
	        if ( !(SOLVEstruct1 = (SOLVEstruct_t *) 
		                       SUPERLU_MALLOC(sizeof(SOLVEstruct_t))) )
		    ABORT("Malloc fails for SOLVEstruct1");
	        /* Copy the same stuff */
	        SOLVEstruct1->row_to_proc = SOLVEstruct->row_to_proc;
	        SOLVEstruct1->inv_perm_c = SOLVEstruct->inv_perm_c;
	        SOLVEstruct1->num_diag_procs = SOLVEstruct->num_diag_procs;
	        SOLVEstruct1->diag_procs = SOLVEstruct->diag_procs;
	        SOLVEstruct1->diag_len = SOLVEstruct->diag_len;
	        SOLVEstruct1->gsmv_comm = SOLVEstruct->gsmv_comm;
	        SOLVEstruct1->A_colind_gsmv = SOLVEstruct->A_colind_gsmv;
		
		/* Initialize the *gstrs_comm for 1 RHS. */
		if ( !(SOLVEstruct1->gstrs_comm = (pxgstrs_comm_t *)
		       SUPERLU_MALLOC(sizeof(pxgstrs_comm_t))) )
		    ABORT("Malloc fails for gstrs_comm[]");
		pxgstrs_init(n, m_loc, 1, fst_row, perm_r, perm_c, grid, 
			     Glu_persist, SOLVEstruct1);
	    }

	    pdgsrfs(n, A, anorm, LUstruct, ScalePermstruct, grid,
		    B, ldb, X, ldx, nrhs, SOLVEstruct1, berr, stat, info);

            /* Deallocate the storage associated with SOLVEstruct1 */
	    if ( nrhs > 1 ) {
	        pxgstrs_finalize(SOLVEstruct1->gstrs_comm);
	        SUPERLU_FREE(SOLVEstruct1);
	    }

	    stat->utime[REFINE] = SuperLU_timer_() - t;
	}

	/* Permute the solution matrix B <= Pc'*X. */
	pdPermute_Dense_Matrix(fst_row, m_loc, SOLVEstruct->row_to_proc,
			       SOLVEstruct->inv_perm_c,
			       X, ldx, B, ldb, nrhs, grid);
#if ( DEBUGlevel>=2 )
	printf("\n (%d) .. After pdPermute_Dense_Matrix(): b =\n", iam);
	for (i = 0; i < m_loc; ++i)
	  printf("\t(%d)\t%4d\t%.10f\n", iam, i+fst_row, B[i]);
#endif
	
	/* Transform the solution matrix X to a solution of the original
	   system before the equilibration. */
	if ( notran ) {
	    if ( colequ ) {
		b_col = B;
		for (j = 0; j < nrhs; ++j) {
		    irow = fst_row;
		    for (i = 0; i < m_loc; ++i) {
		        b_col[i] *= C[irow];
		        ++irow;
		    }
		    b_col += ldb;
		}
	    }
	} else if ( rowequ ) {
	    b_col = B;
	    for (j = 0; j < nrhs; ++j) {
	        irow = fst_row;
		for (i = 0; i < m_loc; ++i) {
		    b_col[i] *= R[irow];
		    ++irow;
		}
		b_col += ldb;
	    }
	}

	SUPERLU_FREE(b_work);
	SUPERLU_FREE(X);

    } /* end if nrhs != 0 */

#if ( PRNTlevel>=1 )
    if ( !iam ) printf(".. DiagScale = %d\n", ScalePermstruct->DiagScale);
#endif

    /* Deallocate R and/or C if it was not used. */
    if ( Equil && Fact != SamePattern_SameRowPerm ) {
	switch ( ScalePermstruct->DiagScale ) {
	    case NOEQUIL:
	        SUPERLU_FREE(R);
		SUPERLU_FREE(C);
		break;
	    case ROW: 
		SUPERLU_FREE(C);
		break;
	    case COL: 
		SUPERLU_FREE(R);
		break;
	}
    }
    if ( !factored && Fact != SamePattern_SameRowPerm && !parSymbFact)
 	Destroy_CompCol_Permuted_dist(&GAC);

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Exit pdgssvx()");
#endif

}
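For orientation, the following is a minimal calling sketch for the driver
documented above. It is not part of the library source: the helper routines
shown (superlu_gridinit, dCreate_CompRowLoc_Matrix_dist, set_default_options_dist,
ScalePermstructInit, LUstructInit, PStatInit and their Free/Destroy counterparts)
exist in SuperLU_DIST, but their exact signatures vary between releases, and the
matrix arrays (nzval, colind, rowptr) and the distribution parameters are
placeholders that the caller must supply.

#include <mpi.h>
#include "superlu_ddefs.h"

/* Sketch only: error handling and matrix setup are reduced to a minimum. */
void solve_example(int m, int n, int_t nnz_loc, int m_loc, int fst_row,
                   double *nzval, int_t *colind, int_t *rowptr,
                   double *b, int ldb, int nrhs, int nprow, int npcol)
{
    superlu_options_t options;
    SuperMatrix A;
    ScalePermstruct_t ScalePermstruct;
    LUstruct_t LUstruct;
    SOLVEstruct_t SOLVEstruct;
    SuperLUStat_t stat;
    gridinfo_t grid;
    double *berr;
    int info;

    /* 2D process grid on MPI_COMM_WORLD. */
    superlu_gridinit(MPI_COMM_WORLD, nprow, npcol, &grid);

    /* Distributed compressed-row matrix holding this process's m_loc rows. */
    dCreate_CompRowLoc_Matrix_dist(&A, m, n, nnz_loc, m_loc, fst_row,
                                   nzval, colind, rowptr,
                                   SLU_NR_loc, SLU_D, SLU_GE);

    set_default_options_dist(&options);   /* DOFACT, Equil, LargeDiag, ... */
    ScalePermstructInit(m, n, &ScalePermstruct);
    LUstructInit(n, &LUstruct);            /* takes (m, n, ...) in some releases */
    PStatInit(&stat);
    if ( !(berr = doubleMalloc_dist(nrhs)) ) ABORT("Malloc fails for berr[].");

    /* Factor A and solve A*X = B; B is overwritten with the solution. */
    pdgssvx(&options, &A, &ScalePermstruct, b, ldb, nrhs, &grid,
            &LUstruct, &SOLVEstruct, berr, &stat, &info);
    /* info > n would indicate a memory-allocation failure of (info - n) bytes. */

    PStatFree(&stat);
    SUPERLU_FREE(berr);
    Destroy_LU(n, &grid, &LUstruct);
    ScalePermstructFree(&ScalePermstruct);
    LUstructFree(&LUstruct);
    if ( options.SolveInitialized ) dSolveFinalize(&options, &SOLVEstruct);
    superlu_gridexit(&grid);
}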
Example #4
/*! \brief
 *
 * <pre>
 * Purpose
 * =======
 *
 * pzgssvx_ABglobal solves a system of linear equations A*X=B,
 * by using Gaussian elimination with "static pivoting" to
 * compute the LU factorization of A.
 *
 * Static pivoting is a technique that combines the numerical stability
 * of partial pivoting with the scalability of Cholesky (no pivoting),
 * to run accurately and efficiently on large numbers of processors.
 *
 * See our paper at http://www.nersc.gov/~xiaoye/SuperLU/ for a detailed
 * description of the parallel algorithms.
 *
 * Here are the options for using this code:
 *
 *   1. Independent of all the other options specified below, the
 *      user must supply
 *
 *      -  B, the matrix of right hand sides, and its dimensions ldb and nrhs
 *      -  grid, a structure describing the 2D processor mesh
 *      -  options->IterRefine, which determines whether or not to
 *            improve the accuracy of the computed solution using 
 *            iterative refinement
 *
 *      On output, B is overwritten with the solution X.
 *
 *   2. Depending on options->Fact, the user has several options
 *      for solving A*X=B. The standard option is for factoring
 *      A "from scratch". (The other options, described below,
 *      are used when A is sufficiently similar to a previously 
 *      solved problem to save time by reusing part or all of 
 *      the previous factorization.)
 *
 *      -  options->Fact = DOFACT: A is factored "from scratch"
 *
 *      In this case the user must also supply
 *
 *      -  A, the input matrix
 *
 *      as well as the following options, which are described in more 
 *      detail below:
 *
 *      -  options->Equil,   to specify how to scale the rows and columns
 *                           of A to "equilibrate" it (to try to reduce its
 *                           condition number and so improve the
 *                           accuracy of the computed solution)
 *
 *      -  options->RowPerm, to specify how to permute the rows of A
 *                           (typically to control numerical stability)
 *
 *      -  options->ColPerm, to specify how to permute the columns of A
 *                           (typically to control fill-in and enhance
 *                           parallelism during factorization)
 *
 *      -  options->ReplaceTinyPivot, to specify how to deal with tiny
 *                           pivots encountered during factorization
 *                           (to control numerical stability)
 *
 *      The outputs returned include
 *         
 *      -  ScalePermstruct,  modified to describe how the input matrix A
 *                           was equilibrated and permuted:
 *         -  ScalePermstruct->DiagScale, indicates whether the rows and/or
 *                                        columns of A were scaled
 *         -  ScalePermstruct->R, array of row scale factors
 *         -  ScalePermstruct->C, array of column scale factors
 *         -  ScalePermstruct->perm_r, row permutation vector
 *         -  ScalePermstruct->perm_c, column permutation vector
 *
 *            (part of ScalePermstruct may also need to be supplied on input,
 *             depending on options->RowPerm and options->ColPerm as described 
 *             later).
 *
 *      -  A, the input matrix A overwritten by the scaled and permuted matrix
 *                Pc*Pr*diag(R)*A*diag(C)
 *             where 
 *                Pr and Pc are row and columns permutation matrices determined
 *                  by ScalePermstruct->perm_r and ScalePermstruct->perm_c, 
 *                  respectively, and 
 *                diag(R) and diag(C) are diagonal scaling matrices determined
 *                  by ScalePermstruct->DiagScale, ScalePermstruct->R and 
 *                  ScalePermstruct->C
 *
 *      -  LUstruct, which contains the L and U factorization of A1 where
 *
 *                A1 = Pc*Pr*diag(R)*A*diag(C)*Pc^T = L*U
 *
 *              (Note that A1 = Aout * Pc^T, where Aout is the matrix stored
 *               in A on output.)
 *
 *   3. The second value of options->Fact assumes that a matrix with the same
 *      sparsity pattern as A has already been factored:
 *     
 *      -  options->Fact = SamePattern: A is factored, assuming that it has
 *            the same nonzero pattern as a previously factored matrix. In this
 *            case the algorithm saves time by reusing the previously computed
 *            column permutation vector stored in ScalePermstruct->perm_c
 *            and the "elimination tree" of A stored in LUstruct->etree.
 *
 *      In this case the user must still specify the following options
 *      as before:
 *
 *      -  options->Equil
 *      -  options->RowPerm
 *      -  options->ReplaceTinyPivot
 *
 *      but not options->ColPerm, whose value is ignored. This is because the
 *      previous column permutation from ScalePermstruct->perm_c is used as
 *      input. The user must also supply 
 *
 *      -  A, the input matrix
 *      -  ScalePermstruct->perm_c, the column permutation
 *      -  LUstruct->etree, the elimination tree
 *
 *      The outputs returned include
 *         
 *      -  A, the input matrix A overwritten by the scaled and permuted matrix
 *            as described above
 *      -  ScalePermstruct,  modified to describe how the input matrix A was
 *                           equilibrated and row permuted
 *      -  LUstruct, modified to contain the new L and U factors
 *
 *   4. The third value of options->Fact assumes that a matrix B with the same
 *      sparsity pattern as A has already been factored, and where the
 *      row permutation of B can be reused for A. This is useful when A and B
 *      have similar numerical values, so that the same row permutation
 *      will make both factorizations numerically stable. This lets us reuse
 *      all of the previously computed structure of L and U.
 *
 *      -  options->Fact = SamePattern_SameRowPerm: A is factored,
 *            assuming not only the same nonzero pattern as the previously
 *            factored matrix B, but reusing B's row permutation.
 *
 *      In this case the user must still specify the following options
 *      as before:
 *
 *      -  options->Equil
 *      -  options->ReplaceTinyPivot
 *
 *      but not options->RowPerm or options->ColPerm, whose values are ignored.
 *      This is because the permutations from ScalePermstruct->perm_r and
 *      ScalePermstruct->perm_c are used as input.
 *
 *      The user must also supply 
 *
 *      -  A, the input matrix
 *      -  ScalePermstruct->DiagScale, how the previous matrix was row and/or
 *                                     column scaled
 *      -  ScalePermstruct->R, the row scalings of the previous matrix, if any
 *      -  ScalePermstruct->C, the column scalings of the previous matrix,
 *                             if any
 *      -  ScalePermstruct->perm_r, the row permutation of the previous matrix
 *      -  ScalePermstruct->perm_c, the column permutation of the previous 
 *                                  matrix
 *      -  all of LUstruct, the previously computed information about L and U
 *                (the actual numerical values of L and U stored in
 *                 LUstruct->Llu are ignored)
 *
 *      The outputs returned include
 *         
 *      -  A, the input matrix A overwritten by the scaled and permuted matrix
 *            as described above
 *      -  ScalePermstruct,  modified to describe how the input matrix A was
 *                           equilibrated 
 *                  (thus ScalePermstruct->DiagScale, R and C may be modified)
 *      -  LUstruct, modified to contain the new L and U factors
 *
 *   5. The fourth and last value of options->Fact assumes that A is
 *      identical to a matrix that has already been factored on a previous 
 *      call, and reuses its entire LU factorization
 *
 *      -  options->Fact = Factored: A is identical to a previously
 *            factorized matrix, so the entire previous factorization
 *            can be reused.
 *
 *      In this case all the other options mentioned above are ignored
 *      (options->Equil, options->RowPerm, options->ColPerm, 
 *       options->ReplaceTinyPivot)
 *
 *      The user must also supply 
 *
 *      -  A, the unfactored matrix, only in the case that iterative refinement
 *            is to be done (specifically A must be the output A from 
 *            the previous call, so that it has been scaled and permuted)
 *      -  all of ScalePermstruct
 *      -  all of LUstruct, including the actual numerical values of L and U
 *
 *      all of which are unmodified on output.
 *         
 * Arguments
 * =========
 *
 * options (input) superlu_options_t*
 *         The structure defines the input parameters to control
 *         how the LU decomposition will be performed.
 *         The following fields should be defined for this structure:
 *         
 *         o Fact (fact_t)
 *           Specifies whether or not the factored form of the matrix
 *           A is supplied on entry, and if not, how the matrix A should
 *           be factorized based on the previous history.
 *
 *           = DOFACT: The matrix A will be factorized from scratch.
 *                 Inputs:  A
 *                          options->Equil, RowPerm, ColPerm, ReplaceTinyPivot
 *                 Outputs: modified A
 *                             (possibly row and/or column scaled and/or 
 *                              permuted)
 *                          all of ScalePermstruct
 *                          all of LUstruct
 *
 *           = SamePattern: the matrix A will be factorized assuming
 *             that a factorization of a matrix with the same sparsity
 *             pattern was performed prior to this one. Therefore, this
 *             factorization will reuse column permutation vector 
 *             ScalePermstruct->perm_c and the elimination tree
 *             LUstruct->etree
 *                 Inputs:  A
 *                          options->Equil, RowPerm, ReplaceTinyPivot
 *                          ScalePermstruct->perm_c
 *                          LUstruct->etree
 *                 Outputs: modified A
 *                             (possibly row and/or column scaled and/or 
 *                              permuted)
 *                          rest of ScalePermstruct (DiagScale, R, C, perm_r)
 *                          rest of LUstruct (GLU_persist, Llu)
 *
 *           = SamePattern_SameRowPerm: the matrix A will be factorized
 *             assuming that a factorization of a matrix with the same
 *             sparsity pattern and similar numerical values was performed
 *             prior to this one. Therefore, this factorization will reuse
 *             both the row and column scaling factors R and C, both the
 *             row and column permutation vectors perm_r and perm_c, and
 *             the distributed data structure set up from the previous
 *             symbolic factorization.
 *                 Inputs:  A
 *                          options->Equil, ReplaceTinyPivot
 *                          all of ScalePermstruct
 *                          all of LUstruct
 *                 Outputs: modified A
 *                             (possibly row and/or column scaled and/or 
 *                              permuted)
 *                          modified LUstruct->Llu
 *           = FACTORED: the matrix A is already factored.
 *                 Inputs:  all of ScalePermstruct
 *                          all of LUstruct
 *
 *         o Equil (yes_no_t)
 *           Specifies whether to equilibrate the system.
 *           = NO:  no equilibration.
 *           = YES: scaling factors are computed to equilibrate the system:
 *                      diag(R)*A*diag(C)*inv(diag(C))*X = diag(R)*B.
 *                  Whether or not the system will be equilibrated depends
 *                  on the scaling of the matrix A, but if equilibration is
 *                  used, A is overwritten by diag(R)*A*diag(C) and B by
 *                  diag(R)*B.
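 *                  (Illustrative: if a row of A has largest-magnitude entry
 *                   1e+6, its entry of R is chosen near 1e-6 so that the
 *                   corresponding row of diag(R)*A*diag(C) has entries of
 *                   order one.)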
 *
 *         o RowPerm (rowperm_t)
 *           Specifies how to permute rows of the matrix A.
 *           = NATURAL:   use the natural ordering.
 *           = LargeDiag: use the Duff/Koster algorithm to permute rows of
 *                        the original matrix to make the diagonal large
 *                        relative to the off-diagonal.
 *           = MY_PERMR:  use the ordering given in ScalePermstruct->perm_r
 *                        input by the user.
 *           
 *         o ColPerm (colperm_t)
 *           Specifies what type of column permutation to use to reduce fill.
 *           = NATURAL:       natural ordering.
 *           = MMD_AT_PLUS_A: minimum degree ordering on structure of A'+A.
 *           = MMD_ATA:       minimum degree ordering on structure of A'*A.
 *           = MY_PERMC:      the ordering given in ScalePermstruct->perm_c.
 *         
 *         o ReplaceTinyPivot (yes_no_t)
 *           = NO:  do not modify pivots
 *           = YES: replace tiny pivots by sqrt(epsilon)*norm(A) during 
 *                  LU factorization.
 *
 *         o IterRefine (IterRefine_t)
 *           Specifies how to perform iterative refinement.
 *           = NO:     no iterative refinement.
 *           = SLU_DOUBLE: accumulate residual in double precision.
 *           = SLU_EXTRA:  accumulate residual in extra precision.
 *
 *         NOTE: all options must be identical on all processes when
 *               calling this routine.
 *
 * A (input/output) SuperMatrix*
 *         On entry, matrix A in A*X=B, of dimension (A->nrow, A->ncol).
 *         The number of linear equations is A->nrow. The type of A must be:
 *         Stype = SLU_NC; Dtype = SLU_Z; Mtype = SLU_GE. That is, A is stored in
 *         compressed column format (also known as Harwell-Boeing format).
 *         See supermatrix.h for the definition of 'SuperMatrix'.
 *         This routine only handles square A, however, the LU factorization
 *         routine pzgstrf can factorize rectangular matrices.
 *         On exit, A may be overwritten by Pc*Pr*diag(R)*A*diag(C),
 *         depending on ScalePermstruct->DiagScale, options->RowPerm and
 *         options->ColPerm:
 *             if ScalePermstruct->DiagScale != NOEQUIL, A is overwritten by
 *                diag(R)*A*diag(C).
 *             if options->RowPerm != NATURAL, A is further overwritten by
 *                Pr*diag(R)*A*diag(C).
 *             if options->ColPerm != NATURAL, A is further overwritten by
 *                Pc*Pr*diag(R)*A*diag(C).
 *         If all the above conditions are true, the LU decomposition is
 *         performed on the matrix Pc*Pr*diag(R)*A*diag(C)*Pc^T.
 *
 *         NOTE: Currently, A must reside in all processes when calling
 *               this routine.
 *
 * ScalePermstruct (input/output) ScalePermstruct_t*
 *         The data structure to store the scaling and permutation vectors
 *         describing the transformations performed to the matrix A.
 *         It contains the following fields:
 *
 *         o DiagScale (DiagScale_t)
 *           Specifies the form of equilibration that was done.
 *           = NOEQUIL: no equilibration.
 *           = ROW:     row equilibration, i.e., A was premultiplied by
 *                      diag(R).
 *           = COL:     Column equilibration, i.e., A was postmultiplied
 *                      by diag(C).
 *           = BOTH:    both row and column equilibration, i.e., A was 
 *                      replaced by diag(R)*A*diag(C).
 *           If options->Fact = FACTORED or SamePattern_SameRowPerm,
 *           DiagScale is an input argument; otherwise it is an output
 *           argument.
 *
 *         o perm_r (int*)
 *           Row permutation vector, which defines the permutation matrix Pr;
 *           perm_r[i] = j means row i of A is in position j in Pr*A.
 *           If options->RowPerm = MY_PERMR, or
 *           options->Fact = SamePattern_SameRowPerm, perm_r is an
 *           input argument; otherwise it is an output argument.
 *
 *         o perm_c (int*)
 *           Column permutation vector, which defines the 
 *           permutation matrix Pc; perm_c[i] = j means column i of A is 
 *           in position j in A*Pc.
 *           If options->ColPerm = MY_PERMC or options->Fact = SamePattern
 *           or options->Fact = SamePattern_SameRowPerm, perm_c is an
 *           input argument; otherwise, it is an output argument.
 *           On exit, perm_c may be overwritten by the product of the input
 *           perm_c and a permutation that postorders the elimination tree
 *           of Pc*A'*A*Pc'; perm_c is not changed if the elimination tree
 *           is already in postorder.
 *
 *         o R (double*) dimension (A->nrow)
 *           The row scale factors for A.
 *           If DiagScale = ROW or BOTH, A is multiplied on the left by 
 *                          diag(R).
 *           If DiagScale = NOEQUIL or COL, R is not defined.
 *           If options->Fact = FACTORED or SamePattern_SameRowPerm, R is
 *           an input argument; otherwise, R is an output argument.
 *
 *         o C (double*) dimension (A->ncol)
 *           The column scale factors for A.
 *           If DiagScale = COL or BOTH, A is multiplied on the right by 
 *                          diag(C).
 *           If DiagScale = NOEQUIL or ROW, C is not defined.
 *           If options->Fact = FACTORED or SamePattern_SameRowPerm, C is
 *           an input argument; otherwise, C is an output argument.
 *         
 * B       (input/output) doublecomplex*
 *         On entry, the right-hand side matrix of dimension (A->nrow, nrhs).
 *         On exit, the solution matrix if info = 0;
 *
 *         NOTE: Currently, B must reside in all processes when calling
 *               this routine.
 *
 * ldb     (input) int (global)
 *         The leading dimension of matrix B.
 *
 * nrhs    (input) int (global)
 *         The number of right-hand sides.
 *         If nrhs = 0, only the LU decomposition is performed; the forward
 *         and back substitutions are skipped.
 *
 * grid    (input) gridinfo_t*
 *         The 2D process mesh. It contains the MPI communicator, the number
 *         of process rows (NPROW), the number of process columns (NPCOL),
 *         and my process rank. It is an input argument to all the
 *         parallel routines.
 *         Grid can be initialized by subroutine SUPERLU_GRIDINIT.
 *         See superlu_zdefs.h for the definition of 'gridinfo_t'.
 *
 * LUstruct (input/output) LUstruct_t*
 *         The data structures to store the distributed L and U factors.
 *         It contains the following fields:
 *
 *         o etree (int*) dimension (A->ncol)
 *           Elimination tree of Pc*(A'+A)*Pc' or Pc*A'*A*Pc'.
 *           It is computed in sp_colorder() during the first factorization,
 *           and is reused in the subsequent factorizations of the matrices
 *           with the same nonzero pattern.
 *           On exit of sp_colorder(), the columns of A are permuted so that
 *           the etree is in a certain postorder. This postorder is reflected
 *           in ScalePermstruct->perm_c.
 *           NOTE:
 *           Etree is a vector of parent pointers for a forest whose vertices
 *           are the integers 0 to A->ncol-1; etree[root]==A->ncol.
 *
 *         o Glu_persist (Glu_persist_t*)
 *           Global data structure (xsup, supno) replicated on all processes,
 *           describing the supernode partition in the factored matrices
 *           L and U:
 *	       xsup[s] is the leading column of the s-th supernode,
 *             supno[i] is the supernode number to which column i belongs.
 *
 *         o Llu (LocalLU_t*)
 *           The distributed data structures to store L and U factors.
 *           See superlu_ddefs.h for the definition of 'LocalLU_t'.
 *
 * berr    (output) double*, dimension (nrhs)
 *         The componentwise relative backward error of each solution   
 *         vector X(j) (i.e., the smallest relative change in   
 *         any element of A or B that makes X(j) an exact solution).
 *
 * stat   (output) SuperLUStat_t*
 *        Record the statistics on runtime and floating-point operation count.
 *        See util.h for the definition of 'SuperLUStat_t'.
 *
 * info    (output) int*
 *         = 0: successful exit
 *         > 0: if info = i, and i is
 *             <= A->ncol: U(i,i) is exactly zero. The factorization has
 *                been completed, but the factor U is exactly singular,
 *                so the solution could not be computed.
 *             > A->ncol: number of bytes allocated when memory allocation
 *                failure occurred, plus A->ncol.
 *
 *
 * See superlu_zdefs.h for the definitions of various data types.
 * </pre>
 */
void
pzgssvx_ABglobal(superlu_options_t *options, SuperMatrix *A, 
		 ScalePermstruct_t *ScalePermstruct,
		 doublecomplex B[], int ldb, int nrhs, gridinfo_t *grid,
		 LUstruct_t *LUstruct, double *berr,
		 SuperLUStat_t *stat, int *info)
{
    SuperMatrix AC;
    NCformat *Astore;
    NCPformat *ACstore;
    Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
    Glu_freeable_t *Glu_freeable;
            /* The nonzero structures of L and U factors, which are
	       replicated on all processors.
	           (lsub, xlsub) contains the compressed subscript of
		                 supernodes in L.
          	   (usub, xusub) contains the compressed subscript of
		                 nonzero segments in U.
	      If options->Fact != SamePattern_SameRowPerm, they are 
	      computed by SYMBFACT routine, and then used by DDISTRIBUTE
	      routine. They will be freed after DDISTRIBUTE routine.
	      If options->Fact == SamePattern_SameRowPerm, these
	      structures are not used.                                  */
    fact_t   Fact;
    doublecomplex   *a;
    int_t    *perm_r; /* row permutations from partial pivoting */
    int_t    *perm_c; /* column permutation vector */
    int_t    *etree;  /* elimination tree */
    int_t    *colptr, *rowind;
    int_t    colequ, Equil, factored, job, notran, rowequ;
    int_t    i, iinfo, j, irow, m, n, nnz, permc_spec, dist_mem_use;
    int      iam;
    int      ldx;  /* LDA for matrix X (global). */
    char     equed[1], norm[1];
    double   *C, *R, *C1, *R1, amax, anorm, colcnd, rowcnd;
    doublecomplex   *X, *b_col, *b_work, *x_col;
    double   t;
    static mem_usage_t num_mem_usage, symb_mem_usage;
#if ( PRNTlevel>= 2 )
    double   dmin, dsum, dprod;
#endif

    /* Test input parameters. */
    *info = 0;
    Fact = options->Fact;
    if ( Fact < 0 || Fact > FACTORED )
	*info = -1;
    else if ( options->RowPerm < 0 || options->RowPerm > MY_PERMR )
	*info = -1;
    else if ( options->ColPerm < 0 || options->ColPerm > MY_PERMC )
	*info = -1;
    else if ( options->IterRefine < 0 || options->IterRefine > SLU_EXTRA )
	*info = -1;
    else if ( options->IterRefine == SLU_EXTRA ) {
	*info = -1;
	fprintf(stderr, "Extra precise iterative refinement yet to support.");
    } else if ( A->nrow != A->ncol || A->nrow < 0 ||
         A->Stype != SLU_NC || A->Dtype != SLU_Z || A->Mtype != SLU_GE )
	*info = -2;
    else if ( ldb < A->nrow )
	*info = -5;
    else if ( nrhs < 0 )
	*info = -6;
    if ( *info ) {
	i = -(*info);
	pxerbla("pzgssvx_ABglobal", grid, -*info);
	return;
    }

    /* Initialization */
    factored = (Fact == FACTORED);
    Equil = (!factored && options->Equil == YES);
    notran = (options->Trans == NOTRANS);
    iam = grid->iam;
    job = 5;
    m = A->nrow;
    n = A->ncol;
    Astore = A->Store;
    nnz = Astore->nnz;
    a = Astore->nzval;
    colptr = Astore->colptr;
    rowind = Astore->rowind;
    if ( factored || (Fact == SamePattern_SameRowPerm && Equil) ) {
	rowequ = (ScalePermstruct->DiagScale == ROW) ||
	         (ScalePermstruct->DiagScale == BOTH);
	colequ = (ScalePermstruct->DiagScale == COL) ||
	         (ScalePermstruct->DiagScale == BOTH);
    } else rowequ = colequ = FALSE;

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Enter pzgssvx_ABglobal()");
#endif

    perm_r = ScalePermstruct->perm_r;
    perm_c = ScalePermstruct->perm_c;
    etree = LUstruct->etree;
    R = ScalePermstruct->R;
    C = ScalePermstruct->C;
    if ( Equil && Fact != SamePattern_SameRowPerm ) {
	/* Allocate storage if not done so before. */
	switch ( ScalePermstruct->DiagScale ) {
	    case NOEQUIL:
		if ( !(R = (double *) doubleMalloc_dist(m)) )
		    ABORT("Malloc fails for R[].");
	        if ( !(C = (double *) doubleMalloc_dist(n)) )
		    ABORT("Malloc fails for C[].");
		ScalePermstruct->R = R;
		ScalePermstruct->C = C;
		break;
	    case ROW: 
	        if ( !(C = (double *) doubleMalloc_dist(n)) )
		    ABORT("Malloc fails for C[].");
		ScalePermstruct->C = C;
		break;
	    case COL: 
		if ( !(R = (double *) doubleMalloc_dist(m)) )
		    ABORT("Malloc fails for R[].");
		ScalePermstruct->R = R;
		break;
	}
    }

    /* ------------------------------------------------------------
       Diagonal scaling to equilibrate the matrix.
       ------------------------------------------------------------*/
    if ( Equil ) {
#if ( DEBUGlevel>=1 )
	CHECK_MALLOC(iam, "Enter equil");
#endif
	t = SuperLU_timer_();

	if ( Fact == SamePattern_SameRowPerm ) {
	    /* Reuse R and C. */
	    switch ( ScalePermstruct->DiagScale ) {
	      case NOEQUIL:
		break;
	      case ROW:
		for (j = 0; j < n; ++j) {
		    for (i = colptr[j]; i < colptr[j+1]; ++i) {
			irow = rowind[i];
			zd_mult(&a[i], &a[i], R[irow]); /* Scale rows. */
		    }
		}
		break;
	      case COL:
		for (j = 0; j < n; ++j)
		    for (i = colptr[j]; i < colptr[j+1]; ++i)
			zd_mult(&a[i], &a[i], C[j]); /* Scale columns. */
		break;
	      case BOTH: 
		for (j = 0; j < n; ++j) {
		    for (i = colptr[j]; i < colptr[j+1]; ++i) {
			irow = rowind[i];
			zd_mult(&a[i], &a[i], R[irow]); /* Scale rows. */
			zd_mult(&a[i], &a[i], C[j]); /* Scale columns. */
		    }
		}
	        break;
	    }
	} else {
	    if ( !iam ) {
		/* Compute row and column scalings to equilibrate matrix A. */
		zgsequ_dist(A, R, C, &rowcnd, &colcnd, &amax, &iinfo);
	    
		MPI_Bcast( &iinfo, 1, mpi_int_t, 0, grid->comm );
		if ( iinfo == 0 ) {
		    MPI_Bcast( R,       m, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( C,       n, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( &rowcnd, 1, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( &colcnd, 1, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( &amax,   1, MPI_DOUBLE, 0, grid->comm );
		} else {
		    if ( iinfo > 0 ) {
			if ( iinfo <= m )
			    fprintf(stderr, "The %d-th row of A is exactly zero\n", 
				    iinfo);
			else fprintf(stderr, "The %d-th column of A is exactly zero\n", 
				     iinfo-n);
			exit(-1);
		    }
		}
	    } else {
		MPI_Bcast( &iinfo, 1, mpi_int_t, 0, grid->comm );
		if ( iinfo == 0 ) {
		    MPI_Bcast( R,       m, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( C,       n, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( &rowcnd, 1, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( &colcnd, 1, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( &amax,   1, MPI_DOUBLE, 0, grid->comm );
		} else {
		    ABORT("ZGSEQU failed\n");
		}
	    }
	
	    /* Equilibrate matrix A. */
	    zlaqgs_dist(A, R, C, rowcnd, colcnd, amax, equed);
	    if ( lsame_(equed, "R") ) {
		ScalePermstruct->DiagScale = rowequ = ROW;
	    } else if ( lsame_(equed, "C") ) {
		ScalePermstruct->DiagScale = colequ = COL;
	    } else if ( lsame_(equed, "B") ) {
		ScalePermstruct->DiagScale = BOTH;
		rowequ = ROW;
		colequ = COL;
	    } else ScalePermstruct->DiagScale = NOEQUIL;

#if ( PRNTlevel>=1 )
	    if ( !iam ) {
		printf(".. equilibrated? *equed = %c\n", *equed);
		/*fflush(stdout);*/
	    }
#endif
	} /* if Fact ... */

	stat->utime[EQUIL] = SuperLU_timer_() - t;
#if ( DEBUGlevel>=1 )
	CHECK_MALLOC(iam, "Exit equil");
#endif
    } /* end if Equil ... */
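    /* After equilibration the stored matrix is A1 = diag(R)*A*diag(C):
       each entry a(irow,j) in a[] has been replaced by
       R[irow] * a(irow,j) * C[j] (only one of the two scalings when
       DiagScale is ROW or COL).  The right-hand side and the computed
       solution are rescaled consistently further below. */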
    
    /* ------------------------------------------------------------
       Permute rows of A. 
       ------------------------------------------------------------*/
    if ( options->RowPerm != NO ) {
	t = SuperLU_timer_();

	if ( Fact == SamePattern_SameRowPerm /* Reuse perm_r. */
	    || options->RowPerm == MY_PERMR ) { /* Use my perm_r. */
	    for (j = 0; j < n; ++j) {
		for (i = colptr[j]; i < colptr[j+1]; ++i) {
		    irow = rowind[i];
		    rowind[i] = perm_r[irow];
		}
	    }
	} else if ( !factored ) {
	    if ( job == 5 ) {
		/* Allocate storage for scaling factors. */
		if ( !(R1 = (double *) SUPERLU_MALLOC(m * sizeof(double))) ) 
		    ABORT("SUPERLU_MALLOC fails for R1[]");
		if ( !(C1 = (double *) SUPERLU_MALLOC(n * sizeof(double))) )
		    ABORT("SUPERLU_MALLOC fails for C1[]");
	    }

	    if ( !iam ) {
		/* Process 0 finds a row permutation for large diagonal. */
		zldperm(job, m, nnz, colptr, rowind, a, perm_r, R1, C1);
		
		MPI_Bcast( perm_r, m, mpi_int_t, 0, grid->comm );
		if ( job == 5 && Equil ) {
		    MPI_Bcast( R1, m, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( C1, n, MPI_DOUBLE, 0, grid->comm );
		}
	    } else {
		MPI_Bcast( perm_r, m, mpi_int_t, 0, grid->comm );
		if ( job == 5 && Equil ) {
		    MPI_Bcast( R1, m, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( C1, n, MPI_DOUBLE, 0, grid->comm );
		}
	    }

#if ( PRNTlevel>=2 )
	    dmin = dlamch_("Overflow");
	    dsum = 0.0;
	    dprod = 1.0;
#endif
	    if ( job == 5 ) {
		if ( Equil ) {
		    for (i = 0; i < n; ++i) {
			R1[i] = exp(R1[i]);
			C1[i] = exp(C1[i]);
		    }
		    for (j = 0; j < n; ++j) {
			for (i = colptr[j]; i < colptr[j+1]; ++i) {
			    irow = rowind[i];
			    zd_mult(&a[i], &a[i], R1[irow]); /* Scale rows. */
			    zd_mult(&a[i], &a[i], C1[j]); /* Scale columns. */
			    rowind[i] = perm_r[irow];
#if ( PRNTlevel>=2 )
			    if ( rowind[i] == j ) /* New diagonal */
				dprod *= slud_z_abs1(&a[i]);
#endif
			}
		    }

		    /* Multiply together the scaling factors. */
		    if ( rowequ ) for (i = 0; i < m; ++i) R[i] *= R1[i];
		    else for (i = 0; i < m; ++i) R[i] = R1[i];
		    if ( colequ ) for (i = 0; i < n; ++i) C[i] *= C1[i];
		    else for (i = 0; i < n; ++i) C[i] = C1[i];
		    
		    ScalePermstruct->DiagScale = BOTH;
		    rowequ = colequ = 1;
		} else { /* No equilibration. */
		    for (j = 0; j < n; ++j) {
			for (i = colptr[j]; i < colptr[j+1]; ++i) {
			    irow = rowind[i];
			    rowind[i] = perm_r[irow];
			}
		    }
		}
		SUPERLU_FREE (R1);
		SUPERLU_FREE (C1);
	    } else { /* job = 2,3,4 */
		for (j = 0; j < n; ++j) {
		    for (i = colptr[j]; i < colptr[j+1]; ++i) {
			irow = rowind[i];
			rowind[i] = perm_r[irow];
#if ( PRNTlevel>=2 )
			if ( rowind[i] == j ) { /* New diagonal */
			    if ( job == 2 || job == 3 )
				dmin = SUPERLU_MIN(dmin, slud_z_abs1(&a[i]));
			    else if ( job == 4 )
				dsum += slud_z_abs1(&a[i]);
			    else if ( job == 5 )
				dprod *= slud_z_abs1(&a[i]);
			}
#endif
		    }
		}
	    }

#if ( PRNTlevel>=2 )
	    if ( job == 2 || job == 3 ) {
		if ( !iam ) printf("\tsmallest diagonal %e\n", dmin);
	    } else if ( job == 4 ) {
		if ( !iam ) printf("\tsum of diagonal %e\n", dsum);
	    } else if ( job == 5 ) {
		if ( !iam ) printf("\t product of diagonal %e\n", dprod);
	    }
#endif
	    
        } /* else !factored */

	t = SuperLU_timer_() - t;
	stat->utime[ROWPERM] = t;
    
    } else { /* options->RowPerm == NOROWPERM */
        for (i = 0; i < m; ++i) perm_r[i] = i;
    }
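    /* Note that the row permutation is applied in place: only rowind[] is
       remapped (rowind[i] = perm_r[irow]), so the column pointers of the
       CSC structure are untouched and each numerical value in a[] stays
       attached to its (now renumbered) entry. */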

    if ( !factored || options->IterRefine ) {
	/* Compute norm(A), which will be used to adjust small diagonal. */
	if ( notran ) *(unsigned char *)norm = '1';
	else *(unsigned char *)norm = 'I';
	anorm = zlangs_dist(norm, A);
    }

    /* ------------------------------------------------------------
       Perform the LU factorization.
       ------------------------------------------------------------*/
    if ( !factored ) {
	t = SuperLU_timer_();
	/*
	 * Get column permutation vector perm_c[], according to permc_spec:
	 *   permc_spec = NATURAL:  natural ordering 
	 *   permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A
	 *   permc_spec = MMD_ATA:  minimum degree on structure of A'*A
	 *   permc_spec = MY_PERMC: the ordering already supplied in perm_c[]
	 */
	permc_spec = options->ColPerm;
	if ( permc_spec != MY_PERMC && Fact == DOFACT )
	    /* Use an ordering provided by SuperLU */
	    get_perm_c_dist(iam, permc_spec, A, perm_c);

	/* Compute the elimination tree of Pc*(A'+A)*Pc' or Pc*A'*A*Pc'
	   (a.k.a. column etree), depending on the choice of ColPerm.
	   Adjust perm_c[] to be consistent with a postorder of etree.
	   Permute columns of A to form A*Pc'. */
	sp_colorder(options, A, perm_c, etree, &AC);

	/* Form Pc*A*Pc' to preserve the diagonal of the matrix Pr*A. */
	ACstore = AC.Store;
	for (j = 0; j < n; ++j) 
	    for (i = ACstore->colbeg[j]; i < ACstore->colend[j]; ++i) {
		irow = ACstore->rowind[i];
		ACstore->rowind[i] = perm_c[irow];
	    }
	stat->utime[COLPERM] = SuperLU_timer_() - t;

	/* Perform a symbolic factorization on matrix A and set up the
	   nonzero data structures which are suitable for supernodal GENP. */
	if ( Fact != SamePattern_SameRowPerm ) {
#if ( PRNTlevel>=1 ) 
	    if ( !iam ) 
		printf(".. symbfact(): relax %4d, maxsuper %4d, fill %4d\n",
		       sp_ienv_dist(2), sp_ienv_dist(3), sp_ienv_dist(6));
#endif
	    t = SuperLU_timer_();
	    if ( !(Glu_freeable = (Glu_freeable_t *)
		   SUPERLU_MALLOC(sizeof(Glu_freeable_t))) )
		ABORT("Malloc fails for Glu_freeable.");

	    iinfo = symbfact(options, iam, &AC, perm_c, etree, 
			     Glu_persist, Glu_freeable);

	    stat->utime[SYMBFAC] = SuperLU_timer_() - t;

	    if ( iinfo < 0 ) {
		QuerySpace_dist(n, -iinfo, Glu_freeable, &symb_mem_usage);
#if ( PRNTlevel>=1 ) 
		if ( !iam ) {
		    printf("\tNo of supers %ld\n", Glu_persist->supno[n-1]+1);
		    printf("\tSize of G(L) %ld\n", Glu_freeable->xlsub[n]);
		    printf("\tSize of G(U) %ld\n", Glu_freeable->xusub[n]);
		    printf("\tint %d, short %d, float %d, double %d\n", 
			   sizeof(int_t), sizeof(short), sizeof(float),
			   sizeof(double));
		    printf("\tSYMBfact (MB):\tL\\U %.2f\ttotal %.2f\texpansions %d\n",
			   symb_mem_usage.for_lu*1e-6, 
			   symb_mem_usage.total*1e-6,
			   symb_mem_usage.expansions);
		}
#endif
	    } else {
		if ( !iam ) {
		    fprintf(stderr, "symbfact() error returns %d\n", iinfo);
		    exit(-1);
		}
	    }
	}

	/* Distribute the L and U factors onto the process grid. */
	t = SuperLU_timer_();
	dist_mem_use = zdistribute(Fact, n, &AC, Glu_freeable, LUstruct, grid);
	stat->utime[DIST] = SuperLU_timer_() - t;

	/* Deallocate storage used in symbolic factor. */
	if ( Fact != SamePattern_SameRowPerm ) {
	    iinfo = symbfact_SubFree(Glu_freeable);
	    SUPERLU_FREE(Glu_freeable);
	}

	/* Perform numerical factorization in parallel. */
	t = SuperLU_timer_();
	pzgstrf(options, m, n, anorm, LUstruct, grid, stat, info);
	stat->utime[FACT] = SuperLU_timer_() - t;

#if ( PRNTlevel>=1 )
	{
	    int_t TinyPivots;
	    float for_lu, total, max, avg, temp;
	    zQuerySpace_dist(n, LUstruct, grid, &num_mem_usage);
	    MPI_Reduce( &num_mem_usage.for_lu, &for_lu,
		       1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
	    MPI_Reduce( &num_mem_usage.total, &total,
		       1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
	    temp = SUPERLU_MAX(symb_mem_usage.total,
			       symb_mem_usage.for_lu +
			       (float)dist_mem_use + num_mem_usage.for_lu);
	    temp = SUPERLU_MAX(temp, num_mem_usage.total);
	    MPI_Reduce( &temp, &max,
		       1, MPI_FLOAT, MPI_MAX, 0, grid->comm );
	    MPI_Reduce( &temp, &avg,
		       1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
	    MPI_Allreduce( &stat->TinyPivots, &TinyPivots, 1, mpi_int_t,
			  MPI_SUM, grid->comm );
	    stat->TinyPivots = TinyPivots;
	    if ( !iam ) {
		printf("\tNUMfact (MB) all PEs:\tL\\U\t%.2f\tall\t%.2f\n",
		       for_lu*1e-6, total*1e-6);
		printf("\tAll space (MB):"
		       "\t\ttotal\t%.2f\tAvg\t%.2f\tMax\t%.2f\n",
		       avg*1e-6, avg/grid->nprow/grid->npcol*1e-6, max*1e-6);
		printf("\tNumber of tiny pivots: %10d\n", stat->TinyPivots);
	    }
	}
#endif
    
#if ( PRNTlevel>=2 )
	if ( !iam ) printf(".. pzgstrf INFO = %d\n", *info);
#endif

    } else if ( options->IterRefine ) { /* options->Fact==FACTORED */
	/* Permute columns of A to form A*Pc' using the existing perm_c.
	 * NOTE: rows of A were previously permuted to Pc*A.
	 */
	sp_colorder(options, A, perm_c, NULL, &AC);
    } /* if !factored ... */
	
    /* ------------------------------------------------------------
       Compute the solution matrix X.
       ------------------------------------------------------------*/
    if ( nrhs ) {

	if ( !(b_work = doublecomplexMalloc_dist(n)) )
	    ABORT("Malloc fails for b_work[]");

	/* ------------------------------------------------------------
	   Scale the right-hand side if equilibration was performed. 
	   ------------------------------------------------------------*/
	if ( notran ) {
	    if ( rowequ ) {
		b_col = B;
		for (j = 0; j < nrhs; ++j) {
		    for (i = 0; i < m; ++i) zd_mult(&b_col[i], &b_col[i], R[i]);
		    b_col += ldb;
		}
	    }
	} else if ( colequ ) {
	    b_col = B;
	    for (j = 0; j < nrhs; ++j) {
		for (i = 0; i < m; ++i) zd_mult(&b_col[i], &b_col[i], C[i]);
		b_col += ldb;
	    }
	}

	/* ------------------------------------------------------------
	   Permute the right-hand side to form Pr*B.
	   ------------------------------------------------------------*/
	if ( options->RowPerm != NO ) {
	    if ( notran ) {
		b_col = B;
		for (j = 0; j < nrhs; ++j) {
		    for (i = 0; i < m; ++i) b_work[perm_r[i]] = b_col[i];
		    for (i = 0; i < m; ++i) b_col[i] = b_work[i];
		    b_col += ldb;
		}
	    }
	}


	/* ------------------------------------------------------------
	   Permute the right-hand side to form Pc*B.
	   ------------------------------------------------------------*/
	if ( notran ) {
	    b_col = B;
	    for (j = 0; j < nrhs; ++j) {
		for (i = 0; i < m; ++i) b_work[perm_c[i]] = b_col[i];
		for (i = 0; i < m; ++i) b_col[i] = b_work[i];
		b_col += ldb;
	    }
	}
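	/* At this point B holds Pc*Pr*diag(R)*B for the non-transposed
	   system, matching the factored matrix
	   A1 = Pc*Pr*diag(R)*A*diag(C)*Pc'.  After the triangular solves,
	   X is permuted back by Pc' and unscaled by diag(C) to recover the
	   solution of the original system. */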

	/* Save a copy of the right-hand side. */
	ldx = ldb;
	if ( !(X = doublecomplexMalloc_dist(((size_t)ldx) * nrhs)) )
	    ABORT("Malloc fails for X[]");
	x_col = X;  b_col = B;
	for (j = 0; j < nrhs; ++j) {
	    for (i = 0; i < ldb; ++i) x_col[i] = b_col[i];
	    x_col += ldx;  b_col += ldb;
	}

	/* ------------------------------------------------------------
	   Solve the linear system.
	   ------------------------------------------------------------*/
	pzgstrs_Bglobal(n, LUstruct, grid, X, ldb, nrhs, stat, info);

	/* ------------------------------------------------------------
	   Use iterative refinement to improve the computed solution and
	   compute error bounds and backward error estimates for it.
	   ------------------------------------------------------------*/
	if ( options->IterRefine ) {
	    /* Improve the solution by iterative refinement. */
	    t = SuperLU_timer_();
	    pzgsrfs_ABXglobal(n, &AC, anorm, LUstruct, grid, B, ldb,
			      X, ldx, nrhs, berr, stat, info);
	    stat->utime[REFINE] = SuperLU_timer_() - t;
	}

	/* Permute the solution matrix X <= Pc'*X. */
	for (j = 0; j < nrhs; j++) {
	    b_col = &B[j*ldb];
	    x_col = &X[j*ldx];
	    for (i = 0; i < n; ++i) b_col[i] = x_col[perm_c[i]];
	}
	
	/* Transform the solution matrix X to a solution of the original system
	   before the equilibration. */
	if ( notran ) {
	    if ( colequ ) {
		b_col = B;
		for (j = 0; j < nrhs; ++j) {
		    for (i = 0; i < n; ++i) zd_mult(&b_col[i], &b_col[i], C[i]);
		    b_col += ldb;
		}
	    }
	} else if ( rowequ ) {
	    b_col = B;
	    for (j = 0; j < nrhs; ++j) {
		for (i = 0; i < n; ++i) zd_mult(&b_col[i], &b_col[i], R[i]);
		b_col += ldb;
	    }
	}

	SUPERLU_FREE(b_work);
	SUPERLU_FREE(X);

    } /* end if nrhs != 0 */

#if ( PRNTlevel>=1 )
    if ( !iam ) printf(".. DiagScale = %d\n", ScalePermstruct->DiagScale);
#endif

    /* Deallocate R and/or C if it is not used. */
    if ( Equil && Fact != SamePattern_SameRowPerm ) {
	switch ( ScalePermstruct->DiagScale ) {
	    case NOEQUIL:
	        SUPERLU_FREE(R);
		SUPERLU_FREE(C);
		break;
	    case ROW: 
		SUPERLU_FREE(C);
		break;
	    case COL: 
		SUPERLU_FREE(R);
		break;
	}
    }
    if ( !factored || (factored && options->IterRefine) )
	Destroy_CompCol_Permuted_dist(&AC);

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Exit pzgssvx_ABglobal()");
#endif
}
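/*
 * Illustrative sketch (not part of SuperLU_DIST): the driver above first
 * scales the CSC matrix by diag(R) and diag(C) and then remaps rowind[]
 * through perm_r[].  The standalone program below applies the same two steps
 * to a tiny 3x3 real matrix; the function name demo_scale_and_permute and
 * all data are invented for this example.
 */
#include <stdio.h>

static void demo_scale_and_permute(int n, const int *colptr, int *rowind,
                                   double *a, const double *R, const double *C,
                                   const int *perm_r)
{
    int i, j, irow;
    for (j = 0; j < n; ++j) {
        for (i = colptr[j]; i < colptr[j+1]; ++i) {
            irow = rowind[i];
            a[i] *= R[irow] * C[j];     /* a(irow,j) <- R[irow]*a(irow,j)*C[j] */
            rowind[i] = perm_r[irow];   /* row irow moves to row perm_r[irow] */
        }
    }
}

int main(void)
{
    /* 3x3 matrix in compressed-column format:  [1 0 2; 0 3 0; 4 0 5] */
    int    colptr[4] = {0, 2, 3, 5};
    int    rowind[5] = {0, 2, 1, 0, 2};
    double a[5]      = {1.0, 4.0, 3.0, 2.0, 5.0};
    double R[3]      = {1.0, 0.5, 0.25};
    double C[3]      = {1.0, 2.0, 4.0};
    int    perm_r[3] = {1, 0, 2};       /* swap rows 0 and 1 */
    int    i, j;

    demo_scale_and_permute(3, colptr, rowind, a, R, C, perm_r);
    for (j = 0; j < 3; ++j)
        for (i = colptr[j]; i < colptr[j+1]; ++i)
            printf("A(%d,%d) = %g\n", rowind[i], j, a[i]);
    return 0;
}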
Example #5
0
void
pzgstrs(int_t n, LUstruct_t *LUstruct, 
	ScalePermstruct_t *ScalePermstruct,
	gridinfo_t *grid, doublecomplex *B,
	int_t m_loc, int_t fst_row, int_t ldb, int nrhs,
	SOLVEstruct_t *SOLVEstruct,
	SuperLUStat_t *stat, int *info)
{
/*
 * Purpose
 * =======
 *
 * PZGSTRS solves a system of distributed linear equations
 * A*X = B with a general N-by-N matrix A using the LU factorization
 * computed by PZGSTRF.
 * If equilibration and row and column permutations were performed,
 * the LU factorization was performed for A1 where
 *     A1 = Pc*Pr*diag(R)*A*diag(C)*Pc^T = L*U
 * and the linear system solved is
 *     A1 * Y = Pc*Pr*B1, where B was overwritten by B1 = diag(R)*B, and
 * the permutation to B1 by Pc*Pr is applied internally in this routine.
 * 
 * Arguments
 * =========
 *
 * n      (input) int (global)
 *        The order of the system of linear equations.
 *
 * LUstruct (input) LUstruct_t*
 *        The distributed data structures storing L and U factors.
 *        The L and U factors are obtained from PZGSTRF for
 *        the possibly scaled and permuted matrix A.
 *        See superlu_zdefs.h for the definition of 'LUstruct_t'.
 *        A may be scaled and permuted into A1, so that
 *        A1 = Pc*Pr*diag(R)*A*diag(C)*Pc^T = L*U
 *
 * grid   (input) gridinfo_t*
 *        The 2D process mesh. It contains the MPI communicator, the number
 *        of process rows (NPROW), the number of process columns (NPCOL),
 *        and my process rank. It is an input argument to all the
 *        parallel routines.
 *        Grid can be initialized by subroutine SUPERLU_GRIDINIT.
 *        See superlu_defs.h for the definition of 'gridinfo_t'.
 *
 * B      (input/output) doublecomplex*
 *        On entry, the distributed right-hand side matrix of the possibly
 *        equilibrated system. That is, B may be overwritten by diag(R)*B.
 *        On exit, the distributed solution matrix Y of the possibly
 *        equilibrated system if info = 0, where Y = Pc*diag(C)^(-1)*X,
 *        and X is the solution of the original system.
 *
 * m_loc  (input) int (local)
 *        The local row dimension of matrix B.
 *
 * fst_row (input) int (global)
 *        The row number of B's first row in the global matrix.
 *
 * ldb    (input) int (local)
 *        The leading dimension of matrix B.
 *
 * nrhs   (input) int (global)
 *        Number of right-hand sides.
 * 
 * SOLVEstruct (output) SOLVEstruct_t* (global)
 *        Contains the information for the communication during the
 *        solution phase.
 *
 * stat   (output) SuperLUStat_t*
 *        Record the statistics about the triangular solves.
 *        See util.h for the definition of 'SuperLUStat_t'.
 *
 * info   (output) int*
 * 	   = 0: successful exit
 *	   < 0: if info = -i, the i-th argument had an illegal value
 *        
 */
    Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
    LocalLU_t *Llu = LUstruct->Llu;
    doublecomplex alpha = {1.0, 0.0};
    doublecomplex zero = {0.0, 0.0};
    doublecomplex *lsum;  /* Local running sum of the updates to B-components */
    doublecomplex *x;     /* X component at step k. */
		    /* NOTE: x and lsum are of same size. */
    doublecomplex *lusup, *dest;
    doublecomplex *recvbuf, *tempv;
    doublecomplex *rtemp; /* Result of full matrix-vector multiply. */
    int_t  **Ufstnz_br_ptr = Llu->Ufstnz_br_ptr;
    int_t  *Urbs, *Urbs1; /* Number of row blocks in each block column of U. */
    Ucb_indptr_t **Ucb_indptr;/* Vertical linked list pointing to Uindex[] */
    int_t  **Ucb_valptr;      /* Vertical linked list pointing to Unzval[] */
    int_t  iam, kcol, krow, mycol, myrow;
    int_t  i, ii, il, j, jj, k, lb, ljb, lk, lptr, luptr;
    int_t  nb, nlb, nub, nsupers;
    int_t  *xsup, *supno, *lsub, *usub;
    int_t  *ilsum;    /* Starting position of each supernode in lsum (LOCAL)*/
    int_t  Pc, Pr;
    int    knsupc, nsupr;
    int    ldalsum;   /* Number of lsum entries locally owned. */
    int    maxrecvsz, p, pi;
    int_t  **Lrowind_bc_ptr;
    doublecomplex **Lnzval_bc_ptr;
    MPI_Status status;
#ifdef ISEND_IRECV
    MPI_Request *send_req, recv_req;
#endif
    pxgstrs_comm_t *gstrs_comm = SOLVEstruct->gstrs_comm;

    /*-- Counts used for L-solve --*/
    int_t  *fmod;         /* Modification count for L-solve --
                             Count the number of local block products to
                             be summed into lsum[lk]. */
    int_t  **fsendx_plist = Llu->fsendx_plist;
    int_t  nfrecvx = Llu->nfrecvx; /* Number of X components to be recv'd. */
    int_t  *frecv;        /* Count of lsum[lk] contributions to be received
                             from processes in this row. 
                             It is only valid on the diagonal processes. */
    int_t  nfrecvmod = 0; /* Count of total modifications to be recv'd. */
    int_t  nleaf = 0, nroot = 0;

    /*-- Counts used for U-solve --*/
    int_t  *bmod;         /* Modification count for U-solve. */
    int_t  **bsendx_plist = Llu->bsendx_plist;
    int_t  nbrecvx = Llu->nbrecvx; /* Number of X components to be recv'd. */
    int_t  *brecv;        /* Count of modifications to be recv'd from
			     processes in this row. */
    int_t  nbrecvmod = 0; /* Count of total modifications to be recv'd. */
    double t;
#if ( DEBUGlevel>=2 )
    int_t Ublocks = 0;
#endif

    t = SuperLU_timer_();

    /* Test input parameters. */
    *info = 0;
    if ( n < 0 ) *info = -1;
    else if ( nrhs < 0 ) *info = -9;
    if ( *info ) {
	pxerbla("PZGSTRS", grid, -*info);
	return;
    }
	
    /*
     * Initialization.
     */
    iam = grid->iam;
    Pc = grid->npcol;
    Pr = grid->nprow;
    myrow = MYROW( iam, grid );
    mycol = MYCOL( iam, grid );
    xsup = Glu_persist->xsup;
    supno = Glu_persist->supno;
    nsupers = supno[n-1] + 1;
    Lrowind_bc_ptr = Llu->Lrowind_bc_ptr;
    Lnzval_bc_ptr = Llu->Lnzval_bc_ptr;
    nlb = CEILING( nsupers, Pr ); /* Number of local block rows. */

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Enter pzgstrs()");
#endif

    stat->ops[SOLVE] = 0.0;
    Llu->SolveMsgSent = 0;

    /* Save the count to be altered so it can be used by
       a subsequent call to PZGSTRS. */
    if ( !(fmod = intMalloc_dist(nlb)) )
	ABORT("Calloc fails for fmod[].");
    for (i = 0; i < nlb; ++i) fmod[i] = Llu->fmod[i];
    if ( !(frecv = intMalloc_dist(nlb)) )
	ABORT("Malloc fails for frecv[].");
    Llu->frecv = frecv;

#ifdef ISEND_IRECV
    k = SUPERLU_MAX( Llu->nfsendx, Llu->nbsendx ) + nlb;
    if ( !(send_req = (MPI_Request*) SUPERLU_MALLOC(k*sizeof(MPI_Request))) )
	ABORT("Malloc fails for send_req[].");
#endif

#ifdef _CRAY
    ftcs1 = _cptofcd("L", strlen("L"));
    ftcs2 = _cptofcd("N", strlen("N"));
    ftcs3 = _cptofcd("U", strlen("U"));
#endif


    /* Obtain ilsum[] and ldalsum for process column 0. */
    ilsum = Llu->ilsum;
    ldalsum = Llu->ldalsum;

    /* Allocate working storage. */
    knsupc = sp_ienv_dist(3);
    maxrecvsz = knsupc * nrhs + SUPERLU_MAX( XK_H, LSUM_H );
    if ( !(lsum = doublecomplexCalloc_dist(((size_t)ldalsum)*nrhs + nlb*LSUM_H)) )
	ABORT("Calloc fails for lsum[].");
    if ( !(x = doublecomplexMalloc_dist(ldalsum * nrhs + nlb * XK_H)) )
	ABORT("Malloc fails for x[].");
    if ( !(recvbuf = doublecomplexMalloc_dist(maxrecvsz)) )
	ABORT("Malloc fails for recvbuf[].");
    if ( !(rtemp = doublecomplexCalloc_dist(maxrecvsz)) )
	ABORT("Malloc fails for rtemp[].");

    
    /*---------------------------------------------------
     * Forward solve Ly = b.
     *---------------------------------------------------*/
    /* Redistribute B into X on the diagonal processes. */
    pzReDistribute_B_to_X(B, m_loc, nrhs, ldb, fst_row, ilsum, x, 
			  ScalePermstruct, Glu_persist, grid, SOLVEstruct);

    /* Set up the headers in lsum[]. */
    ii = 0;
    for (k = 0; k < nsupers; ++k) {
	knsupc = SuperSize( k );
	krow = PROW( k, grid );
	if ( myrow == krow ) {
	    lk = LBi( k, grid );   /* Local block number. */
	    il = LSUM_BLK( lk );
	    lsum[il - LSUM_H].r = k;/* Block number prepended in the header.*/
	    lsum[il - LSUM_H].i = 0;
	}
	ii += knsupc;
    }
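    /* Each block of lsum[] (and of x[]) is preceded by a small header of
       LSUM_H (resp. XK_H) doublecomplex slots; the real part of the first
       header entry carries the global block number k, so a receiver can
       tell which supernode an incoming message belongs to
       (k = (*recvbuf).r in the event loop below). */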

    /*
     * Compute frecv[] and nfrecvmod counts on the diagonal processes.
     */
    {
	superlu_scope_t *scp = &grid->rscp;

	for (k = 0; k < nsupers; ++k) {
	    krow = PROW( k, grid );
	    if ( myrow == krow ) {
		lk = LBi( k, grid );    /* Local block number. */
		kcol = PCOL( k, grid ); /* Root process in this row scope. */
		if ( mycol != kcol && fmod[lk] )
		    i = 1;  /* Contribution from non-diagonal process. */
		else i = 0;
		MPI_Reduce( &i, &frecv[lk], 1, mpi_int_t,
			   MPI_SUM, kcol, scp->comm );
		if ( mycol == kcol ) { /* Diagonal process. */
		    nfrecvmod += frecv[lk];
		    if ( !frecv[lk] && !fmod[lk] ) ++nleaf;
#if ( DEBUGlevel>=2 )
		    printf("(%2d) frecv[%4d]  %2d\n", iam, k, frecv[lk]);
		    assert( frecv[lk] < Pc );
#endif
		}
	    }
	}
    }
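    /* For each supernode k in this process row, every off-diagonal process
       with pending local updates (fmod[lk] != 0) contributes 1 to the
       MPI_Reduce above, so on the diagonal process frecv[lk] is the number
       of LSUM messages to expect for block k.  Blocks with frecv == 0 and
       fmod == 0 depend on nothing and are the leaves solved first. */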

    /* ---------------------------------------------------------
       Solve the leaf nodes first by all the diagonal processes.
       --------------------------------------------------------- */
#if ( DEBUGlevel>=2 )
    printf("(%2d) nleaf %4d\n", iam, nleaf);
#endif
    for (k = 0; k < nsupers && nleaf; ++k) {
	krow = PROW( k, grid );
	kcol = PCOL( k, grid );
	if ( myrow == krow && mycol == kcol ) { /* Diagonal process */
	    knsupc = SuperSize( k );
	    lk = LBi( k, grid );
	    if ( frecv[lk]==0 && fmod[lk]==0 ) {
		fmod[lk] = -1;  /* Do not solve X[k] in the future. */
		ii = X_BLK( lk );
		lk = LBj( k, grid ); /* Local block number, column-wise. */
		lsub = Lrowind_bc_ptr[lk];
		lusup = Lnzval_bc_ptr[lk];
		nsupr = lsub[1];
#ifdef _CRAY
		CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &knsupc, &nrhs, &alpha,
		      lusup, &nsupr, &x[ii], &knsupc);
#elif defined (USE_VENDOR_BLAS)
		ztrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, 
		       lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1);
#else
		ztrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, 
		       lusup, &nsupr, &x[ii], &knsupc);
#endif
		stat->ops[SOLVE] += 4 * knsupc * (knsupc - 1) * nrhs
		    + 10 * knsupc * nrhs; /* complex division */
		--nleaf;
#if ( DEBUGlevel>=2 )
		printf("(%2d) Solve X[%2d]\n", iam, k);
#endif
		
		/*
		 * Send Xk to process column Pc[k].
		 */
		for (p = 0; p < Pr; ++p) {
		    if ( fsendx_plist[lk][p] != EMPTY ) {
			pi = PNUM( p, kcol, grid );
#ifdef ISEND_IRECV
			MPI_Isend( &x[ii - XK_H], knsupc * nrhs + XK_H,
				   SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm,
                                   &send_req[Llu->SolveMsgSent++]);
#else
			MPI_Send( &x[ii - XK_H], knsupc * nrhs + XK_H,
				 SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm );
#endif
#if ( DEBUGlevel>=2 )
			printf("(%2d) Sent X[%2.0f] to P %2d\n",
			       iam, x[ii-XK_H], pi);
#endif
		    }
		}
		/*
		 * Perform local block modifications: lsum[i] -= L_i,k * X[k]
		 */
		nb = lsub[0] - 1;
		lptr = BC_HEADER + LB_DESCRIPTOR + knsupc;
		luptr = knsupc; /* Skip diagonal block L(k,k). */
		
		zlsum_fmod(lsum, x, &x[ii], rtemp, nrhs, knsupc, k,
			   fmod, nb, lptr, luptr, xsup, grid, Llu, 
			   send_req, stat);
	    }
	} /* if diagonal process ... */
    } /* for k ... */

    /* -----------------------------------------------------------
       Compute the internal nodes asynchronously by all processes.
       ----------------------------------------------------------- */
#if ( DEBUGlevel>=2 )
    printf("(%2d) nfrecvx %4d,  nfrecvmod %4d,  nleaf %4d\n",
	   iam, nfrecvx, nfrecvmod, nleaf);
#endif
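    /* Two message kinds drive the event loop: a message tagged Xk carries an
       already-solved block X[k] and triggers the local updates
       lsum[i] -= L_{i,k} * X[k]; a message tagged LSUM carries a partial sum
       that the diagonal process adds into x[].  Once both frecv[lk] and
       fmod[lk] reach zero, block k itself is solved and sent onward. */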

    while ( nfrecvx || nfrecvmod ) { /* While not finished. */

	/* Receive a message. */
#ifdef ISEND_IRECV
	/* -MPI- FATAL: Remote protocol queue full */
	MPI_Irecv( recvbuf, maxrecvsz, SuperLU_MPI_DOUBLE_COMPLEX,
                 MPI_ANY_SOURCE, MPI_ANY_TAG, grid->comm, &recv_req );
	MPI_Wait( &recv_req, &status );
#else
	MPI_Recv( recvbuf, maxrecvsz, SuperLU_MPI_DOUBLE_COMPLEX,
                  MPI_ANY_SOURCE, MPI_ANY_TAG, grid->comm, &status );
#endif

        k = (*recvbuf).r;

#if ( DEBUGlevel>=2 )
	printf("(%2d) Recv'd block %d, tag %2d\n", iam, k, status.MPI_TAG);
#endif
	
	switch ( status.MPI_TAG ) {
	  case Xk:
	      --nfrecvx;
	      lk = LBj( k, grid ); /* Local block number, column-wise. */
	      lsub = Lrowind_bc_ptr[lk];
	      lusup = Lnzval_bc_ptr[lk];
	      if ( lsub ) {
		  nb   = lsub[0];
		  lptr = BC_HEADER;
		  luptr = 0;
		  knsupc = SuperSize( k );

		  /*
		   * Perform local block modifications: lsum[i] -= L_i,k * X[k]
		   */
		  zlsum_fmod(lsum, x, &recvbuf[XK_H], rtemp, nrhs, knsupc, k,
			     fmod, nb, lptr, luptr, xsup, grid, Llu, 
			     send_req, stat);
	      } /* if lsub */

	      break;

	  case LSUM: /* Receiver must be a diagonal process */
	      --nfrecvmod;
	      lk = LBi( k, grid ); /* Local block number, row-wise. */
	      ii = X_BLK( lk );
	      knsupc = SuperSize( k );
	      tempv = &recvbuf[LSUM_H];
	      RHS_ITERATE(j) {
		  for (i = 0; i < knsupc; ++i)
		      z_add(&x[i + ii + j*knsupc],
			    &x[i + ii + j*knsupc],
			    &tempv[i + j*knsupc]);
	      }

	      if ( (--frecv[lk])==0 && fmod[lk]==0 ) {
		  fmod[lk] = -1; /* Do not solve X[k] in the future. */
		  lk = LBj( k, grid ); /* Local block number, column-wise. */
		  lsub = Lrowind_bc_ptr[lk];
		  lusup = Lnzval_bc_ptr[lk];
		  nsupr = lsub[1];
#ifdef _CRAY
		  CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &knsupc, &nrhs, &alpha,
			lusup, &nsupr, &x[ii], &knsupc);
#elif defined (USE_VENDOR_BLAS)
		  ztrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, 
			 lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1);
#else
		  ztrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, 
			 lusup, &nsupr, &x[ii], &knsupc);
#endif
		  stat->ops[SOLVE] += 4 * knsupc * (knsupc - 1) * nrhs
		      + 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
		  printf("(%2d) Solve X[%2d]\n", iam, k);
#endif
		
		  /*
		   * Send Xk to process column Pc[k].
		   */
		  kcol = PCOL( k, grid );
		  for (p = 0; p < Pr; ++p) {
		      if ( fsendx_plist[lk][p] != EMPTY ) {
			  pi = PNUM( p, kcol, grid );
#ifdef ISEND_IRECV
			  MPI_Isend( &x[ii-XK_H], knsupc * nrhs + XK_H,
                                     SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm,
                                     &send_req[Llu->SolveMsgSent++]);
#else
			  MPI_Send( &x[ii - XK_H], knsupc * nrhs + XK_H,
				    SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm );
#endif
#if ( DEBUGlevel>=2 )
			  printf("(%2d) Sent X[%2.0f] to P %2d\n",
				 iam, x[ii-XK_H], pi);
#endif
		      }
                  }
		  /*
		   * Perform local block modifications.
		   */
		  nb = lsub[0] - 1;
		  lptr = BC_HEADER + LB_DESCRIPTOR + knsupc;
		  luptr = knsupc; /* Skip diagonal block L(k,k). */

		  zlsum_fmod(lsum, x, &x[ii], rtemp, nrhs, knsupc, k,
			     fmod, nb, lptr, luptr, xsup, grid, Llu,
			     send_req, stat);
	      } /* if */

	      break;

#if ( DEBUGlevel>=2 )
	    default:
	      printf("(%2d) Recv'd wrong message tag %4d\n", status.MPI_TAG);
	      break;
#endif
	  } /* switch */

    } /* while not finished ... */


#if ( PRNTlevel>=2 )
    t = SuperLU_timer_() - t;
    if ( !iam ) printf(".. L-solve time\t%8.2f\n", t);
    t = SuperLU_timer_();
#endif

#if ( DEBUGlevel==2 )
    {
      printf("(%d) .. After L-solve: y =\n", iam);
      for (i = 0, k = 0; k < nsupers; ++k) {
	  krow = PROW( k, grid );
	  kcol = PCOL( k, grid );
	  if ( myrow == krow && mycol == kcol ) { /* Diagonal process */
	      knsupc = SuperSize( k );
	      lk = LBi( k, grid );
	      ii = X_BLK( lk );
	      for (j = 0; j < knsupc; ++j)
		printf("\t(%d)\t%4d\t%.10f\n", iam, xsup[k]+j, x[ii+j]);
	      fflush(stdout);
	  }
	  MPI_Barrier( grid->comm );
      }
    }
#endif

    SUPERLU_FREE(fmod);
    SUPERLU_FREE(frecv);
    SUPERLU_FREE(rtemp);

#ifdef ISEND_IRECV
    for (i = 0; i < Llu->SolveMsgSent; ++i) MPI_Request_free(&send_req[i]);
    Llu->SolveMsgSent = 0;
#endif


    /*---------------------------------------------------
     * Back solve Ux = y.
     *
     * The Y components from the forward solve are already
     * on the diagonal processes.
     *---------------------------------------------------*/

    /* Save the count to be altered so it can be used by
       a subsequent call to PZGSTRS. */
    if ( !(bmod = intMalloc_dist(nlb)) )
	ABORT("Calloc fails for bmod[].");
    for (i = 0; i < nlb; ++i) bmod[i] = Llu->bmod[i];
    if ( !(brecv = intMalloc_dist(nlb)) )
	ABORT("Malloc fails for brecv[].");
    Llu->brecv = brecv;

    /*
     * Compute brecv[] and nbrecvmod counts on the diagonal processes.
     */
    {
	superlu_scope_t *scp = &grid->rscp;

	for (k = 0; k < nsupers; ++k) {
	    krow = PROW( k, grid );
	    if ( myrow == krow ) {
		lk = LBi( k, grid );    /* Local block number. */
		kcol = PCOL( k, grid ); /* Root process in this row scope. */
		if ( mycol != kcol && bmod[lk] )
		    i = 1;  /* Contribution from non-diagonal process. */
		else i = 0;
		MPI_Reduce( &i, &brecv[lk], 1, mpi_int_t,
			   MPI_SUM, kcol, scp->comm );
		if ( mycol == kcol ) { /* Diagonal process. */
		    nbrecvmod += brecv[lk];
		    if ( !brecv[lk] && !bmod[lk] ) ++nroot;
#if ( DEBUGlevel>=2 )
		    printf("(%2d) brecv[%4d]  %2d\n", iam, k, brecv[lk]);
		    assert( brecv[lk] < Pc );
#endif
		}
	    }
	}
    }

    /* Re-initialize lsum to zero. Each block header is already in place. */
    for (k = 0; k < nsupers; ++k) {
	krow = PROW( k, grid );
	if ( myrow == krow ) {
	    knsupc = SuperSize( k );
	    lk = LBi( k, grid );
	    il = LSUM_BLK( lk );
	    dest = &lsum[il];
	    RHS_ITERATE(j) {
		for (i = 0; i < knsupc; ++i) dest[i + j*knsupc] = zero;
	    }
	}
    }
int
static_schedule(superlu_options_t * options, int m, int n, 
		LUstruct_t * LUstruct, gridinfo_t * grid, SuperLUStat_t * stat,
		int_t *perm_c_supno, int_t *iperm_c_supno, int *info)
{
    int_t *xsup;
    int_t  i, ib, jb, lb, nlb, il, iu;
    int_t Pc, Pr;
    int iam, krow, yourcol, mycol, myrow; 
    int j, k, nsupers;  /* k - current panel to work on */
    int_t *index;
    Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
    LocalLU_t *Llu = LUstruct->Llu;
    int ncb, nrb, p, pr, pc, nblocks;
    int_t *etree_supno_l, *etree_supno, *blocks, *blockr, *Ublock, *Urows,
        *Lblock, *Lrows, *sf_block, *sf_block_l, *nnodes_l,
        *nnodes_u, *edag_supno_l, *recvbuf, **edag_supno;
    float edag_supno_l_bytes;
    int nnodes, *sendcnts, *sdispls, *recvcnts, *rdispls, *srows, *rrows;
    etree_node *head, *tail, *ptr;
    int *num_child;

    int iword = sizeof (int_t);

    /* Test the input parameters. */
    *info = 0;
    if (m < 0) *info = -2;
    else if (n < 0) *info = -3;
    if (*info) {
        pxerbla ("pdgstrf", grid, -*info);
        return (-1);
    }

    /* Quick return if possible. */
    if (m == 0 || n == 0) return 0;
 
    /* 
     * Initialization.  
     */
    iam = grid->iam;
    Pc = grid->npcol; 
    Pr = grid->nprow;
    myrow = MYROW (iam, grid);
    mycol = MYCOL (iam, grid);
    nsupers = Glu_persist->supno[n - 1] + 1;
    xsup = Glu_persist->xsup;
    nblocks = 0;
    ncb = nsupers / Pc;
    nrb = nsupers / Pr;

#if ( DEBUGlevel >= 1 ) 
    print_memorylog(stat, "before static schedule");
#endif

    /* ================================================== *
     * static scheduling of j-th step of LU-factorization *
     * ================================================== */
    if (options->lookahead_etree == YES &&  /* use e-tree of symmetrized matrix and */
        (options->ParSymbFact == NO ||  /* 1) symmetric fact with serial symbolic, or */
         (options->SymPattern == YES && /* 2) symmetric pattern, and                  */
          options->RowPerm == NOROWPERM))) { /* no rowperm to destroy symmetry */

        /* if symmetric pattern or using e-tree of |A^T|+|A|,
           then we can use a simple tree structure for static scheduling */

        if (options->ParSymbFact == NO) {
            /* Use the etree computed from serial symb. fact., and turn it
               into supernodal tree.  */
            int_t *etree = LUstruct->etree;
#if ( PRNTlevel>=1 )
            if (grid->iam == 0)
                printf (" === using column e-tree ===\n");
#endif

            /* look for the first off-diagonal blocks */
            etree_supno = SUPERLU_MALLOC (nsupers * sizeof (int_t));
	    log_memory(nsupers * iword, stat);

            for (i = 0; i < nsupers; i++) etree_supno[i] = nsupers;

            for (j = 0, lb = 0; lb < nsupers; lb++) {
                for (k = 0; k < SuperSize (lb); k++) {
                    jb = Glu_persist->supno[etree[j + k]];
                    if (jb != lb)
                        etree_supno[lb] = SUPERLU_MIN (etree_supno[lb], jb);
                }
                j += SuperSize (lb);
            }
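            /* etree_supno[lb] is now the lowest-numbered supernode reached by
               the column e-tree from any column of supernode lb (other than
               lb itself), i.e. its parent in a supernodal elimination tree;
               roots keep the sentinel value nsupers. */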
        } else { /* ParSymbFact==YES and SymPattern==YES and RowPerm==NOROWPERM */
            /* Compute an "etree" based on struct(L),
               assuming struct(U) = struct(L').   */
#if ( PRNTlevel>=1 )
            if (grid->iam == 0)
                printf (" === using supernodal e-tree ===\n");
#endif

            /* find the first block in each supernodal-column of local L-factor */
            etree_supno_l = SUPERLU_MALLOC (nsupers * sizeof (int_t));
	    log_memory(nsupers * iword, stat);

            for (i = 0; i < nsupers; i++) etree_supno_l[i] = nsupers;
            for (lb = 0; lb < ncb; lb++) {
                jb = lb * grid->npcol + mycol;
                index = Llu->Lrowind_bc_ptr[lb];
                if (index) {   /* Not an empty column */
                    i = index[0];
                    k = BC_HEADER;
                    krow = PROW (jb, grid);
                    if (krow == myrow) {  /* skip the diagonal block */
                        k += LB_DESCRIPTOR + index[k + 1];
                        i--;
                    }
                    if (i > 0)
                    {
                        etree_supno_l[jb] = index[k];
                        k += LB_DESCRIPTOR + index[k + 1];
                        i--;
                    }

                    for (j = 0; j < i; j++)
                    {
                        etree_supno_l[jb] =
                            SUPERLU_MIN (etree_supno_l[jb], index[k]);
                        k += LB_DESCRIPTOR + index[k + 1];
                    }
                }
            }
            if (mycol < nsupers % grid->npcol) {
                jb = ncb * grid->npcol + mycol;
                index = Llu->Lrowind_bc_ptr[ncb];
                if (index) {     /* Not an empty column */
                    i = index[0];
                    k = BC_HEADER;
                    krow = PROW (jb, grid);
                    if (krow == myrow) { /* skip the diagonal block */
                        k += LB_DESCRIPTOR + index[k + 1];
                        i--;
                    }
                    if (i > 0) {
                        etree_supno_l[jb] = index[k];
                        k += LB_DESCRIPTOR + index[k + 1];
                        i--;
                    }
                    for (j = 0; j < i; j++) {
                        etree_supno_l[jb] =
                            SUPERLU_MIN (etree_supno_l[jb], index[k]);
                        k += LB_DESCRIPTOR + index[k + 1];
                    }
                }
            }

            /* form global e-tree */
            etree_supno = SUPERLU_MALLOC (nsupers * sizeof (int_t));

            MPI_Allreduce (etree_supno_l, etree_supno, nsupers, mpi_int_t,
                           MPI_MIN, grid->comm);

            SUPERLU_FREE (etree_supno_l);
        }

        /* initialize number of children for each node */
        num_child = SUPERLU_MALLOC (nsupers * sizeof (int_t));
        for (i = 0; i < nsupers; i++) num_child[i] = 0;
        for (i = 0; i < nsupers; i++)
            if (etree_supno[i] != nsupers)  num_child[etree_supno[i]]++;

        /* push initial leaves to the fifo queue */
        nnodes = 0;
        for (i = 0; i < nsupers; i++) {
            if (num_child[i] == 0) {
                ptr = SUPERLU_MALLOC (sizeof (etree_node));
                ptr->id = i;
                ptr->next = NULL;
                /*printf( " == push leaf %d (%d) ==\n",i,nnodes ); */
                nnodes++;

                if (nnodes == 1) {
                    head = ptr;
                    tail = ptr;
                } else {
                    tail->next = ptr;
                    tail = ptr;
                }
            }
        }

        /* process fifo queue, and compute the ordering */
        i = 0;

        while (nnodes > 0) {
            ptr = head;
            j = ptr->id;
            head = ptr->next;
            perm_c_supno[i] = j;
            SUPERLU_FREE (ptr);
            i++;
            nnodes--;

            if (etree_supno[j] != nsupers) {
                num_child[etree_supno[j]]--;
                if (num_child[etree_supno[j]] == 0) {
                    nnodes++;

                    ptr = SUPERLU_MALLOC (sizeof (etree_node));
                    ptr->id = etree_supno[j];
                    ptr->next = NULL;

                    /*printf( "=== push %d ===\n",ptr->id ); */
                    if (nnodes == 1) {
                        head = ptr;
                        tail = ptr;
                    } else {
                        tail->next = ptr;
                        tail = ptr;
                    }
                }
            }
            /*printf( "\n" ); */
        }
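        /* The queue pass above is a Kahn-style topological sort of the
           supernodal e-tree: a node is dequeued only after all of its
           children, so perm_c_supno[] lists the supernodes in a bottom-up
           order that the factorization can follow panel by panel. */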
        SUPERLU_FREE (num_child);
        SUPERLU_FREE (etree_supno);
	log_memory(-2 * nsupers * iword, stat);

    } else {         /* Unsymmetric pattern */

        /* Need to process both L- and U-factors, use the symmetrically
           pruned graph of L & U instead of tree (very naive implementation) */
        int nrbp1 = nrb + 1;
	float Ublock_bytes, Urows_bytes, Lblock_bytes, Lrows_bytes;

        /* allocate some workspace */
        if (! (sendcnts = SUPERLU_MALLOC ((4 + 2 * nrbp1) * Pr * Pc * sizeof (int))))
            ABORT ("Malloc fails for sendcnts[].");
	log_memory((4 + 2 * nrbp1) * Pr * Pc * sizeof (int), stat);

        sdispls = &sendcnts[Pr * Pc];
        recvcnts = &sdispls[Pr * Pc];
        rdispls = &recvcnts[Pr * Pc];
        srows = &rdispls[Pr * Pc];
        rrows = &srows[Pr * Pc * nrbp1];

        myrow = MYROW (iam, grid);
#if ( PRNTlevel>=1 )
        if (grid->iam == 0)
            printf (" === using DAG ===\n");
#endif

        /* send supno block of local U-factor to a processor *
         * who owns the corresponding block of L-factor      */

        /* srows   : # of blocks to send to a processor from each supno row  */
        /* sendcnts: total # of blocks to send to a processor              */
        for (p = 0; p < Pr * Pc * nrbp1; p++) srows[p] = 0;
        for (p = 0; p < Pr * Pc; p++) sendcnts[p] = 0;

        /* sending blocks of U-factors corresponding to L-factors */
        /* count the number of blocks to send */
        for (lb = 0; lb < nrb; ++lb) {
            jb = lb * Pr + myrow;
            pc = jb % Pc;
            index = Llu->Ufstnz_br_ptr[lb];

            if (index) {         /* Not an empty row */
                k = BR_HEADER;
                nblocks += index[0];
                for (j = 0; j < index[0]; ++j) {
                    ib = index[k];
                    pr = ib % Pr;
                    p = pr * Pc + pc;
                    sendcnts[p]++;
                    srows[p * nrbp1 + lb]++;

                    k += UB_DESCRIPTOR + SuperSize (index[k]);
                }
            }
        }

        if (myrow < nsupers % grid->nprow) {
            jb = nrb * Pr + myrow;
            pc = jb % Pc;
            index = Llu->Ufstnz_br_ptr[nrb];

            if (index) {         /* Not an empty row */
                k = BR_HEADER;
                nblocks += index[0];
                for (j = 0; j < index[0]; ++j) {
                    ib = index[k];
                    pr = ib % Pr;
                    p = pr * Pc + pc;
                    sendcnts[p]++;
                    srows[p * nrbp1 + nrb]++;
                    k += UB_DESCRIPTOR + SuperSize (index[k]);
                }
            }
        }

        /* insert blocks to send */
        sdispls[0] = 0;
        for (p = 1; p < Pr * Pc; p++) sdispls[p] = sdispls[p - 1] + sendcnts[p - 1];
        if (!(blocks = intMalloc_dist (nblocks)))
            ABORT ("Malloc fails for blocks[].");
	log_memory( nblocks * iword, stat );

        for (lb = 0; lb < nrb; ++lb) {
            jb = lb * Pr + myrow;
            pc = jb % Pc;
            index = Llu->Ufstnz_br_ptr[lb];

            if (index) {       /* Not an empty row */
                k = BR_HEADER;
                for (j = 0; j < index[0]; ++j) {
                    ib = index[k];
                    pr = ib % Pr;
                    p = pr * Pc + pc;
                    blocks[sdispls[p]] = ib;
                    sdispls[p]++;

                    k += UB_DESCRIPTOR + SuperSize (index[k]);
                }
            }
        }

        if (myrow < nsupers % grid->nprow) {
            jb = nrb * Pr + myrow;
            pc = jb % Pc;
            index = Llu->Ufstnz_br_ptr[nrb];

            if (index) {       /* Not an empty row */
                k = BR_HEADER;
                for (j = 0; j < index[0]; ++j) {
                    ib = index[k];
                    pr = ib % Pr;
                    p = pr * Pc + pc;
                    blocks[sdispls[p]] = ib;
                    sdispls[p]++;

                    k += UB_DESCRIPTOR + SuperSize (index[k]);
                }
            }
        }

        /* communication */
        MPI_Alltoall (sendcnts, 1, MPI_INT, recvcnts, 1, MPI_INT, grid->comm);
        MPI_Alltoall (srows, nrbp1, MPI_INT, rrows, nrbp1, MPI_INT, grid->comm);

	log_memory( -(nblocks * iword), stat );  /* blocks[] to be freed soon */

        nblocks = recvcnts[0];
        rdispls[0] = sdispls[0] = 0;
        for (p = 1; p < Pr * Pc; p++) {
            rdispls[p] = rdispls[p - 1] + recvcnts[p - 1];
            sdispls[p] = sdispls[p - 1] + sendcnts[p - 1];
            nblocks += recvcnts[p];
        }
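        /* Standard MPI_Alltoallv bookkeeping: rdispls[]/sdispls[] are the
           exclusive prefix sums of recvcnts[]/sendcnts[], and nblocks now
           holds the total number of U-blocks this process will receive
           into blockr[]. */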

        if (!(blockr = intMalloc_dist (nblocks))) ABORT ("Malloc fails for blockr[].");
	log_memory( nblocks * iword, stat );

        MPI_Alltoallv (blocks, sendcnts, sdispls, mpi_int_t, blockr, recvcnts,
                       rdispls, mpi_int_t, grid->comm);

        SUPERLU_FREE (blocks); /* memory logged before */

	
        /* store the received U-blocks by rows */
        nlb = nsupers / Pc;
        if (!(Ublock = intMalloc_dist (nblocks))) ABORT ("Malloc fails for Ublock[].");
        if (!(Urows = intMalloc_dist (1 + nlb))) ABORT ("Malloc fails for Urows[].");

	Ublock_bytes = nblocks * iword;
	Urows_bytes = (1 + nlb) * iword;
	log_memory( Ublock_bytes + Urows_bytes, stat );

        k = 0;
        for (jb = 0; jb < nlb; jb++) {
            j = jb * Pc + mycol;
            pr = j % Pr;
            lb = j / Pr;
            Urows[jb] = 0;

            for (pc = 0; pc < Pc; pc++) {
                p = pr * Pc + pc; /* the processor owning this block of U-factor */

                for (i = rdispls[p]; i < rdispls[p] + rrows[p * nrbp1 + lb];
                     i++) {
                    Ublock[k] = blockr[i];
                    k++;
                    Urows[jb]++;
                }
                rdispls[p] += rrows[p * nrbp1 + lb];
            }
            /* sort by the column indices to make things easier for later on */

#ifdef ISORT
            isort1 (Urows[jb], &(Ublock[k - Urows[jb]]));
#else
            qsort (&(Ublock[k - Urows[jb]]), (size_t) (Urows[jb]),
                   sizeof (int_t), &superlu_sort_perm);
#endif
        }
        if (mycol < nsupers % grid->npcol) {
            j = nlb * Pc + mycol;
            pr = j % Pr;
            lb = j / Pr;
            Urows[nlb] = 0;

            for (pc = 0; pc < Pc; pc++) {
                p = pr * Pc + pc;
                for (i = rdispls[p]; i < rdispls[p] + rrows[p * nrbp1 + lb];
                     i++) {
                    Ublock[k] = blockr[i];
                    k++;
                    Urows[nlb]++;
                }
                rdispls[p] += rrows[p * nrbp1 + lb];
            }
#ifdef ISORT
            isort1 (Urows[nlb], &(Ublock[k - Urows[nlb]]));
#else
            qsort (&(Ublock[k - Urows[nlb]]), (size_t) (Urows[nlb]),
                   sizeof (int_t), &superlu_sort_perm);
#endif
        }
        SUPERLU_FREE (blockr);
	log_memory( -nblocks * iword, stat );

        /* sort the block in L-factor */
        nblocks = 0;
        for (lb = 0; lb < ncb; lb++) {
            jb = lb * Pc + mycol;
            index = Llu->Lrowind_bc_ptr[lb];
            if (index) {        /* Not an empty column */
                nblocks += index[0];
            }
        }
        if (mycol < nsupers % grid->npcol) {
            jb = ncb * Pc + mycol;
            index = Llu->Lrowind_bc_ptr[ncb];
            if (index) {       /* Not an empty column */
                nblocks += index[0];
            }
        }

        if (!(Lblock = intMalloc_dist (nblocks))) ABORT ("Malloc fails for Lblock[].");
        if (!(Lrows = intMalloc_dist (1 + ncb))) ABORT ("Malloc fails for Lrows[].");

	Lblock_bytes = nblocks * iword;
	Lrows_bytes = (1 + ncb) * iword;
	log_memory(Lblock_bytes + Lrows_bytes, stat);

        for (lb = 0; lb <= ncb; lb++) Lrows[lb] = 0;
        nblocks = 0;
        for (lb = 0; lb < ncb; lb++) {
            Lrows[lb] = 0;

            jb = lb * Pc + mycol;
            index = Llu->Lrowind_bc_ptr[lb];
            if (index) {      /* Not an empty column */
                i = index[0];
                k = BC_HEADER;
                krow = PROW (jb, grid);
                if (krow == myrow)  /* skip the diagonal block */
                {
                    k += LB_DESCRIPTOR + index[k + 1];
                    i--;
                }

                for (j = 0; j < i; j++) {
                    Lblock[nblocks] = index[k];
                    Lrows[lb]++;
                    nblocks++;

                    k += LB_DESCRIPTOR + index[k + 1];
                }
            }
#ifdef ISORT
            isort1 (Lrows[lb], &(Lblock[nblocks - Lrows[lb]]));
#else
            qsort (&(Lblock[nblocks - Lrows[lb]]), (size_t) (Lrows[lb]),
                   sizeof (int_t), &superlu_sort_perm);
#endif
        }
        if (mycol < nsupers % grid->npcol) {
            Lrows[ncb] = 0;
            jb = ncb * Pc + mycol;
            index = Llu->Lrowind_bc_ptr[ncb];
            if (index) {       /* Not an empty column */
                i = index[0];
                k = BC_HEADER;
                krow = PROW (jb, grid);
                if (krow == myrow) { /* skip the diagonal block */
                    k += LB_DESCRIPTOR + index[k + 1];
                    i--;
                }
                for (j = 0; j < i; j++) {
                    Lblock[nblocks] = index[k];
                    Lrows[ncb]++;
                    nblocks++;
                    k += LB_DESCRIPTOR + index[k + 1];
                }
#ifdef ISORT
                isort1 (Lrows[ncb], &(Lblock[nblocks - Lrows[ncb]]));
#else
                qsort (&(Lblock[nblocks - Lrows[ncb]]), (size_t) (Lrows[ncb]),
                       sizeof (int_t), &superlu_sort_perm);
#endif
            }
        }

        /* look for the first local symmetric nonzero block match */
        if (!(sf_block = intMalloc_dist (nsupers))) ABORT ("Malloc fails for sf_block[].");
        if (!(sf_block_l = intMalloc_dist (nsupers))) ABORT ("Malloc fails for sf_block_l[].");

	log_memory( 2 * nsupers * iword, stat );

        for (lb = 0; lb < nsupers; lb++)
            sf_block_l[lb] = nsupers;
        i = 0;
        j = 0;
        for (jb = 0; jb < nlb; jb++) {
            if (Urows[jb] > 0) {
                ib = i + Urows[jb];
                lb = jb * Pc + mycol;
                for (k = 0; k < Lrows[jb]; k++) {
                    while (Ublock[i] < Lblock[j] && i + 1 < ib)
                        i++;

                    if (Ublock[i] == Lblock[j]) {
                        sf_block_l[lb] = Lblock[j];
                        j += (Lrows[jb] - k);
                        k = Lrows[jb];
                    } else {
                        j++;
                    }
                }
                i = ib;
            } else {
                j += Lrows[jb];
            }
        }
        if (mycol < nsupers % grid->npcol) {
            if (Urows[nlb] > 0) {
                ib = i + Urows[nlb];
                lb = nlb * Pc + mycol;
                for (k = 0; k < Lrows[nlb]; k++) {
                    while (Ublock[i] < Lblock[j] && i + 1 < ib)
                        i++;

                    if (Ublock[i] == Lblock[j])
                    {
                        sf_block_l[lb] = Lblock[j];
                        j += (Lrows[nlb] - k);
                        k = Lrows[nlb];
                    }
                    else
                    {
                        j++;
                    }
                }
                i = ib;
            } else {
                j += Lrows[nlb];
            }
        }

        /* compute the first global symmetric matches */
        MPI_Allreduce (sf_block_l, sf_block, nsupers, mpi_int_t, MPI_MIN,
                       grid->comm);
        SUPERLU_FREE (sf_block_l);
	log_memory( -nsupers * iword, stat );

        /* count number of nodes in DAG (i.e., the number of blocks on and above the first match) */
        if (!(nnodes_l = intMalloc_dist (nsupers))) ABORT ("Malloc fails for nnodes_l[].");
        if (!(nnodes_u = intMalloc_dist (nsupers))) ABORT ("Malloc fails for nnodes_u[].");
	log_memory( 2 * nsupers * iword, stat );

        for (lb = 0; lb < nsupers; lb++)  nnodes_l[lb] = 0;
        for (lb = 0; lb < nsupers; lb++)  nnodes_u[lb] = 0;

        nblocks = 0;
        /* from U-factor */
        for (i = 0, jb = 0; jb < nlb; jb++) {
            lb = jb * Pc + mycol;
            ib = i + Urows[jb];
            while (i < ib) {
                if (Ublock[i] <= sf_block[lb]) {
                    nnodes_u[lb]++;
                    i++;
                    nblocks++;
                } else {     /* get out */
                    i = ib;
                }
            }
            i = ib;
        }
        if (mycol < nsupers % grid->npcol) {
            lb = nlb * Pc + mycol;
            ib = i + Urows[nlb];
            while (i < ib) {
                if (Ublock[i] <= sf_block[lb]) {
                    nnodes_u[lb]++;
                    i++;
                    nblocks++;
                } else {         /* get out */
                    i = ib;
                }
            }
            i = ib;
        }

        /* from L-factor */
        for (i = 0, jb = 0; jb < nlb; jb++) {
            lb = jb * Pc + mycol;
            ib = i + Lrows[jb];
            while (i < ib) {
                if (Lblock[i] < sf_block[lb]) {
                    nnodes_l[lb]++;
                    i++;
                    nblocks++;
                } else {
                    i = ib;
                }
            }
            i = ib;
        }
        if (mycol < nsupers % grid->npcol) {
            lb = nlb * Pc + mycol;
            ib = i + Lrows[nlb];
            while (i < ib) {
                if (Lblock[i] < sf_block[lb]) {
                    nnodes_l[lb]++;
                    i++;
                    nblocks++;
                } else {
                    i = ib;
                }
            }
            i = ib;
        }

#ifdef USE_ALLGATHER
        /* insert local nodes in DAG */
        if (!(edag_supno_l = intMalloc_dist (nsupers + nblocks)))
            ABORT ("Malloc fails for edag_supno_l[].");
	edag_supno_l_bytes = (nsupers + nblocks) * iword;
	log_memory(edag_supno_l_bytes, stat);

        iu = il = nblocks = 0;
        for (lb = 0; lb < nsupers; lb++) {
            j = lb / Pc;
            pc = lb % Pc;

            edag_supno_l[nblocks] = nnodes_l[lb] + nnodes_u[lb];
            nblocks++;
            if (mycol == pc) {
                /* from U-factor */
                ib = iu + Urows[j];
                for (jb = 0; jb < nnodes_u[lb]; jb++) {
                    edag_supno_l[nblocks] = Ublock[iu];
                    iu++;
                    nblocks++;
                }
                iu = ib;

                /* from L-factor */
                ib = il + Lrows[j];
                for (jb = 0; jb < nnodes_l[lb]; jb++) {
                    edag_supno_l[nblocks] = Lblock[il];
                    il++;
                    nblocks++;
                }
                il = ib;
            }
        }
        SUPERLU_FREE (nnodes_u);
	log_memory(-nsupers * iword, stat);

        /* form global DAG on each processor */
        MPI_Allgather (&nblocks, 1, MPI_INT, recvcnts, 1, MPI_INT,
                       grid->comm);
        nblocks = recvcnts[0];
        rdispls[0] = 0;
        for (lb = 1; lb < Pc * Pr; lb++) {
            rdispls[lb] = nblocks;
            nblocks += recvcnts[lb];
        }
        if (!(recvbuf = intMalloc_dist (nblocks))) ABORT ("Malloc fails for recvbuf[].");
	log_memory(nblocks * iword, stat);

        MPI_Allgatherv (edag_supno_l, recvcnts[iam], mpi_int_t,
                        recvbuf, recvcnts, rdispls, mpi_int_t, grid->comm);
        SUPERLU_FREE (edag_supno_l);
	log_memory(-edag_supno_l_bytes, stat);

        if (!(edag_supno = SUPERLU_MALLOC (nsupers * sizeof (int_t *))))
            ABORT ("Malloc fails for edag_supno[].");
	log_memory(nsupers * sizeof(int_t *), stat);

        k = 0;
        for (lb = 0; lb < nsupers; lb++) nnodes_l[lb] = 0;
        for (p = 0; p < Pc * Pr; p++) {
            for (lb = 0; lb < nsupers; lb++) {
                nnodes_l[lb] += recvbuf[k];
                k += (1 + recvbuf[k]);
            }
        }
        for (lb = 0; lb < nsupers; lb++) {
            if (nnodes_l[lb] > 0)
                if (!(edag_supno[lb] = intMalloc_dist (nnodes_l[lb])))
                    ABORT ("Malloc fails for edag_supno[lb].");
            nnodes_l[lb] = 0;
        }
        k = 0;
        for (p = 0; p < Pc * Pr; p++) {
            for (lb = 0; lb < nsupers; lb++) {
                jb = k + recvbuf[k] + 1;
                k++;
                for (; k < jb; k++) {
                    edag_supno[lb][nnodes_l[lb]] = recvbuf[k];
                    nnodes_l[lb]++;
                }
            }
        }
        SUPERLU_FREE (recvbuf);
	log_memory(-nblocks * iword, stat);

#else   /* not USE_ALLGATHER */
        int nlsupers = nsupers / Pc;
        if (mycol < nsupers % Pc) nlsupers++;

        /* insert local nodes in DAG */
        if (!(edag_supno_l = intMalloc_dist (nlsupers + nblocks)))
            ABORT ("Malloc fails for edag_supno_l[].");
	edag_supno_l_bytes = (nlsupers + nblocks) * iword;
	log_memory(edag_supno_l_bytes, stat);

        iu = il = nblocks = 0;
        for (lb = 0; lb < nsupers; lb++) {
            j = lb / Pc;
            pc = lb % Pc;
            if (mycol == pc) {
                edag_supno_l[nblocks] = nnodes_l[lb] + nnodes_u[lb];
                nblocks++;
                /* from U-factor */
                ib = iu + Urows[j];
                for (jb = 0; jb < nnodes_u[lb]; jb++) {
                    edag_supno_l[nblocks] = Ublock[iu];
                    iu++;
                    nblocks++;
                }
                iu = ib;

                /* from L-factor */
                ib = il + Lrows[j];
                for (jb = 0; jb < nnodes_l[lb]; jb++) {
                    edag_supno_l[nblocks] = Lblock[il];
                    il++;
                    nblocks++;
                }
                il = ib;
            } else if (nnodes_l[lb] + nnodes_u[lb] != 0)
                printf (" # %d: nnodes[%d]=%d+%d\n", grid->iam, lb,
                        nnodes_l[lb], nnodes_u[lb]);
        }
        SUPERLU_FREE (nnodes_u);
	log_memory(-nsupers * iword, stat);

        /* form global DAG on each processor */  
        MPI_Allgather (&nblocks, 1, MPI_INT, recvcnts, 1, MPI_INT, grid->comm);
        nblocks = recvcnts[0];
        rdispls[0] = 0;
        for (lb = 1; lb < Pc * Pr; lb++) {
            rdispls[lb] = nblocks;
            nblocks += recvcnts[lb];
        }
        if (!(recvbuf = intMalloc_dist (nblocks))) ABORT ("Malloc fails for recvbuf[].");
	log_memory(nblocks * iword, stat);

        MPI_Allgatherv (edag_supno_l, recvcnts[iam], mpi_int_t,
                        recvbuf, recvcnts, rdispls, mpi_int_t, grid->comm);

        SUPERLU_FREE (edag_supno_l);
	log_memory(-edag_supno_l_bytes, stat);

        if (!(edag_supno = SUPERLU_MALLOC (nsupers * sizeof (int_t *))))
            ABORT ("Malloc fails for edag_supno[].");
	log_memory(nsupers * sizeof(int_t *), stat);

        k = 0;
        for (lb = 0; lb < nsupers; lb++) nnodes_l[lb] = 0;
        for (p = 0; p < Pc * Pr; p++) {
            yourcol = MYCOL (p, grid);

            for (lb = 0; lb < nsupers; lb++) {
                j = lb / Pc;
                pc = lb % Pc;
                if (yourcol == pc) {
                    nnodes_l[lb] += recvbuf[k];
                    k += (1 + recvbuf[k]);
                }
            }
        }
        for (lb = 0; lb < nsupers; lb++) {
            if (nnodes_l[lb] > 0)
                if (!(edag_supno[lb] = intMalloc_dist (nnodes_l[lb])))
                    ABORT ("Malloc fails for edag_supno[lb].");
            nnodes_l[lb] = 0;
        }
        k = 0;
        for (p = 0; p < Pc * Pr; p++) {
            yourcol = MYCOL (p, grid);

            for (lb = 0; lb < nsupers; lb++) {
                j = lb / Pc;
                pc = lb % Pc;
                if (yourcol == pc)
                {
                    jb = k + recvbuf[k] + 1;
                    k++;
                    for (; k < jb; k++)
                    {
                        edag_supno[lb][nnodes_l[lb]] = recvbuf[k];
                        nnodes_l[lb]++;
                    }
                }
            }
        }
        SUPERLU_FREE (recvbuf);
	log_memory( -nblocks * iword , stat);

#endif  /* end USE_ALLGATHER */
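
        /* At this point every process holds the complete supernodal DAG:
         * edag_supno[lb] lists the supernodes recorded as successors of
         * supernode lb, and nnodes_l[lb] is the length of that list.  The
         * code below performs a Kahn-style topological sort over this DAG
         * (count incoming edges, then repeatedly pop zero-in-degree nodes
         * from a FIFO queue) to produce the static factorization order
         * perm_c_supno[]. */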

        /* initialize the num of child for each node */
        num_child = SUPERLU_MALLOC (nsupers * sizeof (int_t));
        for (i = 0; i < nsupers; i++) num_child[i] = 0;
        for (i = 0; i < nsupers; i++) {
            for (jb = 0; jb < nnodes_l[i]; jb++) {
                num_child[edag_supno[i][jb]]++;
            }
        }

        /* push initial leaves to the fifo queue */
        nnodes = 0;
        for (i = 0; i < nsupers; i++) {
            if (num_child[i] == 0) {
                ptr = SUPERLU_MALLOC (sizeof (etree_node));
                ptr->id = i;
                ptr->next = NULL;
                /*printf( " == push leaf %d (%d) ==\n",i,nnodes ); */
                nnodes++;

                if (nnodes == 1) {
                    head = ptr;
                    tail = ptr;
                } else {
                    tail->next = ptr;
                    tail = ptr;
                }
            }
        }

        /* process fifo queue, and compute the ordering */
        i = 0;

        while (nnodes > 0) {
            /*printf( "=== pop %d (%d) ===\n",head->id,i ); */
            ptr = head;
            j = ptr->id;
            head = ptr->next;

            perm_c_supno[i] = j;
            SUPERLU_FREE (ptr);
            i++;
            nnodes--;

            for (jb = 0; jb < nnodes_l[j]; jb++) {
                num_child[edag_supno[j][jb]]--;
                if (num_child[edag_supno[j][jb]] == 0) {
                    nnodes++;

                    ptr = SUPERLU_MALLOC (sizeof (etree_node));
                    ptr->id = edag_supno[j][jb];
                    ptr->next = NULL;

                    /*printf( "=== push %d ===\n",ptr->id ); */
                    if (nnodes == 1) {
                        head = ptr;
                        tail = ptr;
                    } else {
                        tail->next = ptr;
                        tail = ptr;
                    }
                }
            }
            /*printf( "\n" ); */
        }
        for (lb = 0; lb < nsupers; lb++)
            if (nnodes_l[lb] > 0)  SUPERLU_FREE (edag_supno[lb]);

        SUPERLU_FREE (num_child);
        SUPERLU_FREE (edag_supno);
        SUPERLU_FREE (nnodes_l);
        SUPERLU_FREE (sf_block);
        SUPERLU_FREE (sendcnts);

	log_memory(-(4 * nsupers + (4 + 2 * nrbp1)*Pr*Pc) * iword, stat);

        SUPERLU_FREE (Ublock);
        SUPERLU_FREE (Urows);
        SUPERLU_FREE (Lblock);
        SUPERLU_FREE (Lrows);
	log_memory(-(Ublock_bytes + Urows_bytes + Lblock_bytes + Lrows_bytes), stat);
    }
    /* ======================== *
     * end of static scheduling *
     * ======================== */
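
    /* Invert the schedule: iperm_c_supno[s] gives the position of
       supernode s in the static factorization order perm_c_supno[]. */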

    for (lb = 0; lb < nsupers; lb++) iperm_c_supno[perm_c_supno[lb]] = lb;

#if ( DEBUGlevel >= 1 )
    print_memorylog(stat, "after static schedule");
#endif

    return 0;
} /* STATIC_SCHEDULE */
Example No. 7
int
pdtrans(char *trans, int *m, int *n, int * mb, int *nb, double *a, int *lda, double *beta,
	double *c__, int *ldc, int *imrow, int *imcol, double *work, int *iwork) {
    /* System generated locals */
    long a_dim1, a_offset, c_dim1, c_offset;
    int i__1, i__2, i__3, i__4;

    /* Local variables */
    int j1, k1, k2, ml, nl, mp, mq, np, nq, mb0, mb1, mb2, nb0,
	    nb1, nb2, kia, kja, kic, kjc, lbm, lbn, lcm, ldt, lbm0, lbm1,
	     lbm2, lbn0, lbn1, lbn2, igcd;
    long ipt;
    int mcol, info, lcmp, lcmq, item, ncol, kmod1, kmod2;
    double tbeta;
    int kpcol, mpcol, npcol, mrcol, mycol, kprow, mprow, nprow, mrrow, myrow;

/*  -- PUMMA Package routine (version 2.1) -- */
/*     Jaeyoung Choi, Oak Ridge National Laboratory. */
/*     Jack Dongarra, Univ. of Tennessee, Oak Ridge National Laboratory. */
/*     David Walker,  Oak Ridge National Laboratory. */
/*     October 31, 1994. */

/*  Purpose */

/*  PDTRANS is a routine of the PUMMA package, based on block-cyclic */
/*  data distribution on a 2-D process configuration. */

/*  It is used for the following matrix transposition, */

/*     Form  C := A' + beta*C */

/*  where beta is a scalar, and A and C are matrices, with A an M by N */
/*  matrix (globally), and C an N by M matrix (globally). */

/*  Parameters */

/*  TRANS  - (input) CHARACTER*1 */
/*           TRANS specifies whether A is transposed or conjugate */
/*           transposed. */

/*              TRANS = 'T',   transpose; */

/*              TRANS = 'C',   conjugate transpose. */

/*  M      - (input) INTEGER */
/*           M specifies the (global) number of rows of the matrix A and */
/*           the (global) number of columns of the matrix C.  M >= 0. */

/*  N      - (input) INTEGER */
/*           N specifies the (global) number of columns of the matrix A */
/*           and the (global) number of rows of the matrix C.  N >= 0. */

/*  MB     - (input) INTEGER */
/*           MB specifies the row block size of the matrix A and the */
/*           column block size of the matrix C.  MB >= 1. */

/*  NB     - (input) INTEGER */
/*           NB specifies the column block size of the matrix A and the */
/*           row block size of the matrix C.  NB >= 1. */

/*  A      - (input) DOUBLE PRECISION array of DIMENSION ( LDA, Nq ). */
/*           The leading Mp by Nq part of the array A must contain the */
/*           local matrix  A.  Mp and Nq are local variables */
/*           (see description of local parameters). */

/*  LDA    - (input) INTEGER */
/*           The leading dimension of the (local) array A. */
/*           LDA >= MAX( 1, Mp ). */

/*  BETA   - (input) DOUBLE PRECISION */
/*           BETA  specifies the scalar beta.  When BETA is supplied as */
/*           zero then C need not be set on input. */

/*  C      - (input/output) DOUBLE PRECISION array of DIMENSION (LDC, Mq). */
/*           On entry the leading Np by Mq part of the array C must */
/*           contain the local matrix C, except when beta is zero, */
/*           in which case C need not be set on entry. */
/*           On exit, the array C is overwritten by the Np by Mq matrix */
/*           (A'+beta*C).  Np and Mq are local variables */
/*           (see description of local parameters). */

/*  LDC    - (input) INTEGER */
/*           The leading dimension of the (local) array C. */
/*           LDC >= MAX( 1, Np ). */

/*  IMROW  - (input) INTEGER */
/*           IMROW specifies a row of the process template, which holds */
/*           the first block of the matrices.  0 <= IMROW < NPROW. */

/*  IMCOL  - (input) INTEGER */
/*           IMCOL specifies a column of the process template, which */
/*           holds the first block of the matrices.  0 <= IMCOL < NPCOL. */

/*  WORK   - (workspace) DOUBLE PRECISION array */
/*           See requirements. */

/*  IWORK  - (workspace) INTEGER array */
/*           See requirements. */

/*  Local  Parameters */

/*  LCM   =  the lowest common multiple of P and Q */
/*  LCMP  =  LCM/P = number of template rows in LCM block */
/*  LCMQ  =  LCM/Q = number of template columns in LCM block */
/*  IGCD   =  the greatest common divisor (GCD) of P and Q */
/*  MpxNq =  size of (local) matrix A in the process, iam */
/*  NpxMq =  size of (local) matrix C in the process, iam */
/*  KMOD  =  Define Group I.D. */
/*  item  =  temporary integer parameter */

/*    Two buffers for storing A' and T(= subblock of A') */
/*       WORK       <== A' */
/*       WORK(IPT)  <== T */

/*    Three integer buffers */
/*       IWORK(1,k) <== starting point of row subblock of A  to send and */
/*                      C to receive in K2 loop (rowwise communication) */
/*       IWORK(2,k) <== starting point of column subblock of A to send in */
/*                      J1 loop (columnwise communication) */
/*       IWORK(3,k) <== starting point of column subblock of C to receive */
/*                      in J1 loop (columnwise communication) */

/*  Requirements (approximate) */

/*   Size(IWORK) = 3 x MAX(P, Q) */
/*   Size(WORK)  = 2 x Ceil(Ceil(M,MB),LCM)xMB x Ceil(Ceil(N,NB),LCM)xNB */
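
/*  For illustration only (not part of the original interface): on a P x Q */
/*  process grid a caller could size the workspaces to meet the above */
/*  requirements roughly as */

/*      lcm    = least common multiple of P and Q */
/*      lwork  = 2 * (iceil(iceil(M,MB),lcm)*MB) * (iceil(iceil(N,NB),lcm)*NB) */
/*      liwork = 3 * max(P,Q) */

/*  where iceil(a,b) = (a+b-1)/b. */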

/*     Get grid parameters */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1;
    c__ -= c_offset;
    --work;
    --iwork;

    /* Function Body */
    Cblacs_gridinfo(context_1.ictxt, &nprow, &npcol, &myrow, &mycol);

/*     Test for the input parameters. */

    info = 0;
    if (*trans != 'T' && *trans != 'C') {
	info = 1;
    } else if (*m < 0) {
	info = 2;
    } else if (*n < 0) {
	info = 3;
    } else if (*mb < 1) {
	info = 4;
    } else if (*nb < 1) {
	info = 5;
    } else if (*lda < 1) {
	info = 7;
    } else if (*ldc < 1) {
	info = 10;
    } else if (*imrow < 0 || *imrow >= nprow) {
	info = 11;
    } else if (*imcol < 0 || *imcol >= npcol) {
	info = 12;
    }

L10:
    if (info != 0) {
	pxerbla( &context_1.ictxt, "PDTRANS", &info );
	return 0;
    }

/*     Initialize parameters */

    mprow = nprow + myrow;
    mpcol = npcol + mycol;
    mrrow = (mprow - *imrow) % nprow;
    mrcol = (mpcol - *imcol) % npcol;

    lcm = ilcm_(&nprow, &npcol);
    lcmp = lcm / nprow;
    lcmq = lcm / npcol;
    igcd = nprow / lcmq;

    mp = numroc_(m, mb, &mrrow, &c__0, &nprow);
    mq = numroc_(m, mb, &mrcol, &c__0, &npcol);
    np = numroc_(n, nb, &mrrow, &c__0, &nprow);
    nq = numroc_(n, nb, &mrcol, &c__0, &npcol);

    i__1 = iceil_(m, mb);
    lbm = iceil_(&i__1, &lcm);
    i__1 = iceil_(n, nb);
    lbn = iceil_(&i__1, &lcm);

/*     Test for the input parameters again with local parameters */

    if (*lda < mp) {
	info = 7;
    } else if (*ldc < np) {
	info = 10;
    }
    if (info != 0) {
	goto L10;
    }

/*     Quick return if possible. */

    if (*m == 0 || *n == 0) {
	return 0;
    }

/*     At first, scale C with beta if beta != 0.0 & beta != 1.0 */

    tbeta = *beta;
    if (*beta != 0. && *beta != 1.) {
	i__1 = mq;
	for (j1 = 1; j1 <= i__1; ++j1) {
	    HPL_dscal( np, *beta, &c__[j1 * c_dim1 + 1], 1 );
/* L20: */
	}
	tbeta = 1.;
    }
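
/*     If C was pre-scaled above, the remaining updates accumulate with an */
/*     effective beta of one (tbeta = 1); otherwise tbeta keeps the original */
/*     value of 0 or 1. */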

    commtrb_1.iaz = lcmp * *mb;
    commtrb_1.jaz = lcmq * *nb;
    commtrb_1.itz = lcmp * *nb;
    commtrb_1.jtz = lcmq * *mb;

    ml = lbm * *mb;
    nl = lbn * *nb;
    ipt = (long)ml * (long)nl + 1;
    ldt = nl;
    kprow = mrrow + nprow;
    kpcol = mrcol + npcol;

/*     Initialize Parameters -- Compute the positions of subblocks */

    i__1 = npcol - 1;
    for (k1 = 0; k1 <= i__1; ++k1) {
	ncol = (kpcol - k1) % npcol;
	i__2 = lcmq - 1;
	for (j1 = 0; j1 <= i__2; ++j1) {
	    item = npcol * j1 + ncol;
	    if (item % nprow == mrrow) {
		iwork[ncol * 3 + 1] = item / nprow;
	    }
/* L30: */
	}
    }

    i__2 = lcmq - 1;
    for (j1 = 0; j1 <= i__2; ++j1) {
	item = (npcol * j1 + mrcol) % nprow;
	iwork[item * 3 + 2] = j1;
	iwork[item * 3 + 3] = j1;
	i__1 = igcd - 1;
	for (k1 = 1; k1 <= i__1; ++k1) {
	    iwork[(item + nprow - k1) % nprow * 3 + 2] = j1;
	    iwork[(item + k1) % nprow * 3 + 3] = j1;
/* L40: */
	}
    }

/*     Set parameters for efficient copying */

    lbm0 = lbm;
    lbm1 = lbm;
    lbm2 = lbm;
    lbn0 = lbn;
    lbn1 = lbn;
    lbn2 = lbn;
    mb0 = *mb;
    mb1 = *mb;
    mb2 = *mb;
    nb0 = *nb;
    nb1 = *nb;
    nb2 = *nb;

    if (nprow == npcol) {
	lbm0 = 1;
	lbn0 = 1;
	mb0 = mp;
	nb0 = nq;
    }
    if (nprow == lcm) {
	lbm1 = 1;
	lbn2 = 1;
	mb1 = mp;
	nb2 = np;
    }
    if (npcol == lcm) {
	lbn1 = 1;
	lbm2 = 1;
	nb1 = nq;
	mb2 = mq;
    }

/*     For each K2 loop (rowwise), Copy A' to WORK & Send it to KTPROC */
/*                                 then, Receive WORK and Copy WORK to C */

    kmod1 = (nprow + mrcol - mrrow) % igcd;
    kmod2 = (igcd - kmod1) % igcd;

    i__1 = lcmp - 1;
    for (k2 = 0; k2 <= i__1; ++k2) {

/*        Copy A' to WORK in the appropriate order & Send it */

	k1 = k2 * igcd + kmod1;
	mcol = (kpcol - k1) % npcol;
	kia = iwork[mcol * 3 + 1] * *mb;
	mcol = (mcol + *imcol) % npcol;
	ncol = (mrcol + k2 * igcd + kmod2) % npcol;
	kic = iwork[ncol * 3 + 1] * *nb;
	ncol = (ncol + *imcol) % npcol;

	i__2 = lcmq - 1;
	for (j1 = 0; j1 <= i__2; ++j1) {
	    kja = iwork[(mrrow + igcd * j1) % nprow * 3 + 2] * *nb;

	    if (myrow == (myrow + igcd * j1 + kmod1) % nprow && mycol == mcol) {
		kjc = iwork[(kprow - igcd * j1) % nprow * 3 + 3] * *mb;
		i__3 = mp - kia;
		i__4 = nq - kja;
		dtr2mx_(&a[kia + 1 + (kja + 1) * a_dim1], lda, &tbeta, &c__[
			kic + 1 + (kjc + 1) * c_dim1], ldc, &lbm0, &lbn0, &
			mb0, &nb0, &i__3, &i__4);

	    } else {
		i__3 = mp - kia;
		i__4 = nq - kja;
		dtr2bf_(&a[kia + 1 + (kja + 1) * a_dim1], lda, &work[1], &ldt,
			 &lbm1, &lbn1, &mb1, &nb1, &i__3, &i__4);

		if (nprow == npcol && *beta == 0. && *ldc == ldt) {
		    i__3 = (myrow + igcd * j1 + kmod1) % nprow;
		    i__4 = (mprow - igcd * j1 - kmod2) % nprow;
		    kjc = iwork[(kprow - igcd * j1) % nprow * 3 + 3] * *mb;
#if 0
		    Cdgesd2d(context_1.ictxt,nl,ml,&work[1],nl,i__3,mcol);
		    Cdgerv2d(context_1.ictxt,nl,ml,&c__[(kjc + 1) * c_dim1 + 1],*ldc,i__4,ncol);
#else
		    Cblacs_dSendrecv( context_1.ictxt,
                          nl, ml, &work[1], nl, i__3, mcol,
                          nl, ml, &c__[(kjc + 1) * c_dim1 + 1], *ldc, i__4, ncol );
#endif

		} else {
		    i__3 = (myrow + igcd * j1 + kmod1) % nprow;
		    i__4 = (mprow - igcd * j1 - kmod2) % nprow;
#if 0
		    Cdgesd2d(context_1.ictxt,nl,ml,&work[1],nl,i__3,mcol);
		    Cdgerv2d(context_1.ictxt,nl,ml,&work[ipt],nl, i__4,ncol);
#else
        Cblacs_dSendrecv( context_1.ictxt,
                          nl, ml, &work[1],   nl, i__3, mcol,
                          nl, ml, &work[ipt], nl, i__4, ncol );
#endif

		    kjc = iwork[(kprow - igcd * j1) % nprow * 3 + 3] * *mb;
		    i__3 = np - kic;
		    i__4 = mq - kjc;
		    dmv2mx_(&work[ipt], &ldt, &tbeta, &c__[kic + 1 + (kjc + 1)
			     * c_dim1], ldc, &lbn2, &lbm2, &nb2, &mb2, &i__3,
			    &i__4);
		}
	    }
	}
    }

    return 0;
} /* pdtrans_ */
Example No. 8
void
pzgstrs(int_t n, LUstruct_t *LUstruct, 
	ScalePermstruct_t *ScalePermstruct,
	gridinfo_t *grid, doublecomplex *B,
	int_t m_loc, int_t fst_row, int_t ldb, int nrhs,
	SOLVEstruct_t *SOLVEstruct,
	SuperLUStat_t *stat, int *info)
{
    Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
    LocalLU_t *Llu = LUstruct->Llu;
    doublecomplex alpha = {1.0, 0.0};
    doublecomplex zero = {0.0, 0.0};
    doublecomplex *lsum;  /* Local running sum of the updates to B-components */
    doublecomplex *x;     /* X component at step k. */
		    /* NOTE: x and lsum are of same size. */
    doublecomplex *lusup, *dest;
    doublecomplex *recvbuf, *tempv;
    doublecomplex *rtemp; /* Result of full matrix-vector multiply. */
    int_t  **Ufstnz_br_ptr = Llu->Ufstnz_br_ptr;
    int_t  *Urbs, *Urbs1; /* Number of row blocks in each block column of U. */
    Ucb_indptr_t **Ucb_indptr;/* Vertical linked list pointing to Uindex[] */
    int_t  **Ucb_valptr;      /* Vertical linked list pointing to Unzval[] */
    int_t  iam, kcol, krow, mycol, myrow;
    int_t  i, ii, il, j, jj, k, lb, ljb, lk, lptr, luptr;
    int_t  nb, nlb, nub, nsupers;
    int_t  *xsup, *supno, *lsub, *usub;
    int_t  *ilsum;    /* Starting position of each supernode in lsum (LOCAL)*/
    int_t  Pc, Pr;
    int    knsupc, nsupr;
    int    ldalsum;   /* Number of lsum entries locally owned. */
    int    maxrecvsz, p, pi;
    int_t  **Lrowind_bc_ptr;
    doublecomplex **Lnzval_bc_ptr;
    MPI_Status status;
    MPI_Request *send_req, recv_req;
    pxgstrs_comm_t *gstrs_comm = SOLVEstruct->gstrs_comm;

    /*-- Counts used for L-solve --*/
    int_t  *fmod;         /* Modification count for L-solve --
                             Count the number of local block products to
                             be summed into lsum[lk]. */
    int_t  **fsendx_plist = Llu->fsendx_plist;
    int_t  nfrecvx = Llu->nfrecvx; /* Number of X components to be recv'd. */
    int_t  *frecv;        /* Count of lsum[lk] contributions to be received
                             from processes in this row. 
                             It is only valid on the diagonal processes. */
    int_t  nfrecvmod = 0; /* Count of total modifications to be recv'd. */
    int_t  nleaf = 0, nroot = 0;

    /*-- Counts used for U-solve --*/
    int_t  *bmod;         /* Modification count for U-solve. */
    int_t  **bsendx_plist = Llu->bsendx_plist;
    int_t  nbrecvx = Llu->nbrecvx; /* Number of X components to be recv'd. */
    int_t  *brecv;        /* Count of modifications to be recv'd from
			     processes in this row. */
    int_t  nbrecvmod = 0; /* Count of total modifications to be recv'd. */
    double t;
#if ( DEBUGlevel>=2 )
    int_t Ublocks = 0;
#endif

    int_t *mod_bit = Llu->mod_bit; /* flag contribution from each row block */
 
    t = SuperLU_timer_();

    /* Test input parameters. */
    *info = 0;
    if ( n < 0 ) *info = -1;
    else if ( nrhs < 0 ) *info = -9;
    if ( *info ) {
	pxerbla("PZGSTRS", grid, -*info);
	return;
    }
	
    /*
     * Initialization.
     */
    iam = grid->iam;
    Pc = grid->npcol;
    Pr = grid->nprow;
    myrow = MYROW( iam, grid );
    mycol = MYCOL( iam, grid );
    xsup = Glu_persist->xsup;
    supno = Glu_persist->supno;
    nsupers = supno[n-1] + 1;
    Lrowind_bc_ptr = Llu->Lrowind_bc_ptr;
    Lnzval_bc_ptr = Llu->Lnzval_bc_ptr;
    nlb = CEILING( nsupers, Pr ); /* Number of local block rows. */

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Enter pzgstrs()");
#endif

    stat->ops[SOLVE] = 0.0;
    Llu->SolveMsgSent = 0;

    /* Save the count to be altered so it can be used by a
       subsequent call to PZGSTRS. */
    if ( !(fmod = intMalloc_dist(nlb)) )
	ABORT("Calloc fails for fmod[].");
    for (i = 0; i < nlb; ++i) fmod[i] = Llu->fmod[i];
    if ( !(frecv = intMalloc_dist(nlb)) )
	ABORT("Malloc fails for frecv[].");
    Llu->frecv = frecv;

    k = SUPERLU_MAX( Llu->nfsendx, Llu->nbsendx ) + nlb;
    if ( !(send_req = (MPI_Request*) SUPERLU_MALLOC(k*sizeof(MPI_Request))) )
	ABORT("Malloc fails for send_req[].");

#ifdef _CRAY
    ftcs1 = _cptofcd("L", strlen("L"));
    ftcs2 = _cptofcd("N", strlen("N"));
    ftcs3 = _cptofcd("U", strlen("U"));
#endif


    /* Obtain ilsum[] and ldalsum for process column 0. */
    ilsum = Llu->ilsum;
    ldalsum = Llu->ldalsum;

    /* Allocate working storage. */
    knsupc = sp_ienv_dist(3);
    maxrecvsz = knsupc * nrhs + SUPERLU_MAX( XK_H, LSUM_H );
    if ( !(lsum = doublecomplexCalloc_dist(((size_t)ldalsum)*nrhs + nlb*LSUM_H)) )
	ABORT("Calloc fails for lsum[].");
    if ( !(x = doublecomplexMalloc_dist(ldalsum * nrhs + nlb * XK_H)) )
	ABORT("Malloc fails for x[].");
    if ( !(recvbuf = doublecomplexMalloc_dist(maxrecvsz)) )
	ABORT("Malloc fails for recvbuf[].");
    if ( !(rtemp = doublecomplexCalloc_dist(maxrecvsz)) )
	ABORT("Malloc fails for rtemp[].");

    
    /*---------------------------------------------------
     * Forward solve Ly = b.
     *---------------------------------------------------*/
    /* Redistribute B into X on the diagonal processes. */
    pzReDistribute_B_to_X(B, m_loc, nrhs, ldb, fst_row, ilsum, x, 
			  ScalePermstruct, Glu_persist, grid, SOLVEstruct);

    /* Set up the headers in lsum[]. */
    ii = 0;
    for (k = 0; k < nsupers; ++k) {
	knsupc = SuperSize( k );
	krow = PROW( k, grid );
	if ( myrow == krow ) {
	    lk = LBi( k, grid );   /* Local block number. */
	    il = LSUM_BLK( lk );
	    lsum[il - LSUM_H].r = k;/* Block number prepended in the header.*/
	    lsum[il - LSUM_H].i = 0;
	}
	ii += knsupc;
    }

    /*
     * Compute frecv[] and nfrecvmod counts on the diagonal processes.
     */
    {
	superlu_scope_t *scp = &grid->rscp;

#if 1
	for (k = 0; k < nlb; ++k) mod_bit[k] = 0;
	for (k = 0; k < nsupers; ++k) {
	    krow = PROW( k, grid );
	    if ( myrow == krow ) {
		lk = LBi( k, grid );    /* local block number */
		kcol = PCOL( k, grid );
		if ( mycol != kcol && fmod[lk] )
		    mod_bit[lk] = 1;  /* contribution from off-diagonal */
	    }
	}
	/*PrintInt10("mod_bit", nlb, mod_bit);*/
	
#if ( PROFlevel>=2 )
	t_reduce_tmp = SuperLU_timer_();
#endif
	/* Every process receives the count, but it is only useful on the
	   diagonal processes.  */
	MPI_Allreduce( mod_bit, frecv, nlb, mpi_int_t, MPI_SUM, scp->comm );

#if ( PROFlevel>=2 )
	t_reduce += SuperLU_timer_() - t_reduce_tmp;
#endif

	for (k = 0; k < nsupers; ++k) {
	    krow = PROW( k, grid );
	    if ( myrow == krow ) {
		lk = LBi( k, grid );    /* local block number */
		kcol = PCOL( k, grid );
		if ( mycol == kcol ) { /* diagonal process */
		    nfrecvmod += frecv[lk];
		    if ( !frecv[lk] && !fmod[lk] ) ++nleaf;
		}
	    }
	}

#else /* old */

	for (k = 0; k < nsupers; ++k) {
	    krow = PROW( k, grid );
	    if ( myrow == krow ) {
		lk = LBi( k, grid );    /* Local block number. */
		kcol = PCOL( k, grid ); /* Root process in this row scope. */
		if ( mycol != kcol && fmod[lk] )
		    i = 1;  /* Contribution from non-diagonal process. */
		else i = 0;
		MPI_Reduce( &i, &frecv[lk], 1, mpi_int_t,
			   MPI_SUM, kcol, scp->comm );
		if ( mycol == kcol ) { /* Diagonal process. */
		    nfrecvmod += frecv[lk];
		    if ( !frecv[lk] && !fmod[lk] ) ++nleaf;
#if ( DEBUGlevel>=2 )
		    printf("(%2d) frecv[%4d]  %2d\n", iam, k, frecv[lk]);
		    assert( frecv[lk] < Pc );
#endif
		}
	    }
	}
#endif
    }

    /* ---------------------------------------------------------
       Solve the leaf nodes first by all the diagonal processes.
       --------------------------------------------------------- */
#if ( DEBUGlevel>=2 )
    printf("(%2d) nleaf %4d\n", iam, nleaf);
#endif
    for (k = 0; k < nsupers && nleaf; ++k) {
	krow = PROW( k, grid );
	kcol = PCOL( k, grid );
	if ( myrow == krow && mycol == kcol ) { /* Diagonal process */
	    knsupc = SuperSize( k );
	    lk = LBi( k, grid );
	    if ( frecv[lk]==0 && fmod[lk]==0 ) {
		fmod[lk] = -1;  /* Do not solve X[k] in the future. */
		ii = X_BLK( lk );
		lk = LBj( k, grid ); /* Local block number, column-wise. */
		lsub = Lrowind_bc_ptr[lk];
		lusup = Lnzval_bc_ptr[lk];
		nsupr = lsub[1];
#ifdef _CRAY
		CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &knsupc, &nrhs, &alpha,
		      lusup, &nsupr, &x[ii], &knsupc);
#elif defined (USE_VENDOR_BLAS)
		ztrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, 
		       lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1);
#else
		ztrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, 
		       lusup, &nsupr, &x[ii], &knsupc);
#endif
		stat->ops[SOLVE] += 4 * knsupc * (knsupc - 1) * nrhs
		    + 10 * knsupc * nrhs; /* complex division */
		--nleaf;
#if ( DEBUGlevel>=2 )
		printf("(%2d) Solve X[%2d]\n", iam, k);
#endif
		
		/*
		 * Send Xk to process column Pc[k].
		 */
		for (p = 0; p < Pr; ++p) {
		    if ( fsendx_plist[lk][p] != EMPTY ) {
			pi = PNUM( p, kcol, grid );

			MPI_Isend( &x[ii - XK_H], knsupc * nrhs + XK_H,
				   SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm,
                                   &send_req[Llu->SolveMsgSent++]);
#if 0
			MPI_Send( &x[ii - XK_H], knsupc * nrhs + XK_H,
				 SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm );
#endif
#if ( DEBUGlevel>=2 )
			printf("(%2d) Sent X[%2.0f] to P %2d\n",
			       iam, x[ii-XK_H].r, pi);
#endif
		    }
		}
		/*
		 * Perform local block modifications: lsum[i] -= L_i,k * X[k]
		 */
		nb = lsub[0] - 1;
		lptr = BC_HEADER + LB_DESCRIPTOR + knsupc;
		luptr = knsupc; /* Skip diagonal block L(k,k). */
		
		zlsum_fmod(lsum, x, &x[ii], rtemp, nrhs, knsupc, k,
			   fmod, nb, lptr, luptr, xsup, grid, Llu, 
			   send_req, stat);
	    }
	} /* if diagonal process ... */
    } /* for k ... */

    /* -----------------------------------------------------------
       Compute the internal nodes asynchronously by all processes.
       ----------------------------------------------------------- */
#if ( DEBUGlevel>=2 )
    printf("(%2d) nfrecvx %4d,  nfrecvmod %4d,  nleaf %4d\n",
	   iam, nfrecvx, nfrecvmod, nleaf);
#endif
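
    /* Message-driven phase of the L-solve: each process blocks in MPI_Recv
       and dispatches on the message tag.  An Xk message carries a solved X
       block and triggers local lsum updates (zlsum_fmod); an LSUM message
       carries partial sums for a block owned by this diagonal process, which
       solves its block and forwards it once all expected contributions have
       arrived. */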

    while ( nfrecvx || nfrecvmod ) { /* While not finished. */

	/* Receive a message. */
	MPI_Recv( recvbuf, maxrecvsz, SuperLU_MPI_DOUBLE_COMPLEX,
                  MPI_ANY_SOURCE, MPI_ANY_TAG, grid->comm, &status );

        k = (*recvbuf).r;

#if ( DEBUGlevel>=2 )
	printf("(%2d) Recv'd block %d, tag %2d\n", iam, k, status.MPI_TAG);
#endif
	
	switch ( status.MPI_TAG ) {
	  case Xk:
	      --nfrecvx;
	      lk = LBj( k, grid ); /* Local block number, column-wise. */
	      lsub = Lrowind_bc_ptr[lk];
	      lusup = Lnzval_bc_ptr[lk];
	      if ( lsub ) {
		  nb   = lsub[0];
		  lptr = BC_HEADER;
		  luptr = 0;
		  knsupc = SuperSize( k );

		  /*
		   * Perform local block modifications: lsum[i] -= L_i,k * X[k]
		   */
		  zlsum_fmod(lsum, x, &recvbuf[XK_H], rtemp, nrhs, knsupc, k,
			     fmod, nb, lptr, luptr, xsup, grid, Llu, 
			     send_req, stat);
	      } /* if lsub */

	      break;

	  case LSUM: /* Receiver must be a diagonal process */
	      --nfrecvmod;
	      lk = LBi( k, grid ); /* Local block number, row-wise. */
	      ii = X_BLK( lk );
	      knsupc = SuperSize( k );
	      tempv = &recvbuf[LSUM_H];
	      RHS_ITERATE(j) {
		  for (i = 0; i < knsupc; ++i)
		      z_add(&x[i + ii + j*knsupc],
			    &x[i + ii + j*knsupc],
			    &tempv[i + j*knsupc]);
	      }

	      if ( (--frecv[lk])==0 && fmod[lk]==0 ) {
		  fmod[lk] = -1; /* Do not solve X[k] in the future. */
		  lk = LBj( k, grid ); /* Local block number, column-wise. */
		  lsub = Lrowind_bc_ptr[lk];
		  lusup = Lnzval_bc_ptr[lk];
		  nsupr = lsub[1];
#ifdef _CRAY
		  CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &knsupc, &nrhs, &alpha,
			lusup, &nsupr, &x[ii], &knsupc);
#elif defined (USE_VENDOR_BLAS)
		  ztrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, 
			 lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1);
#else
		  ztrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, 
			 lusup, &nsupr, &x[ii], &knsupc);
#endif
		  stat->ops[SOLVE] += 4 * knsupc * (knsupc - 1) * nrhs
		      + 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
		  printf("(%2d) Solve X[%2d]\n", iam, k);
#endif
		
		  /*
		   * Send Xk to process column Pc[k].
		   */
		  kcol = PCOL( k, grid );
		  for (p = 0; p < Pr; ++p) {
		      if ( fsendx_plist[lk][p] != EMPTY ) {
			  pi = PNUM( p, kcol, grid );

			  MPI_Isend( &x[ii-XK_H], knsupc * nrhs + XK_H,
                                     SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm,
                                     &send_req[Llu->SolveMsgSent++]);
#if 0
			  MPI_Send( &x[ii - XK_H], knsupc * nrhs + XK_H,
				    SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm );
#endif
#if ( DEBUGlevel>=2 )
			  printf("(%2d) Sent X[%2.0f] to P %2d\n",
				 iam, x[ii-XK_H].r, pi);
#endif
		      }
                  }
		  /*
		   * Perform local block modifications.
		   */
		  nb = lsub[0] - 1;
		  lptr = BC_HEADER + LB_DESCRIPTOR + knsupc;
		  luptr = knsupc; /* Skip diagonal block L(k,k). */

		  zlsum_fmod(lsum, x, &x[ii], rtemp, nrhs, knsupc, k,
			     fmod, nb, lptr, luptr, xsup, grid, Llu,
			     send_req, stat);
	      } /* if */

	      break;

#if ( DEBUGlevel>=2 )
	    default:
	      printf("(%2d) Recv'd wrong message tag %4d\n", status.MPI_TAG);
	      break;
#endif
	  } /* switch */

    } /* while not finished ... */


#if ( PRNTlevel>=2 )
    t = SuperLU_timer_() - t;
    if ( !iam ) printf(".. L-solve time\t%8.2f\n", t);
    t = SuperLU_timer_();
#endif

#if ( DEBUGlevel==2 )
    {
      printf("(%d) .. After L-solve: y =\n", iam);
      for (i = 0, k = 0; k < nsupers; ++k) {
	  krow = PROW( k, grid );
	  kcol = PCOL( k, grid );
	  if ( myrow == krow && mycol == kcol ) { /* Diagonal process */
	      knsupc = SuperSize( k );
	      lk = LBi( k, grid );
	      ii = X_BLK( lk );
	      for (j = 0; j < knsupc; ++j)
		printf("\t(%d)\t%4d\t%.10f\n", iam, xsup[k]+j, x[ii+j]);
	      fflush(stdout);
	  }
	  MPI_Barrier( grid->comm );
      }
    }
#endif

    SUPERLU_FREE(fmod);
    SUPERLU_FREE(frecv);
    SUPERLU_FREE(rtemp);

    /*for (i = 0; i < Llu->SolveMsgSent; ++i) MPI_Request_free(&send_req[i]);*/

    for (i = 0; i < Llu->SolveMsgSent; ++i) MPI_Wait(&send_req[i], &status);
    Llu->SolveMsgSent = 0;

    MPI_Barrier( grid->comm );


    /*---------------------------------------------------
     * Back solve Ux = y.
     *
     * The Y components from the forward solve are already
     * on the diagonal processes.
     *---------------------------------------------------*/

    /* Save the count to be altered so it can be used by a
       subsequent call to PZGSTRS. */
    if ( !(bmod = intMalloc_dist(nlb)) )
	ABORT("Calloc fails for bmod[].");
    for (i = 0; i < nlb; ++i) bmod[i] = Llu->bmod[i];
    if ( !(brecv = intMalloc_dist(nlb)) )
	ABORT("Malloc fails for brecv[].");
    Llu->brecv = brecv;

    /*
     * Compute brecv[] and nbrecvmod counts on the diagonal processes.
     */
    {
	superlu_scope_t *scp = &grid->rscp;

#if 1
	for (k = 0; k < nlb; ++k) mod_bit[k] = 0;
	for (k = 0; k < nsupers; ++k) {
	    krow = PROW( k, grid );
	    if ( myrow == krow ) {
		lk = LBi( k, grid );    /* local block number */
		kcol = PCOL( k, grid ); /* root process in this row scope */
		if ( mycol != kcol && bmod[lk] )
		    mod_bit[lk] = 1;  /* Contribution from off-diagonal */
	    }
	}

	/* Every process receives the count, but it is only useful on the
	   diagonal processes.  */
	MPI_Allreduce( mod_bit, brecv, nlb, mpi_int_t, MPI_SUM, scp->comm );

	for (k = 0; k < nsupers; ++k) {
	    krow = PROW( k, grid );
	    if ( myrow == krow ) {
		lk = LBi( k, grid );    /* local block number */
		kcol = PCOL( k, grid ); /* root process in this row scope. */
		if ( mycol == kcol ) { /* diagonal process */
		    nbrecvmod += brecv[lk];
		    if ( !brecv[lk] && !bmod[lk] ) ++nroot;
#if ( DEBUGlevel>=2 )
		    printf("(%2d) brecv[%4d]  %2d\n", iam, k, brecv[lk]);
		    assert( brecv[lk] < Pc );
#endif
		}
	    }
	}

#else /* old */

	for (k = 0; k < nsupers; ++k) {
	    krow = PROW( k, grid );
	    if ( myrow == krow ) {
		lk = LBi( k, grid );    /* Local block number. */
		kcol = PCOL( k, grid ); /* Root process in this row scope. */
		if ( mycol != kcol && bmod[lk] )
		    i = 1;  /* Contribution from non-diagonal process. */
		else i = 0;
		MPI_Reduce( &i, &brecv[lk], 1, mpi_int_t,
			   MPI_SUM, kcol, scp->comm );
		if ( mycol == kcol ) { /* Diagonal process. */
		    nbrecvmod += brecv[lk];
		    if ( !brecv[lk] && !bmod[lk] ) ++nroot;
#if ( DEBUGlevel>=2 )
		    printf("(%2d) brecv[%4d]  %2d\n", iam, k, brecv[lk]);
		    assert( brecv[lk] < Pc );
#endif
		}
	    }
	}
#endif
    }
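
    /* brecv[] and nbrecvmod play the same role for the U-solve that frecv[]
       and nfrecvmod played for the L-solve; nroot counts the locally owned
       blocks that can be solved immediately. */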

    /* Re-initialize lsum to zero. Each block header is already in place. */
    for (k = 0; k < nsupers; ++k) {
	krow = PROW( k, grid );
	if ( myrow == krow ) {
	    knsupc = SuperSize( k );
	    lk = LBi( k, grid );
	    il = LSUM_BLK( lk );
	    dest = &lsum[il];
	    RHS_ITERATE(j) {
		for (i = 0; i < knsupc; ++i) dest[i + j*knsupc] = zero;
	    }
	}
    }
Example No. 9
void
pdgssvx_ABglobal(superlu_options_t_Distributed *options, SuperMatrix *A, 
		 ScalePermstruct_t *ScalePermstruct,
		 double B[], int ldb, int nrhs, gridinfo_t *grid,
		 LUstruct_t *LUstruct, double *berr,
		 SuperLUStat_t *stat, int *info)
{
/* 
 * -- Distributed SuperLU routine (version 1.0) --
 * Lawrence Berkeley National Lab, Univ. of California Berkeley.
 * September 1, 1999
 *
 *
 * Purpose
 * =======
 *
 * pdgssvx_ABglobal solves a system of linear equations A*X=B,
 * by using Gaussian elimination with "static pivoting" to
 * compute the LU factorization of A.
 *
 * Static pivoting is a technique that combines the numerical stability
 * of partial pivoting with the scalability of Cholesky (no pivoting),
 * to run accurately and efficiently on large numbers of processors.
 *
 * See our paper at http://www.nersc.gov/~xiaoye/SuperLU/ for a detailed
 * description of the parallel algorithms.
 *
 * Here are the options for using this code:
 *
 *   1. Independent of all the other options specified below, the
 *      user must supply
 *
 *      -  B, the matrix of right hand sides, and its dimensions ldb and nrhs
 *      -  grid, a structure describing the 2D processor mesh
 *      -  options->IterRefine, which determines whether or not to
 *            improve the accuracy of the computed solution using 
 *            iterative refinement
 *
 *      On output, B is overwritten with the solution X.
 *
 *   2. Depending on options->Fact, the user has several options
 *      for solving A*X=B. The standard option is for factoring
 *      A "from scratch". (The other options, described below,
 *      are used when A is sufficiently similar to a previously 
 *      solved problem to save time by reusing part or all of 
 *      the previous factorization.)
 *
 *      -  options->Fact = DOFACT: A is factored "from scratch"
 *
 *      In this case the user must also supply
 *
 *      -  A, the input matrix
 *
 *      as well as the following options, which are described in more 
 *      detail below:
 *
 *      -  options->Equil,   to specify how to scale the rows and columns
 *                           of A to "equilibrate" it (to try to reduce its
 *                           condition number and so improve the
 *                           accuracy of the computed solution)
 *
 *      -  options->RowPerm, to specify how to permute the rows of A
 *                           (typically to control numerical stability)
 *
 *      -  options->ColPerm, to specify how to permute the columns of A
 *                           (typically to control fill-in and enhance
 *                           parallelism during factorization)
 *
 *      -  options->ReplaceTinyPivot, to specify how to deal with tiny
 *                           pivots encountered during factorization
 *                           (to control numerical stability)
 *
 *      The outputs returned include
 *         
 *      -  ScalePermstruct,  modified to describe how the input matrix A
 *                           was equilibrated and permuted:
 *         -  ScalePermstruct->DiagScale, indicates whether the rows and/or
 *                                        columns of A were scaled
 *         -  ScalePermstruct->R, array of row scale factors
 *         -  ScalePermstruct->C, array of column scale factors
 *         -  ScalePermstruct->perm_r, row permutation vector
 *         -  ScalePermstruct->perm_c, column permutation vector
 *
 *            (part of ScalePermstruct may also need to be supplied on input,
 *             depending on options->RowPerm and options->ColPerm as described 
 *             later).
 *
 *      -  A, the input matrix A overwritten by the scaled and permuted matrix
 *                Pc*Pr*diag(R)*A*diag(C)
 *             where 
 *                Pr and Pc are row and columns permutation matrices determined
 *                  by ScalePermstruct->perm_r and ScalePermstruct->perm_c, 
 *                  respectively, and 
 *                diag(R) and diag(C) are diagonal scaling matrices determined
 *                  by ScalePermstruct->DiagScale, ScalePermstruct->R and 
 *                  ScalePermstruct->C
 *
 *      -  LUstruct, which contains the L and U factorization of A1 where
 *
 *                A1 = Pc*Pr*diag(R)*A*diag(C)*Pc^T = L*U
 *
 *              (Note that A1 = Aout * Pc^T, where Aout is the matrix stored
 *               in A on output.)
 *
 *   3. The second value of options->Fact assumes that a matrix with the same
 *      sparsity pattern as A has already been factored:
 *     
 *      -  options->Fact = SamePattern: A is factored, assuming that it has
 *            the same nonzero pattern as a previously factored matrix. In this
 *            case the algorithm saves time by reusing the previously computed
 *            column permutation vector stored in ScalePermstruct->perm_c
 *            and the "elimination tree" of A stored in LUstruct->etree.
 *
 *      In this case the user must still specify the following options
 *      as before:
 *
 *      -  options->Equil
 *      -  options->RowPerm
 *      -  options->ReplaceTinyPivot
 *
 *      but not options->ColPerm, whose value is ignored. This is because the
 *      previous column permutation from ScalePermstruct->perm_c is used as
 *      input. The user must also supply 
 *
 *      -  A, the input matrix
 *      -  ScalePermstruct->perm_c, the column permutation
 *      -  LUstruct->etree, the elimination tree
 *
 *      The outputs returned include
 *         
 *      -  A, the input matrix A overwritten by the scaled and permuted matrix
 *            as described above
 *      -  ScalePermstruct,  modified to describe how the input matrix A was
 *                           equilibrated and row permuted
 *      -  LUstruct, modified to contain the new L and U factors
 *
 *   4. The third value of options->Fact assumes that a matrix B with the same
 *      sparsity pattern as A has already been factored, and where the
 *      row permutation of B can be reused for A. This is useful when A and B
 *      have similar numerical values, so that the same row permutation
 *      will make both factorizations numerically stable. This lets us reuse
 *      all of the previously computed structure of L and U.
 *
 *      -  options->Fact = SamePattern_SameRowPerm: A is factored,
 *            assuming not only the same nonzero pattern as the previously
 *            factored matrix B, but reusing B's row permutation.
 *
 *      In this case the user must still specify the following options
 *      as before:
 *
 *      -  options->Equil
 *      -  options->ReplaceTinyPivot
 *
 *      but not options->RowPerm or options->ColPerm, whose values are ignored.
 *      This is because the permutations from ScalePermstruct->perm_r and
 *      ScalePermstruct->perm_c are used as input.
 *
 *      The user must also supply 
 *
 *      -  A, the input matrix
 *      -  ScalePermstruct->DiagScale, how the previous matrix was row and/or
 *                                     column scaled
 *      -  ScalePermstruct->R, the row scalings of the previous matrix, if any
 *      -  ScalePermstruct->C, the columns scalings of the previous matrix, 
 *                             if any
 *      -  ScalePermstruct->perm_r, the row permutation of the previous matrix
 *      -  ScalePermstruct->perm_c, the column permutation of the previous 
 *                                  matrix
 *      -  all of LUstruct, the previously computed information about L and U
 *                (the actual numerical values of L and U stored in
 *                 LUstruct->Llu are ignored)
 *
 *      The outputs returned include
 *         
 *      -  A, the input matrix A overwritten by the scaled and permuted matrix
 *            as described above
 *      -  ScalePermstruct,  modified to describe how the input matrix A was
 *                           equilibrated 
 *                  (thus ScalePermstruct->DiagScale, R and C may be modified)
 *      -  LUstruct, modified to contain the new L and U factors
 *
 *   5. The fourth and last value of options->Fact assumes that A is
 *      identical to a matrix that has already been factored on a previous 
 *      call, and reuses its entire LU factorization
 *
 *      -  options->Fact = Factored: A is identical to a previously
 *            factorized matrix, so the entire previous factorization
 *            can be reused.
 *
 *      In this case all the other options mentioned above are ignored
 *      (options->Equil, options->RowPerm, options->ColPerm, 
 *       options->ReplaceTinyPivot)
 *
 *      The user must also supply 
 *
 *      -  A, the unfactored matrix, only in the case that iterative refinement
 *            is to be done (specifically A must be the output A from 
 *            the previous call, so that it has been scaled and permuted)
 *      -  all of ScalePermstruct
 *      -  all of LUstruct, including the actual numerical values of L and U
 *
 *      all of which are unmodified on output.
 *         
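 *      As a sketch, a first factor-and-solve therefore reduces to the call
 *      below (assuming grid, options, ScalePermstruct, LUstruct and stat
 *      have already been set up with the usual SuperLU_DIST helper routines):
 *
 *         options.Fact = DOFACT;
 *         pdgssvx_ABglobal(&options, &A, &ScalePermstruct, b, ldb, nrhs,
 *                          &grid, &LUstruct, berr, &stat, &info);
 *
 *      and on return with info = 0 the array b holds the solution X.
 *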
 * Arguments
 * =========
 *
 * options (input) superlu_options_t_Distributed*
 *         The structure defines the input parameters to control
 *         how the LU decomposition will be performed.
 *         The following fields should be defined for this structure:
 *         
 *         o Fact (fact_t)
 *           Specifies whether or not the factored form of the matrix
 *           A is supplied on entry, and if not, how the matrix A should
 *           be factorized based on the previous history.
 *
 *           = DOFACT: The matrix A will be factorized from scratch.
 *                 Inputs:  A
 *                          options->Equil, RowPerm, ColPerm, ReplaceTinyPivot
 *                 Outputs: modified A
 *                             (possibly row and/or column scaled and/or 
 *                              permuted)
 *                          all of ScalePermstruct
 *                          all of LUstruct
 *
 *           = SamePattern: the matrix A will be factorized assuming
 *             that a factorization of a matrix with the same sparsity
 *             pattern was performed prior to this one. Therefore, this
 *             factorization will reuse column permutation vector 
 *             ScalePermstruct->perm_c and the elimination tree
 *             LUstruct->etree
 *                 Inputs:  A
 *                          options->Equil, RowPerm, ReplaceTinyPivot
 *                          ScalePermstruct->perm_c
 *                          LUstruct->etree
 *                 Outputs: modified A
 *                             (possibly row and/or column scaled and/or 
 *                              permuted)
 *                          rest of ScalePermstruct (DiagScale, R, C, perm_r)
 *                          rest of LUstruct (GLU_persist, Llu)
 *
 *           = SamePattern_SameRowPerm: the matrix A will be factorized
 *             assuming that a factorization of a matrix with the same
 *             sparsity	pattern and similar numerical values was performed
 *             prior to this one. Therefore, this factorization will reuse
 *             both row and column scaling factors R and C, and the
 *             both row and column permutation vectors perm_r and perm_c,
 *             distributed data structure set up from the previous symbolic
 *             factorization.
 *                 Inputs:  A
 *                          options->Equil, ReplaceTinyPivot
 *                          all of ScalePermstruct
 *                          all of LUstruct
 *                 Outputs: modified A
 *                             (possibly row and/or column scaled and/or 
 *                              permuted)
 *                          modified LUstruct->Llu
 *           = FACTORED: the matrix A is already factored.
 *                 Inputs:  all of ScalePermstruct
 *                          all of LUstruct
 *
 *         o Equil (yes_no_t)
 *           Specifies whether to equilibrate the system.
 *           = NO:  no equilibration.
 *           = YES: scaling factors are computed to equilibrate the system:
 *                      diag(R)*A*diag(C)*inv(diag(C))*X = diag(R)*B.
 *                  Whether or not the system will be equilibrated depends
 *                  on the scaling of the matrix A, but if equilibration is
 *                  used, A is overwritten by diag(R)*A*diag(C) and B by
 *                  diag(R)*B.
 *
 *         o RowPerm (rowperm_t)
 *           Specifies how to permute rows of the matrix A.
 *           = NATURAL:   use the natural ordering.
 *           = LargeDiag: use the Duff/Koster algorithm to permute rows of
 *                        the original matrix to make the diagonal large
 *                        relative to the off-diagonal.
 *           = MY_PERMR:  use the ordering given in ScalePermstruct->perm_r
 *                        input by the user.
 *           
 *         o ColPerm (colperm_t)
 *           Specifies what type of column permutation to use to reduce fill.
 *           = NATURAL:       natural ordering.
 *           = MMD_AT_PLUS_A: minimum degree ordering on structure of A'+A.
 *           = MMD_ATA:       minimum degree ordering on structure of A'*A.
 *           = COLAMD:        approximate minimum degree column ordering.
 *           = MY_PERMC:      the ordering given in ScalePermstruct->perm_c.
 *         
 *         o ReplaceTinyPivot (yes_no_t)
 *           = NO:  do not modify pivots
 *           = YES: replace tiny pivots by sqrt(epsilon)*norm(A) during 
 *                  LU factorization.
 *
 *         o IterRefine (IterRefine_t)
 *           Specifies how to perform iterative refinement.
 *           = NO:     no iterative refinement.
 *           = DOUBLE: accumulate residual in double precision.
 *           = EXTRA:  accumulate residual in extra precision.
 *
 *         NOTE: all options must be identical on all processes when
 *               calling this routine.
 *
 * A (input/output) SuperMatrix*
 *         On entry, matrix A in A*X=B, of dimension (A->nrow, A->ncol).
 *         The number of linear equations is A->nrow. The type of A must be:
 *         Stype = SLU_NC; Dtype = SLU_D; Mtype = SLU_GE. That is, A is stored
 *         in
 *         compressed column format (also known as Harwell-Boeing format).
 *         See supermatrix.h for the definition of 'SuperMatrix'.
 *         This routine only handles square A, however, the LU factorization
 *         routine pdgstrf can factorize rectangular matrices.
 *         On exit, A may be overwritten by Pc*Pr*diag(R)*A*diag(C),
 *         depending on ScalePermstruct->DiagScale, options->RowPerm and
 *         options->ColPerm:
 *             if ScalePermstruct->DiagScale != NOEQUIL, A is overwritten by
 *                diag(R)*A*diag(C).
 *             if options->RowPerm != NATURAL, A is further overwritten by
 *                Pr*diag(R)*A*diag(C).
 *             if options->ColPerm != NATURAL, A is further overwritten by
 *                Pc*Pr*diag(R)*A*diag(C).
 *         If all the above conditions are true, the LU decomposition is
 *         performed on the matrix Pc*Pr*diag(R)*A*diag(C)*Pc^T.
 *
 *         NOTE: Currently, A must reside in all processes when calling
 *               this routine.
 *
 * ScalePermstruct (input/output) ScalePermstruct_t*
 *         The data structure to store the scaling and permutation vectors
 *         describing the transformations performed to the matrix A.
 *         It contains the following fields:
 *
 *         o DiagScale (DiagScale_t)
 *           Specifies the form of equilibration that was done.
 *           = NOEQUIL: no equilibration.
 *           = ROW:     row equilibration, i.e., A was premultiplied by
 *                      diag(R).
 *           = COL:     Column equilibration, i.e., A was postmultiplied
 *                      by diag(C).
 *           = BOTH:    both row and column equilibration, i.e., A was 
 *                      replaced by diag(R)*A*diag(C).
 *           If options->Fact = FACTORED or SamePattern_SameRowPerm,
 *           DiagScale is an input argument; otherwise it is an output
 *           argument.
 *
 *         o perm_r (int*)
 *           Row permutation vector, which defines the permutation matrix Pr;
 *           perm_r[i] = j means row i of A is in position j in Pr*A.
 *           If options->RowPerm = MY_PERMR, or
 *           options->Fact = SamePattern_SameRowPerm, perm_r is an
 *           input argument; otherwise it is an output argument.
 *
 *         o perm_c (int*)
 *           Column permutation vector, which defines the 
 *           permutation matrix Pc; perm_c[i] = j means column i of A is 
 *           in position j in A*Pc.
 *           If options->ColPerm = MY_PERMC or options->Fact = SamePattern
 *           or options->Fact = SamePattern_SameRowPerm, perm_c is an
 *           input argument; otherwise, it is an output argument.
 *           On exit, perm_c may be overwritten by the product of the input
 *           perm_c and a permutation that postorders the elimination tree
 *           of Pc*A'*A*Pc'; perm_c is not changed if the elimination tree
 *           is already in postorder.
 *
 *         o R (double*) dimension (A->nrow)
 *           The row scale factors for A.
 *           If DiagScale = ROW or BOTH, A is multiplied on the left by 
 *                          diag(R).
 *           If DiagScale = NOEQUIL or COL, R is not defined.
 *           If options->Fact = FACTORED or SamePattern_SameRowPerm, R is
 *           an input argument; otherwise, R is an output argument.
 *
 *         o C (double*) dimension (A->ncol)
 *           The column scale factors for A.
 *           If DiagScale = COL or BOTH, A is multiplied on the right by 
 *                          diag(C).
 *           If DiagScale = NOEQUIL or ROW, C is not defined.
 *           If options->Fact = FACTORED or SamePattern_SameRowPerm, C is
 *           an input argument; otherwise, C is an output argument.
 *         
 * B       (input/output) double*
 *         On entry, the right-hand side matrix of dimension (A->nrow, nrhs).
 *         On exit, the solution matrix if info = 0;
 *
 *         NOTE: Currently, B must reside in all processes when calling
 *               this routine.
 *
 * ldb     (input) int (global)
 *         The leading dimension of matrix B.
 *
 * nrhs    (input) int (global)
 *         The number of right-hand sides.
 *         If nrhs = 0, only LU decomposition is performed, the forward
 *         and back substitutions are skipped.
 *
 * grid    (input) gridinfo_t*
 *         The 2D process mesh. It contains the MPI communicator, the number
 *         of process rows (NPROW), the number of process columns (NPCOL),
 *         and my process rank. It is an input argument to all the
 *         parallel routines.
 *         Grid can be initialized by subroutine SUPERLU_GRIDINIT.
 *         See superlu_ddefs.h for the definition of 'gridinfo_t'.
 *
 * LUstruct (input/output) LUstruct_t*
 *         The data structures to store the distributed L and U factors.
 *         It contains the following fields:
 *
 *         o etree (int*) dimension (A->ncol)
 *           Elimination tree of Pc*(A'+A)*Pc' or Pc*A'*A*Pc', dimension A->ncol.
 *           It is computed in sp_colorder() during the first factorization,
 *           and is reused in the subsequent factorizations of the matrices
 *           with the same nonzero pattern.
 *           On exit of sp_colorder(), the columns of A are permuted so that
 *           the etree is in a certain postorder. This postorder is reflected
 *           in ScalePermstruct->perm_c.
 *           NOTE:
 *           Etree is a vector of parent pointers for a forest whose vertices
 *           are the integers 0 to A->ncol-1; etree[root]==A->ncol.
 *
 *         o Glu_persist (Glu_persist_t*)
 *           Global data structure (xsup, supno) replicated on all processes,
 *           describing the supernode partition in the factored matrices
 *           L and U:
 *	       xsup[s] is the leading column of the s-th supernode,
 *             supno[i] is the supernode number to which column i belongs.
 *
 *         o Llu (LocalLU_t*)
 *           The distributed data structures to store L and U factors.
 *           See superlu_ddefs.h for the definition of 'LocalLU_t'.
 *
 * berr    (output) double*, dimension (nrhs)
 *         The componentwise relative backward error of each solution   
 *         vector X(j) (i.e., the smallest relative change in   
 *         any element of A or B that makes X(j) an exact solution).
 *
 * stat   (output) SuperLUStat_t*
 *        Record the statistics on runtime and floating-point operation count.
 *        See util.h for the definition of 'SuperLUStat_t'.
 *
 * info    (output) int*
 *         = 0: successful exit
 *         > 0: if info = i, and i is
 *             <= A->ncol: U(i,i) is exactly zero. The factorization has
 *                been completed, but the factor U is exactly singular,
 *                so the solution could not be computed.
 *             > A->ncol: number of bytes allocated when memory allocation
 *                failure occurred, plus A->ncol.
 *
 *
 * See superlu_ddefs.h for the definitions of various data types.
 *
 */
    SuperMatrix AC;
    NCformat *Astore;
    NCPformat *ACstore;
    Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
    Glu_freeable_t *Glu_freeable;
            /* The nonzero structures of L and U factors, which are
	       replicated on all processes.
	           (lsub, xlsub) contains the compressed subscript of
		                 supernodes in L.
          	   (usub, xusub) contains the compressed subscript of
		                 nonzero segments in U.
	      If options->Fact != SamePattern_SameRowPerm, they are 
	      computed by SYMBFACT routine, and then used by DDISTRIBUTE
	      routine. They will be freed after DDISTRIBUTE routine.
	      If options->Fact == SamePattern_SameRowPerm, these
	      structures are not used.                                  */
    fact_t   Fact;
    double   *a;
    int_t    *perm_r; /* row permutations from partial pivoting */
    int_t    *perm_c; /* column permutation vector */
    int_t    *etree;  /* elimination tree */
    int_t    *colptr, *rowind;
    int_t    colequ, Equil, factored, job, notran, rowequ;
    int_t    i, iinfo, j, irow, m, n, nnz, permc_spec, dist_mem_use;
    int      iam;
    int      ldx;  /* LDA for matrix X (global). */
    char     equed[1], norm[1];
    double   *C, *R, *C1, *R1, amax, anorm, colcnd, rowcnd;
    double   *X, *b_col, *b_work, *x_col;
    double   t;
    static mem_usage_t_Distributed num_mem_usage, symb_mem_usage;
#if ( PRNTlevel>= 2 )
    double   dmin, dsum, dprod;
#endif

    /* Test input parameters. */
    *info = 0;
    Fact = options->Fact;
    if ( Fact < 0 || Fact > FACTORED )
	*info = -1;
    else if ( options->RowPerm < 0 || options->RowPerm > MY_PERMR )
	*info = -1;
    else if ( options->ColPerm < 0 || options->ColPerm > MY_PERMC )
	*info = -1;
    else if ( options->IterRefine < 0 || options->IterRefine > EXTRA )
	*info = -1;
    else if ( options->IterRefine == EXTRA ) {
	*info = -1;
	fprintf(stderr, "Extra precise iterative refinement is not yet supported.\n");
    } else if ( A->nrow != A->ncol || A->nrow < 0 ||
         A->Stype != SLU_NC || A->Dtype != SLU_D || A->Mtype != SLU_GE )
	*info = -2;
    else if ( ldb < A->nrow )
	*info = -5;
    else if ( nrhs < 0 )
	*info = -6;
    if ( *info ) {
	i = -(*info);
	pxerbla("pdgssvx_ABglobal", grid, -*info);
	return;
    }

    /* Initialization */
    factored = (Fact == FACTORED);
    Equil = (!factored && options->Equil == YES);
    notran = (options->Trans == NOTRANS);
    iam = grid->iam;
    job = 5;
    m = A->nrow;
    n = A->ncol;
    Astore = A->Store;
    nnz = Astore->nnz;
    a = Astore->nzval;
    colptr = Astore->colptr;
    rowind = Astore->rowind;
    if ( factored || (Fact == SamePattern_SameRowPerm && Equil) ) {
	rowequ = (ScalePermstruct->DiagScale == ROW) ||
	         (ScalePermstruct->DiagScale == BOTH);
	colequ = (ScalePermstruct->DiagScale == COL) ||
	         (ScalePermstruct->DiagScale == BOTH);
    } else rowequ = colequ = FALSE;

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Enter pdgssvx_ABglobal()");
#endif

    perm_r = ScalePermstruct->perm_r;
    perm_c = ScalePermstruct->perm_c;
    etree = LUstruct->etree;
    R = ScalePermstruct->R;
    C = ScalePermstruct->C;
    if ( Equil ) {
	/* Allocate storage if not done so before. */
	switch ( ScalePermstruct->DiagScale ) {
	    case NOEQUIL:
		if ( !(R = (double *) doubleMalloc_dist(m)) )
		    ABORT("Malloc fails for R[].");
	        if ( !(C = (double *) doubleMalloc_dist(n)) )
		    ABORT("Malloc fails for C[].");
		ScalePermstruct->R = R;
		ScalePermstruct->C = C;
		break;
	    case ROW: 
	        if ( !(C = (double *) doubleMalloc_dist(n)) )
		    ABORT("Malloc fails for C[].");
		ScalePermstruct->C = C;
		break;
	    case COL: 
		if ( !(R = (double *) doubleMalloc_dist(m)) )
		    ABORT("Malloc fails for R[].");
		ScalePermstruct->R = R;
		break;
	}
    }

    /* ------------------------------------------------------------
       Diagonal scaling to equilibrate the matrix.
       ------------------------------------------------------------*/
    if ( Equil ) {
#if ( DEBUGlevel>=1 )
	CHECK_MALLOC(iam, "Enter equil");
#endif
	t = SuperLU_timer_();

	if ( Fact == SamePattern_SameRowPerm ) {
	    /* Reuse R and C. */
	    switch ( ScalePermstruct->DiagScale ) {
	      case NOEQUIL:
		break;
	      case ROW:
		for (j = 0; j < n; ++j) {
		    for (i = colptr[j]; i < colptr[j+1]; ++i) {
			irow = rowind[i];
			a[i] *= R[irow];       /* Scale rows. */
		    }
		}
		break;
	      case COL:
		for (j = 0; j < n; ++j)
		    for (i = colptr[j]; i < colptr[j+1]; ++i)
			a[i] *= C[j];          /* Scale columns. */
		break;
	      case BOTH: 
		for (j = 0; j < n; ++j) {
		    for (i = colptr[j]; i < colptr[j+1]; ++i) {
			irow = rowind[i];
			a[i] *= R[irow] * C[j]; /* Scale rows and columns. */
		    }
		}
	        break;
	    }
	} else {
	    if ( !iam ) {
		/* Compute row and column scalings to equilibrate matrix A. */
		dgsequ_dist(A, R, C, &rowcnd, &colcnd, &amax, &iinfo);
	    
		MPI_Bcast( &iinfo, 1, mpi_int_t, 0, grid->comm );
		if ( iinfo == 0 ) {
		    MPI_Bcast( R,       m, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( C,       n, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( &rowcnd, 1, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( &colcnd, 1, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( &amax,   1, MPI_DOUBLE, 0, grid->comm );
		} else {
		    if ( iinfo > 0 ) {
			if ( iinfo <= m )
			    fprintf(stderr, "The %d-th row of A is exactly zero\n", 
				    iinfo);
			else fprintf(stderr, "The %d-th column of A is exactly zero\n", 
				     iinfo-n);
			exit(-1);
		    }
		}
	    } else {
		MPI_Bcast( &iinfo, 1, mpi_int_t, 0, grid->comm );
		if ( iinfo == 0 ) {
		    MPI_Bcast( R,       m, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( C,       n, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( &rowcnd, 1, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( &colcnd, 1, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( &amax,   1, MPI_DOUBLE, 0, grid->comm );
		} else {
		    ABORT("DGSEQU failed\n");
		}
	    }
	
	    /* Equilibrate matrix A. */
	    dlaqgs_dist(A, R, C, rowcnd, colcnd, amax, equed);
	    if ( lsame_(equed, "R") ) {
		ScalePermstruct->DiagScale = rowequ = ROW;
	    } else if ( lsame_(equed, "C") ) {
		ScalePermstruct->DiagScale = colequ = COL;
	    } else if ( lsame_(equed, "B") ) {
		ScalePermstruct->DiagScale = BOTH;
		rowequ = ROW;
		colequ = COL;
	    } else ScalePermstruct->DiagScale = NOEQUIL;

#if ( PRNTlevel>=1 )
	    if ( !iam ) {
		printf(".. equilibrated? *equed = %c\n", *equed);
		/*fflush(stdout);*/
	    }
#endif
	} /* if Fact ... */

	stat->utime[EQUIL] = SuperLU_timer_() - t;
#if ( DEBUGlevel>=1 )
	CHECK_MALLOC(iam, "Exit equil");
#endif
    } /* if Equil ... */
    
    /* ------------------------------------------------------------
       Permute rows of A. 
       ------------------------------------------------------------*/
    if ( options->RowPerm != NO ) {
	t = SuperLU_timer_();

	if ( Fact == SamePattern_SameRowPerm /* Reuse perm_r. */
	    || options->RowPerm == MY_PERMR ) { /* Use my perm_r. */
	    for (i = 0; i < colptr[n]; ++i) {
		irow = rowind[i];
		rowind[i] = perm_r[irow];
	    }
	} else if ( !factored ) {
	    if ( job == 5 ) {
		/* Allocate storage for scaling factors. */
		if ( !(R1 = (double *) SUPERLU_MALLOC(m * sizeof(double))) ) 
		    ABORT("SUPERLU_MALLOC fails for R1[]");
		if ( !(C1 = (double *) SUPERLU_MALLOC(n * sizeof(double))) )
		    ABORT("SUPERLU_MALLOC fails for C1[]");
	    }

	    if ( !iam ) {
		/* Process 0 finds a row permutation for large diagonal. */
		dldperm(job, m, nnz, colptr, rowind, a, perm_r, R1, C1);
		
		MPI_Bcast( perm_r, m, mpi_int_t, 0, grid->comm );
		if ( job == 5 && Equil ) {
		    MPI_Bcast( R1, m, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( C1, n, MPI_DOUBLE, 0, grid->comm );
		}
	    } else {
		MPI_Bcast( perm_r, m, mpi_int_t, 0, grid->comm );
		if ( job == 5 && Equil ) {
		    MPI_Bcast( R1, m, MPI_DOUBLE, 0, grid->comm );
		    MPI_Bcast( C1, n, MPI_DOUBLE, 0, grid->comm );
		}
	    }

#if ( PRNTlevel>=2 )
	    dmin = dlamch_("Overflow");
	    dsum = 0.0;
	    dprod = 1.0;
#endif
	    if ( job == 5 ) {
		if ( Equil ) {
		    for (i = 0; i < n; ++i) {
			R1[i] = exp(R1[i]);
			C1[i] = exp(C1[i]);
		    }
		    for (j = 0; j < n; ++j) {
			for (i = colptr[j]; i < colptr[j+1]; ++i) {
			    irow = rowind[i];
			    a[i] *= R1[irow] * C1[j]; /* Scale the matrix. */
			    rowind[i] = perm_r[irow];
#if ( PRNTlevel>=2 )
			    if ( rowind[i] == j ) /* New diagonal */
				dprod *= fabs(a[i]);
#endif
			}
		    }

		    /* Multiply together the scaling factors. */
		    if ( rowequ ) for (i = 0; i < m; ++i) R[i] *= R1[i];
		    else for (i = 0; i < m; ++i) R[i] = R1[i];
		    if ( colequ ) for (i = 0; i < n; ++i) C[i] *= C1[i];
		    else for (i = 0; i < n; ++i) C[i] = C1[i];
		    
		    ScalePermstruct->DiagScale = BOTH;
		    rowequ = colequ = 1;
		} else { /* No equilibration. */
		    for (i = colptr[0]; i < colptr[n]; ++i) {
			irow = rowind[i];
			rowind[i] = perm_r[irow];
		    }
		}
		SUPERLU_FREE (R1);
		SUPERLU_FREE (C1);
	    } else { /* job = 2,3,4 */
		for (j = 0; j < n; ++j) {
		    for (i = colptr[j]; i < colptr[j+1]; ++i) {
			irow = rowind[i];
			rowind[i] = perm_r[irow];
#if ( PRNTlevel>=2 )
			if ( rowind[i] == j ) { /* New diagonal */
			    if ( job == 2 || job == 3 )
				dmin = SUPERLU_MIN(dmin, fabs(a[i]));
			    else if ( job == 4 )
				dsum += fabs(a[i]);
			    else if ( job == 5 )
				dprod *= fabs(a[i]);
			}
#endif
		    }
		}
	    }

#if ( PRNTlevel>=2 )
	    if ( job == 2 || job == 3 ) {
		if ( !iam ) printf("\tsmallest diagonal %e\n", dmin);
	    } else if ( job == 4 ) {
		if ( !iam ) printf("\tsum of diagonal %e\n", dsum);
	    } else if ( job == 5 ) {
		if ( !iam ) printf("\t product of diagonal %e\n", dprod);
	    }
#endif
	    
        } /* else !factored */

	t = SuperLU_timer_() - t;
	stat->utime[ROWPERM] = t;
#if ( PRNTlevel>=1 )
	if ( !iam ) printf(".. LDPERM job %d\t time: %.2f\n", job, t);
#endif
    
    } /* if options->RowPerm ... */

    if ( !factored || options->IterRefine ) {
	/* Compute norm(A), which will be used to adjust small diagonal. */
	if ( notran ) *(unsigned char *)norm = '1';
	else *(unsigned char *)norm = 'I';
	anorm = dlangs_dist(norm, A);
#if ( PRNTlevel>=1 )
	if ( !iam ) printf(".. anorm %e\n", anorm);
#endif
    }

    /* ------------------------------------------------------------
       Perform the LU factorization.
       ------------------------------------------------------------*/
    if ( !factored ) {
	t = SuperLU_timer_();
	/*
	 * Get column permutation vector perm_c[], according to permc_spec:
	 *   permc_spec = NATURAL:  natural ordering 
	 *   permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A
	 *   permc_spec = MMD_ATA:  minimum degree on structure of A'*A
	 *   permc_spec = COLAMD:   approximate minimum degree column ordering
	 *   permc_spec = MY_PERMC: the ordering already supplied in perm_c[]
	 */
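	/* (Illustration: a driver would normally have set one of the above
	   before calling this routine, e.g. options->ColPerm = MMD_AT_PLUS_A;
	   that choice is simply read back here.) */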
	permc_spec = options->ColPerm;
	if ( permc_spec != MY_PERMC && Fact == DOFACT )
	    /* Use an ordering provided by SuperLU */
	    get_perm_c_dist(iam, permc_spec, A, perm_c);

	/* Compute the elimination tree of Pc*(A'+A)*Pc' or Pc*A'*A*Pc'
	   (a.k.a. column etree), depending on the choice of ColPerm.
	   Adjust perm_c[] to be consistent with a postorder of etree.
	   Permute columns of A to form A*Pc'. */
	sp_colorder(options, A, perm_c, etree, &AC);

	/* Form Pc*A*Pc' to preserve the diagonal of the matrix Pr*A. */
	ACstore = AC.Store;
	for (j = 0; j < n; ++j) 
	    for (i = ACstore->colbeg[j]; i < ACstore->colend[j]; ++i) {
		irow = ACstore->rowind[i];
		ACstore->rowind[i] = perm_c[irow];
	    }
	stat->utime[COLPERM] = SuperLU_timer_() - t;

	/* Perform a symbolic factorization on matrix A and set up the
	   nonzero data structures which are suitable for supernodal GENP. */
	if ( Fact != SamePattern_SameRowPerm ) {
#if ( PRNTlevel>=1 ) 
	    if ( !iam ) 
		printf(".. symbfact(): relax %4d, maxsuper %4d, fill %4d\n",
		       sp_ienv_dist(2), sp_ienv_dist(3), sp_ienv_dist(6));
#endif
	    t = SuperLU_timer_();
	    if ( !(Glu_freeable = (Glu_freeable_t *)
		   SUPERLU_MALLOC(sizeof(Glu_freeable_t))) )
		ABORT("Malloc fails for Glu_freeable.");

	    iinfo = symbfact(iam, &AC, perm_c, etree, 
			     Glu_persist, Glu_freeable);

	    stat->utime[SYMBFAC] = SuperLU_timer_() - t;

	    if ( iinfo < 0 ) { /* Successful return. */
		QuerySpace_dist(n, -iinfo, Glu_freeable, &symb_mem_usage);
#if ( PRNTlevel>=1 )
		if ( !iam ) {
		    printf("\tNo of supers %ld\n", Glu_persist->supno[n-1]+1);
		    printf("\tSize of G(L) %ld\n", Glu_freeable->xlsub[n]);
		    printf("\tSize of G(U) %ld\n", Glu_freeable->xusub[n]);
		    printf("\tint %d, short %d, float %d, double %d\n", 
			   sizeof(int_t), sizeof(short), sizeof(float),
			   sizeof(double));
		    printf("\tSYMBfact (MB):\tL\\U %.2f\ttotal %.2f\texpansions %d\n",
			   symb_mem_usage.for_lu*1e-6, 
			   symb_mem_usage.total*1e-6,
			   symb_mem_usage.expansions);
		}
#endif
	    } else {
		if ( !iam ) {
		    fprintf(stderr, "symbfact() error returns %d\n", iinfo);
		    exit(-1);
		}
	    }
	}

	/* Distribute the L and U factors onto the process grid. */
	t = SuperLU_timer_();
	dist_mem_use = ddistribute(Fact, n, &AC, Glu_freeable, LUstruct, grid);
	stat->utime[DIST] = SuperLU_timer_() - t;

	/* Deallocate storage used in symbolic factor. */
	if ( Fact != SamePattern_SameRowPerm ) {
	    iinfo = symbfact_SubFree(Glu_freeable);
	    SUPERLU_FREE(Glu_freeable);
	}

	/* Perform numerical factorization in parallel. */
	t = SuperLU_timer_();
	pdgstrf(options, m, n, anorm, LUstruct, grid, stat, info);
	stat->utime[FACT] = SuperLU_timer_() - t;

#if ( PRNTlevel>=1 )
	{
	    int_t TinyPivots;
	    float for_lu, total, max, avg, temp;
	    dQuerySpace_dist(n, LUstruct, grid, &num_mem_usage);
	    MPI_Reduce( &num_mem_usage.for_lu, &for_lu,
		       1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
	    MPI_Reduce( &num_mem_usage.total, &total,
		       1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
	    temp = SUPERLU_MAX(symb_mem_usage.total,
			       symb_mem_usage.for_lu +
			       (float)dist_mem_use + num_mem_usage.for_lu);
	    temp = SUPERLU_MAX(temp, num_mem_usage.total);
	    MPI_Reduce( &temp, &max,
		       1, MPI_FLOAT, MPI_MAX, 0, grid->comm );
	    MPI_Reduce( &temp, &avg,
		       1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
	    MPI_Allreduce( &stat->TinyPivots, &TinyPivots, 1, mpi_int_t,
			  MPI_SUM, grid->comm );
	    stat->TinyPivots = TinyPivots;
	    if ( !iam ) {
		printf("\tNUMfact (MB) all PEs:\tL\\U\t%.2f\tall\t%.2f\n",
		       for_lu*1e-6, total*1e-6);
		printf("\tAll space (MB):"
		       "\t\ttotal\t%.2f\tAvg\t%.2f\tMax\t%.2f\n",
		       avg*1e-6, avg/grid->nprow/grid->npcol*1e-6, max*1e-6);
		printf("\tNumber of tiny pivots: %10d\n", stat->TinyPivots);
	    }
	}
#endif
    
#if ( PRNTlevel>=2 )
	if ( !iam ) printf(".. pdgstrf INFO = %d\n", *info);
#endif

    } else if ( options->IterRefine ) { /* options->Fact==FACTORED */
	/* Permute columns of A to form A*Pc' using the existing perm_c.
	 * NOTE: rows of A were previously permuted to Pc*A.
	 */
	sp_colorder(options, A, perm_c, NULL, &AC);
    } /* if !factored ... */
	
    /* ------------------------------------------------------------
       Compute the solution matrix X.
       ------------------------------------------------------------*/
    if ( nrhs ) {

	if ( !(b_work = doubleMalloc_dist(n)) )
	    ABORT("Malloc fails for b_work[]");

	/* ------------------------------------------------------------
	   Scale the right-hand side if equilibration was performed. 
	   ------------------------------------------------------------*/
	if ( notran ) {
	    if ( rowequ ) {
		b_col = B;
		for (j = 0; j < nrhs; ++j) {
		    for (i = 0; i < m; ++i) b_col[i] *= R[i];
		    b_col += ldb;
		}
	    }
	} else if ( colequ ) {
	    b_col = B;
	    for (j = 0; j < nrhs; ++j) {
		for (i = 0; i < m; ++i) b_col[i] *= C[i];
		b_col += ldb;
	    }
	}

	/* ------------------------------------------------------------
	   Permute the right-hand side to form Pr*B.
	   ------------------------------------------------------------*/
	if ( options->RowPerm != NO ) {
	    if ( notran ) {
		b_col = B;
		for (j = 0; j < nrhs; ++j) {
		    for (i = 0; i < m; ++i) b_work[perm_r[i]] = b_col[i];
		    for (i = 0; i < m; ++i) b_col[i] = b_work[i];
		    b_col += ldb;
		}
	    }
	}


	/* ------------------------------------------------------------
	   Permute the right-hand side to form Pc*B.
	   ------------------------------------------------------------*/
	if ( notran ) {
	    b_col = B;
	    for (j = 0; j < nrhs; ++j) {
		for (i = 0; i < m; ++i) b_work[perm_c[i]] = b_col[i];
		for (i = 0; i < m; ++i) b_col[i] = b_work[i];
		b_col += ldb;
	    }
	}


	/* Save a copy of the right-hand side. */
	ldx = ldb;
	if ( !(X = doubleMalloc_dist(((size_t)ldx) * nrhs)) )
	    ABORT("Malloc fails for X[]");
	x_col = X;  b_col = B;
	for (j = 0; j < nrhs; ++j) {
	    for (i = 0; i < ldb; ++i) x_col[i] = b_col[i];
	    x_col += ldx;  b_col += ldb;
	}

	/* ------------------------------------------------------------
	   Solve the linear system.
	   ------------------------------------------------------------*/
	pdgstrs_Bglobal(n, LUstruct, grid, X, ldb, nrhs, stat, info);

	/* ------------------------------------------------------------
	   Use iterative refinement to improve the computed solution and
	   compute error bounds and backward error estimates for it.
	   ------------------------------------------------------------*/
	if ( options->IterRefine ) {
	    /* Improve the solution by iterative refinement. */
	    t = SuperLU_timer_();
	    pdgsrfs_ABXglobal(n, &AC, anorm, LUstruct, grid, B, ldb,
			      X, ldx, nrhs, berr, stat, info);
	    stat->utime[REFINE] = SuperLU_timer_() - t;
	}

	/* Permute the solution matrix X <= Pc'*X. */
	for (j = 0; j < nrhs; j++) {
	    b_col = &B[j*ldb];
	    x_col = &X[j*ldx];
	    for (i = 0; i < n; ++i) b_col[i] = x_col[perm_c[i]];
	}
	
	/* Transform the solution matrix X to a solution of the original system
	   before the equilibration. */
	if ( notran ) {
	    if ( colequ ) {
		b_col = B;
		for (j = 0; j < nrhs; ++j) {
		    for (i = 0; i < n; ++i) b_col[i] *= C[i];
		    b_col += ldb;
		}
	    }
	} else if ( rowequ ) {
	    b_col = B;
	    for (j = 0; j < nrhs; ++j) {
		for (i = 0; i < n; ++i) b_col[i] *= R[i];
		b_col += ldb;
	    }
	}

	SUPERLU_FREE(b_work);
	SUPERLU_FREE(X);

    } /* end if nrhs != 0 */

#if ( PRNTlevel>=1 )
    if ( !iam ) printf(".. DiagScale = %d\n", ScalePermstruct->DiagScale);
#endif

    /* Deallocate storage. */
    if ( Equil && Fact != SamePattern_SameRowPerm ) {
	switch ( ScalePermstruct->DiagScale ) {
	    case NOEQUIL:
	        SUPERLU_FREE(R);
		SUPERLU_FREE(C);
		break;
	    case ROW: 
		SUPERLU_FREE(C);
		break;
	    case COL: 
		SUPERLU_FREE(R);
		break;
	}
    }
    if ( !factored || (factored && options->IterRefine) )
	Destroy_CompCol_Permuted_dist(&AC);

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Exit pdgssvx_ABglobal()");
#endif
}
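The routine above expects its caller to have set up the process grid, the
options structure, and the ScalePermstruct/LUstruct containers beforehand.
The fragment below is a minimal, hypothetical driver sketch of that setup;
the helper routines it names (superlu_gridinit, set_default_options_dist,
ScalePermstructInit, LUstructInit, PStatInit, PStatPrint, and the matching
Free/Destroy calls) exist in SuperLU_DIST, but their exact signatures differ
between releases, so treat this as an assumption-laden sketch rather than a
verbatim recipe.

#include "superlu_ddefs.h"

/* Hypothetical driver sketch (not part of the library): factor a replicated
 * n-by-n matrix A and solve A*X = B with pdgssvx_ABglobal().  Assumes the
 * MPI job was launched with exactly nprow*npcol processes; error handling
 * is minimal. */
void example_solve_ABglobal(SuperMatrix *A, double *b, int ldb, int nrhs,
                            int nprow, int npcol)
{
    superlu_options_t options;
    ScalePermstruct_t ScalePermstruct;
    LUstruct_t LUstruct;
    SuperLUStat_t stat;
    gridinfo_t grid;
    double *berr;
    int info, n = A->ncol;

    /* Create the 2D process grid on MPI_COMM_WORLD. */
    superlu_gridinit(MPI_COMM_WORLD, nprow, npcol, &grid);

    /* Default options: factor from scratch, equilibrate, iterative refinement. */
    set_default_options_dist(&options);

    /* Permutation/scaling containers and the distributed L and U containers. */
    ScalePermstructInit(A->nrow, A->ncol, &ScalePermstruct);
    LUstructInit(A->nrow, A->ncol, &LUstruct);  /* (n, &LUstruct) in newer releases */
    PStatInit(&stat);

    if ( !(berr = doubleMalloc_dist(nrhs)) )
        ABORT("Malloc fails for berr[].");

    /* Factor A and solve; the solution overwrites b on every process. */
    pdgssvx_ABglobal(&options, A, &ScalePermstruct, b, ldb, nrhs,
                     &grid, &LUstruct, berr, &stat, &info);
    PStatPrint(&options, &stat, &grid);

    /* Release everything allocated above and by the solve. */
    PStatFree(&stat);
    Destroy_LU(n, &grid, &LUstruct);
    ScalePermstructFree(&ScalePermstruct);
    LUstructFree(&LUstruct);
    SUPERLU_FREE(berr);
    superlu_gridexit(&grid);
}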
Example No. 10
0
void
pdgstrs_Bglobal(int_t n, LUstruct_t *LUstruct, gridinfo_t *grid, 
                double *B, int_t ldb, int nrhs, 
                SuperLUStat_t *stat, int *info)
{
    Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
    LocalLU_t *Llu = LUstruct->Llu;
    double alpha = 1.0;
    double *lsum;  /* Local running sum of the updates to B-components */
    double *x;     /* X component at step k. */
    double *lusup, *dest;
    double *recvbuf, *tempv;
    double *rtemp; /* Result of full matrix-vector multiply. */
    int_t  **Ufstnz_br_ptr = Llu->Ufstnz_br_ptr;
    int_t  *Urbs, *Urbs1; /* Number of row blocks in each block column of U. */
    Ucb_indptr_t **Ucb_indptr;/* Vertical linked list pointing to Uindex[] */
    int_t  **Ucb_valptr;      /* Vertical linked list pointing to Unzval[] */
    int_t  iam, kcol, krow, mycol, myrow;
    int_t  i, ii, il, j, jj, k, lb, ljb, lk, lptr, luptr;
    int_t  nb, nlb, nub, nsupers;
    int_t  *xsup, *lsub, *usub;
    int_t  *ilsum;    /* Starting position of each supernode in lsum (LOCAL)*/
    int_t  Pc, Pr;
    int    knsupc, nsupr;
    int    ldalsum;   /* Number of lsum entries locally owned. */
    int    maxrecvsz, p, pi;
    int_t  **Lrowind_bc_ptr;
    double **Lnzval_bc_ptr;
    MPI_Status status;
#if defined (ISEND_IRECV) || defined (BSEND)
    MPI_Request *send_req, recv_req;
#endif

    /*-- Counts used for L-solve --*/
    int_t  *fmod;         /* Modification count for L-solve. */
    int_t  **fsendx_plist = Llu->fsendx_plist;
    int_t  nfrecvx = Llu->nfrecvx; /* Number of X components to be recv'd. */
    int_t  *frecv;        /* Count of modifications to be recv'd from
			     processes in this row. */
    int_t  nfrecvmod = 0; /* Count of total modifications to be recv'd. */
    int_t  nleaf = 0, nroot = 0;

    /*-- Counts used for U-solve --*/
    int_t  *bmod;         /* Modification count for U-solve. */
    int_t  **bsendx_plist = Llu->bsendx_plist;
    int_t  nbrecvx = Llu->nbrecvx; /* Number of X components to be recv'd. */
    int_t  *brecv;        /* Count of modifications to be recv'd from
			     processes in this row. */
    int_t  nbrecvmod = 0; /* Count of total modifications to be recv'd. */
    double t;
#if ( DEBUGlevel>=2 )
    int_t Ublocks = 0;
#endif

    int_t *mod_bit = Llu->mod_bit; /* flag contribution from each row block */
 
    t = SuperLU_timer_();

    /* Test input parameters. */
    *info = 0;
    if ( n < 0 ) *info = -1;
    else if ( nrhs < 0 ) *info = -6;
    if ( *info ) {
	pxerbla("PDGSTRS_BGLOBAL", grid, -*info);
	return;
    }
	
    /*
     * Initialization.
     */
    iam = grid->iam;
    Pc = grid->npcol;
    Pr = grid->nprow;
    myrow = MYROW( iam, grid );
    mycol = MYCOL( iam, grid );
    nsupers = Glu_persist->supno[n-1] + 1;
    xsup = Glu_persist->xsup;
    Lrowind_bc_ptr = Llu->Lrowind_bc_ptr;
    Lnzval_bc_ptr = Llu->Lnzval_bc_ptr;
    nlb = CEILING( nsupers, Pr ); /* Number of local block rows. */
    stat->ops[SOLVE] = 0.0;
    Llu->SolveMsgSent = 0;

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Enter pdgstrs_Bglobal()");
#endif

    /* Save the count to be altered so it can be used by
       subsequent call to PDGSTRS_BGLOBAL. */
    if ( !(fmod = intMalloc_dist(nlb)) )
	ABORT("Malloc fails for fmod[].");
    for (i = 0; i < nlb; ++i) fmod[i] = Llu->fmod[i];
    if ( !(frecv = intMalloc_dist(nlb)) )
	ABORT("Malloc fails for frecv[].");
    Llu->frecv = frecv;

#if defined (ISEND_IRECV) || defined (BSEND)
    k = SUPERLU_MAX( Llu->nfsendx, Llu->nbsendx ) + nlb;
    if ( !(send_req = (MPI_Request*) SUPERLU_MALLOC(k*sizeof(MPI_Request))) )
	ABORT("Malloc fails for send_req[].");
#endif

#ifdef _CRAY
    ftcs1 = _cptofcd("L", strlen("L"));
    ftcs2 = _cptofcd("N", strlen("N"));
    ftcs3 = _cptofcd("U", strlen("U"));
#endif


    /* Obtain ilsum[] and ldalsum for process column 0. */
    ilsum = Llu->ilsum;
    ldalsum = Llu->ldalsum;

    /* Allocate working storage. */
    knsupc = sp_ienv_dist(3);
    maxrecvsz = knsupc * nrhs + SUPERLU_MAX( XK_H, LSUM_H );
    if ( !(lsum = doubleCalloc_dist(((size_t)ldalsum) * nrhs 
        + nlb * LSUM_H)) )
	ABORT("Calloc fails for lsum[].");
    if ( !(x = doubleMalloc_dist(((size_t)ldalsum) * nrhs 
        + nlb * XK_H)) )
	ABORT("Malloc fails for x[].");
    if ( !(recvbuf = doubleMalloc_dist(maxrecvsz)) )
	ABORT("Malloc fails for recvbuf[].");
    if ( !(rtemp = doubleCalloc_dist(maxrecvsz)) )
	ABORT("Malloc fails for rtemp[].");

    
    /*---------------------------------------------------
     * Forward solve Ly = b.
     *---------------------------------------------------*/

    /*
     * Copy B into X on the diagonal processes.
     */
    ii = 0;
    for (k = 0; k < nsupers; ++k) {
	knsupc = SuperSize( k );
	krow = PROW( k, grid );
	if ( myrow == krow ) {
	    lk = LBi( k, grid );   /* Local block number. */
	    il = LSUM_BLK( lk );
	    lsum[il - LSUM_H] = k; /* Block number prepended in the header. */
	    kcol = PCOL( k, grid );
	    if ( mycol == kcol ) { /* Diagonal process. */
		jj = X_BLK( lk );
		x[jj - XK_H] = k;  /* Block number prepended in the header. */
		RHS_ITERATE(j)
		    for (i = 0; i < knsupc; ++i) /* X is stored in blocks. */
			x[i + jj + j*knsupc] = B[i + ii + j*ldb];
	    }
	}
Example No. 11
0
void pdgstrs1(int_t n, LUstruct_t *LUstruct, gridinfo_t *grid,
	      double *x, int nrhs, SuperLUStat_t *stat, int *info)
{
/*
 * Purpose
 * =======
 *
 * PDGSTRS1 solves a system of distributed linear equations
 *
 *                   op( sub(A) ) * X = sub( B )
 *
 * with a general N-by-N distributed matrix sub( A ) using the LU
 * factorization computed by PDGSTRF.
 * 
 * Arguments
 * =========
 *
 * n      (input) int (global)
 *        The order of the system of linear equations.
 *
 * LUstruct (input) LUstruct_t*
 *        The distributed data structures to store L and U factors,
 *        and the permutation vectors.
 *        See superlu_ddefs.h for the definition of 'LUstruct_t' structure.
 *
 * grid   (input) gridinfo_t*
 *        The 2D process mesh.
 *
 * x      (input/output) double*
 *        On entry, the right-hand side matrix.
 *        On exit, the solution matrix if info = 0.
 *
 *        NOTE: the right-hand side matrix is already distributed on
 *              the diagonal processes.
 *
 * nrhs   (input) int (global)
 *        Number of right-hand sides.
 *
 * stat   (output) SuperLUStat_t*
 *        Record the statistics about the triangular solves; 
 *        See SuperLUStat_t structure defined in util.h.
 *
 * info   (output) int*
 * 	   = 0: successful exit
 *	   < 0: if info = -i, the i-th argument had an illegal value
 *        
 */
    Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
    LocalLU_t *Llu = LUstruct->Llu;
    double alpha = 1.0;
    double *lsum;  /* Local running sum of the updates to B-components */
    double *lusup, *dest;
    double *recvbuf, *tempv;
    double *rtemp; /* Result of full matrix-vector multiply. */
    int_t  **Ufstnz_br_ptr = Llu->Ufstnz_br_ptr;
    int_t  *Urbs, *Urbs1; /* Number of row blocks in each block column of U. */
    Ucb_indptr_t **Ucb_indptr;/* Vertical linked list pointing to Uindex[] */
    int_t  **Ucb_valptr;      /* Vertical linked list pointing to Unzval[] */
    int_t  iam, kcol, krow, mycol, myrow;
    int_t  i, ii, il, j, k, lb, ljb, lk, lptr, luptr;
    int_t  nb, nlb, nub, nsupers;
    int_t  *xsup, *lsub, *usub;
    int_t  *ilsum;    /* Starting position of each supernode in lsum (LOCAL)*/
    int_t  Pc, Pr;
    int    knsupc, nsupr;
    int    ldalsum;   /* Number of lsum entries locally owned. */
    int    maxrecvsz, p, pi;
    int_t  **Lrowind_bc_ptr;
    double **Lnzval_bc_ptr;
    MPI_Status status;
#ifdef ISEND_IRECV
    MPI_Request *send_req, recv_req;
#endif

    /*-- Counts used for L-solve --*/
    int_t  *fmod;         /* Modification count for L-solve. */
    int_t  **fsendx_plist = Llu->fsendx_plist;
    int_t  nfrecvx = Llu->nfrecvx; /* Number of X components to be recv'd. */
    int_t  *frecv;        /* Count of modifications to be recv'd from
			     processes in this row. */
    int_t  nfrecvmod = 0; /* Count of total modifications to be recv'd. */
    int_t  nleaf = 0, nroot = 0;

    /*-- Counts used for U-solve --*/
    int_t  *bmod;         /* Modification count for U-solve. */
    int_t  **bsendx_plist = Llu->bsendx_plist;
    int_t  nbrecvx = Llu->nbrecvx; /* Number of X components to be recv'd. */
    int_t  *brecv;        /* Count of modifications to be recv'd from
			     processes in this row. */
    int_t  nbrecvmod = 0; /* Count of total modifications to be recv'd. */
    double t;
#if ( DEBUGlevel>=2 )
    int_t Ublocks = 0;
#endif

    t = SuperLU_timer_();

    /* Test input parameters. */
    *info = 0;
    if ( n < 0 ) *info = -1;
    else if ( nrhs < 0 ) *info = -5;
    if ( *info ) {
	pxerbla("PDGSTRS1", grid, -*info);
	return;
    }
	
    /*
     * Initialization.
     */
    iam = grid->iam;
    Pc = grid->npcol;
    Pr = grid->nprow;
    myrow = MYROW( iam, grid );
    mycol = MYCOL( iam, grid );
    nsupers = Glu_persist->supno[n-1] + 1;
    xsup = Glu_persist->xsup;
    Lrowind_bc_ptr = Llu->Lrowind_bc_ptr;
    Lnzval_bc_ptr = Llu->Lnzval_bc_ptr;
    nlb = CEILING( nsupers, Pr ); /* Number of local block rows. */
    Llu->SolveMsgSent = 0;

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(iam, "Enter pdgstrs1()");
#endif

    /* Save the count to be altered so it can be used by
       subsequent call to PDGSTRS1. */
    if ( !(fmod = intMalloc_dist(nlb)) )
	ABORT("Malloc fails for fmod[].");
    for (i = 0; i < nlb; ++i) fmod[i] = Llu->fmod[i];
    if ( !(frecv = intMalloc_dist(nlb)) )
	ABORT("Malloc fails for frecv[].");
    Llu->frecv = frecv;

#ifdef ISEND_IRECV
    k = SUPERLU_MAX( Llu->nfsendx, Llu->nbsendx ) + nlb;
    if ( !(send_req = (MPI_Request*) SUPERLU_MALLOC(k*sizeof(MPI_Request))) )
	ABORT("Malloc fails for send_req[].");
#endif

#ifdef _CRAY
    ftcs1 = _cptofcd("L", strlen("L"));
    ftcs2 = _cptofcd("N", strlen("N"));
    ftcs3 = _cptofcd("U", strlen("U"));
#endif


    /* Compute ilsum[] and ldalsum for process column 0. */
    ilsum = Llu->ilsum;
    ldalsum = Llu->ldalsum;

    /* Allocate working storage. */
    knsupc = sp_ienv_dist(3);
    if ( !(lsum = doubleCalloc_dist(((size_t)ldalsum) * nrhs 
        + nlb * LSUM_H)) )
	ABORT("Calloc fails for lsum[].");
    maxrecvsz = knsupc * nrhs + SUPERLU_MAX(XK_H, LSUM_H);
    if ( !(recvbuf = doubleMalloc_dist(maxrecvsz)) )
	ABORT("Malloc fails for recvbuf[].");
    if ( !(rtemp = doubleCalloc_dist(maxrecvsz)) )
	ABORT("Malloc fails for rtemp[].");

    
    /*---------------------------------------------------
     * Forward solve Ly = b.
     *---------------------------------------------------*/

    /*
     * Prepend the block number to the header of each lsum[] block.
     */
    for (k = 0; k < nsupers; ++k) {
	knsupc = SuperSize( k );
	krow = PROW( k, grid );
	if ( myrow == krow ) {
	    lk = LBi( k, grid );   /* Local block number. */
	    il = LSUM_BLK( lk );
	    lsum[il - LSUM_H] = k; 
	}
    }

    /*
     * Compute frecv[] and nfrecvmod counts on the diagonal processes.
     */
    {
	superlu_scope_t *scp = &grid->rscp;

	for (k = 0; k < nsupers; ++k) {
	    krow = PROW( k, grid );
	    if ( myrow == krow ) {
		lk = LBi( k, grid );    /* Local block number. */
		kcol = PCOL( k, grid ); /* Root process in this row scope. */
		if ( mycol != kcol && fmod[lk] )
		    i = 1;  /* Contribution from non-diagonal process. */
		else i = 0;
		MPI_Reduce( &i, &frecv[lk], 1, mpi_int_t,
			   MPI_SUM, kcol, scp->comm );
		if ( mycol == kcol ) { /* Diagonal process. */
		    nfrecvmod += frecv[lk];
		    if ( !frecv[lk] && !fmod[lk] ) ++nleaf;
#if ( DEBUGlevel>=2 )
		    printf("(%2d) frecv[%4d]  %2d\n", iam, k, frecv[lk]);
		    assert( frecv[lk] < Pc );
#endif
		}
	    }
	}
    }

    /* ---------------------------------------------------------
       Solve the leaf nodes first by all the diagonal processes.
       --------------------------------------------------------- */
#if ( DEBUGlevel>=2 )
    printf("(%2d) nleaf %4d\n", iam, nleaf);
#endif
    for (k = 0; k < nsupers && nleaf; ++k) {
	krow = PROW( k, grid );
	kcol = PCOL( k, grid );
	if ( myrow == krow && mycol == kcol ) { /* Diagonal process */
	    knsupc = SuperSize( k );
	    lk = LBi( k, grid );
	    if ( !frecv[lk] && !fmod[lk] ) {
		fmod[lk] = -1;  /* Do not solve X[k] in the future. */
		ii = X_BLK( lk );
		lk = LBj( k, grid ); /* Local block number, column-wise. */
		lsub = Lrowind_bc_ptr[lk];
		lusup = Lnzval_bc_ptr[lk];
		nsupr = lsub[1];
#ifdef _CRAY
		STRSM(ftcs1, ftcs1, ftcs2, ftcs3, &knsupc, &nrhs, &alpha,
		      lusup, &nsupr, &x[ii], &knsupc);
#elif defined (USE_VENDOR_BLAS)
		dtrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, 
		       lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1);
#else
		dtrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, 
		       lusup, &nsupr, &x[ii], &knsupc);
#endif
		/*stat->ops[SOLVE] += knsupc * (knsupc - 1) * nrhs;*/
		--nleaf;
#if ( DEBUGlevel>=2 )
		printf("(%2d) Solve X[%2d]\n", iam, k);
#endif
		
		/*
		 * Send Xk to process column Pc[k].
		 */
		for (p = 0; p < Pr; ++p)
		    if ( fsendx_plist[lk][p] != EMPTY ) {
			pi = PNUM( p, kcol, grid );
#ifdef ISEND_IRECV
			MPI_Isend( &x[ii - XK_H], knsupc * nrhs + XK_H,
                                   MPI_DOUBLE, pi, Xk, grid->comm,
                                   &send_req[Llu->SolveMsgSent++]);
#else
			MPI_Send( &x[ii - XK_H], knsupc * nrhs + XK_H,
				 MPI_DOUBLE, 
                                 pi, Xk, grid->comm );
#endif
#if ( DEBUGlevel>=2 )
			printf("(%2d) Sent X[%2.0f] to P %2d\n",
			       iam, x[ii-XK_H], pi);
#endif
		    }
		
		/*
		 * Perform local block modifications: lsum[i] -= L_i,k * X[k]
		 */
		nb = lsub[0] - 1;
		lptr = BC_HEADER + LB_DESCRIPTOR + knsupc;
		luptr = knsupc; /* Skip diagonal block L(k,k). */
		
		dlsum_fmod(lsum, x, &x[ii], rtemp, nrhs, knsupc, k,
			   fmod, nb, lptr, luptr, xsup, grid, Llu,
			   send_req, stat);
	    }
	} /* if diagonal process ... */
    } /* for k ... */

    /*
     * Compute the internal nodes asynchronously by all processes.
     */
#if ( DEBUGlevel>=2 )
    printf("(%2d) nfrecvx %4d,  nfrecvmod %4d,  nleaf %4d\n",
	   iam, nfrecvx, nfrecvmod, nleaf);
#endif

    while ( nfrecvx || nfrecvmod ) { /* While not finished. */

	/* Receive a message. */
#ifdef ISEND_IRECV
	/* -MPI- FATAL: Remote protocol queue full */
	MPI_Irecv( recvbuf, maxrecvsz, MPI_DOUBLE, MPI_ANY_SOURCE,
		 MPI_ANY_TAG, grid->comm, &recv_req );
	MPI_Wait( &recv_req, &status );
#else
	MPI_Recv( recvbuf, maxrecvsz, MPI_DOUBLE, MPI_ANY_SOURCE,
		 MPI_ANY_TAG, grid->comm, &status );
#endif

	k = *recvbuf;

#if ( DEBUGlevel>=2 )
	printf("(%2d) Recv'd block %d, tag %2d\n", iam, k, status.MPI_TAG);
#endif
	
	switch ( status.MPI_TAG ) {
	  case Xk:
	      --nfrecvx;
	      lk = LBj( k, grid ); /* Local block number, column-wise. */
	      lsub = Lrowind_bc_ptr[lk];
	      lusup = Lnzval_bc_ptr[lk];
	      if ( lsub ) {
		  nb   = lsub[0];
		  lptr = BC_HEADER;
		  luptr = 0;
		  knsupc = SuperSize( k );

		  /*
		   * Perform local block modifications: lsum[i] -= L_i,k * X[k]
		   */
		  dlsum_fmod(lsum, x, &recvbuf[XK_H], rtemp, nrhs, knsupc, k,
			     fmod, nb, lptr, luptr, xsup, grid, Llu,
			     send_req, stat);
	      } /* if lsub */

	      break;

	  case LSUM:
	      --nfrecvmod;
	      lk = LBi( k, grid ); /* Local block number, row-wise. */
	      ii = X_BLK( lk );
	      knsupc = SuperSize( k );
	      tempv = &recvbuf[LSUM_H];
	      RHS_ITERATE(j)
		  for (i = 0; i < knsupc; ++i)
		      x[i + ii + j*knsupc] += tempv[i + j*knsupc];

	      if ( (--frecv[lk])==0 && fmod[lk]==0 ) {
		  fmod[lk] = -1; /* Do not solve X[k] in the future. */
		  lk = LBj( k, grid ); /* Local block number, column-wise. */
		  lsub = Lrowind_bc_ptr[lk];
		  lusup = Lnzval_bc_ptr[lk];
		  nsupr = lsub[1];
#ifdef _CRAY
		  STRSM(ftcs1, ftcs1, ftcs2, ftcs3, &knsupc, &nrhs, &alpha,
			lusup, &nsupr, &x[ii], &knsupc);
#elif defined (USE_VENDOR_BLAS)
		  dtrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, 
			 lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1);
#else
		  dtrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, 
			 lusup, &nsupr, &x[ii], &knsupc);
#endif
		  /*stat->ops[SOLVE] += knsupc * (knsupc - 1) * nrhs;*/
#if ( DEBUGlevel>=2 )
		  printf("(%2d) Solve X[%2d]\n", iam, k);
#endif
		
		  /*
		   * Send Xk to process column Pc[k].
		   */
		  kcol = PCOL( k, grid );
		  for (p = 0; p < Pr; ++p)
		      if ( fsendx_plist[lk][p] != EMPTY ) {
			  pi = PNUM( p, kcol, grid );
#ifdef ISEND_IRECV
			  MPI_Isend( &x[ii - XK_H], knsupc * nrhs + XK_H,
                                     MPI_DOUBLE, pi, Xk, grid->comm,
				     &send_req[Llu->SolveMsgSent++] );
#else
			  MPI_Send( &x[ii - XK_H], knsupc * nrhs + XK_H,
				   MPI_DOUBLE, pi, Xk, grid->comm );
#endif
#if ( DEBUGlevel>=2 )
			  printf("(%2d) Sent X[%2.0f] to P %2d\n",
				 iam, x[ii-XK_H], pi);
#endif
		      }

		  /*
		   * Perform local block modifications.
		   */
		  nb = lsub[0] - 1;
		  lptr = BC_HEADER + LB_DESCRIPTOR + knsupc;
		  luptr = knsupc; /* Skip diagonal block L(k,k). */

		  dlsum_fmod(lsum, x, &x[ii], rtemp, nrhs, knsupc, k,
			     fmod, nb, lptr, luptr, xsup, grid, Llu,
			     send_req, stat);
	      } /* if */

	      break;

#if ( DEBUGlevel>=2 )
	    default:
	      printf("(%2d) Recv'd wrong message tag %4d\n", iam, status.MPI_TAG);
	      break;
#endif
	  } /* switch */

    } /* while not finished ... */


#if ( PRNTlevel>=2 )
    t = SuperLU_timer_() - t;
    if ( !iam ) printf(".. L-solve time\t%8.2f\n", t);
    t = SuperLU_timer_();
#endif

#if ( DEBUGlevel>=2 )
    if ( !iam ) printf("\n.. After L-solve: y =\n");
    for (i = 0, k = 0; k < nsupers; ++k) {
	krow = PROW( k, grid );
	kcol = PCOL( k, grid );
	if ( myrow == krow && mycol == kcol ) { /* Diagonal process */
	    knsupc = SuperSize( k );
	    lk = LBi( k, grid );
	    ii = X_BLK( lk );
	    for (j = 0; j < knsupc; ++j)
		printf("\t(%d)\t%4d\t%.10f\n", iam, xsup[k]+j, x[ii+j]);
	}
	MPI_Barrier( grid->comm );
    }
#endif

    SUPERLU_FREE(fmod);
    SUPERLU_FREE(frecv);
    SUPERLU_FREE(rtemp);

#ifdef ISEND_IRECV
    for (i = 0; i < Llu->SolveMsgSent; ++i) MPI_Request_free(&send_req[i]);
    Llu->SolveMsgSent = 0;
#endif


    /*---------------------------------------------------
     * Back solve Ux = y.
     *
     * The Y components from the forward solve are already
     * on the diagonal processes.
     *---------------------------------------------------*/

    /* Save the count to be altered so it can be used by
       subsequent call to PDGSTRS1. */
    if ( !(bmod = intMalloc_dist(nlb)) )
	ABORT("Malloc fails for bmod[].");
    for (i = 0; i < nlb; ++i) bmod[i] = Llu->bmod[i];
    if ( !(brecv = intMalloc_dist(nlb)) )
	ABORT("Malloc fails for brecv[].");
    Llu->brecv = brecv;

    /*
     * Compute brecv[] and nbrecvmod counts on the diagonal processes.
     */
    {
	superlu_scope_t *scp = &grid->rscp;

	for (k = 0; k < nsupers; ++k) {
	    krow = PROW( k, grid );
	    if ( myrow == krow ) {
		lk = LBi( k, grid );    /* Local block number. */
		kcol = PCOL( k, grid ); /* Root process in this row scope. */
		if ( mycol != kcol && bmod[lk] )
		    i = 1;  /* Contribution from non-diagonal process. */
		else i = 0;
		MPI_Reduce( &i, &brecv[lk], 1, mpi_int_t,
			   MPI_SUM, kcol, scp->comm );
		if ( mycol == kcol ) { /* Diagonal process. */
		    nbrecvmod += brecv[lk];
		    if ( !brecv[lk] && !bmod[lk] ) ++nroot;
#if ( DEBUGlevel>=2 )
		    printf("(%2d) brecv[%4d]  %2d\n", iam, k, brecv[lk]);
		    assert( brecv[lk] < Pc );
#endif
		}
	    }
	}
    }

    /* Re-initialize lsum to zero. Each block header is already in place. */
    for (k = 0; k < nsupers; ++k) {
	krow = PROW( k, grid );
	if ( myrow == krow ) {
	    knsupc = SuperSize( k );
	    lk = LBi( k, grid );
	    il = LSUM_BLK( lk );
	    dest = &lsum[il];
	    RHS_ITERATE(j)
		for (i = 0; i < knsupc; ++i) dest[i + j*knsupc] = 0.0;
	}
    }