PRIVATE Int rescale_determinant
(
    Entry *d_mantissa,
    double *d_exponent
)
{
    double d_abs ;

    ABS (d_abs, *d_mantissa) ;

    if (SCALAR_IS_ZERO (d_abs))
    {
	/* the determinant is zero */
	*d_exponent = 0 ;
	return (FALSE) ;
    }

    if (SCALAR_IS_NAN (d_abs))
    {
	/* the determinant is NaN */
	return (FALSE) ;
    }

    while (d_abs < 1.)
    {
	SCALE (*d_mantissa, 10.0) ;
	*d_exponent = *d_exponent - 1.0 ;
	ABS (d_abs, *d_mantissa) ;
    }

    while (d_abs >= 10.)
    {
	SCALE (*d_mantissa, 0.1) ;
	*d_exponent = *d_exponent + 1.0 ;
	ABS (d_abs, *d_mantissa) ;
    }

    return (TRUE) ;
}
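
The helper above keeps a determinant in the form mantissa * 10^exponent, rescaling the mantissa into [1,10) after each update. Below is a minimal standalone sketch of that accumulation loop for real doubles; the det_product name and the plain-double interface are illustrative, not part of UMFPACK.

#include <math.h>

/* Sketch: accumulate prod (d [0..n-1]) as mantissa * 10^exponent.
 * Mirrors rescale_determinant above, specialized to real doubles.
 * Returns 0 if the product is zero or NaN, 1 otherwise. */
static int det_product (const double *d, int n,
    double *mantissa, double *exponent)
{
    int k ;
    *mantissa = 1.0 ;
    *exponent = 0.0 ;
    for (k = 0 ; k < n ; k++)
    {
	double ab ;
	*mantissa *= d [k] ;
	ab = fabs (*mantissa) ;
	if (ab == 0.0)
	{
	    /* the determinant is zero */
	    *exponent = 0.0 ;
	    return (0) ;
	}
	if (isnan (ab))
	{
	    /* the determinant is NaN */
	    return (0) ;
	}
	while (ab < 1.0)
	{
	    *mantissa *= 10.0 ;
	    *exponent -= 1.0 ;
	    ab = fabs (*mantissa) ;
	}
	while (ab >= 10.0)
	{
	    *mantissa *= 0.1 ;
	    *exponent += 1.0 ;
	    ab = fabs (*mantissa) ;
	}
    }
    return (1) ;
}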
Example #2
GLOBAL void UMF_2by2
(
    /* input, not modified: */
    Int n,		    /* A is n-by-n */
    const Int Ap [ ],	    /* size n+1 */
    const Int Ai [ ],	    /* size nz = Ap [n] */
    const double Ax [ ],    /* size nz if present */
#ifdef COMPLEX
    const double Az [ ],    /* size nz if present */
#endif
    double tol,		/* tolerance for determining whether or not an
			 * entry is numerically acceptable.  If tol <= 0,
			 * then all numerical values are ignored. */
    Int scale,		/* scaling to perform (none, sum, or max) */
    Int Cperm1 [ ],	/* singleton permutations */
#ifndef NDEBUG
    Int Rperm1 [ ],	/* not needed, since Rperm1 = Cperm1 for submatrix S */
#endif
    Int InvRperm1 [ ],	/* inverse of Rperm1 */
    Int n1,		/* number of singletons */
    Int nempty,		/* number of empty rows/cols */

    /* input, contents undefined on output: */
    Int Degree [ ],	/* Degree [j] is the number of off-diagonal
			 * entries in row/column j of S+S', where
			 * S = A (Cperm1 [n1..], Rperm1 [n1..]).
			 * Note that S is not used, nor formed. */

    /* output: */
    Int P [ ],		/* P [k] = i means original row i is kth row in S(P,:)
			 * where S = A (Cperm1 [n1..], Rperm1 [n1..]) */
    Int *p_nweak,
    Int *p_unmatched,

    /* workspace (not defined on input or output): */
    Int Ri [ ],		/* of size >= max (nz, n) */
    Int Rp [ ],		/* of size n+1 */
    double Rs [ ],	/* of size n if present.  Rs = sum (abs (A),2) or
			 * max (abs (A),2), the sum or max of each row.  Unused
			 * if scale is equal to UMFPACK_SCALE_NONE. */
    Int Head [ ],	/* of size n.  Head pointers for bucket sort */
    Int Next [ ],	/* of size n.  Next pointers for bucket sort */
    Int Ci [ ],		/* size nz */
    Int Cp [ ]		/* size n+1 */
)
{

    /* ---------------------------------------------------------------------- */
    /* local variables */
    /* ---------------------------------------------------------------------- */

    Entry aij ;
    double cmax, value, rs, ctol, dvalue ;
    Int k, p, row, col, do_values, do_sum, do_max, do_scale, nweak, weak,
	p1, p2, dfound, unmatched, n2, oldrow, newrow, oldcol, newcol, pp ;
#ifdef COMPLEX
    Int split = SPLIT (Az) ;
#endif
#ifndef NRECIPROCAL
    Int do_recip = FALSE ;
#endif

#ifndef NDEBUG
    /* UMF_debug += 99 ; */
    DEBUGm3 (("\n ==================================UMF_2by2: tol %g\n", tol)) ;
    ASSERT (AMD_valid (n, n, Ap, Ai) == AMD_OK) ;
    for (k = n1 ; k < n - nempty ; k++)
    {
	ASSERT (Cperm1 [k] == Rperm1 [k]) ;
    }
#endif

    /* ---------------------------------------------------------------------- */
    /* determine scaling options */
    /* ---------------------------------------------------------------------- */

    /* use the values, but only if they are present */
    /* ignore the values if tol <= 0 */
    do_values = (tol > 0) && (Ax != (double *) NULL) ;
    if (do_values && (Rs != (double *) NULL))
    {
	do_sum = (scale == UMFPACK_SCALE_SUM) ;
	do_max = (scale == UMFPACK_SCALE_MAX) ;
    }
    else
    {
	/* no scaling */
	do_sum = FALSE ;
	do_max = FALSE ;
    }
    do_scale = do_max || do_sum ;
    DEBUGm3 (("do_values "ID" do_sum "ID" do_max "ID" do_scale "ID"\n",
	do_values, do_sum, do_max, do_scale)) ;

    /* ---------------------------------------------------------------------- */
    /* compute the row scaling, if requested */
    /* ---------------------------------------------------------------------- */

    /* see also umf_kernel_init */

    if (do_scale)
    {
#ifndef NRECIPROCAL
	double rsmin ;
#endif
	for (row = 0 ; row < n ; row++)
	{
	    Rs [row] = 0.0 ;
	}
	for (col = 0 ; col < n ; col++)
	{
	    p2 = Ap [col+1] ;
	    for (p = Ap [col] ; p < p2 ; p++)
	    {
		row = Ai [p] ;
		ASSIGN (aij, Ax, Az, p, split) ;
		APPROX_ABS (value, aij) ;
		rs = Rs [row] ;
		if (!SCALAR_IS_NAN (rs))
		{
		    if (SCALAR_IS_NAN (value))
		    {
			/* if any entry in a row is NaN, then the scale factor
			 * for the row is NaN.  It will be set to 1 later. */
			Rs [row] = value ;
		    }
		    else if (do_max)
		    {
			Rs [row] = MAX (rs, value) ;
		    }
		    else
		    {
			Rs [row] += value ;
		    }
		}
	    }
	}
#ifndef NRECIPROCAL
	rsmin = Rs [0] ;
	if (SCALAR_IS_ZERO (rsmin) || SCALAR_IS_NAN (rsmin))
	{
	    rsmin = 1.0 ;
	}
#endif
	for (row = 0 ; row < n ; row++)
	{
	    /* do not scale an empty row, or a row with a NaN */
	    rs = Rs [row] ;
	    if (SCALAR_IS_ZERO (rs) || SCALAR_IS_NAN (rs))
	    {
		Rs [row] = 1.0 ;
	    }
#ifndef NRECIPROCAL
	    rsmin = MIN (rsmin, Rs [row]) ;
#endif
	}

#ifndef NRECIPROCAL
	/* multiply by the reciprocal if Rs is not too small */
	do_recip = (rsmin >= RECIPROCAL_TOLERANCE) ;
	if (do_recip)
	{
	    /* invert the scale factors */
	    for (row = 0 ; row < n ; row++)
	    {
		Rs [row] = 1.0 / Rs [row] ;
	    }
	}
#endif
    }
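
    /* At this point, if scaling was requested, Rs [row] holds either
     * sum (abs (A (row,:))) or max (abs (A (row,:))), with empty rows and
     * rows containing a NaN forced to a scale factor of 1.  When the
     * smallest scale factor is no smaller than RECIPROCAL_TOLERANCE, Rs is
     * inverted in place so the scans below can multiply by Rs [oldrow]
     * rather than divide, which is typically cheaper. */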

    /* ---------------------------------------------------------------------- */
    /* compute the max in each column and find diagonal */
    /* ---------------------------------------------------------------------- */

    nweak = 0 ;

#ifndef NDEBUG
    for (k = 0 ; k < n ; k++)
    {
	ASSERT (Rperm1 [k] >= 0 && Rperm1 [k] < n) ;
	ASSERT (InvRperm1 [Rperm1 [k]] == k) ;
    }
#endif

    n2 = n - n1 - nempty ;

    /* use Ri to count the number of strong entries in each row */
    for (row = 0 ; row < n2 ; row++)
    {
	Ri [row] = 0 ;
    }

    pp = 0 ;
    ctol = 0 ;
    dvalue = 1 ;

    /* construct C = pruned submatrix, strong values only, column form */

    for (k = n1 ; k < n - nempty ; k++)
    {
	oldcol = Cperm1 [k] ;
	newcol = k - n1 ;
	Next [newcol] = EMPTY ;
	DEBUGm1 (("Column "ID" newcol "ID" oldcol "ID"\n", k, newcol, oldcol)) ;

	Cp [newcol] = pp ;

	dfound = FALSE ;
	p1 = Ap [oldcol] ;
	p2 = Ap [oldcol+1] ;
	if (do_values)
	{
	    cmax = 0 ;
	    dvalue = 0 ;

	    if (!do_scale)
	    {
		/* no scaling */
		for (p = p1 ; p < p2 ; p++)
		{
		    oldrow = Ai [p] ;
		    ASSERT (oldrow >= 0 && oldrow < n) ;
		    newrow = InvRperm1 [oldrow] - n1 ;
		    ASSERT (newrow >= -n1 && newrow < n2) ;
		    if (newrow < 0) continue ;
		    ASSIGN (aij, Ax, Az, p, split) ;
		    APPROX_ABS (value, aij) ;
		    /* if either cmax or value is NaN, define cmax as NaN */
		    if (!SCALAR_IS_NAN (cmax))
		    {
			if (SCALAR_IS_NAN (value))
			{
			    cmax = value ;
			}
			else
			{
			    cmax = MAX (cmax, value) ;
			}
		    }
		    if (oldrow == oldcol)
		    {
			/* we found the diagonal entry in this column */
			dvalue = value ;
			dfound = TRUE ;
			ASSERT (newrow == newcol) ;
		    }
		}
	    }
#ifndef NRECIPROCAL
	    else if (do_recip)
	    {
		/* multiply by the reciprocal */
		for (p = p1 ; p < p2 ; p++)
		{
		    oldrow = Ai [p] ;
		    ASSERT (oldrow >= 0 && oldrow < n) ;
		    newrow = InvRperm1 [oldrow] - n1 ;
		    ASSERT (newrow >= -n1 && newrow < n2) ;
		    if (newrow < 0) continue ;
		    ASSIGN (aij, Ax, Az, p, split) ;
		    APPROX_ABS (value, aij) ;
		    value *= Rs [oldrow] ;
		    /* if either cmax or value is NaN, define cmax as NaN */
		    if (!SCALAR_IS_NAN (cmax))
		    {
			if (SCALAR_IS_NAN (value))
			{
			    cmax = value ;
			}
			else
			{
			    cmax = MAX (cmax, value) ;
			}
		    }
		    if (oldrow == oldcol)
		    {
			/* we found the diagonal entry in this column */
			dvalue = value ;
			dfound = TRUE ;
			ASSERT (newrow == newcol) ;
		    }
		}
	    }
#endif
	    else
	    {
		/* divide instead */
		for (p = p1 ; p < p2 ; p++)
		{
		    oldrow = Ai [p] ;
		    ASSERT (oldrow >= 0 && oldrow < n) ;
		    newrow = InvRperm1 [oldrow] - n1 ;
		    ASSERT (newrow >= -n1 && newrow < n2) ;
		    if (newrow < 0) continue ;
		    ASSIGN (aij, Ax, Az, p, split) ;
		    APPROX_ABS (value, aij) ;
		    value /= Rs [oldrow] ;
		    /* if either cmax or value is NaN, define cmax as NaN */
		    if (!SCALAR_IS_NAN (cmax))
		    {
			if (SCALAR_IS_NAN (value))
			{
			    cmax = value ;
			}
			else
			{
			    cmax = MAX (cmax, value) ;
			}
		    }
		    if (oldrow == oldcol)
		    {
			/* we found the diagonal entry in this column */
			dvalue = value ;
			dfound = TRUE ;
			ASSERT (newrow == newcol) ;
		    }
		}
	    }

	    ctol = tol * cmax ;
	    DEBUGm1 (("    cmax col "ID" %g  ctol %g\n", oldcol, cmax, ctol)) ;
	}
	else
	{
	    for (p = p1 ; p < p2 ; p++)
	    {
		oldrow = Ai [p] ;
		ASSERT (oldrow >= 0 && oldrow < n) ;
		newrow = InvRperm1 [oldrow] - n1 ;
		ASSERT (newrow >= -n1 && newrow < n2) ;
		if (newrow < 0) continue ;
		Ci [pp++] = newrow ;
		if (oldrow == oldcol)
		{
		    /* we found the diagonal entry in this column */
		    ASSERT (newrow == newcol) ;
		    dfound = TRUE ;
		}
		/* count the entries in each column */
		Ri [newrow]++ ;
	    }
	}

	/* ------------------------------------------------------------------ */
	/* flag the weak diagonals */
	/* ------------------------------------------------------------------ */

	if (!dfound)
	{
	    /* no diagonal entry present */
	    weak = TRUE ;
	}
	else
	{
	    /* diagonal entry is present, check its value */
	    weak = (do_values) ?  WEAK (dvalue, ctol) : FALSE ;
	}
	if (weak)
	{
	    /* flag this column as weak */
	    DEBUG0 (("Weak!\n")) ;
	    Next [newcol] = IS_WEAK ;
	    nweak++ ;
	}

	/* ------------------------------------------------------------------ */
	/* count entries in each row that are not numerically weak */
	/* ------------------------------------------------------------------ */

	if (do_values)
	{
	    if (!do_scale)
	    {
		/* no scaling */
		for (p = p1 ; p < p2 ; p++)
		{
		    oldrow = Ai [p] ;
		    newrow = InvRperm1 [oldrow] - n1 ;
		    if (newrow < 0) continue ;
		    ASSIGN (aij, Ax, Az, p, split) ;
		    APPROX_ABS (value, aij) ;
		    weak = WEAK (value, ctol) ;
		    if (!weak)
		    {
			DEBUG0 (("    strong: row "ID": %g\n", oldrow, value)) ;
			Ci [pp++] = newrow ;
			Ri [newrow]++ ;
		    }
		}
	    }
#ifndef NRECIPROCAL
	    else if (do_recip)
	    {
		/* multiply by the reciprocal */
		for (p = p1 ; p < p2 ; p++)
		{
		    oldrow = Ai [p] ;
		    newrow = InvRperm1 [oldrow] - n1 ;
		    if (newrow < 0) continue ;
		    ASSIGN (aij, Ax, Az, p, split) ;
		    APPROX_ABS (value, aij) ;
		    value *= Rs [oldrow] ;
		    weak = WEAK (value, ctol) ;
		    if (!weak)
		    {
			DEBUG0 (("    strong: row "ID": %g\n", oldrow, value)) ;
			Ci [pp++] = newrow ;
			Ri [newrow]++ ;
		    }
		}
	    }
#endif
	    else
	    {
		/* divide instead */
		for (p = p1 ; p < p2 ; p++)
		{
		    oldrow = Ai [p] ;
		    newrow = InvRperm1 [oldrow] - n1 ;
		    if (newrow < 0) continue ;
		    ASSIGN (aij, Ax, Az, p, split) ;
		    APPROX_ABS (value, aij) ;
		    value /= Rs [oldrow] ;
		    weak = WEAK (value, ctol) ;
		    if (!weak)
		    {
			DEBUG0 (("    strong: row "ID": %g\n", oldrow, value)) ;
			Ci [pp++] = newrow ;
			Ri [newrow]++ ;
		    }
		}
	    }
	}
    }
    Cp [n2] = pp ;
    ASSERT (AMD_valid (n2, n2, Cp, Ci) == AMD_OK) ;

    if (nweak == 0)
    {
	/* nothing to do, quick return */
	DEBUGm2 (("\n =============================UMF_2by2: quick return\n")) ;
	for (k = 0 ; k < n ; k++)
	{
	    P [k] = k ;
	}
	*p_nweak = 0 ;
	*p_unmatched = 0 ;
	return ;
    }

#ifndef NDEBUG
    for (k = 0 ; k < n2 ; k++)
    {
	P [k] = EMPTY ;
    }
    for (k = 0 ; k < n2 ; k++)
    {
	ASSERT (Degree [k] >= 0 && Degree [k] < n2) ;
    }
#endif

    /* ---------------------------------------------------------------------- */
    /* find the 2-by-2 permutation */
    /* ---------------------------------------------------------------------- */

    /* The matrix S is now mapped to the index range 0 to n2-1.  We have
     * S = A (Rperm [n1 .. n-nempty-1], Cperm [n1 .. n-nempty-1]), and then
     * C = pattern of strong entries in S.  A weak diagonal k in S is marked
     * with Next [k] = IS_WEAK. */

    unmatched = two_by_two (n2, Cp, Ci, Degree, Next, Ri, P, Rp, Head) ;

    /* ---------------------------------------------------------------------- */

    *p_nweak = nweak ;
    *p_unmatched = unmatched ;

#ifndef NDEBUG
    DEBUGm4 (("UMF_2by2: weak "ID"  unmatched "ID"\n", nweak, unmatched)) ;
    for (row = 0 ; row < n ; row++)
    {
	DEBUGm2 (("P ["ID"] = "ID"\n", row, P [row])) ;
    }
    DEBUGm2 (("\n =============================UMF_2by2: done\n\n")) ;
#endif
}
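
The pruning in UMF_2by2 keeps an entry only if it is "strong": nonzero and at least ctol = tol * cmax in scaled magnitude, where cmax is the largest scaled magnitude in its column. Here is a minimal sketch of that test on a single dense column, assuming (consistent with the code above) that WEAK (value, ctol) holds exactly when value is zero or falls below ctol; the strong_count name and the dense-column interface are illustrative.

#include <math.h>

/* Sketch: count the strong entries of one column, under optional row
 * scaling (rs == NULL means no scaling, as with UMFPACK_SCALE_NONE). */
static int strong_count (const double *col, const double *rs, int len,
    double tol)
{
    int i, nstrong = 0 ;
    double a, cmax = 0.0, ctol ;
    for (i = 0 ; i < len ; i++)
    {
	a = fabs (col [i]) ;
	if (rs != NULL) a /= rs [i] ;
	cmax = (a > cmax) ? a : cmax ;	/* column max of scaled magnitudes */
    }
    ctol = tol * cmax ;
    for (i = 0 ; i < len ; i++)
    {
	a = fabs (col [i]) ;
	if (rs != NULL) a /= rs [i] ;
	if (a > 0.0 && a >= ctol) nstrong++ ;	/* i.e., !WEAK (a, ctol) */
    }
    return (nstrong) ;
}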
GLOBAL void UMF_kernel_wrapup
(
    NumericType *Numeric,
    SymbolicType *Symbolic,
    WorkType *Work
)
{

    /* ---------------------------------------------------------------------- */
    /* local variables */
    /* ---------------------------------------------------------------------- */

    Entry pivot_value ;
    double d ;
    Entry *D ;
    Int i, k, col, row, llen, ulen, *ip, *Rperm, *Cperm, *Lilen, npiv, lp,
	*Uilen, *Lip, *Uip, *Cperm_init, up, pivrow, pivcol, *Lpos, *Upos, *Wr,
	*Wc, *Wp, *Frpos, *Fcpos, *Row_degree, *Col_degree, *Rperm_init,
	n_row, n_col, n_inner, zero_pivot, nan_pivot, n1 ;

#ifndef NDEBUG
    UMF_dump_matrix (Numeric, Work, FALSE) ;
#endif

    DEBUG0 (("Kernel complete, Starting Kernel wrapup\n")) ;
    n_row = Symbolic->n_row ;
    n_col = Symbolic->n_col ;
    n_inner = MIN (n_row, n_col) ;
    Rperm = Numeric->Rperm ;
    Cperm = Numeric->Cperm ;
    Lilen = Numeric->Lilen ;
    Uilen = Numeric->Uilen ;
    Upos = Numeric->Upos ;
    Lpos = Numeric->Lpos ;
    Lip = Numeric->Lip ;
    Uip = Numeric->Uip ;
    D = Numeric->D ;

    npiv = Work->npiv ;
    Numeric->npiv = npiv ;
    Numeric->ulen = Work->ulen ;

    ASSERT (n_row == Numeric->n_row) ;
    ASSERT (n_col == Symbolic->n_col) ;
    DEBUG0 (("Wrap-up: npiv "ID" ulen "ID"\n", npiv, Numeric->ulen)) ;
    ASSERT (npiv <= n_inner) ;

    /* Work->ulen is nonzero only if the matrix is singular or rectangular */
    ASSERT (IMPLIES (npiv == n_col, Work->ulen == 0)) ;

    /* ---------------------------------------------------------------------- */
    /* find the smallest and largest entries in D */
    /* ---------------------------------------------------------------------- */

    for (k = 0 ; k < npiv ; k++)
    {
	pivot_value = D [k] ;
	ABS (d, pivot_value) ;
	zero_pivot = SCALAR_IS_ZERO (d) ;
	nan_pivot = SCALAR_IS_NAN (d) ;

	if (!zero_pivot)
	{
	    /* the pivot is nonzero, but might be Inf or NaN */
	    Numeric->nnzpiv++ ;
	}

	if (k == 0)
	{
	    Numeric->min_udiag = d ;
	    Numeric->max_udiag = d ;
	}
	else
	{
	    /* min (abs (diag (U))) behaves as follows:  If any entry is zero,
	       then the result is zero (regardless of the presence of NaN's).
	       Otherwise, if any entry is NaN, then the result is NaN.
	       Otherwise, the result is the smallest absolute value on the
	       diagonal of U.
	    */
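	    /* Example of these rules: if abs (diag (U)) is {3, NaN, 0},
	     * the loop ends with min_udiag = 0 and max_udiag = NaN. */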

	    if (SCALAR_IS_NONZERO (Numeric->min_udiag))
	    {
		if (zero_pivot || nan_pivot)
		{
		    Numeric->min_udiag = d ;
		}
		else if (!SCALAR_IS_NAN (Numeric->min_udiag))
		{
		    /* d and min_udiag are both non-NaN */
		    Numeric->min_udiag = MIN (Numeric->min_udiag, d) ;
		}
	    }

	    /*
	       max (abs (diag (U))) behaves as follows:  If any entry is NaN
	       then the result is NaN.  Otherwise, the result is the largest
	       absolute value on the diagonal of U.
	    */

	    if (nan_pivot)
	    {
		Numeric->max_udiag = d ;
	    }
	    else if (!SCALAR_IS_NAN (Numeric->max_udiag))
	    {
		/* d and max_udiag are both non-NaN */
		Numeric->max_udiag = MAX (Numeric->max_udiag, d) ;
	    }
	}
    }

    /* ---------------------------------------------------------------------- */
    /* check if matrix is singular or rectangular */
    /* ---------------------------------------------------------------------- */

    Col_degree = Cperm ;	/* for NON_PIVOTAL_COL macro */
    Row_degree = Rperm ;	/* for NON_PIVOTAL_ROW macro */

    if (npiv < n_row)
    {
	/* finalize the row permutation */
	k = npiv ;
	DEBUGm3 (("Singular pivot rows "ID" to "ID"\n", k, n_row-1)) ;
	for (row = 0 ; row < n_row ; row++)
	{
	    if (NON_PIVOTAL_ROW (row))
	    {
		Rperm [row] = ONES_COMPLEMENT (k) ;
		DEBUGm3 (("Singular row "ID" is k: "ID" pivot row\n", row, k)) ;
		ASSERT (!NON_PIVOTAL_ROW (row)) ;
		Lpos [row] = EMPTY ;
		Uip [row] = EMPTY ;
		Uilen [row] = 0 ;
		k++ ;
	    }
	}
	ASSERT (k == n_row) ;
    }

    if (npiv < n_col)
    {
	/* finalize the col permutation */
	k = npiv ;
	DEBUGm3 (("Singular pivot cols "ID" to "ID"\n", k, n_col-1)) ;
	for (col = 0 ; col < n_col ; col++)
	{
	    if (NON_PIVOTAL_COL (col))
	    {
		Cperm [col] = ONES_COMPLEMENT (k) ;
		DEBUGm3 (("Singular col "ID" is k: "ID" pivot row\n", col, k)) ;
		ASSERT (!NON_PIVOTAL_COL (col)) ;
		Upos [col] = EMPTY ;
		Lip [col] = EMPTY ;
		Lilen [col] = 0 ;
		k++ ;
	    }
	}
	ASSERT (k == n_col) ;
    }

    if (npiv < n_inner)
    {
	/* finalize the diagonal of U */
	DEBUGm3 (("Diag of U is zero, "ID" to "ID"\n", npiv, n_inner-1)) ;
	for (k = npiv ; k < n_inner ; k++)
	{
	    CLEAR (D [k]) ;
	}
    }

    /* save the pattern of the last row of U */
    if (Numeric->ulen > 0)
    {
	DEBUGm3 (("Last row of U is not empty\n")) ;
	Numeric->Upattern = Work->Upattern ;
	Work->Upattern = (Int *) NULL ;
    }

    DEBUG2 (("Nnzpiv: "ID"  npiv "ID"\n", Numeric->nnzpiv, npiv)) ;
    ASSERT (Numeric->nnzpiv <= npiv) ;
    if (Numeric->nnzpiv < n_inner && !SCALAR_IS_NAN (Numeric->min_udiag))
    {
	/* the rest of the diagonal is zero, so min_udiag becomes 0,
	 * unless it is already NaN. */
	Numeric->min_udiag = 0.0 ;
    }

    /* ---------------------------------------------------------------------- */
    /* size n_row, n_col workspaces that can be used here: */
    /* ---------------------------------------------------------------------- */

    Frpos = Work->Frpos ;	/* of size n_row+1 */
    Fcpos = Work->Fcpos ;	/* of size n_col+1 */
    Wp = Work->Wp ;		/* of size MAX(n_row,n_col)+1 */
    /* Work->Upattern ;		cannot be used (in Numeric) */
    Wr = Work->Lpattern ;	/* of size n_row+1 */
    Wc = Work->Wrp ;		/* of size n_col+1 or bigger */

    /* ---------------------------------------------------------------------- */
    /* construct Rperm from inverse permutations */
    /* ---------------------------------------------------------------------- */

    /* use Frpos for temporary copy of inverse row permutation [ */

    for (pivrow = 0 ; pivrow < n_row ; pivrow++)
    {
	k = Rperm [pivrow] ;
	ASSERT (k < 0) ;
	k = ONES_COMPLEMENT (k) ;
	ASSERT (k >= 0 && k < n_row) ;
	Wp [k] = pivrow ;
	Frpos [pivrow] = k ;
    }
    for (k = 0 ; k < n_row ; k++)
    {
	Rperm [k] = Wp [k] ;
    }

    /* ---------------------------------------------------------------------- */
    /* construct Cperm from inverse permutation */
    /* ---------------------------------------------------------------------- */

    /* use Fcpos for temporary copy of inverse column permutation [ */

    for (pivcol = 0 ; pivcol < n_col ; pivcol++)
    {
	k = Cperm [pivcol] ;
	ASSERT (k < 0) ;
	k = ONES_COMPLEMENT (k) ;
	ASSERT (k >= 0 && k < n_col) ;
	Wp [k] = pivcol ;
	/* save a copy of the inverse column permutation in Fcpos */
	Fcpos [pivcol] = k ;
    }
    for (k = 0 ; k < n_col ; k++)
    {
	Cperm [k] = Wp [k] ;
    }

#ifndef NDEBUG
    for (k = 0 ; k < n_col ; k++)
    {
	col = Cperm [k] ;
	ASSERT (col >= 0 && col < n_col) ;
	ASSERT (Fcpos [col] == k) ;		/* col is the kth pivot */
    }
    for (k = 0 ; k < n_row ; k++)
    {
	row = Rperm [k] ;
	ASSERT (row >= 0 && row < n_row) ;
	ASSERT (Frpos [row] == k) ;		/* row is the kth pivot */
    }
#endif

#ifndef NDEBUG
    UMF_dump_lu (Numeric) ;
#endif

    /* ---------------------------------------------------------------------- */
    /* permute Lpos, Upos, Lilen, Lip, Uilen, and Uip */
    /* ---------------------------------------------------------------------- */

    for (k = 0 ; k < npiv ; k++)
    {
	pivrow = Rperm [k] ;
	Wr [k] = Uilen [pivrow] ;
	Wp [k] = Uip [pivrow] ;
    }

    for (k = 0 ; k < npiv ; k++)
    {
	Uilen [k] = Wr [k] ;
	Uip [k] = Wp [k] ;
    }

    for (k = 0 ; k < npiv ; k++)
    {
	pivrow = Rperm [k] ;
	Wp [k] = Lpos [pivrow] ;
    }

    for (k = 0 ; k < npiv ; k++)
    {
	Lpos [k] = Wp [k] ;
    }

    for (k = 0 ; k < npiv ; k++)
    {
	pivcol = Cperm [k] ;
	Wc [k] = Lilen [pivcol] ;
	Wp [k] = Lip [pivcol] ;
    }

    for (k = 0 ; k < npiv ; k++)
    {
	Lilen [k] = Wc [k] ;
	Lip [k] = Wp [k] ;
    }

    for (k = 0 ; k < npiv ; k++)
    {
	pivcol = Cperm [k] ;
	Wp [k] = Upos [pivcol] ;
    }

    for (k = 0 ; k < npiv ; k++)
    {
	Upos [k] = Wp [k] ;
    }

    /* ---------------------------------------------------------------------- */
    /* terminate the last Uchain and last Lchain */
    /* ---------------------------------------------------------------------- */

    Upos [npiv] = EMPTY ;
    Lpos [npiv] = EMPTY ;
    Uip [npiv] = EMPTY ;
    Lip [npiv] = EMPTY ;
    Uilen [npiv] = 0 ;
    Lilen [npiv] = 0 ;

    /* ---------------------------------------------------------------------- */
    /* convert U to the new pivot order */
    /* ---------------------------------------------------------------------- */

    n1 = Symbolic->n1 ;

    for (k = 0 ; k < n1 ; k++)
    {
	/* this is a singleton row of U */
	ulen = Uilen [k] ;
	DEBUG4 (("K "ID" New U.  ulen "ID" Singleton 1\n", k, ulen)) ;
	if (ulen > 0)
	{
	    up = Uip [k] ;
	    ip = (Int *) (Numeric->Memory + up) ;
	    for (i = 0 ; i < ulen ; i++)
	    {
		col = *ip ;
		DEBUG4 ((" old col "ID" new col "ID"\n", col, Fcpos [col]));
		ASSERT (col >= 0 && col < n_col) ;
		*ip++ = Fcpos [col] ;
	    }
	}
    }

    for (k = n1 ; k < npiv ; k++)
    {
	up = Uip [k] ;
	if (up < 0)
	{
	    /* this is the start of a new Uchain (with a pattern) */
	    ulen = Uilen [k] ;
	    DEBUG4 (("K "ID" New U.  ulen "ID" End_Uchain 1\n", k, ulen)) ;
	    if (ulen > 0)
	    {
		up = -up ;
		ip = (Int *) (Numeric->Memory + up) ;
		for (i = 0 ; i < ulen ; i++)
		{
		    col = *ip ;
		    DEBUG4 ((" old col "ID" new col "ID"\n", col, Fcpos [col]));
		    ASSERT (col >= 0 && col < n_col) ;
		    *ip++ = Fcpos [col] ;
		}
	    }
	}
    }

    ulen = Numeric->ulen ;
    if (ulen > 0)
    {
	/* convert last pivot row of U to the new pivot order */
	DEBUG4 (("K "ID" (last)\n", k)) ;
	for (i = 0 ; i < ulen ; i++)
	{
	    col = Numeric->Upattern [i] ;
	    DEBUG4 (("    old col "ID" new col "ID"\n", col, Fcpos [col])) ;
	    Numeric->Upattern [i] = Fcpos [col] ;
	}
    }

    /* Fcpos no longer needed ] */

    /* ---------------------------------------------------------------------- */
    /* convert L to the new pivot order */
    /* ---------------------------------------------------------------------- */

    for (k = 0 ; k < n1 ; k++)
    {
	llen = Lilen [k] ;
	DEBUG4 (("K "ID" New L.  llen "ID" Singleton col\n", k, llen)) ;
	if (llen > 0)
	{
	    lp = Lip [k] ;
	    ip = (Int *) (Numeric->Memory + lp) ;
	    for (i = 0 ; i < llen ; i++)
	    {
		row = *ip ;
		DEBUG4 (("    old row "ID" new row "ID"\n", row, Frpos [row])) ;
		ASSERT (row >= 0 && row < n_row) ;
		*ip++ = Frpos [row] ;
	    }
	}
    }

    for (k = n1 ; k < npiv ; k++)
    {
	llen = Lilen [k] ;
	DEBUG4 (("K "ID" New L.  llen "ID" \n", k, llen)) ;
	if (llen > 0)
	{
	    lp = Lip [k] ;
	    if (lp < 0)
	    {
		/* this starts a new Lchain */
		lp = -lp ;
	    }
	    ip = (Int *) (Numeric->Memory + lp) ;
	    for (i = 0 ; i < llen ; i++)
	    {
		row = *ip ;
		DEBUG4 (("    old row "ID" new row "ID"\n", row, Frpos [row])) ;
		ASSERT (row >= 0 && row < n_row) ;
		*ip++ = Frpos [row] ;
	    }
	}
    }

    /* Frpos no longer needed ] */

    /* ---------------------------------------------------------------------- */
    /* combine symbolic and numeric permutations */
    /* ---------------------------------------------------------------------- */

    Cperm_init = Symbolic->Cperm_init ;
    Rperm_init = Symbolic->Rperm_init ;

    for (k = 0 ; k < n_row ; k++)
    {
	Rperm [k] = Rperm_init [Rperm [k]] ;
    }

    for (k = 0 ; k < n_col ; k++)
    {
	Cperm [k] = Cperm_init [Cperm [k]] ;
    }

    /* Work object will be freed immediately upon return (to UMF_kernel */
    /* and then to UMFPACK_numeric). */
}
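
During factorization, Rperm [row] and Cperm [col] hold ones'-complemented pivot indices (ONES_COMPLEMENT (k) == ~k, which is negative, so the NON_PIVOTAL_* macros can test the sign); the loops above decode them into forward permutations. A self-contained sketch of that decode step follows; decode_perm and its interface are illustrative.

/* Sketch: perm [i] == ~k on input means original index i is the kth pivot.
 * On output, perm [k] == i (the forward permutation) and inv [i] == k.
 * wp is workspace of size n. */
static void decode_perm (int *perm, int *inv, int *wp, int n)
{
    int i, k ;
    for (i = 0 ; i < n ; i++)
    {
	k = ~(perm [i]) ;	/* undo ONES_COMPLEMENT */
	wp [k] = i ;
	inv [i] = k ;
    }
    for (k = 0 ; k < n ; k++)
    {
	perm [k] = wp [k] ;	/* overwrite with the forward permutation */
    }
}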
Example #4
Int KLU_rcond           /* return TRUE if successful, FALSE otherwise */
(
    KLU_symbolic *Symbolic,     /* input, not modified */
    KLU_numeric *Numeric,       /* input, not modified */
    KLU_common *Common          /* result in Common->rcond */
)
{
    double ukk, umin = 0, umax = 0 ;
    Entry *Udiag ;
    Int j, n ;

    /* ---------------------------------------------------------------------- */
    /* check inputs */
    /* ---------------------------------------------------------------------- */

    if (Common == NULL)
    {
        return (FALSE) ;
    }
    if (Symbolic == NULL)
    {
        Common->status = KLU_INVALID ;
        return (FALSE) ;
    }
    if (Numeric == NULL)
    {
        Common->rcond = 0 ;
        Common->status = KLU_SINGULAR ;
        return (TRUE) ;
    }
    Common->status = KLU_OK ;

    /* ---------------------------------------------------------------------- */
    /* compute rcond */
    /* ---------------------------------------------------------------------- */

    n = Symbolic->n ;
    Udiag = Numeric->Udiag ;
    for (j = 0 ; j < n ; j++)
    {
        /* get the magnitude of the pivot */
        ABS (ukk, Udiag [j]) ;
        if (SCALAR_IS_NAN (ukk) || SCALAR_IS_ZERO (ukk))
        {
            /* if NaN, or zero, the rcond is zero */
            Common->rcond = 0 ;
            Common->status = KLU_SINGULAR ;
            return (TRUE) ;
        }
        if (j == 0)
        {
            /* first pivot entry */
            umin = ukk ;
            umax = ukk ;
        }
        else
        {
            /* subsequent pivots */
            umin = MIN (umin, ukk) ;
            umax = MAX (umax, ukk) ;
        }
    }

    Common->rcond = umin / umax ;
    if (SCALAR_IS_NAN (Common->rcond) || SCALAR_IS_ZERO (Common->rcond))
    {
        /* this can occur if umin or umax are Inf or NaN */
        Common->rcond = 0 ;
        Common->status = KLU_SINGULAR ;
    }
    return (TRUE) ;
}
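
KLU_rcond reduces to min |U_kk| / max |U_kk| over the pivots, a cheap substitute for a true condition-number estimate. A sketch of the usual call pattern through the user-level KLU interface (klu_defaults, klu_analyze, klu_factor, klu_rcond) follows; the 3-by-3 matrix and its values are illustrative only.

#include <stdio.h>
#include "klu.h"

int main (void)
{
    /* a small example in compressed-column form */
    int Ap [4] = {0, 2, 4, 6} ;
    int Ai [6] = {0, 1, 0, 1, 1, 2} ;
    double Ax [6] = {4., 1., 1., 4., 1., 1e-8} ;
    klu_symbolic *Symbolic ;
    klu_numeric *Numeric ;
    klu_common Common ;

    klu_defaults (&Common) ;
    Symbolic = klu_analyze (3, Ap, Ai, &Common) ;
    Numeric = klu_factor (Ap, Ai, Ax, Symbolic, &Common) ;
    klu_rcond (Symbolic, Numeric, &Common) ;
    printf ("rcond estimate %g\n", Common.rcond) ;    /* small values suggest
							 near-singularity */
    klu_free_numeric (&Numeric, &Common) ;
    klu_free_symbolic (&Symbolic, &Common) ;
    return (0) ;
}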
GLOBAL Int UMFPACK_numeric
(
    const Int Ap [ ],
    const Int Ai [ ],
    const double Ax [ ],
#ifdef COMPLEX
    const double Az [ ],
#endif
    void *SymbolicHandle,
    void **NumericHandle,
    const double Control [UMFPACK_CONTROL],
    double User_Info [UMFPACK_INFO]
)
{

    /* ---------------------------------------------------------------------- */
    /* local variables */
    /* ---------------------------------------------------------------------- */

    double Info2 [UMFPACK_INFO], alloc_init, relpt, relpt2, droptol,
	front_alloc_init, stats [2] ;
    double *Info ;
    WorkType WorkSpace, *Work ;
    NumericType *Numeric ;
    SymbolicType *Symbolic ;
    Int n_row, n_col, n_inner, newsize, i, status, *inew, npiv, ulen, scale ;
    Unit *mnew ;

    /* ---------------------------------------------------------------------- */
    /* get the amount of time used by the process so far */
    /* ---------------------------------------------------------------------- */

    umfpack_tic (stats) ;

    /* ---------------------------------------------------------------------- */
    /* initialize and check inputs */
    /* ---------------------------------------------------------------------- */

#ifndef NDEBUG
    UMF_dump_start ( ) ;
    init_count = UMF_malloc_count ;
    DEBUGm4 (("\nUMFPACK numeric: U transpose version\n")) ;
#endif

    /* If front_alloc_init is negative, then allocate a front of that size in
     * UMF_start_front.  If alloc_init is negative, then allocate that initial
     * size of Numeric->Memory. */

    relpt = GET_CONTROL (UMFPACK_PIVOT_TOLERANCE,
	UMFPACK_DEFAULT_PIVOT_TOLERANCE) ;
    relpt2 = GET_CONTROL (UMFPACK_SYM_PIVOT_TOLERANCE,
	UMFPACK_DEFAULT_SYM_PIVOT_TOLERANCE) ;
    alloc_init = GET_CONTROL (UMFPACK_ALLOC_INIT, UMFPACK_DEFAULT_ALLOC_INIT) ;
    front_alloc_init = GET_CONTROL (UMFPACK_FRONT_ALLOC_INIT,
	UMFPACK_DEFAULT_FRONT_ALLOC_INIT) ;
    scale = GET_CONTROL (UMFPACK_SCALE, UMFPACK_DEFAULT_SCALE) ;
    droptol = GET_CONTROL (UMFPACK_DROPTOL, UMFPACK_DEFAULT_DROPTOL) ;

    relpt   = MAX (0.0, MIN (relpt,  1.0)) ;
    relpt2  = MAX (0.0, MIN (relpt2, 1.0)) ;
    droptol = MAX (0.0, droptol) ;
    front_alloc_init = MIN (1.0, front_alloc_init) ;

    if (scale != UMFPACK_SCALE_NONE && scale != UMFPACK_SCALE_MAX)
    {
	scale = UMFPACK_DEFAULT_SCALE ;
    }

    if (User_Info != (double *) NULL)
    {
	/* return Info in user's array */
	Info = User_Info ;
	/* clear the parts of Info that are set by UMFPACK_numeric */
	for (i = UMFPACK_NUMERIC_SIZE ; i <= UMFPACK_MAX_FRONT_NCOLS ; i++)
	{
	    Info [i] = EMPTY ;
	}
	for (i = UMFPACK_NUMERIC_DEFRAG ; i < UMFPACK_IR_TAKEN ; i++)
	{
	    Info [i] = EMPTY ;
	}
    }
    else
    {
	/* no Info array passed - use local one instead */
	Info = Info2 ;
	for (i = 0 ; i < UMFPACK_INFO ; i++)
	{
	    Info [i] = EMPTY ;
	}
    }

    Symbolic = (SymbolicType *) SymbolicHandle ;
    Numeric = (NumericType *) NULL ;
    if (!UMF_valid_symbolic (Symbolic))
    {
	Info [UMFPACK_STATUS] = UMFPACK_ERROR_invalid_Symbolic_object ;
	return (UMFPACK_ERROR_invalid_Symbolic_object) ;
    }

    /* compute alloc_init automatically for AMD or other symmetric ordering */
    if (/* Symbolic->ordering == UMFPACK_ORDERING_AMD */ alloc_init >= 0
        && Symbolic->amd_lunz > 0)
    {
	alloc_init = (Symbolic->nz + Symbolic->amd_lunz) / Symbolic->lunz_bound;
	alloc_init = MIN (1.0, alloc_init) ;
	alloc_init *= UMF_REALLOC_INCREASE ;
    }

    n_row = Symbolic->n_row ;
    n_col = Symbolic->n_col ;
    n_inner = MIN (n_row, n_col) ;

    /* check for integer overflow in Numeric->Memory minimum size */
    if (INT_OVERFLOW (Symbolic->dnum_mem_init_usage * sizeof (Unit)))
    {
	/* :: int overflow, initial Numeric->Memory size :: */
	/* There is no hope of allocating a Numeric object big enough simply
	 * to hold the initial matrix, so return an out-of-memory condition */
	DEBUGm4 (("out of memory: numeric int overflow\n")) ;
	Info [UMFPACK_STATUS] = UMFPACK_ERROR_out_of_memory ;
	return (UMFPACK_ERROR_out_of_memory) ;
    }

    Info [UMFPACK_STATUS] = UMFPACK_OK ;
    Info [UMFPACK_NROW] = n_row ;
    Info [UMFPACK_NCOL] = n_col ;
    Info [UMFPACK_SIZE_OF_UNIT] = (double) (sizeof (Unit)) ;

    if (!Ap || !Ai || !Ax || !NumericHandle)
    {
	Info [UMFPACK_STATUS] = UMFPACK_ERROR_argument_missing ;
	return (UMFPACK_ERROR_argument_missing) ;
    }

    Info [UMFPACK_NZ] = Ap [n_col] ;
    *NumericHandle = (void *) NULL ;

    /* ---------------------------------------------------------------------- */
    /* allocate the Work object */
    /* ---------------------------------------------------------------------- */

    /* (1) calls UMF_malloc 15 or 17 times, to obtain temporary workspace of
     * size c+1 Entry's and 2*(n_row+1) + 3*(n_col+1) + (n_col+n_inner+1) +
     * (nn+1) + 3*(c+1) + 2*(r+1) + max(r,c) + (nfr+1) integers, plus 2*nn
     * more integers if diagonal pivoting is to be done.  r is the maximum
     * number of rows in any frontal matrix, c is the maximum number of columns
     * in any frontal matrix, n_inner is min (n_row,n_col), nn is
     * max (n_row,n_col), and nfr is the number of frontal matrices.  For a
     * square matrix, this is c+1 Entry's and about 8n + 3c + 2r + max(r,c) +
     * nfr integers, plus 2n more for diagonal pivoting.
     */

    Work = &WorkSpace ;
    Work->n_row = n_row ;
    Work->n_col = n_col ;
    Work->nfr = Symbolic->nfr ;
    Work->nb = Symbolic->nb ;
    Work->n1 = Symbolic->n1 ;

    if (!work_alloc (Work, Symbolic))
    {
	DEBUGm4 (("out of memory: numeric work\n")) ;
	Info [UMFPACK_STATUS] = UMFPACK_ERROR_out_of_memory ;
	error (&Numeric, Work) ;
	return (UMFPACK_ERROR_out_of_memory) ;
    }
    ASSERT (UMF_malloc_count == init_count + 16 + 2*Symbolic->prefer_diagonal) ;

    /* ---------------------------------------------------------------------- */
    /* allocate Numeric object */
    /* ---------------------------------------------------------------------- */

    /* (2) calls UMF_malloc 10 or 11 times, for a total space of
     * sizeof (NumericType) bytes, 4*(n_row+1) + 4*(n_col+1) integers, and
     * (n_inner+1) Entry's, plus n_row Entry's if row scaling is to be done.
     * sizeof (NumericType) is a small constant.  Next, it calls UMF_malloc
     * once, for the variable-sized part of the Numeric object
     * (Numeric->Memory).  The size of this object is the larger of
     * (Control [UMFPACK_ALLOC_INIT]) *  (the approximate upper bound computed
     * by UMFPACK_symbolic), and the minimum required to start the numerical
     * factorization.  This request is reduced if it fails.
     */

    if (!numeric_alloc (&Numeric, Symbolic, alloc_init, scale))
    {
	DEBUGm4 (("out of memory: initial numeric\n")) ;
	Info [UMFPACK_STATUS] = UMFPACK_ERROR_out_of_memory ;
	error (&Numeric, Work) ;
	return (UMFPACK_ERROR_out_of_memory) ;
    }
    DEBUG0 (("malloc: init_count "ID" UMF_malloc_count "ID"\n",
	init_count, UMF_malloc_count)) ;
    ASSERT (UMF_malloc_count == init_count
	+ (16 + 2*Symbolic->prefer_diagonal)
	+ (11 + (scale != UMFPACK_SCALE_NONE))) ;

    /* set control parameters */
    Numeric->relpt = relpt ;
    Numeric->relpt2 = relpt2 ;
    Numeric->droptol = droptol ;
    Numeric->alloc_init = alloc_init ;
    Numeric->front_alloc_init = front_alloc_init ;
    Numeric->scale = scale ;

    DEBUG0 (("umf relpt %g %g init %g %g inc %g red %g\n",
	relpt, relpt2, alloc_init, front_alloc_init,
	UMF_REALLOC_INCREASE, UMF_REALLOC_REDUCTION)) ;

    /* ---------------------------------------------------------------------- */
    /* scale and factorize */
    /* ---------------------------------------------------------------------- */

    /* (3) During numerical factorization (inside UMF_kernel), the variable-size
     * block of memory is increased in size via a call to UMF_realloc if it is
     * found to be too small.  During factorization, this block holds the
     * pattern and values of L and U at the top end, and the elements
     * (contribution blocks) and the current frontal matrix (Work->F*) at the
     * bottom end.  The peak size of the variable-sized object is estimated in
     * UMFPACK_*symbolic (Info [UMFPACK_VARIABLE_PEAK_ESTIMATE]), although this
     * upper bound can be very loose.  The size of the Symbolic object
     * (which is currently allocated) is in Info [UMFPACK_SYMBOLIC_SIZE], and
     * is between 2*n and 13*n integers.
     */

    DEBUG0 (("Calling umf_kernel\n")) ;
    status = UMF_kernel (Ap, Ai, Ax,
#ifdef COMPLEX
	Az,
#endif
	Numeric, Work, Symbolic) ;

    Info [UMFPACK_STATUS] = status ;
    if (status < UMFPACK_OK)
    {
	/* out of memory, or pattern has changed */
	error (&Numeric, Work) ;
	return (status) ;
    }

    Info [UMFPACK_FORCED_UPDATES] = Work->nforced ;
    Info [UMFPACK_VARIABLE_INIT] = Numeric->init_usage ;
    if (Symbolic->prefer_diagonal)
    {
	Info [UMFPACK_NOFF_DIAG] = Work->noff_diagonal ;
    }

    DEBUG0 (("malloc: init_count "ID" UMF_malloc_count "ID"\n",
	init_count, UMF_malloc_count)) ;

    npiv = Numeric->npiv ;	/* = n_inner for nonsingular matrices */
    ulen = Numeric->ulen ;	/* = 0 for square nonsingular matrices */

    /* ---------------------------------------------------------------------- */
    /* free Work object */
    /* ---------------------------------------------------------------------- */

    /* (4) After numerical factorization all of the objects allocated in step
     * (1) are freed via UMF_free, except that one object of size n_col+1 is
     * kept if there are off-diagonal nonzeros in the last pivot row (can only
     * occur for singular or rectangular matrices).  This is Work->Upattern,
     * which is transferred to Numeric->Upattern if ulen > 0.
     */

    DEBUG0 (("malloc: init_count "ID" UMF_malloc_count "ID"\n",
	init_count, UMF_malloc_count)) ;

    free_work (Work) ;

    DEBUG0 (("malloc: init_count "ID" UMF_malloc_count "ID"\n",
	init_count, UMF_malloc_count)) ;
    DEBUG0 (("Numeric->ulen: "ID" scale: "ID"\n", ulen, scale)) ;
    ASSERT (UMF_malloc_count == init_count + (ulen > 0) +
	(11 + (scale != UMFPACK_SCALE_NONE))) ;

    /* ---------------------------------------------------------------------- */
    /* reduce Lpos, Lilen, Lip, Upos, Uilen and Uip to size npiv+1 */
    /* ---------------------------------------------------------------------- */

    /* (5) Six components of the Numeric object are reduced in size if the
     * matrix is singular or rectangular.   The original size is 3*(n_row+1) +
     * 3*(n_col+1) integers.  The new size is 6*(npiv+1) integers.  For
     * square non-singular matrices, these two sizes are the same.
     */

    if (npiv < n_row)
    {
	/* reduce Lpos, Uilen, and Uip from size n_row+1 to size npiv+1 */
	inew = (Int *) UMF_realloc (Numeric->Lpos, npiv+1, sizeof (Int)) ;
	if (inew)
	{
	    Numeric->Lpos = inew ;
	}
	inew = (Int *) UMF_realloc (Numeric->Uilen, npiv+1, sizeof (Int)) ;
	if (inew)
	{
	    Numeric->Uilen = inew ;
	}
	inew = (Int *) UMF_realloc (Numeric->Uip, npiv+1, sizeof (Int)) ;
	if (inew)
	{
	    Numeric->Uip = inew ;
	}
    }

    if (npiv < n_col)
    {
	/* reduce Upos, Lilen, and Lip from size n_col+1 to size npiv+1 */
	inew = (Int *) UMF_realloc (Numeric->Upos, npiv+1, sizeof (Int)) ;
	if (inew)
	{
	    Numeric->Upos = inew ;
	}
	inew = (Int *) UMF_realloc (Numeric->Lilen, npiv+1, sizeof (Int)) ;
	if (inew)
	{
	    Numeric->Lilen = inew ;
	}
	inew = (Int *) UMF_realloc (Numeric->Lip, npiv+1, sizeof (Int)) ;
	if (inew)
	{
	    Numeric->Lip = inew ;
	}
    }

    /* ---------------------------------------------------------------------- */
    /* reduce Numeric->Upattern from size n_col+1 to size ulen+1 */
    /* ---------------------------------------------------------------------- */

    /* (6) The size of Numeric->Upattern (formerly Work->Upattern) is reduced
     * from size n_col+1 to size ulen + 1.  If ulen is zero, the object does
     * not exist. */

    DEBUG4 (("ulen: "ID" Upattern "ID"\n", ulen, (Int) Numeric->Upattern)) ;
    ASSERT (IMPLIES (ulen == 0, Numeric->Upattern == (Int *) NULL)) ;
    if (ulen > 0 && ulen < n_col)
    {
	inew = (Int *) UMF_realloc (Numeric->Upattern, ulen+1, sizeof (Int)) ;
	if (inew)
	{
	    Numeric->Upattern = inew ;
	}
    }

    /* ---------------------------------------------------------------------- */
    /* reduce Numeric->Memory to hold just the LU factors at the head */
    /* ---------------------------------------------------------------------- */

    /* (7) The variable-sized block (Numeric->Memory) is reduced to hold just L
     * and U, via a call to UMF_realloc, since the frontal matrices are no
     * longer needed.
     */

    newsize = Numeric->ihead ;
    if (newsize < Numeric->size)
    {
	mnew = (Unit *) UMF_realloc (Numeric->Memory, newsize, sizeof (Unit)) ;
	if (mnew)
	{
	    /* realloc succeeded (how can it fail since the size is reduced?) */
	    Numeric->Memory = mnew ;
	    Numeric->size = newsize ;
	}
    }
    Numeric->ihead = Numeric->size ;
    Numeric->itail = Numeric->ihead ;
    Numeric->tail_usage = 0 ;
    Numeric->ibig = EMPTY ;
    /* UMF_mem_alloc_tail_block can no longer be called (no tail marker) */

    /* ---------------------------------------------------------------------- */
    /* report the results and return the Numeric object */
    /* ---------------------------------------------------------------------- */

    UMF_set_stats (
	Info,
	Symbolic,
	(double) Numeric->max_usage,	/* actual peak Numeric->Memory */
	(double) Numeric->size,		/* actual final Numeric->Memory */
	Numeric->flops,			/* actual "true flops" */
	(double) Numeric->lnz + n_inner,		/* actual nz in L */
	(double) Numeric->unz + Numeric->nnzpiv,	/* actual nz in U */
	(double) Numeric->maxfrsize,	/* actual largest front size */
	(double) ulen,			/* actual Numeric->Upattern size */
	(double) npiv,			/* actual # pivots found */
	(double) Numeric->maxnrows,	/* actual largest #rows in front */
	(double) Numeric->maxncols,	/* actual largest #cols in front */
	scale != UMFPACK_SCALE_NONE,
	Symbolic->prefer_diagonal,
	ACTUAL) ;

    Info [UMFPACK_ALLOC_INIT_USED] = Numeric->alloc_init ;
    Info [UMFPACK_NUMERIC_DEFRAG] = Numeric->ngarbage ;
    Info [UMFPACK_NUMERIC_REALLOC] = Numeric->nrealloc ;
    Info [UMFPACK_NUMERIC_COSTLY_REALLOC] = Numeric->ncostly ;
    Info [UMFPACK_COMPRESSED_PATTERN] = Numeric->isize ;
    Info [UMFPACK_LU_ENTRIES] = Numeric->nLentries + Numeric->nUentries +
	    Numeric->npiv ;
    Info [UMFPACK_UDIAG_NZ] = Numeric->nnzpiv ;
    Info [UMFPACK_RSMIN] = Numeric->rsmin ;
    Info [UMFPACK_RSMAX] = Numeric->rsmax ;
    Info [UMFPACK_WAS_SCALED] = Numeric->scale ;

    /* nz in L and U with no dropping of small entries */
    Info [UMFPACK_ALL_LNZ] = Numeric->all_lnz + n_inner ;
    Info [UMFPACK_ALL_UNZ] = Numeric->all_unz + Numeric->nnzpiv ;
    Info [UMFPACK_NZDROPPED] =
	  (Numeric->all_lnz - Numeric->lnz)
	+ (Numeric->all_unz - Numeric->unz) ;

    /* estimate of the reciprocal of the condition number. */
    if (SCALAR_IS_ZERO (Numeric->min_udiag)
     || SCALAR_IS_ZERO (Numeric->max_udiag)
     ||	SCALAR_IS_NAN (Numeric->min_udiag)
     ||	SCALAR_IS_NAN (Numeric->max_udiag))
    {
	/* rcond is zero if there is any zero or NaN on the diagonal */
	Numeric->rcond = 0.0 ;
    }
    else
    {
	/* estimate of the reciprocal of the condition number. */
	/* This is NaN if diagonal is zero-free, but has one or more NaN's. */
	Numeric->rcond = Numeric->min_udiag / Numeric->max_udiag ;
    }
    Info [UMFPACK_UMIN]  = Numeric->min_udiag ;
    Info [UMFPACK_UMAX]  = Numeric->max_udiag ;
    Info [UMFPACK_RCOND] = Numeric->rcond ;

    if (Numeric->nnzpiv < n_inner
    || SCALAR_IS_ZERO (Numeric->rcond) || SCALAR_IS_NAN (Numeric->rcond))
    {
	/* there are zeros and/or NaN's on the diagonal of U */
	DEBUG0 (("Warning, matrix is singular in umfpack_numeric\n")) ;
	DEBUG0 (("nnzpiv "ID" n_inner "ID" rcond %g\n", Numeric->nnzpiv,
	    n_inner, Numeric->rcond)) ;
	status = UMFPACK_WARNING_singular_matrix ;
	Info [UMFPACK_STATUS] = status ;
    }

    Numeric->valid = NUMERIC_VALID ;
    *NumericHandle = (void *) Numeric ;

    /* Numeric has 11 to 13 objects */
    ASSERT (UMF_malloc_count == init_count + 11
	+ (ulen > 0)			    /* Numeric->Upattern */
	+ (scale != UMFPACK_SCALE_NONE)) ;  /* Numeric->Rs */

    /* ---------------------------------------------------------------------- */
    /* get the time used by UMFPACK_numeric */
    /* ---------------------------------------------------------------------- */

    umfpack_toc (stats) ;
    Info [UMFPACK_NUMERIC_WALLTIME] = stats [0] ;
    Info [UMFPACK_NUMERIC_TIME] = stats [1] ;

    /* return UMFPACK_OK or UMFPACK_WARNING_singular_matrix */
    return (status) ;

}
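
For context, here is a sketch of the user-level call sequence that surrounds UMFPACK_numeric: symbolic analysis, then this numeric factorization, then a solve. It uses the int/double (umfpack_di_*) wrappers and the small matrix from the classic umfpack_simple demo, whose solution is x = (1,2,3,4,5); status checks are omitted for brevity.

#include <stdio.h>
#include "umfpack.h"

int main (void)
{
    int n = 5 ;
    int    Ap [6]  = {0, 2, 5, 9, 10, 12} ;
    int    Ai [12] = {0, 1, 0, 2, 4, 1, 2, 3, 4, 2, 1, 4} ;
    double Ax [12] = {2., 3., 3., -1., 4., 4., -3., 1., 2., 2., 6., 1.} ;
    double b [5] = {8., 45., -3., 3., 19.}, x [5] ;
    void *Symbolic, *Numeric ;
    int i ;

    (void) umfpack_di_symbolic (n, n, Ap, Ai, Ax, &Symbolic, NULL, NULL) ;
    (void) umfpack_di_numeric (Ap, Ai, Ax, Symbolic, &Numeric, NULL, NULL) ;
    umfpack_di_free_symbolic (&Symbolic) ;
    (void) umfpack_di_solve (UMFPACK_A, Ap, Ai, Ax, x, b, Numeric, NULL, NULL) ;
    umfpack_di_free_numeric (&Numeric) ;
    for (i = 0 ; i < n ; i++)
    {
	printf ("x [%d] = %g\n", i, x [i]) ;
    }
    return (0) ;
}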
Example #6
GLOBAL Int UMF_row_search
(
    NumericType *Numeric,
    WorkType *Work,
    SymbolicType *Symbolic,
    Int cdeg0,			/* length of column in Front */
    Int cdeg1,			/* length of column outside Front */
    const Int Pattern [ ],	/* pattern of column, Pattern [0..cdeg1 -1] */
    const Int Pos [ ],		/* Pos [Pattern [0..cdeg1 -1]] = 0..cdeg1 -1 */
    Int pivrow [2],		/* pivrow [IN] and pivrow [OUT] */
    Int rdeg [2],		/* rdeg [IN] and rdeg [OUT] */
    Int W_i [ ],		/* pattern of pivrow [IN], */
				/* either Fcols or Woi */
    Int W_o [ ],		/* pattern of pivrow [OUT], */
				/* either Wio or Woo */
    Int prior_pivrow [2],	/* the two other rows just scanned, if any */
    const Entry Wxy [ ],	/* numerical values Wxy [0..cdeg1-1],
				   either Wx or Wy */

    Int pivcol,			/* the candidate column being searched */
    Int freebie [ ]
)
{

    /* ---------------------------------------------------------------------- */
    /* local variables */
    /* ---------------------------------------------------------------------- */

    double maxval, toler, toler2, value, pivot [2] ;
    Int i, row, deg, col, *Frpos, fnrows, *E, j, ncols, *Cols, *Rows,
	e, f, Wrpflag, *Fcpos, fncols, tpi, max_rdeg, nans_in_col, was_offdiag,
	diag_row, prefer_diagonal, *Wrp, found, *Diagonal_map ;
    Tuple *tp, *tpend, *tp1, *tp2 ;
    Unit *Memory, *p ;
    Element *ep ;
    Int *Row_tuples, *Row_degree, *Row_tlen ;

#ifndef NDEBUG
    Int *Col_degree ;
    DEBUG2 (("Row_search:\n")) ;
    for (i = 0 ; i < cdeg1 ; i++)
    {
	row = Pattern [i] ;
	DEBUG4 (("   row: "ID"\n", row)) ;
	ASSERT (row >= 0 && row < Numeric->n_row) ;
	ASSERT (i == Pos [row]) ;
    }
    /* If row is not in Pattern [0..cdeg1-1], then Pos [row] == EMPTY */
    if (UMF_debug > 0 || Numeric->n_row < 1000)
    {
	Int cnt = cdeg1 ;
	DEBUG4 (("Scan all rows:\n")) ;
	for (row = 0 ; row < Numeric->n_row ; row++)
	{
	    if (Pos [row] < 0)
	    {
		cnt++ ;
	    }
	    else
	    {
		DEBUG4 (("   row: "ID" pos "ID"\n", row, Pos [row])) ;
	    }
	}
	ASSERT (cnt == Numeric->n_row) ;
    }
    Col_degree = Numeric->Cperm ;   /* for NON_PIVOTAL_COL macro only */
    ASSERT (pivcol >= 0 && pivcol < Work->n_col) ;
    ASSERT (NON_PIVOTAL_COL (pivcol)) ;
#endif

    pivot [IN] = 0. ;
    pivot [OUT] = 0. ;

    /* ---------------------------------------------------------------------- */
    /* get parameters */
    /* ---------------------------------------------------------------------- */

    Row_degree = Numeric->Rperm ;
    Row_tuples = Numeric->Uip ;
    Row_tlen   = Numeric->Uilen ;
    Wrp = Work->Wrp ;
    Frpos = Work->Frpos ;
    E = Work->E ;
    Memory = Numeric->Memory ;
    fnrows = Work->fnrows ;

    prefer_diagonal = Symbolic->prefer_diagonal ;
    Diagonal_map = Work->Diagonal_map ;

    if (Diagonal_map)
    {
	diag_row = Diagonal_map [pivcol] ;
	was_offdiag = diag_row < 0 ;
	if (was_offdiag)
	{
	    /* the "diagonal" entry in this column was permuted here by an
	     * earlier pivot choice.  The tighter off-diagonal tolerance will
	     * be used instead of the symmetric tolerance. */
	    diag_row = FLIP (diag_row) ;
	}
	ASSERT (diag_row >= 0 && diag_row < Numeric->n_row) ;
    }
    else
    {
	diag_row = EMPTY ;	/* unused */
	was_offdiag = EMPTY ;	/* unused */
    }

    /* pivot row degree cannot exceed max_rdeg */
    max_rdeg = Work->fncols_max ;

    /* ---------------------------------------------------------------------- */
    /* scan pivot column for candidate rows */
    /* ---------------------------------------------------------------------- */

    maxval = 0.0 ;
    nans_in_col = FALSE ;

    for (i = 0 ; i < cdeg1 ; i++)
    {
	APPROX_ABS (value, Wxy [i]) ;
	if (SCALAR_IS_NAN (value))
	{
	    nans_in_col = TRUE ;
	    maxval = value ;
	    break ;
	}
	/* This test can now ignore the NaN case: */
	maxval = MAX (maxval, value) ;
    }

    /* if maxval is zero, the matrix is numerically singular */

    toler = Numeric->relpt * maxval ;
    toler2 = Numeric->relpt2 * maxval ;
    toler2 = was_offdiag ? toler : toler2 ;
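
    /* Threshold partial pivoting: a row is numerically acceptable when its
     * scaled magnitude is at least toler = relpt * maxval; among acceptable
     * rows, the search below prefers the smallest degree, to limit fill-in.
     * toler2 (from the looser symmetric tolerance relpt2) applies only to
     * the preferred diagonal entry, unless that entry was displaced by an
     * earlier pivot, in which case the strict toler is used instead. */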

    DEBUG5 (("Row_search begins [ maxval %g toler %g %g\n",
	maxval, toler, toler2)) ;
    if (SCALAR_IS_NAN (toler) || SCALAR_IS_NAN (toler2))
    {
	nans_in_col = TRUE ;
    }

    if (!nans_in_col)
    {

	/* look for the diagonal entry, if it exists */
	found = FALSE ;
	ASSERT (!SCALAR_IS_NAN (toler)) ;

	if (prefer_diagonal)
	{
	    ASSERT (diag_row != EMPTY) ;
	    i = Pos [diag_row] ;
	    if (i >= 0)
	    {
		double a ;
		ASSERT (i < cdeg1) ;
		ASSERT (diag_row == Pattern [i]) ;

		APPROX_ABS (a, Wxy [i]) ;

		ASSERT (!SCALAR_IS_NAN (a)) ;
		ASSERT (!SCALAR_IS_NAN (toler2)) ;

		if (SCALAR_IS_NONZERO (a) && a >= toler2)
		{
		    /* found it! */
		    DEBUG3 (("Symmetric pivot: "ID" "ID"\n", pivcol, diag_row));
		    found = TRUE ;
		    if (Frpos [diag_row] >= 0 && Frpos [diag_row] < fnrows)
		    {
			pivrow [IN] = diag_row ;
			pivrow [OUT] = EMPTY ;
		    }
		    else
		    {
			pivrow [IN] = EMPTY ;
			pivrow [OUT] = diag_row ;
		    }
		}
	    }
	}

	/* either no diagonal found, or we didn't look for it */
	if (!found)
	{
	    if (cdeg0 > 0)
	    {

		/* this is a column in the front */
		for (i = 0 ; i < cdeg0 ; i++)
		{
		    double a ;
		    APPROX_ABS (a, Wxy [i]) ;
		    ASSERT (!SCALAR_IS_NAN (a)) ;
		    ASSERT (!SCALAR_IS_NAN (toler)) ;
		    if (SCALAR_IS_NONZERO (a) && a >= toler)
		    {
			row = Pattern [i] ;
			deg = Row_degree [row] ;
#ifndef NDEBUG
			DEBUG6 ((ID" candidate row "ID" deg "ID" absval %g\n",
			    i, row, deg, a)) ;
			UMF_dump_rowcol (0, Numeric, Work, row, TRUE) ;
#endif
			ASSERT (Frpos [row] >= 0 && Frpos [row] < fnrows) ;
			ASSERT (Frpos [row] == i) ;
			/* row is in the current front */
			DEBUG4 ((" in front\n")) ;
			if (deg < rdeg [IN]
			    /* break ties by picking the largest entry: */
			       || (deg == rdeg [IN] && a > pivot [IN])
			    /* break ties by picking the diagonal entry: */
			    /* || (deg == rdeg [IN] && row == diag_row) */
			   )
			{
			    /* best row in front, so far */
			    pivrow [IN] = row ;
			    rdeg [IN] = deg ;
			    pivot [IN] = a ;
			}
		    }
		}
		for ( ; i < cdeg1 ; i++)
		{
		    double a ;
		    APPROX_ABS (a, Wxy [i]) ;
		    ASSERT (!SCALAR_IS_NAN (a)) ;
		    ASSERT (!SCALAR_IS_NAN (toler)) ;
		    if (SCALAR_IS_NONZERO (a) && a >= toler)
		    {
			row = Pattern [i] ;
			deg = Row_degree [row] ;
#ifndef NDEBUG
			DEBUG6 ((ID" candidate row "ID" deg "ID" absval %g\n",
			    i, row, deg, a)) ;
			UMF_dump_rowcol (0, Numeric, Work, row, TRUE) ;
#endif
			ASSERT (Frpos [row] == i) ;
			/* row is not in the current front */
			DEBUG4 ((" NOT in front\n")) ;
			if (deg < rdeg [OUT]
			    /* break ties by picking the largest entry: */
			       || (deg == rdeg [OUT] && a > pivot [OUT])
			    /* break ties by picking the diagonal entry: */
			    /* || (deg == rdeg [OUT] && row == diag_row) */
			   )
			{
			    /* best row not in front, so far */
			    pivrow [OUT] = row ;
			    rdeg [OUT] = deg ;
			    pivot [OUT] = a ;
			}
		    }
		}

	    }
	    else
	    {

		/* this column is not in the front */
		for (i = 0 ; i < cdeg1 ; i++)
		{
		    double a ;
		    APPROX_ABS (a, Wxy [i]) ;
		    ASSERT (!SCALAR_IS_NAN (a)) ;
		    ASSERT (!SCALAR_IS_NAN (toler)) ;
		    if (SCALAR_IS_NONZERO (a) && a >= toler)
		    {
			row = Pattern [i] ;
			deg = Row_degree [row] ;
#ifndef NDEBUG
			DEBUG6 ((ID" candidate row "ID" deg "ID" absval %g\n",
			    i, row, deg, a)) ;
			UMF_dump_rowcol (0, Numeric, Work, row, TRUE) ;
#endif
			if (Frpos [row] >= 0 && Frpos [row] < fnrows)
			{
			    /* row is in the current front */
			    DEBUG4 ((" in front\n")) ;
			    if (deg < rdeg [IN]
			    /* break ties by picking the largest entry: */
			       || (deg == rdeg [IN] && a > pivot [IN])
			    /* break ties by picking the diagonal entry: */
			    /* || (deg == rdeg [IN] && row == diag_row) */
			       )
			    {
				/* best row in front, so far */
				pivrow [IN] = row ;
				rdeg [IN] = deg ;
				pivot [IN] = a ;
			    }
			}
			else
			{
			    /* row is not in the current front */
			    DEBUG4 ((" NOT in front\n")) ;
			    if (deg < rdeg [OUT]
			    /* break ties by picking the largest entry: */
			       || (deg == rdeg[OUT] && a > pivot [OUT])
			    /* break ties by picking the diagonal entry: */
			    /* || (deg == rdeg[OUT] && row == diag_row) */
			       )
			    {
				/* best row not in front, so far */
				pivrow [OUT] = row ;
				rdeg [OUT] = deg ;
				pivot [OUT] = a ;
			    }
			}
		    }
		}
	    }
	}
    }

    /* ---------------------------------------------------------------------- */
    /* NaN handling */
    /* ---------------------------------------------------------------------- */

    /* if cdeg1 > 0 then we must have found a pivot row ... unless NaN's */
    /* exist.  Try with no numerical tests if no pivot found. */

    if (cdeg1 > 0 && pivrow [IN] == EMPTY && pivrow [OUT] == EMPTY)
    {
	/* cleanup for the NaN case */
	DEBUG0 (("Found a NaN in pivot column!\n")) ;

	/* grab the first entry in the pivot column, ignoring degree, */
	/* numerical stability, and symmetric preference */
	row = Pattern [0] ;
	deg = Row_degree [row] ;
	if (Frpos [row] >= 0 && Frpos [row] < fnrows)
	{
	    /* row is in the current front */
	    DEBUG4 ((" in front\n")) ;
	    pivrow [IN] = row ;
	    rdeg [IN] = deg ;
	}
	else
	{
	    /* row is not in the current front */
	    DEBUG4 ((" NOT in front\n")) ;
	    pivrow [OUT] = row ;
	    rdeg [OUT] = deg ;
	}

	/* We are now guaranteed to have a pivot, no matter how broken */
	/* (non-IEEE compliant) the underlying numerical operators are. */
	/* This is particularly a problem for Microsoft compilers (they do */
	/* not handle NaN's properly). Now try to find a sparser pivot, if */
	/* possible. */

	for (i = 1 ; i < cdeg1 ; i++)
	{
	    row = Pattern [i] ;
	    deg = Row_degree [row] ;

	    if (Frpos [row] >= 0 && Frpos [row] < fnrows)
	    {
		/* row is in the current front */
		DEBUG4 ((" in front\n")) ;
		if (deg < rdeg [IN] || (deg == rdeg [IN] && row == diag_row))
		{
		    /* best row in front, so far */
		    pivrow [IN] = row ;
		    rdeg [IN] = deg ;
		}
	    }
	    else
	    {
		/* row is not in the current front */
		DEBUG4 ((" NOT in front\n")) ;
		if (deg < rdeg [OUT] || (deg == rdeg [OUT] && row == diag_row))
		{
		    /* best row not in front, so far */
		    pivrow [OUT] = row ;
		    rdeg [OUT] = deg ;
		}
	    }
	}
    }

    /* We found a pivot if there are entries (even zero ones) in pivot col */
    ASSERT (IMPLIES (cdeg1 > 0, pivrow[IN] != EMPTY || pivrow[OUT] != EMPTY)) ;

    /* If there are no entries in the pivot column, then no pivot is found */
    ASSERT (IMPLIES (cdeg1 == 0, pivrow[IN] == EMPTY && pivrow[OUT] == EMPTY)) ;

    /* ---------------------------------------------------------------------- */
    /* check for singular matrix */
    /* ---------------------------------------------------------------------- */

    if (cdeg1  == 0)
    {
	if (fnrows > 0)
	{
	    /*
		Get the pivrow [OUT][IN] from the current front.
		The frontal matrix looks like this:

			pivcol[OUT]
			|
			v
		x x x x 0   <- so grab this row as the pivrow [OUT][IN].
		x x x x 0
		x x x x 0
		0 0 0 0 0

		The current frontal matrix has some rows in it.  The degree
		of the pivcol[OUT] is zero.  The column is empty, and the
		current front does not contribute to it.

	    */
	    pivrow [IN] = Work->Frows [0] ;
	    DEBUGm4 (("Got zero pivrow[OUT][IN] "ID" from current front\n",
		pivrow [IN])) ;
	}
	else
	{

	    /*
		Get a pivot row from the row-merge tree, use as
		pivrow [OUT][OUT].   pivrow [IN] remains EMPTY.
		This can only happen if the current front is 0-by-0.
	    */

	    Int *Front_leftmostdesc, *Front_1strow, *Front_new1strow, row1,
		row2, fleftmost, nfr, n_row, frontid ;

	    ASSERT (Work->fncols == 0) ;

	    Front_leftmostdesc = Symbolic->Front_leftmostdesc ;
	    Front_1strow = Symbolic->Front_1strow ;
	    Front_new1strow = Work->Front_new1strow ;
	    nfr = Symbolic->nfr ;
	    n_row = Numeric->n_row ;
	    frontid = Work->frontid ;

	    DEBUGm4 (("Note: pivcol: "ID" is empty front "ID"\n",
		pivcol, frontid)) ;
#ifndef NDEBUG
	    DEBUG1 (("Calling dump rowmerge\n")) ;
	    UMF_dump_rowmerge (Numeric, Symbolic, Work) ;
#endif

	    /* Row-merge set is the non-pivotal rows in the range */
	    /* Front_new1strow [Front_leftmostdesc [frontid]] to */
	    /* Front_1strow [frontid+1] - 1. */
	    /* If this is empty, then use the empty rows, in the range */
	    /* Front_new1strow [nfr] to n_row-1. */
	    /* If this too is empty, then pivrow [OUT] will be empty. */
	    /* In both cases, update Front_new1strow [...]. */

	    fleftmost = Front_leftmostdesc [frontid] ;
	    row1 = Front_new1strow [fleftmost] ;
	    row2 = Front_1strow [frontid+1] - 1 ;
	    DEBUG1 (("Leftmost: "ID" Rows ["ID" to "ID"] srch ["ID" to "ID"]\n",
		fleftmost, Front_1strow [frontid], row2, row1, row2)) ;

	    /* look in the range row1 ... row2 */
	    for (row = row1 ; row <= row2 ; row++)
	    {
		DEBUG3 (("   Row: "ID"\n", row)) ;
		if (NON_PIVOTAL_ROW (row))
		{
		    /* found it */
		    DEBUG3 (("   Row: "ID" found\n", row)) ;
		    ASSERT (Frpos [row] == EMPTY) ;
		    pivrow [OUT] = row ;
		    DEBUGm4 (("got row merge pivrow %d\n", pivrow [OUT])) ;
		    break ;
		}
	    }
	    Front_new1strow [fleftmost] = row ;

	    if (pivrow [OUT] == EMPTY)
	    {
		/* not found, look in empty row set in "dummy" front */
		row1 = Front_new1strow [nfr] ;
		row2 = n_row-1 ;
		DEBUG3 (("Empty: "ID" Rows ["ID" to "ID"] srch["ID" to "ID"]\n",
		    nfr, Front_1strow [nfr], row2, row1, row2)) ;

		/* look in the range row1 ... row2 */
		for (row = row1 ; row <= row2 ; row++)
		{
		    DEBUG3 (("   Empty Row: "ID"\n", row)) ;
		    if (NON_PIVOTAL_ROW (row))
		    {
			/* found it */
			DEBUG3 (("   Empty Row: "ID" found\n", row)) ;
			ASSERT (Frpos [row] == EMPTY) ;
			pivrow [OUT] = row ;
			DEBUGm4 (("got dummy row pivrow %d\n", pivrow [OUT])) ;
			break ;
		    }
		}
		Front_new1strow [nfr] = row ;
	    }

	    if (pivrow [OUT] == EMPTY)
	    {
		/* Row-merge set is empty.  We can just discard */
		/* the candidate pivot column. */
		DEBUG0 (("Note: row-merge set empty\n")) ;
		DEBUGm4 (("got no pivrow \n")) ;
		return (UMFPACK_WARNING_singular_matrix) ;
	    }
	}
    }

    /* ---------------------------------------------------------------------- */
    /* construct the candidate row in the front, if any */
    /* ---------------------------------------------------------------------- */

#ifndef NDEBUG
    /* check Wrp */
    ASSERT (Work->Wrpflag > 0) ;
    if (UMF_debug > 0 || Work->n_col < 1000)
    {
	for (i = 0 ; i < Work->n_col ; i++)
	{
	    ASSERT (Wrp [i] < Work->Wrpflag) ;
	}
    }
#endif

#ifndef NDEBUG
    DEBUG4 (("pivrow [IN]: "ID"\n", pivrow [IN])) ;
    UMF_dump_rowcol (0, Numeric, Work, pivrow [IN], TRUE) ;
#endif

    if (pivrow [IN] != EMPTY)
    {

	/* the row merge candidate row is not pivrow [IN] */
	freebie [IN] = (pivrow [IN] == prior_pivrow [IN]) && (cdeg1  > 0) ;
	ASSERT (cdeg1  >= 0) ;

	if (!freebie [IN])
	{
	    /* include current front in the degree of this row */

	    Fcpos = Work->Fcpos ;
	    fncols = Work->fncols ;

	    Wrpflag = Work->Wrpflag ;

	    /* -------------------------------------------------------------- */
	    /* construct the pattern of the IN row */
	    /* -------------------------------------------------------------- */

#ifndef NDEBUG
	    /* check Fcols */
	    DEBUG5 (("ROW ASSEMBLE: rdeg "ID"\nREDUCE ROW "ID"\n",
		fncols, pivrow [IN])) ;
	    for (j = 0 ; j < fncols ; j++)
	    {
		col = Work->Fcols [j] ;
		ASSERT (col >= 0 && col < Work->n_col) ;
		ASSERT (Fcpos [col] >= 0) ;
	    }
	    if (UMF_debug > 0 || Work->n_col < 1000)
	    {
		Int cnt = fncols ;
		for (col = 0 ; col < Work->n_col ; col++)
		{
		    if (Fcpos [col] < 0) cnt++ ;
		}
		ASSERT (cnt == Work->n_col) ;
	    }
#endif

	    rdeg [IN] = fncols ;

	    ASSERT (pivrow [IN] >= 0 && pivrow [IN] < Work->n_row) ;
	    ASSERT (NON_PIVOTAL_ROW (pivrow [IN])) ;

	    /* add the pivot column itself */
	    ASSERT (Wrp [pivcol] != Wrpflag) ;
	    if (Fcpos [pivcol] < 0)
	    {
		DEBUG3 (("Adding pivot col to pivrow [IN] pattern\n")) ;
		if (rdeg [IN] >= max_rdeg)
		{
		    /* :: pattern change (in) :: */
		    return (UMFPACK_ERROR_different_pattern) ;
		}
		Wrp [pivcol] = Wrpflag ;
		W_i [rdeg [IN]++] = pivcol ;
	    }

	    tpi = Row_tuples [pivrow [IN]] ;
	    if (tpi)
	    {
		tp = (Tuple *) (Memory + tpi) ;
		tp1 = tp ;
		tp2 = tp ;
		tpend = tp + Row_tlen [pivrow [IN]] ;
		for ( ; tp < tpend ; tp++)
		{
		    e = tp->e ;
		    ASSERT (e > 0 && e <= Work->nel) ;
		    if (!E [e])
		    {
			continue ;	/* element already deallocated */
		    }
		    f = tp->f ;
		    p = Memory + E [e] ;
		    ep = (Element *) p ;
		    p += UNITS (Element, 1) ;
		    Cols = (Int *) p ;
		    ncols = ep->ncols ;
		    Rows = Cols + ncols ;
		    if (Rows [f] == EMPTY)
		    {
			continue ;	/* row already assembled */
		    }
		    ASSERT (pivrow [IN] == Rows [f]) ;

		    for (j = 0 ; j < ncols ; j++)
		    {
			col = Cols [j] ;
			ASSERT (col >= EMPTY && col < Work->n_col) ;
			if ((col >= 0) && (Wrp [col] != Wrpflag)
			    && Fcpos [col] < 0)
			{
			    ASSERT (NON_PIVOTAL_COL (col)) ;
			    if (rdeg [IN] >= max_rdeg)
			    {
				/* :: pattern change (rdeg in failure) :: */
				DEBUGm4 (("rdeg [IN] >= max_rdeg failure\n")) ;
				return (UMFPACK_ERROR_different_pattern) ;
			    }
			    Wrp [col] = Wrpflag ;
			    W_i [rdeg [IN]++] = col ;
			}
		    }

		    *tp2++ = *tp ;	/* leave the tuple in the list */
		}
		Row_tlen [pivrow [IN]] = tp2 - tp1 ;
	    }

#ifndef NDEBUG
	    DEBUG4 (("Reduced IN row:\n")) ;
	    for (j = 0 ; j < fncols ; j++)
	    {
		DEBUG6 ((" "ID" "ID" "ID"\n",
		    j, Work->Fcols [j], Fcpos [Work->Fcols [j]])) ;
		ASSERT (Fcpos [Work->Fcols [j]] >= 0) ;
	    }
	    for (j = fncols ; j < rdeg [IN] ; j++)
	    {
		DEBUG6 ((" "ID" "ID" "ID"\n", j, W_i [j], Wrp [W_i [j]]));
		ASSERT (W_i [j] >= 0 && W_i [j] < Work->n_col) ;
		ASSERT (Wrp [W_i [j]] == Wrpflag) ;
	    }
	    /* mark the end of the pattern in case we scan it by mistake */
	    /* Note that this means W_i must be of size >= fncols_max + 1 */
	    W_i [rdeg [IN]] = EMPTY ;
#endif

	    /* rdeg [IN] is now the exact degree of the IN row */

	    /* clear Work->Wrp. */
	    Work->Wrpflag++ ;
	    /* All Wrp [0..n_col] is now < Wrpflag */
	}
    }

#ifndef NDEBUG
    /* check Wrp */
    if (UMF_debug > 0 || Work->n_col < 1000)
    {
	for (i = 0 ; i < Work->n_col ; i++)
	{
	    ASSERT (Wrp [i] < Work->Wrpflag) ;
	}
    }
#endif

    /* ---------------------------------------------------------------------- */
    /* construct the candidate row not in the front, if any */
    /* ---------------------------------------------------------------------- */

#ifndef NDEBUG
    DEBUG4 (("pivrow [OUT]: "ID"\n", pivrow [OUT])) ;
    UMF_dump_rowcol (0, Numeric, Work, pivrow [OUT], TRUE) ;
#endif

    /* If this is a candidate row from the row merge set, force it to be */
    /* scanned (ignore prior_pivrow [OUT]). */

    if (pivrow [OUT] != EMPTY)
    {
	freebie [OUT] = (pivrow [OUT] == prior_pivrow [OUT]) && cdeg1  > 0 ;
	ASSERT (cdeg1  >= 0) ;

	if (!freebie [OUT])
	{

	    Wrpflag = Work->Wrpflag ;

	    /* -------------------------------------------------------------- */
	    /* construct the pattern of the row */
	    /* -------------------------------------------------------------- */

	    rdeg [OUT] = 0 ;

	    ASSERT (pivrow [OUT] >= 0 && pivrow [OUT] < Work->n_row) ;
	    ASSERT (NON_PIVOTAL_ROW (pivrow [OUT])) ;

	    /* add the pivot column itself */
	    ASSERT (Wrp [pivcol] != Wrpflag) ;
	    DEBUG3 (("Adding pivot col to pivrow [OUT] pattern\n")) ;
	    if (rdeg [OUT] >= max_rdeg)
	    {
		/* :: pattern change (out) :: */
		return (UMFPACK_ERROR_different_pattern) ;
	    }
	    Wrp [pivcol] = Wrpflag ;
	    W_o [rdeg [OUT]++] = pivcol ;

	    tpi = Row_tuples [pivrow [OUT]] ;
	    if (tpi)
	    {
		tp = (Tuple *) (Memory + tpi) ;
		tp1 = tp ;
		tp2 = tp ;
		tpend = tp + Row_tlen [pivrow [OUT]] ;
		for ( ; tp < tpend ; tp++)
		{
		    e = tp->e ;
		    ASSERT (e > 0 && e <= Work->nel) ;
		    if (!E [e])
		    {
			continue ;	/* element already deallocated */
		    }
		    f = tp->f ;
		    p = Memory + E [e] ;
		    ep = (Element *) p ;
		    p += UNITS (Element, 1) ;
		    Cols = (Int *) p ;
		    ncols = ep->ncols ;
		    Rows = Cols + ncols ;
		    if (Rows [f] == EMPTY)
		    {
			continue ;	/* row already assembled */
		    }
		    ASSERT (pivrow [OUT] == Rows [f]) ;

		    for (j = 0 ; j < ncols ; j++)
		    {
			col = Cols [j] ;
			ASSERT (col >= EMPTY && col < Work->n_col) ;
			if ((col >= 0) && (Wrp [col] != Wrpflag))
			{
			    ASSERT (NON_PIVOTAL_COL (col)) ;
			    if (rdeg [OUT] >= max_rdeg)
			    {
				/* :: pattern change (rdeg out failure) :: */
				DEBUGm4 (("rdeg [OUT] failure\n")) ;
				return (UMFPACK_ERROR_different_pattern) ;
			    }
			    Wrp [col] = Wrpflag ;
			    W_o [rdeg [OUT]++] = col ;
			}
		    }
		    *tp2++ = *tp ;	/* leave the tuple in the list */
		}
		Row_tlen [pivrow [OUT]] = tp2 - tp1 ;
	    }

#ifndef NDEBUG
	    DEBUG4 (("Reduced row OUT:\n")) ;
	    for (j = 0 ; j < rdeg [OUT] ; j++)
	    {
		DEBUG6 ((" "ID" "ID" "ID"\n", j, W_o [j], Wrp [W_o [j]])) ;
		ASSERT (W_o [j] >= 0 && W_o [j] < Work->n_col) ;
		ASSERT (Wrp [W_o [j]] == Wrpflag) ;
	    }
	    /* mark the end of the pattern in case we scan it by mistake */
	    /* Note that this means W_o must be of size >= fncols_max + 1 */
	    W_o [rdeg [OUT]] = EMPTY ;
#endif

	    /* rdeg [OUT] is now the exact degree of the row */

	    /* clear Work->Wrp. */
	    Work->Wrpflag++ ;
	    /* All Wrp [0..n] is now < Wrpflag */

	}

    }
    DEBUG5 (("Row_search end ] \n")) ;

#ifndef NDEBUG
    /* check Wrp */
    if (UMF_debug > 0 || Work->n_col < 1000)
    {
	for (i = 0 ; i < Work->n_col ; i++)
	{
	    ASSERT (Wrp [i] < Work->Wrpflag) ;
	}
    }
#endif

    return (UMFPACK_OK) ;
}
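/* A minimal standalone sketch of the marking idiom used above with Wrp and
 * Wrpflag: an entry col is marked iff Wrp [col] == flag, and bumping flag
 * clears every mark in O(1), so the pattern scan never rewrites the array.
 * All names and the struct layout below are illustrative only. */

#include <stdlib.h>

typedef struct
{
    int *Wrp ;		/* Wrp [col] == flag means col is marked */
    int flag ;		/* current mark value; always > every stale entry */
    int n ;
} MarkSet ;

static int markset_init (MarkSet *m, int n)
{
    m->Wrp = (int *) calloc (n, sizeof (int)) ;	/* all zero, all < flag */
    m->flag = 1 ;
    m->n = n ;
    return (m->Wrp != NULL) ;
}

static void markset_mark (MarkSet *m, int col)
{
    m->Wrp [col] = m->flag ;
}

static int markset_is_marked (MarkSet *m, int col)
{
    return (m->Wrp [col] == m->flag) ;
}

/* clear all marks at once, as Work->Wrpflag++ does in the routine above */
static void markset_clear (MarkSet *m)
{
    m->flag++ ;
}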
Example #7
GLOBAL Int UMF_solve
(
    Int sys,
    const Int Ap [ ],
    const Int Ai [ ],
    const double Ax [ ],
    double Xx [ ],
    const double Bx [ ],
#ifdef COMPLEX
    const double Az [ ],
    double Xz [ ],
    const double Bz [ ],
#endif
    NumericType *Numeric,
    Int irstep,
    double Info [UMFPACK_INFO],
    Int Pattern [ ],		/* size n */
    double SolveWork [ ]	/* if irstep>0 real:  size 5*n.  complex:10*n */
				/* otherwise   real:  size   n.  complex: 4*n */
)
{
    /* ---------------------------------------------------------------------- */
    /* local variables */
    /* ---------------------------------------------------------------------- */

    Entry axx, wi, xj, zi, xi, aij, bi ;
    double omega [3], d, z2i, yi, flops ;
    Entry *W, *Z, *S, *X ;
    double *Z2, *Y, *B2, *Rs ;
    Int *Rperm, *Cperm, i, n, p, step, j, nz, status, p2, do_scale ;
#ifdef COMPLEX
    Int AXsplit ;
    Int Bsplit ;
#endif
#ifndef NRECIPROCAL
    Int do_recip = Numeric->do_recip ;
#endif

    /* ---------------------------------------------------------------------- */
    /* initializations */
    /* ---------------------------------------------------------------------- */

#ifndef NDEBUG
    UMF_dump_lu (Numeric) ;
    ASSERT (Numeric && Xx && Bx && Pattern && SolveWork && Info) ;
#endif

    nz = 0 ;
    omega [0] = 0. ;
    omega [1] = 0. ;
    omega [2] = 0. ;
    Rperm = Numeric->Rperm ;
    Cperm = Numeric->Cperm ;
    Rs = Numeric->Rs ;		/* row scale factors */
    do_scale = (Rs != (double *) NULL) ;
    flops = 0 ;
    Info [UMFPACK_SOLVE_FLOPS] = 0 ;
    Info [UMFPACK_IR_TAKEN] = 0 ;
    Info [UMFPACK_IR_ATTEMPTED] = 0 ;

    /* UMFPACK_solve does not call this routine if A is rectangular */
    ASSERT (Numeric->n_row == Numeric->n_col) ;
    n = Numeric->n_row ;
    if (Numeric->nnzpiv < n
	|| SCALAR_IS_ZERO (Numeric->rcond) || SCALAR_IS_NAN (Numeric->rcond))
    {
	/* Note that systems involving just L return UMFPACK_OK, even if */
	/* A is singular (L always has a unit diagonal). */
	DEBUGm4 (("Note, matrix is singular in umf_solve\n")) ;
	status = UMFPACK_WARNING_singular_matrix ;
	irstep = 0 ;
    }
    else
    {
	status = UMFPACK_OK ;
    }
    irstep = MAX (0, irstep) ;			/* make sure irstep is >= 0 */

    W = (Entry *) SolveWork ;			/* Entry W [0..n-1] */

    Z = (Entry *) NULL ;	/* unused if no iterative refinement */
    S = (Entry *) NULL ;
    Y = (double *) NULL ;
    Z2 = (double *) NULL ;
    B2 = (double *) NULL ;

#ifdef COMPLEX
    if (irstep > 0)
    {
	if (!Ap || !Ai || !Ax)
	{
	    return (UMFPACK_ERROR_argument_missing) ;
	}
	/* A, B, and X in split format if Az, Bz, and Xz present */
	AXsplit = SPLIT (Az) || SPLIT (Xz) ;
	Z = (Entry *) (SolveWork + 4*n) ;	/* Entry Z [0..n-1] */
	S = (Entry *) (SolveWork + 6*n) ;	/* Entry S [0..n-1] */
	Y = (double *) (SolveWork + 8*n) ;	/* double Y [0..n-1] */
	B2 = (double *) (SolveWork + 9*n) ;	/* double B2 [0..n-1] */
	Z2 = (double *) Z ;		/* double Z2 [0..n-1], equiv. to Z */
    }
    else
    {
	/* A is ignored; only look at X for split/packed cases */
	AXsplit = SPLIT (Xz) ;
    }
    Bsplit = SPLIT (Bz) ;

    if (AXsplit)
    {
	X = (Entry *) (SolveWork + 2*n) ;	/* Entry X [0..n-1] */
    }
    else
    {
	X = (Entry *) Xx ;			/* Entry X [0..n-1] */
    }
#else
    X = (Entry *) Xx ;				/* Entry X [0..n-1] */
    if (irstep > 0)
    {
	if (!Ap || !Ai || !Ax)
	{
	    return (UMFPACK_ERROR_argument_missing) ;
	}
	Z = (Entry *) (SolveWork + n) ;		/* Entry Z [0..n-1] */
	S = (Entry *) (SolveWork + 2*n) ;	/* Entry S [0..n-1] */
	Y = (double *) (SolveWork + 3*n) ;	/* double Y [0..n-1] */
	B2 = (double *) (SolveWork + 4*n) ;	/* double B2 [0..n-1] */
	Z2 = (double *) Z ;		/* double Z2 [0..n-1], equiv. to Z */
    }
#endif

    /* ---------------------------------------------------------------------- */
    /* determine which system to solve */
    /* ---------------------------------------------------------------------- */

    if (sys == UMFPACK_A)
    {

	/* ------------------------------------------------------------------ */
	/* solve A x = b with optional iterative refinement */
	/* ------------------------------------------------------------------ */

	if (irstep > 0)
	{

	    /* -------------------------------------------------------------- */
	    /* using iterative refinement:  compute Y and B2 */
	    /* -------------------------------------------------------------- */

	    nz = Ap [n] ;
	    Info [UMFPACK_NZ] = nz ;

	    /* A is stored by column */
	    /* Y (i) = ||R A_i||, 1-norm of row i of R A */
	    for (i = 0 ; i < n ; i++)
	    {
		Y [i] = 0. ;
	    }
	    flops += (ABS_FLOPS + 1) * nz ;
	    p2 = Ap [n] ;
	    for (p = 0 ; p < p2 ; p++)
	    {
		/* Y [Ai [p]] += ABS (Ax [p]) ; */
	        ASSIGN (aij, Ax, Az, p, AXsplit) ;
		ABS (d, aij) ;
		Y [Ai [p]] += d ;
	    }

	    /* B2 = abs (B) */
	    flops += ABS_FLOPS * n ;
	    for (i = 0 ; i < n ; i++)
	    {
		/* B2 [i] = ABS (B [i]) ; */
		ASSIGN (bi, Bx, Bz, i, Bsplit) ;
		ABS (B2 [i], bi) ;
	    }

	    /* scale Y and B2. */
	    if (do_scale)
	    {
		/* Y = R Y */
		/* B2 = R B2 */
#ifndef NRECIPROCAL
		if (do_recip)
		{
		    /* multiply by the scale factors */
		    for (i = 0 ; i < n ; i++)
		    {
			Y [i]  *= Rs [i] ;
			B2 [i] *= Rs [i] ;
		    }
		}
		else
#endif
		{
		    /* divide by the scale factors */
		    for (i = 0 ; i < n ; i++)
		    {
			Y [i]  /= Rs [i] ;
			B2 [i] /= Rs [i] ;
		    }
		}

		flops += 2 * n ;
	    }
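	    /* Note on the two branches above: when do_recip is TRUE, Rs holds
	     * the reciprocals of the row scale factors, so applying R is a
	     * multiply; otherwise Rs holds the factors themselves and
	     * applying R is a divide.  Compiling with NRECIPROCAL defined
	     * forces the divide path.  The same pattern recurs throughout
	     * this routine. */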

	}

	for (step = 0 ; step <= irstep ; step++)
	{

	    /* -------------------------------------------------------------- */
	    /* Solve A x = b (step 0): */
	    /*  x = Q (U \ (L \ (P R b))) */
	    /* and then perform iterative refinement (step > 0): */
	    /*  x = x + Q (U \ (L \ (P R (b - A x)))) */
	    /* -------------------------------------------------------------- */

	    if (step == 0)
	    {
		if (do_scale)
		{
		    /* W = P R b, using X as workspace, since Z is not
		     * allocated if irstep = 0. */
#ifndef NRECIPROCAL
		    if (do_recip)
		    {
			/* multiply by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    ASSIGN (X [i], Bx, Bz, i, Bsplit) ;
			    SCALE (X [i], Rs [i]) ;
			}
		    }
		    else
#endif
		    {
			/* divide by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    ASSIGN (X [i], Bx, Bz, i, Bsplit) ;
			    SCALE_DIV (X [i], Rs [i]) ;
			}
		    }
		    flops += SCALE_FLOPS * n ;
		    for (i = 0 ; i < n ; i++)
		    {
			W [i] = X [Rperm [i]] ;
		    }
		}
		else
		{
		    /* W = P b, since the row scaling R = I */
		    for (i = 0 ; i < n ; i++)
		    {
			/* W [i] = B [Rperm [i]] ; */
			ASSIGN (W [i], Bx, Bz, Rperm [i], Bsplit) ;
		    }
		}
	    }
	    else
	    {
		for (i = 0 ; i < n ; i++)
		{
		    /* Z [i] = B [i] ; */
		    ASSIGN (Z [i], Bx, Bz, i, Bsplit) ;
		}
		flops += MULTSUB_FLOPS * nz ;
		for (i = 0 ; i < n ; i++)
		{
		    xi = X [i] ;
		    p2 = Ap [i+1] ;
		    for (p = Ap [i] ; p < p2 ; p++)
		    {
			/* Z [Ai [p]] -= Ax [p] * xi ; */
			ASSIGN (aij, Ax, Az, p, AXsplit) ;
			MULT_SUB (Z [Ai [p]], aij, xi) ;
		    }
		}
		/* scale, Z = R Z */
		if (do_scale)
		{
#ifndef NRECIPROCAL
		    if (do_recip)
		    {
			/* multiply by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    SCALE (Z [i], Rs [i]) ;
			}
		    }
		    else
#endif
		    {
			/* divide by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    SCALE_DIV (Z [i], Rs [i]) ;
			}
		    }
		    flops += SCALE_FLOPS * n ;
		}
		for (i = 0 ; i < n ; i++)
		{
		    W [i] = Z [Rperm [i]] ;
		}
	    }

	    flops += UMF_lsolve (Numeric, W, Pattern) ;
	    flops += UMF_usolve (Numeric, W, Pattern) ;

	    if (step == 0)
	    {
		for (i = 0 ; i < n ; i++)
		{
		    X [Cperm [i]] = W [i] ;
		}
	    }
	    else
	    {
		flops += ASSEMBLE_FLOPS * n ;
		for (i = 0 ; i < n ; i++)
		{
		    /* X [Cperm [i]] += W [i] ; */
		    ASSEMBLE (X [Cperm [i]], W [i]) ;
		}
	    }

	    /* -------------------------------------------------------------- */
	    /* sparse backward error estimate */
	    /* -------------------------------------------------------------- */

	    if (irstep > 0)
	    {

		/* ---------------------------------------------------------- */
		/* A is stored by column */
		/* W (i) = R (b - A x)_i, residual */
		/* Z2 (i) = R (|A||x|)_i */
		/* ---------------------------------------------------------- */

		for (i = 0 ; i < n ; i++)
		{
		    /* W [i] = B [i] ; */
		    ASSIGN (W [i], Bx, Bz, i, Bsplit) ;
		    Z2 [i] = 0. ;
		}
		flops += (MULT_FLOPS + DECREMENT_FLOPS + ABS_FLOPS + 1) * nz ;
		for (j = 0 ; j < n ; j++)
		{
		    xj = X [j] ;
		    p2 = Ap [j+1] ;
		    for (p = Ap [j] ; p < p2 ; p++)
		    {
			i = Ai [p] ;

			/* axx = Ax [p] * xj ; */
			ASSIGN (aij, Ax, Az, p, AXsplit) ;
			MULT (axx, aij, xj) ;

			/* W [i] -= axx ; */
			DECREMENT (W [i], axx) ;

			/* Z2 [i] += ABS (axx) ; */
			ABS (d, axx) ;
			Z2 [i] += d ;
		    }
		}

		/* scale W and Z2 */
		if (do_scale)
		{
		    /* Z2 = R Z2 */
		    /* W = R W */
#ifndef NRECIPROCAL
		    if (do_recip)
		    {
			/* multiply by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    SCALE (W [i], Rs [i]) ;
			    Z2 [i] *= Rs [i] ;
			}
		    }
		    else
#endif
		    {
			/* divide by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    SCALE_DIV (W [i], Rs [i]) ;
			    Z2 [i] /= Rs [i] ;
			}
		    }
		    flops += (SCALE_FLOPS + 1) * n ;
		}

		flops += (2*ABS_FLOPS + 5) * n ;
		if (do_step (omega, step, B2, X, W, Y, Z2, S, n, Info))
		{
		    /* iterative refinement is done */
		    break ;
		}

	    }

	}

    }
    else if (sys == UMFPACK_At)
    {

	/* ------------------------------------------------------------------ */
	/* solve A' x = b with optional iterative refinement */
	/* ------------------------------------------------------------------ */

	/* A' is the complex conjugate transpose */

	if (irstep > 0)
	{

	    /* -------------------------------------------------------------- */
	    /* using iterative refinement:  compute Y */
	    /* -------------------------------------------------------------- */

	    nz = Ap [n] ;
	    Info [UMFPACK_NZ] = nz ;

	    /* A' is stored by row */
	    /* Y (i) = ||(A' R)_i||, 1-norm of row i of A' R */

	    if (do_scale)
	    {
		flops += (ABS_FLOPS + 2) * nz ;
#ifndef NRECIPROCAL
		if (do_recip)
		{
		    /* multiply by the scale factors */
		    for (i = 0 ; i < n ; i++)
		    {
			yi = 0. ;
			p2 = Ap [i+1] ;
			for (p = Ap [i] ; p < p2 ; p++)
			{
			    /* yi += ABS (Ax [p]) * Rs [Ai [p]] ; */
			    /* note that abs (aij) is the same as
			     * abs (conj (aij)) */
			    ASSIGN (aij, Ax, Az, p, AXsplit) ;
			    ABS (d, aij) ;
			    yi += (d * Rs [Ai [p]]) ;
			}
			Y [i] = yi ;
		    }
		}
		else
#endif
		{
		    /* divide by the scale factors */
		    for (i = 0 ; i < n ; i++)
		    {
			yi = 0. ;
			p2 = Ap [i+1] ;
			for (p = Ap [i] ; p < p2 ; p++)
			{
			    /* yi += ABS (Ax [p]) / Rs [Ai [p]] ; */
			    /* note that abs (aij) is the same as
			     * abs (conj (aij)) */
			    ASSIGN (aij, Ax, Az, p, AXsplit) ;
			    ABS (d, aij) ;
			    yi += (d / Rs [Ai [p]]) ;
			}
			Y [i] = yi ;
		    }
		}
	    }
	    else
	    {
		/* no scaling */
		flops += (ABS_FLOPS + 1) * nz ;
		for (i = 0 ; i < n ; i++)
		{
		    yi = 0. ;
		    p2 = Ap [i+1] ;
		    for (p = Ap [i] ; p < p2 ; p++)
		    {
			/* yi += ABS (Ax [p]) ; */
			/* note that abs (aij) is the same as
			 * abs (conj (aij)) */
			ASSIGN (aij, Ax, Az, p, AXsplit) ;
			ABS (d, aij) ;
			yi += d ;
		    }
		    Y [i] = yi ;
		}
	    }

	    /* B2 = abs (B) */
	    for (i = 0 ; i < n ; i++)
	    {
		/* B2 [i] = ABS (B [i]) ; */
		ASSIGN (bi, Bx, Bz, i, Bsplit) ;
		ABS (B2 [i], bi) ;
	    }

	}

	for (step = 0 ; step <= irstep ; step++)
	{

	    /* -------------------------------------------------------------- */
	    /* Solve A' x = b (step 0): */
	    /*	x = R P' (L' \ (U' \ (Q' b))) */
	    /* and then perform iterative refinement (step > 0): */
	    /*	x = x + R P' (L' \ (U' \ (Q' (b - A' x)))) */
	    /* -------------------------------------------------------------- */

	    if (step == 0)
	    {
		/* W = Q' b */
		for (i = 0 ; i < n ; i++)
		{
		    /* W [i] = B [Cperm [i]] ; */
		    ASSIGN (W [i], Bx, Bz, Cperm [i], Bsplit) ;
		}
	    }
	    else
	    {
		/* Z = b - A' x */
		for (i = 0 ; i < n ; i++)
		{
		    /* Z [i] = B [i] ; */
		    ASSIGN (Z [i], Bx, Bz, i, Bsplit) ;
		}
		flops += MULTSUB_FLOPS * nz ;
		for (i = 0 ; i < n ; i++)
		{
		    zi = Z [i] ;
		    p2 = Ap [i+1] ;
		    for (p = Ap [i] ; p < p2 ; p++)
		    {
			/* zi -= conjugate (Ax [p]) * X [Ai [p]] ; */
			ASSIGN (aij, Ax, Az, p, AXsplit) ;
			MULT_SUB_CONJ (zi, X [Ai [p]], aij) ;
		    }
		    Z [i] = zi ;
		}
		/* W = Q' Z */
		for (i = 0 ; i < n ; i++)
		{
		    W [i] = Z [Cperm [i]] ;
		}
	    }

	    flops += UMF_uhsolve (Numeric, W, Pattern) ;
	    flops += UMF_lhsolve (Numeric, W, Pattern) ;

	    if (step == 0)
	    {

		/* X = R P' W */
		/* do not use Z, since it isn't allocated if irstep = 0 */

		/* X = P' W */
		for (i = 0 ; i < n ; i++)
		{
		    X [Rperm [i]] = W [i] ;
		}
		if (do_scale)
		{
		    /* X = R X */
#ifndef NRECIPROCAL
		    if (do_recip)
		    {
			/* multiply by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    SCALE (X [i], Rs [i]) ;
			}
		    }
		    else
#endif
		    {
			/* divide by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    SCALE_DIV (X [i], Rs [i]) ;
			}
		    }
		    flops += SCALE_FLOPS * n ;
		}

	    }
	    else
	    {

		/* Z = P' W */
		for (i = 0 ; i < n ; i++)
		{
		    Z [Rperm [i]] = W [i] ;
		}
		if (do_scale)
		{
		    /* Z = R Z */
#ifndef NRECIPROCAL
		    if (do_recip)
		    {
			/* multiply by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    SCALE (Z [i], Rs [i]) ;
			}
		    }
		    else
#endif
		    {
			/* divide by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    SCALE_DIV (Z [i], Rs [i]) ;
			}
		    }
		    flops += SCALE_FLOPS * n ;
		}

		flops += ASSEMBLE_FLOPS * n ;
		/* X += Z */
		for (i = 0 ; i < n ; i++)
		{
		    /* X [i] += Z [i] ; was +=W[i] in v4.3, which is wrong */
		    ASSEMBLE (X [i], Z [i]) ;	/* bug fix, v4.3.1 */
		}
	    }

	    /* -------------------------------------------------------------- */
	    /* sparse backward error estimate */
	    /* -------------------------------------------------------------- */

	    if (irstep > 0)
	    {

		/* ---------------------------------------------------------- */
		/* A' is stored by row */
		/* W (i) = (b - A' x)_i, residual */
		/* Z2 (i) = (|A'||x|)_i */
		/* ---------------------------------------------------------- */

		flops += (MULT_FLOPS + DECREMENT_FLOPS + ABS_FLOPS + 1) * nz ;
		for (i = 0 ; i < n ; i++)
		{
		    /* wi = B [i] ; */
		    ASSIGN (wi, Bx, Bz, i, Bsplit) ;
		    z2i = 0. ;
		    p2 = Ap [i+1] ;
		    for (p = Ap [i] ; p < p2 ; p++)
		    {
			/* axx = conjugate (Ax [p]) * X [Ai [p]] ; */
			ASSIGN (aij, Ax, Az, p, AXsplit) ;
			MULT_CONJ (axx, X [Ai [p]], aij) ;

			/* wi -= axx ; */
			DECREMENT (wi, axx) ;

			/* z2i += ABS (axx) ; */
			ABS (d, axx) ;
			z2i += d ;
		    }
		    W [i] = wi ;
		    Z2 [i] = z2i ;
		}

		flops += (2*ABS_FLOPS + 5) * n ;
		if (do_step (omega, step, B2, X, W, Y, Z2, S, n, Info))
		{
		    /* iterative refinement is done */
		    break ;
		}

	    }

	}

    }
    else if (sys == UMFPACK_Aat)
    {

	/* ------------------------------------------------------------------ */
	/* solve A.' x = b with optional iterative refinement */
	/* ------------------------------------------------------------------ */

	/* A.' is the array transpose */

	if (irstep > 0)
	{

	    /* -------------------------------------------------------------- */
	    /* using iterative refinement:  compute Y */
	    /* -------------------------------------------------------------- */

	    nz = Ap [n] ;
	    Info [UMFPACK_NZ] = nz ;

	    /* A.' is stored by row */
	    /* Y (i) = ||(A.' R)_i||, 1-norm of row i of A.' R */

	    if (do_scale)
	    {
		flops += (ABS_FLOPS + 2) * nz ;
#ifndef NRECIPROCAL
		if (do_recip)
		{
		    /* multiply by the scale factors */
		    for (i = 0 ; i < n ; i++)
		    {
			yi = 0. ;
			p2 = Ap [i+1] ;
			for (p = Ap [i] ; p < p2 ; p++)
			{
			    /* yi += ABS (Ax [p]) * Rs [Ai [p]] ; */
			    /* note that A.' is the array transpose,
			     * so no conjugate */
			    ASSIGN (aij, Ax, Az, p, AXsplit) ;
			    ABS (d, aij) ;
			    yi += (d * Rs [Ai [p]]) ;
			}
			Y [i] = yi ;
		    }
		}
		else
#endif
		{
		    /* divide by the scale factors */
		    for (i = 0 ; i < n ; i++)
		    {
			yi = 0. ;
			p2 = Ap [i+1] ;
			for (p = Ap [i] ; p < p2 ; p++)
			{
			    /* yi += ABS (Ax [p]) / Rs [Ai [p]] ; */
			    /* note that A.' is the array transpose,
			     * so no conjugate */
			    ASSIGN (aij, Ax, Az, p, AXsplit) ;
			    ABS (d, aij) ;
			    yi += (d / Rs [Ai [p]]) ;
			}
			Y [i] = yi ;
		    }
		}
	    }
	    else
	    {
		/* no scaling */
		flops += (ABS_FLOPS + 1) * nz ;
		for (i = 0 ; i < n ; i++)
		{
		    yi = 0. ;
		    p2 = Ap [i+1] ;
		    for (p = Ap [i] ; p < p2 ; p++)
		    {
			/* yi += ABS (Ax [p]) */
			/* note that A.' is the array transpose,
			 * so no conjugate */
			ASSIGN (aij, Ax, Az, p, AXsplit) ;
			ABS (d, aij) ;
			yi += d ;
		    }
		    Y [i] = yi ;
		}
	    }

	    /* B2 = abs (B) */
	    for (i = 0 ; i < n ; i++)
	    {
		/* B2 [i] = ABS (B [i]) ; */
		ASSIGN (bi, Bx, Bz, i, Bsplit) ;
		ABS (B2 [i], bi) ;
	    }

	}

	for (step = 0 ; step <= irstep ; step++)
	{

	    /* -------------------------------------------------------------- */
	    /* Solve A.' x = b (step 0): */
	    /*	x = R P' (L.' \ (U.' \ (Q' b))) */
	    /* and then perform iterative refinement (step > 0): */
	    /*	x = x + R P' (L.' \ (U.' \ (Q' (b - A.' x)))) */
	    /* -------------------------------------------------------------- */

	    if (step == 0)
	    {
		/* W = Q' b */
		for (i = 0 ; i < n ; i++)
		{
		    /* W [i] = B [Cperm [i]] ; */
		    ASSIGN (W [i], Bx, Bz, Cperm [i], Bsplit) ;
		}
	    }
	    else
	    {
		/* Z = b - A.' x */
		for (i = 0 ; i < n ; i++)
		{
		    /* Z [i] = B [i] ; */
		    ASSIGN (Z [i], Bx, Bz, i, Bsplit) ;
		}
		flops += MULTSUB_FLOPS * nz ;
		for (i = 0 ; i < n ; i++)
		{
		    zi = Z [i] ;
		    p2 = Ap [i+1] ;
		    for (p = Ap [i] ; p < p2 ; p++)
		    {
			/* zi -= Ax [p] * X [Ai [p]] ; */
			ASSIGN (aij, Ax, Az, p, AXsplit) ;
			MULT_SUB (zi, aij, X [Ai [p]]) ;
		    }
		    Z [i] = zi ;
		}
		/* W = Q' Z */
		for (i = 0 ; i < n ; i++)
		{
		    W [i] = Z [Cperm [i]] ;
		}
	    }

	    flops += UMF_utsolve (Numeric, W, Pattern) ;
	    flops += UMF_ltsolve (Numeric, W, Pattern) ;

	    if (step == 0)
	    {

		/* X = R P' W */
		/* do not use Z, since it isn't allocated if irstep = 0 */

		/* X = P' W */
		for (i = 0 ; i < n ; i++)
		{
		    X [Rperm [i]] = W [i] ;
		}
		if (do_scale)
		{
		    /* X = R X */
#ifndef NRECIPROCAL
		    if (do_recip)
		    {
			/* multiply by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    SCALE (X [i], Rs [i]) ;
			}
		    }
		    else
#endif
		    {
			/* divide by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    SCALE_DIV (X [i], Rs [i]) ;
			}
		    }
		    flops += SCALE_FLOPS * n ;
		}

	    }
	    else
	    {

		/* Z = P' W */
		for (i = 0 ; i < n ; i++)
		{
		    Z [Rperm [i]] = W [i] ;
		}
		if (do_scale)
		{
		    /* Z = R Z */
#ifndef NRECIPROCAL
		    if (do_recip)
		    {
			/* multiply by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    SCALE (Z [i], Rs [i]) ;
			}
		    }
		    else
#endif
		    {
			/* divide by the scale factors */
			for (i = 0 ; i < n ; i++)
			{
			    SCALE_DIV (Z [i], Rs [i]) ;
			}
		    }
		    flops += SCALE_FLOPS * n ;
		}

		flops += ASSEMBLE_FLOPS * n ;
		/* X += Z */
		for (i = 0 ; i < n ; i++)
		{
		    /* X [i] += Z [i] ; was +=W[i] in v4.3, which is wrong */
		    ASSEMBLE (X [i], Z [i]) ;	/* bug fix, v4.3.1 */
		}
	    }

	    /* -------------------------------------------------------------- */
	    /* sparse backward error estimate */
	    /* -------------------------------------------------------------- */

	    if (irstep > 0)
	    {

		/* ---------------------------------------------------------- */
		/* A.' is stored by row */
		/* W (i) = (b - A.' x)_i, residual */
		/* Z2 (i) = (|A.'||x|)_i */
		/* ---------------------------------------------------------- */

		flops += (MULT_FLOPS + DECREMENT_FLOPS + ABS_FLOPS + 1) * nz ;
		for (i = 0 ; i < n ; i++)
		{
		    /* wi = B [i] ; */
		    ASSIGN (wi, Bx, Bz, i, Bsplit) ;
		    z2i = 0. ;
		    p2 = Ap [i+1] ;
		    for (p = Ap [i] ; p < p2 ; p++)
		    {
			/* axx = Ax [p] * X [Ai [p]] ; */
			ASSIGN (aij, Ax, Az, p, AXsplit) ;
			MULT (axx, aij, X [Ai [p]]) ;

			/* wi -= axx ; */
			DECREMENT (wi, axx) ;

			/* z2i += ABS (axx) ; */
			ABS (d, axx) ;
			z2i += d ;
		    }
		    W [i] = wi ;
		    Z2 [i] = z2i ;
		}

		flops += (2*ABS_FLOPS + 5) * n ;
		if (do_step (omega, step, B2, X, W, Y, Z2, S, n, Info))
		{
		    /* iterative refinement is done */
		    break ;
		}

	    }

	}

    }
    else if (sys == UMFPACK_Pt_L)
    {

	/* ------------------------------------------------------------------ */
	/* Solve P'Lx=b:  x = L \ Pb */
	/* ------------------------------------------------------------------ */

	for (i = 0 ; i < n ; i++)
	{
	    /* X [i] = B [Rperm [i]] ; */
	    ASSIGN (X [i], Bx, Bz, Rperm [i], Bsplit) ;
	}
	flops = UMF_lsolve (Numeric, X, Pattern) ;
	status = UMFPACK_OK ;

    }
    else if (sys == UMFPACK_L)
    {

	/* ------------------------------------------------------------------ */
	/* Solve Lx=b:  x = L \ b */
	/* ------------------------------------------------------------------ */

	for (i = 0 ; i < n ; i++)
	{
	    /* X [i] = B [i] ; */
	    ASSIGN (X [i], Bx, Bz, i, Bsplit) ;
	}
	flops = UMF_lsolve (Numeric, X, Pattern) ;
	status = UMFPACK_OK ;

    }
    else if (sys == UMFPACK_Lt_P)
    {

	/* ------------------------------------------------------------------ */
	/* Solve L'Px=b:  x = P' (L' \ b) */
	/* ------------------------------------------------------------------ */

	for (i = 0 ; i < n ; i++)
	{
	    /* W [i] = B [i] ; */
	    ASSIGN (W [i], Bx, Bz, i, Bsplit) ;
	}
	flops = UMF_lhsolve (Numeric, W, Pattern) ;
	for (i = 0 ; i < n ; i++)
	{
	    X [Rperm [i]] = W [i] ;
	}
	status = UMFPACK_OK ;

    }
    else if (sys == UMFPACK_Lat_P)
    {

	/* ------------------------------------------------------------------ */
	/* Solve L.'Px=b:  x = P' (L.' \ b) */
	/* ------------------------------------------------------------------ */

	for (i = 0 ; i < n ; i++)
	{
	    /* W [i] = B [i] ; */
	    ASSIGN (W [i], Bx, Bz, i, Bsplit) ;
	}
	flops = UMF_ltsolve (Numeric, W, Pattern) ;
	for (i = 0 ; i < n ; i++)
	{
	    X [Rperm [i]] = W [i] ;
	}
	status = UMFPACK_OK ;

    }
    else if (sys == UMFPACK_Lt)
    {

	/* ------------------------------------------------------------------ */
	/* Solve L'x=b:  x = L' \ b */
	/* ------------------------------------------------------------------ */

	for (i = 0 ; i < n ; i++)
	{
	    /* X [i] = B [i] ; */
	    ASSIGN (X [i], Bx, Bz, i, Bsplit) ;
	}
	flops = UMF_lhsolve (Numeric, X, Pattern) ;
	status = UMFPACK_OK ;

    }
    else if (sys == UMFPACK_Lat)
    {

	/* ------------------------------------------------------------------ */
	/* Solve L.'x=b:  x = L.' \ b */
	/* ------------------------------------------------------------------ */

	for (i = 0 ; i < n ; i++)
	{
	    /* X [i] = B [i] ; */
	    ASSIGN (X [i], Bx, Bz, i, Bsplit) ;
	}
	flops = UMF_ltsolve (Numeric, X, Pattern) ;
	status = UMFPACK_OK ;

    }
    else if (sys == UMFPACK_U_Qt)
    {

	/* ------------------------------------------------------------------ */
	/* Solve UQ'x=b:  x = Q (U \ b) */
	/* ------------------------------------------------------------------ */

	for (i = 0 ; i < n ; i++)
	{
	    /* W [i] = B [i] ; */
	    ASSIGN (W [i], Bx, Bz, i, Bsplit) ;
	}
	flops = UMF_usolve (Numeric, W, Pattern) ;
	for (i = 0 ; i < n ; i++)
	{
	    X [Cperm [i]] = W [i] ;
	}

    }
    else if (sys == UMFPACK_U)
    {

	/* ------------------------------------------------------------------ */
	/* Solve Ux=b:  x = U \ b */
	/* ------------------------------------------------------------------ */

	for (i = 0 ; i < n ; i++)
	{
	    /* X [i] = B [i] ; */
	    ASSIGN (X [i], Bx, Bz, i, Bsplit) ;
	}
	flops = UMF_usolve (Numeric, X, Pattern) ;

    }
    else if (sys == UMFPACK_Q_Ut)
    {

	/* ------------------------------------------------------------------ */
	/* Solve QU'x=b:  x = U' \ Q'b */
	/* ------------------------------------------------------------------ */

	for (i = 0 ; i < n ; i++)
	{
	    /* X [i] = B [Cperm [i]] ; */
	    ASSIGN (X [i], Bx, Bz, Cperm [i], Bsplit) ;
	}
	flops = UMF_uhsolve (Numeric, X, Pattern) ;

    }
    else if (sys == UMFPACK_Q_Uat)
    {

	/* ------------------------------------------------------------------ */
	/* Solve QU.'x=b:  x = U.' \ Q'b */
	/* ------------------------------------------------------------------ */

	for (i = 0 ; i < n ; i++)
	{
	    /* X [i] = B [Cperm [i]] ; */
	    ASSIGN (X [i], Bx, Bz, Cperm [i], Bsplit) ;
	}
	flops = UMF_utsolve (Numeric, X, Pattern) ;

    }
    else if (sys == UMFPACK_Ut)
    {

	/* ------------------------------------------------------------------ */
	/* Solve U'x=b:  x = U' \ b */
	/* ------------------------------------------------------------------ */

	for (i = 0 ; i < n ; i++)
	{
	    /* X [i] = B [i] ; */
	    ASSIGN (X [i], Bx, Bz, i, Bsplit) ;
	}
	flops = UMF_uhsolve (Numeric, X, Pattern) ;

    }
    else if (sys == UMFPACK_Uat)
    {

	/* ------------------------------------------------------------------ */
	/* Solve U.'x=b:  x = U.' \ b */
	/* ------------------------------------------------------------------ */

	for (i = 0 ; i < n ; i++)
	{
	    /* X [i] = B [i] ; */
	    ASSIGN (X [i], Bx, Bz, i, Bsplit) ;
	}
	flops = UMF_utsolve (Numeric, X, Pattern) ;

    }
    else
    {
	return (UMFPACK_ERROR_invalid_system) ;
    }

#ifdef COMPLEX
    /* copy the solution back, from Entry X [ ] to double Xx [ ] and Xz [ ] */
    if (AXsplit)
    {
	for (i = 0 ; i < n ; i++)
	{
	    Xx [i] = REAL_COMPONENT (X [i]) ;
	    Xz [i] = IMAG_COMPONENT (X [i]) ;
	}
    }
#endif

    /* return UMFPACK_OK, or UMFPACK_WARNING_singular_matrix */
    /* Note that systems involving just L will return UMFPACK_OK */
    Info [UMFPACK_SOLVE_FLOPS] = flops ;
    return (status) ;
}
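/* A hedged usage sketch: reaching the solve path above through the public
 * UMFPACK interface (double/int version), with iterative refinement enabled
 * through Control [UMFPACK_IRSTEP].  The wrapper name solve_Axb is
 * illustrative; Ap, Ai, Ax, b, x are assumed to describe a valid n-by-n
 * column-form system. */

#include "umfpack.h"

static int solve_Axb (int n, const int Ap [ ], const int Ai [ ],
    const double Ax [ ], double x [ ], const double b [ ])
{
    void *Symbolic, *Numeric ;
    double Control [UMFPACK_CONTROL], Info [UMFPACK_INFO] ;
    int status ;

    umfpack_di_defaults (Control) ;
    Control [UMFPACK_IRSTEP] = 2 ;	/* allow up to 2 refinement steps */

    status = umfpack_di_symbolic (n, n, Ap, Ai, Ax, &Symbolic, Control, Info) ;
    if (status < UMFPACK_OK) return (status) ;

    status = umfpack_di_numeric (Ap, Ai, Ax, Symbolic, &Numeric, Control, Info) ;
    umfpack_di_free_symbolic (&Symbolic) ;
    if (status < UMFPACK_OK) return (status) ;

    /* sys = UMFPACK_A solves A x = b ; a singular matrix is reported as a
     * positive warning status, as in UMF_solve above */
    status = umfpack_di_solve (UMFPACK_A, Ap, Ai, Ax, x, b,
	Numeric, Control, Info) ;
    umfpack_di_free_numeric (&Numeric) ;
    return (status) ;
}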
Example #8
PRIVATE Int do_step		/* return TRUE if iterative refinement done */
(
    double omega [3],
    Int step,			/* which step of iterative refinement to do */
    const double B2 [ ],	/* abs (B) */
    Entry X [ ],
    const Entry W [ ],
    const double Y [ ],
    const double Z2 [ ],
    Entry S [ ],
    Int n,
    double Info [UMFPACK_INFO]
)
{
    double last_omega [3], tau, nctau, d1, wd1, d2, wd2, xi, yix, wi, xnorm ;
    Int i ;

    /* DBL_EPSILON is a standard ANSI C term defined in <float.h> */
    /* It is the smallest positive x such that 1.0+x != 1.0 */

    nctau = 1000 * n * DBL_EPSILON ;
    DEBUG0 (("do_step start: nctau = %30.20e\n", nctau)) ;
    ASSERT (UMF_report_vector (n, (double *) X, (double *) NULL, UMF_debug,
	FALSE, FALSE) == UMFPACK_OK) ;

    /* for approximate flop count, assume d1 > tau is always true */
    /* flops += (2*ABS_FLOPS + 5) * n ; (done in UMF_solve, above) */

    /* ---------------------------------------------------------------------- */
    /* save the last iteration in case we need to reinstate it */
    /* ---------------------------------------------------------------------- */

    last_omega [0] = omega [0] ;
    last_omega [1] = omega [1] ;
    last_omega [2] = omega [2] ;

    /* ---------------------------------------------------------------------- */
    /* compute sparse backward errors: omega [1] and omega [2] */
    /* ---------------------------------------------------------------------- */

    /* xnorm = ||x|| maxnorm */
    xnorm = 0.0 ;
    for (i = 0 ; i < n ; i++)
    {
	/* xi = ABS (X [i]) ; */
	ABS (xi, X [i]) ;
	if (SCALAR_IS_NAN (xi))
	{
	    xnorm = xi ;
	    break ;
	}
	/* no NaN's to consider here: */
	xnorm = MAX (xnorm, xi) ;
    }

    omega [1] = 0. ;
    omega [2] = 0. ;
    for (i = 0 ; i < n ; i++)
    {
	yix = Y [i] * xnorm ;
	tau = (yix + B2 [i]) * nctau ;
	d1 = Z2 [i] + B2 [i] ;
	/* wi = ABS (W [i]) ; */
	ABS (wi, W [i]) ;
	if (SCALAR_IS_NAN (d1))
	{
	    omega [1] = d1 ;
	    omega [2] = d1 ;
	    break ;
	}
	if (SCALAR_IS_NAN (tau))
	{
	    omega [1] = tau ;
	    omega [2] = tau ;
	    break ;
	}
	if (d1 > tau)		/* a double relop, but no NaN's here */
	{
	    wd1 = wi / d1 ;
	    omega [1] = MAX (omega [1], wd1) ;
	}
	else if (tau > 0.0)	/* a double relop, but no NaN's here */
	{
	    d2 = Z2 [i] + yix ;
	    wd2 = wi / d2 ;
	    omega [2] = MAX (omega [2], wd2) ;
	}
    }

    omega [0] = omega [1] + omega [2] ;
    Info [UMFPACK_OMEGA1] = omega [1] ;
    Info [UMFPACK_OMEGA2] = omega [2] ;

    /* ---------------------------------------------------------------------- */
    /* stop the iterations if the backward error is small, or NaN */
    /* ---------------------------------------------------------------------- */

    Info [UMFPACK_IR_TAKEN] = step ;
    Info [UMFPACK_IR_ATTEMPTED] = step ;

    if (SCALAR_IS_NAN (omega [0]))
    {
	DEBUG0 (("omega[0] is NaN - done.\n")) ;
	ASSERT (UMF_report_vector (n, (double *) X, (double *) NULL, UMF_debug,
	    FALSE, FALSE) == UMFPACK_OK) ;
	return (TRUE) ;
    }

    if (omega [0] < DBL_EPSILON)    /* double relop, but no NaN case here */
    {
	DEBUG0 (("omega[0] too small - done.\n")) ;
	ASSERT (UMF_report_vector (n, (double *) X, (double *) NULL, UMF_debug,
	    FALSE, FALSE) == UMFPACK_OK) ;
	return (TRUE) ;
    }

    /* ---------------------------------------------------------------------- */
    /* stop if insufficient decrease in omega */
    /* ---------------------------------------------------------------------- */

    /* double relop, but no NaN case here: */
    if (step > 0 && omega [0] > last_omega [0] / 2)
    {
	DEBUG0 (("stop refinement\n")) ;
	if (omega [0] > last_omega [0])
	{
	    /* last iteration better than this one, reinstate it */
	    DEBUG0 (("last iteration better\n")) ;
	    for (i = 0 ; i < n ; i++)
	    {
		X [i] = S [i] ;
	    }
	    Info [UMFPACK_OMEGA1] = last_omega [1] ;
	    Info [UMFPACK_OMEGA2] = last_omega [2] ;
	}
	Info [UMFPACK_IR_TAKEN] = step - 1 ;
	ASSERT (UMF_report_vector (n, (double *) X, (double *) NULL, UMF_debug,
	    FALSE, FALSE) == UMFPACK_OK) ;
	return (TRUE) ;
    }

    /* ---------------------------------------------------------------------- */
    /* save current solution in case we need to reinstate */
    /* ---------------------------------------------------------------------- */

    for (i = 0 ; i < n ; i++)
    {
	S [i] = X [i] ;
    }

    /* ---------------------------------------------------------------------- */
    /* iterative refinement continues */
    /* ---------------------------------------------------------------------- */

    ASSERT (UMF_report_vector (n, (double *) X, (double *) NULL, UMF_debug,
	FALSE, FALSE) == UMFPACK_OK) ;
    return (FALSE) ;
}
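/* A standalone sketch of the quantity do_step bounds: the componentwise
 * backward error omega = max_i |b - A x|_i / ((|A| |x|)_i + |b|_i), written
 * out for a small dense real system.  This illustrates the formula only; the
 * routine above splits it into omega [1] and omega [2], using the threshold
 * tau to guard against tiny denominators.  The function name and the dense,
 * column-major storage are assumptions for the sketch. */

#include <math.h>

static double componentwise_backward_error (int n, const double A [ ],
    const double x [ ], const double b [ ])
{
    double omega = 0., ri, di, wi ;
    int i, j ;
    for (i = 0 ; i < n ; i++)
    {
	ri = b [i] ;		/* residual (b - A x)_i */
	di = fabs (b [i]) ;	/* denominator (|A| |x| + |b|)_i */
	for (j = 0 ; j < n ; j++)
	{
	    ri -= A [i + j*n] * x [j] ;
	    di += fabs (A [i + j*n]) * fabs (x [j]) ;
	}
	if (di > 0.)
	{
	    wi = fabs (ri) / di ;
	    omega = (wi > omega) ? wi : omega ;
	}
    }
    return (omega) ;
}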
Example #9
static double do_1_solve (cholmod_sparse *A, cholmod_dense *B,
    cholmod_dense *Xknown, Int *Puser, Int *Quser,
    KLU_common *Common, cholmod_common *ch, Int *nan)
{
    Int *Ai, *Ap ;
    double *Ax, *Bx, *Xknownx, *Xx, *Ax2, *Axx ;
    KLU_symbolic *Symbolic = NULL ; 
    KLU_numeric *Numeric = NULL ;
    cholmod_dense *X = NULL, *R = NULL ;
    cholmod_sparse *AT = NULL, *A2 = NULL, *AT2 = NULL ;
    double one [2], minusone [2],
	rnorm, anorm, bnorm, xnorm, relresid, relerr, err = 0. ;
    Int i, j, nrhs2, isreal, n, nrhs, transpose, step, k, save, tries ;

    printf ("\ndo_1_solve: btf "ID" maxwork %g scale "ID" ordering "ID" user: "******" P,Q: %d halt: "ID"\n",
	Common->btf, Common->maxwork, Common->scale, Common->ordering,
	Common->user_data ?  (*((Int *) Common->user_data)) : -1,
	(Puser != NULL || Quser != NULL), Common->halt_if_singular) ;
    fflush (stdout) ;
    fflush (stderr) ;

    CHOLMOD_print_sparse (A, "A", ch) ;
    CHOLMOD_print_dense (B, "B", ch) ;

    Ap = A->p ;
    Ai = A->i ;
    Ax = A->x ;
    n = A->nrow ;
    isreal = (A->xtype == CHOLMOD_REAL) ;
    Bx = B->x ;
    Xknownx = Xknown->x ;
    nrhs = B->ncol ;

    one [0] = 1 ;
    one [1] = 0 ;

    minusone [0] = -1 ;
    minusone [1] = 0 ;

    /* ---------------------------------------------------------------------- */
    /* symbolic analysis */
    /* ---------------------------------------------------------------------- */

    Symbolic = NULL ;
    my_tries = 0 ;
    for (tries = 0 ; Symbolic == NULL && my_tries == 0 ; tries++)
    {
	my_tries = tries ;
	if (Puser != NULL || Quser != NULL)
	{
	    Symbolic = klu_analyze_given (n, Ap, Ai, Puser, Quser, Common) ;
	}
	else
	{
	    Symbolic = klu_analyze (n, Ap, Ai, Common) ;
	}
    }
    printf ("sym try "ID" btf "ID" ordering "ID"\n",
	tries, Common->btf, Common->ordering) ;
    if (Symbolic == NULL)
    {
	printf ("Symbolic is null\n") ;
	return (998) ;
    }
    my_tries = -1 ;

    /* create a modified version of A */

    A2 = CHOLMOD_copy_sparse (A, ch) ;
    Ax2 = A2->x ;
    my_srand (42) ;
    for (k = 0 ; k < Ap [n] * (isreal ? 1:2) ; k++)
    {
	Ax2 [k] = Ax [k] * 
	    (1 + 1e-4 * ((double) my_rand ( )) / ((double) MY_RAND_MAX)) ;
    }

    AT = isreal ? NULL : CHOLMOD_transpose (A, 1, ch) ;
    AT2 = isreal ? NULL : CHOLMOD_transpose (A2, 1, ch) ;

    /* ---------------------------------------------------------------------- */
    /* factorize then solve */
    /* ---------------------------------------------------------------------- */

    for (step = 1 ; step <= 3 ; step++)
    {
	printf ("step: "ID"\n", step) ;
	fflush (stdout) ;

	/* ------------------------------------------------------------------ */
	/* factorization or refactorization */
	/* ------------------------------------------------------------------ */

	/* step 1: factor
	   step 2: refactor with same A
	   step 3: refactor with modified A, and scaling forced on
	   and solve each time
	*/

	if (step == 1)
	{
	    /* numeric factorization */

	    Numeric = NULL ;
	    my_tries = 0 ;
	    for (tries = 0 ; Numeric == NULL && my_tries == 0 ; tries++)
	    {
		my_tries = tries ;
		if (isreal)
		{
		    Numeric = klu_factor (Ap, Ai, Ax, Symbolic, Common) ;
		}
		else
		{
		    Numeric = klu_z_factor (Ap, Ai, Ax, Symbolic, Common) ;
		}
	    }
	    printf ("num try "ID" btf "ID"\n", tries, Common->btf) ;
	    my_tries = -1 ;

	    if (Common->status == KLU_OK ||
	       (Common->status == KLU_SINGULAR && !Common->halt_if_singular))
	    {
		OK (Numeric) ;
	    }
	    else
	    {
		FAIL (Numeric) ;
	    }

	    if (Common->status < KLU_OK)
	    {
		printf ("factor failed: "ID"\n", Common->status) ;
	    }

	}
	else if (step == 2)
	{

	    /* numeric refactorization with same values, same scaling */
	    if (isreal)
	    {
		klu_refactor (Ap, Ai, Ax, Symbolic, Numeric, Common) ;
	    }
	    else
	    {
		klu_z_refactor (Ap, Ai, Ax, Symbolic, Numeric, Common) ;
	    }

	}
	else
	{

	    /* numeric refactorization with different values */
	    save = Common->scale ;
	    if (Common->scale == 0)
	    {
		Common->scale = 1 ;
	    }
	    for (tries = 0 ; tries <= 1 ; tries++)
	    {
		my_tries = tries ;
		if (isreal)
		{
		    klu_refactor (Ap, Ai, Ax2, Symbolic, Numeric, Common) ;
		}
		else
		{
		    klu_z_refactor (Ap, Ai, Ax2, Symbolic, Numeric, Common) ;
		}
	    }
	    my_tries = -1 ;
	    Common->scale = save ;
	}

	if (Common->status == KLU_SINGULAR)
	{
	    printf ("# singular column : "ID"\n", Common->singular_col) ;
	}

	/* ------------------------------------------------------------------ */
	/* diagnostics */
	/* ------------------------------------------------------------------ */

	Axx = (step == 3) ? Ax2 : Ax ;

	if (isreal)
	{
	    klu_rgrowth (Ap, Ai, Axx, Symbolic, Numeric, Common) ;
	    klu_condest (Ap, Axx, Symbolic, Numeric, Common) ;
	    klu_rcond (Symbolic, Numeric, Common) ;
	    klu_flops (Symbolic, Numeric, Common) ;
	}
	else
	{
	    klu_z_rgrowth (Ap, Ai, Axx, Symbolic, Numeric, Common) ;
	    klu_z_condest (Ap, Axx, Symbolic, Numeric, Common) ;
	    klu_z_rcond (Symbolic, Numeric, Common) ;
	    klu_z_flops (Symbolic, Numeric, Common) ;
	}

	printf ("growth %g condest %g rcond %g flops %g\n",
	    Common->rgrowth, Common->condest, Common->rcond, Common->flops) ;

	ludump (Symbolic, Numeric, isreal, ch, Common) ;

	if (Numeric == NULL || Common->status < KLU_OK)
	{
	    continue ;
	}

	/* ------------------------------------------------------------------ */
	/* solve */
	/* ------------------------------------------------------------------ */

	/* forward/backsolve to solve A*X=B or A'*X=B */ 
	for (transpose = (isreal ? 0 : -1) ; transpose <= 1 ; transpose++)
	{

	    for (nrhs2 = 1 ; nrhs2 <= nrhs ; nrhs2++)
	    {
		/* mangle B so that it has only nrhs2 columns */
		B->ncol = nrhs2 ;

		X = CHOLMOD_copy_dense (B, ch) ;
		CHOLMOD_print_dense (X, "X before solve", ch) ;
		Xx = X->x ;

		if (isreal)
		{
		    if (transpose)
		    {
			/* solve A'x=b */
			klu_tsolve (Symbolic, Numeric, n, nrhs2, Xx, Common) ;
		    }
		    else
		    {
			/* solve A*x=b */
			klu_solve (Symbolic, Numeric, n, nrhs2, Xx, Common) ;
		    }
		}
		else
		{
		    if (transpose)
		    {
			/* solve A'x=b (if 1) or A.'x=b (if -1) */
			klu_z_tsolve (Symbolic, Numeric, n, nrhs2, Xx,
			    (transpose == 1), Common) ;
		    }
		    else
		    {
			/* solve A*x=b */
			klu_z_solve (Symbolic, Numeric, n, nrhs2, Xx, Common) ;
		    }
		}

		CHOLMOD_print_dense (X, "X", ch) ;

		/* compute the residual, R = B-A*X, B-A'*X, or B-A.'*X */
		R = CHOLMOD_copy_dense (B, ch) ;
		if (transpose == -1)
		{
		    /* R = B-A.'*X (use A.' explicitly) */
		    CHOLMOD_sdmult ((step == 3) ? AT2 : AT,
			0, minusone, one, X, R, ch) ;
		}
		else
		{
		    /* R = B-A*X or B-A'*X */
		    CHOLMOD_sdmult ((step == 3) ? A2 : A,
			transpose, minusone, one, X, R, ch) ;
		}

		CHOLMOD_print_dense (R, "R", ch) ;

		/* compute the norms of R, A, X, and B */
		rnorm = CHOLMOD_norm_dense (R, 1, ch) ;
		anorm = CHOLMOD_norm_sparse ((step == 3) ? A2 : A, 1, ch) ;
		xnorm = CHOLMOD_norm_dense (X, 1, ch) ;
		bnorm = CHOLMOD_norm_dense (B, 1, ch) ;

		CHOLMOD_free_dense (&R, ch) ;

		/* relative residual = norm (r) / (norm (A) * norm (x)) */
		relresid = rnorm ;
		if (anorm > 0)
		{
		    relresid /= anorm ;
		}
		if (xnorm > 0)
		{
		    relresid /= xnorm ;
		}

		if (SCALAR_IS_NAN (relresid))
		{
		    *nan = TRUE ;
		}
		else
		{
		    err = MAX (err, relresid) ;
		}

		/* relative error = norm (x - xknown) / norm (xknown) */
		/* overwrite X with X - Xknown */
		if (transpose || step == 3)
		{
		    /* not computed */
		    relerr = -1 ;
		}
		else
		{
		    for (j = 0 ; j < nrhs2 ; j++)
		    {
			for (i = 0 ; i < n ; i++)
			{
			    if (isreal)
			    {
				Xx [i+j*n] -= Xknownx [i+j*n] ;
			    }
			    else
			    {
				Xx [2*(i+j*n)  ] -= Xknownx [2*(i+j*n)  ] ;
				Xx [2*(i+j*n)+1] -= Xknownx [2*(i+j*n)+1] ;
			    }
			}
		    }
		    relerr = CHOLMOD_norm_dense (X, 1, ch) ;
		    xnorm = CHOLMOD_norm_dense (Xknown, 1, ch) ;
		    if (xnorm > 0)
		    {
			relerr /= xnorm ;
		    }

		    if (SCALAR_IS_NAN (relerr))
		    {
			*nan = TRUE ;
		    }
		    else
		    {
			err = MAX (relerr, err) ;
		    }

		}

		CHOLMOD_free_dense (&X, ch) ;

		printf (ID" "ID" relresid %10.3g   relerr %10.3g %g\n", 
		    transpose, nrhs2, relresid, relerr, err) ;

		B->ncol = nrhs ;    /* restore B */
	    }
	}
    }

    /* ---------------------------------------------------------------------- */
    /* free factorization and temporary matrices, and return */
    /* ---------------------------------------------------------------------- */

    klu_free_symbolic (&Symbolic, Common) ;
    if (isreal)
    {
	klu_free_numeric (&Numeric, Common) ;
    }
    else
    {
	klu_z_free_numeric (&Numeric, Common) ;
    }
    CHOLMOD_free_sparse (&A2, ch) ;
    CHOLMOD_free_sparse (&AT, ch) ;
    CHOLMOD_free_sparse (&AT2, ch) ;
    fflush (stdout) ;
    fflush (stderr) ;
    return (err) ;
}
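/* A hedged sketch of the basic KLU call sequence this test driver exercises
 * (real, int version): set defaults, analyze once, factor, then solve in
 * place.  The wrapper name klu_solve_Axb is illustrative; Ap, Ai, Ax, b are
 * assumed to hold a valid n-by-n system, and klu_solve overwrites b with the
 * solution. */

#include "klu.h"

static int klu_solve_Axb (int n, int Ap [ ], int Ai [ ], double Ax [ ],
    double b [ ])
{
    klu_symbolic *Symbolic ;
    klu_numeric *Numeric ;
    klu_common Common ;

    klu_defaults (&Common) ;
    Symbolic = klu_analyze (n, Ap, Ai, &Common) ;
    if (Symbolic == NULL) return (Common.status) ;

    Numeric = klu_factor (Ap, Ai, Ax, Symbolic, &Common) ;
    if (Numeric == NULL)
    {
	klu_free_symbolic (&Symbolic, &Common) ;
	return (Common.status) ;
    }

    klu_solve (Symbolic, Numeric, n, 1, b, &Common) ;	/* b := A \ b */

    klu_free_numeric (&Numeric, &Common) ;
    klu_free_symbolic (&Symbolic, &Common) ;
    return (Common.status) ;
}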