/* Print a dense vector Xx (with imaginary part Xz in the complex version),
 * governed by Control [UMFPACK_PRL].  Nothing is printed unless the print
 * level exceeds 2.  Returns UMFPACK_OK, or the status of UMF_report_vector. */
GLOBAL Int UMFPACK_report_vector
(
    Int n,
    const double Xx [ ],
#ifdef COMPLEX
    const double Xz [ ],
#endif
    const double Control [UMFPACK_CONTROL]
)
{
    Int prl ;

#ifndef COMPLEX
    /* real version: there is no imaginary part */
    double *Xz = (double *) NULL ;
#endif

    prl = GET_CONTROL (UMFPACK_PRL, UMFPACK_DEFAULT_PRL) ;

    /* report only when the requested print level asks for it */
    if (prl > 2)
    {
	return (UMF_report_vector (n, Xx, Xz, prl, TRUE, FALSE)) ;
    }
    return (UMFPACK_OK) ;
}
/* Print the contents of a Numeric object (the LU factors from
 * umfpack_*_numeric), governed by Control [UMFPACK_PRL].  Validates the
 * object, reports its size and statistics, then prints the scale factors,
 * the row and column permutations, L, U, and the diagonal of U.
 * Returns UMFPACK_OK, UMFPACK_ERROR_invalid_Numeric_object,
 * or UMFPACK_ERROR_out_of_memory. */
GLOBAL Int UMFPACK_report_numeric
(
    void *NumericHandle,		/* opaque handle from umfpack_*_numeric */
    const double Control [UMFPACK_CONTROL]
)
{
    Int prl, *W, nn, n_row, n_col, n_inner, num_fixed_size, numeric_size,
	npiv ;
    NumericType *Numeric ;

    prl = GET_CONTROL (UMFPACK_PRL, UMFPACK_DEFAULT_PRL) ;

    /* print level 2 or less: print nothing */
    if (prl <= 2)
    {
	return (UMFPACK_OK) ;
    }

    PRINTF (("Numeric object: ")) ;

    Numeric = (NumericType *) NumericHandle ;
    if (!UMF_valid_numeric (Numeric))
    {
	PRINTF (("ERROR: LU factors invalid\n\n")) ;
	return (UMFPACK_ERROR_invalid_Numeric_object) ;
    }

    n_row = Numeric->n_row ;
    n_col = Numeric->n_col ;
    nn = MAX (n_row, n_col) ;		/* workspace size for perm checks */
    n_inner = MIN (n_row, n_col) ;	/* length of the diagonal of U */
    npiv = Numeric->npiv ;
    DEBUG1 (("n_row "ID" n_col "ID" nn "ID" n_inner "ID" npiv "ID"\n",
	n_row, n_col, nn, n_inner, npiv)) ;

    /* size of Numeric object, except Numeric->Memory and Numeric->Upattern */
    /* see also UMF_set_stats */
    num_fixed_size =
	UNITS (NumericType, 1)		/* Numeric structure */
	+ UNITS (Entry, n_inner+1)	/* D */
	+ UNITS (Int, n_row+1)		/* Rperm */
	+ UNITS (Int, n_col+1)		/* Cperm */
	+ 6 * UNITS (Int, npiv+1)	/* Lpos, Uilen, Uip, Upos, Lilen, Lip */
	+ ((Numeric->scale != UMFPACK_SCALE_NONE) ?
	    UNITS (Entry, n_row) : 0) ;	/* Rs */

    DEBUG1 (("num fixed size: "ID"\n", num_fixed_size)) ;
    DEBUG1 (("Numeric->size "ID"\n", Numeric->size)) ;
    DEBUG1 (("ulen units "ID"\n", UNITS (Int, Numeric->ulen))) ;

    /* size of Numeric->Memory is Numeric->size */
    /* size of Numeric->Upattern is Numeric->ulen */
    numeric_size = num_fixed_size + Numeric->size
	+ UNITS (Int, Numeric->ulen) ;

    DEBUG1 (("numeric total size "ID"\n", numeric_size)) ;

    /* print level 4 or more: report sizes and statistics in detail */
    if (prl >= 4)
    {
	PRINTF (("\n n_row: "ID" n_col: "ID"\n", n_row, n_col)) ;
	PRINTF ((" relative pivot tolerance used: %g\n",
	    Numeric->relpt)) ;
	PRINTF ((" relative symmetric pivot tolerance used: %g\n",
	    Numeric->relpt2)) ;
	PRINTF ((" matrix scaled: ")) ;
	if (Numeric->scale == UMFPACK_SCALE_NONE)
	{
	    PRINTF (("no")) ;
	}
	else if (Numeric->scale == UMFPACK_SCALE_SUM)
	{
	    PRINTF (("yes (divided each row by sum abs value in each row)\n")) ;
	    PRINTF ((" minimum sum (abs (rows of A)): %.5e\n",
		Numeric->rsmin)) ;
	    PRINTF ((" maximum sum (abs (rows of A)): %.5e",
		Numeric->rsmax)) ;
	}
	else if (Numeric->scale == UMFPACK_SCALE_MAX)
	{
	    PRINTF (("yes (divided each row by max abs value in each row)\n")) ;
	    PRINTF ((" minimum max (abs (rows of A)): %.5e\n",
		Numeric->rsmin)) ;
	    PRINTF ((" maximum max (abs (rows of A)): %.5e",
		Numeric->rsmax)) ;
	}
	PRINTF (("\n")) ;
	PRINTF ((" initial allocation parameter used: %g\n",
	    Numeric->alloc_init)) ;
	PRINTF ((" frontal matrix allocation parameter used: %g\n",
	    Numeric->front_alloc_init)) ;
	PRINTF ((" final total size of Numeric object (Units): "ID"\n",
	    numeric_size)) ;
	PRINTF ((" final total size of Numeric object (MBytes): %.1f\n",
	    MBYTES (numeric_size))) ;
	PRINTF ((" peak size of variable-size part (Units): "ID"\n",
	    Numeric->max_usage)) ;
	PRINTF ((" peak size of variable-size part (MBytes): %.1f\n",
	    MBYTES (Numeric->max_usage))) ;
	PRINTF ((" largest actual frontal matrix size: "ID"\n",
	    Numeric->maxfrsize)) ;
	PRINTF ((" memory defragmentations: "ID"\n",
	    Numeric->ngarbage)) ;
	PRINTF ((" memory reallocations: "ID"\n",
	    Numeric->nrealloc)) ;
	PRINTF ((" costly memory reallocations: "ID"\n",
	    Numeric->ncostly)) ;
	PRINTF ((" entries in compressed pattern (L and U): "ID"\n",
	    Numeric->isize)) ;
	PRINTF ((" number of nonzeros in L (excl diag): "ID"\n",
	    Numeric->lnz)) ;
	PRINTF ((" number of entries stored in L (excl diag): "ID"\n",
	    Numeric->nLentries)) ;
	PRINTF ((" number of nonzeros in U (excl diag): "ID"\n",
	    Numeric->unz)) ;
	PRINTF ((" number of entries stored in U (excl diag): "ID"\n",
	    Numeric->nUentries)) ;
	PRINTF ((" factorization floating-point operations: %g\n",
	    Numeric->flops)) ;
	PRINTF ((" number of nonzeros on diagonal of U: "ID"\n",
	    Numeric->nnzpiv)) ;
	PRINTF ((" min abs. value on diagonal of U: %.5e\n",
	    Numeric->min_udiag)) ;
	PRINTF ((" max abs. value on diagonal of U: %.5e\n",
	    Numeric->max_udiag)) ;
	PRINTF ((" reciprocal condition number estimate: %.2e\n",
	    Numeric->rcond)) ;
    }

    /* workspace of size nn, used by UMF_report_perm and report_L/report_U */
    W = (Int *) UMF_malloc (nn, sizeof (Int)) ;
    if (!W)
    {
	PRINTF ((" ERROR: out of memory to check Numeric object\n\n")) ;
	return (UMFPACK_ERROR_out_of_memory) ;
    }

    /* report the row scale factors, if present */
    if (Numeric->Rs)
    {
#ifndef NRECIPROCAL
	/* do_recip means the reciprocals of Rs are stored, and scaling is
	 * applied by multiplication instead of division */
	if (Numeric->do_recip)
	{
	    PRINTF4 (("\nScale factors applied via multiplication\n")) ;
	}
	else
#endif
	{
	    PRINTF4 (("\nScale factors applied via division\n")) ;
	}
	PRINTF4 (("Scale factors, Rs: ")) ;
	(void) UMF_report_vector (n_row, Numeric->Rs, (double *) NULL,
	    prl, FALSE, TRUE) ;
    }
    else
    {
	PRINTF4 (("Scale factors, Rs: (not present)\n")) ;
    }

    /* report and validate the row permutation; free W on failure */
    PRINTF4 (("\nP: row ")) ;
    if (UMF_report_perm (n_row, Numeric->Rperm, W, prl, 0) != UMFPACK_OK)
    {
	(void) UMF_free ((void *) W) ;
	return (UMFPACK_ERROR_invalid_Numeric_object) ;
    }

    /* report and validate the column permutation; free W on failure */
    PRINTF4 (("\nQ: column ")) ;
    if (UMF_report_perm (n_col, Numeric->Cperm, W, prl, 0) != UMFPACK_OK)
    {
	(void) UMF_free ((void *) W) ;
	return (UMFPACK_ERROR_invalid_Numeric_object) ;
    }

    /* report and validate the L factor */
    if (!report_L (Numeric, W, prl))
    {
	(void) UMF_free ((void *) W) ;
	PRINTF ((" ERROR: L factor invalid\n\n")) ;
	return (UMFPACK_ERROR_invalid_Numeric_object) ;
    }

    /* report and validate the U factor */
    if (!report_U (Numeric, W, prl))
    {
	(void) UMF_free ((void *) W) ;
	PRINTF ((" ERROR: U factor invalid\n\n")) ;
	return (UMFPACK_ERROR_invalid_Numeric_object) ;
    }

    /* The diagonal of U is in "merged" (Entry) form, not "split" form. */
    PRINTF4 (("\ndiagonal of U: ")) ;
    (void) UMF_report_vector (n_inner, (double *) Numeric->D,
	(double *) NULL, prl, FALSE, FALSE) ;

    (void) UMF_free ((void *) W) ;

    PRINTF4 ((" Numeric object: ")) ;
    PRINTF (("OK\n\n")) ;
    return (UMFPACK_OK) ;
}
PRIVATE Int do_step		/* return TRUE if iterative refinement done */
(
    double omega [3],
    Int step,			/* which step of iterative refinement to do */
    const double B2 [ ],	/* abs (B) */
    Entry X [ ],
    const Entry W [ ],
    const double Y [ ],
    const double Z2 [ ],
    Entry S [ ],
    Int n,
    double Info [UMFPACK_INFO]
)
{
    double prev_omega [3], tau, nctau, den1, rel1, den2, rel2, absx, yx,
	absw, xmax ;
    Int k ;

    /* DBL_EPSILON is a standard ANSI C term defined in <float.h> */
    /* It is the smallest positive x such that 1.0+x != 1.0 */

    nctau = 1000 * n * DBL_EPSILON ;
    DEBUG0 (("do_step start: nctau = %30.20e\n", nctau)) ;
    ASSERT (UMF_report_vector (n, (double *) X, (double *) NULL, UMF_debug,
	FALSE, FALSE) == UMFPACK_OK) ;

    /* for the approximate flop count, den1 > tau is assumed always true; */
    /* flops += (2*ABS_FLOPS + 5) * n is accounted for in UMF_solve, above */

    /* ---------------------------------------------------------------- */
    /* save the last iteration in case we need to reinstate it */
    /* ---------------------------------------------------------------- */

    prev_omega [0] = omega [0] ;
    prev_omega [1] = omega [1] ;
    prev_omega [2] = omega [2] ;

    /* ---------------------------------------------------------------- */
    /* compute sparse backward errors: omega [1] and omega [2] */
    /* ---------------------------------------------------------------- */

    /* xmax = ||x|| maxnorm (or NaN, if any entry of X is NaN) */
    xmax = 0.0 ;
    for (k = 0 ; k < n ; k++)
    {
	/* absx = |X [k]| */
	ABS (absx, X [k]) ;
	if (SCALAR_IS_NAN (absx))
	{
	    xmax = absx ;
	    break ;
	}
	/* no NaN's to consider here: */
	xmax = MAX (xmax, absx) ;
    }

    omega [1] = 0. ;
    omega [2] = 0. ;
    for (k = 0 ; k < n ; k++)
    {
	yx = Y [k] * xmax ;
	tau = (yx + B2 [k]) * nctau ;
	den1 = Z2 [k] + B2 [k] ;
	/* absw = |W [k]|, the residual magnitude for row k */
	ABS (absw, W [k]) ;
	if (SCALAR_IS_NAN (den1))
	{
	    omega [1] = den1 ;
	    omega [2] = den1 ;
	    break ;
	}
	if (SCALAR_IS_NAN (tau))
	{
	    omega [1] = tau ;
	    omega [2] = tau ;
	    break ;
	}
	if (den1 > tau)		/* a double relop, but no NaN's here */
	{
	    rel1 = absw / den1 ;
	    omega [1] = MAX (omega [1], rel1) ;
	}
	else if (tau > 0.0)	/* a double relop, but no NaN's here */
	{
	    den2 = Z2 [k] + yx ;
	    rel2 = absw / den2 ;
	    omega [2] = MAX (omega [2], rel2) ;
	}
    }
    omega [0] = omega [1] + omega [2] ;
    Info [UMFPACK_OMEGA1] = omega [1] ;
    Info [UMFPACK_OMEGA2] = omega [2] ;

    /* ---------------------------------------------------------------- */
    /* stop the iterations if the backward error is small, or NaN */
    /* ---------------------------------------------------------------- */

    Info [UMFPACK_IR_TAKEN] = step ;
    Info [UMFPACK_IR_ATTEMPTED] = step ;

    if (SCALAR_IS_NAN (omega [0]))
    {
	DEBUG0 (("omega[0] is NaN - done.\n")) ;
	ASSERT (UMF_report_vector (n, (double *) X, (double *) NULL,
	    UMF_debug, FALSE, FALSE) == UMFPACK_OK) ;
	return (TRUE) ;
    }

    if (omega [0] < DBL_EPSILON)	/* double relop, but no NaN case here */
    {
	DEBUG0 (("omega[0] too small - done.\n")) ;
	ASSERT (UMF_report_vector (n, (double *) X, (double *) NULL,
	    UMF_debug, FALSE, FALSE) == UMFPACK_OK) ;
	return (TRUE) ;
    }

    /* ---------------------------------------------------------------- */
    /* stop if insufficient decrease in omega */
    /* ---------------------------------------------------------------- */

    /* double relop, but no NaN case here: */
    if (step > 0 && omega [0] > prev_omega [0] / 2)
    {
	DEBUG0 (("stop refinement\n")) ;
	if (omega [0] > prev_omega [0])
	{
	    /* last iteration better than this one, reinstate it */
	    DEBUG0 (("last iteration better\n")) ;
	    for (k = 0 ; k < n ; k++)
	    {
		X [k] = S [k] ;
	    }
	    Info [UMFPACK_OMEGA1] = prev_omega [1] ;
	    Info [UMFPACK_OMEGA2] = prev_omega [2] ;
	}
	Info [UMFPACK_IR_TAKEN] = step - 1 ;
	ASSERT (UMF_report_vector (n, (double *) X, (double *) NULL,
	    UMF_debug, FALSE, FALSE) == UMFPACK_OK) ;
	return (TRUE) ;
    }

    /* ---------------------------------------------------------------- */
    /* save current solution in case we need to reinstate */
    /* ---------------------------------------------------------------- */

    for (k = 0 ; k < n ; k++)
    {
	S [k] = X [k] ;
    }

    /* ---------------------------------------------------------------- */
    /* iterative refinement continues */
    /* ---------------------------------------------------------------- */

    ASSERT (UMF_report_vector (n, (double *) X, (double *) NULL, UMF_debug,
	FALSE, FALSE) == UMFPACK_OK) ;
    return (FALSE) ;
}