/*! _zrovector*zhematrix operator */
inline _zrovector operator*(const _zrovector& vec, const zhematrix& mat)
{
#ifdef CPPL_VERBOSE
  std::cerr << "# [MARK] operator*(const _zrovector&, const zhematrix&)" << std::endl;
#endif//CPPL_VERBOSE
  
#ifdef CPPL_DEBUG
  if(vec.L!=mat.N){
    std::cerr << "[ERROR] operator*(const _zrovector&, const zhematrix&)" << std::endl
              << "These vector and matrix can not make a product." << std::endl
              << "Your input was (" << vec.L << ") * (" << mat.N << "x" << mat.N << ")." << std::endl;
    exit(1);
  }
#endif//CPPL_DEBUG
  
  zrovector newvec(mat.N);
  zhemv_( 'L', mat.N, std::complex<double>(1.0,0.0), mat.Array, mat.N,
          vec.Array, 1, std::complex<double>(0.0,0.0), newvec.Array, 1 );
  
  vec.destroy();
  return _(newvec);
}
/* f2c-style wrapper: forwards all pointer arguments unchanged to the
   Fortran BLAS routine zhemv_ (y := alpha*A*x + beta*y, A Hermitian). */
int f2c_zhemv(char* uplo, integer* N, doublecomplex* alpha, doublecomplex* A,
              integer* lda, doublecomplex* X, integer* incX,
              doublecomplex* beta, doublecomplex* Y, integer* incY)
{
    zhemv_(uplo, N, alpha, A, lda, X, incX, beta, Y, incY);
    return 0;
}
/*! zrovector*zhematrix operator */
inline _zrovector operator*(const zrovector& vec, const zhematrix& mat)
{VERBOSE_REPORT;
#ifdef CPPL_DEBUG
  if(vec.l!=mat.n){
    ERROR_REPORT;
    std::cerr << "These vector and matrix can not make a product." << std::endl
              << "Your input was (" << vec.l << ") * (" << mat.n << "x" << mat.n << ")." << std::endl;
    exit(1);
  }
#endif//CPPL_DEBUG
  
  zrovector newvec(mat.n);
  zhemv_( 'l', mat.n, comple(1.0,0.0), mat.array, mat.n,
          vec.array, 1, comple(0.0,0.0), newvec.array, 1 );
  
  return _(newvec);
}
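/* A minimal usage sketch for the operators above. It assumes the namespaced
   CPPLAPACK distribution (header cpplapack.h, namespace CPPL) and a
   BLAS/LAPACK library to link against; the accessor and stream-output
   behavior shown here follow that library's documented conventions. */
#include <iostream>
#include <complex>
#include <cpplapack.h>

int main()
{
  const long n(3);
  CPPL::zhematrix A(n);   // n-by-n complex Hermitian matrix
  CPPL::zrovector x(n);   // row vector of length n
  
  for(long i=0; i<n; i++){
    x(i) = std::complex<double>(i+1, 0.0);
    for(long j=0; j<=i; j++){      // fill the stored (lower) triangle
      A(i,j) = std::complex<double>(i+j, i-j);  // diagonal stays real (j==i)
    }
  }
  
  CPPL::zrovector y( x*A );  // dispatches to the zhemv_-backed operator above
  std::cout << y << std::endl;
  return 0;
}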
/* Subroutine */ int zporfs_(char *uplo, integer *n, integer *nrhs, doublecomplex *a, integer *lda, doublecomplex *af, integer *ldaf, doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx, doublereal *ferr, doublereal *berr, doublecomplex *work, doublereal * rwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5; doublereal d__1, d__2, d__3, d__4; doublecomplex z__1; /* Local variables */ integer i__, j, k; doublereal s, xk; integer nz; doublereal eps; integer kase; doublereal safe1, safe2; integer isave[3], count; logical upper; doublereal safmin; doublereal lstres; /* -- LAPACK routine (version 3.2) -- */ /* November 2006 */ /* Modified to call ZLACN2 in place of ZLACON, 10 Feb 03, SJH. */ /* Purpose */ /* ======= */ /* ZPORFS improves the computed solution to a system of linear */ /* equations when the coefficient matrix is Hermitian positive definite, */ /* and provides error bounds and backward error estimates for the */ /* solution. */ /* Arguments */ /* ========= */ /* UPLO (input) CHARACTER*1 */ /* = 'U': Upper triangle of A is stored; */ /* = 'L': Lower triangle of A is stored. */ /* N (input) INTEGER */ /* The order of the matrix A. N >= 0. */ /* NRHS (input) INTEGER */ /* The number of right hand sides, i.e., the number of columns */ /* of the matrices B and X. NRHS >= 0. */ /* A (input) COMPLEX*16 array, dimension (LDA,N) */ /* The Hermitian matrix A. If UPLO = 'U', the leading N-by-N */ /* upper triangular part of A contains the upper triangular part */ /* of the matrix A, and the strictly lower triangular part of A */ /* is not referenced. If UPLO = 'L', the leading N-by-N lower */ /* triangular part of A contains the lower triangular part of */ /* the matrix A, and the strictly upper triangular part of A is */ /* not referenced. */ /* LDA (input) INTEGER */ /* The leading dimension of the array A. LDA >= max(1,N). */ /* AF (input) COMPLEX*16 array, dimension (LDAF,N) */ /* The triangular factor U or L from the Cholesky factorization */ /* A = U**H*U or A = L*L**H, as computed by ZPOTRF. */ /* LDAF (input) INTEGER */ /* The leading dimension of the array AF. LDAF >= max(1,N). */ /* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */ /* The right hand side matrix B. */ /* LDB (input) INTEGER */ /* The leading dimension of the array B. LDB >= max(1,N). */ /* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */ /* On entry, the solution matrix X, as computed by ZPOTRS. */ /* On exit, the improved solution matrix X. */ /* LDX (input) INTEGER */ /* The leading dimension of the array X. LDX >= max(1,N). */ /* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */ /* The estimated forward error bound for each solution vector */ /* X(j) (the j-th column of the solution matrix X). */ /* If XTRUE is the true solution corresponding to X(j), FERR(j) */ /* is an estimated upper bound for the magnitude of the largest */ /* element in (X(j) - XTRUE) divided by the magnitude of the */ /* largest element in X(j). The estimate is as reliable as */ /* the estimate for RCOND, and is almost always a slight */ /* overestimate of the true error. */ /* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */ /* The componentwise relative backward error of each solution */ /* vector X(j) (i.e., the smallest relative change in */ /* any element of A or B that makes X(j) an exact solution). 
*/ /* WORK (workspace) COMPLEX*16 array, dimension (2*N) */ /* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */ /* INFO (output) INTEGER */ /* = 0: successful exit */ /* < 0: if INFO = -i, the i-th argument had an illegal value */ /* Internal Parameters */ /* =================== */ /* ITMAX is the maximum number of steps of iterative refinement. */ /* ==================================================================== */ /* Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; af_dim1 = *ldaf; af_offset = 1 + af_dim1; af -= af_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1; b -= b_offset; x_dim1 = *ldx; x_offset = 1 + x_dim1; x -= x_offset; --ferr; --berr; --work; --rwork; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*nrhs < 0) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } else if (*ldaf < max(1,*n)) { *info = -7; } else if (*ldb < max(1,*n)) { *info = -9; } else if (*ldx < max(1,*n)) { *info = -11; } if (*info != 0) { i__1 = -(*info); xerbla_("ZPORFS", &i__1); return 0; } /* Quick return if possible */ if (*n == 0 || *nrhs == 0) { i__1 = *nrhs; for (j = 1; j <= i__1; ++j) { ferr[j] = 0.; berr[j] = 0.; } return 0; } /* NZ = maximum number of nonzero elements in each row of A, plus 1 */ nz = *n + 1; eps = dlamch_("Epsilon"); safmin = dlamch_("Safe minimum"); safe1 = nz * safmin; safe2 = safe1 / eps; /* Do for each right hand side */ i__1 = *nrhs; for (j = 1; j <= i__1; ++j) { count = 1; lstres = 3.; L20: /* Loop until stopping criterion is satisfied. */ /* Compute residual R = B - A * X */ zcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1); z__1.r = -1., z__1.i = -0.; zhemv_(uplo, n, &z__1, &a[a_offset], lda, &x[j * x_dim1 + 1], &c__1, & c_b1, &work[1], &c__1); /* Compute componentwise relative backward error from formula */ /* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */ /* where abs(Z) is the componentwise absolute value of the matrix */ /* or vector Z. If the i-th component of the denominator is less */ /* than SAFE2, then SAFE1 is added to the i-th components of the */ /* numerator and denominator before dividing. */ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = i__ + j * b_dim1; rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[ i__ + j * b_dim1]), abs(d__2)); } /* Compute abs(A)*abs(X) + abs(B). 
*/ if (upper) { i__2 = *n; for (k = 1; k <= i__2; ++k) { s = 0.; i__3 = k + j * x_dim1; xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j * x_dim1]), abs(d__2)); i__3 = k - 1; for (i__ = 1; i__ <= i__3; ++i__) { i__4 = i__ + k * a_dim1; rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[i__ + k * a_dim1]), abs(d__2))) * xk; i__4 = i__ + k * a_dim1; i__5 = i__ + j * x_dim1; s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[ i__ + k * a_dim1]), abs(d__2))) * ((d__3 = x[i__5] .r, abs(d__3)) + (d__4 = d_imag(&x[i__ + j * x_dim1]), abs(d__4))); } i__3 = k + k * a_dim1; rwork[k] = rwork[k] + (d__1 = a[i__3].r, abs(d__1)) * xk + s; } } else { i__2 = *n; for (k = 1; k <= i__2; ++k) { s = 0.; i__3 = k + j * x_dim1; xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[k + j * x_dim1]), abs(d__2)); i__3 = k + k * a_dim1; rwork[k] += (d__1 = a[i__3].r, abs(d__1)) * xk; i__3 = *n; for (i__ = k + 1; i__ <= i__3; ++i__) { i__4 = i__ + k * a_dim1; rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[i__ + k * a_dim1]), abs(d__2))) * xk; i__4 = i__ + k * a_dim1; i__5 = i__ + j * x_dim1; s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a[ i__ + k * a_dim1]), abs(d__2))) * ((d__3 = x[i__5] .r, abs(d__3)) + (d__4 = d_imag(&x[i__ + j * x_dim1]), abs(d__4))); } rwork[k] += s; } } s = 0.; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { if (rwork[i__] > safe2) { /* Computing MAX */ i__3 = i__; d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 = d_imag(&work[i__]), abs(d__2))) / rwork[i__]; s = max(d__3,d__4); } else { /* Computing MAX */ i__3 = i__; d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 = d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__] + safe1); s = max(d__3,d__4); } } berr[j] = s; /* Test stopping criterion. Continue iterating if */ /* 1) The residual BERR(J) is larger than machine epsilon, and */ /* 2) BERR(J) decreased by at least a factor of 2 during the */ /* last iteration, and */ /* 3) At most ITMAX iterations tried. */ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) { /* Update solution and try again. */ zpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[1], n, info); zaxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1); lstres = berr[j]; ++count; goto L20; } /* Bound error from formula */ /* norm(X - XTRUE) / norm(X) .le. FERR = */ /* norm( abs(inv(A))* */ /* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */ /* where */ /* norm(Z) is the magnitude of the largest component of Z */ /* inv(A) is the inverse of A */ /* abs(Z) is the componentwise absolute value of the matrix or */ /* vector Z */ /* NZ is the maximum number of nonzeros in any row of A, plus 1 */ /* EPS is machine epsilon */ /* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */ /* is incremented by SAFE1 if the i-th component of */ /* abs(A)*abs(X) + abs(B) is less than SAFE2. */ /* Use ZLACN2 to estimate the infinity-norm of the matrix */ /* inv(A) * diag(W), */ /* where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { if (rwork[i__] > safe2) { i__3 = i__; rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 = d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__] ; } else { i__3 = i__; rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 = d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__] + safe1; } } kase = 0; L100: zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave); if (kase != 0) { if (kase == 1) { /* Multiply by diag(W)*inv(A'). 
*/ zpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[1], n, info); i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = i__; i__4 = i__; i__5 = i__; z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4] * work[i__5].i; work[i__3].r = z__1.r, work[i__3].i = z__1.i; } } else if (kase == 2) { /* Multiply by inv(A)*diag(W). */ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = i__; i__4 = i__; i__5 = i__; z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4] * work[i__5].i; work[i__3].r = z__1.r, work[i__3].i = z__1.i; } zpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[1], n, info); } goto L100; } /* Normalize error. */ lstres = 0.; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { /* Computing MAX */ i__3 = i__ + j * x_dim1; d__3 = lstres, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x[i__ + j * x_dim1]), abs(d__2)); lstres = max(d__3,d__4); } if (lstres != 0.) { ferr[j] /= lstres; } } return 0; /* End of ZPORFS */ } /* zporfs_ */
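/* Hedged driver sketch for ZPORFS: factor A with ZPOTRF, solve with ZPOTRS,
   then refine the solution and obtain error bounds with ZPORFS. Assumes a
   Fortran LAPACK exposing trailing-underscore symbols with 32-bit INTEGER,
   column-major storage, and std::complex<double> layout-compatible with
   COMPLEX*16 (guaranteed by the C++ standard). */
#include <complex>
#include <cstdio>
typedef std::complex<double> zdouble;

extern "C" {
void zpotrf_(const char* uplo, const int* n, zdouble* a, const int* lda, int* info);
void zpotrs_(const char* uplo, const int* n, const int* nrhs, const zdouble* a,
             const int* lda, zdouble* b, const int* ldb, int* info);
void zporfs_(const char* uplo, const int* n, const int* nrhs, const zdouble* a,
             const int* lda, const zdouble* af, const int* ldaf, const zdouble* b,
             const int* ldb, zdouble* x, const int* ldx, double* ferr, double* berr,
             zdouble* work, double* rwork, int* info);
}

int main()
{
  const int n = 2, nrhs = 1, lda = 2;
  /* Hermitian positive definite A = [ 4, 1+i ; 1-i, 3 ], column-major. */
  zdouble a[4]  = { {4,0}, {1,-1}, {1,1}, {3,0} };
  zdouble af[4] = { a[0], a[1], a[2], a[3] };        // copy to be factored
  zdouble b[2]  = { {1,0}, {2,0} };
  zdouble x[2]  = { b[0], b[1] };                    // right-hand side copy
  double ferr[1], berr[1], rwork[2];                 // rwork: N reals
  zdouble work[4];                                   // work: 2*N complex
  int info;
  
  zpotrf_("L", &n, af, &lda, &info);                 // A = L*L**H
  zpotrs_("L", &n, &nrhs, af, &lda, x, &n, &info);   // solve A*x = b
  zporfs_("L", &n, &nrhs, a, &lda, af, &lda, b, &n,
          x, &n, ferr, berr, work, rwork, &info);    // refine + error bounds
  std::printf("info=%d ferr=%g berr=%g\n", info, ferr[0], berr[0]);
  return 0;
}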
int zlatrd_(char *uplo, int *n, int *nb, doublecomplex *a, int *lda, double *e, doublecomplex *tau, doublecomplex *w, int *ldw) { /* System generated locals */ int a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3; double d__1; doublecomplex z__1, z__2, z__3, z__4; /* Local variables */ int i__, iw; doublecomplex alpha; extern int lsame_(char *, char *); extern int zscal_(int *, doublecomplex *, doublecomplex *, int *); extern /* Double Complex */ VOID zdotc_(doublecomplex *, int *, doublecomplex *, int *, doublecomplex *, int *); extern int zgemv_(char *, int *, int *, doublecomplex *, doublecomplex *, int *, doublecomplex *, int *, doublecomplex *, doublecomplex *, int *), zhemv_(char *, int *, doublecomplex *, doublecomplex *, int *, doublecomplex *, int *, doublecomplex *, doublecomplex *, int *), zaxpy_(int *, doublecomplex *, doublecomplex *, int *, doublecomplex *, int *), zlarfg_(int *, doublecomplex *, doublecomplex *, int *, doublecomplex *), zlacgv_(int *, doublecomplex *, int *); /* -- LAPACK auxiliary routine (version 3.2) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZLATRD reduces NB rows and columns of a complex Hermitian matrix A to */ /* Hermitian tridiagonal form by a unitary similarity */ /* transformation Q' * A * Q, and returns the matrices V and W which are */ /* needed to apply the transformation to the unreduced part of A. */ /* If UPLO = 'U', ZLATRD reduces the last NB rows and columns of a */ /* matrix, of which the upper triangle is supplied; */ /* if UPLO = 'L', ZLATRD reduces the first NB rows and columns of a */ /* matrix, of which the lower triangle is supplied. */ /* This is an auxiliary routine called by ZHETRD. */ /* Arguments */ /* ========= */ /* UPLO (input) CHARACTER*1 */ /* Specifies whether the upper or lower triangular part of the */ /* Hermitian matrix A is stored: */ /* = 'U': Upper triangular */ /* = 'L': Lower triangular */ /* N (input) INTEGER */ /* The order of the matrix A. */ /* NB (input) INTEGER */ /* The number of rows and columns to be reduced. */ /* A (input/output) COMPLEX*16 array, dimension (LDA,N) */ /* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */ /* n-by-n upper triangular part of A contains the upper */ /* triangular part of the matrix A, and the strictly lower */ /* triangular part of A is not referenced. If UPLO = 'L', the */ /* leading n-by-n lower triangular part of A contains the lower */ /* triangular part of the matrix A, and the strictly upper */ /* triangular part of A is not referenced. */ /* On exit: */ /* if UPLO = 'U', the last NB columns have been reduced to */ /* tridiagonal form, with the diagonal elements overwriting */ /* the diagonal elements of A; the elements above the diagonal */ /* with the array TAU, represent the unitary matrix Q as a */ /* product of elementary reflectors; */ /* if UPLO = 'L', the first NB columns have been reduced to */ /* tridiagonal form, with the diagonal elements overwriting */ /* the diagonal elements of A; the elements below the diagonal */ /* with the array TAU, represent the unitary matrix Q as a */ /* product of elementary reflectors. */ /* See Further Details. */ /* LDA (input) INTEGER */ /* The leading dimension of the array A. LDA >= MAX(1,N). 
*/ /* E (output) DOUBLE PRECISION array, dimension (N-1) */ /* If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal */ /* elements of the last NB columns of the reduced matrix; */ /* if UPLO = 'L', E(1:nb) contains the subdiagonal elements of */ /* the first NB columns of the reduced matrix. */ /* TAU (output) COMPLEX*16 array, dimension (N-1) */ /* The scalar factors of the elementary reflectors, stored in */ /* TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'. */ /* See Further Details. */ /* W (output) COMPLEX*16 array, dimension (LDW,NB) */ /* The n-by-nb matrix W required to update the unreduced part */ /* of A. */ /* LDW (input) INTEGER */ /* The leading dimension of the array W. LDW >= MAX(1,N). */ /* Further Details */ /* =============== */ /* If UPLO = 'U', the matrix Q is represented as a product of elementary */ /* reflectors */ /* Q = H(n) H(n-1) . . . H(n-nb+1). */ /* Each H(i) has the form */ /* H(i) = I - tau * v * v' */ /* where tau is a complex scalar, and v is a complex vector with */ /* v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), */ /* and tau in TAU(i-1). */ /* If UPLO = 'L', the matrix Q is represented as a product of elementary */ /* reflectors */ /* Q = H(1) H(2) . . . H(nb). */ /* Each H(i) has the form */ /* H(i) = I - tau * v * v' */ /* where tau is a complex scalar, and v is a complex vector with */ /* v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), */ /* and tau in TAU(i). */ /* The elements of the vectors v together form the n-by-nb matrix V */ /* which is needed, with W, to apply the transformation to the unreduced */ /* part of the matrix, using a Hermitian rank-2k update of the form: */ /* A := A - V*W' - W*V'. */ /* The contents of A on exit are illustrated by the following examples */ /* with n = 5 and nb = 2: */ /* if UPLO = 'U': if UPLO = 'L': */ /* ( a a a v4 v5 ) ( d ) */ /* ( a a v4 v5 ) ( 1 d ) */ /* ( a 1 v5 ) ( v1 1 a ) */ /* ( d 1 ) ( v1 v2 a a ) */ /* ( d ) ( v1 v2 a a a ) */ /* where d denotes a diagonal element of the reduced matrix, a denotes */ /* an element of the original matrix that is unchanged, and vi denotes */ /* an element of the vector defining H(i). */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. 
*/ /* Quick return if possible */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; --e; --tau; w_dim1 = *ldw; w_offset = 1 + w_dim1; w -= w_offset; /* Function Body */ if (*n <= 0) { return 0; } if (lsame_(uplo, "U")) { /* Reduce last NB columns of upper triangle */ i__1 = *n - *nb + 1; for (i__ = *n; i__ >= i__1; --i__) { iw = i__ - *n + *nb; if (i__ < *n) { /* Update A(1:i,i) */ i__2 = i__ + i__ * a_dim1; i__3 = i__ + i__ * a_dim1; d__1 = a[i__3].r; a[i__2].r = d__1, a[i__2].i = 0.; i__2 = *n - i__; zlacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw); i__2 = *n - i__; z__1.r = -1., z__1.i = -0.; zgemv_("No transpose", &i__, &i__2, &z__1, &a[(i__ + 1) * a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, & c_b2, &a[i__ * a_dim1 + 1], &c__1); i__2 = *n - i__; zlacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw); i__2 = *n - i__; zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda); i__2 = *n - i__; z__1.r = -1., z__1.i = -0.; zgemv_("No transpose", &i__, &i__2, &z__1, &w[(iw + 1) * w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, & c_b2, &a[i__ * a_dim1 + 1], &c__1); i__2 = *n - i__; zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda); i__2 = i__ + i__ * a_dim1; i__3 = i__ + i__ * a_dim1; d__1 = a[i__3].r; a[i__2].r = d__1, a[i__2].i = 0.; } if (i__ > 1) { /* Generate elementary reflector H(i) to annihilate */ /* A(1:i-2,i) */ i__2 = i__ - 1 + i__ * a_dim1; alpha.r = a[i__2].r, alpha.i = a[i__2].i; i__2 = i__ - 1; zlarfg_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &tau[i__ - 1]); i__2 = i__ - 1; e[i__2] = alpha.r; i__2 = i__ - 1 + i__ * a_dim1; a[i__2].r = 1., a[i__2].i = 0.; /* Compute W(1:i-1,i) */ i__2 = i__ - 1; zhemv_("Upper", &i__2, &c_b2, &a[a_offset], lda, &a[i__ * a_dim1 + 1], &c__1, &c_b1, &w[iw * w_dim1 + 1], &c__1); if (i__ < *n) { i__2 = i__ - 1; i__3 = *n - i__; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[(iw + 1) * w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], & c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; z__1.r = -1., z__1.i = -0.; zgemv_("No transpose", &i__2, &i__3, &z__1, &a[(i__ + 1) * a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], & c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[( i__ + 1) * a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], &c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; z__1.r = -1., z__1.i = -0.; zgemv_("No transpose", &i__2, &i__3, &z__1, &w[(iw + 1) * w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], & c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1); } i__2 = i__ - 1; zscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1); z__3.r = -.5, z__3.i = -0.; i__2 = i__ - 1; z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i, z__2.i = z__3.r * tau[i__2].i + z__3.i * tau[i__2].r; i__3 = i__ - 1; zdotc_(&z__4, &i__3, &w[iw * w_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &c__1); z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i + z__2.i * z__4.r; alpha.r = z__1.r, alpha.i = z__1.i; i__2 = i__ - 1; zaxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw * w_dim1 + 1], &c__1); } /* L10: */ } } else { /* Reduce first NB columns of lower triangle */ i__1 = *nb; for (i__ = 1; i__ <= i__1; ++i__) { /* Update A(i:n,i) */ i__2 = i__ + i__ * a_dim1; i__3 = i__ + i__ * a_dim1; d__1 = a[i__3].r; a[i__2].r = d__1, a[i__2].i = 0.; i__2 = i__ - 1; zlacgv_(&i__2, &w[i__ + w_dim1], ldw); i__2 = *n - i__ + 1; i__3 = i__ - 1; z__1.r = -1., z__1.i = -0.; zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ 
+ a_dim1], lda, &w[i__ + w_dim1], ldw, &c_b2, &a[i__ + i__ * a_dim1], & c__1); i__2 = i__ - 1; zlacgv_(&i__2, &w[i__ + w_dim1], ldw); i__2 = i__ - 1; zlacgv_(&i__2, &a[i__ + a_dim1], lda); i__2 = *n - i__ + 1; i__3 = i__ - 1; z__1.r = -1., z__1.i = -0.; zgemv_("No transpose", &i__2, &i__3, &z__1, &w[i__ + w_dim1], ldw, &a[i__ + a_dim1], lda, &c_b2, &a[i__ + i__ * a_dim1], & c__1); i__2 = i__ - 1; zlacgv_(&i__2, &a[i__ + a_dim1], lda); i__2 = i__ + i__ * a_dim1; i__3 = i__ + i__ * a_dim1; d__1 = a[i__3].r; a[i__2].r = d__1, a[i__2].i = 0.; if (i__ < *n) { /* Generate elementary reflector H(i) to annihilate */ /* A(i+2:n,i) */ i__2 = i__ + 1 + i__ * a_dim1; alpha.r = a[i__2].r, alpha.i = a[i__2].i; i__2 = *n - i__; /* Computing MIN */ i__3 = i__ + 2; zlarfg_(&i__2, &alpha, &a[MIN(i__3, *n)+ i__ * a_dim1], &c__1, &tau[i__]); i__2 = i__; e[i__2] = alpha.r; i__2 = i__ + 1 + i__ * a_dim1; a[i__2].r = 1., a[i__2].i = 0.; /* Compute W(i+1:n,i) */ i__2 = *n - i__; zhemv_("Lower", &i__2, &c_b2, &a[i__ + 1 + (i__ + 1) * a_dim1] , lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b1, &w[ i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[i__ + 1 + w_dim1], ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, & c_b1, &w[i__ * w_dim1 + 1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; z__1.r = -1., z__1.i = -0.; zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 + a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[ i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, & c_b1, &w[i__ * w_dim1 + 1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; z__1.r = -1., z__1.i = -0.; zgemv_("No transpose", &i__2, &i__3, &z__1, &w[i__ + 1 + w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[ i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; zscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1); z__3.r = -.5, z__3.i = -0.; i__2 = i__; z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i, z__2.i = z__3.r * tau[i__2].i + z__3.i * tau[i__2].r; i__3 = *n - i__; zdotc_(&z__4, &i__3, &w[i__ + 1 + i__ * w_dim1], &c__1, &a[ i__ + 1 + i__ * a_dim1], &c__1); z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i + z__2.i * z__4.r; alpha.r = z__1.r, alpha.i = z__1.i; i__2 = *n - i__; zaxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[ i__ + 1 + i__ * w_dim1], &c__1); } /* L20: */ } } return 0; /* End of ZLATRD */ } /* zlatrd_ */
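/* ZLATRD is the panel kernel used by ZHETRD: the driver calls it once per
   block of NB columns and applies the rank-2k update A := A - V*W**H - W*V**H
   with ZHER2K in between. A hedged sketch of calling the ZHETRD driver itself
   (assumes trailing-underscore Fortran symbols and 32-bit INTEGER): */
#include <algorithm>
#include <complex>
typedef std::complex<double> zdouble;

extern "C" void zhetrd_(const char* uplo, const int* n, zdouble* a, const int* lda,
                        double* d, double* e, zdouble* tau, zdouble* work,
                        const int* lwork, int* info);

/* On entry a holds the lower triangle of A (column-major, lda = n); on exit
   d/e hold the real tridiagonal matrix T and a/tau encode the unitary Q. */
int tridiagonalize(int n, zdouble* a, double* d, double* e, zdouble* tau)
{
  int lwork = std::max(1, 64 * n);   // generous block-size guess for workspace
  int info;
  zdouble* work = new zdouble[lwork];
  zhetrd_("L", &n, a, &n, d, e, tau, work, &lwork, &info);
  delete[] work;
  return info;                       // 0 on success
}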
/* Call-by-value convenience wrapper around the Fortran BLAS routine zhemv_:
   takes the scalar arguments by value and passes their addresses through. */
void zhemv(char uplo, int n, doublecomplex *alpha, doublecomplex *a, int lda,
           doublecomplex *x, int incx, doublecomplex *beta, doublecomplex *y,
           int incy)
{
    zhemv_( &uplo, &n, alpha, a, &lda, x, &incx, beta, y, &incy );
}
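/* Hedged example of the wrapper above: compute y := alpha*A*x + beta*y for a
   2-by-2 Hermitian A stored column-major with only its lower triangle set.
   doublecomplex is the f2c struct { double r, i; } used throughout this file;
   the demo function name is illustrative only. */
void zhemv_wrapper_demo(void)
{
    doublecomplex alpha = {1., 0.}, beta = {0., 0.};
    /* A = [ 2    1+i ]  -- only the lower triangle (2, 1-i, 3) is referenced
           [ 1-i  3   ]     when uplo = 'L'. */
    doublecomplex a[4] = { {2., 0.}, {1., -1.}, {0., 0.}, {3., 0.} };
    doublecomplex x[2] = { {1., 0.}, {1., 0.} };
    doublecomplex y[2];
    zhemv('L', 2, &alpha, a, 2, x, 1, &beta, y, 1);
    /* Expected: y[0] = 3+i, y[1] = 4-i. */
}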
/* Subroutine */ int zlaghe_(integer *n, integer *k, doublereal *d, doublecomplex *a, integer *lda, integer *iseed, doublecomplex *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1; doublecomplex z__1, z__2, z__3, z__4; /* Builtin functions */ double z_abs(doublecomplex *); void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg( doublecomplex *, doublecomplex *); /* Local variables */ extern /* Subroutine */ int zher2_(char *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *); static integer i, j; static doublecomplex alpha; extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *), zscal_(integer *, doublecomplex *, doublecomplex *, integer *); extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *); extern /* Subroutine */ int zgemv_(char *, integer *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *), zhemv_(char *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *); extern doublereal dznrm2_(integer *, doublecomplex *, integer *); static doublecomplex wa, wb; static doublereal wn; extern /* Subroutine */ int xerbla_(char *, integer *), zlarnv_( integer *, integer *, integer *, doublecomplex *); static doublecomplex tau; /* -- LAPACK auxiliary test routine (version 2.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University September 30, 1994 Purpose ======= ZLAGHE generates a complex hermitian matrix A, by pre- and post- multiplying a real diagonal matrix D with a random unitary matrix: A = U*D*U'. The semi-bandwidth may then be reduced to k by additional unitary transformations. Arguments ========= N (input) INTEGER The order of the matrix A. N >= 0. K (input) INTEGER The number of nonzero subdiagonals within the band of A. 0 <= K <= N-1. D (input) DOUBLE PRECISION array, dimension (N) The diagonal elements of the diagonal matrix D. A (output) COMPLEX*16 array, dimension (LDA,N) The generated n by n hermitian matrix A (the full matrix is stored). LDA (input) INTEGER The leading dimension of the array A. LDA >= N. ISEED (input/output) INTEGER array, dimension (4) On entry, the seed of the random number generator; the array elements must be between 0 and 4095, and ISEED(4) must be odd. On exit, the seed is updated. 
WORK (workspace) COMPLEX*16 array, dimension (2*N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments Parameter adjustments */ --d; a_dim1 = *lda; a_offset = a_dim1 + 1; a -= a_offset; --iseed; --work; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } else if (*k < 0 || *k > *n - 1) { *info = -2; } else if (*lda < max(1,*n)) { *info = -5; } if (*info < 0) { i__1 = -(*info); xerbla_("ZLAGHE", &i__1); return 0; } /* initialize lower triangle of A to diagonal matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i = j + 1; i <= i__2; ++i) { i__3 = i + j * a_dim1; a[i__3].r = 0., a[i__3].i = 0.; /* L10: */ } /* L20: */ } i__1 = *n; for (i = 1; i <= i__1; ++i) { i__2 = i + i * a_dim1; i__3 = i; a[i__2].r = d[i__3], a[i__2].i = 0.; /* L30: */ } /* Generate lower triangle of hermitian matrix */ for (i = *n - 1; i >= 1; --i) { /* generate random reflection */ i__1 = *n - i + 1; zlarnv_(&c__3, &iseed[1], &i__1, &work[1]); i__1 = *n - i + 1; wn = dznrm2_(&i__1, &work[1], &c__1); d__1 = wn / z_abs(&work[1]); z__1.r = d__1 * work[1].r, z__1.i = d__1 * work[1].i; wa.r = z__1.r, wa.i = z__1.i; if (wn == 0.) { tau.r = 0., tau.i = 0.; } else { z__1.r = work[1].r + wa.r, z__1.i = work[1].i + wa.i; wb.r = z__1.r, wb.i = z__1.i; i__1 = *n - i; z_div(&z__1, &c_b2, &wb); zscal_(&i__1, &z__1, &work[2], &c__1); work[1].r = 1., work[1].i = 0.; z_div(&z__1, &wb, &wa); d__1 = z__1.r; tau.r = d__1, tau.i = 0.; } /* apply random reflection to A(i:n,i:n) from the left and the right compute y := tau * A * u */ i__1 = *n - i + 1; zhemv_("Lower", &i__1, &tau, &a[i + i * a_dim1], lda, &work[1], &c__1, &c_b1, &work[*n + 1], &c__1); /* compute v := y - 1/2 * tau * ( y, u ) * u */ z__3.r = -.5, z__3.i = 0.; z__2.r = z__3.r * tau.r - z__3.i * tau.i, z__2.i = z__3.r * tau.i + z__3.i * tau.r; i__1 = *n - i + 1; zdotc_(&z__4, &i__1, &work[*n + 1], &c__1, &work[1], &c__1); z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i + z__2.i * z__4.r; alpha.r = z__1.r, alpha.i = z__1.i; i__1 = *n - i + 1; zaxpy_(&i__1, &alpha, &work[1], &c__1, &work[*n + 1], &c__1); /* apply the transformation as a rank-2 update to A(i:n,i:n) */ i__1 = *n - i + 1; z__1.r = -1., z__1.i = 0.; zher2_("Lower", &i__1, &z__1, &work[1], &c__1, &work[*n + 1], &c__1, & a[i + i * a_dim1], lda); /* L40: */ } /* Reduce number of subdiagonals to K */ i__1 = *n - 1 - *k; for (i = 1; i <= i__1; ++i) { /* generate reflection to annihilate A(k+i+1:n,i) */ i__2 = *n - *k - i + 1; wn = dznrm2_(&i__2, &a[*k + i + i * a_dim1], &c__1); d__1 = wn / z_abs(&a[*k + i + i * a_dim1]); i__2 = *k + i + i * a_dim1; z__1.r = d__1 * a[i__2].r, z__1.i = d__1 * a[i__2].i; wa.r = z__1.r, wa.i = z__1.i; if (wn == 0.) 
{ tau.r = 0., tau.i = 0.; } else { i__2 = *k + i + i * a_dim1; z__1.r = a[i__2].r + wa.r, z__1.i = a[i__2].i + wa.i; wb.r = z__1.r, wb.i = z__1.i; i__2 = *n - *k - i; z_div(&z__1, &c_b2, &wb); zscal_(&i__2, &z__1, &a[*k + i + 1 + i * a_dim1], &c__1); i__2 = *k + i + i * a_dim1; a[i__2].r = 1., a[i__2].i = 0.; z_div(&z__1, &wb, &wa); d__1 = z__1.r; tau.r = d__1, tau.i = 0.; } /* apply reflection to A(k+i:n,i+1:k+i-1) from the left */ i__2 = *n - *k - i + 1; i__3 = *k - 1; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i + (i + 1) * a_dim1], lda, &a[*k + i + i * a_dim1], &c__1, &c_b1, &work[ 1], &c__1); i__2 = *n - *k - i + 1; i__3 = *k - 1; z__1.r = -tau.r, z__1.i = -tau.i; zgerc_(&i__2, &i__3, &z__1, &a[*k + i + i * a_dim1], &c__1, &work[1], &c__1, &a[*k + i + (i + 1) * a_dim1], lda); /* apply reflection to A(k+i:n,k+i:n) from the left and the rig ht compute y := tau * A * u */ i__2 = *n - *k - i + 1; zhemv_("Lower", &i__2, &tau, &a[*k + i + (*k + i) * a_dim1], lda, &a[* k + i + i * a_dim1], &c__1, &c_b1, &work[1], &c__1); /* compute v := y - 1/2 * tau * ( y, u ) * u */ z__3.r = -.5, z__3.i = 0.; z__2.r = z__3.r * tau.r - z__3.i * tau.i, z__2.i = z__3.r * tau.i + z__3.i * tau.r; i__2 = *n - *k - i + 1; zdotc_(&z__4, &i__2, &work[1], &c__1, &a[*k + i + i * a_dim1], &c__1); z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i + z__2.i * z__4.r; alpha.r = z__1.r, alpha.i = z__1.i; i__2 = *n - *k - i + 1; zaxpy_(&i__2, &alpha, &a[*k + i + i * a_dim1], &c__1, &work[1], &c__1) ; /* apply hermitian rank-2 update to A(k+i:n,k+i:n) */ i__2 = *n - *k - i + 1; z__1.r = -1., z__1.i = 0.; zher2_("Lower", &i__2, &z__1, &a[*k + i + i * a_dim1], &c__1, &work[1] , &c__1, &a[*k + i + (*k + i) * a_dim1], lda); i__2 = *k + i + i * a_dim1; z__1.r = -wa.r, z__1.i = -wa.i; a[i__2].r = z__1.r, a[i__2].i = z__1.i; i__2 = *n; for (j = *k + i + 1; j <= i__2; ++j) { i__3 = j + i * a_dim1; a[i__3].r = 0., a[i__3].i = 0.; /* L50: */ } /* L60: */ } /* Store full hermitian matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i = j + 1; i <= i__2; ++i) { i__3 = j + i * a_dim1; d_cnjg(&z__1, &a[i + j * a_dim1]); a[i__3].r = z__1.r, a[i__3].i = z__1.i; /* L70: */ } /* L80: */ } return 0; /* End of ZLAGHE */ } /* zlaghe_ */
/* Subroutine */ int zlaghe_(integer *n, integer *k, doublereal *d__, doublecomplex *a, integer *lda, integer *iseed, doublecomplex *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1; doublecomplex z__1, z__2, z__3, z__4; /* Local variables */ integer i__, j; doublecomplex wa, wb; doublereal wn; doublecomplex tau; doublecomplex alpha; /* -- LAPACK auxiliary test routine (version 3.1) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZLAGHE generates a complex hermitian matrix A, by pre- and post- */ /* multiplying a real diagonal matrix D with a random unitary matrix: */ /* A = U*D*U'. The semi-bandwidth may then be reduced to k by additional */ /* unitary transformations. */ /* Arguments */ /* ========= */ /* N (input) INTEGER */ /* The order of the matrix A. N >= 0. */ /* K (input) INTEGER */ /* The number of nonzero subdiagonals within the band of A. */ /* 0 <= K <= N-1. */ /* D (input) DOUBLE PRECISION array, dimension (N) */ /* The diagonal elements of the diagonal matrix D. */ /* A (output) COMPLEX*16 array, dimension (LDA,N) */ /* The generated n by n hermitian matrix A (the full matrix is */ /* stored). */ /* LDA (input) INTEGER */ /* The leading dimension of the array A. LDA >= N. */ /* ISEED (input/output) INTEGER array, dimension (4) */ /* On entry, the seed of the random number generator; the array */ /* elements must be between 0 and 4095, and ISEED(4) must be */ /* odd. */ /* On exit, the seed is updated. */ /* WORK (workspace) COMPLEX*16 array, dimension (2*N) */ /* INFO (output) INTEGER */ /* = 0: successful exit */ /* < 0: if INFO = -i, the i-th argument had an illegal value */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Test the input arguments */ /* Parameter adjustments */ --d__; a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; --iseed; --work; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } else if (*k < 0 || *k > *n - 1) { *info = -2; } else if (*lda < max(1,*n)) { *info = -5; } if (*info < 0) { i__1 = -(*info); xerbla_("ZLAGHE", &i__1); return 0; } /* initialize lower triangle of A to diagonal matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { i__3 = i__ + j * a_dim1; a[i__3].r = 0., a[i__3].i = 0.; /* L10: */ } /* L20: */ } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = i__ + i__ * a_dim1; i__3 = i__; a[i__2].r = d__[i__3], a[i__2].i = 0.; /* L30: */ } /* Generate lower triangle of hermitian matrix */ for (i__ = *n - 1; i__ >= 1; --i__) { /* generate random reflection */ i__1 = *n - i__ + 1; zlarnv_(&c__3, &iseed[1], &i__1, &work[1]); i__1 = *n - i__ + 1; wn = dznrm2_(&i__1, &work[1], &c__1); d__1 = wn / z_abs(&work[1]); z__1.r = d__1 * work[1].r, z__1.i = d__1 * work[1].i; wa.r = z__1.r, wa.i = z__1.i; if (wn == 0.) 
{ tau.r = 0., tau.i = 0.; } else { z__1.r = work[1].r + wa.r, z__1.i = work[1].i + wa.i; wb.r = z__1.r, wb.i = z__1.i; i__1 = *n - i__; z_div(&z__1, &c_b2, &wb); zscal_(&i__1, &z__1, &work[2], &c__1); work[1].r = 1., work[1].i = 0.; z_div(&z__1, &wb, &wa); d__1 = z__1.r; tau.r = d__1, tau.i = 0.; } /* apply random reflection to A(i:n,i:n) from the left */ /* and the right */ /* compute y := tau * A * u */ i__1 = *n - i__ + 1; zhemv_("Lower", &i__1, &tau, &a[i__ + i__ * a_dim1], lda, &work[1], & c__1, &c_b1, &work[*n + 1], &c__1); /* compute v := y - 1/2 * tau * ( y, u ) * u */ z__3.r = -.5, z__3.i = -0.; z__2.r = z__3.r * tau.r - z__3.i * tau.i, z__2.i = z__3.r * tau.i + z__3.i * tau.r; i__1 = *n - i__ + 1; zdotc_(&z__4, &i__1, &work[*n + 1], &c__1, &work[1], &c__1); z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i + z__2.i * z__4.r; alpha.r = z__1.r, alpha.i = z__1.i; i__1 = *n - i__ + 1; zaxpy_(&i__1, &alpha, &work[1], &c__1, &work[*n + 1], &c__1); /* apply the transformation as a rank-2 update to A(i:n,i:n) */ i__1 = *n - i__ + 1; z__1.r = -1., z__1.i = -0.; zher2_("Lower", &i__1, &z__1, &work[1], &c__1, &work[*n + 1], &c__1, & a[i__ + i__ * a_dim1], lda); /* L40: */ } /* Reduce number of subdiagonals to K */ i__1 = *n - 1 - *k; for (i__ = 1; i__ <= i__1; ++i__) { /* generate reflection to annihilate A(k+i+1:n,i) */ i__2 = *n - *k - i__ + 1; wn = dznrm2_(&i__2, &a[*k + i__ + i__ * a_dim1], &c__1); d__1 = wn / z_abs(&a[*k + i__ + i__ * a_dim1]); i__2 = *k + i__ + i__ * a_dim1; z__1.r = d__1 * a[i__2].r, z__1.i = d__1 * a[i__2].i; wa.r = z__1.r, wa.i = z__1.i; if (wn == 0.) { tau.r = 0., tau.i = 0.; } else { i__2 = *k + i__ + i__ * a_dim1; z__1.r = a[i__2].r + wa.r, z__1.i = a[i__2].i + wa.i; wb.r = z__1.r, wb.i = z__1.i; i__2 = *n - *k - i__; z_div(&z__1, &c_b2, &wb); zscal_(&i__2, &z__1, &a[*k + i__ + 1 + i__ * a_dim1], &c__1); i__2 = *k + i__ + i__ * a_dim1; a[i__2].r = 1., a[i__2].i = 0.; z_div(&z__1, &wb, &wa); d__1 = z__1.r; tau.r = d__1, tau.i = 0.; } /* apply reflection to A(k+i:n,i+1:k+i-1) from the left */ i__2 = *n - *k - i__ + 1; i__3 = *k - 1; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ + (i__ + 1) * a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, & c_b1, &work[1], &c__1); i__2 = *n - *k - i__ + 1; i__3 = *k - 1; z__1.r = -tau.r, z__1.i = -tau.i; zgerc_(&i__2, &i__3, &z__1, &a[*k + i__ + i__ * a_dim1], &c__1, &work[ 1], &c__1, &a[*k + i__ + (i__ + 1) * a_dim1], lda); /* apply reflection to A(k+i:n,k+i:n) from the left and the right */ /* compute y := tau * A * u */ i__2 = *n - *k - i__ + 1; zhemv_("Lower", &i__2, &tau, &a[*k + i__ + (*k + i__) * a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &work[1], &c__1); /* compute v := y - 1/2 * tau * ( y, u ) * u */ z__3.r = -.5, z__3.i = -0.; z__2.r = z__3.r * tau.r - z__3.i * tau.i, z__2.i = z__3.r * tau.i + z__3.i * tau.r; i__2 = *n - *k - i__ + 1; zdotc_(&z__4, &i__2, &work[1], &c__1, &a[*k + i__ + i__ * a_dim1], & c__1); z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i + z__2.i * z__4.r; alpha.r = z__1.r, alpha.i = z__1.i; i__2 = *n - *k - i__ + 1; zaxpy_(&i__2, &alpha, &a[*k + i__ + i__ * a_dim1], &c__1, &work[1], & c__1); /* apply hermitian rank-2 update to A(k+i:n,k+i:n) */ i__2 = *n - *k - i__ + 1; z__1.r = -1., z__1.i = -0.; zher2_("Lower", &i__2, &z__1, &a[*k + i__ + i__ * a_dim1], &c__1, & work[1], &c__1, &a[*k + i__ + (*k + i__) * a_dim1], lda); i__2 = *k + i__ + i__ * a_dim1; z__1.r = -wa.r, z__1.i = -wa.i; a[i__2].r = z__1.r, a[i__2].i = z__1.i; i__2 
= *n; for (j = *k + i__ + 1; j <= i__2; ++j) { i__3 = j + i__ * a_dim1; a[i__3].r = 0., a[i__3].i = 0.; /* L50: */ } /* L60: */ } /* Store full hermitian matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { i__3 = j + i__ * a_dim1; d_cnjg(&z__1, &a[i__ + j * a_dim1]); a[i__3].r = z__1.r, a[i__3].i = z__1.i; /* L70: */ } /* L80: */ } return 0; /* End of ZLAGHE */ } /* zlaghe_ */
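/* Hedged usage sketch for ZLAGHE (LAPACK's test-matrix generator library,
   typically libtmglib): generate a random n-by-n Hermitian matrix with
   prescribed eigenvalues d[0..n-1] and semi-bandwidth k. Per the routine's
   documentation above, ISEED entries must lie in [0,4095] with ISEED(4) odd,
   and WORK needs 2*N complex entries. */
#include <complex>
typedef std::complex<double> zdouble;

extern "C" void zlaghe_(const int* n, const int* k, const double* d, zdouble* a,
                        const int* lda, int* iseed, zdouble* work, int* info);

int random_hermitian(int n, const double* d, zdouble* a /* n*n, column-major */)
{
  int k = n - 1;                    // full semi-bandwidth: dense Hermitian A
  int iseed[4] = { 0, 1, 2, 7 };    // all in [0,4095], iseed[3] odd
  zdouble* work = new zdouble[2 * n];
  int info;
  zlaghe_(&n, &k, d, a, &n, iseed, work, &info);
  delete[] work;
  return info;                      // 0 on success
}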
/* Subroutine */ int zla_porfsx_extended_(integer *prec_type__, char *uplo, integer *n, integer *nrhs, doublecomplex *a, integer *lda, doublecomplex *af, integer *ldaf, logical *colequ, doublereal *c__, doublecomplex *b, integer *ldb, doublecomplex *y, integer *ldy, doublereal *berr_out__, integer *n_norms__, doublereal * err_bnds_norm__, doublereal *err_bnds_comp__, doublecomplex *res, doublereal *ayb, doublecomplex *dy, doublecomplex *y_tail__, doublereal *rcond, integer *ithresh, doublereal *rthresh, doublereal * dz_ub__, logical *ignore_cwise__, integer *info) { /* System generated locals */ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, y_dim1, y_offset, err_bnds_norm_dim1, err_bnds_norm_offset, err_bnds_comp_dim1, err_bnds_comp_offset, i__1, i__2, i__3, i__4; doublereal d__1, d__2; /* Builtin functions */ double d_imag(doublecomplex *); /* Local variables */ doublereal dxratmax, dzratmax; integer i__, j; logical incr_prec__; extern /* Subroutine */ int zla_heamv_(integer *, integer *, doublereal * , doublecomplex *, integer *, doublecomplex *, integer *, doublereal *, doublereal *, integer *); doublereal prev_dz_z__, yk, final_dx_x__, final_dz_z__; extern /* Subroutine */ int zla_wwaddw_(integer *, doublecomplex *, doublecomplex *, doublecomplex *); doublereal prevnormdx; integer cnt; doublereal dyk, eps, incr_thresh__, dx_x__, dz_z__, ymin; extern /* Subroutine */ int zla_lin_berr_(integer *, integer *, integer * , doublecomplex *, doublereal *, doublereal *); integer y_prec_state__; extern /* Subroutine */ int blas_zhemv_x_(integer *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *, integer *) ; integer uplo2; extern logical lsame_(char *, char *); extern /* Subroutine */ int blas_zhemv2_x_(integer *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *, integer *); doublereal dxrat, dzrat; extern /* Subroutine */ int zhemv_(char *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *); doublereal normx, normy; extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *); extern doublereal dlamch_(char *); doublereal normdx; extern /* Subroutine */ int zpotrs_(char *, integer *, integer *, doublecomplex *, integer *, doublecomplex *, integer *, integer *); doublereal hugeval; extern integer ilauplo_(char *); integer x_state__, z_state__; /* -- LAPACK computational routine (version 3.4.2) -- */ /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */ /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */ /* September 2012 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* ===================================================================== */ /* .. Local Scalars .. */ /* .. */ /* .. Parameters .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Statement Functions .. */ /* .. */ /* .. Statement Function Definitions .. */ /* .. */ /* .. Executable Statements .. 
*/ /* Parameter adjustments */ err_bnds_comp_dim1 = *nrhs; err_bnds_comp_offset = 1 + err_bnds_comp_dim1; err_bnds_comp__ -= err_bnds_comp_offset; err_bnds_norm_dim1 = *nrhs; err_bnds_norm_offset = 1 + err_bnds_norm_dim1; err_bnds_norm__ -= err_bnds_norm_offset; a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; af_dim1 = *ldaf; af_offset = 1 + af_dim1; af -= af_offset; --c__; b_dim1 = *ldb; b_offset = 1 + b_dim1; b -= b_offset; y_dim1 = *ldy; y_offset = 1 + y_dim1; y -= y_offset; --berr_out__; --res; --ayb; --dy; --y_tail__; /* Function Body */ if (*info != 0) { return 0; } eps = dlamch_("Epsilon"); hugeval = dlamch_("Overflow"); /* Force HUGEVAL to Inf */ hugeval *= hugeval; /* Using HUGEVAL may lead to spurious underflows. */ incr_thresh__ = (doublereal) (*n) * eps; if (lsame_(uplo, "L")) { uplo2 = ilauplo_("L"); } else { uplo2 = ilauplo_("U"); } i__1 = *nrhs; for (j = 1; j <= i__1; ++j) { y_prec_state__ = 1; if (y_prec_state__ == 2) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = i__; y_tail__[i__3].r = 0.; y_tail__[i__3].i = 0.; // , expr subst } } dxrat = 0.; dxratmax = 0.; dzrat = 0.; dzratmax = 0.; final_dx_x__ = hugeval; final_dz_z__ = hugeval; prevnormdx = hugeval; prev_dz_z__ = hugeval; dz_z__ = hugeval; dx_x__ = hugeval; x_state__ = 1; z_state__ = 0; incr_prec__ = FALSE_; i__2 = *ithresh; for (cnt = 1; cnt <= i__2; ++cnt) { /* Compute residual RES = B_s - op(A_s) * Y, */ /* op(A) = A, A**T, or A**H depending on TRANS (and type). */ zcopy_(n, &b[j * b_dim1 + 1], &c__1, &res[1], &c__1); if (y_prec_state__ == 0) { zhemv_(uplo, n, &c_b11, &a[a_offset], lda, &y[j * y_dim1 + 1], &c__1, &c_b12, &res[1], &c__1); } else if (y_prec_state__ == 1) { blas_zhemv_x_(&uplo2, n, &c_b11, &a[a_offset], lda, &y[j * y_dim1 + 1], &c__1, &c_b12, &res[1], &c__1, prec_type__); } else { blas_zhemv2_x_(&uplo2, n, &c_b11, &a[a_offset], lda, &y[j * y_dim1 + 1], &y_tail__[1], &c__1, &c_b12, &res[1], & c__1, prec_type__); } /* XXX: RES is no longer needed. */ zcopy_(n, &res[1], &c__1, &dy[1], &c__1); zpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &dy[1], n, info); /* Calculate relative changes DX_X, DZ_Z and ratios DXRAT, DZRAT. */ normx = 0.; normy = 0.; normdx = 0.; dz_z__ = 0.; ymin = hugeval; i__3 = *n; for (i__ = 1; i__ <= i__3; ++i__) { i__4 = i__ + j * y_dim1; yk = (d__1 = y[i__4].r, abs(d__1)) + (d__2 = d_imag(&y[i__ + j * y_dim1]), abs(d__2)); i__4 = i__; dyk = (d__1 = dy[i__4].r, abs(d__1)) + (d__2 = d_imag(&dy[i__] ), abs(d__2)); if (yk != 0.) { /* Computing MAX */ d__1 = dz_z__; d__2 = dyk / yk; // , expr subst dz_z__ = max(d__1,d__2); } else if (dyk != 0.) { dz_z__ = hugeval; } ymin = min(ymin,yk); normy = max(normy,yk); if (*colequ) { /* Computing MAX */ d__1 = normx; d__2 = yk * c__[i__]; // , expr subst normx = max(d__1,d__2); /* Computing MAX */ d__1 = normdx; d__2 = dyk * c__[i__]; // , expr subst normdx = max(d__1,d__2); } else { normx = normy; normdx = max(normdx,dyk); } } if (normx != 0.) { dx_x__ = normdx / normx; } else if (normdx == 0.) { dx_x__ = 0.; } else { dx_x__ = hugeval; } dxrat = normdx / prevnormdx; dzrat = dz_z__ / prev_dz_z__; /* Check termination criteria. 
*/ if (ymin * *rcond < incr_thresh__ * normy && y_prec_state__ < 2) { incr_prec__ = TRUE_; } if (x_state__ == 3 && dxrat <= *rthresh) { x_state__ = 1; } if (x_state__ == 1) { if (dx_x__ <= eps) { x_state__ = 2; } else if (dxrat > *rthresh) { if (y_prec_state__ != 2) { incr_prec__ = TRUE_; } else { x_state__ = 3; } } else { if (dxrat > dxratmax) { dxratmax = dxrat; } } if (x_state__ > 1) { final_dx_x__ = dx_x__; } } if (z_state__ == 0 && dz_z__ <= *dz_ub__) { z_state__ = 1; } if (z_state__ == 3 && dzrat <= *rthresh) { z_state__ = 1; } if (z_state__ == 1) { if (dz_z__ <= eps) { z_state__ = 2; } else if (dz_z__ > *dz_ub__) { z_state__ = 0; dzratmax = 0.; final_dz_z__ = hugeval; } else if (dzrat > *rthresh) { if (y_prec_state__ != 2) { incr_prec__ = TRUE_; } else { z_state__ = 3; } } else { if (dzrat > dzratmax) { dzratmax = dzrat; } } if (z_state__ > 1) { final_dz_z__ = dz_z__; } } if (x_state__ != 1 && (*ignore_cwise__ || z_state__ != 1)) { goto L666; } if (incr_prec__) { incr_prec__ = FALSE_; ++y_prec_state__; i__3 = *n; for (i__ = 1; i__ <= i__3; ++i__) { i__4 = i__; y_tail__[i__4].r = 0.; y_tail__[i__4].i = 0.; // , expr subst } } prevnormdx = normdx; prev_dz_z__ = dz_z__; /* Update soluton. */ if (y_prec_state__ < 2) { zaxpy_(n, &c_b12, &dy[1], &c__1, &y[j * y_dim1 + 1], &c__1); } else { zla_wwaddw_(n, &y[j * y_dim1 + 1], &y_tail__[1], &dy[1]); } } /* Target of "IF (Z_STOP .AND. X_STOP)". Sun's f77 won't CALL F90_EXIT. */ L666: /* Set final_* when cnt hits ithresh. */ if (x_state__ == 1) { final_dx_x__ = dx_x__; } if (z_state__ == 1) { final_dz_z__ = dz_z__; } /* Compute error bounds. */ if (*n_norms__ >= 1) { err_bnds_norm__[j + (err_bnds_norm_dim1 << 1)] = final_dx_x__ / ( 1 - dxratmax); } if (*n_norms__ >= 2) { err_bnds_comp__[j + (err_bnds_comp_dim1 << 1)] = final_dz_z__ / ( 1 - dzratmax); } /* Compute componentwise relative backward error from formula */ /* max(i) ( abs(R(i)) / ( abs(op(A_s))*abs(Y) + abs(B_s) )(i) ) */ /* where abs(Z) is the componentwise absolute value of the matrix */ /* or vector Z. */ /* Compute residual RES = B_s - op(A_s) * Y, */ /* op(A) = A, A**T, or A**H depending on TRANS (and type). */ zcopy_(n, &b[j * b_dim1 + 1], &c__1, &res[1], &c__1); zhemv_(uplo, n, &c_b11, &a[a_offset], lda, &y[j * y_dim1 + 1], &c__1, &c_b12, &res[1], &c__1); i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = i__ + j * b_dim1; ayb[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(&b[i__ + j * b_dim1]), abs(d__2)); } /* Compute abs(op(A_s))*abs(Y) + abs(B_s). */ zla_heamv_(&uplo2, n, &c_b34, &a[a_offset], lda, &y[j * y_dim1 + 1], &c__1, &c_b34, &ayb[1], &c__1); zla_lin_berr_(n, n, &c__1, &res[1], &ayb[1], &berr_out__[j]); /* End of loop for each RHS. */ } return 0; }
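/* The componentwise backward-error formula shared by ZPORFS and
   ZLA_PORFSX_EXTENDED, berr = max(i) |R(i)| / (|A|*|X| + |B|)(i), as a
   standalone hedged sketch. res holds the residual R and ayb holds
   |A|*|X| + |B|; the SAFE1/SAFE2 guard against tiny denominators mirrors
   the source comments above. The helper name is illustrative only. */
#include <algorithm>
#include <cmath>
#include <complex>

double componentwise_berr(int n, const std::complex<double>* res,
                          const double* ayb, double safe1, double safe2)
{
  double s = 0.;
  for (int i = 0; i < n; ++i) {
    /* f2c code measures |z| as |Re z| + |Im z|; do the same here. */
    double r = std::fabs(res[i].real()) + std::fabs(res[i].imag());
    if (ayb[i] > safe2) s = std::max(s, r / ayb[i]);
    else                s = std::max(s, (r + safe1) / (ayb[i] + safe1));
  }
  return s;
}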
/* Subroutine */ int zlatrd_(char *uplo, integer *n, integer *nb, doublecomplex *a, integer *lda, doublereal *e, doublecomplex *tau, doublecomplex *w, integer *ldw) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University September 30, 1994 Purpose ======= ZLATRD reduces NB rows and columns of a complex Hermitian matrix A to Hermitian tridiagonal form by a unitary similarity transformation Q' * A * Q, and returns the matrices V and W which are needed to apply the transformation to the unreduced part of A. If UPLO = 'U', ZLATRD reduces the last NB rows and columns of a matrix, of which the upper triangle is supplied; if UPLO = 'L', ZLATRD reduces the first NB rows and columns of a matrix, of which the lower triangle is supplied. This is an auxiliary routine called by ZHETRD. Arguments ========= UPLO (input) CHARACTER Specifies whether the upper or lower triangular part of the Hermitian matrix A is stored: = 'U': Upper triangular = 'L': Lower triangular N (input) INTEGER The order of the matrix A. NB (input) INTEGER The number of rows and columns to be reduced. A (input/output) COMPLEX*16 array, dimension (LDA,N) On entry, the Hermitian matrix A. If UPLO = 'U', the leading n-by-n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading n-by-n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. On exit: if UPLO = 'U', the last NB columns have been reduced to tridiagonal form, with the diagonal elements overwriting the diagonal elements of A; the elements above the diagonal with the array TAU, represent the unitary matrix Q as a product of elementary reflectors; if UPLO = 'L', the first NB columns have been reduced to tridiagonal form, with the diagonal elements overwriting the diagonal elements of A; the elements below the diagonal with the array TAU, represent the unitary matrix Q as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). E (output) DOUBLE PRECISION array, dimension (N-1) If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal elements of the last NB columns of the reduced matrix; if UPLO = 'L', E(1:nb) contains the subdiagonal elements of the first NB columns of the reduced matrix. TAU (output) COMPLEX*16 array, dimension (N-1) The scalar factors of the elementary reflectors, stored in TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'. See Further Details. W (output) COMPLEX*16 array, dimension (LDW,NB) The n-by-nb matrix W required to update the unreduced part of A. LDW (input) INTEGER The leading dimension of the array W. LDW >= max(1,N). Further Details =============== If UPLO = 'U', the matrix Q is represented as a product of elementary reflectors Q = H(n) H(n-1) . . . H(n-nb+1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a complex scalar, and v is a complex vector with v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), and tau in TAU(i-1). If UPLO = 'L', the matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(nb). Each H(i) has the form H(i) = I - tau * v * v' where tau is a complex scalar, and v is a complex vector with v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), and tau in TAU(i). 
The elements of the vectors v together form the n-by-nb matrix V which is needed, with W, to apply the transformation to the unreduced part of the matrix, using a Hermitian rank-2k update of the form: A := A - V*W' - W*V'. The contents of A on exit are illustrated by the following examples with n = 5 and nb = 2: if UPLO = 'U': if UPLO = 'L': ( a a a v4 v5 ) ( d ) ( a a v4 v5 ) ( 1 d ) ( a 1 v5 ) ( v1 1 a ) ( d 1 ) ( v1 v2 a a ) ( d ) ( v1 v2 a a a ) where d denotes a diagonal element of the reduced matrix, a denotes an element of the original matrix that is unchanged, and vi denotes an element of the vector defining H(i). ===================================================================== Quick return if possible Parameter adjustments */ /* Table of constant values */ static doublecomplex c_b1 = {0.,0.}; static doublecomplex c_b2 = {1.,0.}; static integer c__1 = 1; /* System generated locals */ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3; doublereal d__1; doublecomplex z__1, z__2, z__3, z__4; /* Local variables */ static integer i__; static doublecomplex alpha; extern logical lsame_(char *, char *); extern /* Subroutine */ int zscal_(integer *, doublecomplex *, doublecomplex *, integer *); extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *); extern /* Subroutine */ int zgemv_(char *, integer *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *), zhemv_(char *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *); static integer iw; extern /* Subroutine */ int zlarfg_(integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *), zlacgv_(integer *, doublecomplex *, integer *); #define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1 #define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)] #define w_subscr(a_1,a_2) (a_2)*w_dim1 + a_1 #define w_ref(a_1,a_2) w[w_subscr(a_1,a_2)] a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --e; --tau; w_dim1 = *ldw; w_offset = 1 + w_dim1 * 1; w -= w_offset; /* Function Body */ if (*n <= 0) { return 0; } if (lsame_(uplo, "U")) { /* Reduce last NB columns of upper triangle */ i__1 = *n - *nb + 1; for (i__ = *n; i__ >= i__1; --i__) { iw = i__ - *n + *nb; if (i__ < *n) { /* Update A(1:i,i) */ i__2 = a_subscr(i__, i__); i__3 = a_subscr(i__, i__); d__1 = a[i__3].r; a[i__2].r = d__1, a[i__2].i = 0.; i__2 = *n - i__; zlacgv_(&i__2, &w_ref(i__, iw + 1), ldw); i__2 = *n - i__; z__1.r = -1., z__1.i = 0.; zgemv_("No transpose", &i__, &i__2, &z__1, &a_ref(1, i__ + 1), lda, &w_ref(i__, iw + 1), ldw, &c_b2, &a_ref(1, i__), &c__1); i__2 = *n - i__; zlacgv_(&i__2, &w_ref(i__, iw + 1), ldw); i__2 = *n - i__; zlacgv_(&i__2, &a_ref(i__, i__ + 1), lda); i__2 = *n - i__; z__1.r = -1., z__1.i = 0.; zgemv_("No transpose", &i__, &i__2, &z__1, &w_ref(1, iw + 1), ldw, &a_ref(i__, i__ + 1), lda, &c_b2, &a_ref(1, i__), &c__1); i__2 = *n - i__; zlacgv_(&i__2, &a_ref(i__, i__ + 1), lda); i__2 = a_subscr(i__, i__); i__3 = a_subscr(i__, i__); d__1 = a[i__3].r; a[i__2].r = d__1, a[i__2].i = 0.; } if (i__ > 1) { /* Generate elementary reflector H(i) to annihilate A(1:i-2,i) */ i__2 = a_subscr(i__ - 1, i__); alpha.r = a[i__2].r, alpha.i = a[i__2].i; i__2 = i__ - 1; zlarfg_(&i__2, &alpha, &a_ref(1, i__), &c__1, &tau[i__ - 1]); i__2 = i__ - 1; e[i__2] = alpha.r; i__2 = 
a_subscr(i__ - 1, i__); a[i__2].r = 1., a[i__2].i = 0.; /* Compute W(1:i-1,i) */ i__2 = i__ - 1; zhemv_("Upper", &i__2, &c_b2, &a[a_offset], lda, &a_ref(1, i__), &c__1, &c_b1, &w_ref(1, iw), &c__1); if (i__ < *n) { i__2 = i__ - 1; i__3 = *n - i__; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w_ref( 1, iw + 1), ldw, &a_ref(1, i__), &c__1, &c_b1, & w_ref(i__ + 1, iw), &c__1); i__2 = i__ - 1; i__3 = *n - i__; z__1.r = -1., z__1.i = 0.; zgemv_("No transpose", &i__2, &i__3, &z__1, &a_ref(1, i__ + 1), lda, &w_ref(i__ + 1, iw), &c__1, &c_b2, & w_ref(1, iw), &c__1); i__2 = i__ - 1; i__3 = *n - i__; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a_ref( 1, i__ + 1), lda, &a_ref(1, i__), &c__1, &c_b1, & w_ref(i__ + 1, iw), &c__1); i__2 = i__ - 1; i__3 = *n - i__; z__1.r = -1., z__1.i = 0.; zgemv_("No transpose", &i__2, &i__3, &z__1, &w_ref(1, iw + 1), ldw, &w_ref(i__ + 1, iw), &c__1, &c_b2, & w_ref(1, iw), &c__1); } i__2 = i__ - 1; zscal_(&i__2, &tau[i__ - 1], &w_ref(1, iw), &c__1); z__3.r = -.5, z__3.i = 0.; i__2 = i__ - 1; z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i, z__2.i = z__3.r * tau[i__2].i + z__3.i * tau[i__2].r; i__3 = i__ - 1; zdotc_(&z__4, &i__3, &w_ref(1, iw), &c__1, &a_ref(1, i__), & c__1); z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i + z__2.i * z__4.r; alpha.r = z__1.r, alpha.i = z__1.i; i__2 = i__ - 1; zaxpy_(&i__2, &alpha, &a_ref(1, i__), &c__1, &w_ref(1, iw), & c__1); } /* L10: */ } } else { /* Reduce first NB columns of lower triangle */ i__1 = *nb; for (i__ = 1; i__ <= i__1; ++i__) { /* Update A(i:n,i) */ i__2 = a_subscr(i__, i__); i__3 = a_subscr(i__, i__); d__1 = a[i__3].r; a[i__2].r = d__1, a[i__2].i = 0.; i__2 = i__ - 1; zlacgv_(&i__2, &w_ref(i__, 1), ldw); i__2 = *n - i__ + 1; i__3 = i__ - 1; z__1.r = -1., z__1.i = 0.; zgemv_("No transpose", &i__2, &i__3, &z__1, &a_ref(i__, 1), lda, & w_ref(i__, 1), ldw, &c_b2, &a_ref(i__, i__), &c__1); i__2 = i__ - 1; zlacgv_(&i__2, &w_ref(i__, 1), ldw); i__2 = i__ - 1; zlacgv_(&i__2, &a_ref(i__, 1), lda); i__2 = *n - i__ + 1; i__3 = i__ - 1; z__1.r = -1., z__1.i = 0.; zgemv_("No transpose", &i__2, &i__3, &z__1, &w_ref(i__, 1), ldw, & a_ref(i__, 1), lda, &c_b2, &a_ref(i__, i__), &c__1); i__2 = i__ - 1; zlacgv_(&i__2, &a_ref(i__, 1), lda); i__2 = a_subscr(i__, i__); i__3 = a_subscr(i__, i__); d__1 = a[i__3].r; a[i__2].r = d__1, a[i__2].i = 0.; if (i__ < *n) { /* Generate elementary reflector H(i) to annihilate A(i+2:n,i) */ i__2 = a_subscr(i__ + 1, i__); alpha.r = a[i__2].r, alpha.i = a[i__2].i; /* Computing MIN */ i__2 = i__ + 2; i__3 = *n - i__; zlarfg_(&i__3, &alpha, &a_ref(min(i__2,*n), i__), &c__1, &tau[ i__]); i__2 = i__; e[i__2] = alpha.r; i__2 = a_subscr(i__ + 1, i__); a[i__2].r = 1., a[i__2].i = 0.; /* Compute W(i+1:n,i) */ i__2 = *n - i__; zhemv_("Lower", &i__2, &c_b2, &a_ref(i__ + 1, i__ + 1), lda, & a_ref(i__ + 1, i__), &c__1, &c_b1, &w_ref(i__ + 1, i__), &c__1); i__2 = *n - i__; i__3 = i__ - 1; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w_ref(i__ + 1, 1), ldw, &a_ref(i__ + 1, i__), &c__1, &c_b1, & w_ref(1, i__), &c__1); i__2 = *n - i__; i__3 = i__ - 1; z__1.r = -1., z__1.i = 0.; zgemv_("No transpose", &i__2, &i__3, &z__1, &a_ref(i__ + 1, 1) , lda, &w_ref(1, i__), &c__1, &c_b2, &w_ref(i__ + 1, i__), &c__1); i__2 = *n - i__; i__3 = i__ - 1; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a_ref(i__ + 1, 1), lda, &a_ref(i__ + 1, i__), &c__1, &c_b1, & w_ref(1, i__), &c__1); i__2 = *n - i__; i__3 = i__ - 1; z__1.r = -1., z__1.i = 0.; zgemv_("No transpose", &i__2, &i__3, &z__1, 
&w_ref(i__ + 1, 1) , ldw, &w_ref(1, i__), &c__1, &c_b2, &w_ref(i__ + 1, i__), &c__1); i__2 = *n - i__; zscal_(&i__2, &tau[i__], &w_ref(i__ + 1, i__), &c__1); z__3.r = -.5, z__3.i = 0.; i__2 = i__; z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i, z__2.i = z__3.r * tau[i__2].i + z__3.i * tau[i__2].r; i__3 = *n - i__; zdotc_(&z__4, &i__3, &w_ref(i__ + 1, i__), &c__1, &a_ref(i__ + 1, i__), &c__1); z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i + z__2.i * z__4.r; alpha.r = z__1.r, alpha.i = z__1.i; i__2 = *n - i__; zaxpy_(&i__2, &alpha, &a_ref(i__ + 1, i__), &c__1, &w_ref(i__ + 1, i__), &c__1); } /* L20: */ } } return 0; /* End of ZLATRD */ } /* zlatrd_ */
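/* A minimal standalone sketch of the update that ZLATRD prepares: the
   Hermitian rank-2 update A := A - v*w' - w*v' for one reflector pair.
   This is plain C99, not the f2c interface above, and the helper name
   her2_update is hypothetical. The result stays Hermitian because the
   two subtracted terms are conjugate transposes of each other. */
#include <complex.h>

static void her2_update(int n, double complex *a, int lda,
                        const double complex *v, const double complex *w)
{
    /* Column-major, full storage; ZLATRD itself only touches one triangle. */
    for (int j = 0; j < n; ++j) {
        for (int i = 0; i < n; ++i) {
            a[i + (size_t)j * lda] -= v[i] * conj(w[j]) + w[i] * conj(v[j]);
        }
    }
}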
/* Subroutine */ int zhetri_(char *uplo, integer *n, doublecomplex *a, integer *lda, integer *ipiv, doublecomplex *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1; doublecomplex z__1, z__2; /* Local variables */ doublereal d__; integer j, k; doublereal t, ak; integer kp; doublereal akp1; doublecomplex temp, akkp1; integer kstep; logical upper; /* -- LAPACK routine (version 3.2) -- */ /* November 2006 */ /* Purpose */ /* ======= */ /* ZHETRI computes the inverse of a complex Hermitian indefinite matrix */ /* A using the factorization A = U*D*U**H or A = L*D*L**H computed by */ /* ZHETRF. */ /* Arguments */ /* ========= */ /* UPLO (input) CHARACTER*1 */ /* Specifies whether the details of the factorization are stored */ /* as an upper or lower triangular matrix. */ /* = 'U': Upper triangular, form is A = U*D*U**H; */ /* = 'L': Lower triangular, form is A = L*D*L**H. */ /* N (input) INTEGER */ /* The order of the matrix A. N >= 0. */ /* A (input/output) COMPLEX*16 array, dimension (LDA,N) */ /* On entry, the block diagonal matrix D and the multipliers */ /* used to obtain the factor U or L as computed by ZHETRF. */ /* On exit, if INFO = 0, the (Hermitian) inverse of the original */ /* matrix. If UPLO = 'U', the upper triangular part of the */ /* inverse is formed and the part of A below the diagonal is not */ /* referenced; if UPLO = 'L' the lower triangular part of the */ /* inverse is formed and the part of A above the diagonal is */ /* not referenced. */ /* LDA (input) INTEGER */ /* The leading dimension of the array A. LDA >= max(1,N). */ /* IPIV (input) INTEGER array, dimension (N) */ /* Details of the interchanges and the block structure of D */ /* as determined by ZHETRF. */ /* WORK (workspace) COMPLEX*16 array, dimension (N) */ /* INFO (output) INTEGER */ /* = 0: successful exit */ /* < 0: if INFO = -i, the i-th argument had an illegal value */ /* > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */ /* inverse could not be computed. */ /* ===================================================================== */ /* Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; --ipiv; --work; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("ZHETRI", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* Check that the diagonal matrix D is nonsingular. */ if (upper) { /* Upper triangular storage: examine D from bottom to top */ for (*info = *n; *info >= 1; --(*info)) { i__1 = *info + *info * a_dim1; if (ipiv[*info] > 0 && (a[i__1].r == 0. && a[i__1].i == 0.)) { return 0; } } } else { /* Lower triangular storage: examine D from top to bottom. */ i__1 = *n; for (*info = 1; *info <= i__1; ++(*info)) { i__2 = *info + *info * a_dim1; if (ipiv[*info] > 0 && (a[i__2].r == 0. && a[i__2].i == 0.)) { return 0; } } } *info = 0; if (upper) { /* Compute inv(A) from the factorization A = U*D*U'. */ /* K is the main loop index, increasing from 1 to N in steps of */ /* 1 or 2, depending on the size of the diagonal blocks. */ k = 1; L30: /* If K > N, exit from loop. */ if (k > *n) { goto L50; } if (ipiv[k] > 0) { /* 1 x 1 diagonal block */ /* Invert the diagonal block. */ i__1 = k + k * a_dim1; i__2 = k + k * a_dim1; d__1 = 1. 
/ a[i__2].r; a[i__1].r = d__1, a[i__1].i = 0.; /* Compute column K of the inverse. */ if (k > 1) { i__1 = k - 1; zcopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1); i__1 = k - 1; z__1.r = -1., z__1.i = -0.; zhemv_(uplo, &i__1, &z__1, &a[a_offset], lda, &work[1], &c__1, &c_b2, &a[k * a_dim1 + 1], &c__1); i__1 = k + k * a_dim1; i__2 = k + k * a_dim1; i__3 = k - 1; zdotc_(&z__2, &i__3, &work[1], &c__1, &a[k * a_dim1 + 1], & c__1); d__1 = z__2.r; z__1.r = a[i__2].r - d__1, z__1.i = a[i__2].i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; } kstep = 1; } else { /* 2 x 2 diagonal block */ /* Invert the diagonal block. */ t = z_abs(&a[k + (k + 1) * a_dim1]); i__1 = k + k * a_dim1; ak = a[i__1].r / t; i__1 = k + 1 + (k + 1) * a_dim1; akp1 = a[i__1].r / t; i__1 = k + (k + 1) * a_dim1; z__1.r = a[i__1].r / t, z__1.i = a[i__1].i / t; akkp1.r = z__1.r, akkp1.i = z__1.i; d__ = t * (ak * akp1 - 1.); i__1 = k + k * a_dim1; d__1 = akp1 / d__; a[i__1].r = d__1, a[i__1].i = 0.; i__1 = k + 1 + (k + 1) * a_dim1; d__1 = ak / d__; a[i__1].r = d__1, a[i__1].i = 0.; i__1 = k + (k + 1) * a_dim1; z__2.r = -akkp1.r, z__2.i = -akkp1.i; z__1.r = z__2.r / d__, z__1.i = z__2.i / d__; a[i__1].r = z__1.r, a[i__1].i = z__1.i; /* Compute columns K and K+1 of the inverse. */ if (k > 1) { i__1 = k - 1; zcopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1); i__1 = k - 1; z__1.r = -1., z__1.i = -0.; zhemv_(uplo, &i__1, &z__1, &a[a_offset], lda, &work[1], &c__1, &c_b2, &a[k * a_dim1 + 1], &c__1); i__1 = k + k * a_dim1; i__2 = k + k * a_dim1; i__3 = k - 1; zdotc_(&z__2, &i__3, &work[1], &c__1, &a[k * a_dim1 + 1], & c__1); d__1 = z__2.r; z__1.r = a[i__2].r - d__1, z__1.i = a[i__2].i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = k + (k + 1) * a_dim1; i__2 = k + (k + 1) * a_dim1; i__3 = k - 1; zdotc_(&z__2, &i__3, &a[k * a_dim1 + 1], &c__1, &a[(k + 1) * a_dim1 + 1], &c__1); z__1.r = a[i__2].r - z__2.r, z__1.i = a[i__2].i - z__2.i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = k - 1; zcopy_(&i__1, &a[(k + 1) * a_dim1 + 1], &c__1, &work[1], & c__1); i__1 = k - 1; z__1.r = -1., z__1.i = -0.; zhemv_(uplo, &i__1, &z__1, &a[a_offset], lda, &work[1], &c__1, &c_b2, &a[(k + 1) * a_dim1 + 1], &c__1); i__1 = k + 1 + (k + 1) * a_dim1; i__2 = k + 1 + (k + 1) * a_dim1; i__3 = k - 1; zdotc_(&z__2, &i__3, &work[1], &c__1, &a[(k + 1) * a_dim1 + 1] , &c__1); d__1 = z__2.r; z__1.r = a[i__2].r - d__1, z__1.i = a[i__2].i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; } kstep = 2; } kp = (i__1 = ipiv[k], abs(i__1)); if (kp != k) { /* Interchange rows and columns K and KP in the leading */ /* submatrix A(1:k+1,1:k+1) */ i__1 = kp - 1; zswap_(&i__1, &a[k * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1], & c__1); i__1 = k - 1; for (j = kp + 1; j <= i__1; ++j) { d_cnjg(&z__1, &a[j + k * a_dim1]); temp.r = z__1.r, temp.i = z__1.i; i__2 = j + k * a_dim1; d_cnjg(&z__1, &a[kp + j * a_dim1]); a[i__2].r = z__1.r, a[i__2].i = z__1.i; i__2 = kp + j * a_dim1; a[i__2].r = temp.r, a[i__2].i = temp.i; } i__1 = kp + k * a_dim1; d_cnjg(&z__1, &a[kp + k * a_dim1]); a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = k + k * a_dim1; temp.r = a[i__1].r, temp.i = a[i__1].i; i__1 = k + k * a_dim1; i__2 = kp + kp * a_dim1; a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i; i__1 = kp + kp * a_dim1; a[i__1].r = temp.r, a[i__1].i = temp.i; if (kstep == 2) { i__1 = k + (k + 1) * a_dim1; temp.r = a[i__1].r, temp.i = a[i__1].i; i__1 = k + (k + 1) * a_dim1; i__2 = kp + (k + 1) * a_dim1; a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i; i__1 = kp + (k + 1) * a_dim1; a[i__1].r = temp.r, a[i__1].i = temp.i; 
} } k += kstep; goto L30; L50: ; } else { /* Compute inv(A) from the factorization A = L*D*L'. */ /* K is the main loop index, increasing from 1 to N in steps of */ /* 1 or 2, depending on the size of the diagonal blocks. */ k = *n; L60: /* If K < 1, exit from loop. */ if (k < 1) { goto L80; } if (ipiv[k] > 0) { /* 1 x 1 diagonal block */ /* Invert the diagonal block. */ i__1 = k + k * a_dim1; i__2 = k + k * a_dim1; d__1 = 1. / a[i__2].r; a[i__1].r = d__1, a[i__1].i = 0.; /* Compute column K of the inverse. */ if (k < *n) { i__1 = *n - k; zcopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1); i__1 = *n - k; z__1.r = -1., z__1.i = -0.; zhemv_(uplo, &i__1, &z__1, &a[k + 1 + (k + 1) * a_dim1], lda, &work[1], &c__1, &c_b2, &a[k + 1 + k * a_dim1], &c__1); i__1 = k + k * a_dim1; i__2 = k + k * a_dim1; i__3 = *n - k; zdotc_(&z__2, &i__3, &work[1], &c__1, &a[k + 1 + k * a_dim1], &c__1); d__1 = z__2.r; z__1.r = a[i__2].r - d__1, z__1.i = a[i__2].i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; } kstep = 1; } else { /* 2 x 2 diagonal block */ /* Invert the diagonal block. */ t = z_abs(&a[k + (k - 1) * a_dim1]); i__1 = k - 1 + (k - 1) * a_dim1; ak = a[i__1].r / t; i__1 = k + k * a_dim1; akp1 = a[i__1].r / t; i__1 = k + (k - 1) * a_dim1; z__1.r = a[i__1].r / t, z__1.i = a[i__1].i / t; akkp1.r = z__1.r, akkp1.i = z__1.i; d__ = t * (ak * akp1 - 1.); i__1 = k - 1 + (k - 1) * a_dim1; d__1 = akp1 / d__; a[i__1].r = d__1, a[i__1].i = 0.; i__1 = k + k * a_dim1; d__1 = ak / d__; a[i__1].r = d__1, a[i__1].i = 0.; i__1 = k + (k - 1) * a_dim1; z__2.r = -akkp1.r, z__2.i = -akkp1.i; z__1.r = z__2.r / d__, z__1.i = z__2.i / d__; a[i__1].r = z__1.r, a[i__1].i = z__1.i; /* Compute columns K-1 and K of the inverse. */ if (k < *n) { i__1 = *n - k; zcopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1); i__1 = *n - k; z__1.r = -1., z__1.i = -0.; zhemv_(uplo, &i__1, &z__1, &a[k + 1 + (k + 1) * a_dim1], lda, &work[1], &c__1, &c_b2, &a[k + 1 + k * a_dim1], &c__1); i__1 = k + k * a_dim1; i__2 = k + k * a_dim1; i__3 = *n - k; zdotc_(&z__2, &i__3, &work[1], &c__1, &a[k + 1 + k * a_dim1], &c__1); d__1 = z__2.r; z__1.r = a[i__2].r - d__1, z__1.i = a[i__2].i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = k + (k - 1) * a_dim1; i__2 = k + (k - 1) * a_dim1; i__3 = *n - k; zdotc_(&z__2, &i__3, &a[k + 1 + k * a_dim1], &c__1, &a[k + 1 + (k - 1) * a_dim1], &c__1); z__1.r = a[i__2].r - z__2.r, z__1.i = a[i__2].i - z__2.i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = *n - k; zcopy_(&i__1, &a[k + 1 + (k - 1) * a_dim1], &c__1, &work[1], & c__1); i__1 = *n - k; z__1.r = -1., z__1.i = -0.; zhemv_(uplo, &i__1, &z__1, &a[k + 1 + (k + 1) * a_dim1], lda, &work[1], &c__1, &c_b2, &a[k + 1 + (k - 1) * a_dim1], &c__1); i__1 = k - 1 + (k - 1) * a_dim1; i__2 = k - 1 + (k - 1) * a_dim1; i__3 = *n - k; zdotc_(&z__2, &i__3, &work[1], &c__1, &a[k + 1 + (k - 1) * a_dim1], &c__1); d__1 = z__2.r; z__1.r = a[i__2].r - d__1, z__1.i = a[i__2].i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; } kstep = 2; } kp = (i__1 = ipiv[k], abs(i__1)); if (kp != k) { /* Interchange rows and columns K and KP in the trailing */ /* submatrix A(k-1:n,k-1:n) */ if (kp < *n) { i__1 = *n - kp; zswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + 1 + kp * a_dim1], &c__1); } i__1 = kp - 1; for (j = k + 1; j <= i__1; ++j) { d_cnjg(&z__1, &a[j + k * a_dim1]); temp.r = z__1.r, temp.i = z__1.i; i__2 = j + k * a_dim1; d_cnjg(&z__1, &a[kp + j * a_dim1]); a[i__2].r = z__1.r, a[i__2].i = z__1.i; i__2 = kp + j * a_dim1; a[i__2].r = temp.r, a[i__2].i = temp.i; } i__1 = kp + k * a_dim1; 
d_cnjg(&z__1, &a[kp + k * a_dim1]); a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = k + k * a_dim1; temp.r = a[i__1].r, temp.i = a[i__1].i; i__1 = k + k * a_dim1; i__2 = kp + kp * a_dim1; a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i; i__1 = kp + kp * a_dim1; a[i__1].r = temp.r, a[i__1].i = temp.i; if (kstep == 2) { i__1 = k + (k - 1) * a_dim1; temp.r = a[i__1].r, temp.i = a[i__1].i; i__1 = k + (k - 1) * a_dim1; i__2 = kp + (k - 1) * a_dim1; a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i; i__1 = kp + (k - 1) * a_dim1; a[i__1].r = temp.r, a[i__1].i = temp.i; } } k -= kstep; goto L60; L80: ; } return 0; /* End of ZHETRI */ } /* zhetri_ */
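/* Usage sketch for the documented ZHETRF -> ZHETRI workflow: factor the
   Hermitian indefinite matrix, then invert in place. This goes through the
   LAPACKE C interface, which is an assumption here (this file only provides
   the f2c-style entry points); LAPACKE allocates the WORK array that
   zhetri_ takes explicitly. */
#include <stdio.h>
#include <lapacke.h>

int main(void)
{
    /* 2x2 Hermitian indefinite matrix [ 1  i ; -i  -1 ], column-major. */
    lapack_complex_double a[4] = {
        lapack_make_complex_double(1.0, 0.0),   /* A(1,1) */
        lapack_make_complex_double(0.0, -1.0),  /* A(2,1) */
        lapack_make_complex_double(0.0, 1.0),   /* A(1,2), not referenced for 'L' */
        lapack_make_complex_double(-1.0, 0.0)   /* A(2,2) */
    };
    lapack_int n = 2, ipiv[2];
    lapack_int info = LAPACKE_zhetrf(LAPACK_COL_MAJOR, 'L', n, a, n, ipiv);
    if (info == 0) {
        info = LAPACKE_zhetri(LAPACK_COL_MAJOR, 'L', n, a, n, ipiv);
    }
    printf("info = %d\n", (int)info);  /* info > 0 means D(i,i) = 0 (singular) */
    return 0;
}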
/* Subroutine */ int zporfs_(char *uplo, integer *n, integer *nrhs, doublecomplex *a, integer *lda, doublecomplex *af, integer *ldaf, doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx, doublereal *ferr, doublereal *berr, doublecomplex *work, doublereal * rwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5; doublereal d__1, d__2, d__3, d__4; doublecomplex z__1; /* Builtin functions */ double d_imag(doublecomplex *); /* Local variables */ integer i__, j, k; doublereal s, xk; integer nz; doublereal eps; integer kase; doublereal safe1, safe2; extern logical lsame_(char *, char *); integer isave[3], count; extern /* Subroutine */ int zhemv_(char *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *); logical upper; extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *), zlacn2_( integer *, doublecomplex *, doublecomplex *, doublereal *, integer *, integer *); extern doublereal dlamch_(char *); doublereal safmin; extern /* Subroutine */ int xerbla_(char *, integer *); doublereal lstres; extern /* Subroutine */ int zpotrs_(char *, integer *, integer *, doublecomplex *, integer *, doublecomplex *, integer *, integer *); /* -- LAPACK computational routine (version 3.4.0) -- */ /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */ /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */ /* November 2011 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* ==================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. Local Arrays .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. Statement Functions .. */ /* .. */ /* .. Statement Function definitions .. */ /* .. */ /* .. Executable Statements .. */ /* Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; af_dim1 = *ldaf; af_offset = 1 + af_dim1; af -= af_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1; b -= b_offset; x_dim1 = *ldx; x_offset = 1 + x_dim1; x -= x_offset; --ferr; --berr; --work; --rwork; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*nrhs < 0) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } else if (*ldaf < max(1,*n)) { *info = -7; } else if (*ldb < max(1,*n)) { *info = -9; } else if (*ldx < max(1,*n)) { *info = -11; } if (*info != 0) { i__1 = -(*info); xerbla_("ZPORFS", &i__1); return 0; } /* Quick return if possible */ if (*n == 0 || *nrhs == 0) { i__1 = *nrhs; for (j = 1; j <= i__1; ++j) { ferr[j] = 0.; berr[j] = 0.; /* L10: */ } return 0; } /* NZ = maximum number of nonzero elements in each row of A, plus 1 */ nz = *n + 1; eps = dlamch_("Epsilon"); safmin = dlamch_("Safe minimum"); safe1 = nz * safmin; safe2 = safe1 / eps; /* Do for each right hand side */ i__1 = *nrhs; for (j = 1; j <= i__1; ++j) { count = 1; lstres = 3.; L20: /* Loop until stopping criterion is satisfied. 
*/ /* Compute residual R = B - A * X */ zcopy_(n, &b[j * b_dim1 + 1], &c__1, &work[1], &c__1); z__1.r = -1.; z__1.i = -0.; // , expr subst zhemv_(uplo, n, &z__1, &a[a_offset], lda, &x[j * x_dim1 + 1], &c__1, & c_b1, &work[1], &c__1); /* Compute componentwise relative backward error from formula */ /* max(i) ( f2c_abs(R(i)) / ( f2c_abs(A)*f2c_abs(X) + f2c_abs(B) )(i) ) */ /* where f2c_abs(Z) is the componentwise absolute value of the matrix */ /* or vector Z. If the i-th component of the denominator is less */ /* than SAFE2, then SAFE1 is added to the i-th components of the */ /* numerator and denominator before dividing. */ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = i__ + j * b_dim1; rwork[i__] = (d__1 = b[i__3].r, f2c_abs(d__1)) + (d__2 = d_imag(&b[ i__ + j * b_dim1]), f2c_abs(d__2)); /* L30: */ } /* Compute f2c_abs(A)*f2c_abs(X) + f2c_abs(B). */ if (upper) { i__2 = *n; for (k = 1; k <= i__2; ++k) { s = 0.; i__3 = k + j * x_dim1; xk = (d__1 = x[i__3].r, f2c_abs(d__1)) + (d__2 = d_imag(&x[k + j * x_dim1]), f2c_abs(d__2)); i__3 = k - 1; for (i__ = 1; i__ <= i__3; ++i__) { i__4 = i__ + k * a_dim1; rwork[i__] += ((d__1 = a[i__4].r, f2c_abs(d__1)) + (d__2 = d_imag(&a[i__ + k * a_dim1]), f2c_abs(d__2))) * xk; i__4 = i__ + k * a_dim1; i__5 = i__ + j * x_dim1; s += ((d__1 = a[i__4].r, f2c_abs(d__1)) + (d__2 = d_imag(&a[ i__ + k * a_dim1]), f2c_abs(d__2))) * ((d__3 = x[i__5] .r, f2c_abs(d__3)) + (d__4 = d_imag(&x[i__ + j * x_dim1]), f2c_abs(d__4))); /* L40: */ } i__3 = k + k * a_dim1; rwork[k] = rwork[k] + (d__1 = a[i__3].r, f2c_abs(d__1)) * xk + s; /* L50: */ } } else { i__2 = *n; for (k = 1; k <= i__2; ++k) { s = 0.; i__3 = k + j * x_dim1; xk = (d__1 = x[i__3].r, f2c_abs(d__1)) + (d__2 = d_imag(&x[k + j * x_dim1]), f2c_abs(d__2)); i__3 = k + k * a_dim1; rwork[k] += (d__1 = a[i__3].r, f2c_abs(d__1)) * xk; i__3 = *n; for (i__ = k + 1; i__ <= i__3; ++i__) { i__4 = i__ + k * a_dim1; rwork[i__] += ((d__1 = a[i__4].r, f2c_abs(d__1)) + (d__2 = d_imag(&a[i__ + k * a_dim1]), f2c_abs(d__2))) * xk; i__4 = i__ + k * a_dim1; i__5 = i__ + j * x_dim1; s += ((d__1 = a[i__4].r, f2c_abs(d__1)) + (d__2 = d_imag(&a[ i__ + k * a_dim1]), f2c_abs(d__2))) * ((d__3 = x[i__5] .r, f2c_abs(d__3)) + (d__4 = d_imag(&x[i__ + j * x_dim1]), f2c_abs(d__4))); /* L60: */ } rwork[k] += s; /* L70: */ } } s = 0.; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { if (rwork[i__] > safe2) { /* Computing MAX */ i__3 = i__; d__3 = s; d__4 = ((d__1 = work[i__3].r, f2c_abs(d__1)) + (d__2 = d_imag(&work[i__]), f2c_abs(d__2))) / rwork[i__]; // , expr subst s = max(d__3,d__4); } else { /* Computing MAX */ i__3 = i__; d__3 = s; d__4 = ((d__1 = work[i__3].r, f2c_abs(d__1)) + (d__2 = d_imag(&work[i__]), f2c_abs(d__2)) + safe1) / (rwork[i__] + safe1); // , expr subst s = max(d__3,d__4); } /* L80: */ } berr[j] = s; /* Test stopping criterion. Continue iterating if */ /* 1) The residual BERR(J) is larger than machine epsilon, and */ /* 2) BERR(J) decreased by at least a factor of 2 during the */ /* last iteration, and */ /* 3) At most ITMAX iterations tried. */ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) { /* Update solution and try again. */ zpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[1], n, info); zaxpy_(n, &c_b1, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1); lstres = berr[j]; ++count; goto L20; } /* Bound error from formula */ /* norm(X - XTRUE) / norm(X) .le. 
FERR = */ /* norm( f2c_abs(inv(A))* */ /* ( f2c_abs(R) + NZ*EPS*( f2c_abs(A)*f2c_abs(X)+f2c_abs(B) ))) / norm(X) */ /* where */ /* norm(Z) is the magnitude of the largest component of Z */ /* inv(A) is the inverse of A */ /* f2c_abs(Z) is the componentwise absolute value of the matrix or */ /* vector Z */ /* NZ is the maximum number of nonzeros in any row of A, plus 1 */ /* EPS is machine epsilon */ /* The i-th component of f2c_abs(R)+NZ*EPS*(f2c_abs(A)*f2c_abs(X)+f2c_abs(B)) */ /* is incremented by SAFE1 if the i-th component of */ /* f2c_abs(A)*f2c_abs(X) + f2c_abs(B) is less than SAFE2. */ /* Use ZLACN2 to estimate the infinity-norm of the matrix */ /* inv(A) * diag(W), */ /* where W = f2c_abs(R) + NZ*EPS*( f2c_abs(A)*f2c_abs(X)+f2c_abs(B) ))) */ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { if (rwork[i__] > safe2) { i__3 = i__; rwork[i__] = (d__1 = work[i__3].r, f2c_abs(d__1)) + (d__2 = d_imag(&work[i__]), f2c_abs(d__2)) + nz * eps * rwork[i__] ; } else { i__3 = i__; rwork[i__] = (d__1 = work[i__3].r, f2c_abs(d__1)) + (d__2 = d_imag(&work[i__]), f2c_abs(d__2)) + nz * eps * rwork[i__] + safe1; } /* L90: */ } kase = 0; L100: zlacn2_(n, &work[*n + 1], &work[1], &ferr[j], &kase, isave); if (kase != 0) { if (kase == 1) { /* Multiply by diag(W)*inv(A**H). */ zpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[1], n, info); i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = i__; i__4 = i__; i__5 = i__; z__1.r = rwork[i__4] * work[i__5].r; z__1.i = rwork[i__4] * work[i__5].i; // , expr subst work[i__3].r = z__1.r; work[i__3].i = z__1.i; // , expr subst /* L110: */ } } else if (kase == 2) { /* Multiply by inv(A)*diag(W). */ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = i__; i__4 = i__; i__5 = i__; z__1.r = rwork[i__4] * work[i__5].r; z__1.i = rwork[i__4] * work[i__5].i; // , expr subst work[i__3].r = z__1.r; work[i__3].i = z__1.i; // , expr subst /* L120: */ } zpotrs_(uplo, n, &c__1, &af[af_offset], ldaf, &work[1], n, info); } goto L100; } /* Normalize error. */ lstres = 0.; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { /* Computing MAX */ i__3 = i__ + j * x_dim1; d__3 = lstres; d__4 = (d__1 = x[i__3].r, f2c_abs(d__1)) + (d__2 = d_imag(&x[i__ + j * x_dim1]), f2c_abs(d__2)); // , expr subst lstres = max(d__3,d__4); /* L130: */ } if (lstres != 0.) { ferr[j] /= lstres; } /* L140: */ } return 0; /* End of ZPORFS */ }
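/* Usage sketch for ZPORFS: factor with ZPOTRF, solve with ZPOTRS, then
   refine the solution and obtain the FERR/BERR bounds described above.
   Again via LAPACKE, which is an assumption -- the f2c-level zporfs_ in
   this file takes the WORK/RWORK arrays explicitly. */
#include <stdio.h>
#include <string.h>
#include <lapacke.h>

int main(void)
{
    /* Hermitian positive definite A = [ 4  i ; -i  3 ], column-major. */
    lapack_complex_double a[4] = {
        lapack_make_complex_double(4.0, 0.0),
        lapack_make_complex_double(0.0, -1.0),
        lapack_make_complex_double(0.0, 1.0),
        lapack_make_complex_double(3.0, 0.0)
    };
    lapack_complex_double af[4], b[2], x[2];
    double ferr[1], berr[1];
    lapack_int n = 2, nrhs = 1, info;

    b[0] = lapack_make_complex_double(1.0, 0.0);
    b[1] = lapack_make_complex_double(0.0, 0.0);
    memcpy(af, a, sizeof a);       /* AF will hold the Cholesky factor */
    memcpy(x, b, sizeof b);        /* X starts as the unrefined solution */

    info = LAPACKE_zpotrf(LAPACK_COL_MAJOR, 'L', n, af, n);
    if (info == 0)
        info = LAPACKE_zpotrs(LAPACK_COL_MAJOR, 'L', n, nrhs, af, n, x, n);
    if (info == 0)
        info = LAPACKE_zporfs(LAPACK_COL_MAJOR, 'L', n, nrhs, a, n, af, n,
                              b, n, x, n, ferr, berr);
    printf("info = %d, ferr = %g, berr = %g\n", (int)info, ferr[0], berr[0]);
    return 0;
}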
/* Subroutine */ int zlatrd_(char *uplo, integer *n, integer *nb, doublecomplex *a, integer *lda, doublereal *e, doublecomplex *tau, doublecomplex *w, integer *ldw) { /* System generated locals */ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3; doublereal d__1; doublecomplex z__1, z__2, z__3, z__4; /* Local variables */ integer i__, iw; doublecomplex alpha; extern logical lsame_(char *, char *); extern /* Subroutine */ int zscal_(integer *, doublecomplex *, doublecomplex *, integer *); extern /* Double Complex */ VOID zdotc_f2c_(doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *); extern /* Subroutine */ int zgemv_(char *, integer *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *), zhemv_(char *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *), zlacgv_(integer *, doublecomplex *, integer *); /* -- LAPACK auxiliary routine (version 3.4.2) -- */ /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */ /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */ /* September 2012 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. 
*/ /* Quick return if possible */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; --e; --tau; w_dim1 = *ldw; w_offset = 1 + w_dim1; w -= w_offset; /* Function Body */ if (*n <= 0) { return 0; } if (lsame_(uplo, "U")) { /* Reduce last NB columns of upper triangle */ i__1 = *n - *nb + 1; for (i__ = *n; i__ >= i__1; --i__) { iw = i__ - *n + *nb; if (i__ < *n) { /* Update A(1:i,i) */ i__2 = i__ + i__ * a_dim1; i__3 = i__ + i__ * a_dim1; d__1 = a[i__3].r; a[i__2].r = d__1; a[i__2].i = 0.; // , expr subst i__2 = *n - i__; zlacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw); i__2 = *n - i__; z__1.r = -1.; z__1.i = -0.; // , expr subst zgemv_("No transpose", &i__, &i__2, &z__1, &a[(i__ + 1) * a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, & c_b2, &a[i__ * a_dim1 + 1], &c__1); i__2 = *n - i__; zlacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw); i__2 = *n - i__; zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda); i__2 = *n - i__; z__1.r = -1.; z__1.i = -0.; // , expr subst zgemv_("No transpose", &i__, &i__2, &z__1, &w[(iw + 1) * w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, & c_b2, &a[i__ * a_dim1 + 1], &c__1); i__2 = *n - i__; zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda); i__2 = i__ + i__ * a_dim1; i__3 = i__ + i__ * a_dim1; d__1 = a[i__3].r; a[i__2].r = d__1; a[i__2].i = 0.; // , expr subst } if (i__ > 1) { /* Generate elementary reflector H(i) to annihilate */ /* A(1:i-2,i) */ i__2 = i__ - 1 + i__ * a_dim1; alpha.r = a[i__2].r; alpha.i = a[i__2].i; // , expr subst i__2 = i__ - 1; zlarfg_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &tau[i__ - 1]); i__2 = i__ - 1; e[i__2] = alpha.r; i__2 = i__ - 1 + i__ * a_dim1; a[i__2].r = 1.; a[i__2].i = 0.; // , expr subst /* Compute W(1:i-1,i) */ i__2 = i__ - 1; zhemv_("Upper", &i__2, &c_b2, &a[a_offset], lda, &a[i__ * a_dim1 + 1], &c__1, &c_b1, &w[iw * w_dim1 + 1], &c__1); if (i__ < *n) { i__2 = i__ - 1; i__3 = *n - i__; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[(iw + 1) * w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], & c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; z__1.r = -1.; z__1.i = -0.; // , expr subst zgemv_("No transpose", &i__2, &i__3, &z__1, &a[(i__ + 1) * a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], & c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[( i__ + 1) * a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], &c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; z__1.r = -1.; z__1.i = -0.; // , expr subst zgemv_("No transpose", &i__2, &i__3, &z__1, &w[(iw + 1) * w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], & c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1); } i__2 = i__ - 1; zscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1); z__3.r = -.5; z__3.i = -0.; // , expr subst i__2 = i__ - 1; z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i; z__2.i = z__3.r * tau[i__2].i + z__3.i * tau[i__2].r; // , expr subst i__3 = i__ - 1; zdotc_f2c_(&z__4, &i__3, &w[iw * w_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &c__1); z__1.r = z__2.r * z__4.r - z__2.i * z__4.i; z__1.i = z__2.r * z__4.i + z__2.i * z__4.r; // , expr subst alpha.r = z__1.r; alpha.i = z__1.i; // , expr subst i__2 = i__ - 1; zaxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw * w_dim1 + 1], &c__1); } /* L10: */ } } else { /* Reduce first NB columns of lower triangle */ i__1 = *nb; for (i__ = 1; i__ <= i__1; ++i__) { /* Update A(i:n,i) */ i__2 = i__ + i__ * a_dim1; i__3 = i__ + i__ * a_dim1; d__1 = a[i__3].r; 
a[i__2].r = d__1; a[i__2].i = 0.; // , expr subst i__2 = i__ - 1; zlacgv_(&i__2, &w[i__ + w_dim1], ldw); i__2 = *n - i__ + 1; i__3 = i__ - 1; z__1.r = -1.; z__1.i = -0.; // , expr subst zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + a_dim1], lda, &w[i__ + w_dim1], ldw, &c_b2, &a[i__ + i__ * a_dim1], & c__1); i__2 = i__ - 1; zlacgv_(&i__2, &w[i__ + w_dim1], ldw); i__2 = i__ - 1; zlacgv_(&i__2, &a[i__ + a_dim1], lda); i__2 = *n - i__ + 1; i__3 = i__ - 1; z__1.r = -1.; z__1.i = -0.; // , expr subst zgemv_("No transpose", &i__2, &i__3, &z__1, &w[i__ + w_dim1], ldw, &a[i__ + a_dim1], lda, &c_b2, &a[i__ + i__ * a_dim1], & c__1); i__2 = i__ - 1; zlacgv_(&i__2, &a[i__ + a_dim1], lda); i__2 = i__ + i__ * a_dim1; i__3 = i__ + i__ * a_dim1; d__1 = a[i__3].r; a[i__2].r = d__1; a[i__2].i = 0.; // , expr subst if (i__ < *n) { /* Generate elementary reflector H(i) to annihilate */ /* A(i+2:n,i) */ i__2 = i__ + 1 + i__ * a_dim1; alpha.r = a[i__2].r; alpha.i = a[i__2].i; // , expr subst i__2 = *n - i__; /* Computing MIN */ i__3 = i__ + 2; zlarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[i__]); i__2 = i__; e[i__2] = alpha.r; i__2 = i__ + 1 + i__ * a_dim1; a[i__2].r = 1.; a[i__2].i = 0.; // , expr subst /* Compute W(i+1:n,i) */ i__2 = *n - i__; zhemv_("Lower", &i__2, &c_b2, &a[i__ + 1 + (i__ + 1) * a_dim1] , lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b1, &w[ i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[i__ + 1 + w_dim1], ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, & c_b1, &w[i__ * w_dim1 + 1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; z__1.r = -1.; z__1.i = -0.; // , expr subst zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 + a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[ i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, & c_b1, &w[i__ * w_dim1 + 1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; z__1.r = -1.; z__1.i = -0.; // , expr subst zgemv_("No transpose", &i__2, &i__3, &z__1, &w[i__ + 1 + w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[ i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; zscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1); z__3.r = -.5; z__3.i = -0.; // , expr subst i__2 = i__; z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i; z__2.i = z__3.r * tau[i__2].i + z__3.i * tau[i__2].r; // , expr subst i__3 = *n - i__; zdotc_f2c_(&z__4, &i__3, &w[i__ + 1 + i__ * w_dim1], &c__1, &a[ i__ + 1 + i__ * a_dim1], &c__1); z__1.r = z__2.r * z__4.r - z__2.i * z__4.i; z__1.i = z__2.r * z__4.i + z__2.i * z__4.r; // , expr subst alpha.r = z__1.r; alpha.i = z__1.i; // , expr subst i__2 = *n - i__; zaxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[ i__ + 1 + i__ * w_dim1], &c__1); } /* L20: */ } } return 0; /* End of ZLATRD */ }
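/* Sketch of how a ZLATRD panel is consumed by the blocked reduction: after
   zlatrd_ returns V (stored below the panel in A) and W, the part of A it
   left unreduced receives the rank-2k update A := A - V*W' - W*V'. The
   CBLAS call below is an assumption (this file declares only the
   Fortran-style BLAS), and the indexing shows the shape of the UPLO = 'L'
   update for the first panel of width nb, where V occupies rows nb+1..n of
   A(:,1:nb); trailing_update is a hypothetical name. */
#include <cblas.h>
#include <complex.h>

static void trailing_update(int n, int nb, double complex *a, int lda,
                            const double complex *w, int ldw)
{
    const double complex minus_one = -1.0;
    /* C := alpha*A*B' + conj(alpha)*B*A' + beta*C with alpha = -1, beta = 1:
       the trailing (n-nb)-by-(n-nb) corner of A loses V*W' + W*V'. */
    cblas_zher2k(CblasColMajor, CblasLower, CblasNoTrans,
                 n - nb, nb, &minus_one,
                 &a[nb], lda,                      /* V = A(nb+1:n, 1:nb) */
                 &w[nb], ldw,                      /* W(nb+1:n, 1:nb)     */
                 1.0,
                 &a[nb + (size_t)nb * lda], lda);  /* A(nb+1:n, nb+1:n)   */
}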
/* Subroutine */ int zherfs_(char *uplo, integer *n, integer *nrhs, doublecomplex *a, integer *lda, doublecomplex *af, integer *ldaf, integer *ipiv, doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx, doublereal *ferr, doublereal *berr, doublecomplex *work, doublereal *rwork, integer *info) { /* -- LAPACK routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University September 30, 1994 Purpose ======= ZHERFS improves the computed solution to a system of linear equations when the coefficient matrix is Hermitian indefinite, and provides error bounds and backward error estimates for the solution. Arguments ========= UPLO (input) CHARACTER*1 = 'U': Upper triangle of A is stored; = 'L': Lower triangle of A is stored. N (input) INTEGER The order of the matrix A. N >= 0. NRHS (input) INTEGER The number of right hand sides, i.e., the number of columns of the matrices B and X. NRHS >= 0. A (input) COMPLEX*16 array, dimension (LDA,N) The Hermitian matrix A. If UPLO = 'U', the leading N-by-N upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading N-by-N lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). AF (input) COMPLEX*16 array, dimension (LDAF,N) The factored form of the matrix A. AF contains the block diagonal matrix D and the multipliers used to obtain the factor U or L from the factorization A = U*D*U**H or A = L*D*L**H as computed by ZHETRF. LDAF (input) INTEGER The leading dimension of the array AF. LDAF >= max(1,N). IPIV (input) INTEGER array, dimension (N) Details of the interchanges and the block structure of D as determined by ZHETRF. B (input) COMPLEX*16 array, dimension (LDB,NRHS) The right hand side matrix B. LDB (input) INTEGER The leading dimension of the array B. LDB >= max(1,N). X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) On entry, the solution matrix X, as computed by ZHETRS. On exit, the improved solution matrix X. LDX (input) INTEGER The leading dimension of the array X. LDX >= max(1,N). FERR (output) DOUBLE PRECISION array, dimension (NRHS) The estimated forward error bound for each solution vector X(j) (the j-th column of the solution matrix X). If XTRUE is the true solution corresponding to X(j), FERR(j) is an estimated upper bound for the magnitude of the largest element in (X(j) - XTRUE) divided by the magnitude of the largest element in X(j). The estimate is as reliable as the estimate for RCOND, and is almost always a slight overestimate of the true error. BERR (output) DOUBLE PRECISION array, dimension (NRHS) The componentwise relative backward error of each solution vector X(j) (i.e., the smallest relative change in any element of A or B that makes X(j) an exact solution). WORK (workspace) COMPLEX*16 array, dimension (2*N) RWORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Internal Parameters =================== ITMAX is the maximum number of steps of iterative refinement. ===================================================================== Test the input parameters. 
Parameter adjustments */ /* Table of constant values */ static doublecomplex c_b1 = {1.,0.}; static integer c__1 = 1; /* System generated locals */ integer a_dim1, a_offset, af_dim1, af_offset, b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5; doublereal d__1, d__2, d__3, d__4; doublecomplex z__1; /* Builtin functions */ double d_imag(doublecomplex *); /* Local variables */ static integer kase; static doublereal safe1, safe2; static integer i__, j, k; static doublereal s; extern logical lsame_(char *, char *); static integer count; extern /* Subroutine */ int zhemv_(char *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *); static logical upper; extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, doublecomplex *, integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *); extern doublereal dlamch_(char *); static doublereal xk; static integer nz; static doublereal safmin; extern /* Subroutine */ int xerbla_(char *, integer *), zlacon_( integer *, doublecomplex *, doublecomplex *, doublereal *, integer *); static doublereal lstres; extern /* Subroutine */ int zhetrs_(char *, integer *, integer *, doublecomplex *, integer *, integer *, doublecomplex *, integer *, integer *); static doublereal eps; #define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1 #define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)] #define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1 #define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)] #define x_subscr(a_1,a_2) (a_2)*x_dim1 + a_1 #define x_ref(a_1,a_2) x[x_subscr(a_1,a_2)] a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; af_dim1 = *ldaf; af_offset = 1 + af_dim1 * 1; af -= af_offset; --ipiv; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; x_dim1 = *ldx; x_offset = 1 + x_dim1 * 1; x -= x_offset; --ferr; --berr; --work; --rwork; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*nrhs < 0) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } else if (*ldaf < max(1,*n)) { *info = -7; } else if (*ldb < max(1,*n)) { *info = -10; } else if (*ldx < max(1,*n)) { *info = -12; } if (*info != 0) { i__1 = -(*info); xerbla_("ZHERFS", &i__1); return 0; } /* Quick return if possible */ if (*n == 0 || *nrhs == 0) { i__1 = *nrhs; for (j = 1; j <= i__1; ++j) { ferr[j] = 0.; berr[j] = 0.; /* L10: */ } return 0; } /* NZ = maximum number of nonzero elements in each row of A, plus 1 */ nz = *n + 1; eps = dlamch_("Epsilon"); safmin = dlamch_("Safe minimum"); safe1 = nz * safmin; safe2 = safe1 / eps; /* Do for each right hand side */ i__1 = *nrhs; for (j = 1; j <= i__1; ++j) { count = 1; lstres = 3.; L20: /* Loop until stopping criterion is satisfied. Compute residual R = B - A * X */ zcopy_(n, &b_ref(1, j), &c__1, &work[1], &c__1); z__1.r = -1., z__1.i = 0.; zhemv_(uplo, n, &z__1, &a[a_offset], lda, &x_ref(1, j), &c__1, &c_b1, &work[1], &c__1); /* Compute componentwise relative backward error from formula max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) where abs(Z) is the componentwise absolute value of the matrix or vector Z. If the i-th component of the denominator is less than SAFE2, then SAFE1 is added to the i-th components of the numerator and denominator before dividing. 
*/ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = b_subscr(i__, j); rwork[i__] = (d__1 = b[i__3].r, abs(d__1)) + (d__2 = d_imag(& b_ref(i__, j)), abs(d__2)); /* L30: */ } /* Compute abs(A)*abs(X) + abs(B). */ if (upper) { i__2 = *n; for (k = 1; k <= i__2; ++k) { s = 0.; i__3 = x_subscr(k, j); xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x_ref(k, j)), abs(d__2)); i__3 = k - 1; for (i__ = 1; i__ <= i__3; ++i__) { i__4 = a_subscr(i__, k); rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a_ref(i__, k)), abs(d__2))) * xk; i__4 = a_subscr(i__, k); i__5 = x_subscr(i__, j); s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(& a_ref(i__, k)), abs(d__2))) * ((d__3 = x[i__5].r, abs(d__3)) + (d__4 = d_imag(&x_ref(i__, j)), abs( d__4))); /* L40: */ } i__3 = a_subscr(k, k); rwork[k] = rwork[k] + (d__1 = a[i__3].r, abs(d__1)) * xk + s; /* L50: */ } } else { i__2 = *n; for (k = 1; k <= i__2; ++k) { s = 0.; i__3 = x_subscr(k, j); xk = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x_ref(k, j)), abs(d__2)); i__3 = a_subscr(k, k); rwork[k] += (d__1 = a[i__3].r, abs(d__1)) * xk; i__3 = *n; for (i__ = k + 1; i__ <= i__3; ++i__) { i__4 = a_subscr(i__, k); rwork[i__] += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(&a_ref(i__, k)), abs(d__2))) * xk; i__4 = a_subscr(i__, k); i__5 = x_subscr(i__, j); s += ((d__1 = a[i__4].r, abs(d__1)) + (d__2 = d_imag(& a_ref(i__, k)), abs(d__2))) * ((d__3 = x[i__5].r, abs(d__3)) + (d__4 = d_imag(&x_ref(i__, j)), abs( d__4))); /* L60: */ } rwork[k] += s; /* L70: */ } } s = 0.; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { if (rwork[i__] > safe2) { /* Computing MAX */ i__3 = i__; d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 = d_imag(&work[i__]), abs(d__2))) / rwork[i__]; s = max(d__3,d__4); } else { /* Computing MAX */ i__3 = i__; d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 = d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__] + safe1); s = max(d__3,d__4); } /* L80: */ } berr[j] = s; /* Test stopping criterion. Continue iterating if 1) The residual BERR(J) is larger than machine epsilon, and 2) BERR(J) decreased by at least a factor of 2 during the last iteration, and 3) At most ITMAX iterations tried. */ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) { /* Update solution and try again. */ zhetrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[1], n, info); zaxpy_(n, &c_b1, &work[1], &c__1, &x_ref(1, j), &c__1); lstres = berr[j]; ++count; goto L20; } /* Bound error from formula norm(X - XTRUE) / norm(X) .le. FERR = norm( abs(inv(A))* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) where norm(Z) is the magnitude of the largest component of Z inv(A) is the inverse of A abs(Z) is the componentwise absolute value of the matrix or vector Z NZ is the maximum number of nonzeros in any row of A, plus 1 EPS is machine epsilon The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) is incremented by SAFE1 if the i-th component of abs(A)*abs(X) + abs(B) is less than SAFE2. 
Use ZLACON to estimate the infinity-norm of the matrix inv(A) * diag(W), where W = abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) */ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { if (rwork[i__] > safe2) { i__3 = i__; rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 = d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__] ; } else { i__3 = i__; rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 = d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__] + safe1; } /* L90: */ } kase = 0; L100: zlacon_(n, &work[*n + 1], &work[1], &ferr[j], &kase); if (kase != 0) { if (kase == 1) { /* Multiply by diag(W)*inv(A'). */ zhetrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[ 1], n, info); i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = i__; i__4 = i__; i__5 = i__; z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4] * work[i__5].i; work[i__3].r = z__1.r, work[i__3].i = z__1.i; /* L110: */ } } else if (kase == 2) { /* Multiply by inv(A)*diag(W). */ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = i__; i__4 = i__; i__5 = i__; z__1.r = rwork[i__4] * work[i__5].r, z__1.i = rwork[i__4] * work[i__5].i; work[i__3].r = z__1.r, work[i__3].i = z__1.i; /* L120: */ } zhetrs_(uplo, n, &c__1, &af[af_offset], ldaf, &ipiv[1], &work[ 1], n, info); } goto L100; } /* Normalize error. */ lstres = 0.; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { /* Computing MAX */ i__3 = x_subscr(i__, j); d__3 = lstres, d__4 = (d__1 = x[i__3].r, abs(d__1)) + (d__2 = d_imag(&x_ref(i__, j)), abs(d__2)); lstres = max(d__3,d__4); /* L130: */ } if (lstres != 0.) { ferr[j] /= lstres; } /* L140: */ } return 0; /* End of ZHERFS */ } /* zherfs_ */
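/* The BERR quantity computed above is max(i) ( abs(R(i)) / ( abs(A)*abs(X)
   + abs(B) )(i) ) with R = B - A*X, where abs(z) = |Re z| + |Im z|. Below
   is a standalone sketch that evaluates this formula directly for one
   right-hand side, with A stored full in column-major order (the routine
   itself walks only one triangle) and the SAFE1/SAFE2 guards omitted for
   clarity; the helper names are hypothetical. */
#include <complex.h>
#include <math.h>

static double cabs1(double complex z)
{
    return fabs(creal(z)) + fabs(cimag(z));  /* componentwise absolute value */
}

static double componentwise_berr(int n, const double complex *a, int lda,
                                 const double complex *x,
                                 const double complex *b)
{
    double berr = 0.0;
    for (int i = 0; i < n; ++i) {
        double complex r = b[i];     /* R(i) = B(i) - sum_j A(i,j)*X(j) */
        double denom = cabs1(b[i]);  /* ( abs(A)*abs(X) + abs(B) )(i)   */
        for (int j = 0; j < n; ++j) {
            r -= a[i + (size_t)j * lda] * x[j];
            denom += cabs1(a[i + (size_t)j * lda]) * cabs1(x[j]);
        }
        if (denom > 0.0 && cabs1(r) / denom > berr) {
            berr = cabs1(r) / denom;
        }
    }
    return berr;
}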
int zhetd2_(char *uplo, int *n, doublecomplex *a, int *lda, double *d__, double *e, doublecomplex *tau, int *info) { /* System generated locals */ int a_dim1, a_offset, i__1, i__2, i__3; double d__1; doublecomplex z__1, z__2, z__3, z__4; /* Local variables */ int i__; doublecomplex taui; extern int zher2_(char *, int *, doublecomplex *, doublecomplex *, int *, doublecomplex *, int *, doublecomplex *, int *); doublecomplex alpha; extern int lsame_(char *, char *); extern /* Double Complex */ VOID zdotc_(doublecomplex *, int *, doublecomplex *, int *, doublecomplex *, int *); extern int zhemv_(char *, int *, doublecomplex *, doublecomplex *, int *, doublecomplex *, int *, doublecomplex *, doublecomplex *, int *); int upper; extern int zaxpy_(int *, doublecomplex *, doublecomplex *, int *, doublecomplex *, int *), xerbla_( char *, int *), zlarfg_(int *, doublecomplex *, doublecomplex *, int *, doublecomplex *); /* -- LAPACK routine (version 3.2) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZHETD2 reduces a complex Hermitian matrix A to real symmetric */ /* tridiagonal form T by a unitary similarity transformation: */ /* Q' * A * Q = T. */ /* Arguments */ /* ========= */ /* UPLO (input) CHARACTER*1 */ /* Specifies whether the upper or lower triangular part of the */ /* Hermitian matrix A is stored: */ /* = 'U': Upper triangular */ /* = 'L': Lower triangular */ /* N (input) INTEGER */ /* The order of the matrix A. N >= 0. */ /* A (input/output) COMPLEX*16 array, dimension (LDA,N) */ /* On entry, the Hermitian matrix A. If UPLO = 'U', the leading */ /* n-by-n upper triangular part of A contains the upper */ /* triangular part of the matrix A, and the strictly lower */ /* triangular part of A is not referenced. If UPLO = 'L', the */ /* leading n-by-n lower triangular part of A contains the lower */ /* triangular part of the matrix A, and the strictly upper */ /* triangular part of A is not referenced. */ /* On exit, if UPLO = 'U', the diagonal and first superdiagonal */ /* of A are overwritten by the corresponding elements of the */ /* tridiagonal matrix T, and the elements above the first */ /* superdiagonal, with the array TAU, represent the unitary */ /* matrix Q as a product of elementary reflectors; if UPLO */ /* = 'L', the diagonal and first subdiagonal of A are over- */ /* written by the corresponding elements of the tridiagonal */ /* matrix T, and the elements below the first subdiagonal, with */ /* the array TAU, represent the unitary matrix Q as a product */ /* of elementary reflectors. See Further Details. */ /* LDA (input) INTEGER */ /* The leading dimension of the array A. LDA >= MAX(1,N). */ /* D (output) DOUBLE PRECISION array, dimension (N) */ /* The diagonal elements of the tridiagonal matrix T: */ /* D(i) = A(i,i). */ /* E (output) DOUBLE PRECISION array, dimension (N-1) */ /* The off-diagonal elements of the tridiagonal matrix T: */ /* E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */ /* TAU (output) COMPLEX*16 array, dimension (N-1) */ /* The scalar factors of the elementary reflectors (see Further */ /* Details). */ /* INFO (output) INTEGER */ /* = 0: successful exit */ /* < 0: if INFO = -i, the i-th argument had an illegal value. */ /* Further Details */ /* =============== */ /* If UPLO = 'U', the matrix Q is represented as a product of elementary */ /* reflectors */ /* Q = H(n-1) . . . H(2) H(1).
*/ /* Each H(i) has the form */ /* H(i) = I - tau * v * v' */ /* where tau is a complex scalar, and v is a complex vector with */ /* v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in */ /* A(1:i-1,i+1), and tau in TAU(i). */ /* If UPLO = 'L', the matrix Q is represented as a product of elementary */ /* reflectors */ /* Q = H(1) H(2) . . . H(n-1). */ /* Each H(i) has the form */ /* H(i) = I - tau * v * v' */ /* where tau is a complex scalar, and v is a complex vector with */ /* v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), */ /* and tau in TAU(i). */ /* The contents of A on exit are illustrated by the following examples */ /* with n = 5: */
/* if UPLO = 'U':                       if UPLO = 'L':              */
/*   (  d   e   v2  v3  v4 )              (  d                  )   */
/*   (      d   e   v3  v4 )              (  e   d              )   */
/*   (          d   e   v4 )              (  v1  e   d          )   */
/*   (              d   e  )              (  v1  v2  e   d      )   */
/*   (                  d  )              (  v1  v2  v3  e   d  )   */
/* where d and e denote diagonal and off-diagonal elements of T, and vi */ /* denotes an element of the vector defining H(i). */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; --d__; --e; --tau; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < MAX(1,*n)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("ZHETD2", &i__1); return 0; } /* Quick return if possible */ if (*n <= 0) { return 0; } if (upper) { /* Reduce the upper triangle of A */ i__1 = *n + *n * a_dim1; i__2 = *n + *n * a_dim1; d__1 = a[i__2].r; a[i__1].r = d__1, a[i__1].i = 0.; for (i__ = *n - 1; i__ >= 1; --i__) { /* Generate elementary reflector H(i) = I - tau * v * v' */ /* to annihilate A(1:i-1,i+1) */ i__1 = i__ + (i__ + 1) * a_dim1; alpha.r = a[i__1].r, alpha.i = a[i__1].i; zlarfg_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &taui); i__1 = i__; e[i__1] = alpha.r; if (taui.r != 0. || taui.i != 0.)
{ /* Apply H(i) from both sides to A(1:i,1:i) */ i__1 = i__ + (i__ + 1) * a_dim1; a[i__1].r = 1., a[i__1].i = 0.; /* Compute x := tau * A * v storing x in TAU(1:i) */ zhemv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) * a_dim1 + 1], &c__1, &c_b2, &tau[1], &c__1); /* Compute w := x - 1/2 * tau * (x'*v) * v */ z__3.r = -.5, z__3.i = -0.; z__2.r = z__3.r * taui.r - z__3.i * taui.i, z__2.i = z__3.r * taui.i + z__3.i * taui.r; zdotc_(&z__4, &i__, &tau[1], &c__1, &a[(i__ + 1) * a_dim1 + 1] , &c__1); z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i + z__2.i * z__4.r; alpha.r = z__1.r, alpha.i = z__1.i; zaxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[ 1], &c__1); /* Apply the transformation as a rank-2 update: */ /* A := A - v * w' - w * v' */ z__1.r = -1., z__1.i = -0.; zher2_(uplo, &i__, &z__1, &a[(i__ + 1) * a_dim1 + 1], &c__1, & tau[1], &c__1, &a[a_offset], lda); } else { i__1 = i__ + i__ * a_dim1; i__2 = i__ + i__ * a_dim1; d__1 = a[i__2].r; a[i__1].r = d__1, a[i__1].i = 0.; } i__1 = i__ + (i__ + 1) * a_dim1; i__2 = i__; a[i__1].r = e[i__2], a[i__1].i = 0.; i__1 = i__ + 1; i__2 = i__ + 1 + (i__ + 1) * a_dim1; d__[i__1] = a[i__2].r; i__1 = i__; tau[i__1].r = taui.r, tau[i__1].i = taui.i; /* L10: */ } i__1 = a_dim1 + 1; d__[1] = a[i__1].r; } else { /* Reduce the lower triangle of A */ i__1 = a_dim1 + 1; i__2 = a_dim1 + 1; d__1 = a[i__2].r; a[i__1].r = d__1, a[i__1].i = 0.; i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Generate elementary reflector H(i) = I - tau * v * v' */ /* to annihilate A(i+2:n,i) */ i__2 = i__ + 1 + i__ * a_dim1; alpha.r = a[i__2].r, alpha.i = a[i__2].i; i__2 = *n - i__; /* Computing MIN */ i__3 = i__ + 2; zlarfg_(&i__2, &alpha, &a[MIN(i__3, *n)+ i__ * a_dim1], &c__1, & taui); i__2 = i__; e[i__2] = alpha.r; if (taui.r != 0. || taui.i != 0.) { /* Apply H(i) from both sides to A(i+1:n,i+1:n) */ i__2 = i__ + 1 + i__ * a_dim1; a[i__2].r = 1., a[i__2].i = 0.; /* Compute x := tau * A * v storing y in TAU(i:n-1) */ i__2 = *n - i__; zhemv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b2, &tau[ i__], &c__1); /* Compute w := x - 1/2 * tau * (x'*v) * v */ z__3.r = -.5, z__3.i = -0.; z__2.r = z__3.r * taui.r - z__3.i * taui.i, z__2.i = z__3.r * taui.i + z__3.i * taui.r; i__2 = *n - i__; zdotc_(&z__4, &i__2, &tau[i__], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1); z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i + z__2.i * z__4.r; alpha.r = z__1.r, alpha.i = z__1.i; i__2 = *n - i__; zaxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ i__], &c__1); /* Apply the transformation as a rank-2 update: */ /* A := A - v * w' - w * v' */ i__2 = *n - i__; z__1.r = -1., z__1.i = -0.; zher2_(uplo, &i__2, &z__1, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) * a_dim1], lda); } else { i__2 = i__ + 1 + (i__ + 1) * a_dim1; i__3 = i__ + 1 + (i__ + 1) * a_dim1; d__1 = a[i__3].r; a[i__2].r = d__1, a[i__2].i = 0.; } i__2 = i__ + 1 + i__ * a_dim1; i__3 = i__; a[i__2].r = e[i__3], a[i__2].i = 0.; i__2 = i__; i__3 = i__ + i__ * a_dim1; d__[i__2] = a[i__3].r; i__2 = i__; tau[i__2].r = taui.r, tau[i__2].i = taui.i; /* L20: */ } i__1 = *n; i__2 = *n + *n * a_dim1; d__[i__1] = a[i__2].r; } return 0; /* End of ZHETD2 */ } /* zhetd2_ */
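/* Each reflector generated by ZHETD2 has the form H = I - tau*v*v', and
   applying it to a vector takes one dot product and one axpy -- the same
   structure as the w-update inside the routine. A standalone C99 sketch
   (apply_reflector is a hypothetical name used only for this example): */
#include <complex.h>

static void apply_reflector(int n, double complex tau,
                            const double complex *v, double complex *x)
{
    /* x := (I - tau*v*v') x = x - tau*(v' x)*v */
    double complex dot = 0.0;
    for (int i = 0; i < n; ++i) {
        dot += conj(v[i]) * x[i];
    }
    for (int i = 0; i < n; ++i) {
        x[i] -= tau * dot * v[i];
    }
}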