/*
 * Check the inf-norm of the error vector
 */
void pzinf_norm_error(int iam, int_t n, int_t nrhs, doublecomplex x[], int_t ldx,
                      doublecomplex xtrue[], int_t ldxtrue, gridinfo_t *grid)
{
    double err, xnorm, temperr, tempxnorm;
    doublecomplex *x_work, *xtrue_work;
    doublecomplex temp;
    int i, j;

    for (j = 0; j < nrhs; j++) {
        x_work = &x[j*ldx];
        xtrue_work = &xtrue[j*ldxtrue];
        err = xnorm = 0.0;
        for (i = 0; i < n; i++) {
            z_sub(&temp, &x_work[i], &xtrue_work[i]);
            err = SUPERLU_MAX(err, z_abs(&temp));
            xnorm = SUPERLU_MAX(xnorm, z_abs(&x_work[i]));
        }

        /* get the global max err & xnorm */
        temperr = err;
        tempxnorm = xnorm;
        MPI_Allreduce(&temperr, &err, 1, MPI_DOUBLE, MPI_MAX, grid->comm);
        MPI_Allreduce(&tempxnorm, &xnorm, 1, MPI_DOUBLE, MPI_MAX, grid->comm);

        err = err / xnorm;
        if ( !iam ) printf("\tSol %2d: ||X-Xtrue||/||X|| = %e\n", j, err);
    }
}
/* Subroutine */ int zlaev2_(doublecomplex *a, doublecomplex *b, doublecomplex *c__,
        doublereal *rt1, doublereal *rt2, doublereal *cs1, doublecomplex *sn1)
{
    /* System generated locals */
    doublereal d__1, d__2, d__3;
    doublecomplex z__1, z__2;
    /* Builtin functions */
    double z_abs(doublecomplex *);
    void d_cnjg(doublecomplex *, doublecomplex *);
    /* Local variables */
    doublereal t;
    doublecomplex w;
    extern /* Subroutine */ int dlaev2_(doublereal *, doublereal *, doublereal *,
            doublereal *, doublereal *, doublereal *, doublereal *);

/* -- LAPACK auxiliary routine (version 3.4.2) -- */
/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/* September 2012 */

/* .. Scalar Arguments .. */
/* .. */
/* ===================================================================== */
/* .. Parameters .. */
/* .. */
/* .. Local Scalars .. */
/* .. */
/* .. External Subroutines .. */
/* .. */
/* .. Intrinsic Functions .. */
/* .. */
/* .. Executable Statements .. */
    if (z_abs(b) == 0.) {
        w.r = 1.;
        w.i = 0.; // , expr subst
    } else {
        d_cnjg(&z__2, b);
        d__1 = z_abs(b);
        z__1.r = z__2.r / d__1;
        z__1.i = z__2.i / d__1; // , expr subst
        w.r = z__1.r;
        w.i = z__1.i; // , expr subst
    }
    d__1 = a->r;
    d__2 = z_abs(b);
    d__3 = c__->r;
    dlaev2_(&d__1, &d__2, &d__3, rt1, rt2, cs1, &t);
    z__1.r = t * w.r;
    z__1.i = t * w.i; // , expr subst
    sn1->r = z__1.r, sn1->i = z__1.i;
    return 0;
    /* End of ZLAEV2 */
}
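/*
 * Example (sketch, not part of the library): calling zlaev2_ on the 2-by-2
 * Hermitian matrix [ a  b ; conj(b)  c ].  The f2c-style typedefs
 * (doublecomplex with .r/.i members, doublereal) are assumed to come from
 * f2c.h or an equivalent header; the diagonal entries are taken as real.
 * On return rt1 >= rt2 are the eigenvalues and (cs1, sn1) describes the
 * rotation associated with rt1.
 */
#include <stdio.h>

static void zlaev2_example(void)
{
    doublecomplex a = {2.0, 0.0};   /* A(1,1), real */
    doublecomplex b = {1.0, -1.0};  /* A(1,2) */
    doublecomplex c = {3.0, 0.0};   /* A(2,2), real */
    doublereal rt1, rt2, cs1;
    doublecomplex sn1;

    zlaev2_(&a, &b, &c, &rt1, &rt2, &cs1, &sn1);
    printf("eigenvalues: %g and %g\n", rt1, rt2);
}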
/*! \brief
 <pre>
    Purpose
    =======

    DZSUM1 takes the sum of the absolute values of a complex
    vector and returns a double precision result.

    Based on DZASUM from the Level 1 BLAS.
    The change is to use the 'genuine' absolute value.

    Contributed by Nick Higham for use with ZLACON.

    Arguments
    =========

    N       (input) INT
            The number of elements in the vector CX.

    CX      (input) COMPLEX*16 array, dimension (N)
            The vector whose elements will be summed.

    INCX    (input) INT
            The spacing between successive values of CX.  INCX > 0.

    =====================================================================
 </pre>
*/
double dzsum1_slu(int *n, doublecomplex *cx, int *incx)
{
    /* Builtin functions */
    double z_abs(doublecomplex *);

    /* Local variables */
    int i, nincx;
    double stemp;

#define CX(I) cx[(I)-1]

    stemp = 0.;
    if (*n <= 0) {
        return stemp;
    }
    if (*incx == 1) {
        goto L20;
    }

    /* CODE FOR INCREMENT NOT EQUAL TO 1 */

    nincx = *n * *incx;
    for (i = 1; *incx < 0 ? i >= nincx : i <= nincx; i += *incx) {
        /* NEXT LINE MODIFIED. */
        stemp += z_abs(&CX(i));
        /* L10: */
    }
    return stemp;

    /* CODE FOR INCREMENT EQUAL TO 1 */

L20:
    for (i = 1; i <= *n; ++i) {
        /* NEXT LINE MODIFIED. */
        stemp += z_abs(&CX(i));
        /* L30: */
    }
    return stemp;

    /* End of DZSUM1 */

} /* dzsum1_slu */
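/*
 * Example (sketch, not part of the library): a minimal call to dzsum1_slu
 * with unit stride.  The routine returns the sum of the true complex moduli,
 * here |3+4i| + |i| + |-2| = 5 + 1 + 2 = 8.  The doublecomplex struct with
 * .r/.i members is assumed to come from the SuperLU headers.
 */
#include <stdio.h>

static void dzsum1_example(void)
{
    doublecomplex cx[3] = { {3.0, 4.0}, {0.0, 1.0}, {-2.0, 0.0} };
    int n = 3, incx = 1;

    double s = dzsum1_slu(&n, cx, &incx);
    printf("dzsum1 = %g\n", s);   /* expected: 8 */
}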
static void VanVlietAdjustPoles(
    const doublecomplex unscaled[4],  /* Unscaled pole locations from Table 1 or
                                       * 2 of the paper. */
    double sigma,                     /* Standard deviation */
    doublecomplex scaled[4]           /* Scaled pole locations */
)
{
    double q = sigma;   /* Scale factor */
    int iter;           /* Iteration number */
    double s = VanVlietComputeSigma(q, unscaled);
    int i;

    /* Search for a scale factor q that yields the desired sigma. */
    for (iter = 0; fabs(sigma-s) > sigma * 1.0e-8; ++iter) {
        s = VanVlietComputeSigma(q, unscaled);
        q *= sigma/s;
    }

    /* Adjust poles */
    for (i = 0; i < 4; ++i) {
        doublecomplex pi = unscaled[i];
        double a = pow(z_abs(&pi), 2.0 / q);
        double t = atan2(pi.i, pi.r) * 2.0 / q;

        scaled[i].r =  1.0/a * cos(t);
        scaled[i].i = -1.0/a * sin(t);
    }
}
/*
 * Check the inf-norm of the error vector
 */
void zinf_norm_error_dist(int_t n, int_t nrhs, doublecomplex *x, int_t ldx,
                          doublecomplex *xtrue, int_t ldxtrue,
                          gridinfo_t *grid)
{
    double err, xnorm;
    doublecomplex *x_work, *xtrue_work;
    doublecomplex temp;
    int i, j;

    for (j = 0; j < nrhs; j++) {
        x_work = &x[j*ldx];
        xtrue_work = &xtrue[j*ldxtrue];
        err = xnorm = 0.0;
        for (i = 0; i < n; i++) {
            z_sub(&temp, &x_work[i], &xtrue_work[i]);
            err = SUPERLU_MAX(err, z_abs(&temp));
            xnorm = SUPERLU_MAX(xnorm, z_abs(&x_work[i]));
        }
        err = err / xnorm;
        printf("\tRHS %2d: ||X-Xtrue||/||X|| = %e\n", j, err);
    }
}
/*! \brief Check the inf-norm of the error vector */
void zinf_norm_error(int nrhs, SuperMatrix *X, doublecomplex *xtrue)
{
    DNformat *Xstore;
    double err, xnorm;
    doublecomplex *Xmat, *soln_work;
    doublecomplex temp;
    int i, j;

    Xstore = X->Store;
    Xmat = Xstore->nzval;

    for (j = 0; j < nrhs; j++) {
        soln_work = &Xmat[j*Xstore->lda];
        err = xnorm = 0.0;
        for (i = 0; i < X->nrow; i++) {
            z_sub(&temp, &soln_work[i], &xtrue[i]);
            err = SUPERLU_MAX(err, z_abs(&temp));
            xnorm = SUPERLU_MAX(xnorm, z_abs(&soln_work[i]));
        }
        err = err / xnorm;
        printf("||X - Xtrue||/||X|| = %e\n", err);
    }
}
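/*
 * Example (sketch, not part of the library): wrapping a plain column-major
 * solution array in a SuperMatrix before calling zinf_norm_error.  The calls
 * to zCreate_Dense_Matrix and Destroy_SuperMatrix_Store follow SuperLU's
 * usual dense-matrix setup and are assumed to be available from the SuperLU
 * headers.
 */
static void check_solution(int n, int nrhs, doublecomplex *xsol, int ldx,
                           doublecomplex *xtrue)
{
    SuperMatrix X;

    zCreate_Dense_Matrix(&X, n, nrhs, xsol, ldx, SLU_DN, SLU_Z, SLU_GE);
    zinf_norm_error(nrhs, &X, xtrue);   /* prints ||X - Xtrue||/||X|| per RHS */
    Destroy_SuperMatrix_Store(&X);      /* frees the wrapper, not xsol */
}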
/* Subroutine */ int PASTEF77(z,rotg)(doublecomplex *ca, doublecomplex *cb,
        doublereal *c__, doublecomplex *s)
{
    /* System generated locals */
    doublereal d__1, d__2;
    doublecomplex z__1, z__2, z__3, z__4;

    /* Builtin functions */
    double z_abs(doublecomplex *);
    void bla_z_div(doublecomplex *, doublecomplex *, doublecomplex *);
    double sqrt(doublereal);
    void bla_d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    doublereal norm;
    doublecomplex alpha;
    doublereal scale;

    if (z_abs(ca) != 0.) {
        goto L10;
    }
    *c__ = 0.;
    s->real = 1., s->imag = 0.;
    ca->real = cb->real, ca->imag = cb->imag;
    goto L20;
L10:
    scale = z_abs(ca) + z_abs(cb);
    z__2.real = scale, z__2.imag = 0.;
    bla_z_div(&z__1, ca, &z__2);
    /* Computing 2nd power */
    d__1 = z_abs(&z__1);
    z__4.real = scale, z__4.imag = 0.;
    bla_z_div(&z__3, cb, &z__4);
    /* Computing 2nd power */
    d__2 = z_abs(&z__3);
    norm = scale * sqrt(d__1 * d__1 + d__2 * d__2);
    d__1 = z_abs(ca);
    z__1.real = ca->real / d__1, z__1.imag = ca->imag / d__1;
    alpha.real = z__1.real, alpha.imag = z__1.imag;
    *c__ = z_abs(ca) / norm;
    bla_d_cnjg(&z__3, cb);
    z__2.real = alpha.real * z__3.real - alpha.imag * z__3.imag,
            z__2.imag = alpha.real * z__3.imag + alpha.imag * z__3.real;
    z__1.real = z__2.real / norm, z__1.imag = z__2.imag / norm;
    s->real = z__1.real, s->imag = z__1.imag;
    z__1.real = norm * alpha.real, z__1.imag = norm * alpha.imag;
    ca->real = z__1.real, ca->imag = z__1.imag;
L20:
    return 0;
} /* zrotg_ */
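/*
 * Example (sketch, not part of the library): using the generated plane
 * rotation.  PASTEF77(z,rotg) conventionally expands to the Fortran-style
 * name zrotg_; that expansion and the .real/.imag layout of doublecomplex
 * are assumptions here.  On return c and s define G = [ c  s ; -conj(s)  c ]
 * with G * [ca_in; cb_in] = [r; 0], and ca is overwritten with r.
 */
#include <stdio.h>

static void zrotg_example(void)
{
    doublecomplex ca_in = {1.0, 2.0}, cb_in = {3.0, -1.0};
    doublecomplex ca = ca_in, cb = cb_in;
    doublereal c;
    doublecomplex s;

    zrotg_(&ca, &cb, &c, &s);   /* ca now holds r; cb is untouched */

    /* Second component of G * [ca_in; cb_in] = -conj(s)*ca_in + c*cb_in,
       which should be numerically zero. */
    {
        double zr = -(s.real * ca_in.real + s.imag * ca_in.imag) + c * cb_in.real;
        double zi = -(s.real * ca_in.imag - s.imag * ca_in.real) + c * cb_in.imag;
        printf("residual = (%g, %g)\n", zr, zi);
    }
}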
static double VanVlietComputeSigma(
    double sigma,                 /* Scale factor */
    const doublecomplex poles[4]  /* Poles of the filter */
)
{
    double q = sigma / 2.0;
    doublecomplex cs = {0.0, 0.0};
    doublecomplex b, c, d, temp;
    int i;

    for (i = 0; i < 4; ++i) {
        doublecomplex pi = poles[i];
        double a = pow(z_abs(&pi), -1.0 / q);
        double t = atan2(pi.i, pi.r) / q;

        b.r = a * cos(t);
        b.i = a * sin(t);
        c.r = 1.0 - b.r;
        c.i = - b.i;
        d.r = c.r * c.r - c.i * c.i;
        d.i = 2.0 * c.r * c.i;
        b.r *= 2.0;
        b.i *= 2.0;
        z_div(&temp, &b, &d);
        cs.r += temp.r;
        cs.i += temp.i;
    }
    return sqrt(cs.r);
}
/* Subroutine */ int ztgex2_(logical *wantq, logical *wantz, integer *n, doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, doublecomplex *q, integer *ldq, doublecomplex *z__, integer *ldz, integer *j1, integer *info) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, z_offset, i__1, i__2, i__3; doublereal d__1; doublecomplex z__1, z__2, z__3; /* Builtin functions */ double sqrt(doublereal), z_abs(doublecomplex *); void d_cnjg(doublecomplex *, doublecomplex *); /* Local variables */ static doublecomplex f, g; static integer i__, m; static doublecomplex s[4] /* was [2][2] */, t[4] /* was [2][2] */; static doublereal cq, sa, sb, cz; static doublecomplex sq; static doublereal ss, ws; static doublecomplex sz; static doublereal eps, sum; static logical weak; static doublecomplex cdum, work[8]; extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, doublecomplex *, integer *, doublereal *, doublecomplex *); static doublereal scale; extern doublereal dlamch_(char *, ftnlen); static logical dtrong; static doublereal thresh; extern /* Subroutine */ int zlacpy_(char *, integer *, integer *, doublecomplex *, integer *, doublecomplex *, integer *, ftnlen), zlartg_(doublecomplex *, doublecomplex *, doublereal *, doublecomplex *, doublecomplex *); static doublereal smlnum; extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *, doublereal *, doublereal *); /* -- LAPACK auxiliary routine (version 3.0) -- */ /* Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */ /* Courant Institute, Argonne National Lab, and Rice University */ /* June 30, 1999 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZTGEX2 swaps adjacent diagonal 1 by 1 blocks (A11,B11) and (A22,B22) */ /* in an upper triangular matrix pair (A, B) by an unitary equivalence */ /* transformation. */ /* (A, B) must be in generalized Schur canonical form, that is, A and */ /* B are both upper triangular. */ /* Optionally, the matrices Q and Z of generalized Schur vectors are */ /* updated. */ /* Q(in) * A(in) * Z(in)' = Q(out) * A(out) * Z(out)' */ /* Q(in) * B(in) * Z(in)' = Q(out) * B(out) * Z(out)' */ /* Arguments */ /* ========= */ /* WANTQ (input) LOGICAL */ /* .TRUE. : update the left transformation matrix Q; */ /* .FALSE.: do not update Q. */ /* WANTZ (input) LOGICAL */ /* .TRUE. : update the right transformation matrix Z; */ /* .FALSE.: do not update Z. */ /* N (input) INTEGER */ /* The order of the matrices A and B. N >= 0. */ /* A (input/output) COMPLEX*16 arrays, dimensions (LDA,N) */ /* On entry, the matrix A in the pair (A, B). */ /* On exit, the updated matrix A. */ /* LDA (input) INTEGER */ /* The leading dimension of the array A. LDA >= max(1,N). */ /* B (input/output) COMPLEX*16 arrays, dimensions (LDB,N) */ /* On entry, the matrix B in the pair (A, B). */ /* On exit, the updated matrix B. */ /* LDB (input) INTEGER */ /* The leading dimension of the array B. LDB >= max(1,N). */ /* Q (input/output) COMPLEX*16 array, dimension (LDZ,N) */ /* If WANTQ = .TRUE, on entry, the unitary matrix Q. On exit, */ /* the updated matrix Q. */ /* Not referenced if WANTQ = .FALSE.. */ /* LDQ (input) INTEGER */ /* The leading dimension of the array Q. LDQ >= 1; */ /* If WANTQ = .TRUE., LDQ >= N. */ /* Z (input/output) COMPLEX*16 array, dimension (LDZ,N) */ /* If WANTZ = .TRUE, on entry, the unitary matrix Z. On exit, */ /* the updated matrix Z. 
*/ /* Not referenced if WANTZ = .FALSE.. */ /* LDZ (input) INTEGER */ /* The leading dimension of the array Z. LDZ >= 1; */ /* If WANTZ = .TRUE., LDZ >= N. */ /* J1 (input) INTEGER */ /* The index to the first block (A11, B11). */ /* INFO (output) INTEGER */ /* =0: Successful exit. */ /* =1: The transformed matrix pair (A, B) would be too far */ /* from generalized Schur form; the problem is ill- */ /* conditioned. (A, B) may have been partially reordered, */ /* and ILST points to the first row of the current */ /* position of the block being moved. */ /* Further Details */ /* =============== */ /* Based on contributions by */ /* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */ /* Umea University, S-901 87 Umea, Sweden. */ /* In the current code both weak and strong stability tests are */ /* performed. The user can omit the strong stability test by changing */ /* the internal logical parameter WANDS to .FALSE.. See ref. [2] for */ /* details. */ /* [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */ /* Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */ /* M.S. Moonen et al (eds), Linear Algebra for Large Scale and */ /* Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */ /* [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */ /* Eigenvalues of a Regular Matrix Pair (A, B) and Condition */ /* Estimation: Theory, Algorithms and Software, Report UMINF-94.04, */ /* Department of Computing Science, Umea University, S-901 87 Umea, */ /* Sweden, 1994. Also as LAPACK Working Note 87. To appear in */ /* Numerical Algorithms, 1996. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. Local Arrays .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1; b -= b_offset; q_dim1 = *ldq; q_offset = 1 + q_dim1; q -= q_offset; z_dim1 = *ldz; z_offset = 1 + z_dim1; z__ -= z_offset; /* Function Body */ *info = 0; /* Quick return if possible */ if (*n <= 1) { return 0; } m = 2; weak = FALSE_; dtrong = FALSE_; /* Make a local copy of selected block in (A, B) */ zlacpy_("Full", &m, &m, &a[*j1 + *j1 * a_dim1], lda, s, &c__2, (ftnlen)4); zlacpy_("Full", &m, &m, &b[*j1 + *j1 * b_dim1], ldb, t, &c__2, (ftnlen)4); /* Compute the threshold for testing the acceptance of swapping. */ eps = dlamch_("P", (ftnlen)1); smlnum = dlamch_("S", (ftnlen)1) / eps; scale = 0.; sum = 1.; zlacpy_("Full", &m, &m, s, &c__2, work, &m, (ftnlen)4); zlacpy_("Full", &m, &m, t, &c__2, &work[m * m], &m, (ftnlen)4); i__1 = (m << 1) * m; zlassq_(&i__1, work, &c__1, &scale, &sum); sa = scale * sqrt(sum); /* Computing MAX */ d__1 = eps * 10. * sa; thresh = max(d__1,smlnum); /* Compute unitary QL and RQ that swap 1-by-1 and 1-by-1 blocks */ /* using Givens rotations and perform the swap tentatively. 
*/ z__2.r = s[3].r * t[0].r - s[3].i * t[0].i, z__2.i = s[3].r * t[0].i + s[ 3].i * t[0].r; z__3.r = t[3].r * s[0].r - t[3].i * s[0].i, z__3.i = t[3].r * s[0].i + t[ 3].i * s[0].r; z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i; f.r = z__1.r, f.i = z__1.i; z__2.r = s[3].r * t[2].r - s[3].i * t[2].i, z__2.i = s[3].r * t[2].i + s[ 3].i * t[2].r; z__3.r = t[3].r * s[2].r - t[3].i * s[2].i, z__3.i = t[3].r * s[2].i + t[ 3].i * s[2].r; z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i; g.r = z__1.r, g.i = z__1.i; sa = z_abs(&s[3]); sb = z_abs(&t[3]); zlartg_(&g, &f, &cz, &sz, &cdum); z__1.r = -sz.r, z__1.i = -sz.i; sz.r = z__1.r, sz.i = z__1.i; d_cnjg(&z__1, &sz); zrot_(&c__2, s, &c__1, &s[2], &c__1, &cz, &z__1); d_cnjg(&z__1, &sz); zrot_(&c__2, t, &c__1, &t[2], &c__1, &cz, &z__1); if (sa >= sb) { zlartg_(s, &s[1], &cq, &sq, &cdum); } else { zlartg_(t, &t[1], &cq, &sq, &cdum); } zrot_(&c__2, s, &c__2, &s[1], &c__2, &cq, &sq); zrot_(&c__2, t, &c__2, &t[1], &c__2, &cq, &sq); /* Weak stability test: |S21| + |T21| <= O(EPS F-norm((S, T))) */ ws = z_abs(&s[1]) + z_abs(&t[1]); weak = ws <= thresh; if (! weak) { goto L20; } if (TRUE_) { /* Strong stability test: */ /* F-norm((A-QL'*S*QR, B-QL'*T*QR)) <= O(EPS*F-norm((A, B))) */ zlacpy_("Full", &m, &m, s, &c__2, work, &m, (ftnlen)4); zlacpy_("Full", &m, &m, t, &c__2, &work[m * m], &m, (ftnlen)4); d_cnjg(&z__2, &sz); z__1.r = -z__2.r, z__1.i = -z__2.i; zrot_(&c__2, work, &c__1, &work[2], &c__1, &cz, &z__1); d_cnjg(&z__2, &sz); z__1.r = -z__2.r, z__1.i = -z__2.i; zrot_(&c__2, &work[4], &c__1, &work[6], &c__1, &cz, &z__1); z__1.r = -sq.r, z__1.i = -sq.i; zrot_(&c__2, work, &c__2, &work[1], &c__2, &cq, &z__1); z__1.r = -sq.r, z__1.i = -sq.i; zrot_(&c__2, &work[4], &c__2, &work[5], &c__2, &cq, &z__1); for (i__ = 1; i__ <= 2; ++i__) { i__1 = i__ - 1; i__2 = i__ - 1; i__3 = *j1 + i__ - 1 + *j1 * a_dim1; z__1.r = work[i__2].r - a[i__3].r, z__1.i = work[i__2].i - a[i__3] .i; work[i__1].r = z__1.r, work[i__1].i = z__1.i; i__1 = i__ + 1; i__2 = i__ + 1; i__3 = *j1 + i__ - 1 + (*j1 + 1) * a_dim1; z__1.r = work[i__2].r - a[i__3].r, z__1.i = work[i__2].i - a[i__3] .i; work[i__1].r = z__1.r, work[i__1].i = z__1.i; i__1 = i__ + 3; i__2 = i__ + 3; i__3 = *j1 + i__ - 1 + *j1 * b_dim1; z__1.r = work[i__2].r - b[i__3].r, z__1.i = work[i__2].i - b[i__3] .i; work[i__1].r = z__1.r, work[i__1].i = z__1.i; i__1 = i__ + 5; i__2 = i__ + 5; i__3 = *j1 + i__ - 1 + (*j1 + 1) * b_dim1; z__1.r = work[i__2].r - b[i__3].r, z__1.i = work[i__2].i - b[i__3] .i; work[i__1].r = z__1.r, work[i__1].i = z__1.i; /* L10: */ } scale = 0.; sum = 1.; i__1 = (m << 1) * m; zlassq_(&i__1, work, &c__1, &scale, &sum); ss = scale * sqrt(sum); dtrong = ss <= thresh; if (! 
dtrong) { goto L20; } } /* If the swap is accepted ("weakly" and "strongly"), apply the */ /* equivalence transformations to the original matrix pair (A,B) */ i__1 = *j1 + 1; d_cnjg(&z__1, &sz); zrot_(&i__1, &a[*j1 * a_dim1 + 1], &c__1, &a[(*j1 + 1) * a_dim1 + 1], & c__1, &cz, &z__1); i__1 = *j1 + 1; d_cnjg(&z__1, &sz); zrot_(&i__1, &b[*j1 * b_dim1 + 1], &c__1, &b[(*j1 + 1) * b_dim1 + 1], & c__1, &cz, &z__1); i__1 = *n - *j1 + 1; zrot_(&i__1, &a[*j1 + *j1 * a_dim1], lda, &a[*j1 + 1 + *j1 * a_dim1], lda, &cq, &sq); i__1 = *n - *j1 + 1; zrot_(&i__1, &b[*j1 + *j1 * b_dim1], ldb, &b[*j1 + 1 + *j1 * b_dim1], ldb, &cq, &sq); /* Set N1 by N2 (2,1) blocks to 0 */ i__1 = *j1 + 1 + *j1 * a_dim1; a[i__1].r = 0., a[i__1].i = 0.; i__1 = *j1 + 1 + *j1 * b_dim1; b[i__1].r = 0., b[i__1].i = 0.; /* Accumulate transformations into Q and Z if requested. */ if (*wantz) { d_cnjg(&z__1, &sz); zrot_(n, &z__[*j1 * z_dim1 + 1], &c__1, &z__[(*j1 + 1) * z_dim1 + 1], &c__1, &cz, &z__1); } if (*wantq) { d_cnjg(&z__1, &sq); zrot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[(*j1 + 1) * q_dim1 + 1], & c__1, &cq, &z__1); } /* Exit with INFO = 0 if swap was successfully performed. */ return 0; /* Exit with INFO = 1 if swap was rejected. */ L20: *info = 1; return 0; /* End of ZTGEX2 */ } /* ztgex2_ */
/* Subroutine */ int zgetf2_(integer *m, integer *n, doublecomplex *a, integer *lda, integer *ipiv, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublecomplex z__1; /* Builtin functions */ double z_abs(doublecomplex *); void z_div(doublecomplex *, doublecomplex *, doublecomplex *); /* Local variables */ integer i__, j, jp; doublereal sfmin; extern /* Subroutine */ int zscal_(integer *, doublecomplex *, doublecomplex *, integer *), zgeru_(integer *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *), zswap_(integer *, doublecomplex *, integer *, doublecomplex *, integer *); extern doublereal dlamch_(char *); extern /* Subroutine */ int xerbla_(char *, integer *); extern integer izamax_(integer *, doublecomplex *, integer *); /* -- LAPACK routine (version 3.2) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZGETF2 computes an LU factorization of a general m-by-n matrix A */ /* using partial pivoting with row interchanges. */ /* The factorization has the form */ /* A = P * L * U */ /* where P is a permutation matrix, L is lower triangular with unit */ /* diagonal elements (lower trapezoidal if m > n), and U is upper */ /* triangular (upper trapezoidal if m < n). */ /* This is the right-looking Level 2 BLAS version of the algorithm. */ /* Arguments */ /* ========= */ /* M (input) INTEGER */ /* The number of rows of the matrix A. M >= 0. */ /* N (input) INTEGER */ /* The number of columns of the matrix A. N >= 0. */ /* A (input/output) COMPLEX*16 array, dimension (LDA,N) */ /* On entry, the m by n matrix to be factored. */ /* On exit, the factors L and U from the factorization */ /* A = P*L*U; the unit diagonal elements of L are not stored. */ /* LDA (input) INTEGER */ /* The leading dimension of the array A. LDA >= max(1,M). */ /* IPIV (output) INTEGER array, dimension (min(M,N)) */ /* The pivot indices; for 1 <= i <= min(M,N), row i of the */ /* matrix was interchanged with row IPIV(i). */ /* INFO (output) INTEGER */ /* = 0: successful exit */ /* < 0: if INFO = -k, the k-th argument had an illegal value */ /* > 0: if INFO = k, U(k,k) is exactly zero. The factorization */ /* has been completed, but the factor U is exactly */ /* singular, and division by zero will occur if it is used */ /* to solve a system of equations. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; --ipiv; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("ZGETF2", &i__1); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { return 0; } /* Compute machine safe minimum */ sfmin = dlamch_("S"); i__1 = min(*m,*n); for (j = 1; j <= i__1; ++j) { /* Find pivot and test for singularity. */ i__2 = *m - j + 1; jp = j - 1 + izamax_(&i__2, &a[j + j * a_dim1], &c__1); ipiv[j] = jp; i__2 = jp + j * a_dim1; if (a[i__2].r != 0. || a[i__2].i != 0.) 
{ /* Apply the interchange to columns 1:N. */ if (jp != j) { zswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda); } /* Compute elements J+1:M of J-th column. */ if (j < *m) { if (z_abs(&a[j + j * a_dim1]) >= sfmin) { i__2 = *m - j; z_div(&z__1, &c_b1, &a[j + j * a_dim1]); zscal_(&i__2, &z__1, &a[j + 1 + j * a_dim1], &c__1); } else { i__2 = *m - j; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = j + i__ + j * a_dim1; z_div(&z__1, &a[j + i__ + j * a_dim1], &a[j + j * a_dim1]); a[i__3].r = z__1.r, a[i__3].i = z__1.i; /* L20: */ } } } } else if (*info == 0) { *info = j; } if (j < min(*m,*n)) { /* Update trailing submatrix. */ i__2 = *m - j; i__3 = *n - j; z__1.r = -1., z__1.i = -0.; zgeru_(&i__2, &i__3, &z__1, &a[j + 1 + j * a_dim1], &c__1, &a[j + (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], lda) ; } /* L10: */ } return 0; /* End of ZGETF2 */ } /* zgetf2_ */
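/*
 * Example (sketch, not part of the library): factoring a small 3-by-3 complex
 * matrix with zgetf2_.  The matrix is stored column-major, as the
 * Fortran-derived routine expects; the f2c typedefs (integer, doublecomplex
 * with .r/.i members) are assumed.  On exit a holds U in its upper triangle
 * and the multipliers of L (unit diagonal not stored) below it, and ipiv
 * records the row interchanges.
 */
static void zgetf2_example(void)
{
    integer m = 3, n = 3, lda = 3, info = 0;
    integer ipiv[3];
    /* Column-major storage: a[i + j*lda] is A(i+1, j+1). */
    doublecomplex a[9] = {
        {4.0, 0.0}, {2.0, 1.0}, {0.0, -1.0},   /* column 1 */
        {1.0, 0.0}, {5.0, 0.0}, {3.0,  0.0},   /* column 2 */
        {0.0, 2.0}, {1.0, 1.0}, {6.0,  0.0}    /* column 3 */
    };

    zgetf2_(&m, &n, a, &lda, ipiv, &info);

    /* info > 0 means U(info,info) is exactly zero: the factorization
       completed, but U is singular. */
}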
/* Subroutine */ int zgeqpf_(integer *m, integer *n, doublecomplex *a, integer *lda, integer *jpvt, doublecomplex *tau, doublecomplex *work, doublereal *rwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1, d__2; doublecomplex z__1; /* Local variables */ integer i__, j, ma, mn; doublecomplex aii; integer pvt; doublereal temp, temp2, tol3z; integer itemp; /* -- LAPACK deprecated driver routine (version 3.2) -- */ /* November 2006 */ /* Purpose */ /* ======= */ /* This routine is deprecated and has been replaced by routine ZGEQP3. */ /* ZGEQPF computes a QR factorization with column pivoting of a */ /* complex M-by-N matrix A: A*P = Q*R. */ /* Arguments */ /* ========= */ /* M (input) INTEGER */ /* The number of rows of the matrix A. M >= 0. */ /* N (input) INTEGER */ /* The number of columns of the matrix A. N >= 0 */ /* A (input/output) COMPLEX*16 array, dimension (LDA,N) */ /* On entry, the M-by-N matrix A. */ /* On exit, the upper triangle of the array contains the */ /* min(M,N)-by-N upper triangular matrix R; the elements */ /* below the diagonal, together with the array TAU, */ /* represent the unitary matrix Q as a product of */ /* min(m,n) elementary reflectors. */ /* LDA (input) INTEGER */ /* The leading dimension of the array A. LDA >= max(1,M). */ /* JPVT (input/output) INTEGER array, dimension (N) */ /* On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */ /* to the front of A*P (a leading column); if JPVT(i) = 0, */ /* the i-th column of A is a free column. */ /* On exit, if JPVT(i) = k, then the i-th column of A*P */ /* was the k-th column of A. */ /* TAU (output) COMPLEX*16 array, dimension (min(M,N)) */ /* The scalar factors of the elementary reflectors. */ /* WORK (workspace) COMPLEX*16 array, dimension (N) */ /* RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) */ /* INFO (output) INTEGER */ /* = 0: successful exit */ /* < 0: if INFO = -i, the i-th argument had an illegal value */ /* Further Details */ /* =============== */ /* The matrix Q is represented as a product of elementary reflectors */ /* Q = H(1) H(2) . . . H(n) */ /* Each H(i) has the form */ /* H = I - tau * v * v' */ /* where tau is a complex scalar, and v is a complex vector with */ /* v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i). */ /* The matrix P is represented in jpvt as follows: If */ /* jpvt(j) = i */ /* then the jth column of P is the ith canonical unit vector. */ /* Partial column norm updating strategy modified by */ /* Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */ /* University of Zagreb, Croatia. */ /* June 2006. */ /* For more details see LAPACK Working Note 176. 
*/ /* ===================================================================== */ /* Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; --jpvt; --tau; --work; --rwork; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("ZGEQPF", &i__1); return 0; } mn = min(*m,*n); tol3z = sqrt(dlamch_("Epsilon")); /* Move initial columns up front */ itemp = 1; i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if (jpvt[i__] != 0) { if (i__ != itemp) { zswap_(m, &a[i__ * a_dim1 + 1], &c__1, &a[itemp * a_dim1 + 1], &c__1); jpvt[i__] = jpvt[itemp]; jpvt[itemp] = i__; } else { jpvt[i__] = i__; } ++itemp; } else { jpvt[i__] = i__; } } --itemp; /* Compute the QR factorization and update remaining columns */ if (itemp > 0) { ma = min(itemp,*m); zgeqr2_(m, &ma, &a[a_offset], lda, &tau[1], &work[1], info); if (ma < *n) { i__1 = *n - ma; zunm2r_("Left", "Conjugate transpose", m, &i__1, &ma, &a[a_offset] , lda, &tau[1], &a[(ma + 1) * a_dim1 + 1], lda, &work[1], info); } } if (itemp < mn) { /* Initialize partial column norms. The first n elements of */ /* work store the exact column norms. */ i__1 = *n; for (i__ = itemp + 1; i__ <= i__1; ++i__) { i__2 = *m - itemp; rwork[i__] = dznrm2_(&i__2, &a[itemp + 1 + i__ * a_dim1], &c__1); rwork[*n + i__] = rwork[i__]; } /* Compute factorization */ i__1 = mn; for (i__ = itemp + 1; i__ <= i__1; ++i__) { /* Determine ith pivot column and swap if necessary */ i__2 = *n - i__ + 1; pvt = i__ - 1 + idamax_(&i__2, &rwork[i__], &c__1); if (pvt != i__) { zswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], & c__1); itemp = jpvt[pvt]; jpvt[pvt] = jpvt[i__]; jpvt[i__] = itemp; rwork[pvt] = rwork[i__]; rwork[*n + pvt] = rwork[*n + i__]; } /* Generate elementary reflector H(i) */ i__2 = i__ + i__ * a_dim1; aii.r = a[i__2].r, aii.i = a[i__2].i; i__2 = *m - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; zlarfp_(&i__2, &aii, &a[min(i__3, *m)+ i__ * a_dim1], &c__1, &tau[ i__]); i__2 = i__ + i__ * a_dim1; a[i__2].r = aii.r, a[i__2].i = aii.i; if (i__ < *n) { /* Apply H(i) to A(i:m,i+1:n) from the left */ i__2 = i__ + i__ * a_dim1; aii.r = a[i__2].r, aii.i = a[i__2].i; i__2 = i__ + i__ * a_dim1; a[i__2].r = 1., a[i__2].i = 0.; i__2 = *m - i__ + 1; i__3 = *n - i__; d_cnjg(&z__1, &tau[i__]); zlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, & z__1, &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]); i__2 = i__ + i__ * a_dim1; a[i__2].r = aii.r, a[i__2].i = aii.i; } /* Update partial column norms */ i__2 = *n; for (j = i__ + 1; j <= i__2; ++j) { if (rwork[j] != 0.) { /* NOTE: The following 4 lines follow from the analysis in */ /* Lapack Working Note 176. */ temp = z_abs(&a[i__ + j * a_dim1]) / rwork[j]; /* Computing MAX */ d__1 = 0., d__2 = (temp + 1.) * (1. - temp); temp = max(d__1,d__2); /* Computing 2nd power */ d__1 = rwork[j] / rwork[*n + j]; temp2 = temp * (d__1 * d__1); if (temp2 <= tol3z) { if (*m - i__ > 0) { i__3 = *m - i__; rwork[j] = dznrm2_(&i__3, &a[i__ + 1 + j * a_dim1] , &c__1); rwork[*n + j] = rwork[j]; } else { rwork[j] = 0.; rwork[*n + j] = 0.; } } else { rwork[j] *= sqrt(temp); } } } } } return 0; /* End of ZGEQPF */ } /* zgeqpf_ */
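/*
 * Example (sketch, not part of the library): column-pivoted QR of a 4-by-3
 * matrix with the deprecated zgeqpf_ (ZGEQP3 is the recommended replacement).
 * The f2c typedefs are assumed.  Setting jpvt[j] = 0 marks every column as
 * free, so the routine chooses the pivot order itself.
 */
static void zgeqpf_example(doublecomplex A[12] /* 4x3, column-major */)
{
    integer m = 4, n = 3, lda = 4, info = 0;
    integer jpvt[3] = {0, 0, 0};   /* all columns free */
    doublecomplex tau[3];          /* min(m,n) reflector scalars */
    doublecomplex work[3];         /* length n complex workspace */
    doublereal rwork[6];           /* length 2*n real workspace */

    zgeqpf_(&m, &n, A, &lda, jpvt, tau, work, rwork, &info);

    /* On exit: R is in the upper triangle of A, the reflectors defining Q
       sit below the diagonal together with tau, and jpvt[j-1] = k means
       column j of A*P was column k of the original A (1-based). */
}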
/* Subroutine */ int zdrvpt_(logical *dotype, integer *nn, integer *nval, integer *nrhs, doublereal *thresh, logical *tsterr, doublecomplex *a, doublereal *d__, doublecomplex *e, doublecomplex *b, doublecomplex *x, doublecomplex *xact, doublecomplex *work, doublereal *rwork, integer *nout) { /* Initialized data */ static integer iseedy[4] = { 0,0,0,1 }; /* Format strings */ static char fmt_9999[] = "(1x,a6,\002, N =\002,i5,\002, type \002,i2," "\002, test \002,i2,\002, ratio = \002,g12.5)"; static char fmt_9998[] = "(1x,a6,\002, FACT='\002,a1,\002', N =\002,i5" ",\002, type \002,i2,\002, test \002,i2,\002, ratio = \002,g12.5)"; /* System generated locals */ integer i__1, i__2, i__3, i__4, i__5; doublereal d__1, d__2; /* Builtin functions */ /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen); double z_abs(doublecomplex *); integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void); /* Local variables */ integer i__, j, k, n; doublereal z__[3]; integer k1, ia, in, kl, ku, ix, nt, lda; char fact[1]; doublereal cond; integer mode; doublereal dmax__; integer imat, info; char path[3], dist[1], type__[1]; integer nrun, ifact; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); integer nfail, iseed[4]; extern doublereal dget06_(doublereal *, doublereal *); doublereal rcond; integer nimat; doublereal anorm; extern /* Subroutine */ int zget04_(integer *, integer *, doublecomplex *, integer *, doublecomplex *, integer *, doublereal *, doublereal * ), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); integer izero, nerrs; extern /* Subroutine */ int zptt01_(integer *, doublereal *, doublecomplex *, doublereal *, doublecomplex *, doublecomplex *, doublereal *); logical zerot; extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, doublecomplex *, integer *), zptt02_(char *, integer *, integer *, doublereal *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublereal *), zptt05_( integer *, integer *, doublereal *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *, doublereal *, doublereal *, doublereal *), zptsv_(integer *, integer *, doublereal *, doublecomplex *, doublecomplex *, integer *, integer *), zlatb4_( char *, integer *, integer *, integer *, char *, integer *, integer *, doublereal *, integer *, doublereal *, char *), aladhd_(integer *, char *), alaerh_(char *, char *, integer *, integer *, char *, integer *, integer *, integer *, integer *, integer *, integer *, integer *, integer *, integer *); extern integer idamax_(integer *, doublereal *, integer *); doublereal rcondc; extern /* Subroutine */ int zdscal_(integer *, doublereal *, doublecomplex *, integer *), alasvm_(char *, integer *, integer *, integer *, integer *), dlarnv_(integer *, integer *, integer *, doublereal *); doublereal ainvnm; extern doublereal zlanht_(char *, integer *, doublereal *, doublecomplex * ); extern /* Subroutine */ int zlacpy_(char *, integer *, integer *, doublecomplex *, integer *, doublecomplex *, integer *); extern doublereal dzasum_(integer *, doublecomplex *, integer *); extern /* Subroutine */ int zlaset_(char *, integer *, integer *, doublecomplex *, doublecomplex *, doublecomplex *, integer *), zlaptm_(char *, integer *, integer *, doublereal *, doublereal *, doublecomplex *, doublecomplex *, integer *, doublereal *, doublecomplex *, integer *), zlatms_( integer *, integer *, char *, integer *, char *, doublereal *, integer *, 
doublereal *, doublereal *, integer *, integer *, char *, doublecomplex *, integer *, doublecomplex *, integer *), zlarnv_(integer *, integer *, integer *, doublecomplex *); doublereal result[6]; extern /* Subroutine */ int zpttrf_(integer *, doublereal *, doublecomplex *, integer *), zerrvx_(char *, integer *), zpttrs_(char *, integer *, integer *, doublereal *, doublecomplex *, doublecomplex *, integer *, integer *), zptsvx_(char *, integer *, integer *, doublereal *, doublecomplex *, doublereal * , doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublereal *, doublereal *, doublereal *, doublecomplex *, doublereal *, integer *); /* Fortran I/O blocks */ static cilist io___35 = { 0, 0, 0, fmt_9999, 0 }; static cilist io___38 = { 0, 0, 0, fmt_9998, 0 }; /* -- LAPACK test routine (version 3.1) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZDRVPT tests ZPTSV and -SVX. */ /* Arguments */ /* ========= */ /* DOTYPE (input) LOGICAL array, dimension (NTYPES) */ /* The matrix types to be used for testing. Matrices of type j */ /* (for 1 <= j <= NTYPES) are used for testing if DOTYPE(j) = */ /* .TRUE.; if DOTYPE(j) = .FALSE., then type j is not used. */ /* NN (input) INTEGER */ /* The number of values of N contained in the vector NVAL. */ /* NVAL (input) INTEGER array, dimension (NN) */ /* The values of the matrix dimension N. */ /* NRHS (input) INTEGER */ /* The number of right hand side vectors to be generated for */ /* each linear system. */ /* THRESH (input) DOUBLE PRECISION */ /* The threshold value for the test ratios. A result is */ /* included in the output file if RESULT >= THRESH. To have */ /* every test ratio printed, use THRESH = 0. */ /* TSTERR (input) LOGICAL */ /* Flag that indicates whether error exits are to be tested. */ /* A (workspace) COMPLEX*16 array, dimension (NMAX*2) */ /* D (workspace) DOUBLE PRECISION array, dimension (NMAX*2) */ /* E (workspace) COMPLEX*16 array, dimension (NMAX*2) */ /* B (workspace) COMPLEX*16 array, dimension (NMAX*NRHS) */ /* X (workspace) COMPLEX*16 array, dimension (NMAX*NRHS) */ /* XACT (workspace) COMPLEX*16 array, dimension (NMAX*NRHS) */ /* WORK (workspace) COMPLEX*16 array, dimension */ /* (NMAX*max(3,NRHS)) */ /* RWORK (workspace) DOUBLE PRECISION array, dimension (NMAX+2*NRHS) */ /* NOUT (input) INTEGER */ /* The unit number for output. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. Local Arrays .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Scalars in Common .. */ /* .. */ /* .. Common blocks .. */ /* .. */ /* .. Data statements .. */ /* Parameter adjustments */ --rwork; --work; --xact; --x; --b; --e; --d__; --a; --nval; --dotype; /* Function Body */ /* .. */ /* .. Executable Statements .. */ s_copy(path, "Zomplex precision", (ftnlen)1, (ftnlen)17); s_copy(path + 1, "PT", (ftnlen)2, (ftnlen)2); nrun = 0; nfail = 0; nerrs = 0; for (i__ = 1; i__ <= 4; ++i__) { iseed[i__ - 1] = iseedy[i__ - 1]; /* L10: */ } /* Test the error exits */ if (*tsterr) { zerrvx_(path, nout); } infoc_1.infot = 0; i__1 = *nn; for (in = 1; in <= i__1; ++in) { /* Do for each value of N in NVAL. 
*/ n = nval[in]; lda = max(1,n); nimat = 12; if (n <= 0) { nimat = 1; } i__2 = nimat; for (imat = 1; imat <= i__2; ++imat) { /* Do the tests only if DOTYPE( IMAT ) is true. */ if (n > 0 && ! dotype[imat]) { goto L110; } /* Set up parameters with ZLATB4. */ zlatb4_(path, &imat, &n, &n, type__, &kl, &ku, &anorm, &mode, & cond, dist); zerot = imat >= 8 && imat <= 10; if (imat <= 6) { /* Type 1-6: generate a symmetric tridiagonal matrix of */ /* known condition number in lower triangular band storage. */ s_copy(srnamc_1.srnamt, "ZLATMS", (ftnlen)6, (ftnlen)6); zlatms_(&n, &n, dist, iseed, type__, &rwork[1], &mode, &cond, &anorm, &kl, &ku, "B", &a[1], &c__2, &work[1], &info); /* Check the error code from ZLATMS. */ if (info != 0) { alaerh_(path, "ZLATMS", &info, &c__0, " ", &n, &n, &kl, & ku, &c_n1, &imat, &nfail, &nerrs, nout); goto L110; } izero = 0; /* Copy the matrix to D and E. */ ia = 1; i__3 = n - 1; for (i__ = 1; i__ <= i__3; ++i__) { i__4 = i__; i__5 = ia; d__[i__4] = a[i__5].r; i__4 = i__; i__5 = ia + 1; e[i__4].r = a[i__5].r, e[i__4].i = a[i__5].i; ia += 2; /* L20: */ } if (n > 0) { i__3 = n; i__4 = ia; d__[i__3] = a[i__4].r; } } else { /* Type 7-12: generate a diagonally dominant matrix with */ /* unknown condition number in the vectors D and E. */ if (! zerot || ! dotype[7]) { /* Let D and E have values from [-1,1]. */ dlarnv_(&c__2, iseed, &n, &d__[1]); i__3 = n - 1; zlarnv_(&c__2, iseed, &i__3, &e[1]); /* Make the tridiagonal matrix diagonally dominant. */ if (n == 1) { d__[1] = abs(d__[1]); } else { d__[1] = abs(d__[1]) + z_abs(&e[1]); d__[n] = (d__1 = d__[n], abs(d__1)) + z_abs(&e[n - 1]) ; i__3 = n - 1; for (i__ = 2; i__ <= i__3; ++i__) { d__[i__] = (d__1 = d__[i__], abs(d__1)) + z_abs(& e[i__]) + z_abs(&e[i__ - 1]); /* L30: */ } } /* Scale D and E so the maximum element is ANORM. */ ix = idamax_(&n, &d__[1], &c__1); dmax__ = d__[ix]; d__1 = anorm / dmax__; dscal_(&n, &d__1, &d__[1], &c__1); if (n > 1) { i__3 = n - 1; d__1 = anorm / dmax__; zdscal_(&i__3, &d__1, &e[1], &c__1); } } else if (izero > 0) { /* Reuse the last matrix by copying back the zeroed out */ /* elements. */ if (izero == 1) { d__[1] = z__[1]; if (n > 1) { e[1].r = z__[2], e[1].i = 0.; } } else if (izero == n) { i__3 = n - 1; e[i__3].r = z__[0], e[i__3].i = 0.; d__[n] = z__[1]; } else { i__3 = izero - 1; e[i__3].r = z__[0], e[i__3].i = 0.; d__[izero] = z__[1]; i__3 = izero; e[i__3].r = z__[2], e[i__3].i = 0.; } } /* For types 8-10, set one row and column of the matrix to */ /* zero. */ izero = 0; if (imat == 8) { izero = 1; z__[1] = d__[1]; d__[1] = 0.; if (n > 1) { z__[2] = e[1].r; e[1].r = 0., e[1].i = 0.; } } else if (imat == 9) { izero = n; if (n > 1) { i__3 = n - 1; z__[0] = e[i__3].r; i__3 = n - 1; e[i__3].r = 0., e[i__3].i = 0.; } z__[1] = d__[n]; d__[n] = 0.; } else if (imat == 10) { izero = (n + 1) / 2; if (izero > 1) { i__3 = izero - 1; z__[0] = e[i__3].r; i__3 = izero - 1; e[i__3].r = 0., e[i__3].i = 0.; i__3 = izero; z__[2] = e[i__3].r; i__3 = izero; e[i__3].r = 0., e[i__3].i = 0.; } z__[1] = d__[izero]; d__[izero] = 0.; } } /* Generate NRHS random solution vectors. */ ix = 1; i__3 = *nrhs; for (j = 1; j <= i__3; ++j) { zlarnv_(&c__2, iseed, &n, &xact[ix]); ix += lda; /* L40: */ } /* Set the right hand side. 
*/ zlaptm_("Lower", &n, nrhs, &c_b24, &d__[1], &e[1], &xact[1], &lda, &c_b25, &b[1], &lda); for (ifact = 1; ifact <= 2; ++ifact) { if (ifact == 1) { *(unsigned char *)fact = 'F'; } else { *(unsigned char *)fact = 'N'; } /* Compute the condition number for comparison with */ /* the value returned by ZPTSVX. */ if (zerot) { if (ifact == 1) { goto L100; } rcondc = 0.; } else if (ifact == 1) { /* Compute the 1-norm of A. */ anorm = zlanht_("1", &n, &d__[1], &e[1]); dcopy_(&n, &d__[1], &c__1, &d__[n + 1], &c__1); if (n > 1) { i__3 = n - 1; zcopy_(&i__3, &e[1], &c__1, &e[n + 1], &c__1); } /* Factor the matrix A. */ zpttrf_(&n, &d__[n + 1], &e[n + 1], &info); /* Use ZPTTRS to solve for one column at a time of */ /* inv(A), computing the maximum column sum as we go. */ ainvnm = 0.; i__3 = n; for (i__ = 1; i__ <= i__3; ++i__) { i__4 = n; for (j = 1; j <= i__4; ++j) { i__5 = j; x[i__5].r = 0., x[i__5].i = 0.; /* L50: */ } i__4 = i__; x[i__4].r = 1., x[i__4].i = 0.; zpttrs_("Lower", &n, &c__1, &d__[n + 1], &e[n + 1], & x[1], &lda, &info); /* Computing MAX */ d__1 = ainvnm, d__2 = dzasum_(&n, &x[1], &c__1); ainvnm = max(d__1,d__2); /* L60: */ } /* Compute the 1-norm condition number of A. */ if (anorm <= 0. || ainvnm <= 0.) { rcondc = 1.; } else { rcondc = 1. / anorm / ainvnm; } } if (ifact == 2) { /* --- Test ZPTSV -- */ dcopy_(&n, &d__[1], &c__1, &d__[n + 1], &c__1); if (n > 1) { i__3 = n - 1; zcopy_(&i__3, &e[1], &c__1, &e[n + 1], &c__1); } zlacpy_("Full", &n, nrhs, &b[1], &lda, &x[1], &lda); /* Factor A as L*D*L' and solve the system A*X = B. */ s_copy(srnamc_1.srnamt, "ZPTSV ", (ftnlen)6, (ftnlen)6); zptsv_(&n, nrhs, &d__[n + 1], &e[n + 1], &x[1], &lda, & info); /* Check error code from ZPTSV . */ if (info != izero) { alaerh_(path, "ZPTSV ", &info, &izero, " ", &n, &n, & c__1, &c__1, nrhs, &imat, &nfail, &nerrs, nout); } nt = 0; if (izero == 0) { /* Check the factorization by computing the ratio */ /* norm(L*D*L' - A) / (n * norm(A) * EPS ) */ zptt01_(&n, &d__[1], &e[1], &d__[n + 1], &e[n + 1], & work[1], result); /* Compute the residual in the solution. */ zlacpy_("Full", &n, nrhs, &b[1], &lda, &work[1], &lda); zptt02_("Lower", &n, nrhs, &d__[1], &e[1], &x[1], & lda, &work[1], &lda, &result[1]); /* Check solution from generated exact solution. */ zget04_(&n, nrhs, &x[1], &lda, &xact[1], &lda, & rcondc, &result[2]); nt = 3; } /* Print information about the tests that did not pass */ /* the threshold. */ i__3 = nt; for (k = 1; k <= i__3; ++k) { if (result[k - 1] >= *thresh) { if (nfail == 0 && nerrs == 0) { aladhd_(nout, path); } io___35.ciunit = *nout; s_wsfe(&io___35); do_fio(&c__1, "ZPTSV ", (ftnlen)6); do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer)) ; do_fio(&c__1, (char *)&imat, (ftnlen)sizeof( integer)); do_fio(&c__1, (char *)&k, (ftnlen)sizeof(integer)) ; do_fio(&c__1, (char *)&result[k - 1], (ftnlen) sizeof(doublereal)); e_wsfe(); ++nfail; } /* L70: */ } nrun += nt; } /* --- Test ZPTSVX --- */ if (ifact > 1) { /* Initialize D( N+1:2*N ) and E( N+1:2*N ) to zero. */ i__3 = n - 1; for (i__ = 1; i__ <= i__3; ++i__) { d__[n + i__] = 0.; i__4 = n + i__; e[i__4].r = 0., e[i__4].i = 0.; /* L80: */ } if (n > 0) { d__[n + n] = 0.; } } zlaset_("Full", &n, nrhs, &c_b62, &c_b62, &x[1], &lda); /* Solve the system and compute the condition number and */ /* error bounds using ZPTSVX. 
*/ s_copy(srnamc_1.srnamt, "ZPTSVX", (ftnlen)6, (ftnlen)6); zptsvx_(fact, &n, nrhs, &d__[1], &e[1], &d__[n + 1], &e[n + 1] , &b[1], &lda, &x[1], &lda, &rcond, &rwork[1], &rwork[ *nrhs + 1], &work[1], &rwork[(*nrhs << 1) + 1], &info); /* Check the error code from ZPTSVX. */ if (info != izero) { alaerh_(path, "ZPTSVX", &info, &izero, fact, &n, &n, & c__1, &c__1, nrhs, &imat, &nfail, &nerrs, nout); } if (izero == 0) { if (ifact == 2) { /* Check the factorization by computing the ratio */ /* norm(L*D*L' - A) / (n * norm(A) * EPS ) */ k1 = 1; zptt01_(&n, &d__[1], &e[1], &d__[n + 1], &e[n + 1], & work[1], result); } else { k1 = 2; } /* Compute the residual in the solution. */ zlacpy_("Full", &n, nrhs, &b[1], &lda, &work[1], &lda); zptt02_("Lower", &n, nrhs, &d__[1], &e[1], &x[1], &lda, & work[1], &lda, &result[1]); /* Check solution from generated exact solution. */ zget04_(&n, nrhs, &x[1], &lda, &xact[1], &lda, &rcondc, & result[2]); /* Check error bounds from iterative refinement. */ zptt05_(&n, nrhs, &d__[1], &e[1], &b[1], &lda, &x[1], & lda, &xact[1], &lda, &rwork[1], &rwork[*nrhs + 1], &result[3]); } else { k1 = 6; } /* Check the reciprocal of the condition number. */ result[5] = dget06_(&rcond, &rcondc); /* Print information about the tests that did not pass */ /* the threshold. */ for (k = k1; k <= 6; ++k) { if (result[k - 1] >= *thresh) { if (nfail == 0 && nerrs == 0) { aladhd_(nout, path); } io___38.ciunit = *nout; s_wsfe(&io___38); do_fio(&c__1, "ZPTSVX", (ftnlen)6); do_fio(&c__1, fact, (ftnlen)1); do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&imat, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&k, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&result[k - 1], (ftnlen)sizeof( doublereal)); e_wsfe(); ++nfail; } /* L90: */ } nrun = nrun + 7 - k1; L100: ; } L110: ; } /* L120: */ } /* Print a summary of the results. */ alasvm_(path, nout, &nfail, &nrun, &nerrs); return 0; /* End of ZDRVPT */ } /* zdrvpt_ */
/* Subroutine */ int zlarfgp_(integer *n, doublecomplex *alpha, doublecomplex *x, integer *incx, doublecomplex *tau) { /* System generated locals */ integer i__1, i__2; doublereal d__1, d__2; doublecomplex z__1, z__2; /* Builtin functions */ double d_imag(doublecomplex *), d_sign(doublereal *, doublereal *), z_abs( doublecomplex *); /* Local variables */ integer j; doublecomplex savealpha; integer knt; doublereal beta, alphi, alphr; extern /* Subroutine */ int zscal_(integer *, doublecomplex *, doublecomplex *, integer *); doublereal xnorm; extern doublereal dlapy2_(doublereal *, doublereal *), dlapy3_(doublereal *, doublereal *, doublereal *), dznrm2_(integer *, doublecomplex * , integer *), dlamch_(char *); extern /* Subroutine */ int zdscal_(integer *, doublereal *, doublecomplex *, integer *); doublereal bignum; extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *, doublecomplex *); doublereal smlnum; /* -- LAPACK auxiliary routine (version 3.4.2) -- */ /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */ /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */ /* September 2012 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Executable Statements .. */ /* Parameter adjustments */ --x; /* Function Body */ if (*n <= 0) { tau->r = 0., tau->i = 0.; return 0; } i__1 = *n - 1; xnorm = dznrm2_(&i__1, &x[1], incx); alphr = alpha->r; alphi = d_imag(alpha); if (xnorm == 0.) { /* H = [1-alpha/abs(alpha) 0; 0 I], sign chosen so ALPHA >= 0. */ if (alphi == 0.) { if (alphr >= 0.) { /* When TAU.eq.ZERO, the vector is special-cased to be */ /* all zeros in the application routines. We do not need */ /* to clear it. */ tau->r = 0., tau->i = 0.; } else { /* However, the application routines rely on explicit */ /* zero checks when TAU.ne.ZERO, and we must clear X. */ tau->r = 2., tau->i = 0.; i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { i__2 = (j - 1) * *incx + 1; x[i__2].r = 0.; x[i__2].i = 0.; // , expr subst } z__1.r = -alpha->r; z__1.i = -alpha->i; // , expr subst alpha->r = z__1.r, alpha->i = z__1.i; } } else { /* Only "reflecting" the diagonal entry to be real and non-negative. */ xnorm = dlapy2_(&alphr, &alphi); d__1 = 1. - alphr / xnorm; d__2 = -alphi / xnorm; z__1.r = d__1; z__1.i = d__2; // , expr subst tau->r = z__1.r, tau->i = z__1.i; i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { i__2 = (j - 1) * *incx + 1; x[i__2].r = 0.; x[i__2].i = 0.; // , expr subst } alpha->r = xnorm, alpha->i = 0.; } } else { /* general case */ d__1 = dlapy3_(&alphr, &alphi, &xnorm); beta = d_sign(&d__1, &alphr); smlnum = dlamch_("S") / dlamch_("E"); bignum = 1. 
/ smlnum; knt = 0; if (abs(beta) < smlnum) { /* XNORM, BETA may be inaccurate; scale X and recompute them */ L10: ++knt; i__1 = *n - 1; zdscal_(&i__1, &bignum, &x[1], incx); beta *= bignum; alphi *= bignum; alphr *= bignum; if (abs(beta) < smlnum) { goto L10; } /* New BETA is at most 1, at least SMLNUM */ i__1 = *n - 1; xnorm = dznrm2_(&i__1, &x[1], incx); z__1.r = alphr; z__1.i = alphi; // , expr subst alpha->r = z__1.r, alpha->i = z__1.i; d__1 = dlapy3_(&alphr, &alphi, &xnorm); beta = d_sign(&d__1, &alphr); } savealpha.r = alpha->r; savealpha.i = alpha->i; // , expr subst z__1.r = alpha->r + beta; z__1.i = alpha->i; // , expr subst alpha->r = z__1.r, alpha->i = z__1.i; if (beta < 0.) { beta = -beta; z__2.r = -alpha->r; z__2.i = -alpha->i; // , expr subst z__1.r = z__2.r / beta; z__1.i = z__2.i / beta; // , expr subst tau->r = z__1.r, tau->i = z__1.i; } else { alphr = alphi * (alphi / alpha->r); alphr += xnorm * (xnorm / alpha->r); d__1 = alphr / beta; d__2 = -alphi / beta; z__1.r = d__1; z__1.i = d__2; // , expr subst tau->r = z__1.r, tau->i = z__1.i; d__1 = -alphr; z__1.r = d__1; z__1.i = alphi; // , expr subst alpha->r = z__1.r, alpha->i = z__1.i; } zladiv_(&z__1, &c_b5, alpha); alpha->r = z__1.r, alpha->i = z__1.i; if (z_abs(tau) <= smlnum) { /* In the case where the computed TAU ends up being a denormalized number, */ /* it loses relative accuracy. This is a BIG problem. Solution: flush TAU */ /* to ZERO (or TWO or whatever makes a nonnegative real number for BETA). */ /* (Bug report provided by Pat Quillen from MathWorks on Jul 29, 2009.) */ /* (Thanks Pat. Thanks MathWorks.) */ alphr = savealpha.r; alphi = d_imag(&savealpha); if (alphi == 0.) { if (alphr >= 0.) { tau->r = 0., tau->i = 0.; } else { tau->r = 2., tau->i = 0.; i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { i__2 = (j - 1) * *incx + 1; x[i__2].r = 0.; x[i__2].i = 0.; // , expr subst } z__1.r = -savealpha.r; z__1.i = -savealpha.i; // , expr subst beta = z__1.r; } } else { xnorm = dlapy2_(&alphr, &alphi); d__1 = 1. - alphr / xnorm; d__2 = -alphi / xnorm; z__1.r = d__1; z__1.i = d__2; // , expr subst tau->r = z__1.r, tau->i = z__1.i; i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { i__2 = (j - 1) * *incx + 1; x[i__2].r = 0.; x[i__2].i = 0.; // , expr subst } beta = xnorm; } } else { /* This is the general case. */ i__1 = *n - 1; zscal_(&i__1, alpha, &x[1], incx); } /* If BETA is subnormal, it may lose relative accuracy */ i__1 = knt; for (j = 1; j <= i__1; ++j) { beta *= smlnum; /* L20: */ } alpha->r = beta, alpha->i = 0.; } return 0; /* End of ZLARFGP */ }
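/*
 * Example (sketch, not part of the library): applying the reflector produced
 * by zlarfgp_ to another vector.  After the call, tau is set, alpha holds
 * beta (real, non-negative) and x holds v(2:n), where H = I - tau*v*v^H with
 * v(1) = 1.  The loop below forms y := H*y = y - tau*v*(v^H * y) by hand;
 * in LAPACK this is the job of ZLARF.  f2c typedefs are assumed.
 */
static void apply_reflector(integer n, doublecomplex *v2 /* v(2:n), length n-1 */,
                            doublecomplex tau, doublecomplex *y /* length n */)
{
    doublecomplex dot, td;
    integer j;

    /* dot = v^H * y, with the implicit v(1) = 1 */
    dot.r = y[0].r;
    dot.i = y[0].i;
    for (j = 1; j < n; ++j) {
        /* conj(v(j+1)) * y(j+1) */
        dot.r += v2[j-1].r * y[j].r + v2[j-1].i * y[j].i;
        dot.i += v2[j-1].r * y[j].i - v2[j-1].i * y[j].r;
    }

    /* y := y - tau * v * dot */
    td.r = tau.r * dot.r - tau.i * dot.i;
    td.i = tau.r * dot.i + tau.i * dot.r;
    y[0].r -= td.r;
    y[0].i -= td.i;
    for (j = 1; j < n; ++j) {
        y[j].r -= td.r * v2[j-1].r - td.i * v2[j-1].i;
        y[j].i -= td.r * v2[j-1].i + td.i * v2[j-1].r;
    }
}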
/* Subroutine */ int zlaqps_(integer *m, integer *n, integer *offset, integer *nb, integer *kb, doublecomplex *a, integer *lda, integer *jpvt, doublecomplex *tau, doublereal *vn1, doublereal *vn2, doublecomplex * auxv, doublecomplex *f, integer *ldf) { /* System generated locals */ integer a_dim1, a_offset, f_dim1, f_offset, i__1, i__2, i__3; doublereal d__1, d__2; doublecomplex z__1; /* Builtin functions */ double sqrt(doublereal); void d_cnjg(doublecomplex *, doublecomplex *); double z_abs(doublecomplex *); integer i_dnnt(doublereal *); /* Local variables */ integer j, k, rk; doublecomplex akk; integer pvt; doublereal temp, temp2, tol3z; integer itemp; extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *), zgemv_(char *, integer *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *), zswap_(integer *, doublecomplex *, integer *, doublecomplex *, integer *); extern doublereal dznrm2_(integer *, doublecomplex *, integer *), dlamch_( char *); extern integer idamax_(integer *, doublereal *, integer *); integer lsticc; extern /* Subroutine */ int zlarfp_(integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *); integer lastrk; /* -- LAPACK auxiliary routine (version 3.2) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZLAQPS computes a step of QR factorization with column pivoting */ /* of a complex M-by-N matrix A by using Blas-3. It tries to factorize */ /* NB columns from A starting from the row OFFSET+1, and updates all */ /* of the matrix with Blas-3 xGEMM. */ /* In some cases, due to catastrophic cancellations, it cannot */ /* factorize NB columns. Hence, the actual number of factorized */ /* columns is returned in KB. */ /* Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */ /* Arguments */ /* ========= */ /* M (input) INTEGER */ /* The number of rows of the matrix A. M >= 0. */ /* N (input) INTEGER */ /* The number of columns of the matrix A. N >= 0 */ /* OFFSET (input) INTEGER */ /* The number of rows of A that have been factorized in */ /* previous steps. */ /* NB (input) INTEGER */ /* The number of columns to factorize. */ /* KB (output) INTEGER */ /* The number of columns actually factorized. */ /* A (input/output) COMPLEX*16 array, dimension (LDA,N) */ /* On entry, the M-by-N matrix A. */ /* On exit, block A(OFFSET+1:M,1:KB) is the triangular */ /* factor obtained and block A(1:OFFSET,1:N) has been */ /* accordingly pivoted, but no factorized. */ /* The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has */ /* been updated. */ /* LDA (input) INTEGER */ /* The leading dimension of the array A. LDA >= max(1,M). */ /* JPVT (input/output) INTEGER array, dimension (N) */ /* JPVT(I) = K <==> Column K of the full matrix A has been */ /* permuted into position I in AP. */ /* TAU (output) COMPLEX*16 array, dimension (KB) */ /* The scalar factors of the elementary reflectors. */ /* VN1 (input/output) DOUBLE PRECISION array, dimension (N) */ /* The vector with the partial column norms. */ /* VN2 (input/output) DOUBLE PRECISION array, dimension (N) */ /* The vector with the exact column norms. */ /* AUXV (input/output) COMPLEX*16 array, dimension (NB) */ /* Auxiliar vector. 
*/ /* F (input/output) COMPLEX*16 array, dimension (LDF,NB) */ /* Matrix F' = L*Y'*A. */ /* LDF (input) INTEGER */ /* The leading dimension of the array F. LDF >= max(1,N). */ /* Further Details */ /* =============== */ /* Based on contributions by */ /* G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */ /* X. Sun, Computer Science Dept., Duke University, USA */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; --jpvt; --tau; --vn1; --vn2; --auxv; f_dim1 = *ldf; f_offset = 1 + f_dim1; f -= f_offset; /* Function Body */ /* Computing MIN */ i__1 = *m, i__2 = *n + *offset; lastrk = min(i__1,i__2); lsticc = 0; k = 0; tol3z = sqrt(dlamch_("Epsilon")); /* Beginning of while loop. */ L10: if (k < *nb && lsticc == 0) { ++k; rk = *offset + k; /* Determine ith pivot column and swap if necessary */ i__1 = *n - k + 1; pvt = k - 1 + idamax_(&i__1, &vn1[k], &c__1); if (pvt != k) { zswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &c__1); i__1 = k - 1; zswap_(&i__1, &f[pvt + f_dim1], ldf, &f[k + f_dim1], ldf); itemp = jpvt[pvt]; jpvt[pvt] = jpvt[k]; jpvt[k] = itemp; vn1[pvt] = vn1[k]; vn2[pvt] = vn2[k]; } /* Apply previous Householder reflectors to column K: */ /* A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. */ if (k > 1) { i__1 = k - 1; for (j = 1; j <= i__1; ++j) { i__2 = k + j * f_dim1; d_cnjg(&z__1, &f[k + j * f_dim1]); f[i__2].r = z__1.r, f[i__2].i = z__1.i; /* L20: */ } i__1 = *m - rk + 1; i__2 = k - 1; z__1.r = -1., z__1.i = -0.; zgemv_("No transpose", &i__1, &i__2, &z__1, &a[rk + a_dim1], lda, &f[k + f_dim1], ldf, &c_b2, &a[rk + k * a_dim1], &c__1); i__1 = k - 1; for (j = 1; j <= i__1; ++j) { i__2 = k + j * f_dim1; d_cnjg(&z__1, &f[k + j * f_dim1]); f[i__2].r = z__1.r, f[i__2].i = z__1.i; /* L30: */ } } /* Generate elementary reflector H(k). */ if (rk < *m) { i__1 = *m - rk + 1; zlarfp_(&i__1, &a[rk + k * a_dim1], &a[rk + 1 + k * a_dim1], & c__1, &tau[k]); } else { zlarfp_(&c__1, &a[rk + k * a_dim1], &a[rk + k * a_dim1], &c__1, & tau[k]); } i__1 = rk + k * a_dim1; akk.r = a[i__1].r, akk.i = a[i__1].i; i__1 = rk + k * a_dim1; a[i__1].r = 1., a[i__1].i = 0.; /* Compute Kth column of F: */ /* Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K). */ if (k < *n) { i__1 = *m - rk + 1; i__2 = *n - k; zgemv_("Conjugate transpose", &i__1, &i__2, &tau[k], &a[rk + (k + 1) * a_dim1], lda, &a[rk + k * a_dim1], &c__1, &c_b1, &f[ k + 1 + k * f_dim1], &c__1); } /* Padding F(1:K,K) with zeros. */ i__1 = k; for (j = 1; j <= i__1; ++j) { i__2 = j + k * f_dim1; f[i__2].r = 0., f[i__2].i = 0.; /* L40: */ } /* Incremental updating of F: */ /* F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)' */ /* *A(RK:M,K). */ if (k > 1) { i__1 = *m - rk + 1; i__2 = k - 1; i__3 = k; z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i; zgemv_("Conjugate transpose", &i__1, &i__2, &z__1, &a[rk + a_dim1] , lda, &a[rk + k * a_dim1], &c__1, &c_b1, &auxv[1], &c__1); i__1 = k - 1; zgemv_("No transpose", n, &i__1, &c_b2, &f[f_dim1 + 1], ldf, & auxv[1], &c__1, &c_b2, &f[k * f_dim1 + 1], &c__1); } /* Update the current row of A: */ /* A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. 
*/ if (k < *n) { i__1 = *n - k; z__1.r = -1., z__1.i = -0.; zgemm_("No transpose", "Conjugate transpose", &c__1, &i__1, &k, & z__1, &a[rk + a_dim1], lda, &f[k + 1 + f_dim1], ldf, & c_b2, &a[rk + (k + 1) * a_dim1], lda); } /* Update partial column norms. */ if (rk < lastrk) { i__1 = *n; for (j = k + 1; j <= i__1; ++j) { if (vn1[j] != 0.) { /* NOTE: The following 4 lines follow from the analysis in */ /* Lapack Working Note 176. */ temp = z_abs(&a[rk + j * a_dim1]) / vn1[j]; /* Computing MAX */ d__1 = 0., d__2 = (temp + 1.) * (1. - temp); temp = max(d__1,d__2); /* Computing 2nd power */ d__1 = vn1[j] / vn2[j]; temp2 = temp * (d__1 * d__1); if (temp2 <= tol3z) { vn2[j] = (doublereal) lsticc; lsticc = j; } else { vn1[j] *= sqrt(temp); } } /* L50: */ } } i__1 = rk + k * a_dim1; a[i__1].r = akk.r, a[i__1].i = akk.i; /* End of while loop. */ goto L10; } *kb = k; rk = *offset + *kb; /* Apply the block reflector to the rest of the matrix: */ /* A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - */ /* A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)'. */ /* Computing MIN */ i__1 = *n, i__2 = *m - *offset; if (*kb < min(i__1,i__2)) { i__1 = *m - rk; i__2 = *n - *kb; z__1.r = -1., z__1.i = -0.; zgemm_("No transpose", "Conjugate transpose", &i__1, &i__2, kb, &z__1, &a[rk + 1 + a_dim1], lda, &f[*kb + 1 + f_dim1], ldf, &c_b2, & a[rk + 1 + (*kb + 1) * a_dim1], lda); } /* Recomputation of difficult columns. */ L60: if (lsticc > 0) { itemp = i_dnnt(&vn2[lsticc]); i__1 = *m - rk; vn1[lsticc] = dznrm2_(&i__1, &a[rk + 1 + lsticc * a_dim1], &c__1); /* NOTE: The computation of VN1( LSTICC ) relies on the fact that */ /* SNRM2 does not fail on vectors with norm below the value of */ /* SQRT(DLAMCH('S')) */ vn2[lsticc] = vn1[lsticc]; lsticc = itemp; goto L60; } return 0; /* End of ZLAQPS */ } /* zlaqps_ */
/* Subroutine */ int zhgeqz_(char *job, char *compq, char *compz, integer *n, integer *ilo, integer *ihi, doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, doublecomplex *alpha, doublecomplex * beta, doublecomplex *q, integer *ldq, doublecomplex *z__, integer * ldz, doublecomplex *work, integer *lwork, doublereal *rwork, integer * info) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5, i__6; doublereal d__1, d__2, d__3, d__4, d__5, d__6; doublecomplex z__1, z__2, z__3, z__4, z__5, z__6; /* Builtin functions */ double z_abs(doublecomplex *); void d_cnjg(doublecomplex *, doublecomplex *); double d_imag(doublecomplex *); void z_div(doublecomplex *, doublecomplex *, doublecomplex *), pow_zi( doublecomplex *, doublecomplex *, integer *), z_sqrt( doublecomplex *, doublecomplex *); /* Local variables */ static doublereal absb, atol, btol, temp, opst; extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, doublecomplex *, integer *, doublereal *, doublecomplex *); static doublereal temp2, c__; static integer j; static doublecomplex s, t; extern logical lsame_(char *, char *); static doublecomplex ctemp; static integer iiter, ilast, jiter; static doublereal anorm; static integer maxit; static doublereal bnorm; static doublecomplex shift; extern /* Subroutine */ int zscal_(integer *, doublecomplex *, doublecomplex *, integer *); static doublereal tempr; static doublecomplex ctemp2, ctemp3; static logical ilazr2; static integer jc, in; static doublereal ascale, bscale; static doublecomplex u12; extern doublereal dlamch_(char *); static integer jr, nq; static doublecomplex signbc; static integer nz; static doublereal safmin; extern /* Subroutine */ int xerbla_(char *, integer *); static doublecomplex eshift; static logical ilschr; static integer icompq, ilastm; static doublecomplex rtdisc; static integer ischur; extern doublereal zlanhs_(char *, integer *, doublecomplex *, integer *, doublereal *); static logical ilazro; static integer icompz, ifirst; extern /* Subroutine */ int zlartg_(doublecomplex *, doublecomplex *, doublereal *, doublecomplex *, doublecomplex *); static integer ifrstm; extern /* Subroutine */ int zlaset_(char *, integer *, integer *, doublecomplex *, doublecomplex *, doublecomplex *, integer *); static integer istart; static logical lquery; static doublecomplex ad11, ad12, ad21, ad22; static integer jch; static logical ilq, ilz; static doublereal ulp; static doublecomplex abi22; #define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1 #define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)] #define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1 #define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)] #define q_subscr(a_1,a_2) (a_2)*q_dim1 + a_1 #define q_ref(a_1,a_2) q[q_subscr(a_1,a_2)] #define z___subscr(a_1,a_2) (a_2)*z_dim1 + a_1 #define z___ref(a_1,a_2) z__[z___subscr(a_1,a_2)] /* -- LAPACK routine (instrumented to count operations, version 3.0) -- Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University June 30, 1999 ----------------------- Begin Timing Code ------------------------ Common block to return operation count and iteration count ITCNT is initialized to 0, OPS is only incremented OPST is used to accumulate small contributions to OPS to avoid roundoff error ------------------------ End Timing Code ------------------------- Purpose ======= ZHGEQZ implements a single-shift version of the QZ method for finding the generalized eigenvalues w(i)=ALPHA(i)/BETA(i) of the equation det( A - w(i) B ) = 0 If JOB='S', then the pair (A,B) is simultaneously reduced to Schur form (i.e., A and B are both upper triangular) by applying one unitary tranformation (usually called Q) on the left and another (usually called Z) on the right. The diagonal elements of A are then ALPHA(1),...,ALPHA(N), and of B are BETA(1),...,BETA(N). If JOB='S' and COMPQ and COMPZ are 'V' or 'I', then the unitary transformations used to reduce (A,B) are accumulated into the arrays Q and Z s.t.: Q(in) A(in) Z(in)* = Q(out) A(out) Z(out)* Q(in) B(in) Z(in)* = Q(out) B(out) Z(out)* Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), pp. 241--256. Arguments ========= JOB (input) CHARACTER*1 = 'E': compute only ALPHA and BETA. A and B will not necessarily be put into generalized Schur form. = 'S': put A and B into generalized Schur form, as well as computing ALPHA and BETA. COMPQ (input) CHARACTER*1 = 'N': do not modify Q. = 'V': multiply the array Q on the right by the conjugate transpose of the unitary tranformation that is applied to the left side of A and B to reduce them to Schur form. = 'I': like COMPQ='V', except that Q will be initialized to the identity first. COMPZ (input) CHARACTER*1 = 'N': do not modify Z. = 'V': multiply the array Z on the right by the unitary tranformation that is applied to the right side of A and B to reduce them to Schur form. = 'I': like COMPZ='V', except that Z will be initialized to the identity first. N (input) INTEGER The order of the matrices A, B, Q, and Z. N >= 0. ILO (input) INTEGER IHI (input) INTEGER It is assumed that A is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N. 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. A (input/output) COMPLEX*16 array, dimension (LDA, N) On entry, the N-by-N upper Hessenberg matrix A. Elements below the subdiagonal must be zero. If JOB='S', then on exit A and B will have been simultaneously reduced to upper triangular form. If JOB='E', then on exit A will have been destroyed. LDA (input) INTEGER The leading dimension of the array A. LDA >= max( 1, N ). B (input/output) COMPLEX*16 array, dimension (LDB, N) On entry, the N-by-N upper triangular matrix B. Elements below the diagonal must be zero. If JOB='S', then on exit A and B will have been simultaneously reduced to upper triangular form. If JOB='E', then on exit B will have been destroyed. LDB (input) INTEGER The leading dimension of the array B. LDB >= max( 1, N ). ALPHA (output) COMPLEX*16 array, dimension (N) The diagonal elements of A when the pair (A,B) has been reduced to Schur form. ALPHA(i)/BETA(i) i=1,...,N are the generalized eigenvalues. BETA (output) COMPLEX*16 array, dimension (N) The diagonal elements of B when the pair (A,B) has been reduced to Schur form. ALPHA(i)/BETA(i) i=1,...,N are the generalized eigenvalues. 
A and B are normalized so that BETA(1),...,BETA(N) are non-negative real numbers. Q (input/output) COMPLEX*16 array, dimension (LDQ, N) If COMPQ='N', then Q will not be referenced. If COMPQ='V' or 'I', then the conjugate transpose of the unitary transformations which are applied to A and B on the left will be applied to the array Q on the right. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= 1. If COMPQ='V' or 'I', then LDQ >= N. Z (input/output) COMPLEX*16 array, dimension (LDZ, N) If COMPZ='N', then Z will not be referenced. If COMPZ='V' or 'I', then the unitary transformations which are applied to A and B on the right will be applied to the array Z on the right. LDZ (input) INTEGER The leading dimension of the array Z. LDZ >= 1. If COMPZ='V' or 'I', then LDZ >= N. WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) On exit, if INFO >= 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,N). If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. RWORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value = 1,...,N: the QZ iteration did not converge. (A,B) is not in Schur form, but ALPHA(i) and BETA(i), i=INFO+1,...,N should be correct. = N+1,...,2*N: the shift calculation failed. (A,B) is not in Schur form, but ALPHA(i) and BETA(i), i=INFO-N+1,...,N should be correct. > 2*N: various "impossible" errors. Further Details =============== We assume that complex ABS works as long as its value is less than overflow. ===================================================================== ----------------------- Begin Timing Code ------------------------ Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; --alpha; --beta; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --work; --rwork; /* Function Body */ latime_1.itcnt = 0.; /* ------------------------ End Timing Code ------------------------- Decode JOB, COMPQ, COMPZ */ if (lsame_(job, "E")) { ilschr = FALSE_; ischur = 1; } else if (lsame_(job, "S")) { ilschr = TRUE_; ischur = 2; } else { ischur = 0; } if (lsame_(compq, "N")) { ilq = FALSE_; icompq = 1; nq = 0; } else if (lsame_(compq, "V")) { ilq = TRUE_; icompq = 2; nq = *n; } else if (lsame_(compq, "I")) { ilq = TRUE_; icompq = 3; nq = *n; } else { icompq = 0; } if (lsame_(compz, "N")) { ilz = FALSE_; icompz = 1; nz = 0; } else if (lsame_(compz, "V")) { ilz = TRUE_; icompz = 2; nz = *n; } else if (lsame_(compz, "I")) { ilz = TRUE_; icompz = 3; nz = *n; } else { icompz = 0; } /* Check Argument Values */ *info = 0; i__1 = max(1,*n); work[1].r = (doublereal) i__1, work[1].i = 0.; lquery = *lwork == -1; if (ischur == 0) { *info = -1; } else if (icompq == 0) { *info = -2; } else if (icompz == 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*ilo < 1) { *info = -5; } else if (*ihi > *n || *ihi < *ilo - 1) { *info = -6; } else if (*lda < *n) { *info = -8; } else if (*ldb < *n) { *info = -10; } else if (*ldq < 1 || ilq && *ldq < *n) { *info = -14; } else if (*ldz < 1 || ilz && *ldz < *n) { *info = -16; } else if (*lwork < max(1,*n) && ! 
lquery) { *info = -18; } if (*info != 0) { i__1 = -(*info); xerbla_("ZHGEQZ", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible WORK( 1 ) = CMPLX( 1 ) */ if (*n <= 0) { work[1].r = 1., work[1].i = 0.; return 0; } /* Initialize Q and Z */ if (icompq == 3) { zlaset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq); } if (icompz == 3) { zlaset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz); } /* Machine Constants */ in = *ihi + 1 - *ilo; safmin = dlamch_("S"); ulp = dlamch_("E") * dlamch_("B"); anorm = zlanhs_("F", &in, &a_ref(*ilo, *ilo), lda, &rwork[1]); bnorm = zlanhs_("F", &in, &b_ref(*ilo, *ilo), ldb, &rwork[1]); /* Computing MAX */ d__1 = safmin, d__2 = ulp * anorm; atol = max(d__1,d__2); /* Computing MAX */ d__1 = safmin, d__2 = ulp * bnorm; btol = max(d__1,d__2); ascale = 1. / max(safmin,anorm); bscale = 1. / max(safmin,bnorm); /* ---------------------- Begin Timing Code ------------------------- Count ops for norms, etc. */ opst = 0.; /* Computing 2nd power */ i__1 = *n; latime_1.ops += (doublereal) ((i__1 * i__1 << 2) + *n * 12 - 5); /* ----------------------- End Timing Code -------------------------- Set Eigenvalues IHI+1:N */ i__1 = *n; for (j = *ihi + 1; j <= i__1; ++j) { absb = z_abs(&b_ref(j, j)); if (absb > safmin) { i__2 = b_subscr(j, j); z__2.r = b[i__2].r / absb, z__2.i = b[i__2].i / absb; d_cnjg(&z__1, &z__2); signbc.r = z__1.r, signbc.i = z__1.i; i__2 = b_subscr(j, j); b[i__2].r = absb, b[i__2].i = 0.; if (ilschr) { i__2 = j - 1; zscal_(&i__2, &signbc, &b_ref(1, j), &c__1); zscal_(&j, &signbc, &a_ref(1, j), &c__1); /* ----------------- Begin Timing Code --------------------- */ opst += (doublereal) ((j - 1) * 12); /* ------------------ End Timing Code ---------------------- */ } else { i__2 = a_subscr(j, j); i__3 = a_subscr(j, j); z__1.r = a[i__3].r * signbc.r - a[i__3].i * signbc.i, z__1.i = a[i__3].r * signbc.i + a[i__3].i * signbc.r; a[i__2].r = z__1.r, a[i__2].i = z__1.i; } if (ilz) { zscal_(n, &signbc, &z___ref(1, j), &c__1); } /* ------------------- Begin Timing Code ---------------------- */ opst += (doublereal) (nz * 6 + 13); /* -------------------- End Timing Code ----------------------- */ } else { i__2 = b_subscr(j, j); b[i__2].r = 0., b[i__2].i = 0.; } i__2 = j; i__3 = a_subscr(j, j); alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i; i__2 = j; i__3 = b_subscr(j, j); beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i; /* L10: */ } /* If IHI < ILO, skip QZ steps */ if (*ihi < *ilo) { goto L190; } /* MAIN QZ ITERATION LOOP Initialize dynamic indices Eigenvalues ILAST+1:N have been found. Column operations modify rows IFRSTM:whatever Row operations modify columns whatever:ILASTM If only eigenvalues are being computed, then IFRSTM is the row of the last splitting row above row ILAST; this is always at least ILO. IITER counts iterations since the last eigenvalue was found, to tell when to use an extraordinary shift. MAXIT is the maximum number of QZ sweeps allowed. */ ilast = *ihi; if (ilschr) { ifrstm = 1; ilastm = *n; } else { ifrstm = *ilo; ilastm = *ihi; } iiter = 0; eshift.r = 0., eshift.i = 0.; maxit = (*ihi - *ilo + 1) * 30; i__1 = maxit; for (jiter = 1; jiter <= i__1; ++jiter) { /* Check for too many iterations. */ if (jiter > maxit) { goto L180; } /* Split the matrix if possible. 
Two tests: 1: A(j,j-1)=0 or j=ILO 2: B(j,j)=0 Special case: j=ILAST */ if (ilast == *ilo) { goto L60; } else { i__2 = a_subscr(ilast, ilast - 1); if ((d__1 = a[i__2].r, abs(d__1)) + (d__2 = d_imag(&a_ref(ilast, ilast - 1)), abs(d__2)) <= atol) { i__2 = a_subscr(ilast, ilast - 1); a[i__2].r = 0., a[i__2].i = 0.; goto L60; } } if (z_abs(&b_ref(ilast, ilast)) <= btol) { i__2 = b_subscr(ilast, ilast); b[i__2].r = 0., b[i__2].i = 0.; goto L50; } /* General case: j<ILAST */ i__2 = *ilo; for (j = ilast - 1; j >= i__2; --j) { /* Test 1: for A(j,j-1)=0 or j=ILO */ if (j == *ilo) { ilazro = TRUE_; } else { i__3 = a_subscr(j, j - 1); if ((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a_ref(j, j - 1)), abs(d__2)) <= atol) { i__3 = a_subscr(j, j - 1); a[i__3].r = 0., a[i__3].i = 0.; ilazro = TRUE_; } else { ilazro = FALSE_; } } /* Test 2: for B(j,j)=0 */ if (z_abs(&b_ref(j, j)) < btol) { i__3 = b_subscr(j, j); b[i__3].r = 0., b[i__3].i = 0.; /* Test 1a: Check for 2 consecutive small subdiagonals in A */ ilazr2 = FALSE_; if (! ilazro) { i__3 = a_subscr(j, j - 1); i__4 = a_subscr(j + 1, j); i__5 = a_subscr(j, j); if (((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(& a_ref(j, j - 1)), abs(d__2))) * (ascale * ((d__3 = a[i__4].r, abs(d__3)) + (d__4 = d_imag(&a_ref(j + 1, j)), abs(d__4)))) <= ((d__5 = a[i__5].r, abs( d__5)) + (d__6 = d_imag(&a_ref(j, j)), abs(d__6))) * (ascale * atol)) { ilazr2 = TRUE_; } } /* If both tests pass (1 & 2), i.e., the leading diagonal element of B in the block is zero, split a 1x1 block off at the top. (I.e., at the J-th row/column) The leading diagonal element of the remainder can also be zero, so this may have to be done repeatedly. */ if (ilazro || ilazr2) { i__3 = ilast - 1; for (jch = j; jch <= i__3; ++jch) { i__4 = a_subscr(jch, jch); ctemp.r = a[i__4].r, ctemp.i = a[i__4].i; zlartg_(&ctemp, &a_ref(jch + 1, jch), &c__, &s, & a_ref(jch, jch)); i__4 = a_subscr(jch + 1, jch); a[i__4].r = 0., a[i__4].i = 0.; i__4 = ilastm - jch; zrot_(&i__4, &a_ref(jch, jch + 1), lda, &a_ref(jch + 1, jch + 1), lda, &c__, &s); i__4 = ilastm - jch; zrot_(&i__4, &b_ref(jch, jch + 1), ldb, &b_ref(jch + 1, jch + 1), ldb, &c__, &s); if (ilq) { d_cnjg(&z__1, &s); zrot_(n, &q_ref(1, jch), &c__1, &q_ref(1, jch + 1) , &c__1, &c__, &z__1); } if (ilazr2) { i__4 = a_subscr(jch, jch - 1); i__5 = a_subscr(jch, jch - 1); z__1.r = c__ * a[i__5].r, z__1.i = c__ * a[i__5] .i; a[i__4].r = z__1.r, a[i__4].i = z__1.i; } ilazr2 = FALSE_; /* --------------- Begin Timing Code ----------------- */ opst += (doublereal) ((ilastm - jch) * 40 + 32 + nq * 20); /* ---------------- End Timing Code ------------------ */ i__4 = b_subscr(jch + 1, jch + 1); if ((d__1 = b[i__4].r, abs(d__1)) + (d__2 = d_imag(& b_ref(jch + 1, jch + 1)), abs(d__2)) >= btol) { if (jch + 1 >= ilast) { goto L60; } else { ifirst = jch + 1; goto L70; } } i__4 = b_subscr(jch + 1, jch + 1); b[i__4].r = 0., b[i__4].i = 0.; /* L20: */ } goto L50; } else { /* Only test 2 passed -- chase the zero to B(ILAST,ILAST) Then process as in the case B(ILAST,ILAST)=0 */ i__3 = ilast - 1; for (jch = j; jch <= i__3; ++jch) { i__4 = b_subscr(jch, jch + 1); ctemp.r = b[i__4].r, ctemp.i = b[i__4].i; zlartg_(&ctemp, &b_ref(jch + 1, jch + 1), &c__, &s, & b_ref(jch, jch + 1)); i__4 = b_subscr(jch + 1, jch + 1); b[i__4].r = 0., b[i__4].i = 0.; if (jch < ilastm - 1) { i__4 = ilastm - jch - 1; zrot_(&i__4, &b_ref(jch, jch + 2), ldb, &b_ref( jch + 1, jch + 2), ldb, &c__, &s); } i__4 = ilastm - jch + 2; zrot_(&i__4, &a_ref(jch, jch - 1), lda, &a_ref(jch + 1, jch - 1), lda, &c__, 
&s); if (ilq) { d_cnjg(&z__1, &s); zrot_(n, &q_ref(1, jch), &c__1, &q_ref(1, jch + 1) , &c__1, &c__, &z__1); } i__4 = a_subscr(jch + 1, jch); ctemp.r = a[i__4].r, ctemp.i = a[i__4].i; zlartg_(&ctemp, &a_ref(jch + 1, jch - 1), &c__, &s, & a_ref(jch + 1, jch)); i__4 = a_subscr(jch + 1, jch - 1); a[i__4].r = 0., a[i__4].i = 0.; i__4 = jch + 1 - ifrstm; zrot_(&i__4, &a_ref(ifrstm, jch), &c__1, &a_ref( ifrstm, jch - 1), &c__1, &c__, &s); i__4 = jch - ifrstm; zrot_(&i__4, &b_ref(ifrstm, jch), &c__1, &b_ref( ifrstm, jch - 1), &c__1, &c__, &s); if (ilz) { zrot_(n, &z___ref(1, jch), &c__1, &z___ref(1, jch - 1), &c__1, &c__, &s); } /* L30: */ } /* ---------------- Begin Timing Code ------------------- */ opst += (doublereal) ((ilastm + 1 - ifrstm) * 40 + 64 + ( nq + nz) * 20) * (doublereal) (ilast - j); /* ----------------- End Timing Code -------------------- */ goto L50; } } else if (ilazro) { /* Only test 1 passed -- work on J:ILAST */ ifirst = j; goto L70; } /* Neither test passed -- try next J L40: */ } /* (Drop-through is "impossible") */ *info = (*n << 1) + 1; goto L210; /* B(ILAST,ILAST)=0 -- clear A(ILAST,ILAST-1) to split off a 1x1 block. */ L50: i__2 = a_subscr(ilast, ilast); ctemp.r = a[i__2].r, ctemp.i = a[i__2].i; zlartg_(&ctemp, &a_ref(ilast, ilast - 1), &c__, &s, &a_ref(ilast, ilast)); i__2 = a_subscr(ilast, ilast - 1); a[i__2].r = 0., a[i__2].i = 0.; i__2 = ilast - ifrstm; zrot_(&i__2, &a_ref(ifrstm, ilast), &c__1, &a_ref(ifrstm, ilast - 1), &c__1, &c__, &s); i__2 = ilast - ifrstm; zrot_(&i__2, &b_ref(ifrstm, ilast), &c__1, &b_ref(ifrstm, ilast - 1), &c__1, &c__, &s); if (ilz) { zrot_(n, &z___ref(1, ilast), &c__1, &z___ref(1, ilast - 1), &c__1, &c__, &s); } /* --------------------- Begin Timing Code ----------------------- */ opst += (doublereal) ((ilast - ifrstm) * 40 + 32 + nz * 20); /* ---------------------- End Timing Code ------------------------ A(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA */ L60: absb = z_abs(&b_ref(ilast, ilast)); if (absb > safmin) { i__2 = b_subscr(ilast, ilast); z__2.r = b[i__2].r / absb, z__2.i = b[i__2].i / absb; d_cnjg(&z__1, &z__2); signbc.r = z__1.r, signbc.i = z__1.i; i__2 = b_subscr(ilast, ilast); b[i__2].r = absb, b[i__2].i = 0.; if (ilschr) { i__2 = ilast - ifrstm; zscal_(&i__2, &signbc, &b_ref(ifrstm, ilast), &c__1); i__2 = ilast + 1 - ifrstm; zscal_(&i__2, &signbc, &a_ref(ifrstm, ilast), &c__1); /* ----------------- Begin Timing Code --------------------- */ opst += (doublereal) ((ilast - ifrstm) * 12); /* ------------------ End Timing Code ---------------------- */ } else { i__2 = a_subscr(ilast, ilast); i__3 = a_subscr(ilast, ilast); z__1.r = a[i__3].r * signbc.r - a[i__3].i * signbc.i, z__1.i = a[i__3].r * signbc.i + a[i__3].i * signbc.r; a[i__2].r = z__1.r, a[i__2].i = z__1.i; } if (ilz) { zscal_(n, &signbc, &z___ref(1, ilast), &c__1); } /* ------------------- Begin Timing Code ---------------------- */ opst += (doublereal) (nz * 6 + 13); /* -------------------- End Timing Code ----------------------- */ } else { i__2 = b_subscr(ilast, ilast); b[i__2].r = 0., b[i__2].i = 0.; } i__2 = ilast; i__3 = a_subscr(ilast, ilast); alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i; i__2 = ilast; i__3 = b_subscr(ilast, ilast); beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i; /* Go to next block -- exit if finished. */ --ilast; if (ilast < *ilo) { goto L190; } /* Reset counters */ iiter = 0; eshift.r = 0., eshift.i = 0.; if (! 
ilschr) { ilastm = ilast; if (ifrstm > ilast) { ifrstm = *ilo; } } goto L160; /* QZ step This iteration only involves rows/columns IFIRST:ILAST. We assume IFIRST < ILAST, and that the diagonal of B is non-zero. */ L70: ++iiter; if (! ilschr) { ifrstm = ifirst; } /* Compute the Shift. At this point, IFIRST < ILAST, and the diagonal elements of B(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in magnitude) */ if (iiter / 10 * 10 != iiter) { /* The Wilkinson shift (AEP p.512), i.e., the eigenvalue of the bottom-right 2x2 block of A inv(B) which is nearest to the bottom-right element. We factor B as U*D, where U has unit diagonals, and compute (A*inv(D))*inv(U). */ i__2 = b_subscr(ilast - 1, ilast); z__2.r = bscale * b[i__2].r, z__2.i = bscale * b[i__2].i; i__3 = b_subscr(ilast, ilast); z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i; z_div(&z__1, &z__2, &z__3); u12.r = z__1.r, u12.i = z__1.i; i__2 = a_subscr(ilast - 1, ilast - 1); z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i; i__3 = b_subscr(ilast - 1, ilast - 1); z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i; z_div(&z__1, &z__2, &z__3); ad11.r = z__1.r, ad11.i = z__1.i; i__2 = a_subscr(ilast, ilast - 1); z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i; i__3 = b_subscr(ilast - 1, ilast - 1); z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i; z_div(&z__1, &z__2, &z__3); ad21.r = z__1.r, ad21.i = z__1.i; i__2 = a_subscr(ilast - 1, ilast); z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i; i__3 = b_subscr(ilast, ilast); z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i; z_div(&z__1, &z__2, &z__3); ad12.r = z__1.r, ad12.i = z__1.i; i__2 = a_subscr(ilast, ilast); z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i; i__3 = b_subscr(ilast, ilast); z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i; z_div(&z__1, &z__2, &z__3); ad22.r = z__1.r, ad22.i = z__1.i; z__2.r = u12.r * ad21.r - u12.i * ad21.i, z__2.i = u12.r * ad21.i + u12.i * ad21.r; z__1.r = ad22.r - z__2.r, z__1.i = ad22.i - z__2.i; abi22.r = z__1.r, abi22.i = z__1.i; z__2.r = ad11.r + abi22.r, z__2.i = ad11.i + abi22.i; z__1.r = z__2.r * .5, z__1.i = z__2.i * .5; t.r = z__1.r, t.i = z__1.i; pow_zi(&z__4, &t, &c__2); z__5.r = ad12.r * ad21.r - ad12.i * ad21.i, z__5.i = ad12.r * ad21.i + ad12.i * ad21.r; z__3.r = z__4.r + z__5.r, z__3.i = z__4.i + z__5.i; z__6.r = ad11.r * ad22.r - ad11.i * ad22.i, z__6.i = ad11.r * ad22.i + ad11.i * ad22.r; z__2.r = z__3.r - z__6.r, z__2.i = z__3.i - z__6.i; z_sqrt(&z__1, &z__2); rtdisc.r = z__1.r, rtdisc.i = z__1.i; z__1.r = t.r - abi22.r, z__1.i = t.i - abi22.i; z__2.r = t.r - abi22.r, z__2.i = t.i - abi22.i; temp = z__1.r * rtdisc.r + d_imag(&z__2) * d_imag(&rtdisc); if (temp <= 0.) { z__1.r = t.r + rtdisc.r, z__1.i = t.i + rtdisc.i; shift.r = z__1.r, shift.i = z__1.i; } else { z__1.r = t.r - rtdisc.r, z__1.i = t.i - rtdisc.i; shift.r = z__1.r, shift.i = z__1.i; } /* ------------------- Begin Timing Code ---------------------- */ opst += 116.; /* -------------------- End Timing Code ----------------------- */ } else { /* Exceptional shift. Chosen for no particularly good reason. 
*/ i__2 = a_subscr(ilast - 1, ilast); z__4.r = ascale * a[i__2].r, z__4.i = ascale * a[i__2].i; i__3 = b_subscr(ilast - 1, ilast - 1); z__5.r = bscale * b[i__3].r, z__5.i = bscale * b[i__3].i; z_div(&z__3, &z__4, &z__5); d_cnjg(&z__2, &z__3); z__1.r = eshift.r + z__2.r, z__1.i = eshift.i + z__2.i; eshift.r = z__1.r, eshift.i = z__1.i; shift.r = eshift.r, shift.i = eshift.i; /* ------------------- Begin Timing Code ---------------------- */ opst += 15.; /* -------------------- End Timing Code ----------------------- */ } /* Now check for two consecutive small subdiagonals. */ i__2 = ifirst + 1; for (j = ilast - 1; j >= i__2; --j) { istart = j; i__3 = a_subscr(j, j); z__2.r = ascale * a[i__3].r, z__2.i = ascale * a[i__3].i; i__4 = b_subscr(j, j); z__4.r = bscale * b[i__4].r, z__4.i = bscale * b[i__4].i; z__3.r = shift.r * z__4.r - shift.i * z__4.i, z__3.i = shift.r * z__4.i + shift.i * z__4.r; z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i; ctemp.r = z__1.r, ctemp.i = z__1.i; temp = (d__1 = ctemp.r, abs(d__1)) + (d__2 = d_imag(&ctemp), abs( d__2)); i__3 = a_subscr(j + 1, j); temp2 = ascale * ((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(& a_ref(j + 1, j)), abs(d__2))); tempr = max(temp,temp2); if (tempr < 1. && tempr != 0.) { temp /= tempr; temp2 /= tempr; } i__3 = a_subscr(j, j - 1); if (((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a_ref(j, j - 1)), abs(d__2))) * temp2 <= temp * atol) { goto L90; } /* L80: */ } istart = ifirst; i__2 = a_subscr(ifirst, ifirst); z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i; i__3 = b_subscr(ifirst, ifirst); z__4.r = bscale * b[i__3].r, z__4.i = bscale * b[i__3].i; z__3.r = shift.r * z__4.r - shift.i * z__4.i, z__3.i = shift.r * z__4.i + shift.i * z__4.r; z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i; ctemp.r = z__1.r, ctemp.i = z__1.i; /* --------------------- Begin Timing Code ----------------------- */ opst += -6.; /* ---------------------- End Timing Code ------------------------ */ L90: /* Do an implicit-shift QZ sweep. 
Initial Q */ i__2 = a_subscr(istart + 1, istart); z__1.r = ascale * a[i__2].r, z__1.i = ascale * a[i__2].i; ctemp2.r = z__1.r, ctemp2.i = z__1.i; /* --------------------- Begin Timing Code ----------------------- */ opst += (doublereal) ((ilast - istart) * 18 + 2); /* ---------------------- End Timing Code ------------------------ */ zlartg_(&ctemp, &ctemp2, &c__, &s, &ctemp3); /* Sweep */ i__2 = ilast - 1; for (j = istart; j <= i__2; ++j) { if (j > istart) { i__3 = a_subscr(j, j - 1); ctemp.r = a[i__3].r, ctemp.i = a[i__3].i; zlartg_(&ctemp, &a_ref(j + 1, j - 1), &c__, &s, &a_ref(j, j - 1)); i__3 = a_subscr(j + 1, j - 1); a[i__3].r = 0., a[i__3].i = 0.; } i__3 = ilastm; for (jc = j; jc <= i__3; ++jc) { i__4 = a_subscr(j, jc); z__2.r = c__ * a[i__4].r, z__2.i = c__ * a[i__4].i; i__5 = a_subscr(j + 1, jc); z__3.r = s.r * a[i__5].r - s.i * a[i__5].i, z__3.i = s.r * a[ i__5].i + s.i * a[i__5].r; z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; ctemp.r = z__1.r, ctemp.i = z__1.i; i__4 = a_subscr(j + 1, jc); d_cnjg(&z__4, &s); z__3.r = -z__4.r, z__3.i = -z__4.i; i__5 = a_subscr(j, jc); z__2.r = z__3.r * a[i__5].r - z__3.i * a[i__5].i, z__2.i = z__3.r * a[i__5].i + z__3.i * a[i__5].r; i__6 = a_subscr(j + 1, jc); z__5.r = c__ * a[i__6].r, z__5.i = c__ * a[i__6].i; z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i; a[i__4].r = z__1.r, a[i__4].i = z__1.i; i__4 = a_subscr(j, jc); a[i__4].r = ctemp.r, a[i__4].i = ctemp.i; i__4 = b_subscr(j, jc); z__2.r = c__ * b[i__4].r, z__2.i = c__ * b[i__4].i; i__5 = b_subscr(j + 1, jc); z__3.r = s.r * b[i__5].r - s.i * b[i__5].i, z__3.i = s.r * b[ i__5].i + s.i * b[i__5].r; z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; ctemp2.r = z__1.r, ctemp2.i = z__1.i; i__4 = b_subscr(j + 1, jc); d_cnjg(&z__4, &s); z__3.r = -z__4.r, z__3.i = -z__4.i; i__5 = b_subscr(j, jc); z__2.r = z__3.r * b[i__5].r - z__3.i * b[i__5].i, z__2.i = z__3.r * b[i__5].i + z__3.i * b[i__5].r; i__6 = b_subscr(j + 1, jc); z__5.r = c__ * b[i__6].r, z__5.i = c__ * b[i__6].i; z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i; b[i__4].r = z__1.r, b[i__4].i = z__1.i; i__4 = b_subscr(j, jc); b[i__4].r = ctemp2.r, b[i__4].i = ctemp2.i; /* L100: */ } if (ilq) { i__3 = *n; for (jr = 1; jr <= i__3; ++jr) { i__4 = q_subscr(jr, j); z__2.r = c__ * q[i__4].r, z__2.i = c__ * q[i__4].i; d_cnjg(&z__4, &s); i__5 = q_subscr(jr, j + 1); z__3.r = z__4.r * q[i__5].r - z__4.i * q[i__5].i, z__3.i = z__4.r * q[i__5].i + z__4.i * q[i__5].r; z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; ctemp.r = z__1.r, ctemp.i = z__1.i; i__4 = q_subscr(jr, j + 1); z__3.r = -s.r, z__3.i = -s.i; i__5 = q_subscr(jr, j); z__2.r = z__3.r * q[i__5].r - z__3.i * q[i__5].i, z__2.i = z__3.r * q[i__5].i + z__3.i * q[i__5].r; i__6 = q_subscr(jr, j + 1); z__4.r = c__ * q[i__6].r, z__4.i = c__ * q[i__6].i; z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; q[i__4].r = z__1.r, q[i__4].i = z__1.i; i__4 = q_subscr(jr, j); q[i__4].r = ctemp.r, q[i__4].i = ctemp.i; /* L110: */ } } i__3 = b_subscr(j + 1, j + 1); ctemp.r = b[i__3].r, ctemp.i = b[i__3].i; zlartg_(&ctemp, &b_ref(j + 1, j), &c__, &s, &b_ref(j + 1, j + 1)); i__3 = b_subscr(j + 1, j); b[i__3].r = 0., b[i__3].i = 0.; /* Computing MIN */ i__4 = j + 2; i__3 = min(i__4,ilast); for (jr = ifrstm; jr <= i__3; ++jr) { i__4 = a_subscr(jr, j + 1); z__2.r = c__ * a[i__4].r, z__2.i = c__ * a[i__4].i; i__5 = a_subscr(jr, j); z__3.r = s.r * a[i__5].r - s.i * a[i__5].i, z__3.i = s.r * a[ i__5].i + s.i * a[i__5].r; z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; ctemp.r = z__1.r, ctemp.i = 
z__1.i; i__4 = a_subscr(jr, j); d_cnjg(&z__4, &s); z__3.r = -z__4.r, z__3.i = -z__4.i; i__5 = a_subscr(jr, j + 1); z__2.r = z__3.r * a[i__5].r - z__3.i * a[i__5].i, z__2.i = z__3.r * a[i__5].i + z__3.i * a[i__5].r; i__6 = a_subscr(jr, j); z__5.r = c__ * a[i__6].r, z__5.i = c__ * a[i__6].i; z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i; a[i__4].r = z__1.r, a[i__4].i = z__1.i; i__4 = a_subscr(jr, j + 1); a[i__4].r = ctemp.r, a[i__4].i = ctemp.i; /* L120: */ } i__3 = j; for (jr = ifrstm; jr <= i__3; ++jr) { i__4 = b_subscr(jr, j + 1); z__2.r = c__ * b[i__4].r, z__2.i = c__ * b[i__4].i; i__5 = b_subscr(jr, j); z__3.r = s.r * b[i__5].r - s.i * b[i__5].i, z__3.i = s.r * b[ i__5].i + s.i * b[i__5].r; z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; ctemp.r = z__1.r, ctemp.i = z__1.i; i__4 = b_subscr(jr, j); d_cnjg(&z__4, &s); z__3.r = -z__4.r, z__3.i = -z__4.i; i__5 = b_subscr(jr, j + 1); z__2.r = z__3.r * b[i__5].r - z__3.i * b[i__5].i, z__2.i = z__3.r * b[i__5].i + z__3.i * b[i__5].r; i__6 = b_subscr(jr, j); z__5.r = c__ * b[i__6].r, z__5.i = c__ * b[i__6].i; z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i; b[i__4].r = z__1.r, b[i__4].i = z__1.i; i__4 = b_subscr(jr, j + 1); b[i__4].r = ctemp.r, b[i__4].i = ctemp.i; /* L130: */ } if (ilz) { i__3 = *n; for (jr = 1; jr <= i__3; ++jr) { i__4 = z___subscr(jr, j + 1); z__2.r = c__ * z__[i__4].r, z__2.i = c__ * z__[i__4].i; i__5 = z___subscr(jr, j); z__3.r = s.r * z__[i__5].r - s.i * z__[i__5].i, z__3.i = s.r * z__[i__5].i + s.i * z__[i__5].r; z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; ctemp.r = z__1.r, ctemp.i = z__1.i; i__4 = z___subscr(jr, j); d_cnjg(&z__4, &s); z__3.r = -z__4.r, z__3.i = -z__4.i; i__5 = z___subscr(jr, j + 1); z__2.r = z__3.r * z__[i__5].r - z__3.i * z__[i__5].i, z__2.i = z__3.r * z__[i__5].i + z__3.i * z__[i__5] .r; i__6 = z___subscr(jr, j); z__5.r = c__ * z__[i__6].r, z__5.i = c__ * z__[i__6].i; z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i; z__[i__4].r = z__1.r, z__[i__4].i = z__1.i; i__4 = z___subscr(jr, j + 1); z__[i__4].r = ctemp.r, z__[i__4].i = ctemp.i; /* L140: */ } } /* L150: */ } /* --------------------- Begin Timing Code ----------------------- */ opst += (doublereal) (ilast - istart) * (doublereal) ((ilastm - ifrstm) * 40 + 184 + (nq + nz) * 20) - 20; /* ---------------------- End Timing Code ------------------------ */ L160: /* --------------------- Begin Timing Code ----------------------- End of iteration -- add in "small" contributions. 
*/ latime_1.ops += opst; opst = 0.; /* ---------------------- End Timing Code ------------------------ L170: */ } /* Drop-through = non-convergence */ L180: *info = ilast; /* ---------------------- Begin Timing Code ------------------------- */ latime_1.ops += opst; opst = 0.; /* ----------------------- End Timing Code -------------------------- */ goto L210; /* Successful completion of all QZ steps */ L190: /* Set Eigenvalues 1:ILO-1 */ i__1 = *ilo - 1; for (j = 1; j <= i__1; ++j) { absb = z_abs(&b_ref(j, j)); if (absb > safmin) { i__2 = b_subscr(j, j); z__2.r = b[i__2].r / absb, z__2.i = b[i__2].i / absb; d_cnjg(&z__1, &z__2); signbc.r = z__1.r, signbc.i = z__1.i; i__2 = b_subscr(j, j); b[i__2].r = absb, b[i__2].i = 0.; if (ilschr) { i__2 = j - 1; zscal_(&i__2, &signbc, &b_ref(1, j), &c__1); zscal_(&j, &signbc, &a_ref(1, j), &c__1); /* ----------------- Begin Timing Code --------------------- */ opst += (doublereal) ((j - 1) * 12); /* ------------------ End Timing Code ---------------------- */ } else { i__2 = a_subscr(j, j); i__3 = a_subscr(j, j); z__1.r = a[i__3].r * signbc.r - a[i__3].i * signbc.i, z__1.i = a[i__3].r * signbc.i + a[i__3].i * signbc.r; a[i__2].r = z__1.r, a[i__2].i = z__1.i; } if (ilz) { zscal_(n, &signbc, &z___ref(1, j), &c__1); } /* ------------------- Begin Timing Code ---------------------- */ opst += (doublereal) (nz * 6 + 13); /* -------------------- End Timing Code ----------------------- */ } else { i__2 = b_subscr(j, j); b[i__2].r = 0., b[i__2].i = 0.; } i__2 = j; i__3 = a_subscr(j, j); alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i; i__2 = j; i__3 = b_subscr(j, j); beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i; /* L200: */ } /* Normal Termination */ *info = 0; /* Exit (other than argument error) -- return optimal workspace size */ L210: /* ---------------------- Begin Timing Code ------------------------- */ latime_1.ops += opst; opst = 0.; latime_1.itcnt = (doublereal) jiter; /* ----------------------- End Timing Code -------------------------- */ z__1.r = (doublereal) (*n), z__1.i = 0.; work[1].r = z__1.r, work[1].i = z__1.i; return 0; /* End of ZHGEQZ */ } /* zhgeqz_ */
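/*
 * Illustrative sketch (hypothetical helper, not part of LAPACK): the single
 * shift selected in the QZ sweep of ZHGEQZ above for ordinary iterations.
 * Per the comments in the routine, this is the eigenvalue of the
 * bottom-right 2x2 block of A*inv(B) nearest to its bottom-right element;
 * the arithmetic below mirrors the expressions in the code (ad11, ad12,
 * ad21, ad22 and u12 are the scaled entries computed there), using C99
 * complex instead of doublecomplex.
 */
#include <complex.h>

static double complex qz_single_shift(double complex ad11, double complex ad12,
                                       double complex ad21, double complex ad22,
                                       double complex u12)
{
    double complex abi22 = ad22 - u12 * ad21;
    double complex t     = 0.5 * (ad11 + abi22);
    double complex disc  = csqrt(t * t + ad12 * ad21 - ad11 * ad22);
    /* Pick the root nearer to abi22: the test compares
       Re((t - abi22) * conj(disc)), exactly as in ZHGEQZ. */
    if (creal((t - abi22) * conj(disc)) <= 0.)
        return t + disc;
    return t - disc;
}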
/* ===================================================================== */ doublereal zlantb_(char *norm, char *uplo, char *diag, integer *n, integer *k, doublecomplex *ab, integer *ldab, doublereal *work) { /* System generated locals */ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5; doublereal ret_val; /* Builtin functions */ double z_abs(doublecomplex *), sqrt(doublereal); /* Local variables */ integer i__, j, l; doublereal sum, scale; logical udiag; extern logical lsame_(char *, char *); doublereal value; extern logical disnan_(doublereal *); extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *, doublereal *, doublereal *); /* -- LAPACK auxiliary routine (version 3.4.2) -- */ /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */ /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */ /* September 2012 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Parameter adjustments */ ab_dim1 = *ldab; ab_offset = 1 + ab_dim1; ab -= ab_offset; --work; /* Function Body */ if (*n == 0) { value = 0.; } else if (lsame_(norm, "M")) { /* Find max(f2c_abs(A(i,j))). */ if (lsame_(diag, "U")) { value = 1.; if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MAX */ i__2 = *k + 2 - j; i__3 = *k; for (i__ = max(i__2,1); i__ <= i__3; ++i__) { sum = z_abs(&ab[i__ + j * ab_dim1]); if (value < sum || disnan_(&sum)) { value = sum; } /* L10: */ } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__2 = *n + 1 - j; i__4 = *k + 1; // , expr subst i__3 = min(i__2,i__4); for (i__ = 2; i__ <= i__3; ++i__) { sum = z_abs(&ab[i__ + j * ab_dim1]); if (value < sum || disnan_(&sum)) { value = sum; } /* L30: */ } /* L40: */ } } } else { value = 0.; if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MAX */ i__3 = *k + 2 - j; i__2 = *k + 1; for (i__ = max(i__3,1); i__ <= i__2; ++i__) { sum = z_abs(&ab[i__ + j * ab_dim1]); if (value < sum || disnan_(&sum)) { value = sum; } /* L50: */ } /* L60: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__3 = *n + 1 - j; i__4 = *k + 1; // , expr subst i__2 = min(i__3,i__4); for (i__ = 1; i__ <= i__2; ++i__) { sum = z_abs(&ab[i__ + j * ab_dim1]); if (value < sum || disnan_(&sum)) { value = sum; } /* L70: */ } /* L80: */ } } } } else if (lsame_(norm, "O") || *(unsigned char *) norm == '1') { /* Find norm1(A). 
*/ value = 0.; udiag = lsame_(diag, "U"); if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (udiag) { sum = 1.; /* Computing MAX */ i__2 = *k + 2 - j; i__3 = *k; for (i__ = max(i__2,1); i__ <= i__3; ++i__) { sum += z_abs(&ab[i__ + j * ab_dim1]); /* L90: */ } } else { sum = 0.; /* Computing MAX */ i__3 = *k + 2 - j; i__2 = *k + 1; for (i__ = max(i__3,1); i__ <= i__2; ++i__) { sum += z_abs(&ab[i__ + j * ab_dim1]); /* L100: */ } } if (value < sum || disnan_(&sum)) { value = sum; } /* L110: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (udiag) { sum = 1.; /* Computing MIN */ i__3 = *n + 1 - j; i__4 = *k + 1; // , expr subst i__2 = min(i__3,i__4); for (i__ = 2; i__ <= i__2; ++i__) { sum += z_abs(&ab[i__ + j * ab_dim1]); /* L120: */ } } else { sum = 0.; /* Computing MIN */ i__3 = *n + 1 - j; i__4 = *k + 1; // , expr subst i__2 = min(i__3,i__4); for (i__ = 1; i__ <= i__2; ++i__) { sum += z_abs(&ab[i__ + j * ab_dim1]); /* L130: */ } } if (value < sum || disnan_(&sum)) { value = sum; } /* L140: */ } } } else if (lsame_(norm, "I")) { /* Find normI(A). */ value = 0.; if (lsame_(uplo, "U")) { if (lsame_(diag, "U")) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 1.; /* L150: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { l = *k + 1 - j; /* Computing MAX */ i__2 = 1; i__3 = j - *k; // , expr subst i__4 = j - 1; for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) { work[i__] += z_abs(&ab[l + i__ + j * ab_dim1]); /* L160: */ } /* L170: */ } } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 0.; /* L180: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { l = *k + 1 - j; /* Computing MAX */ i__4 = 1; i__2 = j - *k; // , expr subst i__3 = j; for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) { work[i__] += z_abs(&ab[l + i__ + j * ab_dim1]); /* L190: */ } /* L200: */ } } } else { if (lsame_(diag, "U")) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 1.; /* L210: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { l = 1 - j; /* Computing MIN */ i__4 = *n; i__2 = j + *k; // , expr subst i__3 = min(i__4,i__2); for (i__ = j + 1; i__ <= i__3; ++i__) { work[i__] += z_abs(&ab[l + i__ + j * ab_dim1]); /* L220: */ } /* L230: */ } } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 0.; /* L240: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { l = 1 - j; /* Computing MIN */ i__4 = *n; i__2 = j + *k; // , expr subst i__3 = min(i__4,i__2); for (i__ = j; i__ <= i__3; ++i__) { work[i__] += z_abs(&ab[l + i__ + j * ab_dim1]); /* L250: */ } /* L260: */ } } } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { sum = work[i__]; if (value < sum || disnan_(&sum)) { value = sum; } /* L270: */ } } else if (lsame_(norm, "F") || lsame_(norm, "E")) { /* Find normF(A). 
*/ if (lsame_(uplo, "U")) { if (lsame_(diag, "U")) { scale = 1.; sum = (doublereal) (*n); if (*k > 0) { i__1 = *n; for (j = 2; j <= i__1; ++j) { /* Computing MIN */ i__4 = j - 1; i__3 = min(i__4,*k); /* Computing MAX */ i__2 = *k + 2 - j; zlassq_(&i__3, &ab[max(i__2,1) + j * ab_dim1], &c__1, &scale, &sum); /* L280: */ } } } else { scale = 0.; sum = 1.; i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__4 = j; i__2 = *k + 1; // , expr subst i__3 = min(i__4,i__2); /* Computing MAX */ i__5 = *k + 2 - j; zlassq_(&i__3, &ab[max(i__5,1) + j * ab_dim1], &c__1, & scale, &sum); /* L290: */ } } } else { if (lsame_(diag, "U")) { scale = 1.; sum = (doublereal) (*n); if (*k > 0) { i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__4 = *n - j; i__3 = min(i__4,*k); zlassq_(&i__3, &ab[j * ab_dim1 + 2], &c__1, &scale, & sum); /* L300: */ } } } else { scale = 0.; sum = 1.; i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__4 = *n - j + 1; i__2 = *k + 1; // , expr subst i__3 = min(i__4,i__2); zlassq_(&i__3, &ab[j * ab_dim1 + 1], &c__1, &scale, &sum); /* L310: */ } } } value = scale * sqrt(sum); } ret_val = value; return ret_val; /* End of ZLANTB */ }
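/*
 * Illustrative sketch (hypothetical helper, not part of LAPACK): the band
 * storage indexing used by ZLANTB above.  For an upper triangular band
 * matrix with K superdiagonals, A(i,j) is stored in AB(K+1+i-j, j), so the
 * 1-norm is the largest column sum taken over rows max(1,j-K)..j of each
 * column.  C99 complex and 0-based column-major indexing are used here
 * instead of the f2c conventions; the non-unit-diagonal case is shown.
 */
#include <complex.h>

static double upper_band_one_norm(int n, int k,
                                  const double complex *ab, int ldab)
{
    double value = 0.;
    for (int j = 0; j < n; ++j) {                        /* column j          */
        double sum = 0.;
        int i0 = j - k > 0 ? j - k : 0;                  /* first stored row  */
        for (int i = i0; i <= j; ++i)
            sum += cabs(ab[(k + i - j) + j * ldab]);     /* AB(k+i-j, j)      */
        if (sum > value) value = sum;
    }
    return value;
}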
/* Subroutine */ int zlatm4_(integer *itype, integer *n, integer *nz1, integer *nz2, logical *rsign, doublereal *amagn, doublereal *rcond, doublereal *triang, integer *idist, integer *iseed, doublecomplex *a, integer *lda) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; doublereal d__1, d__2; doublecomplex z__1, z__2; /* Local variables */ integer i__, k, jc, jd, jr, kbeg, isdb, kend, isde, klen; doublereal alpha; doublecomplex ctemp; extern doublereal dlaran_(integer *); extern /* Double Complex */ void zlarnd_(doublecomplex *, integer *, integer *); extern /* Subroutine */ int zlaset_(char *, integer *, integer *, doublecomplex *, doublecomplex *, doublecomplex *, integer *); /* -- LAPACK auxiliary test routine (version 3.1) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZLATM4 generates basic square matrices, which may later be */ /* multiplied by others in order to produce test matrices. It is */ /* intended mainly to be used to test the generalized eigenvalue */ /* routines. */ /* It first generates the diagonal and (possibly) subdiagonal, */ /* according to the value of ITYPE, NZ1, NZ2, RSIGN, AMAGN, and RCOND. */ /* It then fills in the upper triangle with random numbers, if TRIANG is */ /* non-zero. */ /* Arguments */ /* ========= */ /* ITYPE (input) INTEGER */ /* The "type" of matrix on the diagonal and sub-diagonal. */ /* If ITYPE < 0, then type abs(ITYPE) is generated and then */ /* swapped end for end (A(I,J) := A'(N-J,N-I).) See also */ /* the description of AMAGN and RSIGN. */ /* Special types: */ /* = 0: the zero matrix. */ /* = 1: the identity. */ /* = 2: a transposed Jordan block. */ /* = 3: If N is odd, then a k+1 x k+1 transposed Jordan block */ /* followed by a k x k identity block, where k=(N-1)/2. */ /* If N is even, then k=(N-2)/2, and a zero diagonal entry */ /* is tacked onto the end. */ /* Diagonal types. The diagonal consists of NZ1 zeros, then */ /* k=N-NZ1-NZ2 nonzeros. The subdiagonal is zero. ITYPE */ /* specifies the nonzero diagonal entries as follows: */ /* = 4: 1, ..., k */ /* = 5: 1, RCOND, ..., RCOND */ /* = 6: 1, ..., 1, RCOND */ /* = 7: 1, a, a^2, ..., a^(k-1)=RCOND */ /* = 8: 1, 1-d, 1-2*d, ..., 1-(k-1)*d=RCOND */ /* = 9: random numbers chosen from (RCOND,1) */ /* = 10: random numbers with distribution IDIST (see ZLARND.) */ /* N (input) INTEGER */ /* The order of the matrix. */ /* NZ1 (input) INTEGER */ /* If abs(ITYPE) > 3, then the first NZ1 diagonal entries will */ /* be zero. */ /* NZ2 (input) INTEGER */ /* If abs(ITYPE) > 3, then the last NZ2 diagonal entries will */ /* be zero. */ /* RSIGN (input) LOGICAL */ /* = .TRUE.: The diagonal and subdiagonal entries will be */ /* multiplied by random numbers of magnitude 1. */ /* = .FALSE.: The diagonal and subdiagonal entries will be */ /* left as they are (usually non-negative real.) */ /* AMAGN (input) DOUBLE PRECISION */ /* The diagonal and subdiagonal entries will be multiplied by */ /* AMAGN. */ /* RCOND (input) DOUBLE PRECISION */ /* If abs(ITYPE) > 4, then the smallest diagonal entry will be */ /* RCOND. RCOND must be between 0 and 1. */ /* TRIANG (input) DOUBLE PRECISION */ /* The entries above the diagonal will be random numbers with */ /* magnitude bounded by TRIANG (i.e., random numbers multiplied */ /* by TRIANG.) 
*/ /* IDIST (input) INTEGER */ /* On entry, DIST specifies the type of distribution to be used */ /* to generate a random matrix . */ /* = 1: real and imaginary parts each UNIFORM( 0, 1 ) */ /* = 2: real and imaginary parts each UNIFORM( -1, 1 ) */ /* = 3: real and imaginary parts each NORMAL( 0, 1 ) */ /* = 4: complex number uniform in DISK( 0, 1 ) */ /* ISEED (input/output) INTEGER array, dimension (4) */ /* On entry ISEED specifies the seed of the random number */ /* generator. The values of ISEED are changed on exit, and can */ /* be used in the next call to ZLATM4 to continue the same */ /* random number sequence. */ /* Note: ISEED(4) should be odd, for the random number generator */ /* used at present. */ /* A (output) COMPLEX*16 array, dimension (LDA, N) */ /* Array to be computed. */ /* LDA (input) INTEGER */ /* Leading dimension of A. Must be at least 1 and at least N. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Parameter adjustments */ --iseed; a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; /* Function Body */ if (*n <= 0) { return 0; } zlaset_("Full", n, n, &c_b1, &c_b1, &a[a_offset], lda); /* Insure a correct ISEED */ if (iseed[4] % 2 != 1) { ++iseed[4]; } /* Compute diagonal and subdiagonal according to ITYPE, NZ1, NZ2, */ /* and RCOND */ if (*itype != 0) { if (abs(*itype) >= 4) { /* Computing MAX */ /* Computing MIN */ i__3 = *n, i__4 = *nz1 + 1; i__1 = 1, i__2 = min(i__3,i__4); kbeg = max(i__1,i__2); /* Computing MAX */ /* Computing MIN */ i__3 = *n, i__4 = *n - *nz2; i__1 = kbeg, i__2 = min(i__3,i__4); kend = max(i__1,i__2); klen = kend + 1 - kbeg; } else { kbeg = 1; kend = *n; klen = *n; } isdb = 1; isde = 0; switch (abs(*itype)) { case 1: goto L10; case 2: goto L30; case 3: goto L50; case 4: goto L80; case 5: goto L100; case 6: goto L120; case 7: goto L140; case 8: goto L160; case 9: goto L180; case 10: goto L200; } /* abs(ITYPE) = 1: Identity */ L10: i__1 = *n; for (jd = 1; jd <= i__1; ++jd) { i__2 = jd + jd * a_dim1; a[i__2].r = 1., a[i__2].i = 0.; /* L20: */ } goto L220; /* abs(ITYPE) = 2: Transposed Jordan block */ L30: i__1 = *n - 1; for (jd = 1; jd <= i__1; ++jd) { i__2 = jd + 1 + jd * a_dim1; a[i__2].r = 1., a[i__2].i = 0.; /* L40: */ } isdb = 1; isde = *n - 1; goto L220; /* abs(ITYPE) = 3: Transposed Jordan block, followed by the */ /* identity. 
*/ L50: k = (*n - 1) / 2; i__1 = k; for (jd = 1; jd <= i__1; ++jd) { i__2 = jd + 1 + jd * a_dim1; a[i__2].r = 1., a[i__2].i = 0.; /* L60: */ } isdb = 1; isde = k; i__1 = (k << 1) + 1; for (jd = k + 2; jd <= i__1; ++jd) { i__2 = jd + jd * a_dim1; a[i__2].r = 1., a[i__2].i = 0.; /* L70: */ } goto L220; /* abs(ITYPE) = 4: 1,...,k */ L80: i__1 = kend; for (jd = kbeg; jd <= i__1; ++jd) { i__2 = jd + jd * a_dim1; i__3 = jd - *nz1; z__1.r = (doublereal) i__3, z__1.i = 0.; a[i__2].r = z__1.r, a[i__2].i = z__1.i; /* L90: */ } goto L220; /* abs(ITYPE) = 5: One large D value: */ L100: i__1 = kend; for (jd = kbeg + 1; jd <= i__1; ++jd) { i__2 = jd + jd * a_dim1; z__1.r = *rcond, z__1.i = 0.; a[i__2].r = z__1.r, a[i__2].i = z__1.i; /* L110: */ } i__1 = kbeg + kbeg * a_dim1; a[i__1].r = 1., a[i__1].i = 0.; goto L220; /* abs(ITYPE) = 6: One small D value: */ L120: i__1 = kend - 1; for (jd = kbeg; jd <= i__1; ++jd) { i__2 = jd + jd * a_dim1; a[i__2].r = 1., a[i__2].i = 0.; /* L130: */ } i__1 = kend + kend * a_dim1; z__1.r = *rcond, z__1.i = 0.; a[i__1].r = z__1.r, a[i__1].i = z__1.i; goto L220; /* abs(ITYPE) = 7: Exponentially distributed D values: */ L140: i__1 = kbeg + kbeg * a_dim1; a[i__1].r = 1., a[i__1].i = 0.; if (klen > 1) { d__1 = 1. / (doublereal) (klen - 1); alpha = pow_dd(rcond, &d__1); i__1 = klen; for (i__ = 2; i__ <= i__1; ++i__) { i__2 = *nz1 + i__ + (*nz1 + i__) * a_dim1; d__2 = (doublereal) (i__ - 1); d__1 = pow_dd(&alpha, &d__2); z__1.r = d__1, z__1.i = 0.; a[i__2].r = z__1.r, a[i__2].i = z__1.i; /* L150: */ } } goto L220; /* abs(ITYPE) = 8: Arithmetically distributed D values: */ L160: i__1 = kbeg + kbeg * a_dim1; a[i__1].r = 1., a[i__1].i = 0.; if (klen > 1) { alpha = (1. - *rcond) / (doublereal) (klen - 1); i__1 = klen; for (i__ = 2; i__ <= i__1; ++i__) { i__2 = *nz1 + i__ + (*nz1 + i__) * a_dim1; d__1 = (doublereal) (klen - i__) * alpha + *rcond; z__1.r = d__1, z__1.i = 0.; a[i__2].r = z__1.r, a[i__2].i = z__1.i; /* L170: */ } } goto L220; /* abs(ITYPE) = 9: Randomly distributed D values on ( RCOND, 1): */ L180: alpha = log(*rcond); i__1 = kend; for (jd = kbeg; jd <= i__1; ++jd) { i__2 = jd + jd * a_dim1; d__1 = exp(alpha * dlaran_(&iseed[1])); a[i__2].r = d__1, a[i__2].i = 0.; /* L190: */ } goto L220; /* abs(ITYPE) = 10: Randomly distributed D values from DIST */ L200: i__1 = kend; for (jd = kbeg; jd <= i__1; ++jd) { i__2 = jd + jd * a_dim1; zlarnd_(&z__1, idist, &iseed[1]); a[i__2].r = z__1.r, a[i__2].i = z__1.i; /* L210: */ } L220: /* Scale by AMAGN */ i__1 = kend; for (jd = kbeg; jd <= i__1; ++jd) { i__2 = jd + jd * a_dim1; i__3 = jd + jd * a_dim1; d__1 = *amagn * a[i__3].r; a[i__2].r = d__1, a[i__2].i = 0.; /* L230: */ } i__1 = isde; for (jd = isdb; jd <= i__1; ++jd) { i__2 = jd + 1 + jd * a_dim1; i__3 = jd + 1 + jd * a_dim1; d__1 = *amagn * a[i__3].r; a[i__2].r = d__1, a[i__2].i = 0.; /* L240: */ } /* If RSIGN = .TRUE., assign random signs to diagonal and */ /* subdiagonal */ if (*rsign) { i__1 = kend; for (jd = kbeg; jd <= i__1; ++jd) { i__2 = jd + jd * a_dim1; if (a[i__2].r != 0.) { zlarnd_(&z__1, &c__3, &iseed[1]); ctemp.r = z__1.r, ctemp.i = z__1.i; d__1 = z_abs(&ctemp); z__1.r = ctemp.r / d__1, z__1.i = ctemp.i / d__1; ctemp.r = z__1.r, ctemp.i = z__1.i; i__2 = jd + jd * a_dim1; i__3 = jd + jd * a_dim1; d__1 = a[i__3].r; z__1.r = d__1 * ctemp.r, z__1.i = d__1 * ctemp.i; a[i__2].r = z__1.r, a[i__2].i = z__1.i; } /* L250: */ } i__1 = isde; for (jd = isdb; jd <= i__1; ++jd) { i__2 = jd + 1 + jd * a_dim1; if (a[i__2].r != 0.) 
{ zlarnd_(&z__1, &c__3, &iseed[1]); ctemp.r = z__1.r, ctemp.i = z__1.i; d__1 = z_abs(&ctemp); z__1.r = ctemp.r / d__1, z__1.i = ctemp.i / d__1; ctemp.r = z__1.r, ctemp.i = z__1.i; i__2 = jd + 1 + jd * a_dim1; i__3 = jd + 1 + jd * a_dim1; d__1 = a[i__3].r; z__1.r = d__1 * ctemp.r, z__1.i = d__1 * ctemp.i; a[i__2].r = z__1.r, a[i__2].i = z__1.i; } /* L260: */ } } /* Reverse if ITYPE < 0 */ if (*itype < 0) { i__1 = (kbeg + kend - 1) / 2; for (jd = kbeg; jd <= i__1; ++jd) { i__2 = jd + jd * a_dim1; ctemp.r = a[i__2].r, ctemp.i = a[i__2].i; i__2 = jd + jd * a_dim1; i__3 = kbeg + kend - jd + (kbeg + kend - jd) * a_dim1; a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i; i__2 = kbeg + kend - jd + (kbeg + kend - jd) * a_dim1; a[i__2].r = ctemp.r, a[i__2].i = ctemp.i; /* L270: */ } i__1 = (*n - 1) / 2; for (jd = 1; jd <= i__1; ++jd) { i__2 = jd + 1 + jd * a_dim1; ctemp.r = a[i__2].r, ctemp.i = a[i__2].i; i__2 = jd + 1 + jd * a_dim1; i__3 = *n + 1 - jd + (*n - jd) * a_dim1; a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i; i__2 = *n + 1 - jd + (*n - jd) * a_dim1; a[i__2].r = ctemp.r, a[i__2].i = ctemp.i; /* L280: */ } } } /* Fill in upper triangle */ if (*triang != 0.) { i__1 = *n; for (jc = 2; jc <= i__1; ++jc) { i__2 = jc - 1; for (jr = 1; jr <= i__2; ++jr) { i__3 = jr + jc * a_dim1; zlarnd_(&z__2, idist, &iseed[1]); z__1.r = *triang * z__2.r, z__1.i = *triang * z__2.i; a[i__3].r = z__1.r, a[i__3].i = z__1.i; /* L290: */ } /* L300: */ } } return 0; /* End of ZLATM4 */ } /* zlatm4_ */
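/*
 * Illustrative sketch (hypothetical helper, not part of LAPACK): the
 * ITYPE = 7 diagonal generated by ZLATM4 above.  The k nonzero diagonal
 * entries decay geometrically from 1 down to RCOND:  d(i) = alpha^(i-1)
 * with alpha = RCOND^(1/(k-1)), so that d(1) = 1 and d(k) = RCOND.
 */
#include <math.h>

static void geometric_diagonal(int k, double rcond, double *d)
{
    d[0] = 1.;
    if (k > 1) {
        double alpha = pow(rcond, 1. / (double)(k - 1));
        for (int i = 1; i < k; ++i)
            d[i] = pow(alpha, (double)i);        /* = rcond^(i/(k-1)) */
    }
}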
/* Subroutine */ int zptcon_(integer *n, doublereal *d__, doublecomplex *e, doublereal *anorm, doublereal *rcond, doublereal *rwork, integer * info) { /* -- LAPACK routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University September 30, 1994 Purpose ======= ZPTCON computes the reciprocal of the condition number (in the 1-norm) of a complex Hermitian positive definite tridiagonal matrix using the factorization A = L*D*L**H or A = U**H*D*U computed by ZPTTRF. Norm(inv(A)) is computed by a direct method, and the reciprocal of the condition number is computed as RCOND = 1 / (ANORM * norm(inv(A))). Arguments ========= N (input) INTEGER The order of the matrix A. N >= 0. D (input) DOUBLE PRECISION array, dimension (N) The n diagonal elements of the diagonal matrix D from the factorization of A, as computed by ZPTTRF. E (input) COMPLEX*16 array, dimension (N-1) The (n-1) off-diagonal elements of the unit bidiagonal factor U or L from the factorization of A, as computed by ZPTTRF. ANORM (input) DOUBLE PRECISION The 1-norm of the original matrix A. RCOND (output) DOUBLE PRECISION The reciprocal of the condition number of the matrix A, computed as RCOND = 1/(ANORM * AINVNM), where AINVNM is the 1-norm of inv(A) computed in this routine. RWORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== The method used is described in Nicholas J. Higham, "Efficient Algorithms for Computing the Condition Number of a Tridiagonal Matrix", SIAM J. Sci. Stat. Comput., Vol. 7, No. 1, January 1986. ===================================================================== Test the input arguments. Parameter adjustments */ /* Table of constant values */ static integer c__1 = 1; /* System generated locals */ integer i__1; doublereal d__1; /* Builtin functions */ double z_abs(doublecomplex *); /* Local variables */ static integer i__, ix; extern integer idamax_(integer *, doublereal *, integer *); extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal ainvnm; --rwork; --e; --d__; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } else if (*anorm < 0.) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("ZPTCON", &i__1); return 0; } /* Quick return if possible */ *rcond = 0.; if (*n == 0) { *rcond = 1.; return 0; } else if (*anorm == 0.) { return 0; } /* Check that D(1:N) is positive. */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if (d__[i__] <= 0.) { return 0; } /* L10: */ } /* Solve M(A) * x = e, where M(A) = (m(i,j)) is given by m(i,j) = abs(A(i,j)), i = j, m(i,j) = -abs(A(i,j)), i .ne. j, and e = [ 1, 1, ..., 1 ]'. Note M(A) = M(L)*D*M(L)'. Solve M(L) * x = e. */ rwork[1] = 1.; i__1 = *n; for (i__ = 2; i__ <= i__1; ++i__) { rwork[i__] = rwork[i__ - 1] * z_abs(&e[i__ - 1]) + 1.; /* L20: */ } /* Solve D * M(L)' * x = b. */ rwork[*n] /= d__[*n]; for (i__ = *n - 1; i__ >= 1; --i__) { rwork[i__] = rwork[i__] / d__[i__] + rwork[i__ + 1] * z_abs(&e[i__]); /* L30: */ } /* Compute AINVNM = max(x(i)), 1<=i<=n. */ ix = idamax_(n, &rwork[1], &c__1); ainvnm = (d__1 = rwork[ix], abs(d__1)); /* Compute the reciprocal condition number. */ if (ainvnm != 0.) { *rcond = 1. / ainvnm / *anorm; } return 0; /* End of ZPTCON */ } /* zptcon_ */
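/*
 * Illustrative sketch (hypothetical helper, not part of LAPACK): the O(n)
 * condition estimate used by ZPTCON above (Higham, SIAM J. Sci. Stat.
 * Comput. 7, 1986).  Given the L*D*L**H factors of a Hermitian positive
 * definite tridiagonal matrix (diagonal d, off-diagonal e) and the 1-norm
 * of A, norm1(inv(A)) is max_i x(i), where M(L)*y = ones is solved forward
 * and D*M(L)'*x = y backward, with |e(i)| in place of e(i).  C99 complex
 * and 0-based arrays replace the f2c conventions.
 */
#include <complex.h>

static double ptcon_rcond(int n, const double *d, const double complex *e,
                          double anorm, double *x /* workspace, length n */)
{
    if (n == 0) return 1.;
    if (anorm == 0.) return 0.;
    for (int i = 0; i < n; ++i)
        if (d[i] <= 0.) return 0.;               /* D must be positive      */
    x[0] = 1.;                                   /* forward: M(L)*y = ones  */
    for (int i = 1; i < n; ++i)
        x[i] = 1. + x[i - 1] * cabs(e[i - 1]);
    x[n - 1] /= d[n - 1];                        /* backward: D*M(L)'*x = y */
    for (int i = n - 2; i >= 0; --i)
        x[i] = x[i] / d[i] + x[i + 1] * cabs(e[i]);
    double ainvnm = 0.;                          /* norm1(inv(A)) = max x(i) */
    for (int i = 0; i < n; ++i)
        if (x[i] > ainvnm) ainvnm = x[i];
    return ainvnm != 0. ? 1. / ainvnm / anorm : 0.;
}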
doublereal zqrt14_(char *trans, integer *m, integer *n, integer *nrhs, doublecomplex *a, integer *lda, doublecomplex *x, integer *ldx, doublecomplex *work, integer *lwork) { /* System generated locals */ integer a_dim1, a_offset, x_dim1, x_offset, i__1, i__2, i__3; doublereal ret_val, d__1, d__2; doublecomplex z__1; /* Builtin functions */ double z_abs(doublecomplex *); void d_cnjg(doublecomplex *, doublecomplex *); /* Local variables */ static integer info; static doublereal anrm; static logical tpsd; static doublereal xnrm; static integer i__, j; extern logical lsame_(char *, char *); static doublereal rwork[1]; extern /* Subroutine */ int zgelq2_(integer *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *), zgeqr2_( integer *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *); extern doublereal dlamch_(char *); extern /* Subroutine */ int xerbla_(char *, integer *); extern doublereal zlange_(char *, integer *, integer *, doublecomplex *, integer *, doublereal *); extern /* Subroutine */ int zlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublecomplex *, integer *, integer *); static integer ldwork; extern /* Subroutine */ int zlacpy_(char *, integer *, integer *, doublecomplex *, integer *, doublecomplex *, integer *); static doublereal err; #define x_subscr(a_1,a_2) (a_2)*x_dim1 + a_1 #define x_ref(a_1,a_2) x[x_subscr(a_1,a_2)] /* -- LAPACK test routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University February 29, 1992 Purpose ======= ZQRT14 checks whether X is in the row space of A or A'. It does so by scaling both X and A such that their norms are in the range [sqrt(eps), 1/sqrt(eps)], then computing a QR factorization of [A,X] (if TRANS = 'C') or an LQ factorization of [A',X]' (if TRANS = 'N'), and returning the norm of the trailing triangle, scaled by MAX(M,N,NRHS)*eps. Arguments ========= TRANS (input) CHARACTER*1 = 'N': No transpose, check for X in the row space of A = 'C': Conjugate transpose, check for X in row space of A'. M (input) INTEGER The number of rows of the matrix A. N (input) INTEGER The number of columns of the matrix A. NRHS (input) INTEGER The number of right hand sides, i.e., the number of columns of X. A (input) COMPLEX*16 array, dimension (LDA,N) The M-by-N matrix A. LDA (input) INTEGER The leading dimension of the array A. X (input) COMPLEX*16 array, dimension (LDX,NRHS) If TRANS = 'N', the N-by-NRHS matrix X. IF TRANS = 'C', the M-by-NRHS matrix X. LDX (input) INTEGER The leading dimension of the array X. WORK (workspace) COMPLEX*16 array dimension (LWORK) LWORK (input) INTEGER length of workspace array required If TRANS = 'N', LWORK >= (M+NRHS)*(N+2); if TRANS = 'C', LWORK >= (N+NRHS)*(M+2). 
===================================================================== Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; x_dim1 = *ldx; x_offset = 1 + x_dim1 * 1; x -= x_offset; --work; /* Function Body */ ret_val = 0.; if (lsame_(trans, "N")) { ldwork = *m + *nrhs; tpsd = FALSE_; if (*lwork < (*m + *nrhs) * (*n + 2)) { xerbla_("ZQRT14", &c__10); return ret_val; } else if (*n <= 0 || *nrhs <= 0) { return ret_val; } } else if (lsame_(trans, "C")) { ldwork = *m; tpsd = TRUE_; if (*lwork < (*n + *nrhs) * (*m + 2)) { xerbla_("ZQRT14", &c__10); return ret_val; } else if (*m <= 0 || *nrhs <= 0) { return ret_val; } } else { xerbla_("ZQRT14", &c__1); return ret_val; } /* Copy and scale A */ zlacpy_("All", m, n, &a[a_offset], lda, &work[1], &ldwork); anrm = zlange_("M", m, n, &work[1], &ldwork, rwork); if (anrm != 0.) { zlascl_("G", &c__0, &c__0, &anrm, &c_b15, m, n, &work[1], &ldwork, & info); } /* Copy X or X' into the right place and scale it */ if (tpsd) { /* Copy X into columns n+1:n+nrhs of work */ zlacpy_("All", m, nrhs, &x[x_offset], ldx, &work[*n * ldwork + 1], & ldwork); xnrm = zlange_("M", m, nrhs, &work[*n * ldwork + 1], &ldwork, rwork); if (xnrm != 0.) { zlascl_("G", &c__0, &c__0, &xnrm, &c_b15, m, nrhs, &work[*n * ldwork + 1], &ldwork, &info); } i__1 = *n + *nrhs; anrm = zlange_("One-norm", m, &i__1, &work[1], &ldwork, rwork); /* Compute QR factorization of X */ i__1 = *n + *nrhs; /* Computing MIN */ i__2 = *m, i__3 = *n + *nrhs; zgeqr2_(m, &i__1, &work[1], &ldwork, &work[ldwork * (*n + *nrhs) + 1], &work[ldwork * (*n + *nrhs) + min(i__2,i__3) + 1], &info); /* Compute largest entry in upper triangle of work(n+1:m,n+1:n+nrhs) */ err = 0.; i__1 = *n + *nrhs; for (j = *n + 1; j <= i__1; ++j) { i__2 = min(*m,j); for (i__ = *n + 1; i__ <= i__2; ++i__) { /* Computing MAX */ d__1 = err, d__2 = z_abs(&work[i__ + (j - 1) * *m]); err = max(d__1,d__2); /* L10: */ } /* L20: */ } } else { /* Copy X' into rows m+1:m+nrhs of work */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = *nrhs; for (j = 1; j <= i__2; ++j) { i__3 = *m + j + (i__ - 1) * ldwork; d_cnjg(&z__1, &x_ref(i__, j)); work[i__3].r = z__1.r, work[i__3].i = z__1.i; /* L30: */ } /* L40: */ } xnrm = zlange_("M", nrhs, n, &work[*m + 1], &ldwork, rwork) ; if (xnrm != 0.) { zlascl_("G", &c__0, &c__0, &xnrm, &c_b15, nrhs, n, &work[*m + 1], &ldwork, &info); } /* Compute LQ factorization of work */ zgelq2_(&ldwork, n, &work[1], &ldwork, &work[ldwork * *n + 1], &work[ ldwork * (*n + 1) + 1], &info); /* Compute largest entry in lower triangle in work(m+1:m+nrhs,m+1:n) */ err = 0.; i__1 = *n; for (j = *m + 1; j <= i__1; ++j) { i__2 = ldwork; for (i__ = j; i__ <= i__2; ++i__) { /* Computing MAX */ d__1 = err, d__2 = z_abs(&work[i__ + (j - 1) * ldwork]); err = max(d__1,d__2); /* L50: */ } /* L60: */ } } /* Computing MAX */ i__1 = max(*m,*n); ret_val = err / ((doublereal) max(i__1,*nrhs) * dlamch_("Epsilon")); return ret_val; /* End of ZQRT14 */ } /* zqrt14_ */
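/* ------------------------------------------------------------------
   Example (illustrative sketch, not part of the original sources):
   using zqrt14_ with TRANS = 'N' to check that a vector X lies in the
   row space of a 2-by-3 matrix A.  Here A has rows e1 and e2 and
   X = (1, 2, 0)**T is a combination of those rows, so the returned
   test ratio should be essentially zero (in general it is O(1) when X
   is in the row space, and large when it is not).  Storage is
   column-major, types follow the f2c conventions used above, and the
   driver name is made up.
   ------------------------------------------------------------------ */
#include <stdio.h>
#include "f2c.h"   /* assumed f2c-style typedefs */

extern doublereal zqrt14_(char *trans, integer *m, integer *n, integer *nrhs,
                          doublecomplex *a, integer *lda, doublecomplex *x,
                          integer *ldx, doublecomplex *work, integer *lwork);

int zqrt14_example(void)
{
    integer m = 2, n = 3, nrhs = 1, lda = 2, ldx = 3;
    integer lwork = (2 + 1) * (3 + 2);            /* (M+NRHS)*(N+2) for TRANS = 'N' */
    doublecomplex a[6] = { {1.,0.}, {0.,0.},      /* column 1 of A */
                           {0.,0.}, {1.,0.},      /* column 2 of A */
                           {0.,0.}, {0.,0.} };    /* column 3 of A */
    doublecomplex x[3] = { {1.,0.}, {2.,0.}, {0.,0.} };
    doublecomplex work[15];
    doublereal ratio;

    ratio = zqrt14_("N", &m, &n, &nrhs, a, &lda, x, &ldx, work, &lwork);
    printf("zqrt14 ratio = %g (expect ~0 here)\n", ratio);
    return 0;
}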
/* Subroutine */ int zgetc2_(integer *n, doublecomplex *a, integer *lda, integer *ipiv, integer *jpiv, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1; doublecomplex z__1; /* Builtin functions */ double z_abs(doublecomplex *); void z_div(doublecomplex *, doublecomplex *, doublecomplex *); /* Local variables */ static integer i__, j, ip, jp; static doublereal eps; static integer ipv, jpv; static doublereal smin, xmax; extern /* Subroutine */ int zgeru_(integer *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *), zswap_(integer *, doublecomplex *, integer *, doublecomplex *, integer *), dlabad_(doublereal *, doublereal *); extern doublereal dlamch_(char *, ftnlen); static doublereal bignum, smlnum; /* -- LAPACK auxiliary routine (version 3.0) -- */ /* Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */ /* Courant Institute, Argonne National Lab, and Rice University */ /* June 30, 1999 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZGETC2 computes an LU factorization, using complete pivoting, of the */ /* n-by-n matrix A. The factorization has the form A = P * L * U * Q, */ /* where P and Q are permutation matrices, L is lower triangular with */ /* unit diagonal elements and U is upper triangular. */ /* This is a level 1 BLAS version of the algorithm. */ /* Arguments */ /* ========= */ /* N (input) INTEGER */ /* The order of the matrix A. N >= 0. */ /* A (input/output) COMPLEX*16 array, dimension (LDA, N) */ /* On entry, the n-by-n matrix to be factored. */ /* On exit, the factors L and U from the factorization */ /* A = P*L*U*Q; the unit diagonal elements of L are not stored. */ /* If U(k, k) appears to be less than SMIN, U(k, k) is given the */ /* value of SMIN, giving a nonsingular perturbed system. */ /* LDA (input) INTEGER */ /* The leading dimension of the array A. LDA >= max(1, N). */ /* IPIV (output) INTEGER array, dimension (N). */ /* The pivot indices; for 1 <= i <= N, row i of the */ /* matrix has been interchanged with row IPIV(i). */ /* JPIV (output) INTEGER array, dimension (N). */ /* The pivot indices; for 1 <= j <= N, column j of the */ /* matrix has been interchanged with column JPIV(j). */ /* INFO (output) INTEGER */ /* = 0: successful exit */ /* > 0: if INFO = k, U(k, k) is likely to produce overflow if */ /* one tries to solve for x in Ax = b. So U is perturbed */ /* to avoid the overflow. */ /* Further Details */ /* =============== */ /* Based on contributions by */ /* Bo Kagstrom and Peter Poromaa, Department of Computing Science, */ /* Umea University, S-901 87 Umea, Sweden. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Set constants to control overflow */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; --ipiv; --jpiv; /* Function Body */ *info = 0; eps = dlamch_("P", (ftnlen)1); smlnum = dlamch_("S", (ftnlen)1) / eps; bignum = 1. / smlnum; dlabad_(&smlnum, &bignum); /* Factorize A using complete pivoting. 
*/ /* Set pivots less than SMIN to SMIN */ i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Find max element in matrix A */ xmax = 0.; i__2 = *n; for (ip = i__; ip <= i__2; ++ip) { i__3 = *n; for (jp = i__; jp <= i__3; ++jp) { if (z_abs(&a[ip + jp * a_dim1]) >= xmax) { xmax = z_abs(&a[ip + jp * a_dim1]); ipv = ip; jpv = jp; } /* L10: */ } /* L20: */ } if (i__ == 1) { /* Computing MAX */ d__1 = eps * xmax; smin = max(d__1,smlnum); } /* Swap rows */ if (ipv != i__) { zswap_(n, &a[ipv + a_dim1], lda, &a[i__ + a_dim1], lda); } ipiv[i__] = ipv; /* Swap columns */ if (jpv != i__) { zswap_(n, &a[jpv * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], & c__1); } jpiv[i__] = jpv; /* Check for singularity */ if (z_abs(&a[i__ + i__ * a_dim1]) < smin) { *info = i__; i__2 = i__ + i__ * a_dim1; z__1.r = smin, z__1.i = 0.; a[i__2].r = z__1.r, a[i__2].i = z__1.i; } i__2 = *n; for (j = i__ + 1; j <= i__2; ++j) { i__3 = j + i__ * a_dim1; z_div(&z__1, &a[j + i__ * a_dim1], &a[i__ + i__ * a_dim1]); a[i__3].r = z__1.r, a[i__3].i = z__1.i; /* L30: */ } i__2 = *n - i__; i__3 = *n - i__; zgeru_(&i__2, &i__3, &c_b10, &a[i__ + 1 + i__ * a_dim1], &c__1, &a[ i__ + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + (i__ + 1) * a_dim1], lda); /* L40: */ } if (z_abs(&a[*n + *n * a_dim1]) < smin) { *info = *n; i__1 = *n + *n * a_dim1; z__1.r = smin, z__1.i = 0.; a[i__1].r = z__1.r, a[i__1].i = z__1.i; } return 0; /* End of ZGETC2 */ } /* zgetc2_ */
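/* ------------------------------------------------------------------
   Example (illustrative sketch, not part of the original sources):
   factoring a small complex matrix with zgetc2_ (complete pivoting,
   A = P*L*U*Q).  On exit the strict lower triangle of A holds L (unit
   diagonal implied), the upper triangle holds U, and IPIV/JPIV record
   the row and column interchanges.  INFO > 0 only means a tiny pivot
   was replaced by SMIN, not a hard failure.  Types follow the f2c
   conventions used above; the driver name is made up.
   ------------------------------------------------------------------ */
#include <stdio.h>
#include "f2c.h"   /* assumed f2c-style typedefs */

extern int zgetc2_(integer *n, doublecomplex *a, integer *lda,
                   integer *ipiv, integer *jpiv, integer *info);

int zgetc2_example(void)
{
    integer n = 3, lda = 3, ipiv[3], jpiv[3], info;
    integer i;
    /* column-major 3-by-3 complex matrix */
    doublecomplex a[9] = {
        {4., 1.}, {2., 0.}, {1., -1.},   /* column 1 */
        {0., 2.}, {5., 0.}, {3.,  1.},   /* column 2 */
        {1., 0.}, {0., -3.}, {6., 2.}    /* column 3 */
    };

    zgetc2_(&n, a, &lda, ipiv, jpiv, &info);
    printf("info = %ld\n", (long) info);
    for (i = 0; i < n; ++i)
        printf("ipiv[%ld] = %ld, jpiv[%ld] = %ld\n",
               (long) i, (long) ipiv[i], (long) i, (long) jpiv[i]);
    return (int) info;
}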
/* Subroutine */ int zlacn2_(integer *n, doublecomplex *v, doublecomplex *x, doublereal *est, integer *kase, integer *isave) { /* System generated locals */ integer i__1, i__2, i__3; doublereal d__1, d__2; doublecomplex z__1; /* Builtin functions */ double z_abs(doublecomplex *), d_imag(doublecomplex *); /* Local variables */ integer i__; doublereal temp, absxi; integer jlast; extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, doublecomplex *, integer *); extern integer izmax1_(integer *, doublecomplex *, integer *); extern doublereal dzsum1_(integer *, doublecomplex *, integer *), dlamch_( char *); doublereal safmin, altsgn, estold; /* -- LAPACK auxiliary routine (version 3.2) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZLACN2 estimates the 1-norm of a square, complex matrix A. */ /* Reverse communication is used for evaluating matrix-vector products. */ /* Arguments */ /* ========= */ /* N (input) INTEGER */ /* The order of the matrix. N >= 1. */ /* V (workspace) COMPLEX*16 array, dimension (N) */ /* On the final return, V = A*W, where EST = norm(V)/norm(W) */ /* (W is not returned). */ /* X (input/output) COMPLEX*16 array, dimension (N) */ /* On an intermediate return, X should be overwritten by */ /* A * X, if KASE=1, */ /* A' * X, if KASE=2, */ /* where A' is the conjugate transpose of A, and ZLACN2 must be */ /* re-called with all the other parameters unchanged. */ /* EST (input/output) DOUBLE PRECISION */ /* On entry with KASE = 1 or 2 and ISAVE(1) = 3, EST should be */ /* unchanged from the previous call to ZLACN2. */ /* On exit, EST is an estimate (a lower bound) for norm(A). */ /* KASE (input/output) INTEGER */ /* On the initial call to ZLACN2, KASE should be 0. */ /* On an intermediate return, KASE will be 1 or 2, indicating */ /* whether X should be overwritten by A * X or A' * X. */ /* On the final return from ZLACN2, KASE will again be 0. */ /* ISAVE (input/output) INTEGER array, dimension (3) */ /* ISAVE is used to save variables between calls to ZLACN2 */ /* Further Details */ /* ======= ======= */ /* Contributed by Nick Higham, University of Manchester. */ /* Originally named CONEST, dated March 16, 1988. */ /* Reference: N.J. Higham, "FORTRAN codes for estimating the one-norm of */ /* a real or complex matrix, with applications to condition estimation", */ /* ACM Trans. Math. Soft., vol. 14, no. 4, pp. 381-396, December 1988. */ /* Last modified: April, 1999 */ /* This is a thread safe version of ZLACON, which uses the array ISAVE */ /* in place of a SAVE statement, as follows: */ /* ZLACON ZLACN2 */ /* JUMP ISAVE(1) */ /* J ISAVE(2) */ /* ITER ISAVE(3) */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Parameter adjustments */ --isave; --x; --v; /* Function Body */ safmin = dlamch_("Safe minimum"); if (*kase == 0) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = i__; d__1 = 1. 
/ (doublereal) (*n); z__1.r = d__1, z__1.i = 0.; x[i__2].r = z__1.r, x[i__2].i = z__1.i; /* L10: */ } *kase = 1; isave[1] = 1; return 0; } switch (isave[1]) { case 1: goto L20; case 2: goto L40; case 3: goto L70; case 4: goto L90; case 5: goto L120; } /* ................ ENTRY (ISAVE( 1 ) = 1) */ /* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY A*X. */ L20: if (*n == 1) { v[1].r = x[1].r, v[1].i = x[1].i; *est = z_abs(&v[1]); /* ... QUIT */ goto L130; } *est = dzsum1_(n, &x[1], &c__1); i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { absxi = z_abs(&x[i__]); if (absxi > safmin) { i__2 = i__; i__3 = i__; d__1 = x[i__3].r / absxi; d__2 = d_imag(&x[i__]) / absxi; z__1.r = d__1, z__1.i = d__2; x[i__2].r = z__1.r, x[i__2].i = z__1.i; } else { i__2 = i__; x[i__2].r = 1., x[i__2].i = 0.; } /* L30: */ } *kase = 2; isave[1] = 2; return 0; /* ................ ENTRY (ISAVE( 1 ) = 2) */ /* FIRST ITERATION. X HAS BEEN OVERWRITTEN BY CTRANS(A)*X. */ L40: isave[2] = izmax1_(n, &x[1], &c__1); isave[3] = 2; /* MAIN LOOP - ITERATIONS 2,3,...,ITMAX. */ L50: i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = i__; x[i__2].r = 0., x[i__2].i = 0.; /* L60: */ } i__1 = isave[2]; x[i__1].r = 1., x[i__1].i = 0.; *kase = 1; isave[1] = 3; return 0; /* ................ ENTRY (ISAVE( 1 ) = 3) */ /* X HAS BEEN OVERWRITTEN BY A*X. */ L70: zcopy_(n, &x[1], &c__1, &v[1], &c__1); estold = *est; *est = dzsum1_(n, &v[1], &c__1); /* TEST FOR CYCLING. */ if (*est <= estold) { goto L100; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { absxi = z_abs(&x[i__]); if (absxi > safmin) { i__2 = i__; i__3 = i__; d__1 = x[i__3].r / absxi; d__2 = d_imag(&x[i__]) / absxi; z__1.r = d__1, z__1.i = d__2; x[i__2].r = z__1.r, x[i__2].i = z__1.i; } else { i__2 = i__; x[i__2].r = 1., x[i__2].i = 0.; } /* L80: */ } *kase = 2; isave[1] = 4; return 0; /* ................ ENTRY (ISAVE( 1 ) = 4) */ /* X HAS BEEN OVERWRITTEN BY CTRANS(A)*X. */ L90: jlast = isave[2]; isave[2] = izmax1_(n, &x[1], &c__1); if (z_abs(&x[jlast]) != z_abs(&x[isave[2]]) && isave[3] < 5) { ++isave[3]; goto L50; } /* ITERATION COMPLETE. FINAL STAGE. */ L100: altsgn = 1.; i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = i__; d__1 = altsgn * ((doublereal) (i__ - 1) / (doublereal) (*n - 1) + 1.); z__1.r = d__1, z__1.i = 0.; x[i__2].r = z__1.r, x[i__2].i = z__1.i; altsgn = -altsgn; /* L110: */ } *kase = 1; isave[1] = 5; return 0; /* ................ ENTRY (ISAVE( 1 ) = 5) */ /* X HAS BEEN OVERWRITTEN BY A*X. */ L120: temp = dzsum1_(n, &x[1], &c__1) / (doublereal) (*n * 3) * 2.; if (temp > *est) { zcopy_(n, &x[1], &c__1, &v[1], &c__1); *est = temp; } L130: *kase = 0; return 0; /* End of ZLACN2 */ } /* zlacn2_ */
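/* ------------------------------------------------------------------
   Example (illustrative sketch, not part of the original sources):
   driving the reverse-communication loop of zlacn2_ to estimate the
   1-norm of A = diag(1, -3, 2).  The caller supplies the matrix-vector
   products: KASE = 1 asks for A*X, KASE = 2 for A**H*X, and KASE = 0
   means EST holds the estimate (here it should come out as 3).  Types
   follow the f2c conventions used above; the driver name is made up.
   ------------------------------------------------------------------ */
#include <stdio.h>
#include "f2c.h"   /* assumed f2c-style typedefs */

extern int zlacn2_(integer *n, doublecomplex *v, doublecomplex *x,
                   doublereal *est, integer *kase, integer *isave);

int zlacn2_example(void)
{
    integer n = 3, kase = 0, isave[3];
    doublecomplex v[3], x[3];
    doublereal est = 0.;
    doublereal diag[3] = { 1., -3., 2. };   /* A = diag(1,-3,2), so norm1(A) = 3 */
    integer i;

    do {
        zlacn2_(&n, v, x, &est, &kase, isave);
        if (kase == 1 || kase == 2) {
            /* A is real and diagonal, so A*X and A**H*X are the same product */
            for (i = 0; i < n; ++i) {
                x[i].r *= diag[i];
                x[i].i *= diag[i];
            }
        }
    } while (kase != 0);

    printf("estimated 1-norm = %g (exact value is 3)\n", est);
    return 0;
}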
/* Subroutine */ int zgbbrd_(char *vect, integer *m, integer *n, integer *ncc, integer *kl, integer *ku, doublecomplex *ab, integer *ldab, doublereal *d__, doublereal *e, doublecomplex *q, integer *ldq, doublecomplex *pt, integer *ldpt, doublecomplex *c__, integer *ldc, doublecomplex *work, doublereal *rwork, integer *info) { /* System generated locals */ integer ab_dim1, ab_offset, c_dim1, c_offset, pt_dim1, pt_offset, q_dim1, q_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7; doublecomplex z__1, z__2, z__3; /* Local variables */ integer i__, j, l; doublecomplex t; integer j1, j2, kb; doublecomplex ra, rb; doublereal rc; integer kk, ml, nr, mu; doublecomplex rs; integer kb1, ml0, mu0, klm, kun, nrt, klu1, inca; doublereal abst; logical wantb, wantc; integer minmn; logical wantq; logical wantpt; /* -- LAPACK routine (version 3.2) -- */ /* November 2006 */ /* Purpose */ /* ======= */ /* ZGBBRD reduces a complex general m-by-n band matrix A to real upper */ /* bidiagonal form B by a unitary transformation: Q' * A * P = B. */ /* The routine computes B, and optionally forms Q or P', or computes */ /* Q'*C for a given matrix C. */ /* Arguments */ /* ========= */ /* VECT (input) CHARACTER*1 */ /* Specifies whether or not the matrices Q and P' are to be */ /* formed. */ /* = 'N': do not form Q or P'; */ /* = 'Q': form Q only; */ /* = 'P': form P' only; */ /* = 'B': form both. */ /* M (input) INTEGER */ /* The number of rows of the matrix A. M >= 0. */ /* N (input) INTEGER */ /* The number of columns of the matrix A. N >= 0. */ /* NCC (input) INTEGER */ /* The number of columns of the matrix C. NCC >= 0. */ /* KL (input) INTEGER */ /* The number of subdiagonals of the matrix A. KL >= 0. */ /* KU (input) INTEGER */ /* The number of superdiagonals of the matrix A. KU >= 0. */ /* AB (input/output) COMPLEX*16 array, dimension (LDAB,N) */ /* On entry, the m-by-n band matrix A, stored in rows 1 to */ /* KL+KU+1. The j-th column of A is stored in the j-th column of */ /* the array AB as follows: */ /* AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl). */ /* On exit, A is overwritten by values generated during the */ /* reduction. */ /* LDAB (input) INTEGER */ /* The leading dimension of the array A. LDAB >= KL+KU+1. */ /* D (output) DOUBLE PRECISION array, dimension (min(M,N)) */ /* The diagonal elements of the bidiagonal matrix B. */ /* E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) */ /* The superdiagonal elements of the bidiagonal matrix B. */ /* Q (output) COMPLEX*16 array, dimension (LDQ,M) */ /* If VECT = 'Q' or 'B', the m-by-m unitary matrix Q. */ /* If VECT = 'N' or 'P', the array Q is not referenced. */ /* LDQ (input) INTEGER */ /* The leading dimension of the array Q. */ /* LDQ >= max(1,M) if VECT = 'Q' or 'B'; LDQ >= 1 otherwise. */ /* PT (output) COMPLEX*16 array, dimension (LDPT,N) */ /* If VECT = 'P' or 'B', the n-by-n unitary matrix P'. */ /* If VECT = 'N' or 'Q', the array PT is not referenced. */ /* LDPT (input) INTEGER */ /* The leading dimension of the array PT. */ /* LDPT >= max(1,N) if VECT = 'P' or 'B'; LDPT >= 1 otherwise. */ /* C (input/output) COMPLEX*16 array, dimension (LDC,NCC) */ /* On entry, an m-by-ncc matrix C. */ /* On exit, C is overwritten by Q'*C. */ /* C is not referenced if NCC = 0. */ /* LDC (input) INTEGER */ /* The leading dimension of the array C. */ /* LDC >= max(1,M) if NCC > 0; LDC >= 1 if NCC = 0. 
*/ /* WORK (workspace) COMPLEX*16 array, dimension (max(M,N)) */ /* RWORK (workspace) DOUBLE PRECISION array, dimension (max(M,N)) */ /* INFO (output) INTEGER */ /* = 0: successful exit. */ /* < 0: if INFO = -i, the i-th argument had an illegal value. */ /* ===================================================================== */ /* Test the input parameters */ /* Parameter adjustments */ ab_dim1 = *ldab; ab_offset = 1 + ab_dim1; ab -= ab_offset; --d__; --e; q_dim1 = *ldq; q_offset = 1 + q_dim1; q -= q_offset; pt_dim1 = *ldpt; pt_offset = 1 + pt_dim1; pt -= pt_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1; c__ -= c_offset; --work; --rwork; /* Function Body */ wantb = lsame_(vect, "B"); wantq = lsame_(vect, "Q") || wantb; wantpt = lsame_(vect, "P") || wantb; wantc = *ncc > 0; klu1 = *kl + *ku + 1; *info = 0; if (! wantq && ! wantpt && ! lsame_(vect, "N")) { *info = -1; } else if (*m < 0) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*ncc < 0) { *info = -4; } else if (*kl < 0) { *info = -5; } else if (*ku < 0) { *info = -6; } else if (*ldab < klu1) { *info = -8; } else if (*ldq < 1 || wantq && *ldq < max(1,*m)) { *info = -12; } else if (*ldpt < 1 || wantpt && *ldpt < max(1,*n)) { *info = -14; } else if (*ldc < 1 || wantc && *ldc < max(1,*m)) { *info = -16; } if (*info != 0) { i__1 = -(*info); xerbla_("ZGBBRD", &i__1); return 0; } /* Initialize Q and P' to the unit matrix, if needed */ if (wantq) { zlaset_("Full", m, m, &c_b1, &c_b2, &q[q_offset], ldq); } if (wantpt) { zlaset_("Full", n, n, &c_b1, &c_b2, &pt[pt_offset], ldpt); } /* Quick return if possible. */ if (*m == 0 || *n == 0) { return 0; } minmn = min(*m,*n); if (*kl + *ku > 1) { /* Reduce to upper bidiagonal form if KU > 0; if KU = 0, reduce */ /* first to lower bidiagonal form and then transform to upper */ /* bidiagonal */ if (*ku > 0) { ml0 = 1; mu0 = 2; } else { ml0 = 2; mu0 = 1; } /* Wherever possible, plane rotations are generated and applied in */ /* vector operations of length NR over the index set J1:J2:KLU1. */ /* The complex sines of the plane rotations are stored in WORK, */ /* and the real cosines in RWORK. 
*/ /* Computing MIN */ i__1 = *m - 1; klm = min(i__1,*kl); /* Computing MIN */ i__1 = *n - 1; kun = min(i__1,*ku); kb = klm + kun; kb1 = kb + 1; inca = kb1 * *ldab; nr = 0; j1 = klm + 2; j2 = 1 - kun; i__1 = minmn; for (i__ = 1; i__ <= i__1; ++i__) { /* Reduce i-th column and i-th row of matrix to bidiagonal form */ ml = klm + 1; mu = kun + 1; i__2 = kb; for (kk = 1; kk <= i__2; ++kk) { j1 += kb; j2 += kb; /* generate plane rotations to annihilate nonzero elements */ /* which have been created below the band */ if (nr > 0) { zlargv_(&nr, &ab[klu1 + (j1 - klm - 1) * ab_dim1], &inca, &work[j1], &kb1, &rwork[j1], &kb1); } /* apply plane rotations from the left */ i__3 = kb; for (l = 1; l <= i__3; ++l) { if (j2 - klm + l - 1 > *n) { nrt = nr - 1; } else { nrt = nr; } if (nrt > 0) { zlartv_(&nrt, &ab[klu1 - l + (j1 - klm + l - 1) * ab_dim1], &inca, &ab[klu1 - l + 1 + (j1 - klm + l - 1) * ab_dim1], &inca, &rwork[j1], &work[ j1], &kb1); } } if (ml > ml0) { if (ml <= *m - i__ + 1) { /* generate plane rotation to annihilate a(i+ml-1,i) */ /* within the band, and apply rotation from the left */ zlartg_(&ab[*ku + ml - 1 + i__ * ab_dim1], &ab[*ku + ml + i__ * ab_dim1], &rwork[i__ + ml - 1], & work[i__ + ml - 1], &ra); i__3 = *ku + ml - 1 + i__ * ab_dim1; ab[i__3].r = ra.r, ab[i__3].i = ra.i; if (i__ < *n) { /* Computing MIN */ i__4 = *ku + ml - 2, i__5 = *n - i__; i__3 = min(i__4,i__5); i__6 = *ldab - 1; i__7 = *ldab - 1; zrot_(&i__3, &ab[*ku + ml - 2 + (i__ + 1) * ab_dim1], &i__6, &ab[*ku + ml - 1 + (i__ + 1) * ab_dim1], &i__7, &rwork[i__ + ml - 1], &work[i__ + ml - 1]); } } ++nr; j1 -= kb1; } if (wantq) { /* accumulate product of plane rotations in Q */ i__3 = j2; i__4 = kb1; for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) { d_cnjg(&z__1, &work[j]); zrot_(m, &q[(j - 1) * q_dim1 + 1], &c__1, &q[j * q_dim1 + 1], &c__1, &rwork[j], &z__1); } } if (wantc) { /* apply plane rotations to C */ i__4 = j2; i__3 = kb1; for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) { zrot_(ncc, &c__[j - 1 + c_dim1], ldc, &c__[j + c_dim1] , ldc, &rwork[j], &work[j]); } } if (j2 + kun > *n) { /* adjust J2 to keep within the bounds of the matrix */ --nr; j2 -= kb1; } i__3 = j2; i__4 = kb1; for (j = j1; i__4 < 0 ? 
j >= i__3 : j <= i__3; j += i__4) { /* create nonzero element a(j-1,j+ku) above the band */ /* and store it in WORK(n+1:2*n) */ i__5 = j + kun; i__6 = j; i__7 = (j + kun) * ab_dim1 + 1; z__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[ i__7].i, z__1.i = work[i__6].r * ab[i__7].i + work[i__6].i * ab[i__7].r; work[i__5].r = z__1.r, work[i__5].i = z__1.i; i__5 = (j + kun) * ab_dim1 + 1; i__6 = j; i__7 = (j + kun) * ab_dim1 + 1; z__1.r = rwork[i__6] * ab[i__7].r, z__1.i = rwork[i__6] * ab[i__7].i; ab[i__5].r = z__1.r, ab[i__5].i = z__1.i; } /* generate plane rotations to annihilate nonzero elements */ /* which have been generated above the band */ if (nr > 0) { zlargv_(&nr, &ab[(j1 + kun - 1) * ab_dim1 + 1], &inca, & work[j1 + kun], &kb1, &rwork[j1 + kun], &kb1); } /* apply plane rotations from the right */ i__4 = kb; for (l = 1; l <= i__4; ++l) { if (j2 + l - 1 > *m) { nrt = nr - 1; } else { nrt = nr; } if (nrt > 0) { zlartv_(&nrt, &ab[l + 1 + (j1 + kun - 1) * ab_dim1], & inca, &ab[l + (j1 + kun) * ab_dim1], &inca, & rwork[j1 + kun], &work[j1 + kun], &kb1); } } if (ml == ml0 && mu > mu0) { if (mu <= *n - i__ + 1) { /* generate plane rotation to annihilate a(i,i+mu-1) */ /* within the band, and apply rotation from the right */ zlartg_(&ab[*ku - mu + 3 + (i__ + mu - 2) * ab_dim1], &ab[*ku - mu + 2 + (i__ + mu - 1) * ab_dim1], &rwork[i__ + mu - 1], &work[i__ + mu - 1], & ra); i__4 = *ku - mu + 3 + (i__ + mu - 2) * ab_dim1; ab[i__4].r = ra.r, ab[i__4].i = ra.i; /* Computing MIN */ i__3 = *kl + mu - 2, i__5 = *m - i__; i__4 = min(i__3,i__5); zrot_(&i__4, &ab[*ku - mu + 4 + (i__ + mu - 2) * ab_dim1], &c__1, &ab[*ku - mu + 3 + (i__ + mu - 1) * ab_dim1], &c__1, &rwork[i__ + mu - 1], &work[i__ + mu - 1]); } ++nr; j1 -= kb1; } if (wantpt) { /* accumulate product of plane rotations in P' */ i__4 = j2; i__3 = kb1; for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) { d_cnjg(&z__1, &work[j + kun]); zrot_(n, &pt[j + kun - 1 + pt_dim1], ldpt, &pt[j + kun + pt_dim1], ldpt, &rwork[j + kun], &z__1); } } if (j2 + kb > *m) { /* adjust J2 to keep within the bounds of the matrix */ --nr; j2 -= kb1; } i__3 = j2; i__4 = kb1; for (j = j1; i__4 < 0 ? 
j >= i__3 : j <= i__3; j += i__4) { /* create nonzero element a(j+kl+ku,j+ku-1) below the */ /* band and store it in WORK(1:n) */ i__5 = j + kb; i__6 = j + kun; i__7 = klu1 + (j + kun) * ab_dim1; z__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[ i__7].i, z__1.i = work[i__6].r * ab[i__7].i + work[i__6].i * ab[i__7].r; work[i__5].r = z__1.r, work[i__5].i = z__1.i; i__5 = klu1 + (j + kun) * ab_dim1; i__6 = j + kun; i__7 = klu1 + (j + kun) * ab_dim1; z__1.r = rwork[i__6] * ab[i__7].r, z__1.i = rwork[i__6] * ab[i__7].i; ab[i__5].r = z__1.r, ab[i__5].i = z__1.i; } if (ml > ml0) { --ml; } else { --mu; } } } } if (*ku == 0 && *kl > 0) { /* A has been reduced to complex lower bidiagonal form */ /* Transform lower bidiagonal form to upper bidiagonal by applying */ /* plane rotations from the left, overwriting superdiagonal */ /* elements on subdiagonal elements */ /* Computing MIN */ i__2 = *m - 1; i__1 = min(i__2,*n); for (i__ = 1; i__ <= i__1; ++i__) { zlartg_(&ab[i__ * ab_dim1 + 1], &ab[i__ * ab_dim1 + 2], &rc, &rs, &ra); i__2 = i__ * ab_dim1 + 1; ab[i__2].r = ra.r, ab[i__2].i = ra.i; if (i__ < *n) { i__2 = i__ * ab_dim1 + 2; i__4 = (i__ + 1) * ab_dim1 + 1; z__1.r = rs.r * ab[i__4].r - rs.i * ab[i__4].i, z__1.i = rs.r * ab[i__4].i + rs.i * ab[i__4].r; ab[i__2].r = z__1.r, ab[i__2].i = z__1.i; i__2 = (i__ + 1) * ab_dim1 + 1; i__4 = (i__ + 1) * ab_dim1 + 1; z__1.r = rc * ab[i__4].r, z__1.i = rc * ab[i__4].i; ab[i__2].r = z__1.r, ab[i__2].i = z__1.i; } if (wantq) { d_cnjg(&z__1, &rs); zrot_(m, &q[i__ * q_dim1 + 1], &c__1, &q[(i__ + 1) * q_dim1 + 1], &c__1, &rc, &z__1); } if (wantc) { zrot_(ncc, &c__[i__ + c_dim1], ldc, &c__[i__ + 1 + c_dim1], ldc, &rc, &rs); } } } else { /* A has been reduced to complex upper bidiagonal form or is */ /* diagonal */ if (*ku > 0 && *m < *n) { /* Annihilate a(m,m+1) by applying plane rotations from the */ /* right */ i__1 = *ku + (*m + 1) * ab_dim1; rb.r = ab[i__1].r, rb.i = ab[i__1].i; for (i__ = *m; i__ >= 1; --i__) { zlartg_(&ab[*ku + 1 + i__ * ab_dim1], &rb, &rc, &rs, &ra); i__1 = *ku + 1 + i__ * ab_dim1; ab[i__1].r = ra.r, ab[i__1].i = ra.i; if (i__ > 1) { d_cnjg(&z__3, &rs); z__2.r = -z__3.r, z__2.i = -z__3.i; i__1 = *ku + i__ * ab_dim1; z__1.r = z__2.r * ab[i__1].r - z__2.i * ab[i__1].i, z__1.i = z__2.r * ab[i__1].i + z__2.i * ab[i__1] .r; rb.r = z__1.r, rb.i = z__1.i; i__1 = *ku + i__ * ab_dim1; i__2 = *ku + i__ * ab_dim1; z__1.r = rc * ab[i__2].r, z__1.i = rc * ab[i__2].i; ab[i__1].r = z__1.r, ab[i__1].i = z__1.i; } if (wantpt) { d_cnjg(&z__1, &rs); zrot_(n, &pt[i__ + pt_dim1], ldpt, &pt[*m + 1 + pt_dim1], ldpt, &rc, &z__1); } } } } /* Make diagonal and superdiagonal elements real, storing them in D */ /* and E */ i__1 = *ku + 1 + ab_dim1; t.r = ab[i__1].r, t.i = ab[i__1].i; i__1 = minmn; for (i__ = 1; i__ <= i__1; ++i__) { abst = z_abs(&t); d__[i__] = abst; if (abst != 0.) 
{ z__1.r = t.r / abst, z__1.i = t.i / abst; t.r = z__1.r, t.i = z__1.i; } else { t.r = 1., t.i = 0.; } if (wantq) { zscal_(m, &t, &q[i__ * q_dim1 + 1], &c__1); } if (wantc) { d_cnjg(&z__1, &t); zscal_(ncc, &z__1, &c__[i__ + c_dim1], ldc); } if (i__ < minmn) { if (*ku == 0 && *kl == 0) { e[i__] = 0.; i__2 = (i__ + 1) * ab_dim1 + 1; t.r = ab[i__2].r, t.i = ab[i__2].i; } else { if (*ku == 0) { i__2 = i__ * ab_dim1 + 2; d_cnjg(&z__2, &t); z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, z__1.i = ab[i__2].r * z__2.i + ab[i__2].i * z__2.r; t.r = z__1.r, t.i = z__1.i; } else { i__2 = *ku + (i__ + 1) * ab_dim1; d_cnjg(&z__2, &t); z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, z__1.i = ab[i__2].r * z__2.i + ab[i__2].i * z__2.r; t.r = z__1.r, t.i = z__1.i; } abst = z_abs(&t); e[i__] = abst; if (abst != 0.) { z__1.r = t.r / abst, z__1.i = t.i / abst; t.r = z__1.r, t.i = z__1.i; } else { t.r = 1., t.i = 0.; } if (wantpt) { zscal_(n, &t, &pt[i__ + 1 + pt_dim1], ldpt); } i__2 = *ku + 1 + (i__ + 1) * ab_dim1; d_cnjg(&z__2, &t); z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, z__1.i = ab[i__2].r * z__2.i + ab[i__2].i * z__2.r; t.r = z__1.r, t.i = z__1.i; } } } return 0; /* End of ZGBBRD */ } /* zgbbrd_ */
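/* ------------------------------------------------------------------
   Example (illustrative sketch, not part of the original sources):
   reducing a 4-by-4 complex tridiagonal band matrix (KL = KU = 1,
   LAPACK band storage) to real bidiagonal form with zgbbrd_.
   VECT = 'N' and NCC = 0, so Q, PT and C are not referenced and only
   the bidiagonal output D/E is of interest.  Types follow the f2c
   conventions used above; the driver name is made up.
   ------------------------------------------------------------------ */
#include <stdio.h>
#include "f2c.h"   /* assumed f2c-style typedefs */

extern int zgbbrd_(char *vect, integer *m, integer *n, integer *ncc,
                   integer *kl, integer *ku, doublecomplex *ab, integer *ldab,
                   doublereal *d__, doublereal *e, doublecomplex *q,
                   integer *ldq, doublecomplex *pt, integer *ldpt,
                   doublecomplex *c__, integer *ldc, doublecomplex *work,
                   doublereal *rwork, integer *info);

int zgbbrd_example(void)
{
    integer m = 4, n = 4, ncc = 0, kl = 1, ku = 1;
    integer ldab = 3, ldq = 1, ldpt = 1, ldc = 1, info;
    /* AB(ku+1+i-j, j) = A(i,j); here A = tridiag(-1, 2, -1) */
    doublecomplex ab[12] = {
        { 0., 0.}, {2., 0.}, {-1., 0.},   /* column 1 */
        {-1., 0.}, {2., 0.}, {-1., 0.},   /* column 2 */
        {-1., 0.}, {2., 0.}, {-1., 0.},   /* column 3 */
        {-1., 0.}, {2., 0.}, { 0., 0.}    /* column 4 */
    };
    doublereal d[4], e[3], rwork[4];
    doublecomplex q[1], pt[1], c[1], work[4];
    integer i;

    zgbbrd_("N", &m, &n, &ncc, &kl, &ku, ab, &ldab, d, e,
            q, &ldq, pt, &ldpt, c, &ldc, work, rwork, &info);
    printf("info = %ld\n", (long) info);
    for (i = 0; i < 4; ++i) printf("d[%ld] = %g\n", (long) i, d[i]);
    for (i = 0; i < 3; ++i) printf("e[%ld] = %g\n", (long) i, e[i]);
    return (int) info;
}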
/* Subroutine */ int zptrfs_(char *uplo, integer *n, integer *nrhs, doublereal *d__, doublecomplex *e, doublereal *df, doublecomplex *ef, doublecomplex *b, integer *ldb, doublecomplex *x, integer *ldx, doublereal *ferr, doublereal *berr, doublecomplex *work, doublereal * rwork, integer *info) { /* System generated locals */ integer b_dim1, b_offset, x_dim1, x_offset, i__1, i__2, i__3, i__4, i__5, i__6; doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8, d__9, d__10, d__11, d__12; doublecomplex z__1, z__2, z__3; /* Builtin functions */ double d_imag(doublecomplex *); void d_cnjg(doublecomplex *, doublecomplex *); double z_abs(doublecomplex *); /* Local variables */ integer i__, j; doublereal s; doublecomplex bi, cx, dx, ex; integer ix, nz; doublereal eps, safe1, safe2; extern logical lsame_(char *, char *); integer count; logical upper; extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *); extern doublereal dlamch_(char *); extern integer idamax_(integer *, doublereal *, integer *); doublereal safmin; extern /* Subroutine */ int xerbla_(char *, integer *); doublereal lstres; extern /* Subroutine */ int zpttrs_(char *, integer *, integer *, doublereal *, doublecomplex *, doublecomplex *, integer *, integer *); /* -- LAPACK routine (version 3.1) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZPTRFS improves the computed solution to a system of linear */ /* equations when the coefficient matrix is Hermitian positive definite */ /* and tridiagonal, and provides error bounds and backward error */ /* estimates for the solution. */ /* Arguments */ /* ========= */ /* UPLO (input) CHARACTER*1 */ /* Specifies whether the superdiagonal or the subdiagonal of the */ /* tridiagonal matrix A is stored and the form of the */ /* factorization: */ /* = 'U': E is the superdiagonal of A, and A = U**H*D*U; */ /* = 'L': E is the subdiagonal of A, and A = L*D*L**H. */ /* (The two forms are equivalent if A is real.) */ /* N (input) INTEGER */ /* The order of the matrix A. N >= 0. */ /* NRHS (input) INTEGER */ /* The number of right hand sides, i.e., the number of columns */ /* of the matrix B. NRHS >= 0. */ /* D (input) DOUBLE PRECISION array, dimension (N) */ /* The n real diagonal elements of the tridiagonal matrix A. */ /* E (input) COMPLEX*16 array, dimension (N-1) */ /* The (n-1) off-diagonal elements of the tridiagonal matrix A */ /* (see UPLO). */ /* DF (input) DOUBLE PRECISION array, dimension (N) */ /* The n diagonal elements of the diagonal matrix D from */ /* the factorization computed by ZPTTRF. */ /* EF (input) COMPLEX*16 array, dimension (N-1) */ /* The (n-1) off-diagonal elements of the unit bidiagonal */ /* factor U or L from the factorization computed by ZPTTRF */ /* (see UPLO). */ /* B (input) COMPLEX*16 array, dimension (LDB,NRHS) */ /* The right hand side matrix B. */ /* LDB (input) INTEGER */ /* The leading dimension of the array B. LDB >= max(1,N). */ /* X (input/output) COMPLEX*16 array, dimension (LDX,NRHS) */ /* On entry, the solution matrix X, as computed by ZPTTRS. */ /* On exit, the improved solution matrix X. */ /* LDX (input) INTEGER */ /* The leading dimension of the array X. LDX >= max(1,N). */ /* FERR (output) DOUBLE PRECISION array, dimension (NRHS) */ /* The forward error bound for each solution vector */ /* X(j) (the j-th column of the solution matrix X). 
*/ /* If XTRUE is the true solution corresponding to X(j), FERR(j) */ /* is an estimated upper bound for the magnitude of the largest */ /* element in (X(j) - XTRUE) divided by the magnitude of the */ /* largest element in X(j). */ /* BERR (output) DOUBLE PRECISION array, dimension (NRHS) */ /* The componentwise relative backward error of each solution */ /* vector X(j) (i.e., the smallest relative change in */ /* any element of A or B that makes X(j) an exact solution). */ /* WORK (workspace) COMPLEX*16 array, dimension (N) */ /* RWORK (workspace) DOUBLE PRECISION array, dimension (N) */ /* INFO (output) INTEGER */ /* = 0: successful exit */ /* < 0: if INFO = -i, the i-th argument had an illegal value */ /* Internal Parameters */ /* =================== */ /* ITMAX is the maximum number of steps of iterative refinement. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Statement Functions .. */ /* .. */ /* .. Statement Function definitions .. */ /* .. */ /* .. Executable Statements .. */ /* Test the input parameters. */ /* Parameter adjustments */ --d__; --e; --df; --ef; b_dim1 = *ldb; b_offset = 1 + b_dim1; b -= b_offset; x_dim1 = *ldx; x_offset = 1 + x_dim1; x -= x_offset; --ferr; --berr; --work; --rwork; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*nrhs < 0) { *info = -3; } else if (*ldb < max(1,*n)) { *info = -9; } else if (*ldx < max(1,*n)) { *info = -11; } if (*info != 0) { i__1 = -(*info); xerbla_("ZPTRFS", &i__1); return 0; } /* Quick return if possible */ if (*n == 0 || *nrhs == 0) { i__1 = *nrhs; for (j = 1; j <= i__1; ++j) { ferr[j] = 0.; berr[j] = 0.; /* L10: */ } return 0; } /* NZ = maximum number of nonzero elements in each row of A, plus 1 */ nz = 4; eps = dlamch_("Epsilon"); safmin = dlamch_("Safe minimum"); safe1 = nz * safmin; safe2 = safe1 / eps; /* Do for each right hand side */ i__1 = *nrhs; for (j = 1; j <= i__1; ++j) { count = 1; lstres = 3.; L20: /* Loop until stopping criterion is satisfied. */ /* Compute residual R = B - A * X. Also compute */ /* abs(A)*abs(x) + abs(b) for use in the backward error bound. 
*/ if (upper) { if (*n == 1) { i__2 = j * b_dim1 + 1; bi.r = b[i__2].r, bi.i = b[i__2].i; i__2 = j * x_dim1 + 1; z__1.r = d__[1] * x[i__2].r, z__1.i = d__[1] * x[i__2].i; dx.r = z__1.r, dx.i = z__1.i; z__1.r = bi.r - dx.r, z__1.i = bi.i - dx.i; work[1].r = z__1.r, work[1].i = z__1.i; rwork[1] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&bi), abs(d__2)) + ((d__3 = dx.r, abs(d__3)) + (d__4 = d_imag(&dx), abs(d__4))); } else { i__2 = j * b_dim1 + 1; bi.r = b[i__2].r, bi.i = b[i__2].i; i__2 = j * x_dim1 + 1; z__1.r = d__[1] * x[i__2].r, z__1.i = d__[1] * x[i__2].i; dx.r = z__1.r, dx.i = z__1.i; i__2 = j * x_dim1 + 2; z__1.r = e[1].r * x[i__2].r - e[1].i * x[i__2].i, z__1.i = e[ 1].r * x[i__2].i + e[1].i * x[i__2].r; ex.r = z__1.r, ex.i = z__1.i; z__2.r = bi.r - dx.r, z__2.i = bi.i - dx.i; z__1.r = z__2.r - ex.r, z__1.i = z__2.i - ex.i; work[1].r = z__1.r, work[1].i = z__1.i; i__2 = j * x_dim1 + 2; rwork[1] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&bi), abs(d__2)) + ((d__3 = dx.r, abs(d__3)) + (d__4 = d_imag(&dx), abs(d__4))) + ((d__5 = e[1].r, abs(d__5)) + (d__6 = d_imag(&e[1]), abs(d__6))) * ((d__7 = x[ i__2].r, abs(d__7)) + (d__8 = d_imag(&x[j * x_dim1 + 2]), abs(d__8))); i__2 = *n - 1; for (i__ = 2; i__ <= i__2; ++i__) { i__3 = i__ + j * b_dim1; bi.r = b[i__3].r, bi.i = b[i__3].i; d_cnjg(&z__2, &e[i__ - 1]); i__3 = i__ - 1 + j * x_dim1; z__1.r = z__2.r * x[i__3].r - z__2.i * x[i__3].i, z__1.i = z__2.r * x[i__3].i + z__2.i * x[i__3].r; cx.r = z__1.r, cx.i = z__1.i; i__3 = i__; i__4 = i__ + j * x_dim1; z__1.r = d__[i__3] * x[i__4].r, z__1.i = d__[i__3] * x[ i__4].i; dx.r = z__1.r, dx.i = z__1.i; i__3 = i__; i__4 = i__ + 1 + j * x_dim1; z__1.r = e[i__3].r * x[i__4].r - e[i__3].i * x[i__4].i, z__1.i = e[i__3].r * x[i__4].i + e[i__3].i * x[ i__4].r; ex.r = z__1.r, ex.i = z__1.i; i__3 = i__; z__3.r = bi.r - cx.r, z__3.i = bi.i - cx.i; z__2.r = z__3.r - dx.r, z__2.i = z__3.i - dx.i; z__1.r = z__2.r - ex.r, z__1.i = z__2.i - ex.i; work[i__3].r = z__1.r, work[i__3].i = z__1.i; i__3 = i__ - 1; i__4 = i__ - 1 + j * x_dim1; i__5 = i__; i__6 = i__ + 1 + j * x_dim1; rwork[i__] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(& bi), abs(d__2)) + ((d__3 = e[i__3].r, abs(d__3)) + (d__4 = d_imag(&e[i__ - 1]), abs(d__4))) * (( d__5 = x[i__4].r, abs(d__5)) + (d__6 = d_imag(&x[ i__ - 1 + j * x_dim1]), abs(d__6))) + ((d__7 = dx.r, abs(d__7)) + (d__8 = d_imag(&dx), abs(d__8)) ) + ((d__9 = e[i__5].r, abs(d__9)) + (d__10 = d_imag(&e[i__]), abs(d__10))) * ((d__11 = x[i__6] .r, abs(d__11)) + (d__12 = d_imag(&x[i__ + 1 + j * x_dim1]), abs(d__12))); /* L30: */ } i__2 = *n + j * b_dim1; bi.r = b[i__2].r, bi.i = b[i__2].i; d_cnjg(&z__2, &e[*n - 1]); i__2 = *n - 1 + j * x_dim1; z__1.r = z__2.r * x[i__2].r - z__2.i * x[i__2].i, z__1.i = z__2.r * x[i__2].i + z__2.i * x[i__2].r; cx.r = z__1.r, cx.i = z__1.i; i__2 = *n; i__3 = *n + j * x_dim1; z__1.r = d__[i__2] * x[i__3].r, z__1.i = d__[i__2] * x[i__3] .i; dx.r = z__1.r, dx.i = z__1.i; i__2 = *n; z__2.r = bi.r - cx.r, z__2.i = bi.i - cx.i; z__1.r = z__2.r - dx.r, z__1.i = z__2.i - dx.i; work[i__2].r = z__1.r, work[i__2].i = z__1.i; i__2 = *n - 1; i__3 = *n - 1 + j * x_dim1; rwork[*n] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&bi), abs(d__2)) + ((d__3 = e[i__2].r, abs(d__3)) + (d__4 = d_imag(&e[*n - 1]), abs(d__4))) * ((d__5 = x[i__3].r, abs(d__5)) + (d__6 = d_imag(&x[*n - 1 + j * x_dim1]), abs(d__6))) + ((d__7 = dx.r, abs(d__7)) + (d__8 = d_imag(&dx), abs(d__8))); } } else { if (*n == 1) { i__2 = j * b_dim1 + 1; bi.r = b[i__2].r, bi.i = b[i__2].i; i__2 = j * x_dim1 + 1; 
z__1.r = d__[1] * x[i__2].r, z__1.i = d__[1] * x[i__2].i; dx.r = z__1.r, dx.i = z__1.i; z__1.r = bi.r - dx.r, z__1.i = bi.i - dx.i; work[1].r = z__1.r, work[1].i = z__1.i; rwork[1] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&bi), abs(d__2)) + ((d__3 = dx.r, abs(d__3)) + (d__4 = d_imag(&dx), abs(d__4))); } else { i__2 = j * b_dim1 + 1; bi.r = b[i__2].r, bi.i = b[i__2].i; i__2 = j * x_dim1 + 1; z__1.r = d__[1] * x[i__2].r, z__1.i = d__[1] * x[i__2].i; dx.r = z__1.r, dx.i = z__1.i; d_cnjg(&z__2, &e[1]); i__2 = j * x_dim1 + 2; z__1.r = z__2.r * x[i__2].r - z__2.i * x[i__2].i, z__1.i = z__2.r * x[i__2].i + z__2.i * x[i__2].r; ex.r = z__1.r, ex.i = z__1.i; z__2.r = bi.r - dx.r, z__2.i = bi.i - dx.i; z__1.r = z__2.r - ex.r, z__1.i = z__2.i - ex.i; work[1].r = z__1.r, work[1].i = z__1.i; i__2 = j * x_dim1 + 2; rwork[1] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&bi), abs(d__2)) + ((d__3 = dx.r, abs(d__3)) + (d__4 = d_imag(&dx), abs(d__4))) + ((d__5 = e[1].r, abs(d__5)) + (d__6 = d_imag(&e[1]), abs(d__6))) * ((d__7 = x[ i__2].r, abs(d__7)) + (d__8 = d_imag(&x[j * x_dim1 + 2]), abs(d__8))); i__2 = *n - 1; for (i__ = 2; i__ <= i__2; ++i__) { i__3 = i__ + j * b_dim1; bi.r = b[i__3].r, bi.i = b[i__3].i; i__3 = i__ - 1; i__4 = i__ - 1 + j * x_dim1; z__1.r = e[i__3].r * x[i__4].r - e[i__3].i * x[i__4].i, z__1.i = e[i__3].r * x[i__4].i + e[i__3].i * x[ i__4].r; cx.r = z__1.r, cx.i = z__1.i; i__3 = i__; i__4 = i__ + j * x_dim1; z__1.r = d__[i__3] * x[i__4].r, z__1.i = d__[i__3] * x[ i__4].i; dx.r = z__1.r, dx.i = z__1.i; d_cnjg(&z__2, &e[i__]); i__3 = i__ + 1 + j * x_dim1; z__1.r = z__2.r * x[i__3].r - z__2.i * x[i__3].i, z__1.i = z__2.r * x[i__3].i + z__2.i * x[i__3].r; ex.r = z__1.r, ex.i = z__1.i; i__3 = i__; z__3.r = bi.r - cx.r, z__3.i = bi.i - cx.i; z__2.r = z__3.r - dx.r, z__2.i = z__3.i - dx.i; z__1.r = z__2.r - ex.r, z__1.i = z__2.i - ex.i; work[i__3].r = z__1.r, work[i__3].i = z__1.i; i__3 = i__ - 1; i__4 = i__ - 1 + j * x_dim1; i__5 = i__; i__6 = i__ + 1 + j * x_dim1; rwork[i__] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(& bi), abs(d__2)) + ((d__3 = e[i__3].r, abs(d__3)) + (d__4 = d_imag(&e[i__ - 1]), abs(d__4))) * (( d__5 = x[i__4].r, abs(d__5)) + (d__6 = d_imag(&x[ i__ - 1 + j * x_dim1]), abs(d__6))) + ((d__7 = dx.r, abs(d__7)) + (d__8 = d_imag(&dx), abs(d__8)) ) + ((d__9 = e[i__5].r, abs(d__9)) + (d__10 = d_imag(&e[i__]), abs(d__10))) * ((d__11 = x[i__6] .r, abs(d__11)) + (d__12 = d_imag(&x[i__ + 1 + j * x_dim1]), abs(d__12))); /* L40: */ } i__2 = *n + j * b_dim1; bi.r = b[i__2].r, bi.i = b[i__2].i; i__2 = *n - 1; i__3 = *n - 1 + j * x_dim1; z__1.r = e[i__2].r * x[i__3].r - e[i__2].i * x[i__3].i, z__1.i = e[i__2].r * x[i__3].i + e[i__2].i * x[i__3] .r; cx.r = z__1.r, cx.i = z__1.i; i__2 = *n; i__3 = *n + j * x_dim1; z__1.r = d__[i__2] * x[i__3].r, z__1.i = d__[i__2] * x[i__3] .i; dx.r = z__1.r, dx.i = z__1.i; i__2 = *n; z__2.r = bi.r - cx.r, z__2.i = bi.i - cx.i; z__1.r = z__2.r - dx.r, z__1.i = z__2.i - dx.i; work[i__2].r = z__1.r, work[i__2].i = z__1.i; i__2 = *n - 1; i__3 = *n - 1 + j * x_dim1; rwork[*n] = (d__1 = bi.r, abs(d__1)) + (d__2 = d_imag(&bi), abs(d__2)) + ((d__3 = e[i__2].r, abs(d__3)) + (d__4 = d_imag(&e[*n - 1]), abs(d__4))) * ((d__5 = x[i__3].r, abs(d__5)) + (d__6 = d_imag(&x[*n - 1 + j * x_dim1]), abs(d__6))) + ((d__7 = dx.r, abs(d__7)) + (d__8 = d_imag(&dx), abs(d__8))); } } /* Compute componentwise relative backward error from formula */ /* max(i) ( abs(R(i)) / ( abs(A)*abs(X) + abs(B) )(i) ) */ /* where abs(Z) is the componentwise absolute value of the matrix */ /* or 
vector Z. If the i-th component of the denominator is less */ /* than SAFE2, then SAFE1 is added to the i-th components of the */ /* numerator and denominator before dividing. */ s = 0.; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { if (rwork[i__] > safe2) { /* Computing MAX */ i__3 = i__; d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 = d_imag(&work[i__]), abs(d__2))) / rwork[i__]; s = max(d__3,d__4); } else { /* Computing MAX */ i__3 = i__; d__3 = s, d__4 = ((d__1 = work[i__3].r, abs(d__1)) + (d__2 = d_imag(&work[i__]), abs(d__2)) + safe1) / (rwork[i__] + safe1); s = max(d__3,d__4); } /* L50: */ } berr[j] = s; /* Test stopping criterion. Continue iterating if */ /* 1) The residual BERR(J) is larger than machine epsilon, and */ /* 2) BERR(J) decreased by at least a factor of 2 during the */ /* last iteration, and */ /* 3) At most ITMAX iterations tried. */ if (berr[j] > eps && berr[j] * 2. <= lstres && count <= 5) { /* Update solution and try again. */ zpttrs_(uplo, n, &c__1, &df[1], &ef[1], &work[1], n, info); zaxpy_(n, &c_b16, &work[1], &c__1, &x[j * x_dim1 + 1], &c__1); lstres = berr[j]; ++count; goto L20; } /* Bound error from formula */ /* norm(X - XTRUE) / norm(X) .le. FERR = */ /* norm( abs(inv(A))* */ /* ( abs(R) + NZ*EPS*( abs(A)*abs(X)+abs(B) ))) / norm(X) */ /* where */ /* norm(Z) is the magnitude of the largest component of Z */ /* inv(A) is the inverse of A */ /* abs(Z) is the componentwise absolute value of the matrix or */ /* vector Z */ /* NZ is the maximum number of nonzeros in any row of A, plus 1 */ /* EPS is machine epsilon */ /* The i-th component of abs(R)+NZ*EPS*(abs(A)*abs(X)+abs(B)) */ /* is incremented by SAFE1 if the i-th component of */ /* abs(A)*abs(X) + abs(B) is less than SAFE2. */ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { if (rwork[i__] > safe2) { i__3 = i__; rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 = d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__] ; } else { i__3 = i__; rwork[i__] = (d__1 = work[i__3].r, abs(d__1)) + (d__2 = d_imag(&work[i__]), abs(d__2)) + nz * eps * rwork[i__] + safe1; } /* L60: */ } ix = idamax_(n, &rwork[1], &c__1); ferr[j] = rwork[ix]; /* Estimate the norm of inv(A). */ /* Solve M(A) * x = e, where M(A) = (m(i,j)) is given by */ /* m(i,j) = abs(A(i,j)), i = j, */ /* m(i,j) = -abs(A(i,j)), i .ne. j, */ /* and e = [ 1, 1, ..., 1 ]'. Note M(A) = M(L)*D*M(L)'. */ /* Solve M(L) * x = e. */ rwork[1] = 1.; i__2 = *n; for (i__ = 2; i__ <= i__2; ++i__) { rwork[i__] = rwork[i__ - 1] * z_abs(&ef[i__ - 1]) + 1.; /* L70: */ } /* Solve D * M(L)' * x = b. */ rwork[*n] /= df[*n]; for (i__ = *n - 1; i__ >= 1; --i__) { rwork[i__] = rwork[i__] / df[i__] + rwork[i__ + 1] * z_abs(&ef[ i__]); /* L80: */ } /* Compute norm(inv(A)) = max(x(i)), 1<=i<=n. */ ix = idamax_(n, &rwork[1], &c__1); ferr[j] *= (d__1 = rwork[ix], abs(d__1)); /* Normalize error. */ lstres = 0.; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { /* Computing MAX */ d__1 = lstres, d__2 = z_abs(&x[i__ + j * x_dim1]); lstres = max(d__1,d__2); /* L90: */ } if (lstres != 0.) { ferr[j] /= lstres; } /* L100: */ } return 0; /* End of ZPTRFS */ } /* zptrfs_ */
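/* ------------------------------------------------------------------
   Example (illustrative sketch, not part of the original sources):
   refining a solution with zptrfs_ for the 3-by-3 Hermitian positive
   definite tridiagonal matrix tridiag(-1, 2, -1) with UPLO = 'L'.
   D/E describe the original matrix, DF/EF are its L*D*L**H factors as
   ZPTTRF would return them (worked out by hand), and X already holds
   the exact solution of A*x = b, so FERR and BERR should come back
   tiny.  ZPTTRS from the same library must be linked, since zptrfs_
   calls it internally.  Types follow the f2c conventions used above;
   the driver name is made up.
   ------------------------------------------------------------------ */
#include <stdio.h>
#include "f2c.h"   /* assumed f2c-style typedefs */

extern int zptrfs_(char *uplo, integer *n, integer *nrhs, doublereal *d__,
                   doublecomplex *e, doublereal *df, doublecomplex *ef,
                   doublecomplex *b, integer *ldb, doublecomplex *x,
                   integer *ldx, doublereal *ferr, doublereal *berr,
                   doublecomplex *work, doublereal *rwork, integer *info);

int zptrfs_example(void)
{
    integer n = 3, nrhs = 1, ldb = 3, ldx = 3, info;
    doublereal d[3] = { 2., 2., 2. };                        /* diagonal of A       */
    doublecomplex e[2] = { {-1., 0.}, {-1., 0.} };           /* subdiagonal of A    */
    doublereal df[3] = { 2., 1.5, 4. / 3. };                 /* D from ZPTTRF       */
    doublecomplex ef[2] = { {-0.5, 0.}, {-2. / 3., 0.} };    /* subdiag of unit L   */
    doublecomplex b[3] = { {1., 0.}, {0., 0.}, {1., 0.} };
    doublecomplex x[3] = { {1., 0.}, {1., 0.}, {1., 0.} };   /* exact solution of A*x = b */
    doublereal ferr[1], berr[1], rwork[3];
    doublecomplex work[3];

    zptrfs_("L", &n, &nrhs, d, e, df, ef, b, &ldb, x, &ldx,
            ferr, berr, work, rwork, &info);
    printf("info = %ld, ferr = %g, berr = %g\n", (long) info, ferr[0], berr[0]);
    return (int) info;
}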
doublereal zlanhp_(char *norm, char *uplo, integer *n, doublecomplex *ap, doublereal *work) { /* System generated locals */ integer i__1, i__2; doublereal ret_val, d__1, d__2, d__3; /* Builtin functions */ double z_abs(doublecomplex *), sqrt(doublereal); /* Local variables */ integer i__, j, k; doublereal sum, absa, scale; extern logical lsame_(char *, char *); doublereal value; extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *, doublereal *, doublereal *); /* -- LAPACK auxiliary routine (version 3.2) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZLANHP returns the value of the one norm, or the Frobenius norm, or */ /* the infinity norm, or the element of largest absolute value of a */ /* complex hermitian matrix A, supplied in packed form. */ /* Description */ /* =========== */ /* ZLANHP returns the value */ /* ZLANHP = ( max(abs(A(i,j))), NORM = 'M' or 'm' */ /* ( */ /* ( norm1(A), NORM = '1', 'O' or 'o' */ /* ( */ /* ( normI(A), NORM = 'I' or 'i' */ /* ( */ /* ( normF(A), NORM = 'F', 'f', 'E' or 'e' */ /* where norm1 denotes the one norm of a matrix (maximum column sum), */ /* normI denotes the infinity norm of a matrix (maximum row sum) and */ /* normF denotes the Frobenius norm of a matrix (square root of sum of */ /* squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. */ /* Arguments */ /* ========= */ /* NORM (input) CHARACTER*1 */ /* Specifies the value to be returned in ZLANHP as described */ /* above. */ /* UPLO (input) CHARACTER*1 */ /* Specifies whether the upper or lower triangular part of the */ /* hermitian matrix A is supplied. */ /* = 'U': Upper triangular part of A is supplied */ /* = 'L': Lower triangular part of A is supplied */ /* N (input) INTEGER */ /* The order of the matrix A. N >= 0. When N = 0, ZLANHP is */ /* set to zero. */ /* AP (input) COMPLEX*16 array, dimension (N*(N+1)/2) */ /* The upper or lower triangle of the hermitian matrix A, packed */ /* columnwise in a linear array. The j-th column of A is stored */ /* in the array AP as follows: */ /* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */ /* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */ /* Note that the imaginary parts of the diagonal elements need */ /* not be set and are assumed to be zero. */ /* WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), */ /* where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, */ /* WORK is not referenced. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Parameter adjustments */ --work; --ap; /* Function Body */ if (*n == 0) { value = 0.; } else if (lsame_(norm, "M")) { /* Find max(abs(A(i,j))). 
*/ value = 0.; if (lsame_(uplo, "U")) { k = 0; i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = k + j - 1; for (i__ = k + 1; i__ <= i__2; ++i__) { /* Computing MAX */ d__1 = value, d__2 = z_abs(&ap[i__]); value = max(d__1,d__2); /* L10: */ } k += j; /* Computing MAX */ i__2 = k; d__2 = value, d__3 = (d__1 = ap[i__2].r, abs(d__1)); value = max(d__2,d__3); /* L20: */ } } else { k = 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MAX */ i__2 = k; d__2 = value, d__3 = (d__1 = ap[i__2].r, abs(d__1)); value = max(d__2,d__3); i__2 = k + *n - j; for (i__ = k + 1; i__ <= i__2; ++i__) { /* Computing MAX */ d__1 = value, d__2 = z_abs(&ap[i__]); value = max(d__1,d__2); /* L30: */ } k = k + *n - j + 1; /* L40: */ } } } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') { /* Find normI(A) ( = norm1(A), since A is hermitian). */ value = 0.; k = 1; if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = 0.; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { absa = z_abs(&ap[k]); sum += absa; work[i__] += absa; ++k; /* L50: */ } i__2 = k; work[j] = sum + (d__1 = ap[i__2].r, abs(d__1)); ++k; /* L60: */ } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__1 = value, d__2 = work[i__]; value = max(d__1,d__2); /* L70: */ } } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 0.; /* L80: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = k; sum = work[j] + (d__1 = ap[i__2].r, abs(d__1)); ++k; i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { absa = z_abs(&ap[k]); sum += absa; work[i__] += absa; ++k; /* L90: */ } value = max(value,sum); /* L100: */ } } } else if (lsame_(norm, "F") || lsame_(norm, "E")) { /* Find normF(A). */ scale = 0.; sum = 1.; k = 2; if (lsame_(uplo, "U")) { i__1 = *n; for (j = 2; j <= i__1; ++j) { i__2 = j - 1; zlassq_(&i__2, &ap[k], &c__1, &scale, &sum); k += j; /* L110: */ } } else { i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { i__2 = *n - j; zlassq_(&i__2, &ap[k], &c__1, &scale, &sum); k = k + *n - j + 1; /* L120: */ } } sum *= 2; k = 1; i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = k; if (ap[i__2].r != 0.) { i__2 = k; absa = (d__1 = ap[i__2].r, abs(d__1)); if (scale < absa) { /* Computing 2nd power */ d__1 = scale / absa; sum = sum * (d__1 * d__1) + 1.; scale = absa; } else { /* Computing 2nd power */ d__1 = absa / scale; sum += d__1 * d__1; } } if (lsame_(uplo, "U")) { k = k + i__ + 1; } else { k = k + *n - i__ + 1; } /* L130: */ } value = scale * sqrt(sum); } ret_val = value; return ret_val; /* End of ZLANHP */ } /* zlanhp_ */
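/* ------------------------------------------------------------------
   Example (illustrative sketch, not part of the original sources):
   evaluating zlanhp_ on the 2-by-2 Hermitian matrix
       A = [ 2    1-i ]
           [ 1+i  3   ]
   stored in packed upper-triangular form.  For this matrix the
   max-abs, one-norm and Frobenius norm are 3, 3+sqrt(2) and sqrt(17).
   Types follow the f2c conventions used above; the driver name is
   made up.
   ------------------------------------------------------------------ */
#include <stdio.h>
#include "f2c.h"   /* assumed f2c-style typedefs */

extern doublereal zlanhp_(char *norm, char *uplo, integer *n,
                          doublecomplex *ap, doublereal *work);

int zlanhp_example(void)
{
    integer n = 2;
    /* packed upper triangle, column by column: A(1,1), A(1,2), A(2,2) */
    doublecomplex ap[3] = { {2., 0.}, {1., -1.}, {3., 0.} };
    doublereal work[2];   /* only referenced for the one/infinity norm */

    printf("max abs   = %g (expect 3)\n",        zlanhp_("M", "U", &n, ap, work));
    printf("one norm  = %g (expect 3+sqrt(2))\n", zlanhp_("O", "U", &n, ap, work));
    printf("Frobenius = %g (expect sqrt(17))\n",  zlanhp_("F", "U", &n, ap, work));
    return 0;
}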
/* Subroutine */ int zhptri_(char *uplo, integer *n, doublecomplex *ap, integer *ipiv, doublecomplex *work, integer *info) { /* System generated locals */ integer i__1, i__2, i__3; doublereal d__1; doublecomplex z__1, z__2; /* Builtin functions */ double z_abs(doublecomplex *); void d_cnjg(doublecomplex *, doublecomplex *); /* Local variables */ doublereal d__; integer j, k; doublereal t, ak; integer kc, kp, kx, kpc, npp; doublereal akp1; doublecomplex temp, akkp1; extern logical lsame_(char *, char *); extern /* Double Complex */ VOID zdotc_f2c_(doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *); integer kstep; logical upper; extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, doublecomplex *, integer *), zhpmv_(char *, integer *, doublecomplex *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *), zswap_( integer *, doublecomplex *, integer *, doublecomplex *, integer *) , xerbla_(char *, integer *); integer kcnext; /* -- LAPACK computational routine (version 3.4.0) -- */ /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */ /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */ /* November 2011 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Test the input parameters. */ /* Parameter adjustments */ --work; --ipiv; --ap; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } if (*info != 0) { i__1 = -(*info); xerbla_("ZHPTRI", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* Check that the diagonal matrix D is nonsingular. */ if (upper) { /* Upper triangular storage: examine D from bottom to top */ kp = *n * (*n + 1) / 2; for (*info = *n; *info >= 1; --(*info)) { i__1 = kp; if (ipiv[*info] > 0 && (ap[i__1].r == 0. && ap[i__1].i == 0.)) { return 0; } kp -= *info; /* L10: */ } } else { /* Lower triangular storage: examine D from top to bottom. */ kp = 1; i__1 = *n; for (*info = 1; *info <= i__1; ++(*info)) { i__2 = kp; if (ipiv[*info] > 0 && (ap[i__2].r == 0. && ap[i__2].i == 0.)) { return 0; } kp = kp + *n - *info + 1; /* L20: */ } } *info = 0; if (upper) { /* Compute inv(A) from the factorization A = U*D*U**H. */ /* K is the main loop index, increasing from 1 to N in steps of */ /* 1 or 2, depending on the size of the diagonal blocks. */ k = 1; kc = 1; L30: /* If K > N, exit from loop. */ if (k > *n) { goto L50; } kcnext = kc + k; if (ipiv[k] > 0) { /* 1 x 1 diagonal block */ /* Invert the diagonal block. */ i__1 = kc + k - 1; i__2 = kc + k - 1; d__1 = 1. / ap[i__2].r; ap[i__1].r = d__1; ap[i__1].i = 0.; // , expr subst /* Compute column K of the inverse. 
*/ if (k > 1) { i__1 = k - 1; zcopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1); i__1 = k - 1; z__1.r = -1.; z__1.i = -0.; // , expr subst zhpmv_(uplo, &i__1, &z__1, &ap[1], &work[1], &c__1, &c_b2, & ap[kc], &c__1); i__1 = kc + k - 1; i__2 = kc + k - 1; i__3 = k - 1; zdotc_f2c_(&z__2, &i__3, &work[1], &c__1, &ap[kc], &c__1); d__1 = z__2.r; z__1.r = ap[i__2].r - d__1; z__1.i = ap[i__2].i; // , expr subst ap[i__1].r = z__1.r; ap[i__1].i = z__1.i; // , expr subst } kstep = 1; } else { /* 2 x 2 diagonal block */ /* Invert the diagonal block. */ t = z_abs(&ap[kcnext + k - 1]); i__1 = kc + k - 1; ak = ap[i__1].r / t; i__1 = kcnext + k; akp1 = ap[i__1].r / t; i__1 = kcnext + k - 1; z__1.r = ap[i__1].r / t; z__1.i = ap[i__1].i / t; // , expr subst akkp1.r = z__1.r; akkp1.i = z__1.i; // , expr subst d__ = t * (ak * akp1 - 1.); i__1 = kc + k - 1; d__1 = akp1 / d__; ap[i__1].r = d__1; ap[i__1].i = 0.; // , expr subst i__1 = kcnext + k; d__1 = ak / d__; ap[i__1].r = d__1; ap[i__1].i = 0.; // , expr subst i__1 = kcnext + k - 1; z__2.r = -akkp1.r; z__2.i = -akkp1.i; // , expr subst z__1.r = z__2.r / d__; z__1.i = z__2.i / d__; // , expr subst ap[i__1].r = z__1.r; ap[i__1].i = z__1.i; // , expr subst /* Compute columns K and K+1 of the inverse. */ if (k > 1) { i__1 = k - 1; zcopy_(&i__1, &ap[kc], &c__1, &work[1], &c__1); i__1 = k - 1; z__1.r = -1.; z__1.i = -0.; // , expr subst zhpmv_(uplo, &i__1, &z__1, &ap[1], &work[1], &c__1, &c_b2, & ap[kc], &c__1); i__1 = kc + k - 1; i__2 = kc + k - 1; i__3 = k - 1; zdotc_f2c_(&z__2, &i__3, &work[1], &c__1, &ap[kc], &c__1); d__1 = z__2.r; z__1.r = ap[i__2].r - d__1; z__1.i = ap[i__2].i; // , expr subst ap[i__1].r = z__1.r; ap[i__1].i = z__1.i; // , expr subst i__1 = kcnext + k - 1; i__2 = kcnext + k - 1; i__3 = k - 1; zdotc_f2c_(&z__2, &i__3, &ap[kc], &c__1, &ap[kcnext], &c__1); z__1.r = ap[i__2].r - z__2.r; z__1.i = ap[i__2].i - z__2.i; // , expr subst ap[i__1].r = z__1.r; ap[i__1].i = z__1.i; // , expr subst i__1 = k - 1; zcopy_(&i__1, &ap[kcnext], &c__1, &work[1], &c__1); i__1 = k - 1; z__1.r = -1.; z__1.i = -0.; // , expr subst zhpmv_(uplo, &i__1, &z__1, &ap[1], &work[1], &c__1, &c_b2, & ap[kcnext], &c__1); i__1 = kcnext + k; i__2 = kcnext + k; i__3 = k - 1; zdotc_f2c_(&z__2, &i__3, &work[1], &c__1, &ap[kcnext], &c__1); d__1 = z__2.r; z__1.r = ap[i__2].r - d__1; z__1.i = ap[i__2].i; // , expr subst ap[i__1].r = z__1.r; ap[i__1].i = z__1.i; // , expr subst } kstep = 2; kcnext = kcnext + k + 1; } kp = (i__1 = ipiv[k], f2c_abs(i__1)); if (kp != k) { /* Interchange rows and columns K and KP in the leading */ /* submatrix A(1:k+1,1:k+1) */ kpc = (kp - 1) * kp / 2 + 1; i__1 = kp - 1; zswap_(&i__1, &ap[kc], &c__1, &ap[kpc], &c__1); kx = kpc + kp - 1; i__1 = k - 1; for (j = kp + 1; j <= i__1; ++j) { kx = kx + j - 1; d_cnjg(&z__1, &ap[kc + j - 1]); temp.r = z__1.r; temp.i = z__1.i; // , expr subst i__2 = kc + j - 1; d_cnjg(&z__1, &ap[kx]); ap[i__2].r = z__1.r; ap[i__2].i = z__1.i; // , expr subst i__2 = kx; ap[i__2].r = temp.r; ap[i__2].i = temp.i; // , expr subst /* L40: */ } i__1 = kc + kp - 1; d_cnjg(&z__1, &ap[kc + kp - 1]); ap[i__1].r = z__1.r; ap[i__1].i = z__1.i; // , expr subst i__1 = kc + k - 1; temp.r = ap[i__1].r; temp.i = ap[i__1].i; // , expr subst i__1 = kc + k - 1; i__2 = kpc + kp - 1; ap[i__1].r = ap[i__2].r; ap[i__1].i = ap[i__2].i; // , expr subst i__1 = kpc + kp - 1; ap[i__1].r = temp.r; ap[i__1].i = temp.i; // , expr subst if (kstep == 2) { i__1 = kc + k + k - 1; temp.r = ap[i__1].r; temp.i = ap[i__1].i; // , expr subst i__1 = kc + k + k - 1; i__2 = kc + 
k + kp - 1; ap[i__1].r = ap[i__2].r; ap[i__1].i = ap[i__2].i; // , expr subst i__1 = kc + k + kp - 1; ap[i__1].r = temp.r; ap[i__1].i = temp.i; // , expr subst } } k += kstep; kc = kcnext; goto L30; L50: ; } else { /* Compute inv(A) from the factorization A = L*D*L**H. */ /* K is the main loop index, increasing from 1 to N in steps of */ /* 1 or 2, depending on the size of the diagonal blocks. */ npp = *n * (*n + 1) / 2; k = *n; kc = npp; L60: /* If K < 1, exit from loop. */ if (k < 1) { goto L80; } kcnext = kc - (*n - k + 2); if (ipiv[k] > 0) { /* 1 x 1 diagonal block */ /* Invert the diagonal block. */ i__1 = kc; i__2 = kc; d__1 = 1. / ap[i__2].r; ap[i__1].r = d__1; ap[i__1].i = 0.; // , expr subst /* Compute column K of the inverse. */ if (k < *n) { i__1 = *n - k; zcopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1); i__1 = *n - k; z__1.r = -1.; z__1.i = -0.; // , expr subst zhpmv_(uplo, &i__1, &z__1, &ap[kc + *n - k + 1], &work[1], & c__1, &c_b2, &ap[kc + 1], &c__1); i__1 = kc; i__2 = kc; i__3 = *n - k; zdotc_f2c_(&z__2, &i__3, &work[1], &c__1, &ap[kc + 1], &c__1); d__1 = z__2.r; z__1.r = ap[i__2].r - d__1; z__1.i = ap[i__2].i; // , expr subst ap[i__1].r = z__1.r; ap[i__1].i = z__1.i; // , expr subst } kstep = 1; } else { /* 2 x 2 diagonal block */ /* Invert the diagonal block. */ t = z_abs(&ap[kcnext + 1]); i__1 = kcnext; ak = ap[i__1].r / t; i__1 = kc; akp1 = ap[i__1].r / t; i__1 = kcnext + 1; z__1.r = ap[i__1].r / t; z__1.i = ap[i__1].i / t; // , expr subst akkp1.r = z__1.r; akkp1.i = z__1.i; // , expr subst d__ = t * (ak * akp1 - 1.); i__1 = kcnext; d__1 = akp1 / d__; ap[i__1].r = d__1; ap[i__1].i = 0.; // , expr subst i__1 = kc; d__1 = ak / d__; ap[i__1].r = d__1; ap[i__1].i = 0.; // , expr subst i__1 = kcnext + 1; z__2.r = -akkp1.r; z__2.i = -akkp1.i; // , expr subst z__1.r = z__2.r / d__; z__1.i = z__2.i / d__; // , expr subst ap[i__1].r = z__1.r; ap[i__1].i = z__1.i; // , expr subst /* Compute columns K-1 and K of the inverse. 
*/ if (k < *n) { i__1 = *n - k; zcopy_(&i__1, &ap[kc + 1], &c__1, &work[1], &c__1); i__1 = *n - k; z__1.r = -1.; z__1.i = -0.; // , expr subst zhpmv_(uplo, &i__1, &z__1, &ap[kc + (*n - k + 1)], &work[1], & c__1, &c_b2, &ap[kc + 1], &c__1); i__1 = kc; i__2 = kc; i__3 = *n - k; zdotc_f2c_(&z__2, &i__3, &work[1], &c__1, &ap[kc + 1], &c__1); d__1 = z__2.r; z__1.r = ap[i__2].r - d__1; z__1.i = ap[i__2].i; // , expr subst ap[i__1].r = z__1.r; ap[i__1].i = z__1.i; // , expr subst i__1 = kcnext + 1; i__2 = kcnext + 1; i__3 = *n - k; zdotc_f2c_(&z__2, &i__3, &ap[kc + 1], &c__1, &ap[kcnext + 2], & c__1); z__1.r = ap[i__2].r - z__2.r; z__1.i = ap[i__2].i - z__2.i; // , expr subst ap[i__1].r = z__1.r; ap[i__1].i = z__1.i; // , expr subst i__1 = *n - k; zcopy_(&i__1, &ap[kcnext + 2], &c__1, &work[1], &c__1); i__1 = *n - k; z__1.r = -1.; z__1.i = -0.; // , expr subst zhpmv_(uplo, &i__1, &z__1, &ap[kc + (*n - k + 1)], &work[1], & c__1, &c_b2, &ap[kcnext + 2], &c__1); i__1 = kcnext; i__2 = kcnext; i__3 = *n - k; zdotc_f2c_(&z__2, &i__3, &work[1], &c__1, &ap[kcnext + 2], &c__1); d__1 = z__2.r; z__1.r = ap[i__2].r - d__1; z__1.i = ap[i__2].i; // , expr subst ap[i__1].r = z__1.r; ap[i__1].i = z__1.i; // , expr subst } kstep = 2; kcnext -= *n - k + 3; } kp = (i__1 = ipiv[k], f2c_abs(i__1)); if (kp != k) { /* Interchange rows and columns K and KP in the trailing */ /* submatrix A(k-1:n,k-1:n) */ kpc = npp - (*n - kp + 1) * (*n - kp + 2) / 2 + 1; if (kp < *n) { i__1 = *n - kp; zswap_(&i__1, &ap[kc + kp - k + 1], &c__1, &ap[kpc + 1], & c__1); } kx = kc + kp - k; i__1 = kp - 1; for (j = k + 1; j <= i__1; ++j) { kx = kx + *n - j + 1; d_cnjg(&z__1, &ap[kc + j - k]); temp.r = z__1.r; temp.i = z__1.i; // , expr subst i__2 = kc + j - k; d_cnjg(&z__1, &ap[kx]); ap[i__2].r = z__1.r; ap[i__2].i = z__1.i; // , expr subst i__2 = kx; ap[i__2].r = temp.r; ap[i__2].i = temp.i; // , expr subst /* L70: */ } i__1 = kc + kp - k; d_cnjg(&z__1, &ap[kc + kp - k]); ap[i__1].r = z__1.r; ap[i__1].i = z__1.i; // , expr subst i__1 = kc; temp.r = ap[i__1].r; temp.i = ap[i__1].i; // , expr subst i__1 = kc; i__2 = kpc; ap[i__1].r = ap[i__2].r; ap[i__1].i = ap[i__2].i; // , expr subst i__1 = kpc; ap[i__1].r = temp.r; ap[i__1].i = temp.i; // , expr subst if (kstep == 2) { i__1 = kc - *n + k - 1; temp.r = ap[i__1].r; temp.i = ap[i__1].i; // , expr subst i__1 = kc - *n + k - 1; i__2 = kc - *n + kp - 1; ap[i__1].r = ap[i__2].r; ap[i__1].i = ap[i__2].i; // , expr subst i__1 = kc - *n + kp - 1; ap[i__1].r = temp.r; ap[i__1].i = temp.i; // , expr subst } } k -= kstep; kc = kcnext; goto L60; L80: ; } return 0; /* End of ZHPTRI */ }
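/* Example (not part of the original source): a minimal sketch of how the
   ZHPTRI routine above is typically driven, assuming the customary f2c
   prototypes for zhptrf_/zhptri_ and the integer/doublecomplex typedefs
   from the surrounding headers; the exact symbol names in this bundle may
   differ.  ZHPTRF factors the packed Hermitian matrix as U*D*U**H and
   ZHPTRI then overwrites the packed storage with the inverse. */
static int zhptri_usage_sketch(void)
{
    /* A = [ 2    1+i ]  in upper packed storage: ap = { a11, a12, a22 } */
    /*     [ 1-i  3   ]                                                  */
    doublecomplex ap[3] = { {2., 0.}, {1., 1.}, {3., 0.} };
    doublecomplex work[2];                       /* WORK of length N      */
    integer n = 2, ipiv[2], info;
    extern int zhptrf_(char *, integer *, doublecomplex *, integer *,
                       integer *);
    extern int zhptri_(char *, integer *, doublecomplex *, integer *,
                       doublecomplex *, integer *);

    zhptrf_("U", &n, ap, ipiv, &info);           /* factor  A = U*D*U**H  */
    if (info == 0)
        zhptri_("U", &n, ap, ipiv, work, &info); /* ap now holds inv(A)   */
    return (int) info;
}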
/* Subroutine */ int zlatm6_(integer *type__, integer *n, doublecomplex *a, integer *lda, doublecomplex *b, doublecomplex *x, integer *ldx, doublecomplex *y, integer *ldy, doublecomplex *alpha, doublecomplex * beta, doublecomplex *wx, doublecomplex *wy, doublereal *s, doublereal *dif) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2, i__3; doublereal d__1, d__2; doublecomplex z__1, z__2, z__3, z__4; /* Builtin functions */ void d_cnjg(doublecomplex *, doublecomplex *); double z_abs(doublecomplex *), sqrt(doublereal); /* Local variables */ integer i__, j; doublecomplex z__[64] /* was [8][8] */; integer info; doublecomplex work[26]; doublereal rwork[50]; extern /* Subroutine */ int zlakf2_(integer *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, doublecomplex *, doublecomplex *, integer *), zgesvd_(char *, char *, integer *, integer *, doublecomplex *, integer *, doublereal *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *, doublereal *, integer *), zlacpy_(char *, integer *, integer *, doublecomplex *, integer *, doublecomplex *, integer *); /* -- LAPACK test routine (version 3.1) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZLATM6 generates test matrices for the generalized eigenvalue */ /* problem, their corresponding right and left eigenvector matrices, */ /* and also reciprocal condition numbers for all eigenvalues and */ /* the reciprocal condition numbers of eigenvectors corresponding to */ /* the 1th and 5th eigenvalues. */ /* Test Matrices */ /* ============= */ /* Two kinds of test matrix pairs */ /* (A, B) = inverse(YH) * (Da, Db) * inverse(X) */ /* are used in the tests: */ /* Type 1: */ /* Da = 1+a 0 0 0 0 Db = 1 0 0 0 0 */ /* 0 2+a 0 0 0 0 1 0 0 0 */ /* 0 0 3+a 0 0 0 0 1 0 0 */ /* 0 0 0 4+a 0 0 0 0 1 0 */ /* 0 0 0 0 5+a , 0 0 0 0 1 */ /* and Type 2: */ /* Da = 1+i 0 0 0 0 Db = 1 0 0 0 0 */ /* 0 1-i 0 0 0 0 1 0 0 0 */ /* 0 0 1 0 0 0 0 1 0 0 */ /* 0 0 0 (1+a)+(1+b)i 0 0 0 0 1 0 */ /* 0 0 0 0 (1+a)-(1+b)i, 0 0 0 0 1 . */ /* In both cases the same inverse(YH) and inverse(X) are used to compute */ /* (A, B), giving the exact eigenvectors to (A,B) as (YH, X): */ /* YH: = 1 0 -y y -y X = 1 0 -x -x x */ /* 0 1 -y y -y 0 1 x -x -x */ /* 0 0 1 0 0 0 0 1 0 0 */ /* 0 0 0 1 0 0 0 0 1 0 */ /* 0 0 0 0 1, 0 0 0 0 1 , where */ /* a, b, x and y will have all values independently of each other. */ /* Arguments */ /* ========= */ /* TYPE (input) INTEGER */ /* Specifies the problem type (see futher details). */ /* N (input) INTEGER */ /* Size of the matrices A and B. */ /* A (output) COMPLEX*16 array, dimension (LDA, N). */ /* On exit A N-by-N is initialized according to TYPE. */ /* LDA (input) INTEGER */ /* The leading dimension of A and of B. */ /* B (output) COMPLEX*16 array, dimension (LDA, N). */ /* On exit B N-by-N is initialized according to TYPE. */ /* X (output) COMPLEX*16 array, dimension (LDX, N). */ /* On exit X is the N-by-N matrix of right eigenvectors. */ /* LDX (input) INTEGER */ /* The leading dimension of X. */ /* Y (output) COMPLEX*16 array, dimension (LDY, N). */ /* On exit Y is the N-by-N matrix of left eigenvectors. */ /* LDY (input) INTEGER */ /* The leading dimension of Y. */ /* ALPHA (input) COMPLEX*16 */ /* BETA (input) COMPLEX*16 */ /* Weighting constants for matrix A. 
*/ /* WX (input) COMPLEX*16 */ /* Constant for right eigenvector matrix. */ /* WY (input) COMPLEX*16 */ /* Constant for left eigenvector matrix. */ /* S (output) DOUBLE PRECISION array, dimension (N) */ /* S(i) is the reciprocal condition number for eigenvalue i. */ /* DIF (output) DOUBLE PRECISION array, dimension (N) */ /* DIF(i) is the reciprocal condition number for eigenvector i. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. Local Arrays .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Executable Statements .. */ /* Generate test problem ... */ /* (Da, Db) ... */ /* Parameter adjustments */ b_dim1 = *lda; b_offset = 1 + b_dim1; b -= b_offset; a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; x_dim1 = *ldx; x_offset = 1 + x_dim1; x -= x_offset; y_dim1 = *ldy; y_offset = 1 + y_dim1; y -= y_offset; --s; --dif; /* Function Body */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = *n; for (j = 1; j <= i__2; ++j) { if (i__ == j) { i__3 = i__ + i__ * a_dim1; z__2.r = (doublereal) i__, z__2.i = 0.; z__1.r = z__2.r + alpha->r, z__1.i = z__2.i + alpha->i; a[i__3].r = z__1.r, a[i__3].i = z__1.i; i__3 = i__ + i__ * b_dim1; b[i__3].r = 1., b[i__3].i = 0.; } else { i__3 = i__ + j * a_dim1; a[i__3].r = 0., a[i__3].i = 0.; i__3 = i__ + j * b_dim1; b[i__3].r = 0., b[i__3].i = 0.; } /* L10: */ } /* L20: */ } if (*type__ == 2) { i__1 = a_dim1 + 1; a[i__1].r = 1., a[i__1].i = 1.; i__1 = (a_dim1 << 1) + 2; d_cnjg(&z__1, &a[a_dim1 + 1]); a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = a_dim1 * 3 + 3; a[i__1].r = 1., a[i__1].i = 0.; i__1 = (a_dim1 << 2) + 4; z__2.r = alpha->r + 1., z__2.i = alpha->i + 0.; d__1 = z__2.r; z__3.r = beta->r + 1., z__3.i = beta->i + 0.; d__2 = z__3.r; z__1.r = d__1, z__1.i = d__2; a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = a_dim1 * 5 + 5; d_cnjg(&z__1, &a[(a_dim1 << 2) + 4]); a[i__1].r = z__1.r, a[i__1].i = z__1.i; } /* Form X and Y */ zlacpy_("F", n, n, &b[b_offset], lda, &y[y_offset], ldy); i__1 = y_dim1 + 3; d_cnjg(&z__2, wy); z__1.r = -z__2.r, z__1.i = -z__2.i; y[i__1].r = z__1.r, y[i__1].i = z__1.i; i__1 = y_dim1 + 4; d_cnjg(&z__1, wy); y[i__1].r = z__1.r, y[i__1].i = z__1.i; i__1 = y_dim1 + 5; d_cnjg(&z__2, wy); z__1.r = -z__2.r, z__1.i = -z__2.i; y[i__1].r = z__1.r, y[i__1].i = z__1.i; i__1 = (y_dim1 << 1) + 3; d_cnjg(&z__2, wy); z__1.r = -z__2.r, z__1.i = -z__2.i; y[i__1].r = z__1.r, y[i__1].i = z__1.i; i__1 = (y_dim1 << 1) + 4; d_cnjg(&z__1, wy); y[i__1].r = z__1.r, y[i__1].i = z__1.i; i__1 = (y_dim1 << 1) + 5; d_cnjg(&z__2, wy); z__1.r = -z__2.r, z__1.i = -z__2.i; y[i__1].r = z__1.r, y[i__1].i = z__1.i; zlacpy_("F", n, n, &b[b_offset], lda, &x[x_offset], ldx); i__1 = x_dim1 * 3 + 1; z__1.r = -wx->r, z__1.i = -wx->i; x[i__1].r = z__1.r, x[i__1].i = z__1.i; i__1 = (x_dim1 << 2) + 1; z__1.r = -wx->r, z__1.i = -wx->i; x[i__1].r = z__1.r, x[i__1].i = z__1.i; i__1 = x_dim1 * 5 + 1; x[i__1].r = wx->r, x[i__1].i = wx->i; i__1 = x_dim1 * 3 + 2; x[i__1].r = wx->r, x[i__1].i = wx->i; i__1 = (x_dim1 << 2) + 2; z__1.r = -wx->r, z__1.i = -wx->i; x[i__1].r = z__1.r, x[i__1].i = z__1.i; i__1 = x_dim1 * 5 + 2; z__1.r = -wx->r, z__1.i = -wx->i; x[i__1].r = z__1.r, x[i__1].i = z__1.i; /* Form (A, B) */ i__1 = b_dim1 * 3 + 1; z__1.r = wx->r + wy->r, z__1.i = wx->i + wy->i; b[i__1].r = z__1.r, b[i__1].i = z__1.i; i__1 = b_dim1 * 3 + 2; z__2.r = -wx->r, z__2.i = -wx->i; z__1.r = z__2.r + wy->r, z__1.i = z__2.i + wy->i; 
b[i__1].r = z__1.r, b[i__1].i = z__1.i; i__1 = (b_dim1 << 2) + 1; z__1.r = wx->r - wy->r, z__1.i = wx->i - wy->i; b[i__1].r = z__1.r, b[i__1].i = z__1.i; i__1 = (b_dim1 << 2) + 2; z__1.r = wx->r - wy->r, z__1.i = wx->i - wy->i; b[i__1].r = z__1.r, b[i__1].i = z__1.i; i__1 = b_dim1 * 5 + 1; z__2.r = -wx->r, z__2.i = -wx->i; z__1.r = z__2.r + wy->r, z__1.i = z__2.i + wy->i; b[i__1].r = z__1.r, b[i__1].i = z__1.i; i__1 = b_dim1 * 5 + 2; z__1.r = wx->r + wy->r, z__1.i = wx->i + wy->i; b[i__1].r = z__1.r, b[i__1].i = z__1.i; i__1 = a_dim1 * 3 + 1; i__2 = a_dim1 + 1; z__2.r = wx->r * a[i__2].r - wx->i * a[i__2].i, z__2.i = wx->r * a[i__2] .i + wx->i * a[i__2].r; i__3 = a_dim1 * 3 + 3; z__3.r = wy->r * a[i__3].r - wy->i * a[i__3].i, z__3.i = wy->r * a[i__3] .i + wy->i * a[i__3].r; z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = a_dim1 * 3 + 2; z__3.r = -wx->r, z__3.i = -wx->i; i__2 = (a_dim1 << 1) + 2; z__2.r = z__3.r * a[i__2].r - z__3.i * a[i__2].i, z__2.i = z__3.r * a[ i__2].i + z__3.i * a[i__2].r; i__3 = a_dim1 * 3 + 3; z__4.r = wy->r * a[i__3].r - wy->i * a[i__3].i, z__4.i = wy->r * a[i__3] .i + wy->i * a[i__3].r; z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = (a_dim1 << 2) + 1; i__2 = a_dim1 + 1; z__2.r = wx->r * a[i__2].r - wx->i * a[i__2].i, z__2.i = wx->r * a[i__2] .i + wx->i * a[i__2].r; i__3 = (a_dim1 << 2) + 4; z__3.r = wy->r * a[i__3].r - wy->i * a[i__3].i, z__3.i = wy->r * a[i__3] .i + wy->i * a[i__3].r; z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = (a_dim1 << 2) + 2; i__2 = (a_dim1 << 1) + 2; z__2.r = wx->r * a[i__2].r - wx->i * a[i__2].i, z__2.i = wx->r * a[i__2] .i + wx->i * a[i__2].r; i__3 = (a_dim1 << 2) + 4; z__3.r = wy->r * a[i__3].r - wy->i * a[i__3].i, z__3.i = wy->r * a[i__3] .i + wy->i * a[i__3].r; z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = a_dim1 * 5 + 1; z__3.r = -wx->r, z__3.i = -wx->i; i__2 = a_dim1 + 1; z__2.r = z__3.r * a[i__2].r - z__3.i * a[i__2].i, z__2.i = z__3.r * a[ i__2].i + z__3.i * a[i__2].r; i__3 = a_dim1 * 5 + 5; z__4.r = wy->r * a[i__3].r - wy->i * a[i__3].i, z__4.i = wy->r * a[i__3] .i + wy->i * a[i__3].r; z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; i__1 = a_dim1 * 5 + 2; i__2 = (a_dim1 << 1) + 2; z__2.r = wx->r * a[i__2].r - wx->i * a[i__2].i, z__2.i = wx->r * a[i__2] .i + wx->i * a[i__2].r; i__3 = a_dim1 * 5 + 5; z__3.r = wy->r * a[i__3].r - wy->i * a[i__3].i, z__3.i = wy->r * a[i__3] .i + wy->i * a[i__3].r; z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i; a[i__1].r = z__1.r, a[i__1].i = z__1.i; /* Compute condition numbers */ s[1] = 1. / sqrt((z_abs(wy) * 3. * z_abs(wy) + 1.) / (z_abs(&a[a_dim1 + 1] ) * z_abs(&a[a_dim1 + 1]) + 1.)); s[2] = 1. / sqrt((z_abs(wy) * 3. * z_abs(wy) + 1.) / (z_abs(&a[(a_dim1 << 1) + 2]) * z_abs(&a[(a_dim1 << 1) + 2]) + 1.)); s[3] = 1. / sqrt((z_abs(wx) * 2. * z_abs(wx) + 1.) / (z_abs(&a[a_dim1 * 3 + 3]) * z_abs(&a[a_dim1 * 3 + 3]) + 1.)); s[4] = 1. / sqrt((z_abs(wx) * 2. * z_abs(wx) + 1.) / (z_abs(&a[(a_dim1 << 2) + 4]) * z_abs(&a[(a_dim1 << 2) + 4]) + 1.)); s[5] = 1. / sqrt((z_abs(wx) * 2. * z_abs(wx) + 1.) 
/ (z_abs(&a[a_dim1 * 5 + 5]) * z_abs(&a[a_dim1 * 5 + 5]) + 1.)); zlakf2_(&c__1, &c__4, &a[a_offset], lda, &a[(a_dim1 << 1) + 2], &b[ b_offset], &b[(b_dim1 << 1) + 2], z__, &c__8); zgesvd_("N", "N", &c__8, &c__8, z__, &c__8, rwork, work, &c__1, &work[1], &c__1, &work[2], &c__24, &rwork[8], &info); dif[1] = rwork[7]; zlakf2_(&c__4, &c__1, &a[a_offset], lda, &a[a_dim1 * 5 + 5], &b[b_offset], &b[b_dim1 * 5 + 5], z__, &c__8); zgesvd_("N", "N", &c__8, &c__8, z__, &c__8, rwork, work, &c__1, &work[1], &c__1, &work[2], &c__24, &rwork[8], &info); dif[5] = rwork[7]; return 0; /* End of ZLATM6 */ } /* zlatm6_ */
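/* Example (not part of the original source): a minimal sketch of calling
   ZLATM6 above to build the 5-by-5 type-1 test pair (A,B) with its exact
   right/left eigenvector matrices and condition numbers.  The leading
   dimensions and the weights alpha, beta, wx, wy are arbitrary values
   chosen only for illustration. */
static void zlatm6_usage_sketch(void)
{
    integer type = 1, n = 5, lda = 5, ldx = 5, ldy = 5;
    doublecomplex a[25], b[25], x[25], y[25];
    doublecomplex alpha = {1., 0.}, beta = {0., 0.};
    doublecomplex wx = {.5, 0.}, wy = {.25, 0.};
    doublereal s[5], dif[5];

    /* On return: a,b hold the pair, x,y the right/left eigenvectors,
       s[0..4] the eigenvalue condition numbers, and dif[0], dif[4] the
       eigenvector condition numbers for eigenvalues 1 and 5. */
    zlatm6_(&type, &n, a, &lda, b, x, &ldx, y, &ldy,
            &alpha, &beta, &wx, &wy, s, dif);
}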
doublereal zlantb_(char *norm, char *uplo, char *diag, integer *n, integer *k, doublecomplex *ab, integer *ldab, doublereal *work) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= ZLANTB returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of an n by n triangular band matrix A, with ( k + 1 ) diagonals. Description =========== ZLANTB returns the value ZLANTB = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ( ( normI(A), NORM = 'I' or 'i' ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a matrix norm. Arguments ========= NORM (input) CHARACTER*1 Specifies the value to be returned in ZLANTB as described above. UPLO (input) CHARACTER*1 Specifies whether the matrix A is upper or lower triangular. = 'U': Upper triangular = 'L': Lower triangular DIAG (input) CHARACTER*1 Specifies whether or not the matrix A is unit triangular. = 'N': Non-unit triangular = 'U': Unit triangular N (input) INTEGER The order of the matrix A. N >= 0. When N = 0, ZLANTB is set to zero. K (input) INTEGER The number of super-diagonals of the matrix A if UPLO = 'U', or the number of sub-diagonals of the matrix A if UPLO = 'L'. K >= 0. AB (input) COMPLEX*16 array, dimension (LDAB,N) The upper or lower triangular band matrix A, stored in the first k+1 rows of AB. The j-th column of A is stored in the j-th column of the array AB as follows: if UPLO = 'U', AB(k+1+i-j,j) = A(i,j) for max(1,j-k)<=i<=j; if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j<=i<=min(n,j+k). Note that when DIAG = 'U', the elements of the array AB corresponding to the diagonal elements of the matrix A are not referenced, but are assumed to be one. LDAB (input) INTEGER The leading dimension of the array AB. LDAB >= K+1. WORK (workspace) DOUBLE PRECISION array, dimension (LWORK), where LWORK >= N when NORM = 'I'; otherwise, WORK is not referenced. ===================================================================== Parameter adjustments */ /* Table of constant values */ static integer c__1 = 1; /* System generated locals */ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5; doublereal ret_val, d__1, d__2; /* Builtin functions */ double z_abs(doublecomplex *), sqrt(doublereal); /* Local variables */ static integer i__, j, l; static doublereal scale; static logical udiag; extern logical lsame_(char *, char *); static doublereal value; extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *, doublereal *, doublereal *); static doublereal sum; #define ab_subscr(a_1,a_2) (a_2)*ab_dim1 + a_1 #define ab_ref(a_1,a_2) ab[ab_subscr(a_1,a_2)] ab_dim1 = *ldab; ab_offset = 1 + ab_dim1 * 1; ab -= ab_offset; --work; /* Function Body */ if (*n == 0) { value = 0.; } else if (lsame_(norm, "M")) { /* Find max(abs(A(i,j))). 
*/ if (lsame_(diag, "U")) { value = 1.; if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MAX */ i__2 = *k + 2 - j; i__3 = *k; for (i__ = max(i__2,1); i__ <= i__3; ++i__) { /* Computing MAX */ d__1 = value, d__2 = z_abs(&ab_ref(i__, j)); value = max(d__1,d__2); /* L10: */ } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__2 = *n + 1 - j, i__4 = *k + 1; i__3 = min(i__2,i__4); for (i__ = 2; i__ <= i__3; ++i__) { /* Computing MAX */ d__1 = value, d__2 = z_abs(&ab_ref(i__, j)); value = max(d__1,d__2); /* L30: */ } /* L40: */ } } } else { value = 0.; if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MAX */ i__3 = *k + 2 - j; i__2 = *k + 1; for (i__ = max(i__3,1); i__ <= i__2; ++i__) { /* Computing MAX */ d__1 = value, d__2 = z_abs(&ab_ref(i__, j)); value = max(d__1,d__2); /* L50: */ } /* L60: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__3 = *n + 1 - j, i__4 = *k + 1; i__2 = min(i__3,i__4); for (i__ = 1; i__ <= i__2; ++i__) { /* Computing MAX */ d__1 = value, d__2 = z_abs(&ab_ref(i__, j)); value = max(d__1,d__2); /* L70: */ } /* L80: */ } } } } else if (lsame_(norm, "O") || *(unsigned char *) norm == '1') { /* Find norm1(A). */ value = 0.; udiag = lsame_(diag, "U"); if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (udiag) { sum = 1.; /* Computing MAX */ i__2 = *k + 2 - j; i__3 = *k; for (i__ = max(i__2,1); i__ <= i__3; ++i__) { sum += z_abs(&ab_ref(i__, j)); /* L90: */ } } else { sum = 0.; /* Computing MAX */ i__3 = *k + 2 - j; i__2 = *k + 1; for (i__ = max(i__3,1); i__ <= i__2; ++i__) { sum += z_abs(&ab_ref(i__, j)); /* L100: */ } } value = max(value,sum); /* L110: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (udiag) { sum = 1.; /* Computing MIN */ i__3 = *n + 1 - j, i__4 = *k + 1; i__2 = min(i__3,i__4); for (i__ = 2; i__ <= i__2; ++i__) { sum += z_abs(&ab_ref(i__, j)); /* L120: */ } } else { sum = 0.; /* Computing MIN */ i__3 = *n + 1 - j, i__4 = *k + 1; i__2 = min(i__3,i__4); for (i__ = 1; i__ <= i__2; ++i__) { sum += z_abs(&ab_ref(i__, j)); /* L130: */ } } value = max(value,sum); /* L140: */ } } } else if (lsame_(norm, "I")) { /* Find normI(A). 
*/ value = 0.; if (lsame_(uplo, "U")) { if (lsame_(diag, "U")) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 1.; /* L150: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { l = *k + 1 - j; /* Computing MAX */ i__2 = 1, i__3 = j - *k; i__4 = j - 1; for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) { work[i__] += z_abs(&ab_ref(l + i__, j)); /* L160: */ } /* L170: */ } } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 0.; /* L180: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { l = *k + 1 - j; /* Computing MAX */ i__4 = 1, i__2 = j - *k; i__3 = j; for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) { work[i__] += z_abs(&ab_ref(l + i__, j)); /* L190: */ } /* L200: */ } } } else { if (lsame_(diag, "U")) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 1.; /* L210: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { l = 1 - j; /* Computing MIN */ i__4 = *n, i__2 = j + *k; i__3 = min(i__4,i__2); for (i__ = j + 1; i__ <= i__3; ++i__) { work[i__] += z_abs(&ab_ref(l + i__, j)); /* L220: */ } /* L230: */ } } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 0.; /* L240: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { l = 1 - j; /* Computing MIN */ i__4 = *n, i__2 = j + *k; i__3 = min(i__4,i__2); for (i__ = j; i__ <= i__3; ++i__) { work[i__] += z_abs(&ab_ref(l + i__, j)); /* L250: */ } /* L260: */ } } } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__1 = value, d__2 = work[i__]; value = max(d__1,d__2); /* L270: */ } } else if (lsame_(norm, "F") || lsame_(norm, "E")) { /* Find normF(A). */ if (lsame_(uplo, "U")) { if (lsame_(diag, "U")) { scale = 1.; sum = (doublereal) (*n); if (*k > 0) { i__1 = *n; for (j = 2; j <= i__1; ++j) { /* Computing MAX */ i__3 = *k + 2 - j; /* Computing MIN */ i__2 = j - 1; i__4 = min(i__2,*k); zlassq_(&i__4, &ab_ref(max(i__3,1), j), &c__1, &scale, &sum); /* L280: */ } } } else { scale = 0.; sum = 1.; i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MAX */ i__3 = *k + 2 - j; /* Computing MIN */ i__2 = j, i__5 = *k + 1; i__4 = min(i__2,i__5); zlassq_(&i__4, &ab_ref(max(i__3,1), j), &c__1, &scale, & sum); /* L290: */ } } } else { if (lsame_(diag, "U")) { scale = 1.; sum = (doublereal) (*n); if (*k > 0) { i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__4 = *n - j; i__3 = min(i__4,*k); zlassq_(&i__3, &ab_ref(2, j), &c__1, &scale, &sum); /* L300: */ } } } else { scale = 0.; sum = 1.; i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__4 = *n - j + 1, i__2 = *k + 1; i__3 = min(i__4,i__2); zlassq_(&i__3, &ab_ref(1, j), &c__1, &scale, &sum); /* L310: */ } } } value = scale * sqrt(sum); } ret_val = value; return ret_val; /* End of ZLANTB */ } /* zlantb_ */
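/* Example (not part of the original source): a minimal sketch of calling
   ZLANTB above for the one norm of the 3-by-3 upper triangular band matrix
   A = [1 2 0; 0 3 4; 0 0 5] with k = 1 superdiagonal.  AB uses the band
   storage described in the header, AB(k+1+i-j,j) = A(i,j), so LDAB = k+1
   = 2 and the (1,1) slot of AB is never referenced. */
static doublereal zlantb_usage_sketch(void)
{
    integer n = 3, k = 1, ldab = 2;
    doublereal work[3];                         /* referenced only for 'I'  */
    doublecomplex ab[6] = {
        {0., 0.}, {1., 0.},                     /* column 1: *,      A(1,1) */
        {2., 0.}, {3., 0.},                     /* column 2: A(1,2), A(2,2) */
        {4., 0.}, {5., 0.}                      /* column 3: A(2,3), A(3,3) */
    };

    /* Maximum column sum: |2|+|3| = 5 in column 2, |4|+|5| = 9 in column 3,
       so the returned value is 9. */
    return zlantb_("1", "U", "N", &n, &k, ab, &ldab, work);
}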
doublereal dzsum1_(integer *n, doublecomplex *cx, integer *incx) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DZSUM1 takes the sum of the absolute values of a complex vector and returns a double precision result. Based on DZASUM from the Level 1 BLAS. The change is to use the 'genuine' absolute value. Contributed by Nick Higham for use with ZLACON. Arguments ========= N (input) INTEGER The number of elements in the vector CX. CX (input) COMPLEX*16 array, dimension (N) The vector whose elements will be summed. INCX (input) INTEGER The spacing between successive values of CX. INCX > 0. ===================================================================== Parameter adjustments */ /* System generated locals */ integer i__1, i__2; doublereal ret_val; /* Builtin functions */ double z_abs(doublecomplex *); /* Local variables */ static integer i__, nincx; static doublereal stemp; --cx; /* Function Body */ ret_val = 0.; stemp = 0.; if (*n <= 0) { return ret_val; } if (*incx == 1) { goto L20; } /* CODE FOR INCREMENT NOT EQUAL TO 1 */ nincx = *n * *incx; i__1 = nincx; i__2 = *incx; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* NEXT LINE MODIFIED. */ stemp += z_abs(&cx[i__]); /* L10: */ } ret_val = stemp; return ret_val; /* CODE FOR INCREMENT EQUAL TO 1 */ L20: i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { /* NEXT LINE MODIFIED. */ stemp += z_abs(&cx[i__]); /* L30: */ } ret_val = stemp; return ret_val; /* End of DZSUM1 */ } /* dzsum1_ */
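/* Example (not part of the original source): a minimal sketch of DZSUM1
   above, which, unlike DZASUM, sums the true moduli |x_i| =
   sqrt(re^2 + im^2) rather than |re| + |im|. */
static doublereal dzsum1_usage_sketch(void)
{
    doublecomplex cx[3] = { {3., 4.}, {0., 1.}, {-2., 0.} };
    integer n = 3, incx = 1;

    /* |3+4i| + |i| + |-2| = 5 + 1 + 2 = 8 */
    return dzsum1_(&n, cx, &incx);
}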
/* Subroutine */ int zlaror_slu(char *side, char *init, integer *m, integer *n, doublecomplex *a, integer *lda, integer *iseed, doublecomplex *x, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublecomplex z__1, z__2; /* Builtin functions */ double z_abs(doublecomplex *); void d_cnjg(doublecomplex *, doublecomplex *); /* Local variables */ static integer kbeg, jcol; static doublereal xabs; static integer irow, j; static doublecomplex csign; extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *), zscal_(integer *, doublecomplex *, doublecomplex *, integer *); static integer ixfrm; extern /* Subroutine */ int zgemv_(char *, integer *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *); static integer itype, nxfrm; static doublereal xnorm; extern doublereal dznrm2_(integer *, doublecomplex *, integer *); extern int input_error(char *, int *); static doublereal factor; extern /* Subroutine */ int zlacgv_slu(integer *, doublecomplex *, integer *) ; extern /* Double Complex */ VOID zlarnd_slu(doublecomplex *, integer *, integer *); extern /* Subroutine */ int zlaset_slu(char *, integer *, integer *, doublecomplex *, doublecomplex *, doublecomplex *, integer *); static doublecomplex xnorms; /* -- LAPACK auxiliary test routine (version 2.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University September 30, 1994 Purpose ======= ZLAROR pre- or post-multiplies an M by N matrix A by a random unitary matrix U, overwriting A. A may optionally be initialized to the identity matrix before multiplying by U. U is generated using the method of G.W. Stewart ( SIAM J. Numer. Anal. 17, 1980, pp. 403-409 ). (BLAS-2 version) Arguments ========= SIDE - CHARACTER*1 SIDE specifies whether A is multiplied on the left or right by U. SIDE = 'L' Multiply A on the left (premultiply) by U SIDE = 'R' Multiply A on the right (postmultiply) by U* SIDE = 'C' Multiply A on the left by U and the right by U* SIDE = 'T' Multiply A on the left by U and the right by U' Not modified. INIT - CHARACTER*1 INIT specifies whether or not A should be initialized to the identity matrix. INIT = 'I' Initialize A to (a section of) the identity matrix before applying U. INIT = 'N' No initialization. Apply U to the input matrix A. INIT = 'I' may be used to generate square (i.e., unitary) or rectangular orthogonal matrices (orthogonality being in the sense of ZDOTC): For square matrices, M=N, and SIDE many be either 'L' or 'R'; the rows will be orthogonal to each other, as will the columns. For rectangular matrices where M < N, SIDE = 'R' will produce a dense matrix whose rows will be orthogonal and whose columns will not, while SIDE = 'L' will produce a matrix whose rows will be orthogonal, and whose first M columns will be orthogonal, the remaining columns being zero. For matrices where M > N, just use the previous explaination, interchanging 'L' and 'R' and "rows" and "columns". Not modified. M - INTEGER Number of rows of A. Not modified. N - INTEGER Number of columns of A. Not modified. A - COMPLEX*16 array, dimension ( LDA, N ) Input and output array. Overwritten by U A ( if SIDE = 'L' ) or by A U ( if SIDE = 'R' ) or by U A U* ( if SIDE = 'C') or by U A U' ( if SIDE = 'T') on exit. LDA - INTEGER Leading dimension of A. Must be at least MAX ( 1, M ). 
Not modified. ISEED - INTEGER array, dimension ( 4 ) On entry ISEED specifies the seed of the random number generator. The array elements should be between 0 and 4095; if not they will be reduced mod 4096. Also, ISEED(4) must be odd. The random number generator uses a linear congruential sequence limited to small integers, and so should produce machine independent random numbers. The values of ISEED are changed on exit, and can be used in the next call to ZLAROR to continue the same random number sequence. Modified. X - COMPLEX*16 array, dimension ( 3*MAX( M, N ) ) Workspace. Of length: 2*M + N if SIDE = 'L', 2*N + M if SIDE = 'R', 3*N if SIDE = 'C' or 'T'. Modified. INFO - INTEGER An error flag. It is set to: 0 if no error. 1 if ZLARND returned a bad random number (installation problem) -1 if SIDE is not L, R, C, or T. -3 if M is negative. -4 if N is negative or if SIDE is C or T and N is not equal to M. -6 if LDA is less than M. ===================================================================== Parameter adjustments */ a_dim1 = *lda; a_offset = a_dim1 + 1; a -= a_offset; --iseed; --x; /* Function Body */ if (*n == 0 || *m == 0) { return 0; } itype = 0; if (strncmp(side, "L", 1)==0) { itype = 1; } else if (strncmp(side, "R", 1)==0) { itype = 2; } else if (strncmp(side, "C", 1)==0) { itype = 3; } else if (strncmp(side, "T", 1)==0) { itype = 4; } /* Check for argument errors. */ *info = 0; if (itype == 0) { *info = -1; } else if (*m < 0) { *info = -3; } else if (*n < 0 || itype == 3 && *n != *m) { *info = -4; } else if (*lda < *m) { *info = -6; } if (*info != 0) { i__1 = -(*info); input_error("ZLAROR", &i__1); return 0; } if (itype == 1) { nxfrm = *m; } else { nxfrm = *n; } /* Initialize A to the identity matrix if desired */ if (strncmp(init, "I", 1)==0) { zlaset_slu("Full", m, n, &c_b1, &c_b2, &a[a_offset], lda); } /* If no rotation possible, still multiply by a random complex number from the circle |x| = 1 2) Compute Rotation by computing Householder Transformations H(2), H(3), ..., H(n). Note that the order in which they are computed is irrelevant. */ i__1 = nxfrm; for (j = 1; j <= i__1; ++j) { i__2 = j; x[i__2].r = 0., x[i__2].i = 0.; /* L10: */ } i__1 = nxfrm; for (ixfrm = 2; ixfrm <= i__1; ++ixfrm) { kbeg = nxfrm - ixfrm + 1; /* Generate independent normal( 0, 1 ) random numbers */ i__2 = nxfrm; for (j = kbeg; j <= i__2; ++j) { i__3 = j; zlarnd_slu(&z__1, &c__3, &iseed[1]); x[i__3].r = z__1.r, x[i__3].i = z__1.i; /* L20: */ } /* Generate a Householder transformation from the random vector X */ xnorm = dznrm2_(&ixfrm, &x[kbeg], &c__1); xabs = z_abs(&x[kbeg]); if (xabs != 0.) { i__2 = kbeg; z__1.r = x[i__2].r / xabs, z__1.i = x[i__2].i / xabs; csign.r = z__1.r, csign.i = z__1.i; } else { csign.r = 1., csign.i = 0.; } z__1.r = xnorm * csign.r, z__1.i = xnorm * csign.i; xnorms.r = z__1.r, xnorms.i = z__1.i; i__2 = nxfrm + kbeg; z__1.r = -csign.r, z__1.i = -csign.i; x[i__2].r = z__1.r, x[i__2].i = z__1.i; factor = xnorm * (xnorm + xabs); if (abs(factor) < 1e-20) { *info = 1; i__2 = -(*info); input_error("ZLAROR", &i__2); return 0; } else { factor = 1. 
/ factor; } i__2 = kbeg; i__3 = kbeg; z__1.r = x[i__3].r + xnorms.r, z__1.i = x[i__3].i + xnorms.i; x[i__2].r = z__1.r, x[i__2].i = z__1.i; /* Apply Householder transformation to A */ if (itype == 1 || itype == 3 || itype == 4) { /* Apply H(k) on the left of A */ zgemv_("C", &ixfrm, n, &c_b2, &a[kbeg + a_dim1], lda, &x[kbeg], & c__1, &c_b1, &x[(nxfrm << 1) + 1], &c__1); z__2.r = factor, z__2.i = 0.; z__1.r = -z__2.r, z__1.i = -z__2.i; zgerc_(&ixfrm, n, &z__1, &x[kbeg], &c__1, &x[(nxfrm << 1) + 1], & c__1, &a[kbeg + a_dim1], lda); } if (itype >= 2 && itype <= 4) { /* Apply H(k)* (or H(k)') on the right of A */ if (itype == 4) { zlacgv_slu(&ixfrm, &x[kbeg], &c__1); } zgemv_("N", m, &ixfrm, &c_b2, &a[kbeg * a_dim1 + 1], lda, &x[kbeg] , &c__1, &c_b1, &x[(nxfrm << 1) + 1], &c__1); z__2.r = factor, z__2.i = 0.; z__1.r = -z__2.r, z__1.i = -z__2.i; zgerc_(m, &ixfrm, &z__1, &x[(nxfrm << 1) + 1], &c__1, &x[kbeg], & c__1, &a[kbeg * a_dim1 + 1], lda); } /* L30: */ } zlarnd_slu(&z__1, &c__3, &iseed[1]); x[1].r = z__1.r, x[1].i = z__1.i; xabs = z_abs(&x[1]); if (xabs != 0.) { z__1.r = x[1].r / xabs, z__1.i = x[1].i / xabs; csign.r = z__1.r, csign.i = z__1.i; } else { csign.r = 1., csign.i = 0.; } i__1 = nxfrm << 1; x[i__1].r = csign.r, x[i__1].i = csign.i; /* Scale the matrix A by D. */ if (itype == 1 || itype == 3 || itype == 4) { i__1 = *m; for (irow = 1; irow <= i__1; ++irow) { d_cnjg(&z__1, &x[nxfrm + irow]); zscal_(n, &z__1, &a[irow + a_dim1], lda); /* L40: */ } } if (itype == 2 || itype == 3) { i__1 = *n; for (jcol = 1; jcol <= i__1; ++jcol) { zscal_(m, &x[nxfrm + jcol], &a[jcol * a_dim1 + 1], &c__1); /* L50: */ } } if (itype == 4) { i__1 = *n; for (jcol = 1; jcol <= i__1; ++jcol) { d_cnjg(&z__1, &x[nxfrm + jcol]); zscal_(m, &z__1, &a[jcol * a_dim1 + 1], &c__1); /* L60: */ } } return 0; /* End of ZLAROR */ } /* zlaror_slu */
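/* Example (not part of the original source): a minimal sketch of using
   ZLAROR above to manufacture a random 4-by-4 unitary matrix.  With
   INIT = 'I' the routine first sets A to the identity and then
   premultiplies it (SIDE = 'L') by a random unitary U, so A = U on exit.
   ISEED entries must lie in [0,4095] with ISEED(4) odd; the particular
   seed values here are arbitrary. */
static int zlaror_slu_usage_sketch(void)
{
    integer m = 4, n = 4, lda = 4, info;
    integer iseed[4] = { 1988, 1989, 1990, 1991 };   /* ISEED(4) is odd      */
    doublecomplex a[16];                             /* overwritten on exit  */
    doublecomplex x[12];                             /* workspace 2*M+N      */

    zlaror_slu("L", "I", &m, &n, a, &lda, iseed, x, &info);
    return (int) info;                               /* 0 on success         */
}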
/* Subroutine */ int zdrges_(integer *nsizes, integer *nn, integer *ntypes, logical *dotype, integer *iseed, doublereal *thresh, integer *nounit, doublecomplex *a, integer *lda, doublecomplex *b, doublecomplex *s, doublecomplex *t, doublecomplex *q, integer *ldq, doublecomplex *z__, doublecomplex *alpha, doublecomplex *beta, doublecomplex *work, integer *lwork, doublereal *rwork, doublereal *result, logical *bwork, integer *info) { /* Initialized data */ static integer kclass[26] = { 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2, 2,2,2,3 }; static integer kbmagn[26] = { 1,1,1,1,1,1,1,1,3,2,3,2,2,3,1,1,1,1,1,1,1,3, 2,3,2,1 }; static integer ktrian[26] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1, 1,1,1,1 }; static logical lasign[26] = { FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_, TRUE_,FALSE_,TRUE_,TRUE_,FALSE_,FALSE_,TRUE_,TRUE_,TRUE_,FALSE_, TRUE_,FALSE_,FALSE_,FALSE_,TRUE_,TRUE_,TRUE_,TRUE_,TRUE_,FALSE_ }; static logical lbsign[26] = { FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_, FALSE_,TRUE_,FALSE_,FALSE_,TRUE_,TRUE_,FALSE_,FALSE_,TRUE_,FALSE_, TRUE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_, FALSE_ }; static integer kz1[6] = { 0,1,2,1,3,3 }; static integer kz2[6] = { 0,0,1,2,1,1 }; static integer kadd[6] = { 0,0,0,0,3,2 }; static integer katype[26] = { 0,1,0,1,2,3,4,1,4,4,1,1,4,4,4,2,4,5,8,7,9,4, 4,4,4,0 }; static integer kbtype[26] = { 0,0,1,1,2,-3,1,4,1,1,4,4,1,1,-4,2,-4,8,8,8, 8,8,8,8,8,0 }; static integer kazero[26] = { 1,1,1,1,1,1,2,1,2,2,1,1,2,2,3,1,3,5,5,5,5,3, 3,3,3,1 }; static integer kbzero[26] = { 1,1,1,1,1,1,1,2,1,1,2,2,1,1,4,1,4,6,6,6,6,4, 4,4,4,1 }; static integer kamagn[26] = { 1,1,1,1,1,1,1,1,2,3,2,3,2,3,1,1,1,1,1,1,1,2, 3,3,2,1 }; /* Format strings */ static char fmt_9999[] = "(\002 ZDRGES: \002,a,\002 returned INFO=\002,i" "6,\002.\002,/9x,\002N=\002,i6,\002, JTYPE=\002,i6,\002, ISEED=" "(\002,4(i4,\002,\002),i5,\002)\002)"; static char fmt_9998[] = "(\002 ZDRGES: S not in Schur form at eigenvalu" "e \002,i6,\002.\002,/9x,\002N=\002,i6,\002, JTYPE=\002,i6,\002, " "ISEED=(\002,3(i5,\002,\002),i5,\002)\002)"; static char fmt_9997[] = "(/1x,a3,\002 -- Complex Generalized Schur from" " problem \002,\002driver\002)"; static char fmt_9996[] = "(\002 Matrix types (see ZDRGES for details):" " \002)"; static char fmt_9995[] = "(\002 Special Matrices:\002,23x,\002(J'=transp" "osed Jordan block)\002,/\002 1=(0,0) 2=(I,0) 3=(0,I) 4=(I,I" ") 5=(J',J') \002,\0026=(diag(J',I), diag(I,J'))\002,/\002 Diag" "onal Matrices: ( \002,\002D=diag(0,1,2,...) )\002,/\002 7=(D," "I) 9=(large*D, small*I\002,\002) 11=(large*I, small*D) 13=(l" "arge*D, large*I)\002,/\002 8=(I,D) 10=(small*D, large*I) 12=" "(small*I, large*D) \002,\002 14=(small*D, small*I)\002,/\002 15" "=(D, reversed D)\002)"; static char fmt_9994[] = "(\002 Matrices Rotated by Random \002,a,\002 M" "atrices U, V:\002,/\002 16=Transposed Jordan Blocks " " 19=geometric \002,\002alpha, beta=0,1\002,/\002 17=arithm. 
alp" "ha&beta \002,\002 20=arithmetic alpha, beta=0," "1\002,/\002 18=clustered \002,\002alpha, beta=0,1 21" "=random alpha, beta=0,1\002,/\002 Large & Small Matrices:\002," "/\002 22=(large, small) \002,\00223=(small,large) 24=(smal" "l,small) 25=(large,large)\002,/\002 26=random O(1) matrices" ".\002)"; static char fmt_9993[] = "(/\002 Tests performed: (S is Schur, T is tri" "angular, \002,\002Q and Z are \002,a,\002,\002,/19x,\002l and r " "are the appropriate left and right\002,/19x,\002eigenvectors, re" "sp., a is alpha, b is beta, and\002,/19x,a,\002 means \002,a," "\002.)\002,/\002 Without ordering: \002,/\002 1 = | A - Q S " "Z\002,a,\002 | / ( |A| n ulp ) 2 = | B - Q T Z\002,a,\002 |" " / ( |B| n ulp )\002,/\002 3 = | I - QQ\002,a,\002 | / ( n ulp " ") 4 = | I - ZZ\002,a,\002 | / ( n ulp )\002,/\002 5" " = A is in Schur form S\002,/\002 6 = difference between (alpha" ",beta)\002,\002 and diagonals of (S,T)\002,/\002 With ordering:" " \002,/\002 7 = | (A,B) - Q (S,T) Z\002,a,\002 | / ( |(A,B)| n " "ulp )\002,/\002 8 = | I - QQ\002,a,\002 | / ( n ulp ) " " 9 = | I - ZZ\002,a,\002 | / ( n ulp )\002,/\002 10 = A is in " "Schur form S\002,/\002 11 = difference between (alpha,beta) and " "diagonals\002,\002 of (S,T)\002,/\002 12 = SDIM is the correct n" "umber of \002,\002selected eigenvalues\002,/)"; static char fmt_9992[] = "(\002 Matrix order=\002,i5,\002, type=\002,i2" ",\002, seed=\002,4(i4,\002,\002),\002 result \002,i2,\002 is\002" ",0p,f8.2)"; static char fmt_9991[] = "(\002 Matrix order=\002,i5,\002, type=\002,i2" ",\002, seed=\002,4(i4,\002,\002),\002 result \002,i2,\002 is\002" ",1p,d10.3)"; /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, s_dim1, s_offset, t_dim1, t_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8, i__9, i__10, i__11; doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8, d__9, d__10, d__11, d__12, d__13, d__14, d__15, d__16; doublecomplex z__1, z__2, z__3, z__4; /* Local variables */ integer i__, j, n, n1, jc, nb, in, jr; doublereal ulp; integer iadd, sdim, nmax, rsub; char sort[1]; doublereal temp1, temp2; logical badnn; integer iinfo; doublereal rmagn[4]; doublecomplex ctemp; extern /* Subroutine */ int zget51_(integer *, integer *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer * , doublecomplex *, integer *, doublecomplex *, doublereal *, doublereal *), zgges_(char *, char *, char *, L_fp, integer *, doublecomplex *, integer *, doublecomplex *, integer *, integer *, doublecomplex *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *, doublereal *, logical *, integer *); integer nmats, jsize; extern /* Subroutine */ int zget54_(integer *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, integer *, doublecomplex *, doublereal *); integer nerrs, jtype, ntest, isort; extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), zlatm4_( integer *, integer *, integer *, integer *, logical *, doublereal *, doublereal *, doublereal *, integer *, integer *, doublecomplex *, integer *); logical ilabad; extern doublereal dlamch_(char *); extern /* Subroutine */ int zunm2r_(char *, char *, integer *, integer *, integer *, doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *); doublereal safmin, safmax; integer knteig, ioldsd[4]; extern integer 
ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *); extern /* Subroutine */ int alasvm_(char *, integer *, integer *, integer *, integer *), xerbla_(char *, integer *), zlarfg_(integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *); extern /* Double Complex */ void zlarnd_(doublecomplex *, integer *, integer *); extern /* Subroutine */ int zlacpy_(char *, integer *, integer *, doublecomplex *, integer *, doublecomplex *, integer *), zlaset_(char *, integer *, integer *, doublecomplex *, doublecomplex *, doublecomplex *, integer *); extern logical zlctes_(doublecomplex *, doublecomplex *); integer minwrk, maxwrk; doublereal ulpinv; integer mtypes, ntestt; /* Fortran I/O blocks */ static cilist io___41 = { 0, 0, 0, fmt_9999, 0 }; static cilist io___47 = { 0, 0, 0, fmt_9999, 0 }; static cilist io___51 = { 0, 0, 0, fmt_9998, 0 }; static cilist io___53 = { 0, 0, 0, fmt_9997, 0 }; static cilist io___54 = { 0, 0, 0, fmt_9996, 0 }; static cilist io___55 = { 0, 0, 0, fmt_9995, 0 }; static cilist io___56 = { 0, 0, 0, fmt_9994, 0 }; static cilist io___57 = { 0, 0, 0, fmt_9993, 0 }; static cilist io___58 = { 0, 0, 0, fmt_9992, 0 }; static cilist io___59 = { 0, 0, 0, fmt_9991, 0 }; /* -- LAPACK test routine (version 3.1.1) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* February 2007 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* ZDRGES checks the nonsymmetric generalized eigenvalue (Schur form) */ /* problem driver ZGGES. */ /* ZGGES factors A and B as Q*S*Z' and Q*T*Z' , where ' means conjugate */ /* transpose, S and T are upper triangular (i.e., in generalized Schur */ /* form), and Q and Z are unitary. It also computes the generalized */ /* eigenvalues (alpha(j),beta(j)), j=1,...,n. Thus, */ /* w(j) = alpha(j)/beta(j) is a root of the characteristic equation */ /* det( A - w(j) B ) = 0 */ /* Optionally it also reorder the eigenvalues so that a selected */ /* cluster of eigenvalues appears in the leading diagonal block of the */ /* Schur forms. */ /* When ZDRGES is called, a number of matrix "sizes" ("N's") and a */ /* number of matrix "TYPES" are specified. For each size ("N") */ /* and each TYPE of matrix, a pair of matrices (A, B) will be generated */ /* and used for testing. For each matrix pair, the following 13 tests */ /* will be performed and compared with the threshhold THRESH except */ /* the tests (5), (11) and (13). */ /* (1) | A - Q S Z' | / ( |A| n ulp ) (no sorting of eigenvalues) */ /* (2) | B - Q T Z' | / ( |B| n ulp ) (no sorting of eigenvalues) */ /* (3) | I - QQ' | / ( n ulp ) (no sorting of eigenvalues) */ /* (4) | I - ZZ' | / ( n ulp ) (no sorting of eigenvalues) */ /* (5) if A is in Schur form (i.e. triangular form) (no sorting of */ /* eigenvalues) */ /* (6) if eigenvalues = diagonal elements of the Schur form (S, T), */ /* i.e., test the maximum over j of D(j) where: */ /* |alpha(j) - S(j,j)| |beta(j) - T(j,j)| */ /* D(j) = ------------------------ + ----------------------- */ /* max(|alpha(j)|,|S(j,j)|) max(|beta(j)|,|T(j,j)|) */ /* (no sorting of eigenvalues) */ /* (7) | (A,B) - Q (S,T) Z' | / ( |(A,B)| n ulp ) */ /* (with sorting of eigenvalues). */ /* (8) | I - QQ' | / ( n ulp ) (with sorting of eigenvalues). */ /* (9) | I - ZZ' | / ( n ulp ) (with sorting of eigenvalues). */ /* (10) if A is in Schur form (i.e. quasi-triangular form) */ /* (with sorting of eigenvalues). 
*/ /* (11) if eigenvalues = diagonal elements of the Schur form (S, T), */ /* i.e. test the maximum over j of D(j) where: */ /* |alpha(j) - S(j,j)| |beta(j) - T(j,j)| */ /* D(j) = ------------------------ + ----------------------- */ /* max(|alpha(j)|,|S(j,j)|) max(|beta(j)|,|T(j,j)|) */ /* (with sorting of eigenvalues). */ /* (12) if sorting worked and SDIM is the number of eigenvalues */ /* which were CELECTed. */ /* Test Matrices */ /* ============= */ /* The sizes of the test matrices are specified by an array */ /* NN(1:NSIZES); the value of each element NN(j) specifies one size. */ /* The "types" are specified by a logical array DOTYPE( 1:NTYPES ); if */ /* DOTYPE(j) is .TRUE., then matrix type "j" will be generated. */ /* Currently, the list of possible types is: */ /* (1) ( 0, 0 ) (a pair of zero matrices) */ /* (2) ( I, 0 ) (an identity and a zero matrix) */ /* (3) ( 0, I ) (an identity and a zero matrix) */ /* (4) ( I, I ) (a pair of identity matrices) */ /* t t */ /* (5) ( J , J ) (a pair of transposed Jordan blocks) */ /* t ( I 0 ) */ /* (6) ( X, Y ) where X = ( J 0 ) and Y = ( t ) */ /* ( 0 I ) ( 0 J ) */ /* and I is a k x k identity and J a (k+1)x(k+1) */ /* Jordan block; k=(N-1)/2 */ /* (7) ( D, I ) where D is diag( 0, 1,..., N-1 ) (a diagonal */ /* matrix with those diagonal entries.) */ /* (8) ( I, D ) */ /* (9) ( big*D, small*I ) where "big" is near overflow and small=1/big */ /* (10) ( small*D, big*I ) */ /* (11) ( big*I, small*D ) */ /* (12) ( small*I, big*D ) */ /* (13) ( big*D, big*I ) */ /* (14) ( small*D, small*I ) */ /* (15) ( D1, D2 ) where D1 is diag( 0, 0, 1, ..., N-3, 0 ) and */ /* D2 is diag( 0, N-3, N-4,..., 1, 0, 0 ) */ /* t t */ /* (16) Q ( J , J ) Z where Q and Z are random orthogonal matrices. */ /* (17) Q ( T1, T2 ) Z where T1 and T2 are upper triangular matrices */ /* with random O(1) entries above the diagonal */ /* and diagonal entries diag(T1) = */ /* ( 0, 0, 1, ..., N-3, 0 ) and diag(T2) = */ /* ( 0, N-3, N-4,..., 1, 0, 0 ) */ /* (18) Q ( T1, T2 ) Z diag(T1) = ( 0, 0, 1, 1, s, ..., s, 0 ) */ /* diag(T2) = ( 0, 1, 0, 1,..., 1, 0 ) */ /* s = machine precision. */ /* (19) Q ( T1, T2 ) Z diag(T1)=( 0,0,1,1, 1-d, ..., 1-(N-5)*d=s, 0 ) */ /* diag(T2) = ( 0, 1, 0, 1, ..., 1, 0 ) */ /* N-5 */ /* (20) Q ( T1, T2 ) Z diag(T1)=( 0, 0, 1, 1, a, ..., a =s, 0 ) */ /* diag(T2) = ( 0, 1, 0, 1, ..., 1, 0, 0 ) */ /* (21) Q ( T1, T2 ) Z diag(T1)=( 0, 0, 1, r1, r2, ..., r(N-4), 0 ) */ /* diag(T2) = ( 0, 1, 0, 1, ..., 1, 0, 0 ) */ /* where r1,..., r(N-4) are random. */ /* (22) Q ( big*T1, small*T2 ) Z diag(T1) = ( 0, 0, 1, ..., N-3, 0 ) */ /* diag(T2) = ( 0, 1, ..., 1, 0, 0 ) */ /* (23) Q ( small*T1, big*T2 ) Z diag(T1) = ( 0, 0, 1, ..., N-3, 0 ) */ /* diag(T2) = ( 0, 1, ..., 1, 0, 0 ) */ /* (24) Q ( small*T1, small*T2 ) Z diag(T1) = ( 0, 0, 1, ..., N-3, 0 ) */ /* diag(T2) = ( 0, 1, ..., 1, 0, 0 ) */ /* (25) Q ( big*T1, big*T2 ) Z diag(T1) = ( 0, 0, 1, ..., N-3, 0 ) */ /* diag(T2) = ( 0, 1, ..., 1, 0, 0 ) */ /* (26) Q ( T1, T2 ) Z where T1 and T2 are random upper-triangular */ /* matrices. */ /* Arguments */ /* ========= */ /* NSIZES (input) INTEGER */ /* The number of sizes of matrices to use. If it is zero, */ /* DDRGES does nothing. NSIZES >= 0. */ /* NN (input) INTEGER array, dimension (NSIZES) */ /* An array containing the sizes to be used for the matrices. */ /* Zero values will be skipped. NN >= 0. */ /* NTYPES (input) INTEGER */ /* The number of elements in DOTYPE. If it is zero, DDRGES */ /* does nothing. It must be at least zero. 
If it is MAXTYP+1 */ /* and NSIZES is 1, then an additional type, MAXTYP+1 is */ /* defined, which is to use whatever matrix is in A on input. */ /* This is only useful if DOTYPE(1:MAXTYP) is .FALSE. and */ /* DOTYPE(MAXTYP+1) is .TRUE. . */ /* DOTYPE (input) LOGICAL array, dimension (NTYPES) */ /* If DOTYPE(j) is .TRUE., then for each size in NN a */ /* matrix of that size and of type j will be generated. */ /* If NTYPES is smaller than the maximum number of types */ /* defined (PARAMETER MAXTYP), then types NTYPES+1 through */ /* MAXTYP will not be generated. If NTYPES is larger */ /* than MAXTYP, DOTYPE(MAXTYP+1) through DOTYPE(NTYPES) */ /* will be ignored. */ /* ISEED (input/output) INTEGER array, dimension (4) */ /* On entry ISEED specifies the seed of the random number */ /* generator. The array elements should be between 0 and 4095; */ /* if not they will be reduced mod 4096. Also, ISEED(4) must */ /* be odd. The random number generator uses a linear */ /* congruential sequence limited to small integers, and so */ /* should produce machine independent random numbers. The */ /* values of ISEED are changed on exit, and can be used in the */ /* next call to DDRGES to continue the same random number */ /* sequence. */ /* THRESH (input) DOUBLE PRECISION */ /* A test will count as "failed" if the "error", computed as */ /* described above, exceeds THRESH. Note that the error is */ /* scaled to be O(1), so THRESH should be a reasonably small */ /* multiple of 1, e.g., 10 or 100. In particular, it should */ /* not depend on the precision (single vs. double) or the size */ /* of the matrix. THRESH >= 0. */ /* NOUNIT (input) INTEGER */ /* The FORTRAN unit number for printing out error messages */ /* (e.g., if a routine returns IINFO not equal to 0.) */ /* A (input/workspace) COMPLEX*16 array, dimension(LDA, max(NN)) */ /* Used to hold the original A matrix. Used as input only */ /* if NTYPES=MAXTYP+1, DOTYPE(1:MAXTYP)=.FALSE., and */ /* DOTYPE(MAXTYP+1)=.TRUE. */ /* LDA (input) INTEGER */ /* The leading dimension of A, B, S, and T. */ /* It must be at least 1 and at least max( NN ). */ /* B (input/workspace) COMPLEX*16 array, dimension(LDA, max(NN)) */ /* Used to hold the original B matrix. Used as input only */ /* if NTYPES=MAXTYP+1, DOTYPE(1:MAXTYP)=.FALSE., and */ /* DOTYPE(MAXTYP+1)=.TRUE. */ /* S (workspace) COMPLEX*16 array, dimension (LDA, max(NN)) */ /* The Schur form matrix computed from A by ZGGES. On exit, S */ /* contains the Schur form matrix corresponding to the matrix */ /* in A. */ /* T (workspace) COMPLEX*16 array, dimension (LDA, max(NN)) */ /* The upper triangular matrix computed from B by ZGGES. */ /* Q (workspace) COMPLEX*16 array, dimension (LDQ, max(NN)) */ /* The (left) orthogonal matrix computed by ZGGES. */ /* LDQ (input) INTEGER */ /* The leading dimension of Q and Z. It must */ /* be at least 1 and at least max( NN ). */ /* Z (workspace) COMPLEX*16 array, dimension( LDQ, max(NN) ) */ /* The (right) orthogonal matrix computed by ZGGES. */ /* ALPHA (workspace) COMPLEX*16 array, dimension (max(NN)) */ /* BETA (workspace) COMPLEX*16 array, dimension (max(NN)) */ /* The generalized eigenvalues of (A,B) computed by ZGGES. */ /* ALPHA(k) / BETA(k) is the k-th generalized eigenvalue of A */ /* and B. */ /* WORK (workspace) COMPLEX*16 array, dimension (LWORK) */ /* LWORK (input) INTEGER */ /* The dimension of the array WORK. LWORK >= 3*N*N. */ /* RWORK (workspace) DOUBLE PRECISION array, dimension ( 8*N ) */ /* Real workspace. 
*/ /* RESULT (output) DOUBLE PRECISION array, dimension (15) */ /* The values computed by the tests described above. */ /* The values are currently limited to 1/ulp, to avoid overflow. */ /* BWORK (workspace) LOGICAL array, dimension (N) */ /* INFO (output) INTEGER */ /* = 0: successful exit */ /* < 0: if INFO = -i, the i-th argument had an illegal value. */ /* > 0: A routine returned an error code. INFO is the */ /* absolute value of the INFO value returned. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. Local Arrays .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Statement Functions .. */ /* .. */ /* .. Statement Function definitions .. */ /* .. */ /* .. Data statements .. */ /* Parameter adjustments */ --nn; --dotype; --iseed; t_dim1 = *lda; t_offset = 1 + t_dim1; t -= t_offset; s_dim1 = *lda; s_offset = 1 + s_dim1; s -= s_offset; b_dim1 = *lda; b_offset = 1 + b_dim1; b -= b_offset; a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; z_dim1 = *ldq; z_offset = 1 + z_dim1; z__ -= z_offset; q_dim1 = *ldq; q_offset = 1 + q_dim1; q -= q_offset; --alpha; --beta; --work; --rwork; --result; --bwork; /* Function Body */ /* .. */ /* .. Executable Statements .. */ /* Check for errors */ *info = 0; badnn = FALSE_; nmax = 1; i__1 = *nsizes; for (j = 1; j <= i__1; ++j) { /* Computing MAX */ i__2 = nmax, i__3 = nn[j]; nmax = max(i__2,i__3); if (nn[j] < 0) { badnn = TRUE_; } /* L10: */ } if (*nsizes < 0) { *info = -1; } else if (badnn) { *info = -2; } else if (*ntypes < 0) { *info = -3; } else if (*thresh < 0.) { *info = -6; } else if (*lda <= 1 || *lda < nmax) { *info = -9; } else if (*ldq <= 1 || *ldq < nmax) { *info = -14; } /* Compute workspace */ /* (Note: Comments in the code beginning "Workspace:" describe the */ /* minimal amount of workspace needed at that point in the code, */ /* as well as the preferred amount for good performance. */ /* NB refers to the optimal block size for the immediately */ /* following subroutine, as returned by ILAENV. */ minwrk = 1; if (*info == 0 && *lwork >= 1) { minwrk = nmax * 3 * nmax; /* Computing MAX */ i__1 = 1, i__2 = ilaenv_(&c__1, "ZGEQRF", " ", &nmax, &nmax, &c_n1, & c_n1), i__1 = max(i__1,i__2), i__2 = ilaenv_(&c__1, "ZUNMQR", "LC", &nmax, &nmax, &nmax, &c_n1), i__1 = max(i__1,i__2), i__2 = ilaenv_(& c__1, "ZUNGQR", " ", &nmax, &nmax, &nmax, &c_n1); nb = max(i__1,i__2); /* Computing MAX */ i__1 = nmax + nmax * nb, i__2 = nmax * 3 * nmax; maxwrk = max(i__1,i__2); work[1].r = (doublereal) maxwrk, work[1].i = 0.; } if (*lwork < minwrk) { *info = -19; } if (*info != 0) { i__1 = -(*info); xerbla_("ZDRGES", &i__1); return 0; } /* Quick return if possible */ if (*nsizes == 0 || *ntypes == 0) { return 0; } ulp = dlamch_("Precision"); safmin = dlamch_("Safe minimum"); safmin /= ulp; safmax = 1. / safmin; dlabad_(&safmin, &safmax); ulpinv = 1. / ulp; /* The values RMAGN(2:3) depend on N, see below. */ rmagn[0] = 0.; rmagn[1] = 1.; /* Loop over matrix sizes */ ntestt = 0; nerrs = 0; nmats = 0; i__1 = *nsizes; for (jsize = 1; jsize <= i__1; ++jsize) { n = nn[jsize]; n1 = max(1,n); rmagn[2] = safmax * ulp / (doublereal) n1; rmagn[3] = safmin * ulpinv * (doublereal) n1; if (*nsizes != 1) { mtypes = min(26,*ntypes); } else { mtypes = min(27,*ntypes); } /* Loop over matrix types */ i__2 = mtypes; for (jtype = 1; jtype <= i__2; ++jtype) { if (! 
dotype[jtype]) { goto L180; } ++nmats; ntest = 0; /* Save ISEED in case of an error. */ for (j = 1; j <= 4; ++j) { ioldsd[j - 1] = iseed[j]; /* L20: */ } /* Initialize RESULT */ for (j = 1; j <= 13; ++j) { result[j] = 0.; /* L30: */ } /* Generate test matrices A and B */ /* Description of control parameters: */ /* KZLASS: =1 means w/o rotation, =2 means w/ rotation, */ /* =3 means random. */ /* KATYPE: the "type" to be passed to ZLATM4 for computing A. */ /* KAZERO: the pattern of zeros on the diagonal for A: */ /* =1: ( xxx ), =2: (0, xxx ) =3: ( 0, 0, xxx, 0 ), */ /* =4: ( 0, xxx, 0, 0 ), =5: ( 0, 0, 1, xxx, 0 ), */ /* =6: ( 0, 1, 0, xxx, 0 ). (xxx means a string of */ /* non-zero entries.) */ /* KAMAGN: the magnitude of the matrix: =0: zero, =1: O(1), */ /* =2: large, =3: small. */ /* LASIGN: .TRUE. if the diagonal elements of A are to be */ /* multiplied by a random magnitude 1 number. */ /* KBTYPE, KBZERO, KBMAGN, LBSIGN: the same, but for B. */ /* KTRIAN: =0: don't fill in the upper triangle, =1: do. */ /* KZ1, KZ2, KADD: used to implement KAZERO and KBZERO. */ /* RMAGN: used to implement KAMAGN and KBMAGN. */ if (mtypes > 26) { goto L110; } iinfo = 0; if (kclass[jtype - 1] < 3) { /* Generate A (w/o rotation) */ if ((i__3 = katype[jtype - 1], abs(i__3)) == 3) { in = ((n - 1) / 2 << 1) + 1; if (in != n) { zlaset_("Full", &n, &n, &c_b1, &c_b1, &a[a_offset], lda); } } else { in = n; } zlatm4_(&katype[jtype - 1], &in, &kz1[kazero[jtype - 1] - 1], &kz2[kazero[jtype - 1] - 1], &lasign[jtype - 1], & rmagn[kamagn[jtype - 1]], &ulp, &rmagn[ktrian[jtype - 1] * kamagn[jtype - 1]], &c__2, &iseed[1], &a[ a_offset], lda); iadd = kadd[kazero[jtype - 1] - 1]; if (iadd > 0 && iadd <= n) { i__3 = iadd + iadd * a_dim1; i__4 = kamagn[jtype - 1]; a[i__3].r = rmagn[i__4], a[i__3].i = 0.; } /* Generate B (w/o rotation) */ if ((i__3 = kbtype[jtype - 1], abs(i__3)) == 3) { in = ((n - 1) / 2 << 1) + 1; if (in != n) { zlaset_("Full", &n, &n, &c_b1, &c_b1, &b[b_offset], lda); } } else { in = n; } zlatm4_(&kbtype[jtype - 1], &in, &kz1[kbzero[jtype - 1] - 1], &kz2[kbzero[jtype - 1] - 1], &lbsign[jtype - 1], & rmagn[kbmagn[jtype - 1]], &c_b29, &rmagn[ktrian[jtype - 1] * kbmagn[jtype - 1]], &c__2, &iseed[1], &b[ b_offset], lda); iadd = kadd[kbzero[jtype - 1] - 1]; if (iadd != 0 && iadd <= n) { i__3 = iadd + iadd * b_dim1; i__4 = kbmagn[jtype - 1]; b[i__3].r = rmagn[i__4], b[i__3].i = 0.; } if (kclass[jtype - 1] == 2 && n > 0) { /* Include rotations */ /* Generate Q, Z as Householder transformations times */ /* a diagonal matrix. 
*/ i__3 = n - 1; for (jc = 1; jc <= i__3; ++jc) { i__4 = n; for (jr = jc; jr <= i__4; ++jr) { i__5 = jr + jc * q_dim1; zlarnd_(&z__1, &c__3, &iseed[1]); q[i__5].r = z__1.r, q[i__5].i = z__1.i; i__5 = jr + jc * z_dim1; zlarnd_(&z__1, &c__3, &iseed[1]); z__[i__5].r = z__1.r, z__[i__5].i = z__1.i; /* L40: */ } i__4 = n + 1 - jc; zlarfg_(&i__4, &q[jc + jc * q_dim1], &q[jc + 1 + jc * q_dim1], &c__1, &work[jc]); i__4 = (n << 1) + jc; i__5 = jc + jc * q_dim1; d__2 = q[i__5].r; d__1 = d_sign(&c_b29, &d__2); work[i__4].r = d__1, work[i__4].i = 0.; i__4 = jc + jc * q_dim1; q[i__4].r = 1., q[i__4].i = 0.; i__4 = n + 1 - jc; zlarfg_(&i__4, &z__[jc + jc * z_dim1], &z__[jc + 1 + jc * z_dim1], &c__1, &work[n + jc]); i__4 = n * 3 + jc; i__5 = jc + jc * z_dim1; d__2 = z__[i__5].r; d__1 = d_sign(&c_b29, &d__2); work[i__4].r = d__1, work[i__4].i = 0.; i__4 = jc + jc * z_dim1; z__[i__4].r = 1., z__[i__4].i = 0.; /* L50: */ } zlarnd_(&z__1, &c__3, &iseed[1]); ctemp.r = z__1.r, ctemp.i = z__1.i; i__3 = n + n * q_dim1; q[i__3].r = 1., q[i__3].i = 0.; i__3 = n; work[i__3].r = 0., work[i__3].i = 0.; i__3 = n * 3; d__1 = z_abs(&ctemp); z__1.r = ctemp.r / d__1, z__1.i = ctemp.i / d__1; work[i__3].r = z__1.r, work[i__3].i = z__1.i; zlarnd_(&z__1, &c__3, &iseed[1]); ctemp.r = z__1.r, ctemp.i = z__1.i; i__3 = n + n * z_dim1; z__[i__3].r = 1., z__[i__3].i = 0.; i__3 = n << 1; work[i__3].r = 0., work[i__3].i = 0.; i__3 = n << 2; d__1 = z_abs(&ctemp); z__1.r = ctemp.r / d__1, z__1.i = ctemp.i / d__1; work[i__3].r = z__1.r, work[i__3].i = z__1.i; /* Apply the diagonal matrices */ i__3 = n; for (jc = 1; jc <= i__3; ++jc) { i__4 = n; for (jr = 1; jr <= i__4; ++jr) { i__5 = jr + jc * a_dim1; i__6 = (n << 1) + jr; d_cnjg(&z__3, &work[n * 3 + jc]); z__2.r = work[i__6].r * z__3.r - work[i__6].i * z__3.i, z__2.i = work[i__6].r * z__3.i + work[i__6].i * z__3.r; i__7 = jr + jc * a_dim1; z__1.r = z__2.r * a[i__7].r - z__2.i * a[i__7].i, z__1.i = z__2.r * a[i__7].i + z__2.i * a[ i__7].r; a[i__5].r = z__1.r, a[i__5].i = z__1.i; i__5 = jr + jc * b_dim1; i__6 = (n << 1) + jr; d_cnjg(&z__3, &work[n * 3 + jc]); z__2.r = work[i__6].r * z__3.r - work[i__6].i * z__3.i, z__2.i = work[i__6].r * z__3.i + work[i__6].i * z__3.r; i__7 = jr + jc * b_dim1; z__1.r = z__2.r * b[i__7].r - z__2.i * b[i__7].i, z__1.i = z__2.r * b[i__7].i + z__2.i * b[ i__7].r; b[i__5].r = z__1.r, b[i__5].i = z__1.i; /* L60: */ } /* L70: */ } i__3 = n - 1; zunm2r_("L", "N", &n, &n, &i__3, &q[q_offset], ldq, &work[ 1], &a[a_offset], lda, &work[(n << 1) + 1], & iinfo); if (iinfo != 0) { goto L100; } i__3 = n - 1; zunm2r_("R", "C", &n, &n, &i__3, &z__[z_offset], ldq, & work[n + 1], &a[a_offset], lda, &work[(n << 1) + 1], &iinfo); if (iinfo != 0) { goto L100; } i__3 = n - 1; zunm2r_("L", "N", &n, &n, &i__3, &q[q_offset], ldq, &work[ 1], &b[b_offset], lda, &work[(n << 1) + 1], & iinfo); if (iinfo != 0) { goto L100; } i__3 = n - 1; zunm2r_("R", "C", &n, &n, &i__3, &z__[z_offset], ldq, & work[n + 1], &b[b_offset], lda, &work[(n << 1) + 1], &iinfo); if (iinfo != 0) { goto L100; } } } else { /* Random matrices */ i__3 = n; for (jc = 1; jc <= i__3; ++jc) { i__4 = n; for (jr = 1; jr <= i__4; ++jr) { i__5 = jr + jc * a_dim1; i__6 = kamagn[jtype - 1]; zlarnd_(&z__2, &c__4, &iseed[1]); z__1.r = rmagn[i__6] * z__2.r, z__1.i = rmagn[i__6] * z__2.i; a[i__5].r = z__1.r, a[i__5].i = z__1.i; i__5 = jr + jc * b_dim1; i__6 = kbmagn[jtype - 1]; zlarnd_(&z__2, &c__4, &iseed[1]); z__1.r = rmagn[i__6] * z__2.r, z__1.i = rmagn[i__6] * z__2.i; b[i__5].r = z__1.r, b[i__5].i = z__1.i; /* L80: */ } /* 
L90: */ } } L100: if (iinfo != 0) { io___41.ciunit = *nounit; s_wsfe(&io___41); do_fio(&c__1, "Generator", (ftnlen)9); do_fio(&c__1, (char *)&iinfo, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer)); do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer)); e_wsfe(); *info = abs(iinfo); return 0; } L110: for (i__ = 1; i__ <= 13; ++i__) { result[i__] = -1.; /* L120: */ } /* Test with and without sorting of eigenvalues */ for (isort = 0; isort <= 1; ++isort) { if (isort == 0) { *(unsigned char *)sort = 'N'; rsub = 0; } else { *(unsigned char *)sort = 'S'; rsub = 5; } /* Call ZGGES to compute H, T, Q, Z, alpha, and beta. */ zlacpy_("Full", &n, &n, &a[a_offset], lda, &s[s_offset], lda); zlacpy_("Full", &n, &n, &b[b_offset], lda, &t[t_offset], lda); ntest = rsub + 1 + isort; result[rsub + 1 + isort] = ulpinv; zgges_("V", "V", sort, (L_fp)zlctes_, &n, &s[s_offset], lda, & t[t_offset], lda, &sdim, &alpha[1], &beta[1], &q[ q_offset], ldq, &z__[z_offset], ldq, &work[1], lwork, &rwork[1], &bwork[1], &iinfo); if (iinfo != 0 && iinfo != n + 2) { result[rsub + 1 + isort] = ulpinv; io___47.ciunit = *nounit; s_wsfe(&io___47); do_fio(&c__1, "ZGGES", (ftnlen)5); do_fio(&c__1, (char *)&iinfo, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer)); do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer)) ; e_wsfe(); *info = abs(iinfo); goto L160; } ntest = rsub + 4; /* Do tests 1--4 (or tests 7--9 when reordering ) */ if (isort == 0) { zget51_(&c__1, &n, &a[a_offset], lda, &s[s_offset], lda, & q[q_offset], ldq, &z__[z_offset], ldq, &work[1], & rwork[1], &result[1]); zget51_(&c__1, &n, &b[b_offset], lda, &t[t_offset], lda, & q[q_offset], ldq, &z__[z_offset], ldq, &work[1], & rwork[1], &result[2]); } else { zget54_(&n, &a[a_offset], lda, &b[b_offset], lda, &s[ s_offset], lda, &t[t_offset], lda, &q[q_offset], ldq, &z__[z_offset], ldq, &work[1], &result[rsub + 2]); } zget51_(&c__3, &n, &b[b_offset], lda, &t[t_offset], lda, &q[ q_offset], ldq, &q[q_offset], ldq, &work[1], &rwork[1] , &result[rsub + 3]); zget51_(&c__3, &n, &b[b_offset], lda, &t[t_offset], lda, &z__[ z_offset], ldq, &z__[z_offset], ldq, &work[1], &rwork[ 1], &result[rsub + 4]); /* Do test 5 and 6 (or Tests 10 and 11 when reordering): */ /* check Schur form of A and compare eigenvalues with */ /* diagonals. 
*/ ntest = rsub + 6; temp1 = 0.; i__3 = n; for (j = 1; j <= i__3; ++j) { ilabad = FALSE_; i__4 = j; i__5 = j + j * s_dim1; z__2.r = alpha[i__4].r - s[i__5].r, z__2.i = alpha[i__4] .i - s[i__5].i; z__1.r = z__2.r, z__1.i = z__2.i; i__6 = j; i__7 = j + j * t_dim1; z__4.r = beta[i__6].r - t[i__7].r, z__4.i = beta[i__6].i - t[i__7].i; z__3.r = z__4.r, z__3.i = z__4.i; /* Computing MAX */ i__8 = j; i__9 = j + j * s_dim1; d__13 = safmin, d__14 = (d__1 = alpha[i__8].r, abs(d__1)) + (d__2 = d_imag(&alpha[j]), abs(d__2)), d__13 = max(d__13,d__14), d__14 = (d__3 = s[i__9].r, abs( d__3)) + (d__4 = d_imag(&s[j + j * s_dim1]), abs( d__4)); /* Computing MAX */ i__10 = j; i__11 = j + j * t_dim1; d__15 = safmin, d__16 = (d__5 = beta[i__10].r, abs(d__5)) + (d__6 = d_imag(&beta[j]), abs(d__6)), d__15 = max(d__15,d__16), d__16 = (d__7 = t[i__11].r, abs( d__7)) + (d__8 = d_imag(&t[j + j * t_dim1]), abs( d__8)); temp2 = (((d__9 = z__1.r, abs(d__9)) + (d__10 = d_imag(& z__1), abs(d__10))) / max(d__13,d__14) + ((d__11 = z__3.r, abs(d__11)) + (d__12 = d_imag(&z__3), abs(d__12))) / max(d__15,d__16)) / ulp; if (j < n) { i__4 = j + 1 + j * s_dim1; if (s[i__4].r != 0. || s[i__4].i != 0.) { ilabad = TRUE_; result[rsub + 5] = ulpinv; } } if (j > 1) { i__4 = j + (j - 1) * s_dim1; if (s[i__4].r != 0. || s[i__4].i != 0.) { ilabad = TRUE_; result[rsub + 5] = ulpinv; } } temp1 = max(temp1,temp2); if (ilabad) { io___51.ciunit = *nounit; s_wsfe(&io___51); do_fio(&c__1, (char *)&j, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer)) ; do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof( integer)); e_wsfe(); } /* L130: */ } result[rsub + 6] = temp1; if (isort >= 1) { /* Do test 12 */ ntest = 12; result[12] = 0.; knteig = 0; i__3 = n; for (i__ = 1; i__ <= i__3; ++i__) { if (zlctes_(&alpha[i__], &beta[i__])) { ++knteig; } /* L140: */ } if (sdim != knteig) { result[13] = ulpinv; } } /* L150: */ } /* End of Loop -- Check for RESULT(j) > THRESH */ L160: ntestt += ntest; /* Print out tests which fail. */ i__3 = ntest; for (jr = 1; jr <= i__3; ++jr) { if (result[jr] >= *thresh) { /* If this is the first test to fail, */ /* print a header to the data file. 
*/ if (nerrs == 0) { io___53.ciunit = *nounit; s_wsfe(&io___53); do_fio(&c__1, "ZGS", (ftnlen)3); e_wsfe(); /* Matrix types */ io___54.ciunit = *nounit; s_wsfe(&io___54); e_wsfe(); io___55.ciunit = *nounit; s_wsfe(&io___55); e_wsfe(); io___56.ciunit = *nounit; s_wsfe(&io___56); do_fio(&c__1, "Unitary", (ftnlen)7); e_wsfe(); /* Tests performed */ io___57.ciunit = *nounit; s_wsfe(&io___57); do_fio(&c__1, "unitary", (ftnlen)7); do_fio(&c__1, "'", (ftnlen)1); do_fio(&c__1, "transpose", (ftnlen)9); for (j = 1; j <= 8; ++j) { do_fio(&c__1, "'", (ftnlen)1); } e_wsfe(); } ++nerrs; if (result[jr] < 1e4) { io___58.ciunit = *nounit; s_wsfe(&io___58); do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer)) ; do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof( integer)); do_fio(&c__1, (char *)&jr, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&result[jr], (ftnlen)sizeof( doublereal)); e_wsfe(); } else { io___59.ciunit = *nounit; s_wsfe(&io___59); do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer)) ; do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof( integer)); do_fio(&c__1, (char *)&jr, (ftnlen)sizeof(integer)); do_fio(&c__1, (char *)&result[jr], (ftnlen)sizeof( doublereal)); e_wsfe(); } } /* L170: */ } L180: ; } /* L190: */ } /* Summary */ alasvm_("ZGS", nounit, &nerrs, &ntestt, &c__0); work[1].r = (doublereal) maxwrk, work[1].i = 0.; return 0; /* End of ZDRGES */ } /* zdrges_ */
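/* ------------------------------------------------------------------------ */
/* Illustrative sketch only (not part of ZDRGES or LAPACK).  The f2c-       */
/* expanded expression for temp2 above is hard to read, so the small,       */
/* self-contained helper below restates in plain C the per-eigenvalue       */
/* ratio whose maximum over j becomes RESULT(6) (RESULT(11) when            */
/* reordering): the relative difference between the generalized eigenvalue  */
/* pair (alpha(j), beta(j)) returned by ZGGES and the diagonal entries      */
/* S(j,j), T(j,j) of the generalized Schur form, divided by ulp.  All       */
/* names here (dcomplex_sketch, cabs1_sketch, max3_sketch,                  */
/* eig_vs_diag_ratio) are hypothetical and exist only for this sketch; the  */
/* absolute value used is |Re z| + |Im z|, matching the abs(real) +         */
/* abs(imag) pattern in the driver.                                         */

#include <math.h>

typedef struct { double r, i; } dcomplex_sketch;   /* stand-in for doublecomplex */

/* |Re z| + |Im z|, the absolute value used in the driver's comparison */
static double cabs1_sketch(dcomplex_sketch z)
{
    return fabs(z.r) + fabs(z.i);
}

/* maximum of three doubles */
static double max3_sketch(double a, double b, double c)
{
    double m = a > b ? a : b;
    return m > c ? m : c;
}

/* One term of the maximum over j accumulated into RESULT(rsub+6):
     ( |alpha_j - S(j,j)| / max(safmin, |alpha_j|, |S(j,j)|)
     + |beta_j  - T(j,j)| / max(safmin, |beta_j|,  |T(j,j)|) ) / ulp        */
static double eig_vs_diag_ratio(dcomplex_sketch alpha_j, dcomplex_sketch s_jj,
                                dcomplex_sketch beta_j,  dcomplex_sketch t_jj,
                                double safmin, double ulp)
{
    dcomplex_sketch da, db;

    da.r = alpha_j.r - s_jj.r;  da.i = alpha_j.i - s_jj.i;
    db.r = beta_j.r  - t_jj.r;  db.i = beta_j.i  - t_jj.i;

    return (cabs1_sketch(da)
                / max3_sketch(safmin, cabs1_sketch(alpha_j), cabs1_sketch(s_jj))
          + cabs1_sketch(db)
                / max3_sketch(safmin, cabs1_sketch(beta_j),  cabs1_sketch(t_jj)))
          / ulp;
}

/* A caller of this sketch would take the maximum of eig_vs_diag_ratio over */
/* j = 1..n and flag a failure when the result reaches THRESH, mirroring    */
/* the result[jr] >= *thresh check in the driver's reporting loop.          */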