Example #1
0
/* Subroutine */ int ztgex2_(logical *wantq, logical *wantz, integer *n, 
	doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, 
	doublecomplex *q, integer *ldq, doublecomplex *z__, integer *ldz, 
	integer *j1, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
	    z_offset, i__1, i__2, i__3;
    doublereal d__1;
    doublecomplex z__1, z__2, z__3;

    /* Builtin functions */
    double sqrt(doublereal), z_abs(doublecomplex *);
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    static doublecomplex f, g;
    static integer i__, m;
    static doublecomplex s[4]	/* was [2][2] */, t[4]	/* was [2][2] */;
    static doublereal cq, sa, sb, cz;
    static doublecomplex sq;
    static doublereal ss, ws;
    static doublecomplex sz;
    static doublereal eps, sum;
    static logical weak;
    static doublecomplex cdum, work[8];
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    static doublereal scale;
    extern doublereal dlamch_(char *, ftnlen);
    static logical dtrong;
    static doublereal thresh;
    extern /* Subroutine */ int zlacpy_(char *, integer *, integer *, 
	    doublecomplex *, integer *, doublecomplex *, integer *, ftnlen), 
	    zlartg_(doublecomplex *, doublecomplex *, doublereal *, 
	    doublecomplex *, doublecomplex *);
    static doublereal smlnum;
    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
	     doublereal *, doublereal *);


/*  -- LAPACK auxiliary routine (version 3.0) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */
/*     Courant Institute, Argonne National Lab, and Rice University */
/*     June 30, 1999 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZTGEX2 swaps adjacent diagonal 1 by 1 blocks (A11,B11) and (A22,B22) */
/*  in an upper triangular matrix pair (A, B) by a unitary equivalence */
/*  transformation. */

/*  (A, B) must be in generalized Schur canonical form, that is, A and */
/*  B are both upper triangular. */

/*  Optionally, the matrices Q and Z of generalized Schur vectors are */
/*  updated. */

/*         Q(in) * A(in) * Z(in)' = Q(out) * A(out) * Z(out)' */
/*         Q(in) * B(in) * Z(in)' = Q(out) * B(out) * Z(out)' */


/*  Arguments */
/*  ========= */

/*  WANTQ   (input) LOGICAL */
/*          .TRUE. : update the left transformation matrix Q; */
/*          .FALSE.: do not update Q. */

/*  WANTZ   (input) LOGICAL */
/*          .TRUE. : update the right transformation matrix Z; */
/*          .FALSE.: do not update Z. */

/*  N       (input) INTEGER */
/*          The order of the matrices A and B. N >= 0. */

/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
/*          On entry, the matrix A in the pair (A, B). */
/*          On exit, the updated matrix A. */

/*  LDA     (input)  INTEGER */
/*          The leading dimension of the array A. LDA >= max(1,N). */

/*  B       (input/output) COMPLEX*16 array, dimension (LDB,N) */
/*          On entry, the matrix B in the pair (A, B). */
/*          On exit, the updated matrix B. */

/*  LDB     (input)  INTEGER */
/*          The leading dimension of the array B. LDB >= max(1,N). */

/*  Q       (input/output) COMPLEX*16 array, dimension (LDQ,N) */
/*          If WANTQ = .TRUE., on entry, the unitary matrix Q. On exit, */
/*          the updated matrix Q. */
/*          Not referenced if WANTQ = .FALSE.. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. LDQ >= 1; */
/*          If WANTQ = .TRUE., LDQ >= N. */

/*  Z       (input/output) COMPLEX*16 array, dimension (LDZ,N) */
/*          If WANTZ = .TRUE., on entry, the unitary matrix Z. On exit, */
/*          the updated matrix Z. */
/*          Not referenced if WANTZ = .FALSE.. */

/*  LDZ     (input) INTEGER */
/*          The leading dimension of the array Z. LDZ >= 1; */
/*          If WANTZ = .TRUE., LDZ >= N. */

/*  J1      (input) INTEGER */
/*          The index to the first block (A11, B11). */

/*  INFO    (output) INTEGER */
/*           =0:  Successful exit. */
/*           =1:  The transformed matrix pair (A, B) would be too far */
/*                from generalized Schur form; the problem is ill- */
/*                conditioned. (A, B) may have been partially reordered, */
/*                and ILST points to the first row of the current */
/*                position of the block being moved. */


/*  Further Details */
/*  =============== */

/*  Based on contributions by */
/*     Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
/*     Umea University, S-901 87 Umea, Sweden. */

/*  In the current code both weak and strong stability tests are */
/*  performed. The user can omit the strong stability test by changing */
/*  the internal logical parameter WANDS to .FALSE.. See ref. [2] for */
/*  details. */

/*  [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
/*      Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
/*      M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
/*      Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */

/*  [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
/*      Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
/*      Estimation: Theory, Algorithms and Software, Report UMINF-94.04, */
/*      Department of Computing Science, Umea University, S-901 87 Umea, */
/*      Sweden, 1994. Also as LAPACK Working Note 87. To appear in */
/*      Numerical Algorithms, 1996. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;

    /* Function Body */
    *info = 0;

/*     Quick return if possible */

    if (*n <= 1) {
	return 0;
    }

    m = 2;
    weak = FALSE_;
    dtrong = FALSE_;

/*     Make a local copy of selected block in (A, B) */

    zlacpy_("Full", &m, &m, &a[*j1 + *j1 * a_dim1], lda, s, &c__2, (ftnlen)4);
    zlacpy_("Full", &m, &m, &b[*j1 + *j1 * b_dim1], ldb, t, &c__2, (ftnlen)4);

/*     Compute the threshold for testing the acceptance of swapping. */

    eps = dlamch_("P", (ftnlen)1);
    smlnum = dlamch_("S", (ftnlen)1) / eps;
    scale = 0.;
    sum = 1.;
    zlacpy_("Full", &m, &m, s, &c__2, work, &m, (ftnlen)4);
    zlacpy_("Full", &m, &m, t, &c__2, &work[m * m], &m, (ftnlen)4);
    i__1 = (m << 1) * m;
    zlassq_(&i__1, work, &c__1, &scale, &sum);
    sa = scale * sqrt(sum);
/* Computing MAX */
    d__1 = eps * 10. * sa;
    thresh = max(d__1,smlnum);

/*     Compute unitary QL and RQ that swap 1-by-1 and 1-by-1 blocks */
/*     using Givens rotations and perform the swap tentatively. */

    z__2.r = s[3].r * t[0].r - s[3].i * t[0].i, z__2.i = s[3].r * t[0].i + s[
	    3].i * t[0].r;
    z__3.r = t[3].r * s[0].r - t[3].i * s[0].i, z__3.i = t[3].r * s[0].i + t[
	    3].i * s[0].r;
    z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
    f.r = z__1.r, f.i = z__1.i;
    z__2.r = s[3].r * t[2].r - s[3].i * t[2].i, z__2.i = s[3].r * t[2].i + s[
	    3].i * t[2].r;
    z__3.r = t[3].r * s[2].r - t[3].i * s[2].i, z__3.i = t[3].r * s[2].i + t[
	    3].i * s[2].r;
    z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
    g.r = z__1.r, g.i = z__1.i;
    sa = z_abs(&s[3]);
    sb = z_abs(&t[3]);
    zlartg_(&g, &f, &cz, &sz, &cdum);
    z__1.r = -sz.r, z__1.i = -sz.i;
    sz.r = z__1.r, sz.i = z__1.i;
    d_cnjg(&z__1, &sz);
    zrot_(&c__2, s, &c__1, &s[2], &c__1, &cz, &z__1);
    d_cnjg(&z__1, &sz);
    zrot_(&c__2, t, &c__1, &t[2], &c__1, &cz, &z__1);
    if (sa >= sb) {
	zlartg_(s, &s[1], &cq, &sq, &cdum);
    } else {
	zlartg_(t, &t[1], &cq, &sq, &cdum);
    }
    zrot_(&c__2, s, &c__2, &s[1], &c__2, &cq, &sq);
    zrot_(&c__2, t, &c__2, &t[1], &c__2, &cq, &sq);

/*     Weak stability test: |S21| + |T21| <= O(EPS F-norm((S, T))) */

    ws = z_abs(&s[1]) + z_abs(&t[1]);
    weak = ws <= thresh;
    if (! weak) {
	goto L20;
    }

    if (TRUE_) {

/*        Strong stability test: */
/*           F-norm((A-QL'*S*QR, B-QL'*T*QR)) <= O(EPS*F-norm((A, B))) */

	zlacpy_("Full", &m, &m, s, &c__2, work, &m, (ftnlen)4);
	zlacpy_("Full", &m, &m, t, &c__2, &work[m * m], &m, (ftnlen)4);
	d_cnjg(&z__2, &sz);
	z__1.r = -z__2.r, z__1.i = -z__2.i;
	zrot_(&c__2, work, &c__1, &work[2], &c__1, &cz, &z__1);
	d_cnjg(&z__2, &sz);
	z__1.r = -z__2.r, z__1.i = -z__2.i;
	zrot_(&c__2, &work[4], &c__1, &work[6], &c__1, &cz, &z__1);
	z__1.r = -sq.r, z__1.i = -sq.i;
	zrot_(&c__2, work, &c__2, &work[1], &c__2, &cq, &z__1);
	z__1.r = -sq.r, z__1.i = -sq.i;
	zrot_(&c__2, &work[4], &c__2, &work[5], &c__2, &cq, &z__1);
	for (i__ = 1; i__ <= 2; ++i__) {
	    i__1 = i__ - 1;
	    i__2 = i__ - 1;
	    i__3 = *j1 + i__ - 1 + *j1 * a_dim1;
	    z__1.r = work[i__2].r - a[i__3].r, z__1.i = work[i__2].i - a[i__3]
		    .i;
	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
	    i__1 = i__ + 1;
	    i__2 = i__ + 1;
	    i__3 = *j1 + i__ - 1 + (*j1 + 1) * a_dim1;
	    z__1.r = work[i__2].r - a[i__3].r, z__1.i = work[i__2].i - a[i__3]
		    .i;
	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
	    i__1 = i__ + 3;
	    i__2 = i__ + 3;
	    i__3 = *j1 + i__ - 1 + *j1 * b_dim1;
	    z__1.r = work[i__2].r - b[i__3].r, z__1.i = work[i__2].i - b[i__3]
		    .i;
	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
	    i__1 = i__ + 5;
	    i__2 = i__ + 5;
	    i__3 = *j1 + i__ - 1 + (*j1 + 1) * b_dim1;
	    z__1.r = work[i__2].r - b[i__3].r, z__1.i = work[i__2].i - b[i__3]
		    .i;
	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
/* L10: */
	}
	scale = 0.;
	sum = 1.;
	i__1 = (m << 1) * m;
	zlassq_(&i__1, work, &c__1, &scale, &sum);
	ss = scale * sqrt(sum);
	dtrong = ss <= thresh;
	if (! dtrong) {
	    goto L20;
	}
    }

/*     If the swap is accepted ("weakly" and "strongly"), apply the */
/*     equivalence transformations to the original matrix pair (A,B) */

    i__1 = *j1 + 1;
    d_cnjg(&z__1, &sz);
    zrot_(&i__1, &a[*j1 * a_dim1 + 1], &c__1, &a[(*j1 + 1) * a_dim1 + 1], &
	    c__1, &cz, &z__1);
    i__1 = *j1 + 1;
    d_cnjg(&z__1, &sz);
    zrot_(&i__1, &b[*j1 * b_dim1 + 1], &c__1, &b[(*j1 + 1) * b_dim1 + 1], &
	    c__1, &cz, &z__1);
    i__1 = *n - *j1 + 1;
    zrot_(&i__1, &a[*j1 + *j1 * a_dim1], lda, &a[*j1 + 1 + *j1 * a_dim1], lda,
	     &cq, &sq);
    i__1 = *n - *j1 + 1;
    zrot_(&i__1, &b[*j1 + *j1 * b_dim1], ldb, &b[*j1 + 1 + *j1 * b_dim1], ldb,
	     &cq, &sq);

/*     Set  N1 by N2 (2,1) blocks to 0 */

    i__1 = *j1 + 1 + *j1 * a_dim1;
    a[i__1].r = 0., a[i__1].i = 0.;
    i__1 = *j1 + 1 + *j1 * b_dim1;
    b[i__1].r = 0., b[i__1].i = 0.;

/*     Accumulate transformations into Q and Z if requested. */

    if (*wantz) {
	d_cnjg(&z__1, &sz);
	zrot_(n, &z__[*j1 * z_dim1 + 1], &c__1, &z__[(*j1 + 1) * z_dim1 + 1], 
		&c__1, &cz, &z__1);
    }
    if (*wantq) {
	d_cnjg(&z__1, &sq);
	zrot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[(*j1 + 1) * q_dim1 + 1], &
		c__1, &cq, &z__1);
    }

/*     Exit with INFO = 0 if swap was successfully performed. */

    return 0;

/*     Exit with INFO = 1 if swap was rejected. */

L20:
    *info = 1;
    return 0;

/*     End of ZTGEX2 */

} /* ztgex2_ */
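
/*  Illustrative usage sketch (added; not part of the f2c translation): a */
/*  minimal, hedged example of calling ztgex2_ to swap the two generalized */
/*  eigenvalues of a 2-by-2 pair already in generalized Schur form.  It */
/*  assumes the f2c types (integer, logical, doublecomplex) and the TRUE_ */
/*  macro from f2c.h, and column-major storage with leading dimension 2. */

static void example_ztgex2(void)
{
    integer n = 2, ld = 2, j1 = 1, info = 0;
    logical wantq = TRUE_, wantz = TRUE_;
    /* A = diag(1, 2), B = I, Q = Z = I, all stored column by column */
    doublecomplex a[4] = { {1.,0.}, {0.,0.}, {0.,0.}, {2.,0.} };
    doublecomplex b[4] = { {1.,0.}, {0.,0.}, {0.,0.}, {1.,0.} };
    doublecomplex q[4] = { {1.,0.}, {0.,0.}, {0.,0.}, {1.,0.} };
    doublecomplex z[4] = { {1.,0.}, {0.,0.}, {0.,0.}, {1.,0.} };

    ztgex2_(&wantq, &wantz, &n, a, &ld, b, &ld, q, &ld, z, &ld, &j1, &info);
    /* On success info == 0 and the generalized eigenvalues A(1,1)/B(1,1) */
    /* and A(2,2)/B(2,2) have exchanged places on the diagonal. */
}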
Example #2
0
/* Subroutine */ int zgbbrd_(char *vect, integer *m, integer *n, integer *ncc, 
	 integer *kl, integer *ku, doublecomplex *ab, integer *ldab, 
	doublereal *d__, doublereal *e, doublecomplex *q, integer *ldq, 
	doublecomplex *pt, integer *ldpt, doublecomplex *c__, integer *ldc, 
	doublecomplex *work, doublereal *rwork, integer *info)
{
    /* System generated locals */
    integer ab_dim1, ab_offset, c_dim1, c_offset, pt_dim1, pt_offset, q_dim1, 
	    q_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
    doublecomplex z__1, z__2, z__3;

    /* Local variables */
    integer i__, j, l;
    doublecomplex t;
    integer j1, j2, kb;
    doublecomplex ra, rb;
    doublereal rc;
    integer kk, ml, nr, mu;
    doublecomplex rs;
    integer kb1, ml0, mu0, klm, kun, nrt, klu1, inca;
    doublereal abst;
    logical wantb, wantc;
    integer minmn;
    logical wantq;
    logical wantpt;

/*  -- LAPACK routine (version 3.2) -- */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  ZGBBRD reduces a complex general m-by-n band matrix A to real upper */
/*  bidiagonal form B by a unitary transformation: Q' * A * P = B. */

/*  The routine computes B, and optionally forms Q or P', or computes */
/*  Q'*C for a given matrix C. */

/*  Arguments */
/*  ========= */

/*  VECT    (input) CHARACTER*1 */
/*          Specifies whether or not the matrices Q and P' are to be */
/*          formed. */
/*          = 'N': do not form Q or P'; */
/*          = 'Q': form Q only; */
/*          = 'P': form P' only; */
/*          = 'B': form both. */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A.  M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A.  N >= 0. */

/*  NCC     (input) INTEGER */
/*          The number of columns of the matrix C.  NCC >= 0. */

/*  KL      (input) INTEGER */
/*          The number of subdiagonals of the matrix A. KL >= 0. */

/*  KU      (input) INTEGER */
/*          The number of superdiagonals of the matrix A. KU >= 0. */

/*  AB      (input/output) COMPLEX*16 array, dimension (LDAB,N) */
/*          On entry, the m-by-n band matrix A, stored in rows 1 to */
/*          KL+KU+1. The j-th column of A is stored in the j-th column of */
/*          the array AB as follows: */
/*          AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl). */
/*          On exit, A is overwritten by values generated during the */
/*          reduction. */

/*  LDAB    (input) INTEGER */
/*          The leading dimension of the array AB. LDAB >= KL+KU+1. */

/*  D       (output) DOUBLE PRECISION array, dimension (min(M,N)) */
/*          The diagonal elements of the bidiagonal matrix B. */

/*  E       (output) DOUBLE PRECISION array, dimension (min(M,N)-1) */
/*          The superdiagonal elements of the bidiagonal matrix B. */

/*  Q       (output) COMPLEX*16 array, dimension (LDQ,M) */
/*          If VECT = 'Q' or 'B', the m-by-m unitary matrix Q. */
/*          If VECT = 'N' or 'P', the array Q is not referenced. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. */
/*          LDQ >= max(1,M) if VECT = 'Q' or 'B'; LDQ >= 1 otherwise. */

/*  PT      (output) COMPLEX*16 array, dimension (LDPT,N) */
/*          If VECT = 'P' or 'B', the n-by-n unitary matrix P'. */
/*          If VECT = 'N' or 'Q', the array PT is not referenced. */

/*  LDPT    (input) INTEGER */
/*          The leading dimension of the array PT. */
/*          LDPT >= max(1,N) if VECT = 'P' or 'B'; LDPT >= 1 otherwise. */

/*  C       (input/output) COMPLEX*16 array, dimension (LDC,NCC) */
/*          On entry, an m-by-ncc matrix C. */
/*          On exit, C is overwritten by Q'*C. */
/*          C is not referenced if NCC = 0. */

/*  LDC     (input) INTEGER */
/*          The leading dimension of the array C. */
/*          LDC >= max(1,M) if NCC > 0; LDC >= 1 if NCC = 0. */

/*  WORK    (workspace) COMPLEX*16 array, dimension (max(M,N)) */

/*  RWORK   (workspace) DOUBLE PRECISION array, dimension (max(M,N)) */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit. */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */

/*  ===================================================================== */
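
/*  Illustrative note (added; not part of the original comments): as a small */
/*  worked example of the band storage above, take M = N = 4, KL = 1, KU = 1, */
/*  so that LDAB >= 3 and column j of AB holds the diagonals of column j of A: */

/*     row 1 of AB:    *      A(1,2)  A(2,3)  A(3,4)   (superdiagonal) */
/*     row 2 of AB:  A(1,1)   A(2,2)  A(3,3)  A(4,4)   (diagonal) */
/*     row 3 of AB:  A(2,1)   A(3,2)  A(4,3)    *      (subdiagonal) */

/*  where "*" marks entries that are never referenced.  This is exactly */
/*  AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku) <= i <= min(m,j+kl); a hedged */
/*  packing sketch in C appears after the end of this routine. */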

/*     Test the input parameters */

    /* Parameter adjustments */
    ab_dim1 = *ldab;
    ab_offset = 1 + ab_dim1;
    ab -= ab_offset;
    --d__;
    --e;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    pt_dim1 = *ldpt;
    pt_offset = 1 + pt_dim1;
    pt -= pt_offset;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1;
    c__ -= c_offset;
    --work;
    --rwork;

    /* Function Body */
    wantb = lsame_(vect, "B");
    wantq = lsame_(vect, "Q") || wantb;
    wantpt = lsame_(vect, "P") || wantb;
    wantc = *ncc > 0;
    klu1 = *kl + *ku + 1;
    *info = 0;
    if (! wantq && ! wantpt && ! lsame_(vect, "N")) {
	*info = -1;
    } else if (*m < 0) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*ncc < 0) {
	*info = -4;
    } else if (*kl < 0) {
	*info = -5;
    } else if (*ku < 0) {
	*info = -6;
    } else if (*ldab < klu1) {
	*info = -8;
    } else if (*ldq < 1 || wantq && *ldq < max(1,*m)) {
	*info = -12;
    } else if (*ldpt < 1 || wantpt && *ldpt < max(1,*n)) {
	*info = -14;
    } else if (*ldc < 1 || wantc && *ldc < max(1,*m)) {
	*info = -16;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZGBBRD", &i__1);
	return 0;
    }

/*     Initialize Q and P' to the unit matrix, if needed */

    if (wantq) {
	zlaset_("Full", m, m, &c_b1, &c_b2, &q[q_offset], ldq);
    }
    if (wantpt) {
	zlaset_("Full", n, n, &c_b1, &c_b2, &pt[pt_offset], ldpt);
    }

/*     Quick return if possible. */

    if (*m == 0 || *n == 0) {
	return 0;
    }

    minmn = min(*m,*n);

    if (*kl + *ku > 1) {

/*        Reduce to upper bidiagonal form if KU > 0; if KU = 0, reduce */
/*        first to lower bidiagonal form and then transform to upper */
/*        bidiagonal */

	if (*ku > 0) {
	    ml0 = 1;
	    mu0 = 2;
	} else {
	    ml0 = 2;
	    mu0 = 1;
	}

/*        Wherever possible, plane rotations are generated and applied in */
/*        vector operations of length NR over the index set J1:J2:KLU1. */

/*        The complex sines of the plane rotations are stored in WORK, */
/*        and the real cosines in RWORK. */

/* Computing MIN */
	i__1 = *m - 1;
	klm = min(i__1,*kl);
/* Computing MIN */
	i__1 = *n - 1;
	kun = min(i__1,*ku);
	kb = klm + kun;
	kb1 = kb + 1;
	inca = kb1 * *ldab;
	nr = 0;
	j1 = klm + 2;
	j2 = 1 - kun;

	i__1 = minmn;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Reduce i-th column and i-th row of matrix to bidiagonal form */

	    ml = klm + 1;
	    mu = kun + 1;
	    i__2 = kb;
	    for (kk = 1; kk <= i__2; ++kk) {
		j1 += kb;
		j2 += kb;

/*              generate plane rotations to annihilate nonzero elements */
/*              which have been created below the band */

		if (nr > 0) {
		    zlargv_(&nr, &ab[klu1 + (j1 - klm - 1) * ab_dim1], &inca, 
			    &work[j1], &kb1, &rwork[j1], &kb1);
		}

/*              apply plane rotations from the left */

		i__3 = kb;
		for (l = 1; l <= i__3; ++l) {
		    if (j2 - klm + l - 1 > *n) {
			nrt = nr - 1;
		    } else {
			nrt = nr;
		    }
		    if (nrt > 0) {
			zlartv_(&nrt, &ab[klu1 - l + (j1 - klm + l - 1) * 
				ab_dim1], &inca, &ab[klu1 - l + 1 + (j1 - klm 
				+ l - 1) * ab_dim1], &inca, &rwork[j1], &work[
				j1], &kb1);
		    }
		}

		if (ml > ml0) {
		    if (ml <= *m - i__ + 1) {

/*                    generate plane rotation to annihilate a(i+ml-1,i) */
/*                    within the band, and apply rotation from the left */

			zlartg_(&ab[*ku + ml - 1 + i__ * ab_dim1], &ab[*ku + 
				ml + i__ * ab_dim1], &rwork[i__ + ml - 1], &
				work[i__ + ml - 1], &ra);
			i__3 = *ku + ml - 1 + i__ * ab_dim1;
			ab[i__3].r = ra.r, ab[i__3].i = ra.i;
			if (i__ < *n) {
/* Computing MIN */
			    i__4 = *ku + ml - 2, i__5 = *n - i__;
			    i__3 = min(i__4,i__5);
			    i__6 = *ldab - 1;
			    i__7 = *ldab - 1;
			    zrot_(&i__3, &ab[*ku + ml - 2 + (i__ + 1) * 
				    ab_dim1], &i__6, &ab[*ku + ml - 1 + (i__ 
				    + 1) * ab_dim1], &i__7, &rwork[i__ + ml - 
				    1], &work[i__ + ml - 1]);
			}
		    }
		    ++nr;
		    j1 -= kb1;
		}

		if (wantq) {

/*                 accumulate product of plane rotations in Q */

		    i__3 = j2;
		    i__4 = kb1;
		    for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) 
			    {
			d_cnjg(&z__1, &work[j]);
			zrot_(m, &q[(j - 1) * q_dim1 + 1], &c__1, &q[j * 
				q_dim1 + 1], &c__1, &rwork[j], &z__1);
		    }
		}

		if (wantc) {

/*                 apply plane rotations to C */

		    i__4 = j2;
		    i__3 = kb1;
		    for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) 
			    {
			zrot_(ncc, &c__[j - 1 + c_dim1], ldc, &c__[j + c_dim1]
, ldc, &rwork[j], &work[j]);
		    }
		}

		if (j2 + kun > *n) {

/*                 adjust J2 to keep within the bounds of the matrix */

		    --nr;
		    j2 -= kb1;
		}

		i__3 = j2;
		i__4 = kb1;
		for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {

/*                 create nonzero element a(j-1,j+ku) above the band */
/*                 and store it in WORK(n+1:2*n) */

		    i__5 = j + kun;
		    i__6 = j;
		    i__7 = (j + kun) * ab_dim1 + 1;
		    z__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[
			    i__7].i, z__1.i = work[i__6].r * ab[i__7].i + 
			    work[i__6].i * ab[i__7].r;
		    work[i__5].r = z__1.r, work[i__5].i = z__1.i;
		    i__5 = (j + kun) * ab_dim1 + 1;
		    i__6 = j;
		    i__7 = (j + kun) * ab_dim1 + 1;
		    z__1.r = rwork[i__6] * ab[i__7].r, z__1.i = rwork[i__6] * 
			    ab[i__7].i;
		    ab[i__5].r = z__1.r, ab[i__5].i = z__1.i;
		}

/*              generate plane rotations to annihilate nonzero elements */
/*              which have been generated above the band */

		if (nr > 0) {
		    zlargv_(&nr, &ab[(j1 + kun - 1) * ab_dim1 + 1], &inca, &
			    work[j1 + kun], &kb1, &rwork[j1 + kun], &kb1);
		}

/*              apply plane rotations from the right */

		i__4 = kb;
		for (l = 1; l <= i__4; ++l) {
		    if (j2 + l - 1 > *m) {
			nrt = nr - 1;
		    } else {
			nrt = nr;
		    }
		    if (nrt > 0) {
			zlartv_(&nrt, &ab[l + 1 + (j1 + kun - 1) * ab_dim1], &
				inca, &ab[l + (j1 + kun) * ab_dim1], &inca, &
				rwork[j1 + kun], &work[j1 + kun], &kb1);
		    }
		}

		if (ml == ml0 && mu > mu0) {
		    if (mu <= *n - i__ + 1) {

/*                    generate plane rotation to annihilate a(i,i+mu-1) */
/*                    within the band, and apply rotation from the right */

			zlartg_(&ab[*ku - mu + 3 + (i__ + mu - 2) * ab_dim1], 
				&ab[*ku - mu + 2 + (i__ + mu - 1) * ab_dim1], 
				&rwork[i__ + mu - 1], &work[i__ + mu - 1], &
				ra);
			i__4 = *ku - mu + 3 + (i__ + mu - 2) * ab_dim1;
			ab[i__4].r = ra.r, ab[i__4].i = ra.i;
/* Computing MIN */
			i__3 = *kl + mu - 2, i__5 = *m - i__;
			i__4 = min(i__3,i__5);
			zrot_(&i__4, &ab[*ku - mu + 4 + (i__ + mu - 2) * 
				ab_dim1], &c__1, &ab[*ku - mu + 3 + (i__ + mu 
				- 1) * ab_dim1], &c__1, &rwork[i__ + mu - 1], 
				&work[i__ + mu - 1]);
		    }
		    ++nr;
		    j1 -= kb1;
		}

		if (wantpt) {

/*                 accumulate product of plane rotations in P' */

		    i__4 = j2;
		    i__3 = kb1;
		    for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) 
			    {
			d_cnjg(&z__1, &work[j + kun]);
			zrot_(n, &pt[j + kun - 1 + pt_dim1], ldpt, &pt[j + 
				kun + pt_dim1], ldpt, &rwork[j + kun], &z__1);
		    }
		}

		if (j2 + kb > *m) {

/*                 adjust J2 to keep within the bounds of the matrix */

		    --nr;
		    j2 -= kb1;
		}

		i__3 = j2;
		i__4 = kb1;
		for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {

/*                 create nonzero element a(j+kl+ku,j+ku-1) below the */
/*                 band and store it in WORK(1:n) */

		    i__5 = j + kb;
		    i__6 = j + kun;
		    i__7 = klu1 + (j + kun) * ab_dim1;
		    z__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[
			    i__7].i, z__1.i = work[i__6].r * ab[i__7].i + 
			    work[i__6].i * ab[i__7].r;
		    work[i__5].r = z__1.r, work[i__5].i = z__1.i;
		    i__5 = klu1 + (j + kun) * ab_dim1;
		    i__6 = j + kun;
		    i__7 = klu1 + (j + kun) * ab_dim1;
		    z__1.r = rwork[i__6] * ab[i__7].r, z__1.i = rwork[i__6] * 
			    ab[i__7].i;
		    ab[i__5].r = z__1.r, ab[i__5].i = z__1.i;
		}

		if (ml > ml0) {
		    --ml;
		} else {
		    --mu;
		}
	    }
	}
    }

    if (*ku == 0 && *kl > 0) {

/*        A has been reduced to complex lower bidiagonal form */

/*        Transform lower bidiagonal form to upper bidiagonal by applying */
/*        plane rotations from the left, overwriting superdiagonal */
/*        elements on subdiagonal elements */

/* Computing MIN */
	i__2 = *m - 1;
	i__1 = min(i__2,*n);
	for (i__ = 1; i__ <= i__1; ++i__) {
	    zlartg_(&ab[i__ * ab_dim1 + 1], &ab[i__ * ab_dim1 + 2], &rc, &rs, 
		    &ra);
	    i__2 = i__ * ab_dim1 + 1;
	    ab[i__2].r = ra.r, ab[i__2].i = ra.i;
	    if (i__ < *n) {
		i__2 = i__ * ab_dim1 + 2;
		i__4 = (i__ + 1) * ab_dim1 + 1;
		z__1.r = rs.r * ab[i__4].r - rs.i * ab[i__4].i, z__1.i = rs.r 
			* ab[i__4].i + rs.i * ab[i__4].r;
		ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
		i__2 = (i__ + 1) * ab_dim1 + 1;
		i__4 = (i__ + 1) * ab_dim1 + 1;
		z__1.r = rc * ab[i__4].r, z__1.i = rc * ab[i__4].i;
		ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
	    }
	    if (wantq) {
		d_cnjg(&z__1, &rs);
		zrot_(m, &q[i__ * q_dim1 + 1], &c__1, &q[(i__ + 1) * q_dim1 + 
			1], &c__1, &rc, &z__1);
	    }
	    if (wantc) {
		zrot_(ncc, &c__[i__ + c_dim1], ldc, &c__[i__ + 1 + c_dim1], 
			ldc, &rc, &rs);
	    }
	}
    } else {

/*        A has been reduced to complex upper bidiagonal form or is */
/*        diagonal */

	if (*ku > 0 && *m < *n) {

/*           Annihilate a(m,m+1) by applying plane rotations from the */
/*           right */

	    i__1 = *ku + (*m + 1) * ab_dim1;
	    rb.r = ab[i__1].r, rb.i = ab[i__1].i;
	    for (i__ = *m; i__ >= 1; --i__) {
		zlartg_(&ab[*ku + 1 + i__ * ab_dim1], &rb, &rc, &rs, &ra);
		i__1 = *ku + 1 + i__ * ab_dim1;
		ab[i__1].r = ra.r, ab[i__1].i = ra.i;
		if (i__ > 1) {
		    d_cnjg(&z__3, &rs);
		    z__2.r = -z__3.r, z__2.i = -z__3.i;
		    i__1 = *ku + i__ * ab_dim1;
		    z__1.r = z__2.r * ab[i__1].r - z__2.i * ab[i__1].i, 
			    z__1.i = z__2.r * ab[i__1].i + z__2.i * ab[i__1]
			    .r;
		    rb.r = z__1.r, rb.i = z__1.i;
		    i__1 = *ku + i__ * ab_dim1;
		    i__2 = *ku + i__ * ab_dim1;
		    z__1.r = rc * ab[i__2].r, z__1.i = rc * ab[i__2].i;
		    ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
		}
		if (wantpt) {
		    d_cnjg(&z__1, &rs);
		    zrot_(n, &pt[i__ + pt_dim1], ldpt, &pt[*m + 1 + pt_dim1], 
			    ldpt, &rc, &z__1);
		}
	    }
	}
    }

/*     Make diagonal and superdiagonal elements real, storing them in D */
/*     and E */

    i__1 = *ku + 1 + ab_dim1;
    t.r = ab[i__1].r, t.i = ab[i__1].i;
    i__1 = minmn;
    for (i__ = 1; i__ <= i__1; ++i__) {
	abst = z_abs(&t);
	d__[i__] = abst;
	if (abst != 0.) {
	    z__1.r = t.r / abst, z__1.i = t.i / abst;
	    t.r = z__1.r, t.i = z__1.i;
	} else {
	    t.r = 1., t.i = 0.;
	}
	if (wantq) {
	    zscal_(m, &t, &q[i__ * q_dim1 + 1], &c__1);
	}
	if (wantc) {
	    d_cnjg(&z__1, &t);
	    zscal_(ncc, &z__1, &c__[i__ + c_dim1], ldc);
	}
	if (i__ < minmn) {
	    if (*ku == 0 && *kl == 0) {
		e[i__] = 0.;
		i__2 = (i__ + 1) * ab_dim1 + 1;
		t.r = ab[i__2].r, t.i = ab[i__2].i;
	    } else {
		if (*ku == 0) {
		    i__2 = i__ * ab_dim1 + 2;
		    d_cnjg(&z__2, &t);
		    z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, 
			    z__1.i = ab[i__2].r * z__2.i + ab[i__2].i * 
			    z__2.r;
		    t.r = z__1.r, t.i = z__1.i;
		} else {
		    i__2 = *ku + (i__ + 1) * ab_dim1;
		    d_cnjg(&z__2, &t);
		    z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, 
			    z__1.i = ab[i__2].r * z__2.i + ab[i__2].i * 
			    z__2.r;
		    t.r = z__1.r, t.i = z__1.i;
		}
		abst = z_abs(&t);
		e[i__] = abst;
		if (abst != 0.) {
		    z__1.r = t.r / abst, z__1.i = t.i / abst;
		    t.r = z__1.r, t.i = z__1.i;
		} else {
		    t.r = 1., t.i = 0.;
		}
		if (wantpt) {
		    zscal_(n, &t, &pt[i__ + 1 + pt_dim1], ldpt);
		}
		i__2 = *ku + 1 + (i__ + 1) * ab_dim1;
		d_cnjg(&z__2, &t);
		z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, z__1.i = 
			ab[i__2].r * z__2.i + ab[i__2].i * z__2.r;
		t.r = z__1.r, t.i = z__1.i;
	    }
	}
    }
    return 0;

/*     End of ZGBBRD */

} /* zgbbrd_ */
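
/*  Illustrative sketch (added; not part of the f2c translation): a hedged */
/*  helper showing how a full column-major m-by-n matrix A could be packed */
/*  into the band storage AB expected by zgbbrd_, following the documented */
/*  rule AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku) <= i <= min(m,j+kl).  The */
/*  loop indices are 1-based to mirror the documentation; the f2c types and */
/*  the min/max macros are assumed to come from f2c.h, and ldab is assumed */
/*  to be at least kl+ku+1. */

static void pack_band_storage(integer m, integer n, integer kl, integer ku,
	const doublecomplex *a, integer lda, doublecomplex *ab, integer ldab)
{
    integer i__, j, ilo, ihi;

    for (j = 1; j <= n; ++j) {
	ilo = max(1, j - ku);
	ihi = min(m, j + kl);
	for (i__ = ilo; i__ <= ihi; ++i__) {
	    /* AB(ku+1+i-j, j) = A(i, j), rewritten with 0-based C offsets */
	    ab[ku + i__ - j + (j - 1) * ldab] = a[i__ - 1 + (j - 1) * lda];
	}
    }
}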
Example #3
0
/* Subroutine */ int zhgeqz_(char *job, char *compq, char *compz, integer *n, 
	integer *ilo, integer *ihi, doublecomplex *a, integer *lda, 
	doublecomplex *b, integer *ldb, doublecomplex *alpha, doublecomplex *
	beta, doublecomplex *q, integer *ldq, doublecomplex *z__, integer *
	ldz, doublecomplex *work, integer *lwork, doublereal *rwork, integer *
	info)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
	    z_offset, i__1, i__2, i__3, i__4, i__5, i__6;
    doublereal d__1, d__2, d__3, d__4, d__5, d__6;
    doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;

    /* Builtin functions */
    double z_abs(doublecomplex *);
    void d_cnjg(doublecomplex *, doublecomplex *);
    double d_imag(doublecomplex *);
    void z_div(doublecomplex *, doublecomplex *, doublecomplex *), pow_zi(
	    doublecomplex *, doublecomplex *, integer *), z_sqrt(
	    doublecomplex *, doublecomplex *);

    /* Local variables */
    static doublereal absb, atol, btol, temp, opst;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    static doublereal temp2, c__;
    static integer j;
    static doublecomplex s, t;
    extern logical lsame_(char *, char *);
    static doublecomplex ctemp;
    static integer iiter, ilast, jiter;
    static doublereal anorm;
    static integer maxit;
    static doublereal bnorm;
    static doublecomplex shift;
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *);
    static doublereal tempr;
    static doublecomplex ctemp2, ctemp3;
    static logical ilazr2;
    static integer jc, in;
    static doublereal ascale, bscale;
    static doublecomplex u12;
    extern doublereal dlamch_(char *);
    static integer jr, nq;
    static doublecomplex signbc;
    static integer nz;
    static doublereal safmin;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    static doublecomplex eshift;
    static logical ilschr;
    static integer icompq, ilastm;
    static doublecomplex rtdisc;
    static integer ischur;
    extern doublereal zlanhs_(char *, integer *, doublecomplex *, integer *, 
	    doublereal *);
    static logical ilazro;
    static integer icompz, ifirst;
    extern /* Subroutine */ int zlartg_(doublecomplex *, doublecomplex *, 
	    doublereal *, doublecomplex *, doublecomplex *);
    static integer ifrstm;
    extern /* Subroutine */ int zlaset_(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, doublecomplex *, integer *);
    static integer istart;
    static logical lquery;
    static doublecomplex ad11, ad12, ad21, ad22;
    static integer jch;
    static logical ilq, ilz;
    static doublereal ulp;
    static doublecomplex abi22;


#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]
#define q_subscr(a_1,a_2) (a_2)*q_dim1 + a_1
#define q_ref(a_1,a_2) q[q_subscr(a_1,a_2)]
#define z___subscr(a_1,a_2) (a_2)*z_dim1 + a_1
#define z___ref(a_1,a_2) z__[z___subscr(a_1,a_2)]


/*  -- LAPACK routine (instrumented to count operations, version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   


       ----------------------- Begin Timing Code ------------------------   
       Common block to return operation count and iteration count   
       ITCNT is initialized to 0, OPS is only incremented   
       OPST is used to accumulate small contributions to OPS   
       to avoid roundoff error   
       ------------------------ End Timing Code -------------------------   


    Purpose   
    =======   

    ZHGEQZ implements a single-shift version of the QZ   
    method for finding the generalized eigenvalues w(i)=ALPHA(i)/BETA(i)   
    of the equation   

         det( A - w(i) B ) = 0   

    If JOB='S', then the pair (A,B) is simultaneously   
    reduced to Schur form (i.e., A and B are both upper triangular) by   
    applying one unitary transformation (usually called Q) on the left and   
    another (usually called Z) on the right.  The diagonal elements of   
    A are then ALPHA(1),...,ALPHA(N), and of B are BETA(1),...,BETA(N).   

    If JOB='S' and COMPQ and COMPZ are 'V' or 'I', then the unitary   
    transformations used to reduce (A,B) are accumulated into the arrays   
    Q and Z s.t.:   

         Q(in) A(in) Z(in)* = Q(out) A(out) Z(out)*   
         Q(in) B(in) Z(in)* = Q(out) B(out) Z(out)*   

    Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix   
         Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973),   
         pp. 241--256.   

    Arguments   
    =========   

    JOB     (input) CHARACTER*1   
            = 'E': compute only ALPHA and BETA.  A and B will not   
                   necessarily be put into generalized Schur form.   
            = 'S': put A and B into generalized Schur form, as well   
                   as computing ALPHA and BETA.   

    COMPQ   (input) CHARACTER*1   
            = 'N': do not modify Q.   
            = 'V': multiply the array Q on the right by the conjugate   
                   transpose of the unitary transformation that is   
                   applied to the left side of A and B to reduce them   
                   to Schur form.   
            = 'I': like COMPQ='V', except that Q will be initialized to   
                   the identity first.   

    COMPZ   (input) CHARACTER*1   
            = 'N': do not modify Z.   
            = 'V': multiply the array Z on the right by the unitary   
                   transformation that is applied to the right side of   
                   A and B to reduce them to Schur form.   
            = 'I': like COMPZ='V', except that Z will be initialized to   
                   the identity first.   

    N       (input) INTEGER   
            The order of the matrices A, B, Q, and Z.  N >= 0.   

    ILO     (input) INTEGER   
    IHI     (input) INTEGER   
            It is assumed that A is already upper triangular in rows and   
            columns 1:ILO-1 and IHI+1:N.   
            1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.   

    A       (input/output) COMPLEX*16 array, dimension (LDA, N)   
            On entry, the N-by-N upper Hessenberg matrix A.  Elements   
            below the subdiagonal must be zero.   
            If JOB='S', then on exit A and B will have been   
               simultaneously reduced to upper triangular form.   
            If JOB='E', then on exit A will have been destroyed.   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= max( 1, N ).   

    B       (input/output) COMPLEX*16 array, dimension (LDB, N)   
            On entry, the N-by-N upper triangular matrix B.  Elements   
            below the diagonal must be zero.   
            If JOB='S', then on exit A and B will have been   
               simultaneously reduced to upper triangular form.   
            If JOB='E', then on exit B will have been destroyed.   

    LDB     (input) INTEGER   
            The leading dimension of the array B.  LDB >= max( 1, N ).   

    ALPHA   (output) COMPLEX*16 array, dimension (N)   
            The diagonal elements of A when the pair (A,B) has been   
            reduced to Schur form.  ALPHA(i)/BETA(i) i=1,...,N   
            are the generalized eigenvalues.   

    BETA    (output) COMPLEX*16 array, dimension (N)   
            The diagonal elements of B when the pair (A,B) has been   
            reduced to Schur form.  ALPHA(i)/BETA(i) i=1,...,N   
            are the generalized eigenvalues.  A and B are normalized   
            so that BETA(1),...,BETA(N) are non-negative real numbers.   

    Q       (input/output) COMPLEX*16 array, dimension (LDQ, N)   
            If COMPQ='N', then Q will not be referenced.   
            If COMPQ='V' or 'I', then the conjugate transpose of the   
               unitary transformations which are applied to A and B on   
               the left will be applied to the array Q on the right.   

    LDQ     (input) INTEGER   
            The leading dimension of the array Q.  LDQ >= 1.   
            If COMPQ='V' or 'I', then LDQ >= N.   

    Z       (input/output) COMPLEX*16 array, dimension (LDZ, N)   
            If COMPZ='N', then Z will not be referenced.   
            If COMPZ='V' or 'I', then the unitary transformations which   
               are applied to A and B on the right will be applied to the   
               array Z on the right.   

    LDZ     (input) INTEGER   
            The leading dimension of the array Z.  LDZ >= 1.   
            If COMPZ='V' or 'I', then LDZ >= N.   

    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)   
            On exit, if INFO >= 0, WORK(1) returns the optimal LWORK.   

    LWORK   (input) INTEGER   
            The dimension of the array WORK.  LWORK >= max(1,N).   

            If LWORK = -1, then a workspace query is assumed; the routine   
            only calculates the optimal size of the WORK array, returns   
            this value as the first entry of the WORK array, and no error   
            message related to LWORK is issued by XERBLA.   

    RWORK   (workspace) DOUBLE PRECISION array, dimension (N)   

    INFO    (output) INTEGER   
            = 0: successful exit   
            < 0: if INFO = -i, the i-th argument had an illegal value   
            = 1,...,N: the QZ iteration did not converge.  (A,B) is not   
                       in Schur form, but ALPHA(i) and BETA(i),   
                       i=INFO+1,...,N should be correct.   
            = N+1,...,2*N: the shift calculation failed.  (A,B) is not   
                       in Schur form, but ALPHA(i) and BETA(i),   
                       i=INFO-N+1,...,N should be correct.   
            > 2*N:     various "impossible" errors.   

    Further Details   
    ===============   

    We assume that complex ABS works as long as its value is less than   
    overflow.   

    =====================================================================   

       ----------------------- Begin Timing Code ------------------------   
       Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    --alpha;
    --beta;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1 * 1;
    z__ -= z_offset;
    --work;
    --rwork;

    /* Function Body */
    latime_1.itcnt = 0.;
/*     ------------------------ End Timing Code -------------------------   

       Decode JOB, COMPQ, COMPZ */

    if (lsame_(job, "E")) {
	ilschr = FALSE_;
	ischur = 1;
    } else if (lsame_(job, "S")) {
	ilschr = TRUE_;
	ischur = 2;
    } else {
	ischur = 0;
    }

    if (lsame_(compq, "N")) {
	ilq = FALSE_;
	icompq = 1;
	nq = 0;
    } else if (lsame_(compq, "V")) {
	ilq = TRUE_;
	icompq = 2;
	nq = *n;
    } else if (lsame_(compq, "I")) {
	ilq = TRUE_;
	icompq = 3;
	nq = *n;
    } else {
	icompq = 0;
    }

    if (lsame_(compz, "N")) {
	ilz = FALSE_;
	icompz = 1;
	nz = 0;
    } else if (lsame_(compz, "V")) {
	ilz = TRUE_;
	icompz = 2;
	nz = *n;
    } else if (lsame_(compz, "I")) {
	ilz = TRUE_;
	icompz = 3;
	nz = *n;
    } else {
	icompz = 0;
    }

/*     Check Argument Values */

    *info = 0;
    i__1 = max(1,*n);
    work[1].r = (doublereal) i__1, work[1].i = 0.;
    lquery = *lwork == -1;
    if (ischur == 0) {
	*info = -1;
    } else if (icompq == 0) {
	*info = -2;
    } else if (icompz == 0) {
	*info = -3;
    } else if (*n < 0) {
	*info = -4;
    } else if (*ilo < 1) {
	*info = -5;
    } else if (*ihi > *n || *ihi < *ilo - 1) {
	*info = -6;
    } else if (*lda < *n) {
	*info = -8;
    } else if (*ldb < *n) {
	*info = -10;
    } else if (*ldq < 1 || ilq && *ldq < *n) {
	*info = -14;
    } else if (*ldz < 1 || ilz && *ldz < *n) {
	*info = -16;
    } else if (*lwork < max(1,*n) && ! lquery) {
	*info = -18;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZHGEQZ", &i__1);
	return 0;
    } else if (lquery) {
	return 0;
    }

/*     Quick return if possible   

       WORK( 1 ) = CMPLX( 1 ) */
    if (*n <= 0) {
	work[1].r = 1., work[1].i = 0.;
	return 0;
    }

/*     Initialize Q and Z */

    if (icompq == 3) {
	zlaset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
    }
    if (icompz == 3) {
	zlaset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
    }

/*     Machine Constants */

    in = *ihi + 1 - *ilo;
    safmin = dlamch_("S");
    ulp = dlamch_("E") * dlamch_("B");
    anorm = zlanhs_("F", &in, &a_ref(*ilo, *ilo), lda, &rwork[1]);
    bnorm = zlanhs_("F", &in, &b_ref(*ilo, *ilo), ldb, &rwork[1]);
/* Computing MAX */
    d__1 = safmin, d__2 = ulp * anorm;
    atol = max(d__1,d__2);
/* Computing MAX */
    d__1 = safmin, d__2 = ulp * bnorm;
    btol = max(d__1,d__2);
    ascale = 1. / max(safmin,anorm);
    bscale = 1. / max(safmin,bnorm);

/*     ---------------------- Begin Timing Code -------------------------   
       Count ops for norms, etc. */
    opst = 0.;
/* Computing 2nd power */
    i__1 = *n;
    latime_1.ops += (doublereal) ((i__1 * i__1 << 2) + *n * 12 - 5);
/*     ----------------------- End Timing Code --------------------------   



       Set Eigenvalues IHI+1:N */

    i__1 = *n;
    for (j = *ihi + 1; j <= i__1; ++j) {
	absb = z_abs(&b_ref(j, j));
	if (absb > safmin) {
	    i__2 = b_subscr(j, j);
	    z__2.r = b[i__2].r / absb, z__2.i = b[i__2].i / absb;
	    d_cnjg(&z__1, &z__2);
	    signbc.r = z__1.r, signbc.i = z__1.i;
	    i__2 = b_subscr(j, j);
	    b[i__2].r = absb, b[i__2].i = 0.;
	    if (ilschr) {
		i__2 = j - 1;
		zscal_(&i__2, &signbc, &b_ref(1, j), &c__1);
		zscal_(&j, &signbc, &a_ref(1, j), &c__1);
/*              ----------------- Begin Timing Code --------------------- */
		opst += (doublereal) ((j - 1) * 12);
/*              ------------------ End Timing Code ---------------------- */
	    } else {
		i__2 = a_subscr(j, j);
		i__3 = a_subscr(j, j);
		z__1.r = a[i__3].r * signbc.r - a[i__3].i * signbc.i, z__1.i =
			 a[i__3].r * signbc.i + a[i__3].i * signbc.r;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
	    }
	    if (ilz) {
		zscal_(n, &signbc, &z___ref(1, j), &c__1);
	    }
/*           ------------------- Begin Timing Code ---------------------- */
	    opst += (doublereal) (nz * 6 + 13);
/*           -------------------- End Timing Code ----------------------- */
	} else {
	    i__2 = b_subscr(j, j);
	    b[i__2].r = 0., b[i__2].i = 0.;
	}
	i__2 = j;
	i__3 = a_subscr(j, j);
	alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
	i__2 = j;
	i__3 = b_subscr(j, j);
	beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;
/* L10: */
    }

/*     If IHI < ILO, skip QZ steps */

    if (*ihi < *ilo) {
	goto L190;
    }

/*     MAIN QZ ITERATION LOOP   

       Initialize dynamic indices   

       Eigenvalues ILAST+1:N have been found.   
          Column operations modify rows IFRSTM:whatever   
          Row operations modify columns whatever:ILASTM   

       If only eigenvalues are being computed, then   
          IFRSTM is the row of the last splitting row above row ILAST;   
          this is always at least ILO.   
       IITER counts iterations since the last eigenvalue was found,   
          to tell when to use an extraordinary shift.   
       MAXIT is the maximum number of QZ sweeps allowed. */

    ilast = *ihi;
    if (ilschr) {
	ifrstm = 1;
	ilastm = *n;
    } else {
	ifrstm = *ilo;
	ilastm = *ihi;
    }
    iiter = 0;
    eshift.r = 0., eshift.i = 0.;
    maxit = (*ihi - *ilo + 1) * 30;

    i__1 = maxit;
    for (jiter = 1; jiter <= i__1; ++jiter) {

/*        Check for too many iterations. */

	if (jiter > maxit) {
	    goto L180;
	}

/*        Split the matrix if possible.   

          Two tests:   
             1: A(j,j-1)=0  or  j=ILO   
             2: B(j,j)=0   

          Special case: j=ILAST */

	if (ilast == *ilo) {
	    goto L60;
	} else {
	    i__2 = a_subscr(ilast, ilast - 1);
	    if ((d__1 = a[i__2].r, abs(d__1)) + (d__2 = d_imag(&a_ref(ilast, 
		    ilast - 1)), abs(d__2)) <= atol) {
		i__2 = a_subscr(ilast, ilast - 1);
		a[i__2].r = 0., a[i__2].i = 0.;
		goto L60;
	    }
	}

	if (z_abs(&b_ref(ilast, ilast)) <= btol) {
	    i__2 = b_subscr(ilast, ilast);
	    b[i__2].r = 0., b[i__2].i = 0.;
	    goto L50;
	}

/*        General case: j<ILAST */

	i__2 = *ilo;
	for (j = ilast - 1; j >= i__2; --j) {

/*           Test 1: for A(j,j-1)=0 or j=ILO */

	    if (j == *ilo) {
		ilazro = TRUE_;
	    } else {
		i__3 = a_subscr(j, j - 1);
		if ((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a_ref(j, 
			j - 1)), abs(d__2)) <= atol) {
		    i__3 = a_subscr(j, j - 1);
		    a[i__3].r = 0., a[i__3].i = 0.;
		    ilazro = TRUE_;
		} else {
		    ilazro = FALSE_;
		}
	    }

/*           Test 2: for B(j,j)=0 */

	    if (z_abs(&b_ref(j, j)) < btol) {
		i__3 = b_subscr(j, j);
		b[i__3].r = 0., b[i__3].i = 0.;

/*              Test 1a: Check for 2 consecutive small subdiagonals in A */

		ilazr2 = FALSE_;
		if (! ilazro) {
		    i__3 = a_subscr(j, j - 1);
		    i__4 = a_subscr(j + 1, j);
		    i__5 = a_subscr(j, j);
		    if (((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&
			    a_ref(j, j - 1)), abs(d__2))) * (ascale * ((d__3 =
			     a[i__4].r, abs(d__3)) + (d__4 = d_imag(&a_ref(j 
			    + 1, j)), abs(d__4)))) <= ((d__5 = a[i__5].r, abs(
			    d__5)) + (d__6 = d_imag(&a_ref(j, j)), abs(d__6)))
			     * (ascale * atol)) {
			ilazr2 = TRUE_;
		    }
		}

/*              If both tests pass (1 & 2), i.e., the leading diagonal   
                element of B in the block is zero, split a 1x1 block off   
                at the top. (I.e., at the J-th row/column) The leading   
                diagonal element of the remainder can also be zero, so   
                this may have to be done repeatedly. */

		if (ilazro || ilazr2) {
		    i__3 = ilast - 1;
		    for (jch = j; jch <= i__3; ++jch) {
			i__4 = a_subscr(jch, jch);
			ctemp.r = a[i__4].r, ctemp.i = a[i__4].i;
			zlartg_(&ctemp, &a_ref(jch + 1, jch), &c__, &s, &
				a_ref(jch, jch));
			i__4 = a_subscr(jch + 1, jch);
			a[i__4].r = 0., a[i__4].i = 0.;
			i__4 = ilastm - jch;
			zrot_(&i__4, &a_ref(jch, jch + 1), lda, &a_ref(jch + 
				1, jch + 1), lda, &c__, &s);
			i__4 = ilastm - jch;
			zrot_(&i__4, &b_ref(jch, jch + 1), ldb, &b_ref(jch + 
				1, jch + 1), ldb, &c__, &s);
			if (ilq) {
			    d_cnjg(&z__1, &s);
			    zrot_(n, &q_ref(1, jch), &c__1, &q_ref(1, jch + 1)
				    , &c__1, &c__, &z__1);
			}
			if (ilazr2) {
			    i__4 = a_subscr(jch, jch - 1);
			    i__5 = a_subscr(jch, jch - 1);
			    z__1.r = c__ * a[i__5].r, z__1.i = c__ * a[i__5]
				    .i;
			    a[i__4].r = z__1.r, a[i__4].i = z__1.i;
			}
			ilazr2 = FALSE_;
/*                    --------------- Begin Timing Code ----------------- */
			opst += (doublereal) ((ilastm - jch) * 40 + 32 + nq * 
				20);
/*                    ---------------- End Timing Code ------------------ */
			i__4 = b_subscr(jch + 1, jch + 1);
			if ((d__1 = b[i__4].r, abs(d__1)) + (d__2 = d_imag(&
				b_ref(jch + 1, jch + 1)), abs(d__2)) >= btol) 
				{
			    if (jch + 1 >= ilast) {
				goto L60;
			    } else {
				ifirst = jch + 1;
				goto L70;
			    }
			}
			i__4 = b_subscr(jch + 1, jch + 1);
			b[i__4].r = 0., b[i__4].i = 0.;
/* L20: */
		    }
		    goto L50;
		} else {

/*                 Only test 2 passed -- chase the zero to B(ILAST,ILAST)   
                   Then process as in the case B(ILAST,ILAST)=0 */

		    i__3 = ilast - 1;
		    for (jch = j; jch <= i__3; ++jch) {
			i__4 = b_subscr(jch, jch + 1);
			ctemp.r = b[i__4].r, ctemp.i = b[i__4].i;
			zlartg_(&ctemp, &b_ref(jch + 1, jch + 1), &c__, &s, &
				b_ref(jch, jch + 1));
			i__4 = b_subscr(jch + 1, jch + 1);
			b[i__4].r = 0., b[i__4].i = 0.;
			if (jch < ilastm - 1) {
			    i__4 = ilastm - jch - 1;
			    zrot_(&i__4, &b_ref(jch, jch + 2), ldb, &b_ref(
				    jch + 1, jch + 2), ldb, &c__, &s);
			}
			i__4 = ilastm - jch + 2;
			zrot_(&i__4, &a_ref(jch, jch - 1), lda, &a_ref(jch + 
				1, jch - 1), lda, &c__, &s);
			if (ilq) {
			    d_cnjg(&z__1, &s);
			    zrot_(n, &q_ref(1, jch), &c__1, &q_ref(1, jch + 1)
				    , &c__1, &c__, &z__1);
			}
			i__4 = a_subscr(jch + 1, jch);
			ctemp.r = a[i__4].r, ctemp.i = a[i__4].i;
			zlartg_(&ctemp, &a_ref(jch + 1, jch - 1), &c__, &s, &
				a_ref(jch + 1, jch));
			i__4 = a_subscr(jch + 1, jch - 1);
			a[i__4].r = 0., a[i__4].i = 0.;
			i__4 = jch + 1 - ifrstm;
			zrot_(&i__4, &a_ref(ifrstm, jch), &c__1, &a_ref(
				ifrstm, jch - 1), &c__1, &c__, &s);
			i__4 = jch - ifrstm;
			zrot_(&i__4, &b_ref(ifrstm, jch), &c__1, &b_ref(
				ifrstm, jch - 1), &c__1, &c__, &s);
			if (ilz) {
			    zrot_(n, &z___ref(1, jch), &c__1, &z___ref(1, jch 
				    - 1), &c__1, &c__, &s);
			}
/* L30: */
		    }

/*                 ---------------- Begin Timing Code ------------------- */
		    opst += (doublereal) ((ilastm + 1 - ifrstm) * 40 + 64 + (
			    nq + nz) * 20) * (doublereal) (ilast - j);
/*                 ----------------- End Timing Code -------------------- */

		    goto L50;
		}
	    } else if (ilazro) {

/*              Only test 1 passed -- work on J:ILAST */

		ifirst = j;
		goto L70;
	    }

/*           Neither test passed -- try next J   

   L40: */
	}

/*        (Drop-through is "impossible") */

	*info = (*n << 1) + 1;
	goto L210;

/*        B(ILAST,ILAST)=0 -- clear A(ILAST,ILAST-1) to split off a   
          1x1 block. */

L50:
	i__2 = a_subscr(ilast, ilast);
	ctemp.r = a[i__2].r, ctemp.i = a[i__2].i;
	zlartg_(&ctemp, &a_ref(ilast, ilast - 1), &c__, &s, &a_ref(ilast, 
		ilast));
	i__2 = a_subscr(ilast, ilast - 1);
	a[i__2].r = 0., a[i__2].i = 0.;
	i__2 = ilast - ifrstm;
	zrot_(&i__2, &a_ref(ifrstm, ilast), &c__1, &a_ref(ifrstm, ilast - 1), 
		&c__1, &c__, &s);
	i__2 = ilast - ifrstm;
	zrot_(&i__2, &b_ref(ifrstm, ilast), &c__1, &b_ref(ifrstm, ilast - 1), 
		&c__1, &c__, &s);
	if (ilz) {
	    zrot_(n, &z___ref(1, ilast), &c__1, &z___ref(1, ilast - 1), &c__1,
		     &c__, &s);
	}
/*        --------------------- Begin Timing Code ----------------------- */
	opst += (doublereal) ((ilast - ifrstm) * 40 + 32 + nz * 20);
/*        ---------------------- End Timing Code ------------------------   

          A(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA */

L60:
	absb = z_abs(&b_ref(ilast, ilast));
	if (absb > safmin) {
	    i__2 = b_subscr(ilast, ilast);
	    z__2.r = b[i__2].r / absb, z__2.i = b[i__2].i / absb;
	    d_cnjg(&z__1, &z__2);
	    signbc.r = z__1.r, signbc.i = z__1.i;
	    i__2 = b_subscr(ilast, ilast);
	    b[i__2].r = absb, b[i__2].i = 0.;
	    if (ilschr) {
		i__2 = ilast - ifrstm;
		zscal_(&i__2, &signbc, &b_ref(ifrstm, ilast), &c__1);
		i__2 = ilast + 1 - ifrstm;
		zscal_(&i__2, &signbc, &a_ref(ifrstm, ilast), &c__1);
/*              ----------------- Begin Timing Code --------------------- */
		opst += (doublereal) ((ilast - ifrstm) * 12);
/*              ------------------ End Timing Code ---------------------- */
	    } else {
		i__2 = a_subscr(ilast, ilast);
		i__3 = a_subscr(ilast, ilast);
		z__1.r = a[i__3].r * signbc.r - a[i__3].i * signbc.i, z__1.i =
			 a[i__3].r * signbc.i + a[i__3].i * signbc.r;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
	    }
	    if (ilz) {
		zscal_(n, &signbc, &z___ref(1, ilast), &c__1);
	    }
/*           ------------------- Begin Timing Code ---------------------- */
	    opst += (doublereal) (nz * 6 + 13);
/*           -------------------- End Timing Code ----------------------- */
	} else {
	    i__2 = b_subscr(ilast, ilast);
	    b[i__2].r = 0., b[i__2].i = 0.;
	}
	i__2 = ilast;
	i__3 = a_subscr(ilast, ilast);
	alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
	i__2 = ilast;
	i__3 = b_subscr(ilast, ilast);
	beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;

/*        Go to next block -- exit if finished. */

	--ilast;
	if (ilast < *ilo) {
	    goto L190;
	}

/*        Reset counters */

	iiter = 0;
	eshift.r = 0., eshift.i = 0.;
	if (! ilschr) {
	    ilastm = ilast;
	    if (ifrstm > ilast) {
		ifrstm = *ilo;
	    }
	}
	goto L160;

/*        QZ step   

          This iteration only involves rows/columns IFIRST:ILAST.  We   
          assume IFIRST < ILAST, and that the diagonal of B is non-zero. */

L70:
	++iiter;
	if (! ilschr) {
	    ifrstm = ifirst;
	}

/*        Compute the Shift.   

          At this point, IFIRST < ILAST, and the diagonal elements of   
          B(IFIRST:ILAST,IFIRST:ILAST) are larger than BTOL (in   
          magnitude) */

	if (iiter / 10 * 10 != iiter) {

/*           The Wilkinson shift (AEP p.512), i.e., the eigenvalue of   
             the bottom-right 2x2 block of A inv(B) which is nearest to   
             the bottom-right element.   

             We factor B as U*D, where U has unit diagonals, and   
             compute (A*inv(D))*inv(U). */
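
/*           Illustrative note (added; not in the original source): with the   
             scaled 2x2 quantities computed below,   

                M = (A*inv(D))*inv(U) = ( AD11   AD12 - AD11*U12 )   
                                        ( AD21   AD22 - AD21*U12 ),   

             so ABI22 = AD22 - U12*AD21 is M(2,2), T = (AD11+ABI22)/2 is   
             half the trace, det(M) = AD11*AD22 - AD12*AD21, and the two   
             eigenvalues of M are T +- sqrt( T**2 + AD12*AD21 - AD11*AD22 );   
             the sign test below picks the root closer to ABI22 as the shift. */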

	    i__2 = b_subscr(ilast - 1, ilast);
	    z__2.r = bscale * b[i__2].r, z__2.i = bscale * b[i__2].i;
	    i__3 = b_subscr(ilast, ilast);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    u12.r = z__1.r, u12.i = z__1.i;
	    i__2 = a_subscr(ilast - 1, ilast - 1);
	    z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast - 1, ilast - 1);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    ad11.r = z__1.r, ad11.i = z__1.i;
	    i__2 = a_subscr(ilast, ilast - 1);
	    z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast - 1, ilast - 1);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    ad21.r = z__1.r, ad21.i = z__1.i;
	    i__2 = a_subscr(ilast - 1, ilast);
	    z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast, ilast);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    ad12.r = z__1.r, ad12.i = z__1.i;
	    i__2 = a_subscr(ilast, ilast);
	    z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast, ilast);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    ad22.r = z__1.r, ad22.i = z__1.i;
	    z__2.r = u12.r * ad21.r - u12.i * ad21.i, z__2.i = u12.r * ad21.i 
		    + u12.i * ad21.r;
	    z__1.r = ad22.r - z__2.r, z__1.i = ad22.i - z__2.i;
	    abi22.r = z__1.r, abi22.i = z__1.i;

	    z__2.r = ad11.r + abi22.r, z__2.i = ad11.i + abi22.i;
	    z__1.r = z__2.r * .5, z__1.i = z__2.i * .5;
	    t.r = z__1.r, t.i = z__1.i;
	    pow_zi(&z__4, &t, &c__2);
	    z__5.r = ad12.r * ad21.r - ad12.i * ad21.i, z__5.i = ad12.r * 
		    ad21.i + ad12.i * ad21.r;
	    z__3.r = z__4.r + z__5.r, z__3.i = z__4.i + z__5.i;
	    z__6.r = ad11.r * ad22.r - ad11.i * ad22.i, z__6.i = ad11.r * 
		    ad22.i + ad11.i * ad22.r;
	    z__2.r = z__3.r - z__6.r, z__2.i = z__3.i - z__6.i;
	    z_sqrt(&z__1, &z__2);
	    rtdisc.r = z__1.r, rtdisc.i = z__1.i;
	    z__1.r = t.r - abi22.r, z__1.i = t.i - abi22.i;
	    z__2.r = t.r - abi22.r, z__2.i = t.i - abi22.i;
	    temp = z__1.r * rtdisc.r + d_imag(&z__2) * d_imag(&rtdisc);
	    if (temp <= 0.) {
		z__1.r = t.r + rtdisc.r, z__1.i = t.i + rtdisc.i;
		shift.r = z__1.r, shift.i = z__1.i;
	    } else {
		z__1.r = t.r - rtdisc.r, z__1.i = t.i - rtdisc.i;
		shift.r = z__1.r, shift.i = z__1.i;
	    }

/*           ------------------- Begin Timing Code ---------------------- */
	    opst += 116.;
/*           -------------------- End Timing Code ----------------------- */

	} else {

/*           Exceptional shift.  Chosen for no particularly good reason. */

	    i__2 = a_subscr(ilast - 1, ilast);
	    z__4.r = ascale * a[i__2].r, z__4.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast - 1, ilast - 1);
	    z__5.r = bscale * b[i__3].r, z__5.i = bscale * b[i__3].i;
	    z_div(&z__3, &z__4, &z__5);
	    d_cnjg(&z__2, &z__3);
	    z__1.r = eshift.r + z__2.r, z__1.i = eshift.i + z__2.i;
	    eshift.r = z__1.r, eshift.i = z__1.i;
	    shift.r = eshift.r, shift.i = eshift.i;

/*           ------------------- Begin Timing Code ---------------------- */
	    opst += 15.;
/*           -------------------- End Timing Code ----------------------- */

	}

/*        Now check for two consecutive small subdiagonals. */

	i__2 = ifirst + 1;
	for (j = ilast - 1; j >= i__2; --j) {
	    istart = j;
	    i__3 = a_subscr(j, j);
	    z__2.r = ascale * a[i__3].r, z__2.i = ascale * a[i__3].i;
	    i__4 = b_subscr(j, j);
	    z__4.r = bscale * b[i__4].r, z__4.i = bscale * b[i__4].i;
	    z__3.r = shift.r * z__4.r - shift.i * z__4.i, z__3.i = shift.r * 
		    z__4.i + shift.i * z__4.r;
	    z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
	    ctemp.r = z__1.r, ctemp.i = z__1.i;
	    temp = (d__1 = ctemp.r, abs(d__1)) + (d__2 = d_imag(&ctemp), abs(
		    d__2));
	    i__3 = a_subscr(j + 1, j);
	    temp2 = ascale * ((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&
		    a_ref(j + 1, j)), abs(d__2)));
	    tempr = max(temp,temp2);
	    if (tempr < 1. && tempr != 0.) {
		temp /= tempr;
		temp2 /= tempr;
	    }
	    i__3 = a_subscr(j, j - 1);
	    if (((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a_ref(j, j - 
		    1)), abs(d__2))) * temp2 <= temp * atol) {
		goto L90;
	    }
/* L80: */
	}

	istart = ifirst;
	i__2 = a_subscr(ifirst, ifirst);
	z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	i__3 = b_subscr(ifirst, ifirst);
	z__4.r = bscale * b[i__3].r, z__4.i = bscale * b[i__3].i;
	z__3.r = shift.r * z__4.r - shift.i * z__4.i, z__3.i = shift.r * 
		z__4.i + shift.i * z__4.r;
	z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
	ctemp.r = z__1.r, ctemp.i = z__1.i;

/*        --------------------- Begin Timing Code ----------------------- */
	opst += -6.;
/*        ---------------------- End Timing Code ------------------------ */

L90:

/*        Do an implicit-shift QZ sweep.   

          Initial Q */

	i__2 = a_subscr(istart + 1, istart);
	z__1.r = ascale * a[i__2].r, z__1.i = ascale * a[i__2].i;
	ctemp2.r = z__1.r, ctemp2.i = z__1.i;

/*        --------------------- Begin Timing Code ----------------------- */
	opst += (doublereal) ((ilast - istart) * 18 + 2);
/*        ---------------------- End Timing Code ------------------------ */

	zlartg_(&ctemp, &ctemp2, &c__, &s, &ctemp3);

/*        Sweep */

	i__2 = ilast - 1;
	for (j = istart; j <= i__2; ++j) {
	    if (j > istart) {
		i__3 = a_subscr(j, j - 1);
		ctemp.r = a[i__3].r, ctemp.i = a[i__3].i;
		zlartg_(&ctemp, &a_ref(j + 1, j - 1), &c__, &s, &a_ref(j, j - 
			1));
		i__3 = a_subscr(j + 1, j - 1);
		a[i__3].r = 0., a[i__3].i = 0.;
	    }

	    i__3 = ilastm;
	    for (jc = j; jc <= i__3; ++jc) {
		i__4 = a_subscr(j, jc);
		z__2.r = c__ * a[i__4].r, z__2.i = c__ * a[i__4].i;
		i__5 = a_subscr(j + 1, jc);
		z__3.r = s.r * a[i__5].r - s.i * a[i__5].i, z__3.i = s.r * a[
			i__5].i + s.i * a[i__5].r;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		ctemp.r = z__1.r, ctemp.i = z__1.i;
		i__4 = a_subscr(j + 1, jc);
		d_cnjg(&z__4, &s);
		z__3.r = -z__4.r, z__3.i = -z__4.i;
		i__5 = a_subscr(j, jc);
		z__2.r = z__3.r * a[i__5].r - z__3.i * a[i__5].i, z__2.i = 
			z__3.r * a[i__5].i + z__3.i * a[i__5].r;
		i__6 = a_subscr(j + 1, jc);
		z__5.r = c__ * a[i__6].r, z__5.i = c__ * a[i__6].i;
		z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		a[i__4].r = z__1.r, a[i__4].i = z__1.i;
		i__4 = a_subscr(j, jc);
		a[i__4].r = ctemp.r, a[i__4].i = ctemp.i;
		i__4 = b_subscr(j, jc);
		z__2.r = c__ * b[i__4].r, z__2.i = c__ * b[i__4].i;
		i__5 = b_subscr(j + 1, jc);
		z__3.r = s.r * b[i__5].r - s.i * b[i__5].i, z__3.i = s.r * b[
			i__5].i + s.i * b[i__5].r;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		ctemp2.r = z__1.r, ctemp2.i = z__1.i;
		i__4 = b_subscr(j + 1, jc);
		d_cnjg(&z__4, &s);
		z__3.r = -z__4.r, z__3.i = -z__4.i;
		i__5 = b_subscr(j, jc);
		z__2.r = z__3.r * b[i__5].r - z__3.i * b[i__5].i, z__2.i = 
			z__3.r * b[i__5].i + z__3.i * b[i__5].r;
		i__6 = b_subscr(j + 1, jc);
		z__5.r = c__ * b[i__6].r, z__5.i = c__ * b[i__6].i;
		z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		b[i__4].r = z__1.r, b[i__4].i = z__1.i;
		i__4 = b_subscr(j, jc);
		b[i__4].r = ctemp2.r, b[i__4].i = ctemp2.i;
/* L100: */
	    }
	    if (ilq) {
		i__3 = *n;
		for (jr = 1; jr <= i__3; ++jr) {
		    i__4 = q_subscr(jr, j);
		    z__2.r = c__ * q[i__4].r, z__2.i = c__ * q[i__4].i;
		    d_cnjg(&z__4, &s);
		    i__5 = q_subscr(jr, j + 1);
		    z__3.r = z__4.r * q[i__5].r - z__4.i * q[i__5].i, z__3.i =
			     z__4.r * q[i__5].i + z__4.i * q[i__5].r;
		    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		    ctemp.r = z__1.r, ctemp.i = z__1.i;
		    i__4 = q_subscr(jr, j + 1);
		    z__3.r = -s.r, z__3.i = -s.i;
		    i__5 = q_subscr(jr, j);
		    z__2.r = z__3.r * q[i__5].r - z__3.i * q[i__5].i, z__2.i =
			     z__3.r * q[i__5].i + z__3.i * q[i__5].r;
		    i__6 = q_subscr(jr, j + 1);
		    z__4.r = c__ * q[i__6].r, z__4.i = c__ * q[i__6].i;
		    z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
		    q[i__4].r = z__1.r, q[i__4].i = z__1.i;
		    i__4 = q_subscr(jr, j);
		    q[i__4].r = ctemp.r, q[i__4].i = ctemp.i;
/* L110: */
		}
	    }

	    i__3 = b_subscr(j + 1, j + 1);
	    ctemp.r = b[i__3].r, ctemp.i = b[i__3].i;
	    zlartg_(&ctemp, &b_ref(j + 1, j), &c__, &s, &b_ref(j + 1, j + 1));
	    i__3 = b_subscr(j + 1, j);
	    b[i__3].r = 0., b[i__3].i = 0.;

/* Computing MIN */
	    i__4 = j + 2;
	    i__3 = min(i__4,ilast);
	    for (jr = ifrstm; jr <= i__3; ++jr) {
		i__4 = a_subscr(jr, j + 1);
		z__2.r = c__ * a[i__4].r, z__2.i = c__ * a[i__4].i;
		i__5 = a_subscr(jr, j);
		z__3.r = s.r * a[i__5].r - s.i * a[i__5].i, z__3.i = s.r * a[
			i__5].i + s.i * a[i__5].r;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		ctemp.r = z__1.r, ctemp.i = z__1.i;
		i__4 = a_subscr(jr, j);
		d_cnjg(&z__4, &s);
		z__3.r = -z__4.r, z__3.i = -z__4.i;
		i__5 = a_subscr(jr, j + 1);
		z__2.r = z__3.r * a[i__5].r - z__3.i * a[i__5].i, z__2.i = 
			z__3.r * a[i__5].i + z__3.i * a[i__5].r;
		i__6 = a_subscr(jr, j);
		z__5.r = c__ * a[i__6].r, z__5.i = c__ * a[i__6].i;
		z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		a[i__4].r = z__1.r, a[i__4].i = z__1.i;
		i__4 = a_subscr(jr, j + 1);
		a[i__4].r = ctemp.r, a[i__4].i = ctemp.i;
/* L120: */
	    }
	    i__3 = j;
	    for (jr = ifrstm; jr <= i__3; ++jr) {
		i__4 = b_subscr(jr, j + 1);
		z__2.r = c__ * b[i__4].r, z__2.i = c__ * b[i__4].i;
		i__5 = b_subscr(jr, j);
		z__3.r = s.r * b[i__5].r - s.i * b[i__5].i, z__3.i = s.r * b[
			i__5].i + s.i * b[i__5].r;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		ctemp.r = z__1.r, ctemp.i = z__1.i;
		i__4 = b_subscr(jr, j);
		d_cnjg(&z__4, &s);
		z__3.r = -z__4.r, z__3.i = -z__4.i;
		i__5 = b_subscr(jr, j + 1);
		z__2.r = z__3.r * b[i__5].r - z__3.i * b[i__5].i, z__2.i = 
			z__3.r * b[i__5].i + z__3.i * b[i__5].r;
		i__6 = b_subscr(jr, j);
		z__5.r = c__ * b[i__6].r, z__5.i = c__ * b[i__6].i;
		z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		b[i__4].r = z__1.r, b[i__4].i = z__1.i;
		i__4 = b_subscr(jr, j + 1);
		b[i__4].r = ctemp.r, b[i__4].i = ctemp.i;
/* L130: */
	    }
	    if (ilz) {
		i__3 = *n;
		for (jr = 1; jr <= i__3; ++jr) {
		    i__4 = z___subscr(jr, j + 1);
		    z__2.r = c__ * z__[i__4].r, z__2.i = c__ * z__[i__4].i;
		    i__5 = z___subscr(jr, j);
		    z__3.r = s.r * z__[i__5].r - s.i * z__[i__5].i, z__3.i = 
			    s.r * z__[i__5].i + s.i * z__[i__5].r;
		    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		    ctemp.r = z__1.r, ctemp.i = z__1.i;
		    i__4 = z___subscr(jr, j);
		    d_cnjg(&z__4, &s);
		    z__3.r = -z__4.r, z__3.i = -z__4.i;
		    i__5 = z___subscr(jr, j + 1);
		    z__2.r = z__3.r * z__[i__5].r - z__3.i * z__[i__5].i, 
			    z__2.i = z__3.r * z__[i__5].i + z__3.i * z__[i__5]
			    .r;
		    i__6 = z___subscr(jr, j);
		    z__5.r = c__ * z__[i__6].r, z__5.i = c__ * z__[i__6].i;
		    z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		    z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
		    i__4 = z___subscr(jr, j + 1);
		    z__[i__4].r = ctemp.r, z__[i__4].i = ctemp.i;
/* L140: */
		}
	    }
/* L150: */
	}

/*        --------------------- Begin Timing Code ----------------------- */
	opst += (doublereal) (ilast - istart) * (doublereal) ((ilastm - 
		ifrstm) * 40 + 184 + (nq + nz) * 20) - 20;
/*        ---------------------- End Timing Code ------------------------ */

L160:

/*        --------------------- Begin Timing Code -----------------------   
          End of iteration -- add in "small" contributions. */
	latime_1.ops += opst;
	opst = 0.;
/*        ---------------------- End Timing Code ------------------------   


   L170: */
    }

/*     Drop-through = non-convergence */

L180:
    *info = ilast;

/*     ---------------------- Begin Timing Code ------------------------- */
    latime_1.ops += opst;
    opst = 0.;
/*     ----------------------- End Timing Code -------------------------- */

    goto L210;

/*     Successful completion of all QZ steps */

L190:

/*     Set Eigenvalues 1:ILO-1 */

    i__1 = *ilo - 1;
    for (j = 1; j <= i__1; ++j) {
	absb = z_abs(&b_ref(j, j));
	if (absb > safmin) {
	    i__2 = b_subscr(j, j);
	    z__2.r = b[i__2].r / absb, z__2.i = b[i__2].i / absb;
	    d_cnjg(&z__1, &z__2);
	    signbc.r = z__1.r, signbc.i = z__1.i;
	    i__2 = b_subscr(j, j);
	    b[i__2].r = absb, b[i__2].i = 0.;
	    if (ilschr) {
		i__2 = j - 1;
		zscal_(&i__2, &signbc, &b_ref(1, j), &c__1);
		zscal_(&j, &signbc, &a_ref(1, j), &c__1);
/*              ----------------- Begin Timing Code --------------------- */
		opst += (doublereal) ((j - 1) * 12);
/*              ------------------ End Timing Code ---------------------- */
	    } else {
		i__2 = a_subscr(j, j);
		i__3 = a_subscr(j, j);
		z__1.r = a[i__3].r * signbc.r - a[i__3].i * signbc.i, z__1.i =
			 a[i__3].r * signbc.i + a[i__3].i * signbc.r;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
	    }
	    if (ilz) {
		zscal_(n, &signbc, &z___ref(1, j), &c__1);
	    }
/*           ------------------- Begin Timing Code ---------------------- */
	    opst += (doublereal) (nz * 6 + 13);
/*           -------------------- End Timing Code ----------------------- */
	} else {
	    i__2 = b_subscr(j, j);
	    b[i__2].r = 0., b[i__2].i = 0.;
	}
	i__2 = j;
	i__3 = a_subscr(j, j);
	alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
	i__2 = j;
	i__3 = b_subscr(j, j);
	beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;
/* L200: */
    }

/*     Normal Termination */

    *info = 0;

/*     Exit (other than argument error) -- return optimal workspace size */

L210:

/*     ---------------------- Begin Timing Code ------------------------- */
    latime_1.ops += opst;
    opst = 0.;
    latime_1.itcnt = (doublereal) jiter;
/*     ----------------------- End Timing Code -------------------------- */

    z__1.r = (doublereal) (*n), z__1.i = 0.;
    work[1].r = z__1.r, work[1].i = z__1.i;
    return 0;

/*     End of ZHGEQZ */

} /* zhgeqz_ */
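
/* A standalone, hedged sketch (not part of the listing above) of the
   Wilkinson-shift selection that zhgeqz_ performs on the trailing 2x2
   block of A*inv(B), rewritten with C99 complex arithmetic for clarity.
   The variable names mirror the listing; the numerical values are
   placeholders chosen only for illustration. */
#include <complex.h>
#include <stdio.h>

int main(void)
{
    /* Scaled entries of the trailing 2x2 block of A*inv(D), where
       B = U*D with U unit upper triangular and u12 its superdiagonal. */
    double complex ad11 = 2.0 + 1.0 * I, ad12 = 0.5 - 0.3 * I;
    double complex ad21 = 0.1 + 0.0 * I, ad22 = 1.0 - 0.4 * I;
    double complex u12  = 0.2 + 0.1 * I;

    double complex abi22 = ad22 - u12 * ad21;   /* (2,2) entry of A*inv(B) */
    double complex t = 0.5 * (ad11 + abi22);    /* half the trace          */
    double complex rtdisc = csqrt(t * t + ad12 * ad21 - ad11 * ad22);

    /* Choose the root t +/- rtdisc that lies closer to abi22, exactly as
       the sign test in zhgeqz_ does. */
    double complex shift =
	creal((t - abi22) * conj(rtdisc)) <= 0.0 ? t + rtdisc : t - rtdisc;

    printf("shift = %g%+gi\n", creal(shift), cimag(shift));
    return 0;
}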
Example #4
/* Subroutine */ int ztgsja_(char *jobu, char *jobv, char *jobq, integer *m, 
	integer *p, integer *n, integer *k, integer *l, doublecomplex *a, 
	integer *lda, doublecomplex *b, integer *ldb, doublereal *tola, 
	doublereal *tolb, doublereal *alpha, doublereal *beta, doublecomplex *
	u, integer *ldu, doublecomplex *v, integer *ldv, doublecomplex *q, 
	integer *ldq, doublecomplex *work, integer *ncycle, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, u_dim1, 
	    u_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4;
    doublereal d__1;
    doublecomplex z__1;

    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    integer i__, j;
    doublereal a1, b1, a3, b3;
    doublecomplex a2, b2;
    doublereal csq, csu, csv;
    doublecomplex snq;
    doublereal rwk;
    doublecomplex snu, snv;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    doublereal gamma;
    extern logical lsame_(char *, char *);
    logical initq, initu, initv, wantq, upper;
    doublereal error, ssmin;
    logical wantu, wantv;
    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *), zlags2_(logical *, doublereal *, 
	    doublecomplex *, doublereal *, doublereal *, doublecomplex *, 
	    doublereal *, doublereal *, doublecomplex *, doublereal *, 
	    doublecomplex *, doublereal *, doublecomplex *);
    integer kcycle;
    extern /* Subroutine */ int dlartg_(doublereal *, doublereal *, 
	    doublereal *, doublereal *, doublereal *), xerbla_(char *, 
	    integer *), zdscal_(integer *, doublereal *, 
	    doublecomplex *, integer *), zlapll_(integer *, doublecomplex *, 
	    integer *, doublecomplex *, integer *, doublereal *), zlaset_(
	    char *, integer *, integer *, doublecomplex *, doublecomplex *, 
	    doublecomplex *, integer *);


/*  -- LAPACK routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZTGSJA computes the generalized singular value decomposition (GSVD) */
/*  of two complex upper triangular (or trapezoidal) matrices A and B. */

/*  On entry, it is assumed that matrices A and B have the following */
/*  forms, which may be obtained by the preprocessing subroutine ZGGSVP */
/*  from a general M-by-N matrix A and P-by-N matrix B: */

/*               N-K-L  K    L */
/*     A =    K ( 0    A12  A13 ) if M-K-L >= 0; */
/*            L ( 0     0   A23 ) */
/*        M-K-L ( 0     0    0  ) */

/*             N-K-L  K    L */
/*     A =  K ( 0    A12  A13 ) if M-K-L < 0; */
/*        M-K ( 0     0   A23 ) */

/*             N-K-L  K    L */
/*     B =  L ( 0     0   B13 ) */
/*        P-L ( 0     0    0  ) */

/*  where the K-by-K matrix A12 and L-by-L matrix B13 are nonsingular */
/*  upper triangular; A23 is L-by-L upper triangular if M-K-L >= 0, */
/*  otherwise A23 is (M-K)-by-L upper trapezoidal. */

/*  On exit, */

/*         U'*A*Q = D1*( 0 R ),    V'*B*Q = D2*( 0 R ), */

/*  where U, V and Q are unitary matrices, Z' denotes the conjugate */
/*  transpose of Z, R is a nonsingular upper triangular matrix, and D1 */
/*  and D2 are ``diagonal'' matrices, which are of the following */
/*  structures: */

/*  If M-K-L >= 0, */

/*                      K  L */
/*         D1 =     K ( I  0 ) */
/*                  L ( 0  C ) */
/*              M-K-L ( 0  0 ) */

/*                     K  L */
/*         D2 = L   ( 0  S ) */
/*              P-L ( 0  0 ) */

/*                 N-K-L  K    L */
/*    ( 0 R ) = K (  0   R11  R12 ) K */
/*              L (  0    0   R22 ) L */

/*  where */

/*    C = diag( ALPHA(K+1), ... , ALPHA(K+L) ), */
/*    S = diag( BETA(K+1),  ... , BETA(K+L) ), */
/*    C**2 + S**2 = I. */

/*    R is stored in A(1:K+L,N-K-L+1:N) on exit. */

/*  If M-K-L < 0, */

/*                 K M-K K+L-M */
/*      D1 =   K ( I  0    0   ) */
/*           M-K ( 0  C    0   ) */

/*                   K M-K K+L-M */
/*      D2 =   M-K ( 0  S    0   ) */
/*           K+L-M ( 0  0    I   ) */
/*             P-L ( 0  0    0   ) */

/*                 N-K-L  K   M-K  K+L-M */
/* ( 0 R ) =    K ( 0    R11  R12  R13  ) */
/*            M-K ( 0     0   R22  R23  ) */
/*          K+L-M ( 0     0    0   R33  ) */

/*  where */
/*  C = diag( ALPHA(K+1), ... , ALPHA(M) ), */
/*  S = diag( BETA(K+1),  ... , BETA(M) ), */
/*  C**2 + S**2 = I. */

/*  R = ( R11 R12 R13 ) is stored in A(1:M, N-K-L+1:N) and R33 is stored */
/*      (  0  R22 R23 ) */
/*  in B(M-K+1:L,N+M-K-L+1:N) on exit. */

/*  The computation of the unitary transformation matrices U, V or Q */
/*  is optional.  These matrices may either be formed explicitly, or they */
/*  may be postmultiplied into input matrices U1, V1, or Q1. */

/*  Arguments */
/*  ========= */

/*  JOBU    (input) CHARACTER*1 */
/*          = 'U':  U must contain a unitary matrix U1 on entry, and */
/*                  the product U1*U is returned; */
/*          = 'I':  U is initialized to the unit matrix, and the */
/*                  unitary matrix U is returned; */
/*          = 'N':  U is not computed. */

/*  JOBV    (input) CHARACTER*1 */
/*          = 'V':  V must contain a unitary matrix V1 on entry, and */
/*                  the product V1*V is returned; */
/*          = 'I':  V is initialized to the unit matrix, and the */
/*                  unitary matrix V is returned; */
/*          = 'N':  V is not computed. */

/*  JOBQ    (input) CHARACTER*1 */
/*          = 'Q':  Q must contain a unitary matrix Q1 on entry, and */
/*                  the product Q1*Q is returned; */
/*          = 'I':  Q is initialized to the unit matrix, and the */
/*                  unitary matrix Q is returned; */
/*          = 'N':  Q is not computed. */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A.  M >= 0. */

/*  P       (input) INTEGER */
/*          The number of rows of the matrix B.  P >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrices A and B.  N >= 0. */

/*  K       (input) INTEGER */
/*  L       (input) INTEGER */
/*          K and L specify the subblocks in the input matrices A and B: */
/*          A23 = A(K+1:MIN(K+L,M),N-L+1:N) and B13 = B(1:L,N-L+1:N) */
/*          of A and B, whose GSVD is going to be computed by ZTGSJA. */
/*          See Further details. */

/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
/*          On entry, the M-by-N matrix A. */
/*          On exit, A(N-K+1:N,1:MIN(K+L,M) ) contains the triangular */
/*          matrix R or part of R.  See Purpose for details. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A. LDA >= max(1,M). */

/*  B       (input/output) COMPLEX*16 array, dimension (LDB,N) */
/*          On entry, the P-by-N matrix B. */
/*          On exit, if necessary, B(M-K+1:L,N+M-K-L+1:N) contains */
/*          a part of R.  See Purpose for details. */

/*  LDB     (input) INTEGER */
/*          The leading dimension of the array B. LDB >= max(1,P). */

/*  TOLA    (input) DOUBLE PRECISION */
/*  TOLB    (input) DOUBLE PRECISION */
/*          TOLA and TOLB are the convergence criteria for the Jacobi- */
/*          Kogbetliantz iteration procedure. Generally, they are the */
/*          same as used in the preprocessing step, say */
/*              TOLA = MAX(M,N)*norm(A)*MACHEPS, */
/*              TOLB = MAX(P,N)*norm(B)*MACHEPS. */

/*  ALPHA   (output) DOUBLE PRECISION array, dimension (N) */
/*  BETA    (output) DOUBLE PRECISION array, dimension (N) */
/*          On exit, ALPHA and BETA contain the generalized singular */
/*          value pairs of A and B; */
/*            ALPHA(1:K) = 1, */
/*            BETA(1:K)  = 0, */
/*          and if M-K-L >= 0, */
/*            ALPHA(K+1:K+L) = diag(C), */
/*            BETA(K+1:K+L)  = diag(S), */
/*          or if M-K-L < 0, */
/*            ALPHA(K+1:M)= C, ALPHA(M+1:K+L)= 0 */
/*            BETA(K+1:M) = S, BETA(M+1:K+L) = 1. */
/*          Furthermore, if K+L < N, */
/*            ALPHA(K+L+1:N) = 0 */
/*            BETA(K+L+1:N)  = 0. */

/*  U       (input/output) COMPLEX*16 array, dimension (LDU,M) */
/*          On entry, if JOBU = 'U', U must contain a matrix U1 (usually */
/*          the unitary matrix returned by ZGGSVP). */
/*          On exit, */
/*          if JOBU = 'I', U contains the unitary matrix U; */
/*          if JOBU = 'U', U contains the product U1*U. */
/*          If JOBU = 'N', U is not referenced. */

/*  LDU     (input) INTEGER */
/*          The leading dimension of the array U. LDU >= max(1,M) if */
/*          JOBU = 'U'; LDU >= 1 otherwise. */

/*  V       (input/output) COMPLEX*16 array, dimension (LDV,P) */
/*          On entry, if JOBV = 'V', V must contain a matrix V1 (usually */
/*          the unitary matrix returned by ZGGSVP). */
/*          On exit, */
/*          if JOBV = 'I', V contains the unitary matrix V; */
/*          if JOBV = 'V', V contains the product V1*V. */
/*          If JOBV = 'N', V is not referenced. */

/*  LDV     (input) INTEGER */
/*          The leading dimension of the array V. LDV >= max(1,P) if */
/*          JOBV = 'V'; LDV >= 1 otherwise. */

/*  Q       (input/output) COMPLEX*16 array, dimension (LDQ,N) */
/*          On entry, if JOBQ = 'Q', Q must contain a matrix Q1 (usually */
/*          the unitary matrix returned by ZGGSVP). */
/*          On exit, */
/*          if JOBQ = 'I', Q contains the unitary matrix Q; */
/*          if JOBQ = 'Q', Q contains the product Q1*Q. */
/*          If JOBQ = 'N', Q is not referenced. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. LDQ >= max(1,N) if */
/*          JOBQ = 'Q'; LDQ >= 1 otherwise. */

/*  WORK    (workspace) COMPLEX*16 array, dimension (2*N) */

/*  NCYCLE  (output) INTEGER */
/*          The number of cycles required for convergence. */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
/*          = 1:  the procedure does not converge after MAXIT cycles. */

/*  Internal Parameters */
/*  =================== */

/*  MAXIT   INTEGER */
/*          MAXIT specifies the maximum number of cycles (sweeps) that */
/*          the iterative procedure may take.  If the routine fails to */
/*          converge after MAXIT cycles, INFO = 1 is returned. */

/*  Further Details */
/*  =============== */

/*  ZTGSJA essentially uses a variant of Kogbetliantz algorithm to reduce */
/*  min(L,M-K)-by-L triangular (or trapezoidal) matrix A23 and L-by-L */
/*  matrix B13 to the form: */

/*           U1'*A23*Q1 = C1*R1; V1'*B13*Q1 = S1*R1, */

/*  where U1, V1 and Q1 are unitary matrices, and Z' is the conjugate */
/*  transpose of Z.  C1 and S1 are diagonal matrices satisfying */

/*                C1**2 + S1**2 = I, */

/*  and R1 is an L-by-L nonsingular upper triangular matrix. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */

/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Decode and test the input parameters */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    --alpha;
    --beta;
    u_dim1 = *ldu;
    u_offset = 1 + u_dim1;
    u -= u_offset;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    --work;

    /* Function Body */
    initu = lsame_(jobu, "I");
    wantu = initu || lsame_(jobu, "U");

    initv = lsame_(jobv, "I");
    wantv = initv || lsame_(jobv, "V");

    initq = lsame_(jobq, "I");
    wantq = initq || lsame_(jobq, "Q");

    *info = 0;
    if (! (initu || wantu || lsame_(jobu, "N"))) {
	*info = -1;
    } else if (! (initv || wantv || lsame_(jobv, "N"))) 
	    {
	*info = -2;
    } else if (! (initq || wantq || lsame_(jobq, "N"))) 
	    {
	*info = -3;
    } else if (*m < 0) {
	*info = -4;
    } else if (*p < 0) {
	*info = -5;
    } else if (*n < 0) {
	*info = -6;
    } else if (*lda < max(1,*m)) {
	*info = -10;
    } else if (*ldb < max(1,*p)) {
	*info = -12;
    } else if (*ldu < 1 || wantu && *ldu < *m) {
	*info = -18;
    } else if (*ldv < 1 || wantv && *ldv < *p) {
	*info = -20;
    } else if (*ldq < 1 || wantq && *ldq < *n) {
	*info = -22;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZTGSJA", &i__1);
	return 0;
    }

/*     Initialize U, V and Q, if necessary */

    if (initu) {
	zlaset_("Full", m, m, &c_b1, &c_b2, &u[u_offset], ldu);
    }
    if (initv) {
	zlaset_("Full", p, p, &c_b1, &c_b2, &v[v_offset], ldv);
    }
    if (initq) {
	zlaset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
    }

/*     Loop until convergence */

    upper = FALSE_;
    for (kcycle = 1; kcycle <= 40; ++kcycle) {

	upper = ! upper;

	i__1 = *l - 1;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    i__2 = *l;
	    for (j = i__ + 1; j <= i__2; ++j) {

		a1 = 0.;
		a2.r = 0., a2.i = 0.;
		a3 = 0.;
		if (*k + i__ <= *m) {
		    i__3 = *k + i__ + (*n - *l + i__) * a_dim1;
		    a1 = a[i__3].r;
		}
		if (*k + j <= *m) {
		    i__3 = *k + j + (*n - *l + j) * a_dim1;
		    a3 = a[i__3].r;
		}

		i__3 = i__ + (*n - *l + i__) * b_dim1;
		b1 = b[i__3].r;
		i__3 = j + (*n - *l + j) * b_dim1;
		b3 = b[i__3].r;

		if (upper) {
		    if (*k + i__ <= *m) {
			i__3 = *k + i__ + (*n - *l + j) * a_dim1;
			a2.r = a[i__3].r, a2.i = a[i__3].i;
		    }
		    i__3 = i__ + (*n - *l + j) * b_dim1;
		    b2.r = b[i__3].r, b2.i = b[i__3].i;
		} else {
		    if (*k + j <= *m) {
			i__3 = *k + j + (*n - *l + i__) * a_dim1;
			a2.r = a[i__3].r, a2.i = a[i__3].i;
		    }
		    i__3 = j + (*n - *l + i__) * b_dim1;
		    b2.r = b[i__3].r, b2.i = b[i__3].i;
		}

		zlags2_(&upper, &a1, &a2, &a3, &b1, &b2, &b3, &csu, &snu, &
			csv, &snv, &csq, &snq);

/*              Update (K+I)-th and (K+J)-th rows of matrix A: U'*A */

		if (*k + j <= *m) {
		    d_cnjg(&z__1, &snu);
		    zrot_(l, &a[*k + j + (*n - *l + 1) * a_dim1], lda, &a[*k 
			    + i__ + (*n - *l + 1) * a_dim1], lda, &csu, &z__1)
			    ;
		}

/*              Update I-th and J-th rows of matrix B: V'*B */

		d_cnjg(&z__1, &snv);
		zrot_(l, &b[j + (*n - *l + 1) * b_dim1], ldb, &b[i__ + (*n - *
			l + 1) * b_dim1], ldb, &csv, &z__1);

/*              Update (N-L+I)-th and (N-L+J)-th columns of matrices */
/*              A and B: A*Q and B*Q */

/* Computing MIN */
		i__4 = *k + *l;
		i__3 = min(i__4,*m);
		zrot_(&i__3, &a[(*n - *l + j) * a_dim1 + 1], &c__1, &a[(*n - *
			l + i__) * a_dim1 + 1], &c__1, &csq, &snq);

		zrot_(l, &b[(*n - *l + j) * b_dim1 + 1], &c__1, &b[(*n - *l + 
			i__) * b_dim1 + 1], &c__1, &csq, &snq);

		if (upper) {
		    if (*k + i__ <= *m) {
			i__3 = *k + i__ + (*n - *l + j) * a_dim1;
			a[i__3].r = 0., a[i__3].i = 0.;
		    }
		    i__3 = i__ + (*n - *l + j) * b_dim1;
		    b[i__3].r = 0., b[i__3].i = 0.;
		} else {
		    if (*k + j <= *m) {
			i__3 = *k + j + (*n - *l + i__) * a_dim1;
			a[i__3].r = 0., a[i__3].i = 0.;
		    }
		    i__3 = j + (*n - *l + i__) * b_dim1;
		    b[i__3].r = 0., b[i__3].i = 0.;
		}

/*              Ensure that the diagonal elements of A and B are real. */

		if (*k + i__ <= *m) {
		    i__3 = *k + i__ + (*n - *l + i__) * a_dim1;
		    i__4 = *k + i__ + (*n - *l + i__) * a_dim1;
		    d__1 = a[i__4].r;
		    a[i__3].r = d__1, a[i__3].i = 0.;
		}
		if (*k + j <= *m) {
		    i__3 = *k + j + (*n - *l + j) * a_dim1;
		    i__4 = *k + j + (*n - *l + j) * a_dim1;
		    d__1 = a[i__4].r;
		    a[i__3].r = d__1, a[i__3].i = 0.;
		}
		i__3 = i__ + (*n - *l + i__) * b_dim1;
		i__4 = i__ + (*n - *l + i__) * b_dim1;
		d__1 = b[i__4].r;
		b[i__3].r = d__1, b[i__3].i = 0.;
		i__3 = j + (*n - *l + j) * b_dim1;
		i__4 = j + (*n - *l + j) * b_dim1;
		d__1 = b[i__4].r;
		b[i__3].r = d__1, b[i__3].i = 0.;

/*              Update unitary matrices U, V, Q, if desired. */

		if (wantu && *k + j <= *m) {
		    zrot_(m, &u[(*k + j) * u_dim1 + 1], &c__1, &u[(*k + i__) *
			     u_dim1 + 1], &c__1, &csu, &snu);
		}

		if (wantv) {
		    zrot_(p, &v[j * v_dim1 + 1], &c__1, &v[i__ * v_dim1 + 1], 
			    &c__1, &csv, &snv);
		}

		if (wantq) {
		    zrot_(n, &q[(*n - *l + j) * q_dim1 + 1], &c__1, &q[(*n - *
			    l + i__) * q_dim1 + 1], &c__1, &csq, &snq);
		}

/* L10: */
	    }
/* L20: */
	}

	if (! upper) {

/*           The matrices A23 and B13 were lower triangular at the start */
/*           of the cycle, and are now upper triangular. */

/*           Convergence test: test the parallelism of the corresponding */
/*           rows of A and B. */

	    error = 0.;
/* Computing MIN */
	    i__2 = *l, i__3 = *m - *k;
	    i__1 = min(i__2,i__3);
	    for (i__ = 1; i__ <= i__1; ++i__) {
		i__2 = *l - i__ + 1;
		zcopy_(&i__2, &a[*k + i__ + (*n - *l + i__) * a_dim1], lda, &
			work[1], &c__1);
		i__2 = *l - i__ + 1;
		zcopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &work[*
			l + 1], &c__1);
		i__2 = *l - i__ + 1;
		zlapll_(&i__2, &work[1], &c__1, &work[*l + 1], &c__1, &ssmin);
		error = max(error,ssmin);
/* L30: */
	    }

	    if (abs(error) <= min(*tola,*tolb)) {
		goto L50;
	    }
	}

/*        End of cycle loop */

/* L40: */
    }

/*     The algorithm has not converged after MAXIT cycles. */

    *info = 1;
    goto L100;

L50:

/*     If ERROR <= MIN(TOLA,TOLB), then the algorithm has converged. */
/*     Compute the generalized singular value pairs (ALPHA, BETA), and */
/*     set the triangular matrix R to array A. */

    i__1 = *k;
    for (i__ = 1; i__ <= i__1; ++i__) {
	alpha[i__] = 1.;
	beta[i__] = 0.;
/* L60: */
    }

/* Computing MIN */
    i__2 = *l, i__3 = *m - *k;
    i__1 = min(i__2,i__3);
    for (i__ = 1; i__ <= i__1; ++i__) {

	i__2 = *k + i__ + (*n - *l + i__) * a_dim1;
	a1 = a[i__2].r;
	i__2 = i__ + (*n - *l + i__) * b_dim1;
	b1 = b[i__2].r;

	if (a1 != 0.) {
	    gamma = b1 / a1;

	    if (gamma < 0.) {
		i__2 = *l - i__ + 1;
		zdscal_(&i__2, &c_b39, &b[i__ + (*n - *l + i__) * b_dim1], 
			ldb);
		if (wantv) {
		    zdscal_(p, &c_b39, &v[i__ * v_dim1 + 1], &c__1);
		}
	    }

	    d__1 = abs(gamma);
	    dlartg_(&d__1, &c_b42, &beta[*k + i__], &alpha[*k + i__], &rwk);

	    if (alpha[*k + i__] >= beta[*k + i__]) {
		i__2 = *l - i__ + 1;
		d__1 = 1. / alpha[*k + i__];
		zdscal_(&i__2, &d__1, &a[*k + i__ + (*n - *l + i__) * a_dim1], 
			 lda);
	    } else {
		i__2 = *l - i__ + 1;
		d__1 = 1. / beta[*k + i__];
		zdscal_(&i__2, &d__1, &b[i__ + (*n - *l + i__) * b_dim1], ldb)
			;
		i__2 = *l - i__ + 1;
		zcopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &a[*k 
			+ i__ + (*n - *l + i__) * a_dim1], lda);
	    }

	} else {
	    alpha[*k + i__] = 0.;
	    beta[*k + i__] = 1.;
	    i__2 = *l - i__ + 1;
	    zcopy_(&i__2, &b[i__ + (*n - *l + i__) * b_dim1], ldb, &a[*k + 
		    i__ + (*n - *l + i__) * a_dim1], lda);
	}
/* L70: */
    }

/*     Post-assignment */

    i__1 = *k + *l;
    for (i__ = *m + 1; i__ <= i__1; ++i__) {
	alpha[i__] = 0.;
	beta[i__] = 1.;
/* L80: */
    }

    if (*k + *l < *n) {
	i__1 = *n;
	for (i__ = *k + *l + 1; i__ <= i__1; ++i__) {
	    alpha[i__] = 0.;
	    beta[i__] = 0.;
/* L90: */
	}
    }

L100:
    *ncycle = kcycle;

    return 0;

/*     End of ZTGSJA */

} /* ztgsja_ */
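
/* A hedged calling-sequence sketch for the ztgsja_ translation above.
   It assumes the listing is compiled against f2c.h/libf2c (which supply
   the integer, doublereal and doublecomplex typedefs) and that, in a
   real application, A, B, K and L come from the preprocessing routine
   ZGGSVP; the tiny matrices below are placeholders only. */
#include "f2c.h"

extern int ztgsja_(char *jobu, char *jobv, char *jobq, integer *m,
	integer *p, integer *n, integer *k, integer *l, doublecomplex *a,
	integer *lda, doublecomplex *b, integer *ldb, doublereal *tola,
	doublereal *tolb, doublereal *alpha, doublereal *beta,
	doublecomplex *u, integer *ldu, doublecomplex *v, integer *ldv,
	doublecomplex *q, integer *ldq, doublecomplex *work,
	integer *ncycle, integer *info);

void gsvd_sketch(void)
{
    integer m = 2, p = 2, n = 2, k = 0, l = 2;
    integer lda = 2, ldb = 2, ldu = 2, ldv = 2, ldq = 2, ncycle, info;
    doublereal tola = 1e-12, tolb = 1e-12;  /* normally MAX(M,N)*norm(A)*MACHEPS etc. */
    doublereal alpha[2], beta[2];
    doublecomplex a[4], b[4], u[4], v[4], q[4], work[4];  /* WORK needs 2*N entries */

    /* Placeholder upper triangular data, stored column by column. */
    a[0].r = 1., a[0].i = 0.;  a[1].r = 0., a[1].i = 0.;
    a[2].r = .5, a[2].i = 0.;  a[3].r = 2., a[3].i = 0.;
    b[0].r = 1., b[0].i = 0.;  b[1].r = 0., b[1].i = 0.;
    b[2].r = 0., b[2].i = 0.;  b[3].r = 1., b[3].i = 0.;

    /* JOBU = JOBV = JOBQ = 'I': build U, V and Q from scratch. */
    ztgsja_("I", "I", "I", &m, &p, &n, &k, &l, a, &lda, b, &ldb,
	    &tola, &tolb, alpha, beta, u, &ldu, v, &ldv, q, &ldq,
	    work, &ncycle, &info);
}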
Example #5
/* Subroutine */ int zgghrd_(char *compq, char *compz, integer *n, integer *
	ilo, integer *ihi, doublecomplex *a, integer *lda, doublecomplex *b, 
	integer *ldb, doublecomplex *q, integer *ldq, doublecomplex *z__, 
	integer *ldz, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
	    z_offset, i__1, i__2, i__3;
    doublecomplex z__1;

    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    doublereal c__;
    doublecomplex s;
    logical ilq, ilz;
    integer jcol, jrow;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    extern logical lsame_(char *, char *);
    doublecomplex ctemp;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    integer icompq, icompz;
    extern /* Subroutine */ int zlaset_(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, doublecomplex *, integer *), zlartg_(doublecomplex *, doublecomplex *, doublereal *, 
	    doublecomplex *, doublecomplex *);


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZGGHRD reduces a pair of complex matrices (A,B) to generalized upper */
/*  Hessenberg form using unitary transformations, where A is a */
/*  general matrix and B is upper triangular.  The form of the */
/*  generalized eigenvalue problem is */
/*     A*x = lambda*B*x, */
/*  and B is typically made upper triangular by computing its QR */
/*  factorization and moving the unitary matrix Q to the left side */
/*  of the equation. */

/*  This subroutine simultaneously reduces A to a Hessenberg matrix H: */
/*     Q**H*A*Z = H */
/*  and transforms B to another upper triangular matrix T: */
/*     Q**H*B*Z = T */
/*  in order to reduce the problem to its standard form */
/*     H*y = lambda*T*y */
/*  where y = Z**H*x. */

/*  The unitary matrices Q and Z are determined as products of Givens */
/*  rotations.  They may either be formed explicitly, or they may be */
/*  postmultiplied into input matrices Q1 and Z1, so that */
/*       Q1 * A * Z1**H = (Q1*Q) * H * (Z1*Z)**H */
/*       Q1 * B * Z1**H = (Q1*Q) * T * (Z1*Z)**H */
/*  If Q1 is the unitary matrix from the QR factorization of B in the */
/*  original equation A*x = lambda*B*x, then ZGGHRD reduces the original */
/*  problem to generalized Hessenberg form. */

/*  Arguments */
/*  ========= */

/*  COMPQ   (input) CHARACTER*1 */
/*          = 'N': do not compute Q; */
/*          = 'I': Q is initialized to the unit matrix, and the */
/*                 unitary matrix Q is returned; */
/*          = 'V': Q must contain a unitary matrix Q1 on entry, */
/*                 and the product Q1*Q is returned. */

/*  COMPZ   (input) CHARACTER*1 */
/*          = 'N': do not compute Z; */
/*          = 'I': Z is initialized to the unit matrix, and the */
/*                 unitary matrix Z is returned; */
/*          = 'V': Z must contain a unitary matrix Z1 on entry, */
/*                 and the product Z1*Z is returned. */

/*  N       (input) INTEGER */
/*          The order of the matrices A and B.  N >= 0. */

/*  ILO     (input) INTEGER */
/*  IHI     (input) INTEGER */
/*          ILO and IHI mark the rows and columns of A which are to be */
/*          reduced.  It is assumed that A is already upper triangular */
/*          in rows and columns 1:ILO-1 and IHI+1:N.  ILO and IHI are */
/*          normally set by a previous call to ZGGBAL; otherwise they */
/*          should be set to 1 and N respectively. */
/*          1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */

/*  A       (input/output) COMPLEX*16 array, dimension (LDA, N) */
/*          On entry, the N-by-N general matrix to be reduced. */
/*          On exit, the upper triangle and the first subdiagonal of A */
/*          are overwritten with the upper Hessenberg matrix H, and the */
/*          rest is set to zero. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,N). */

/*  B       (input/output) COMPLEX*16 array, dimension (LDB, N) */
/*          On entry, the N-by-N upper triangular matrix B. */
/*          On exit, the upper triangular matrix T = Q**H B Z.  The */
/*          elements below the diagonal are set to zero. */

/*  LDB     (input) INTEGER */
/*          The leading dimension of the array B.  LDB >= max(1,N). */

/*  Q       (input/output) COMPLEX*16 array, dimension (LDQ, N) */
/*          On entry, if COMPQ = 'V', the unitary matrix Q1, typically */
/*          from the QR factorization of B. */
/*          On exit, if COMPQ='I', the unitary matrix Q, and if */
/*          COMPQ = 'V', the product Q1*Q. */
/*          Not referenced if COMPQ='N'. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. */
/*          LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. */

/*  Z       (input/output) COMPLEX*16 array, dimension (LDZ, N) */
/*          On entry, if COMPZ = 'V', the unitary matrix Z1. */
/*          On exit, if COMPZ='I', the unitary matrix Z, and if */
/*          COMPZ = 'V', the product Z1*Z. */
/*          Not referenced if COMPZ='N'. */

/*  LDZ     (input) INTEGER */
/*          The leading dimension of the array Z. */
/*          LDZ >= N if COMPZ='V' or 'I'; LDZ >= 1 otherwise. */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit. */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */

/*  Further Details */
/*  =============== */

/*  This routine reduces A to Hessenberg and B to triangular form by */
/*  an unblocked reduction, as described in _Matrix_Computations_, */
/*  by Golub and van Loan (Johns Hopkins Press). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Decode COMPQ */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;

    /* Function Body */
    if (lsame_(compq, "N")) {
	ilq = FALSE_;
	icompq = 1;
    } else if (lsame_(compq, "V")) {
	ilq = TRUE_;
	icompq = 2;
    } else if (lsame_(compq, "I")) {
	ilq = TRUE_;
	icompq = 3;
    } else {
	icompq = 0;
    }

/*     Decode COMPZ */

    if (lsame_(compz, "N")) {
	ilz = FALSE_;
	icompz = 1;
    } else if (lsame_(compz, "V")) {
	ilz = TRUE_;
	icompz = 2;
    } else if (lsame_(compz, "I")) {
	ilz = TRUE_;
	icompz = 3;
    } else {
	icompz = 0;
    }

/*     Test the input parameters. */

    *info = 0;
    if (icompq <= 0) {
	*info = -1;
    } else if (icompz <= 0) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*ilo < 1) {
	*info = -4;
    } else if (*ihi > *n || *ihi < *ilo - 1) {
	*info = -5;
    } else if (*lda < max(1,*n)) {
	*info = -7;
    } else if (*ldb < max(1,*n)) {
	*info = -9;
    } else if (ilq && *ldq < *n || *ldq < 1) {
	*info = -11;
    } else if (ilz && *ldz < *n || *ldz < 1) {
	*info = -13;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZGGHRD", &i__1);
	return 0;
    }

/*     Initialize Q and Z if desired. */

    if (icompq == 3) {
	zlaset_("Full", n, n, &c_b2, &c_b1, &q[q_offset], ldq);
    }
    if (icompz == 3) {
	zlaset_("Full", n, n, &c_b2, &c_b1, &z__[z_offset], ldz);
    }

/*     Quick return if possible */

    if (*n <= 1) {
	return 0;
    }

/*     Zero out lower triangle of B */

    i__1 = *n - 1;
    for (jcol = 1; jcol <= i__1; ++jcol) {
	i__2 = *n;
	for (jrow = jcol + 1; jrow <= i__2; ++jrow) {
	    i__3 = jrow + jcol * b_dim1;
	    b[i__3].r = 0., b[i__3].i = 0.;
/* L10: */
	}
/* L20: */
    }

/*     Reduce A and B */

    i__1 = *ihi - 2;
    for (jcol = *ilo; jcol <= i__1; ++jcol) {

	i__2 = jcol + 2;
	for (jrow = *ihi; jrow >= i__2; --jrow) {

/*           Step 1: rotate rows JROW-1, JROW to kill A(JROW,JCOL) */

	    i__3 = jrow - 1 + jcol * a_dim1;
	    ctemp.r = a[i__3].r, ctemp.i = a[i__3].i;
	    zlartg_(&ctemp, &a[jrow + jcol * a_dim1], &c__, &s, &a[jrow - 1 + 
		    jcol * a_dim1]);
	    i__3 = jrow + jcol * a_dim1;
	    a[i__3].r = 0., a[i__3].i = 0.;
	    i__3 = *n - jcol;
	    zrot_(&i__3, &a[jrow - 1 + (jcol + 1) * a_dim1], lda, &a[jrow + (
		    jcol + 1) * a_dim1], lda, &c__, &s);
	    i__3 = *n + 2 - jrow;
	    zrot_(&i__3, &b[jrow - 1 + (jrow - 1) * b_dim1], ldb, &b[jrow + (
		    jrow - 1) * b_dim1], ldb, &c__, &s);
	    if (ilq) {
		d_cnjg(&z__1, &s);
		zrot_(n, &q[(jrow - 1) * q_dim1 + 1], &c__1, &q[jrow * q_dim1 
			+ 1], &c__1, &c__, &z__1);
	    }

/*           Step 2: rotate columns JROW, JROW-1 to kill B(JROW,JROW-1) */

	    i__3 = jrow + jrow * b_dim1;
	    ctemp.r = b[i__3].r, ctemp.i = b[i__3].i;
	    zlartg_(&ctemp, &b[jrow + (jrow - 1) * b_dim1], &c__, &s, &b[jrow 
		    + jrow * b_dim1]);
	    i__3 = jrow + (jrow - 1) * b_dim1;
	    b[i__3].r = 0., b[i__3].i = 0.;
	    zrot_(ihi, &a[jrow * a_dim1 + 1], &c__1, &a[(jrow - 1) * a_dim1 + 
		    1], &c__1, &c__, &s);
	    i__3 = jrow - 1;
	    zrot_(&i__3, &b[jrow * b_dim1 + 1], &c__1, &b[(jrow - 1) * b_dim1 
		    + 1], &c__1, &c__, &s);
	    if (ilz) {
		zrot_(n, &z__[jrow * z_dim1 + 1], &c__1, &z__[(jrow - 1) * 
			z_dim1 + 1], &c__1, &c__, &s);
	    }
/* L30: */
	}
/* L40: */
    }

    return 0;

/*     End of ZGGHRD */

} /* zgghrd_ */
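
/* A hedged sketch of calling the zgghrd_ translation above (the first
   version, without trailing ftnlen arguments) on a 3x3 pair (A, B) with
   B already upper triangular, e.g. from a QR factorization of the
   original B.  It assumes f2c.h supplies the integer and doublecomplex
   typedefs; COMPQ = COMPZ = 'I' builds Q and Z from scratch. */
#include "f2c.h"

extern int zgghrd_(char *compq, char *compz, integer *n, integer *ilo,
	integer *ihi, doublecomplex *a, integer *lda, doublecomplex *b,
	integer *ldb, doublecomplex *q, integer *ldq, doublecomplex *z__,
	integer *ldz, integer *info);

void hessenberg_sketch(doublecomplex a[9], doublecomplex b[9])
{
    integer n = 3, ilo = 1, ihi = 3;  /* no balancing: reduce the full range */
    integer lda = 3, ldb = 3, ldq = 3, ldz = 3, info;
    doublecomplex q[9], z[9];

    /* On return A is upper Hessenberg, B remains upper triangular, and
       Q, Z accumulate the Givens rotations; the pair is then ready for
       the QZ iteration in zhgeqz_. */
    zgghrd_("I", "I", &n, &ilo, &ihi, a, &lda, b, &ldb,
	    q, &ldq, z, &ldz, &info);
}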
/*<    >*/
/* Subroutine */ int zgghrd_(char *compq, char *compz, integer *n, integer *
        ilo, integer *ihi, doublecomplex *a, integer *lda, doublecomplex *b,
        integer *ldb, doublecomplex *q, integer *ldq, doublecomplex *z__,
        integer *ldz, integer *info, ftnlen compq_len, ftnlen compz_len)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
            z_offset, i__1, i__2, i__3;
    doublecomplex z__1;

    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    doublereal c__;
    doublecomplex s;
    logical ilq, ilz;
    integer jcol, jrow;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *,
            doublecomplex *, integer *, doublereal *, doublecomplex *);
    extern logical lsame_(const char *, const char *, ftnlen, ftnlen);
    doublecomplex ctemp;
    extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);
    integer icompq, icompz;
    extern /* Subroutine */ int zlaset_(char *, integer *, integer *,
            doublecomplex *, doublecomplex *, doublecomplex *, integer *,
            ftnlen), zlartg_(doublecomplex *, doublecomplex *, doublereal *,
            doublecomplex *, doublecomplex *);
    (void)compq_len;
    (void)compz_len;

/*  -- LAPACK routine (version 3.2) -- */
/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*<       CHARACTER          COMPQ, COMPZ >*/
/*<       INTEGER            IHI, ILO, INFO, LDA, LDB, LDQ, LDZ, N >*/
/*     .. */
/*     .. Array Arguments .. */
/*<    >*/
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZGGHRD reduces a pair of complex matrices (A,B) to generalized upper */
/*  Hessenberg form using unitary transformations, where A is a */
/*  general matrix and B is upper triangular.  The form of the */
/*  generalized eigenvalue problem is */
/*     A*x = lambda*B*x, */
/*  and B is typically made upper triangular by computing its QR */
/*  factorization and moving the unitary matrix Q to the left side */
/*  of the equation. */

/*  This subroutine simultaneously reduces A to a Hessenberg matrix H: */
/*     Q**H*A*Z = H */
/*  and transforms B to another upper triangular matrix T: */
/*     Q**H*B*Z = T */
/*  in order to reduce the problem to its standard form */
/*     H*y = lambda*T*y */
/*  where y = Z**H*x. */

/*  The unitary matrices Q and Z are determined as products of Givens */
/*  rotations.  They may either be formed explicitly, or they may be */
/*  postmultiplied into input matrices Q1 and Z1, so that */
/*       Q1 * A * Z1**H = (Q1*Q) * H * (Z1*Z)**H */
/*       Q1 * B * Z1**H = (Q1*Q) * T * (Z1*Z)**H */
/*  If Q1 is the unitary matrix from the QR factorization of B in the */
/*  original equation A*x = lambda*B*x, then ZGGHRD reduces the original */
/*  problem to generalized Hessenberg form. */

/*  Arguments */
/*  ========= */

/*  COMPQ   (input) CHARACTER*1 */
/*          = 'N': do not compute Q; */
/*          = 'I': Q is initialized to the unit matrix, and the */
/*                 unitary matrix Q is returned; */
/*          = 'V': Q must contain a unitary matrix Q1 on entry, */
/*                 and the product Q1*Q is returned. */

/*  COMPZ   (input) CHARACTER*1 */
/*          = 'N': do not compute Z; */
/*          = 'I': Z is initialized to the unit matrix, and the */
/*                 unitary matrix Z is returned; */
/*          = 'V': Z must contain a unitary matrix Z1 on entry, */
/*                 and the product Z1*Z is returned. */

/*  N       (input) INTEGER */
/*          The order of the matrices A and B.  N >= 0. */

/*  ILO     (input) INTEGER */
/*  IHI     (input) INTEGER */
/*          ILO and IHI mark the rows and columns of A which are to be */
/*          reduced.  It is assumed that A is already upper triangular */
/*          in rows and columns 1:ILO-1 and IHI+1:N.  ILO and IHI are */
/*          normally set by a previous call to ZGGBAL; otherwise they */
/*          should be set to 1 and N respectively. */
/*          1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */

/*  A       (input/output) COMPLEX*16 array, dimension (LDA, N) */
/*          On entry, the N-by-N general matrix to be reduced. */
/*          On exit, the upper triangle and the first subdiagonal of A */
/*          are overwritten with the upper Hessenberg matrix H, and the */
/*          rest is set to zero. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,N). */

/*  B       (input/output) COMPLEX*16 array, dimension (LDB, N) */
/*          On entry, the N-by-N upper triangular matrix B. */
/*          On exit, the upper triangular matrix T = Q**H B Z.  The */
/*          elements below the diagonal are set to zero. */

/*  LDB     (input) INTEGER */
/*          The leading dimension of the array B.  LDB >= max(1,N). */

/*  Q       (input/output) COMPLEX*16 array, dimension (LDQ, N) */
/*          On entry, if COMPQ = 'V', the unitary matrix Q1, typically */
/*          from the QR factorization of B. */
/*          On exit, if COMPQ='I', the unitary matrix Q, and if */
/*          COMPQ = 'V', the product Q1*Q. */
/*          Not referenced if COMPQ='N'. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. */
/*          LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. */

/*  Z       (input/output) COMPLEX*16 array, dimension (LDZ, N) */
/*          On entry, if COMPZ = 'V', the unitary matrix Z1. */
/*          On exit, if COMPZ='I', the unitary matrix Z, and if */
/*          COMPZ = 'V', the product Z1*Z. */
/*          Not referenced if COMPZ='N'. */

/*  LDZ     (input) INTEGER */
/*          The leading dimension of the array Z. */
/*          LDZ >= N if COMPZ='V' or 'I'; LDZ >= 1 otherwise. */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit. */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */

/*  Further Details */
/*  =============== */

/*  This routine reduces A to Hessenberg and B to triangular form by */
/*  an unblocked reduction, as described in _Matrix_Computations_, */
/*  by Golub and van Loan (Johns Hopkins Press). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*<       COMPLEX*16         CONE, CZERO >*/
/*<    >*/
/*     .. */
/*     .. Local Scalars .. */
/*<       LOGICAL            ILQ, ILZ >*/
/*<       INTEGER            ICOMPQ, ICOMPZ, JCOL, JROW >*/
/*<       DOUBLE PRECISION   C >*/
/*<       COMPLEX*16         CTEMP, S >*/
/*     .. */
/*     .. External Functions .. */
/*<       LOGICAL            LSAME >*/
/*<       EXTERNAL           LSAME >*/
/*     .. */
/*     .. External Subroutines .. */
/*<       EXTERNAL           XERBLA, ZLARTG, ZLASET, ZROT >*/
/*     .. */
/*     .. Intrinsic Functions .. */
/*<       INTRINSIC          DCONJG, MAX >*/
/*     .. */
/*     .. Executable Statements .. */

/*     Decode COMPQ */

/*<       IF( LSAME( COMPQ, 'N' ) ) THEN >*/
    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;

    /* Function Body */
    if (lsame_(compq, "N", (ftnlen)1, (ftnlen)1)) {
/*<          ILQ = .FALSE. >*/
        ilq = FALSE_;
/*<          ICOMPQ = 1 >*/
        icompq = 1;
/*<       ELSE IF( LSAME( COMPQ, 'V' ) ) THEN >*/
    } else if (lsame_(compq, "V", (ftnlen)1, (ftnlen)1)) {
/*<          ILQ = .TRUE. >*/
        ilq = TRUE_;
/*<          ICOMPQ = 2 >*/
        icompq = 2;
/*<       ELSE IF( LSAME( COMPQ, 'I' ) ) THEN >*/
    } else if (lsame_(compq, "I", (ftnlen)1, (ftnlen)1)) {
/*<          ILQ = .TRUE. >*/
        ilq = TRUE_;
/*<          ICOMPQ = 3 >*/
        icompq = 3;
/*<       ELSE >*/
    } else {
/*<          ICOMPQ = 0 >*/
        icompq = 0;
/*<       END IF >*/
    }

/*     Decode COMPZ */

/*<       IF( LSAME( COMPZ, 'N' ) ) THEN >*/
    if (lsame_(compz, "N", (ftnlen)1, (ftnlen)1)) {
/*<          ILZ = .FALSE. >*/
        ilz = FALSE_;
/*<          ICOMPZ = 1 >*/
        icompz = 1;
/*<       ELSE IF( LSAME( COMPZ, 'V' ) ) THEN >*/
    } else if (lsame_(compz, "V", (ftnlen)1, (ftnlen)1)) {
/*<          ILZ = .TRUE. >*/
        ilz = TRUE_;
/*<          ICOMPZ = 2 >*/
        icompz = 2;
/*<       ELSE IF( LSAME( COMPZ, 'I' ) ) THEN >*/
    } else if (lsame_(compz, "I", (ftnlen)1, (ftnlen)1)) {
/*<          ILZ = .TRUE. >*/
        ilz = TRUE_;
/*<          ICOMPZ = 3 >*/
        icompz = 3;
/*<       ELSE >*/
    } else {
/*<          ICOMPZ = 0 >*/
        icompz = 0;
/*<       END IF >*/
    }

/*     Test the input parameters. */

/*<       INFO = 0 >*/
    *info = 0;
/*<       IF( ICOMPQ.LE.0 ) THEN >*/
    if (icompq <= 0) {
/*<          INFO = -1 >*/
        *info = -1;
/*<       ELSE IF( ICOMPZ.LE.0 ) THEN >*/
    } else if (icompz <= 0) {
/*<          INFO = -2 >*/
        *info = -2;
/*<       ELSE IF( N.LT.0 ) THEN >*/
    } else if (*n < 0) {
/*<          INFO = -3 >*/
        *info = -3;
/*<       ELSE IF( ILO.LT.1 ) THEN >*/
    } else if (*ilo < 1) {
/*<          INFO = -4 >*/
        *info = -4;
/*<       ELSE IF( IHI.GT.N .OR. IHI.LT.ILO-1 ) THEN >*/
    } else if (*ihi > *n || *ihi < *ilo - 1) {
/*<          INFO = -5 >*/
        *info = -5;
/*<       ELSE IF( LDA.LT.MAX( 1, N ) ) THEN >*/
    } else if (*lda < max(1,*n)) {
/*<          INFO = -7 >*/
        *info = -7;
/*<       ELSE IF( LDB.LT.MAX( 1, N ) ) THEN >*/
    } else if (*ldb < max(1,*n)) {
/*<          INFO = -9 >*/
        *info = -9;
/*<       ELSE IF( ( ILQ .AND. LDQ.LT.N ) .OR. LDQ.LT.1 ) THEN >*/
    } else if ((ilq && *ldq < *n) || *ldq < 1) {
/*<          INFO = -11 >*/
        *info = -11;
/*<       ELSE IF( ( ILZ .AND. LDZ.LT.N ) .OR. LDZ.LT.1 ) THEN >*/
    } else if ((ilz && *ldz < *n) || *ldz < 1) {
/*<          INFO = -13 >*/
        *info = -13;
/*<       END IF >*/
    }
/*<       IF( INFO.NE.0 ) THEN >*/
    if (*info != 0) {
/*<          CALL XERBLA( 'ZGGHRD', -INFO ) >*/
        i__1 = -(*info);
        xerbla_("ZGGHRD", &i__1, (ftnlen)6);
/*<          RETURN >*/
        return 0;
/*<       END IF >*/
    }

/*     Initialize Q and Z if desired. */

/*<    >*/
    if (icompq == 3) {
        zlaset_("Full", n, n, &c_b2, &c_b1, &q[q_offset], ldq, (ftnlen)4);
    }
/*<    >*/
    if (icompz == 3) {
        zlaset_("Full", n, n, &c_b2, &c_b1, &z__[z_offset], ldz, (ftnlen)4);
    }

/*     Quick return if possible */

/*<    >*/
    if (*n <= 1) {
        return 0;
    }

/*     Zero out lower triangle of B */

/*<       DO 20 JCOL = 1, N - 1 >*/
    i__1 = *n - 1;
    for (jcol = 1; jcol <= i__1; ++jcol) {
/*<          DO 10 JROW = JCOL + 1, N >*/
        i__2 = *n;
        for (jrow = jcol + 1; jrow <= i__2; ++jrow) {
/*<             B( JROW, JCOL ) = CZERO >*/
            i__3 = jrow + jcol * b_dim1;
            b[i__3].r = 0., b[i__3].i = 0.;
/*<    10    CONTINUE >*/
/* L10: */
        }
/*<    20 CONTINUE >*/
/* L20: */
    }

/*     Reduce A and B */

/*<       DO 40 JCOL = ILO, IHI - 2 >*/
    i__1 = *ihi - 2;
    for (jcol = *ilo; jcol <= i__1; ++jcol) {

/*<          DO 30 JROW = IHI, JCOL + 2, -1 >*/
        i__2 = jcol + 2;
        for (jrow = *ihi; jrow >= i__2; --jrow) {

/*           Step 1: rotate rows JROW-1, JROW to kill A(JROW,JCOL) */

/*<             CTEMP = A( JROW-1, JCOL ) >*/
            i__3 = jrow - 1 + jcol * a_dim1;
            ctemp.r = a[i__3].r, ctemp.i = a[i__3].i;
/*<    >*/
            zlartg_(&ctemp, &a[jrow + jcol * a_dim1], &c__, &s, &a[jrow - 1 +
                    jcol * a_dim1]);
/*<             A( JROW, JCOL ) = CZERO >*/
            i__3 = jrow + jcol * a_dim1;
            a[i__3].r = 0., a[i__3].i = 0.;
/*<    >*/
            i__3 = *n - jcol;
            zrot_(&i__3, &a[jrow - 1 + (jcol + 1) * a_dim1], lda, &a[jrow + (
                    jcol + 1) * a_dim1], lda, &c__, &s);
/*<    >*/
            i__3 = *n + 2 - jrow;
            zrot_(&i__3, &b[jrow - 1 + (jrow - 1) * b_dim1], ldb, &b[jrow + (
                    jrow - 1) * b_dim1], ldb, &c__, &s);
/*<    >*/
            if (ilq) {
                d_cnjg(&z__1, &s);
                zrot_(n, &q[(jrow - 1) * q_dim1 + 1], &c__1, &q[jrow * q_dim1
                        + 1], &c__1, &c__, &z__1);
            }

/*           Step 2: rotate columns JROW, JROW-1 to kill B(JROW,JROW-1) */

/*<             CTEMP = B( JROW, JROW ) >*/
            i__3 = jrow + jrow * b_dim1;
            ctemp.r = b[i__3].r, ctemp.i = b[i__3].i;
/*<    >*/
            zlartg_(&ctemp, &b[jrow + (jrow - 1) * b_dim1], &c__, &s, &b[jrow
                    + jrow * b_dim1]);
/*<             B( JROW, JROW-1 ) = CZERO >*/
            i__3 = jrow + (jrow - 1) * b_dim1;
            b[i__3].r = 0., b[i__3].i = 0.;
/*<             CALL ZROT( IHI, A( 1, JROW ), 1, A( 1, JROW-1 ), 1, C, S ) >*/
            zrot_(ihi, &a[jrow * a_dim1 + 1], &c__1, &a[(jrow - 1) * a_dim1 +
                    1], &c__1, &c__, &s);
/*<    >*/
            i__3 = jrow - 1;
            zrot_(&i__3, &b[jrow * b_dim1 + 1], &c__1, &b[(jrow - 1) * b_dim1
                    + 1], &c__1, &c__, &s);
/*<    >*/
            if (ilz) {
                zrot_(n, &z__[jrow * z_dim1 + 1], &c__1, &z__[(jrow - 1) *
                        z_dim1 + 1], &c__1, &c__, &s);
            }
/*<    30    CONTINUE >*/
/* L30: */
        }
/*<    40 CONTINUE >*/
/* L40: */
    }

/*<       RETURN >*/
    return 0;

/*     End of ZGGHRD */

/*<       END >*/
} /* zgghrd_ */
Example #7
/* Subroutine */ int zlattr_(integer *imat, char *uplo, char *trans, char *
	diag, integer *iseed, integer *n, doublecomplex *a, integer *lda, 
	doublecomplex *b, doublecomplex *work, doublereal *rwork, integer *
	info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
    doublereal d__1, d__2;
    doublecomplex z__1, z__2;

    /* Builtin functions */
    /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen);
    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
    double pow_dd(doublereal *, doublereal *), sqrt(doublereal);
    void d_cnjg(doublecomplex *, doublecomplex *);
    double z_abs(doublecomplex *);

    /* Local variables */
    doublereal c__;
    integer i__, j;
    doublecomplex s;
    doublereal x, y, z__;
    doublecomplex ra, rb;
    integer kl, ku, iy;
    doublereal ulp, sfac;
    integer mode;
    char path[3], dist[1];
    doublereal unfl, rexp;
    char type__[1];
    doublereal texp;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    doublecomplex star1, plus1, plus2;
    doublereal bscal;
    extern logical lsame_(char *, char *);
    doublereal tscal, anorm, bnorm, tleft;
    logical upper;
    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *), zrotg_(doublecomplex *, 
	    doublecomplex *, doublereal *, doublecomplex *), zswap_(integer *, 
	     doublecomplex *, integer *, doublecomplex *, integer *), zlatb4_(
	    char *, integer *, integer *, integer *, char *, integer *, 
	    integer *, doublereal *, integer *, doublereal *, char *), dlabad_(doublereal *, doublereal *);
    extern doublereal dlamch_(char *), dlarnd_(integer *, integer *);
    extern /* Subroutine */ int zdscal_(integer *, doublereal *, 
	    doublecomplex *, integer *);
    doublereal bignum, cndnum;
    extern /* Subroutine */ int dlarnv_(integer *, integer *, integer *, 
	    doublereal *);
    extern integer izamax_(integer *, doublecomplex *, integer *);
    extern /* Double Complex */ VOID zlarnd_(doublecomplex *, integer *, 
	    integer *);
    integer jcount;
    extern /* Subroutine */ int zlatms_(integer *, integer *, char *, integer 
	    *, char *, doublereal *, integer *, doublereal *, doublereal *, 
	    integer *, integer *, char *, doublecomplex *, integer *, 
	    doublecomplex *, integer *);
    doublereal smlnum;
    extern /* Subroutine */ int zlarnv_(integer *, integer *, integer *, 
	    doublecomplex *);


/*  -- LAPACK test routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZLATTR generates a triangular test matrix in 2-dimensional storage. */
/*  IMAT and UPLO uniquely specify the properties of the test matrix, */
/*  which is returned in the array A. */

/*  Arguments */
/*  ========= */

/*  IMAT    (input) INTEGER */
/*          An integer key describing which matrix to generate for this */
/*          path. */

/*  UPLO    (input) CHARACTER*1 */
/*          Specifies whether the matrix A will be upper or lower */
/*          triangular. */
/*          = 'U':  Upper triangular */
/*          = 'L':  Lower triangular */

/*  TRANS   (input) CHARACTER*1 */
/*          Specifies whether the matrix or its transpose will be used. */
/*          = 'N':  No transpose */
/*          = 'T':  Transpose */
/*          = 'C':  Conjugate transpose */

/*  DIAG    (output) CHARACTER*1 */
/*          Specifies whether or not the matrix A is unit triangular. */
/*          = 'N':  Non-unit triangular */
/*          = 'U':  Unit triangular */

/*  ISEED   (input/output) INTEGER array, dimension (4) */
/*          The seed vector for the random number generator (used in */
/*          ZLATMS).  Modified on exit. */

/*  N       (input) INTEGER */
/*          The order of the matrix to be generated. */

/*  A       (output) COMPLEX*16 array, dimension (LDA,N) */
/*          The triangular matrix A.  If UPLO = 'U', the leading N x N */
/*          upper triangular part of the array A contains the upper */
/*          triangular matrix, and the strictly lower triangular part of */
/*          A is not referenced.  If UPLO = 'L', the leading N x N lower */
/*          triangular part of the array A contains the lower triangular */
/*          matrix and the strictly upper triangular part of A is not */
/*          referenced. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,N). */

/*  B       (output) COMPLEX*16 array, dimension (N) */
/*          The right hand side vector, if IMAT > 10. */

/*  WORK    (workspace) COMPLEX*16 array, dimension (2*N) */

/*  RWORK   (workspace) DOUBLE PRECISION array, dimension (N) */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    --iseed;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --b;
    --work;
    --rwork;

    /* Function Body */
    s_copy(path, "Zomplex precision", (ftnlen)1, (ftnlen)17);
    s_copy(path + 1, "TR", (ftnlen)2, (ftnlen)2);
    unfl = dlamch_("Safe minimum");
    ulp = dlamch_("Epsilon") * dlamch_("Base");
    smlnum = unfl;
    bignum = (1. - ulp) / smlnum;
    dlabad_(&smlnum, &bignum);
    if (*imat >= 7 && *imat <= 10 || *imat == 18) {
	*(unsigned char *)diag = 'U';
    } else {
	*(unsigned char *)diag = 'N';
    }
    *info = 0;

/*     Quick return if N.LE.0. */

    if (*n <= 0) {
	return 0;
    }

/*     Call ZLATB4 to set parameters for ZLATMS. */

    upper = lsame_(uplo, "U");
    if (upper) {
	zlatb4_(path, imat, n, n, type__, &kl, &ku, &anorm, &mode, &cndnum, 
		dist);
    } else {
	i__1 = -(*imat);
	zlatb4_(path, &i__1, n, n, type__, &kl, &ku, &anorm, &mode, &cndnum, 
		dist);
    }

/*     IMAT <= 6:  Non-unit triangular matrix */

    if (*imat <= 6) {
	zlatms_(n, n, dist, &iseed[1], type__, &rwork[1], &mode, &cndnum, &
		anorm, &kl, &ku, "No packing", &a[a_offset], lda, &work[1], 
		info);

/*     IMAT > 6:  Unit triangular matrix */
/*     The diagonal is deliberately set to something other than 1. */

/*     IMAT = 7:  Matrix is the identity */

    } else if (*imat == 7) {
	if (upper) {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j - 1;
		for (i__ = 1; i__ <= i__2; ++i__) {
		    i__3 = i__ + j * a_dim1;
		    a[i__3].r = 0., a[i__3].i = 0.;
/* L10: */
		}
		i__2 = j + j * a_dim1;
		a[i__2].r = (doublereal) j, a[i__2].i = 0.;
/* L20: */
	    }
	} else {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j + j * a_dim1;
		a[i__2].r = (doublereal) j, a[i__2].i = 0.;
		i__2 = *n;
		for (i__ = j + 1; i__ <= i__2; ++i__) {
		    i__3 = i__ + j * a_dim1;
		    a[i__3].r = 0., a[i__3].i = 0.;
/* L30: */
		}
/* L40: */
	    }
	}

/*     IMAT > 7:  Non-trivial unit triangular matrix */

/*     Generate a unit triangular matrix T with condition CNDNUM by */
/*     forming a triangular matrix with known singular values and */
/*     filling in the zero entries with Givens rotations. */

    } else if (*imat <= 10) {
	if (upper) {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j - 1;
		for (i__ = 1; i__ <= i__2; ++i__) {
		    i__3 = i__ + j * a_dim1;
		    a[i__3].r = 0., a[i__3].i = 0.;
/* L50: */
		}
		i__2 = j + j * a_dim1;
		a[i__2].r = (doublereal) j, a[i__2].i = 0.;
/* L60: */
	    }
	} else {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j + j * a_dim1;
		a[i__2].r = (doublereal) j, a[i__2].i = 0.;
		i__2 = *n;
		for (i__ = j + 1; i__ <= i__2; ++i__) {
		    i__3 = i__ + j * a_dim1;
		    a[i__3].r = 0., a[i__3].i = 0.;
/* L70: */
		}
/* L80: */
	    }
	}

/*        Since the determinant of a unit triangular matrix is 1, the product */
/*        of its singular values must be 1.  Let s = sqrt(CNDNUM), */
/*        x = sqrt(s) - 1/sqrt(s), y = sqrt(2/(n-2))*x, and z = x**2. */
/*        The following triangular matrix has singular values s, 1, 1, */
/*        ..., 1, 1/s: */

/*        1  y  y  y  ...  y  y  z */
/*           1  0  0  ...  0  0  y */
/*              1  0  ...  0  0  y */
/*                 .  ...  .  .  . */
/*                     .   .  .  . */
/*                         1  0  y */
/*                            1  y */
/*                               1 */

/*        To fill in the zeros, we first multiply by a matrix with small */
/*        condition number of the form */

/*        1  0  0  0  0  ... */
/*           1  +  *  0  0  ... */
/*              1  +  0  0  0 */
/*                 1  +  *  0  0 */
/*                    1  +  0  0 */
/*                       ... */
/*                          1  +  0 */
/*                             1  0 */
/*                                1 */

/*        Each element marked with a '*' is formed by taking the product */
/*        of the adjacent elements marked with '+'.  The '*'s can be */
/*        chosen freely, and the '+'s are chosen so that the inverse of */
/*        T will have elements of the same magnitude as T.  If the *'s in */
/*        both T and inv(T) have small magnitude, T is well conditioned. */
/*        The two offdiagonals of T are stored in WORK. */

/*        The product of these two matrices has the form */

/*        1  y  y  y  y  y  .  y  y  z */
/*           1  +  *  0  0  .  0  0  y */
/*              1  +  0  0  .  0  0  y */
/*                 1  +  *  .  .  .  . */
/*                    1  +  .  .  .  . */
/*                       .  .  .  .  . */
/*                          .  .  .  . */
/*                             1  +  y */
/*                                1  y */
/*                                   1 */

/*        Now we multiply by Givens rotations, using the fact that */

/*              [  c   s ] [  1   w ] [ -c  -s ] =  [  1  -w ] */
/*              [ -s   c ] [  0   1 ] [  s  -c ]    [  0   1 ] */
/*        and */
/*              [ -c  -s ] [  1   0 ] [  c   s ] =  [  1   0 ] */
/*              [  s  -c ] [  w   1 ] [ -s   c ]    [ -w   1 ] */

/*        where c = w / sqrt(w**2+4) and s = 2 / sqrt(w**2+4). */
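
/*        (Check of the first identity for real w: writing d = w**2+4, the */
/*        (1,1) entry of the product is s**2 - c**2 + c*s*w */
/*        = (4 - w**2 + 2*w**2)/d = 1, and the (1,2) entry is */
/*        -2*c*s - c**2*w = -(4*w + w**3)/d = -w, matching [ 1 -w; 0 1 ]. */
/*        In the complex code below the rotation is built with ZROTG */
/*        instead of these closed forms.) */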

	zlarnd_(&z__2, &c__5, &iseed[1]);
	z__1.r = z__2.r * .25, z__1.i = z__2.i * .25;
	star1.r = z__1.r, star1.i = z__1.i;
	sfac = .5;
	zlarnd_(&z__2, &c__5, &iseed[1]);
	z__1.r = sfac * z__2.r, z__1.i = sfac * z__2.i;
	plus1.r = z__1.r, plus1.i = z__1.i;
	i__1 = *n;
	for (j = 1; j <= i__1; j += 2) {
	    z_div(&z__1, &star1, &plus1);
	    plus2.r = z__1.r, plus2.i = z__1.i;
	    i__2 = j;
	    work[i__2].r = plus1.r, work[i__2].i = plus1.i;
	    i__2 = *n + j;
	    work[i__2].r = star1.r, work[i__2].i = star1.i;
	    if (j + 1 <= *n) {
		i__2 = j + 1;
		work[i__2].r = plus2.r, work[i__2].i = plus2.i;
		i__2 = *n + j + 1;
		work[i__2].r = 0., work[i__2].i = 0.;
		z_div(&z__1, &star1, &plus2);
		plus1.r = z__1.r, plus1.i = z__1.i;
		rexp = dlarnd_(&c__2, &iseed[1]);
		if (rexp < 0.) {
		    d__2 = 1. - rexp;
		    d__1 = -pow_dd(&sfac, &d__2);
		    zlarnd_(&z__2, &c__5, &iseed[1]);
		    z__1.r = d__1 * z__2.r, z__1.i = d__1 * z__2.i;
		    star1.r = z__1.r, star1.i = z__1.i;
		} else {
		    d__2 = rexp + 1.;
		    d__1 = pow_dd(&sfac, &d__2);
		    zlarnd_(&z__2, &c__5, &iseed[1]);
		    z__1.r = d__1 * z__2.r, z__1.i = d__1 * z__2.i;
		    star1.r = z__1.r, star1.i = z__1.i;
		}
	    }
/* L90: */
	}

	x = sqrt(cndnum) - 1 / sqrt(cndnum);
	if (*n > 2) {
	    y = sqrt(2. / (*n - 2)) * x;
	} else {
	    y = 0.;
	}
	z__ = x * x;

	if (upper) {
	    if (*n > 3) {
		i__1 = *n - 3;
		i__2 = *lda + 1;
		zcopy_(&i__1, &work[1], &c__1, &a[a_dim1 * 3 + 2], &i__2);
		if (*n > 4) {
		    i__1 = *n - 4;
		    i__2 = *lda + 1;
		    zcopy_(&i__1, &work[*n + 1], &c__1, &a[(a_dim1 << 2) + 2], 
			     &i__2);
		}
	    }
	    i__1 = *n - 1;
	    for (j = 2; j <= i__1; ++j) {
		i__2 = j * a_dim1 + 1;
		a[i__2].r = y, a[i__2].i = 0.;
		i__2 = j + *n * a_dim1;
		a[i__2].r = y, a[i__2].i = 0.;
/* L100: */
	    }
	    i__1 = *n * a_dim1 + 1;
	    a[i__1].r = z__, a[i__1].i = 0.;
	} else {
	    if (*n > 3) {
		i__1 = *n - 3;
		i__2 = *lda + 1;
		zcopy_(&i__1, &work[1], &c__1, &a[(a_dim1 << 1) + 3], &i__2);
		if (*n > 4) {
		    i__1 = *n - 4;
		    i__2 = *lda + 1;
		    zcopy_(&i__1, &work[*n + 1], &c__1, &a[(a_dim1 << 1) + 4], 
			     &i__2);
		}
	    }
	    i__1 = *n - 1;
	    for (j = 2; j <= i__1; ++j) {
		i__2 = j + a_dim1;
		a[i__2].r = y, a[i__2].i = 0.;
		i__2 = *n + j * a_dim1;
		a[i__2].r = y, a[i__2].i = 0.;
/* L110: */
	    }
	    i__1 = *n + a_dim1;
	    a[i__1].r = z__, a[i__1].i = 0.;
	}

/*        Fill in the zeros using Givens rotations. */

	if (upper) {
	    i__1 = *n - 1;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j + (j + 1) * a_dim1;
		ra.r = a[i__2].r, ra.i = a[i__2].i;
		rb.r = 2., rb.i = 0.;
		zrotg_(&ra, &rb, &c__, &s);

/*              Multiply by [ c  s; -conjg(s)  c] on the left. */

		if (*n > j + 1) {
		    i__2 = *n - j - 1;
		    zrot_(&i__2, &a[j + (j + 2) * a_dim1], lda, &a[j + 1 + (j 
			    + 2) * a_dim1], lda, &c__, &s);
		}

/*              Multiply by [-c -s;  conjg(s) -c] on the right. */

		if (j > 1) {
		    i__2 = j - 1;
		    d__1 = -c__;
		    z__1.r = -s.r, z__1.i = -s.i;
		    zrot_(&i__2, &a[(j + 1) * a_dim1 + 1], &c__1, &a[j * 
			    a_dim1 + 1], &c__1, &d__1, &z__1);
		}

/*              Negate A(J,J+1). */

		i__2 = j + (j + 1) * a_dim1;
		i__3 = j + (j + 1) * a_dim1;
		z__1.r = -a[i__3].r, z__1.i = -a[i__3].i;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
/* L120: */
	    }
	} else {
	    i__1 = *n - 1;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j + 1 + j * a_dim1;
		ra.r = a[i__2].r, ra.i = a[i__2].i;
		rb.r = 2., rb.i = 0.;
		zrotg_(&ra, &rb, &c__, &s);
		d_cnjg(&z__1, &s);
		s.r = z__1.r, s.i = z__1.i;

/*              Multiply by [ c -s;  conjg(s) c] on the right. */

		if (*n > j + 1) {
		    i__2 = *n - j - 1;
		    z__1.r = -s.r, z__1.i = -s.i;
		    zrot_(&i__2, &a[j + 2 + (j + 1) * a_dim1], &c__1, &a[j + 
			    2 + j * a_dim1], &c__1, &c__, &z__1);
		}

/*              Multiply by [-c  s; -conjg(s) -c] on the left. */

		if (j > 1) {
		    i__2 = j - 1;
		    d__1 = -c__;
		    zrot_(&i__2, &a[j + a_dim1], lda, &a[j + 1 + a_dim1], lda, 
			     &d__1, &s);
		}

/*              Negate A(J+1,J). */

		i__2 = j + 1 + j * a_dim1;
		i__3 = j + 1 + j * a_dim1;
		z__1.r = -a[i__3].r, z__1.i = -a[i__3].i;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
/* L130: */
	    }
	}

/*     IMAT > 10:  Pathological test cases.  These triangular matrices */
/*     are badly scaled or badly conditioned, so when used in solving a */
/*     triangular system they may cause overflow in the solution vector. */

    } else if (*imat == 11) {

/*        Type 11:  Generate a triangular matrix with elements between */
/*        -1 and 1. Give the diagonal norm 2 to make it well-conditioned. */
/*        Make the right hand side large so that it requires scaling. */

	if (upper) {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j - 1;
		zlarnv_(&c__4, &iseed[1], &i__2, &a[j * a_dim1 + 1]);
		i__2 = j + j * a_dim1;
		zlarnd_(&z__2, &c__5, &iseed[1]);
		z__1.r = z__2.r * 2., z__1.i = z__2.i * 2.;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
/* L140: */
	    }
	} else {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		if (j < *n) {
		    i__2 = *n - j;
		    zlarnv_(&c__4, &iseed[1], &i__2, &a[j + 1 + j * a_dim1]);
		}
		i__2 = j + j * a_dim1;
		zlarnd_(&z__2, &c__5, &iseed[1]);
		z__1.r = z__2.r * 2., z__1.i = z__2.i * 2.;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
/* L150: */
	    }
	}

/*        Set the right hand side so that the largest value is BIGNUM. */

	zlarnv_(&c__2, &iseed[1], n, &b[1]);
	iy = izamax_(n, &b[1], &c__1);
	bnorm = z_abs(&b[iy]);
	bscal = bignum / max(1.,bnorm);
	zdscal_(n, &bscal, &b[1], &c__1);

    } else if (*imat == 12) {

/*        Type 12:  Make the first diagonal element in the solve small to */
/*        cause immediate overflow when dividing by T(j,j). */
/*        In type 12, the offdiagonal elements are small (CNORM(j) < 1). */

	zlarnv_(&c__2, &iseed[1], n, &b[1]);
/* Computing MAX */
	d__1 = 1., d__2 = (doublereal) (*n - 1);
	tscal = 1. / max(d__1,d__2);
	if (upper) {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j - 1;
		zlarnv_(&c__4, &iseed[1], &i__2, &a[j * a_dim1 + 1]);
		i__2 = j - 1;
		zdscal_(&i__2, &tscal, &a[j * a_dim1 + 1], &c__1);
		i__2 = j + j * a_dim1;
		zlarnd_(&z__1, &c__5, &iseed[1]);
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
/* L160: */
	    }
	    i__1 = *n + *n * a_dim1;
	    i__2 = *n + *n * a_dim1;
	    z__1.r = smlnum * a[i__2].r, z__1.i = smlnum * a[i__2].i;
	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
	} else {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		if (j < *n) {
		    i__2 = *n - j;
		    zlarnv_(&c__4, &iseed[1], &i__2, &a[j + 1 + j * a_dim1]);
		    i__2 = *n - j;
		    zdscal_(&i__2, &tscal, &a[j + 1 + j * a_dim1], &c__1);
		}
		i__2 = j + j * a_dim1;
		zlarnd_(&z__1, &c__5, &iseed[1]);
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
/* L170: */
	    }
	    i__1 = a_dim1 + 1;
	    i__2 = a_dim1 + 1;
	    z__1.r = smlnum * a[i__2].r, z__1.i = smlnum * a[i__2].i;
	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
	}

    } else if (*imat == 13) {

/*        Type 13:  Make the first diagonal element in the solve small to */
/*        cause immediate overflow when dividing by T(j,j). */
/*        In type 13, the offdiagonal elements are O(1) (CNORM(j) > 1). */

	zlarnv_(&c__2, &iseed[1], n, &b[1]);
	if (upper) {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j - 1;
		zlarnv_(&c__4, &iseed[1], &i__2, &a[j * a_dim1 + 1]);
		i__2 = j + j * a_dim1;
		zlarnd_(&z__1, &c__5, &iseed[1]);
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
/* L180: */
	    }
	    i__1 = *n + *n * a_dim1;
	    i__2 = *n + *n * a_dim1;
	    z__1.r = smlnum * a[i__2].r, z__1.i = smlnum * a[i__2].i;
	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
	} else {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		if (j < *n) {
		    i__2 = *n - j;
		    zlarnv_(&c__4, &iseed[1], &i__2, &a[j + 1 + j * a_dim1]);
		}
		i__2 = j + j * a_dim1;
		zlarnd_(&z__1, &c__5, &iseed[1]);
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
/* L190: */
	    }
	    i__1 = a_dim1 + 1;
	    i__2 = a_dim1 + 1;
	    z__1.r = smlnum * a[i__2].r, z__1.i = smlnum * a[i__2].i;
	    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
	}

    } else if (*imat == 14) {

/*        Type 14:  T is diagonal with small numbers on the diagonal to */
/*        make the growth factor underflow, but a small right hand side */
/*        chosen so that the solution does not overflow. */

	if (upper) {
	    jcount = 1;
	    for (j = *n; j >= 1; --j) {
		i__1 = j - 1;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    i__2 = i__ + j * a_dim1;
		    a[i__2].r = 0., a[i__2].i = 0.;
/* L200: */
		}
		if (jcount <= 2) {
		    i__1 = j + j * a_dim1;
		    zlarnd_(&z__2, &c__5, &iseed[1]);
		    z__1.r = smlnum * z__2.r, z__1.i = smlnum * z__2.i;
		    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
		} else {
		    i__1 = j + j * a_dim1;
		    zlarnd_(&z__1, &c__5, &iseed[1]);
		    a[i__1].r = z__1.r, a[i__1].i = z__1.i;
		}
		++jcount;
		if (jcount > 4) {
		    jcount = 1;
		}
/* L210: */
	    }
	} else {
	    jcount = 1;
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = *n;
		for (i__ = j + 1; i__ <= i__2; ++i__) {
		    i__3 = i__ + j * a_dim1;
		    a[i__3].r = 0., a[i__3].i = 0.;
/* L220: */
		}
		if (jcount <= 2) {
		    i__2 = j + j * a_dim1;
		    zlarnd_(&z__2, &c__5, &iseed[1]);
		    z__1.r = smlnum * z__2.r, z__1.i = smlnum * z__2.i;
		    a[i__2].r = z__1.r, a[i__2].i = z__1.i;
		} else {
		    i__2 = j + j * a_dim1;
		    zlarnd_(&z__1, &c__5, &iseed[1]);
		    a[i__2].r = z__1.r, a[i__2].i = z__1.i;
		}
		++jcount;
		if (jcount > 4) {
		    jcount = 1;
		}
/* L230: */
	    }
	}

/*        Set the right hand side alternately zero and small. */

	if (upper) {
	    b[1].r = 0., b[1].i = 0.;
	    for (i__ = *n; i__ >= 2; i__ += -2) {
		i__1 = i__;
		b[i__1].r = 0., b[i__1].i = 0.;
		i__1 = i__ - 1;
		zlarnd_(&z__2, &c__5, &iseed[1]);
		z__1.r = smlnum * z__2.r, z__1.i = smlnum * z__2.i;
		b[i__1].r = z__1.r, b[i__1].i = z__1.i;
/* L240: */
	    }
	} else {
	    i__1 = *n;
	    b[i__1].r = 0., b[i__1].i = 0.;
	    i__1 = *n - 1;
	    for (i__ = 1; i__ <= i__1; i__ += 2) {
		i__2 = i__;
		b[i__2].r = 0., b[i__2].i = 0.;
		i__2 = i__ + 1;
		zlarnd_(&z__2, &c__5, &iseed[1]);
		z__1.r = smlnum * z__2.r, z__1.i = smlnum * z__2.i;
		b[i__2].r = z__1.r, b[i__2].i = z__1.i;
/* L250: */
	    }
	}

    } else if (*imat == 15) {

/*        Type 15:  Make the diagonal elements small to cause gradual */
/*        overflow when dividing by T(j,j).  To control the amount of */
/*        scaling needed, the matrix is bidiagonal. */

/* Computing MAX */
	d__1 = 1., d__2 = (doublereal) (*n - 1);
	texp = 1. / max(d__1,d__2);
	tscal = pow_dd(&smlnum, &texp);
	zlarnv_(&c__4, &iseed[1], n, &b[1]);
	if (upper) {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j - 2;
		for (i__ = 1; i__ <= i__2; ++i__) {
		    i__3 = i__ + j * a_dim1;
		    a[i__3].r = 0., a[i__3].i = 0.;
/* L260: */
		}
		if (j > 1) {
		    i__2 = j - 1 + j * a_dim1;
		    a[i__2].r = -1., a[i__2].i = -1.;
		}
		i__2 = j + j * a_dim1;
		zlarnd_(&z__2, &c__5, &iseed[1]);
		z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
/* L270: */
	    }
	    i__1 = *n;
	    b[i__1].r = 1., b[i__1].i = 1.;
	} else {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = *n;
		for (i__ = j + 2; i__ <= i__2; ++i__) {
		    i__3 = i__ + j * a_dim1;
		    a[i__3].r = 0., a[i__3].i = 0.;
/* L280: */
		}
		if (j < *n) {
		    i__2 = j + 1 + j * a_dim1;
		    a[i__2].r = -1., a[i__2].i = -1.;
		}
		i__2 = j + j * a_dim1;
		zlarnd_(&z__2, &c__5, &iseed[1]);
		z__1.r = tscal * z__2.r, z__1.i = tscal * z__2.i;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
/* L290: */
	    }
	    b[1].r = 1., b[1].i = 1.;
	}

    } else if (*imat == 16) {

/*        Type 16:  One zero diagonal element. */

	iy = *n / 2 + 1;
	if (upper) {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j - 1;
		zlarnv_(&c__4, &iseed[1], &i__2, &a[j * a_dim1 + 1]);
		if (j != iy) {
		    i__2 = j + j * a_dim1;
		    zlarnd_(&z__2, &c__5, &iseed[1]);
		    z__1.r = z__2.r * 2., z__1.i = z__2.i * 2.;
		    a[i__2].r = z__1.r, a[i__2].i = z__1.i;
		} else {
		    i__2 = j + j * a_dim1;
		    a[i__2].r = 0., a[i__2].i = 0.;
		}
/* L300: */
	    }
	} else {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		if (j < *n) {
		    i__2 = *n - j;
		    zlarnv_(&c__4, &iseed[1], &i__2, &a[j + 1 + j * a_dim1]);
		}
		if (j != iy) {
		    i__2 = j + j * a_dim1;
		    zlarnd_(&z__2, &c__5, &iseed[1]);
		    z__1.r = z__2.r * 2., z__1.i = z__2.i * 2.;
		    a[i__2].r = z__1.r, a[i__2].i = z__1.i;
		} else {
		    i__2 = j + j * a_dim1;
		    a[i__2].r = 0., a[i__2].i = 0.;
		}
/* L310: */
	    }
	}
	zlarnv_(&c__2, &iseed[1], n, &b[1]);
	zdscal_(n, &c_b92, &b[1], &c__1);

    } else if (*imat == 17) {

/*        Type 17:  Make the offdiagonal elements large to cause overflow */
/*        when adding a column of T.  In the non-transposed case, the */
/*        matrix is constructed to cause overflow when adding a column in */
/*        every other step. */

	tscal = unfl / ulp;
	tscal = (1. - ulp) / tscal;
	i__1 = *n;
	for (j = 1; j <= i__1; ++j) {
	    i__2 = *n;
	    for (i__ = 1; i__ <= i__2; ++i__) {
		i__3 = i__ + j * a_dim1;
		a[i__3].r = 0., a[i__3].i = 0.;
/* L320: */
	    }
/* L330: */
	}
	texp = 1.;
	if (upper) {
	    for (j = *n; j >= 2; j += -2) {
		i__1 = j * a_dim1 + 1;
		d__1 = -tscal / (doublereal) (*n + 1);
		a[i__1].r = d__1, a[i__1].i = 0.;
		i__1 = j + j * a_dim1;
		a[i__1].r = 1., a[i__1].i = 0.;
		i__1 = j;
		d__1 = texp * (1. - ulp);
		b[i__1].r = d__1, b[i__1].i = 0.;
		i__1 = (j - 1) * a_dim1 + 1;
		d__1 = -(tscal / (doublereal) (*n + 1)) / (doublereal) (*n + 
			2);
		a[i__1].r = d__1, a[i__1].i = 0.;
		i__1 = j - 1 + (j - 1) * a_dim1;
		a[i__1].r = 1., a[i__1].i = 0.;
		i__1 = j - 1;
		d__1 = texp * (doublereal) (*n * *n + *n - 1);
		b[i__1].r = d__1, b[i__1].i = 0.;
		texp *= 2.;
/* L340: */
	    }
	    d__1 = (doublereal) (*n + 1) / (doublereal) (*n + 2) * tscal;
	    b[1].r = d__1, b[1].i = 0.;
	} else {
	    i__1 = *n - 1;
	    for (j = 1; j <= i__1; j += 2) {
		i__2 = *n + j * a_dim1;
		d__1 = -tscal / (doublereal) (*n + 1);
		a[i__2].r = d__1, a[i__2].i = 0.;
		i__2 = j + j * a_dim1;
		a[i__2].r = 1., a[i__2].i = 0.;
		i__2 = j;
		d__1 = texp * (1. - ulp);
		b[i__2].r = d__1, b[i__2].i = 0.;
		i__2 = *n + (j + 1) * a_dim1;
		d__1 = -(tscal / (doublereal) (*n + 1)) / (doublereal) (*n + 
			2);
		a[i__2].r = d__1, a[i__2].i = 0.;
		i__2 = j + 1 + (j + 1) * a_dim1;
		a[i__2].r = 1., a[i__2].i = 0.;
		i__2 = j + 1;
		d__1 = texp * (doublereal) (*n * *n + *n - 1);
		b[i__2].r = d__1, b[i__2].i = 0.;
		texp *= 2.;
/* L350: */
	    }
	    i__1 = *n;
	    d__1 = (doublereal) (*n + 1) / (doublereal) (*n + 2) * tscal;
	    b[i__1].r = d__1, b[i__1].i = 0.;
	}

    } else if (*imat == 18) {

/*        Type 18:  Generate a unit triangular matrix with elements */
/*        between -1 and 1, and make the right hand side large so that it */
/*        requires scaling. */

	if (upper) {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = j - 1;
		zlarnv_(&c__4, &iseed[1], &i__2, &a[j * a_dim1 + 1]);
		i__2 = j + j * a_dim1;
		a[i__2].r = 0., a[i__2].i = 0.;
/* L360: */
	    }
	} else {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		if (j < *n) {
		    i__2 = *n - j;
		    zlarnv_(&c__4, &iseed[1], &i__2, &a[j + 1 + j * a_dim1]);
		}
		i__2 = j + j * a_dim1;
		a[i__2].r = 0., a[i__2].i = 0.;
/* L370: */
	    }
	}

/*        Set the right hand side so that the largest value is BIGNUM. */

	zlarnv_(&c__2, &iseed[1], n, &b[1]);
	iy = izamax_(n, &b[1], &c__1);
	bnorm = z_abs(&b[iy]);
	bscal = bignum / max(1.,bnorm);
	zdscal_(n, &bscal, &b[1], &c__1);

    } else if (*imat == 19) {

/*        Type 19:  Generate a triangular matrix with elements between */
/*        BIGNUM/(n-1) and BIGNUM so that at least one of the column */
/*        norms will exceed BIGNUM. */
/*        1/3/91:  ZLATRS can no longer handle this case */

/* Computing MAX */
	d__1 = 1., d__2 = (doublereal) (*n - 1);
	tleft = bignum / max(d__1,d__2);
/* Computing MAX */
	d__1 = 1., d__2 = (doublereal) (*n);
	tscal = bignum * ((doublereal) (*n - 1) / max(d__1,d__2));
	if (upper) {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		zlarnv_(&c__5, &iseed[1], &j, &a[j * a_dim1 + 1]);
		dlarnv_(&c__1, &iseed[1], &j, &rwork[1]);
		i__2 = j;
		for (i__ = 1; i__ <= i__2; ++i__) {
		    i__3 = i__ + j * a_dim1;
		    i__4 = i__ + j * a_dim1;
		    d__1 = tleft + rwork[i__] * tscal;
		    z__1.r = d__1 * a[i__4].r, z__1.i = d__1 * a[i__4].i;
		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
/* L380: */
		}
/* L390: */
	    }
	} else {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = *n - j + 1;
		zlarnv_(&c__5, &iseed[1], &i__2, &a[j + j * a_dim1]);
		i__2 = *n - j + 1;
		dlarnv_(&c__1, &iseed[1], &i__2, &rwork[1]);
		i__2 = *n;
		for (i__ = j; i__ <= i__2; ++i__) {
		    i__3 = i__ + j * a_dim1;
		    i__4 = i__ + j * a_dim1;
		    d__1 = tleft + rwork[i__ - j + 1] * tscal;
		    z__1.r = d__1 * a[i__4].r, z__1.i = d__1 * a[i__4].i;
		    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
/* L400: */
		}
/* L410: */
	    }
	}
	zlarnv_(&c__2, &iseed[1], n, &b[1]);
	zdscal_(n, &c_b92, &b[1], &c__1);
    }

/*     Flip the matrix if the transpose will be used. */

    if (! lsame_(trans, "N")) {
	if (upper) {
	    i__1 = *n / 2;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = *n - (j << 1) + 1;
		zswap_(&i__2, &a[j + j * a_dim1], lda, &a[j + 1 + (*n - j + 1)
			 * a_dim1], &c_n1);
/* L420: */
	    }
	} else {
	    i__1 = *n / 2;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = *n - (j << 1) + 1;
		i__3 = -(*lda);
		zswap_(&i__2, &a[j + j * a_dim1], &c__1, &a[*n - j + 1 + (j + 
			1) * a_dim1], &i__3);
/* L430: */
	    }
	}
    }

    return 0;

/*     End of ZLATTR */

} /* zlattr_ */
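
A driver for the routine above might look like the following. This is a minimal sketch, assuming the prototype shown at the top of the example, an "f2c.h" header for the types, and an arbitrary choice of N = 5 and IMAT = 11 (the well-conditioned type with a large right-hand side); the seed values are illustrative, with the last entry odd as ZLATMS expects.

#include <stdio.h>
#include "f2c.h"

extern int zlattr_(integer *imat, char *uplo, char *trans, char *diag,
                   integer *iseed, integer *n, doublecomplex *a, integer *lda,
                   doublecomplex *b, doublecomplex *work, doublereal *rwork,
                   integer *info);

int main(void)
{
    integer n = 5, lda = 5, imat = 11, info = 0;
    integer iseed[4] = {1988, 1989, 1990, 1991};  /* last entry odd, as ZLATMS expects */
    char uplo = 'U', trans = 'N', diag = ' ';
    doublecomplex a[25], b[5], work[10];
    doublereal rwork[5];

    /* Type 11: well-conditioned triangular matrix with a right-hand side
       scaled up so that the triangular solve needs internal scaling. */
    zlattr_(&imat, &uplo, &trans, &diag, iseed, &n, a, &lda, b, work, rwork, &info);

    printf("info = %ld  diag = '%c'  A(1,1) = (%g,%g)  b(1) = (%g,%g)\n",
           (long)info, diag, a[0].r, a[0].i, b[0].r, b[0].i);
    return 0;
}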
Example #8
/* Subroutine */ int ztrexc_(char *compq, integer *n, doublecomplex *t, 
	integer *ldt, doublecomplex *q, integer *ldq, integer *ifst, integer *
	ilst, integer *info)
{
/*  -- LAPACK routine (version 2.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       March 31, 1993   


    Purpose   
    =======   

    ZTREXC reorders the Schur factorization of a complex matrix   
    A = Q*T*Q**H, so that the diagonal element of T with row index IFST   
    is moved to row ILST.   

    The Schur form T is reordered by a unitary similarity transformation
    Z**H*T*Z, and optionally the matrix Q of Schur vectors is updated by
    postmultiplying it with Z.

    Arguments   
    =========   

    COMPQ   (input) CHARACTER*1   
            = 'V':  update the matrix Q of Schur vectors;   
            = 'N':  do not update Q.   

    N       (input) INTEGER   
            The order of the matrix T. N >= 0.   

    T       (input/output) COMPLEX*16 array, dimension (LDT,N)   
            On entry, the upper triangular matrix T.   
            On exit, the reordered upper triangular matrix.   

    LDT     (input) INTEGER   
            The leading dimension of the array T. LDT >= max(1,N).   

    Q       (input/output) COMPLEX*16 array, dimension (LDQ,N)   
            On entry, if COMPQ = 'V', the matrix Q of Schur vectors.   
            On exit, if COMPQ = 'V', Q has been postmultiplied by the   
            unitary transformation matrix Z which reorders T.   
            If COMPQ = 'N', Q is not referenced.   

    LDQ     (input) INTEGER   
            The leading dimension of the array Q.  LDQ >= max(1,N).   

    IFST    (input) INTEGER   
    ILST    (input) INTEGER   
            Specify the reordering of the diagonal elements of T:   
            The element with row index IFST is moved to row ILST by a   
            sequence of transpositions between adjacent elements.   
            1 <= IFST <= N; 1 <= ILST <= N.   

    INFO    (output) INTEGER   
            = 0:  successful exit   
            < 0:  if INFO = -i, the i-th argument had an illegal value   

    ===================================================================== 
  


       Decode and test the input parameters.   

    
   Parameter adjustments   
       Function Body */
    /* Table of constant values */
    static integer c__1 = 1;
    
    /* System generated locals */
    integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2, i__3;
    doublecomplex z__1;
    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);
    /* Local variables */
    static doublecomplex temp;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    static integer k;
    extern logical lsame_(char *, char *);
    static logical wantq;
    static integer m1, m2, m3;
    static doublereal cs;
    static doublecomplex t11, t22, sn;
    extern /* Subroutine */ int xerbla_(char *, integer *), zlartg_(
	    doublecomplex *, doublecomplex *, doublereal *, doublecomplex *, 
	    doublecomplex *);




#define T(I,J) t[(I)-1 + ((J)-1)* ( *ldt)]
#define Q(I,J) q[(I)-1 + ((J)-1)* ( *ldq)]

    *info = 0;
    wantq = lsame_(compq, "V");
    if (! lsame_(compq, "N") && ! wantq) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*ldt < max(1,*n)) {
	*info = -4;
    } else if (*ldq < 1 || wantq && *ldq < max(1,*n)) {
	*info = -6;
    } else if (*ifst < 1 || *ifst > *n) {
	*info = -7;
    } else if (*ilst < 1 || *ilst > *n) {
	*info = -8;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZTREXC", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 1 || *ifst == *ilst) {
	return 0;
    }

    if (*ifst < *ilst) {

/*        Move the IFST-th diagonal element forward down the diagonal. */

	m1 = 0;
	m2 = -1;
	m3 = 1;
    } else {

/*        Move the IFST-th diagonal element backward up the diagonal. */

	m1 = -1;
	m2 = 0;
	m3 = -1;
    }

    i__1 = *ilst + m2;
    i__2 = m3;
    for (k = *ifst + m1; m3 < 0 ? k >= *ilst+m2 : k <= *ilst+m2; k += m3) {

/*        Interchange the k-th and (k+1)-th diagonal elements. */

	i__3 = k + k * t_dim1;
	t11.r = T(k,k).r, t11.i = T(k,k).i;
	i__3 = k + 1 + (k + 1) * t_dim1;
	t22.r = T(k+1,k+1).r, t22.i = T(k+1,k+1).i;

/*        Determine the transformation to perform the interchange. */

	z__1.r = t22.r - t11.r, z__1.i = t22.i - t11.i;
	zlartg_(&T(k,k+1), &z__1, &cs, &sn, &temp);

/*        Apply transformation to the matrix T. */

	if (k + 2 <= *n) {
	    i__3 = *n - k - 1;
	    zrot_(&i__3, &T(k,k+2), ldt, &T(k+1,k+2), ldt, &cs, &sn);
	}
	i__3 = k - 1;
	d_cnjg(&z__1, &sn);
	zrot_(&i__3, &T(1,k), &c__1, &T(1,k+1), &
		c__1, &cs, &z__1);

	i__3 = k + k * t_dim1;
	T(k,k).r = t22.r, T(k,k).i = t22.i;
	i__3 = k + 1 + (k + 1) * t_dim1;
	T(k+1,k+1).r = t11.r, T(k+1,k+1).i = t11.i;

	if (wantq) {

/*           Accumulate transformation in the matrix Q. */

	    d_cnjg(&z__1, &sn);
	    zrot_(n, &Q(1,k), &c__1, &Q(1,k+1), &
		    c__1, &cs, &z__1);
	}

/* L10: */
    }

    return 0;

/*     End of ZTREXC */

} /* ztrexc_ */
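
A minimal calling sketch for ztrexc_, under the same assumptions as the earlier sketch (an f2c/CLAPACK-style build with "f2c.h" types, no ftnlen arguments, column-major storage); the 3-by-3 Schur form and the choice IFST = 1, ILST = 3 are made-up illustration data.

#include <stdio.h>
#include "f2c.h"

extern int ztrexc_(char *compq, integer *n, doublecomplex *t, integer *ldt,
                   doublecomplex *q, integer *ldq, integer *ifst,
                   integer *ilst, integer *info);

int main(void)
{
    integer n = 3, ldt = 3, ldq = 3, ifst = 1, ilst = 3, info = 0;
    /* Column-major upper triangular Schur form T with diagonal 1, 2, 3. */
    doublecomplex t[9] = {
        {1., 0.},   {0., 0.},  {0., 0.},   /* column 1 */
        {0.5, 0.},  {2., 0.},  {0., 0.},   /* column 2 */
        {0.25, 0.}, {0.5, 0.}, {3., 0.}    /* column 3 */
    };
    doublecomplex q[9] = {                 /* Q starts as the identity */
        {1., 0.}, {0., 0.}, {0., 0.},
        {0., 0.}, {1., 0.}, {0., 0.},
        {0., 0.}, {0., 0.}, {1., 0.}
    };

    /* Move the eigenvalue at T(1,1) down to T(3,3), accumulating the rotations in Q. */
    ztrexc_("V", &n, t, &ldt, q, &ldq, &ifst, &ilst, &info);

    printf("info = %ld  new diagonal = %g %g %g\n",
           (long)info, t[0].r, t[4].r, t[8].r);
    return 0;
}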
Example #9
/* Subroutine */ int zgghrd_(char *compq, char *compz, integer *n, integer *
	ilo, integer *ihi, doublecomplex *a, integer *lda, doublecomplex *b, 
	integer *ldb, doublecomplex *q, integer *ldq, doublecomplex *z__, 
	integer *ldz, integer *info)
{
/*  -- LAPACK routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    ZGGHRD reduces a pair of complex matrices (A,B) to generalized upper   
    Hessenberg form using unitary transformations, where A is a   
    general matrix and B is upper triangular:  Q' * A * Z = H and   
    Q' * B * Z = T, where H is upper Hessenberg, T is upper triangular,   
    and Q and Z are unitary, and ' means conjugate transpose.   

    The unitary matrices Q and Z are determined as products of Givens   
    rotations.  They may either be formed explicitly, or they may be   
    postmultiplied into input matrices Q1 and Z1, so that   

         Q1 * A * Z1' = (Q1*Q) * H * (Z1*Z)'   
         Q1 * B * Z1' = (Q1*Q) * T * (Z1*Z)'   

    Arguments   
    =========   

    COMPQ   (input) CHARACTER*1   
            = 'N': do not compute Q;   
            = 'I': Q is initialized to the unit matrix, and the   
                   unitary matrix Q is returned;   
            = 'V': Q must contain a unitary matrix Q1 on entry,   
                   and the product Q1*Q is returned.   

    COMPZ   (input) CHARACTER*1   
            = 'N': do not compute Z;   
            = 'I': Z is initialized to the unit matrix, and the   
                   unitary matrix Z is returned;   
            = 'V': Z must contain a unitary matrix Z1 on entry,   
                   and the product Z1*Z is returned.   

    N       (input) INTEGER   
            The order of the matrices A and B.  N >= 0.   

    ILO     (input) INTEGER   
    IHI     (input) INTEGER   
            It is assumed that A is already upper triangular in rows and   
            columns 1:ILO-1 and IHI+1:N.  ILO and IHI are normally set   
            by a previous call to ZGGBAL; otherwise they should be set   
            to 1 and N respectively.   
            1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.   

    A       (input/output) COMPLEX*16 array, dimension (LDA, N)   
            On entry, the N-by-N general matrix to be reduced.   
            On exit, the upper triangle and the first subdiagonal of A   
            are overwritten with the upper Hessenberg matrix H, and the   
            rest is set to zero.   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= max(1,N).   

    B       (input/output) COMPLEX*16 array, dimension (LDB, N)   
            On entry, the N-by-N upper triangular matrix B.   
            On exit, the upper triangular matrix T = Q' B Z.  The   
            elements below the diagonal are set to zero.   

    LDB     (input) INTEGER   
            The leading dimension of the array B.  LDB >= max(1,N).   

    Q       (input/output) COMPLEX*16 array, dimension (LDQ, N)   
            If COMPQ='N':  Q is not referenced.   
            If COMPQ='I':  on entry, Q need not be set, and on exit it   
                           contains the unitary matrix Q, where Q'   
                           is the product of the Givens transformations   
                           which are applied to A and B on the left.   
            If COMPQ='V':  on entry, Q must contain a unitary matrix   
                           Q1, and on exit this is overwritten by Q1*Q.   

    LDQ     (input) INTEGER   
            The leading dimension of the array Q.   
            LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise.   

    Z       (input/output) COMPLEX*16 array, dimension (LDZ, N)   
            If COMPZ='N':  Z is not referenced.   
            If COMPZ='I':  on entry, Z need not be set, and on exit it   
                           contains the unitary matrix Z, which is   
                           the product of the Givens transformations   
                           which are applied to A and B on the right.   
            If COMPZ='V':  on entry, Z must contain a unitary matrix   
                           Z1, and on exit this is overwritten by Z1*Z.   

    LDZ     (input) INTEGER   
            The leading dimension of the array Z.   
            LDZ >= N if COMPZ='V' or 'I'; LDZ >= 1 otherwise.   

    INFO    (output) INTEGER   
            = 0:  successful exit.   
            < 0:  if INFO = -i, the i-th argument had an illegal value.   

    Further Details   
    ===============   

    This routine reduces A to Hessenberg and B to triangular form by   
    an unblocked reduction, as described in _Matrix_Computations_,   
    by Golub and van Loan (Johns Hopkins Press).   

    =====================================================================   


       Decode COMPQ   

       Parameter adjustments */
    /* Table of constant values */
    static doublecomplex c_b1 = {1.,0.};
    static doublecomplex c_b2 = {0.,0.};
    static integer c__1 = 1;
    
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
	    z_offset, i__1, i__2, i__3;
    doublecomplex z__1;
    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);
    /* Local variables */
    static integer jcol, jrow;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    static doublereal c__;
    static doublecomplex s;
    extern logical lsame_(char *, char *);
    static doublecomplex ctemp;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    static integer icompq, icompz;
    extern /* Subroutine */ int zlaset_(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, doublecomplex *, integer *), zlartg_(doublecomplex *, doublecomplex *, doublereal *, 
	    doublecomplex *, doublecomplex *);
    static logical ilq, ilz;
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]
#define q_subscr(a_1,a_2) (a_2)*q_dim1 + a_1
#define q_ref(a_1,a_2) q[q_subscr(a_1,a_2)]
#define z___subscr(a_1,a_2) (a_2)*z_dim1 + a_1
#define z___ref(a_1,a_2) z__[z___subscr(a_1,a_2)]


    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1 * 1;
    z__ -= z_offset;

    /* Function Body */
    if (lsame_(compq, "N")) {
	ilq = FALSE_;
	icompq = 1;
    } else if (lsame_(compq, "V")) {
	ilq = TRUE_;
	icompq = 2;
    } else if (lsame_(compq, "I")) {
	ilq = TRUE_;
	icompq = 3;
    } else {
	icompq = 0;
    }

/*     Decode COMPZ */

    if (lsame_(compz, "N")) {
	ilz = FALSE_;
	icompz = 1;
    } else if (lsame_(compz, "V")) {
	ilz = TRUE_;
	icompz = 2;
    } else if (lsame_(compz, "I")) {
	ilz = TRUE_;
	icompz = 3;
    } else {
	icompz = 0;
    }

/*     Test the input parameters. */

    *info = 0;
    if (icompq <= 0) {
	*info = -1;
    } else if (icompz <= 0) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*ilo < 1) {
	*info = -4;
    } else if (*ihi > *n || *ihi < *ilo - 1) {
	*info = -5;
    } else if (*lda < max(1,*n)) {
	*info = -7;
    } else if (*ldb < max(1,*n)) {
	*info = -9;
    } else if (ilq && *ldq < *n || *ldq < 1) {
	*info = -11;
    } else if (ilz && *ldz < *n || *ldz < 1) {
	*info = -13;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZGGHRD", &i__1);
	return 0;
    }

/*     Initialize Q and Z if desired. */

    if (icompq == 3) {
	zlaset_("Full", n, n, &c_b2, &c_b1, &q[q_offset], ldq);
    }
    if (icompz == 3) {
	zlaset_("Full", n, n, &c_b2, &c_b1, &z__[z_offset], ldz);
    }

/*     Quick return if possible */

    if (*n <= 1) {
	return 0;
    }

/*     Zero out lower triangle of B */

    i__1 = *n - 1;
    for (jcol = 1; jcol <= i__1; ++jcol) {
	i__2 = *n;
	for (jrow = jcol + 1; jrow <= i__2; ++jrow) {
	    i__3 = b_subscr(jrow, jcol);
	    b[i__3].r = 0., b[i__3].i = 0.;
/* L10: */
	}
/* L20: */
    }

/*     Reduce A and B */

    i__1 = *ihi - 2;
    for (jcol = *ilo; jcol <= i__1; ++jcol) {

	i__2 = jcol + 2;
	for (jrow = *ihi; jrow >= i__2; --jrow) {

/*           Step 1: rotate rows JROW-1, JROW to kill A(JROW,JCOL) */

	    i__3 = a_subscr(jrow - 1, jcol);
	    ctemp.r = a[i__3].r, ctemp.i = a[i__3].i;
	    zlartg_(&ctemp, &a_ref(jrow, jcol), &c__, &s, &a_ref(jrow - 1, 
		    jcol));
	    i__3 = a_subscr(jrow, jcol);
	    a[i__3].r = 0., a[i__3].i = 0.;
	    i__3 = *n - jcol;
	    zrot_(&i__3, &a_ref(jrow - 1, jcol + 1), lda, &a_ref(jrow, jcol + 
		    1), lda, &c__, &s);
	    i__3 = *n + 2 - jrow;
	    zrot_(&i__3, &b_ref(jrow - 1, jrow - 1), ldb, &b_ref(jrow, jrow - 
		    1), ldb, &c__, &s);
	    if (ilq) {
		d_cnjg(&z__1, &s);
		zrot_(n, &q_ref(1, jrow - 1), &c__1, &q_ref(1, jrow), &c__1, &
			c__, &z__1);
	    }

/*           Step 2: rotate columns JROW, JROW-1 to kill B(JROW,JROW-1) */

	    i__3 = b_subscr(jrow, jrow);
	    ctemp.r = b[i__3].r, ctemp.i = b[i__3].i;
	    zlartg_(&ctemp, &b_ref(jrow, jrow - 1), &c__, &s, &b_ref(jrow, 
		    jrow));
	    i__3 = b_subscr(jrow, jrow - 1);
	    b[i__3].r = 0., b[i__3].i = 0.;
	    zrot_(ihi, &a_ref(1, jrow), &c__1, &a_ref(1, jrow - 1), &c__1, &
		    c__, &s);
	    i__3 = jrow - 1;
	    zrot_(&i__3, &b_ref(1, jrow), &c__1, &b_ref(1, jrow - 1), &c__1, &
		    c__, &s);
	    if (ilz) {
		zrot_(n, &z___ref(1, jrow), &c__1, &z___ref(1, jrow - 1), &
			c__1, &c__, &s);
	    }
/* L30: */
	}
/* L40: */
    }

    return 0;

/*     End of ZGGHRD */

} /* zgghrd_ */
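
The sketch below shows the calling convention of zgghrd_ under the same assumptions as the earlier sketches ("f2c.h" types, no ftnlen arguments); the 4-by-4 A and the all-ones upper triangular B are illustrative only. In a full generalized eigenvalue computation this call would normally be preceded by ZGGBAL (to set ILO/IHI) and a QR factorization of B, and followed by ZHGEQZ; here Q and Z are simply formed from scratch with COMPQ = COMPZ = 'I'.

#include "f2c.h"

extern int zgghrd_(char *compq, char *compz, integer *n, integer *ilo,
                   integer *ihi, doublecomplex *a, integer *lda,
                   doublecomplex *b, integer *ldb, doublecomplex *q,
                   integer *ldq, doublecomplex *z__, integer *ldz,
                   integer *info);

int main(void)
{
    enum { N = 4 };
    integer n = N, ilo = 1, ihi = N, ld = N, info = 0;
    doublecomplex a[N * N], b[N * N], q[N * N], z__[N * N];
    integer i, j;

    /* Column-major fill: A is a simple dense matrix, B is upper triangular
       (a requirement of ZGGHRD on entry). */
    for (j = 0; j < N; ++j)
        for (i = 0; i < N; ++i) {
            a[i + j * N].r = (doublereal) (i + j + 1);  a[i + j * N].i = 0.;
            b[i + j * N].r = (i <= j) ? 1. : 0.;        b[i + j * N].i = 0.;
        }

    /* Reduce (A,B) to Hessenberg-triangular form, building Q and Z from scratch. */
    zgghrd_("I", "I", &n, &ilo, &ihi, a, &ld, b, &ld, q, &ld, z__, &ld, &info);
    return (int) info;
}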
Example #10
/* Subroutine */ int zhbtrd_(char *vect, char *uplo, integer *n, integer *kd, 
	doublecomplex *ab, integer *ldab, doublereal *d__, doublereal *e, 
	doublecomplex *q, integer *ldq, doublecomplex *work, integer *info)
{
    /* System generated locals */
    integer ab_dim1, ab_offset, q_dim1, q_offset, i__1, i__2, i__3, i__4, 
	    i__5, i__6;
    doublereal d__1;
    doublecomplex z__1;

    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);
    double z_abs(doublecomplex *);

    /* Local variables */
    integer i__, j, k, l;
    doublecomplex t;
    integer i2, j1, j2, nq, nr, kd1, ibl, iqb, kdn, jin, nrt, kdm1, inca, 
	    jend, lend, jinc;
    doublereal abst;
    integer incx, last;
    doublecomplex temp;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    integer j1end, j1inc, iqend;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *);
    logical initq, wantq, upper;
    extern /* Subroutine */ int zlar2v_(integer *, doublecomplex *, 
	    doublecomplex *, doublecomplex *, integer *, doublereal *, 
	    doublecomplex *, integer *);
    integer iqaend;
    extern /* Subroutine */ int xerbla_(char *, integer *), zlacgv_(
	    integer *, doublecomplex *, integer *), zlaset_(char *, integer *, 
	     integer *, doublecomplex *, doublecomplex *, doublecomplex *, 
	    integer *), zlartg_(doublecomplex *, doublecomplex *, 
	    doublereal *, doublecomplex *, doublecomplex *), zlargv_(integer *
, doublecomplex *, integer *, doublecomplex *, integer *, 
	    doublereal *, integer *), zlartv_(integer *, doublecomplex *, 
	    integer *, doublecomplex *, integer *, doublereal *, 
	    doublecomplex *, integer *);


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZHBTRD reduces a complex Hermitian band matrix A to real symmetric */
/*  tridiagonal form T by a unitary similarity transformation: */
/*  Q**H * A * Q = T. */

/*  Arguments */
/*  ========= */

/*  VECT    (input) CHARACTER*1 */
/*          = 'N':  do not form Q; */
/*          = 'V':  form Q; */
/*          = 'U':  update a matrix X, by forming X*Q. */

/*  UPLO    (input) CHARACTER*1 */
/*          = 'U':  Upper triangle of A is stored; */
/*          = 'L':  Lower triangle of A is stored. */

/*  N       (input) INTEGER */
/*          The order of the matrix A.  N >= 0. */

/*  KD      (input) INTEGER */
/*          The number of superdiagonals of the matrix A if UPLO = 'U', */
/*          or the number of subdiagonals if UPLO = 'L'.  KD >= 0. */

/*  AB      (input/output) COMPLEX*16 array, dimension (LDAB,N) */
/*          On entry, the upper or lower triangle of the Hermitian band */
/*          matrix A, stored in the first KD+1 rows of the array.  The */
/*          j-th column of A is stored in the j-th column of the array AB */
/*          as follows: */
/*          if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
/*          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+kd). */
/*          On exit, the diagonal elements of AB are overwritten by the */
/*          diagonal elements of the tridiagonal matrix T; if KD > 0, the */
/*          elements on the first superdiagonal (if UPLO = 'U') or the */
/*          first subdiagonal (if UPLO = 'L') are overwritten by the */
/*          off-diagonal elements of T; the rest of AB is overwritten by */
/*          values generated during the reduction. */

/*  LDAB    (input) INTEGER */
/*          The leading dimension of the array AB.  LDAB >= KD+1. */

/*  D       (output) DOUBLE PRECISION array, dimension (N) */
/*          The diagonal elements of the tridiagonal matrix T. */

/*  E       (output) DOUBLE PRECISION array, dimension (N-1) */
/*          The off-diagonal elements of the tridiagonal matrix T: */
/*          E(i) = T(i,i+1) if UPLO = 'U'; E(i) = T(i+1,i) if UPLO = 'L'. */

/*  Q       (input/output) COMPLEX*16 array, dimension (LDQ,N) */
/*          On entry, if VECT = 'U', then Q must contain an N-by-N */
/*          matrix X; if VECT = 'N' or 'V', then Q need not be set. */

/*          On exit: */
/*          if VECT = 'V', Q contains the N-by-N unitary matrix Q; */
/*          if VECT = 'U', Q contains the product X*Q; */
/*          if VECT = 'N', the array Q is not referenced. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. */
/*          LDQ >= 1, and LDQ >= N if VECT = 'V' or 'U'. */

/*  WORK    (workspace) COMPLEX*16 array, dimension (N) */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value */

/*  Further Details */
/*  =============== */

/*  Modified by Linda Kaufman, Bell Labs. */
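
/*  (Typical use, stated here as an assumption about the calling sequence */
/*  rather than anything in this file: follow ZHBTRD with DSTERF on D and E */
/*  to obtain the eigenvalues of A, or with ZSTEQR to obtain eigenvalues */
/*  and, through the accumulated Q, eigenvectors of the original band */
/*  matrix.) */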

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input parameters */

    /* Parameter adjustments */
    ab_dim1 = *ldab;
    ab_offset = 1 + ab_dim1;
    ab -= ab_offset;
    --d__;
    --e;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    --work;

    /* Function Body */
    initq = lsame_(vect, "V");
    wantq = initq || lsame_(vect, "U");
    upper = lsame_(uplo, "U");
    kd1 = *kd + 1;
    kdm1 = *kd - 1;
    incx = *ldab - 1;
    iqend = 1;

    *info = 0;
    if (! wantq && ! lsame_(vect, "N")) {
	*info = -1;
    } else if (! upper && ! lsame_(uplo, "L")) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*kd < 0) {
	*info = -4;
    } else if (*ldab < kd1) {
	*info = -6;
    } else if (*ldq < max(1,*n) && wantq) {
	*info = -10;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZHBTRD", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }

/*     Initialize Q to the unit matrix, if needed */

    if (initq) {
	zlaset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
    }

/*     Wherever possible, plane rotations are generated and applied in */
/*     vector operations of length NR over the index set J1:J2:KD1. */

/*     The real cosines and complex sines of the plane rotations are */
/*     stored in the arrays D and WORK. */

    inca = kd1 * *ldab;
/* Computing MIN */
    i__1 = *n - 1;
    kdn = min(i__1,*kd);
    if (upper) {

	if (*kd > 1) {

/*           Reduce to complex Hermitian tridiagonal form, working with */
/*           the upper triangle */

	    nr = 0;
	    j1 = kdn + 2;
	    j2 = 1;

	    i__1 = kd1 + ab_dim1;
	    i__2 = kd1 + ab_dim1;
	    d__1 = ab[i__2].r;
	    ab[i__1].r = d__1, ab[i__1].i = 0.;
	    i__1 = *n - 2;
	    for (i__ = 1; i__ <= i__1; ++i__) {

/*              Reduce i-th row of matrix to tridiagonal form */

		for (k = kdn + 1; k >= 2; --k) {
		    j1 += kdn;
		    j2 += kdn;

		    if (nr > 0) {

/*                    generate plane rotations to annihilate nonzero */
/*                    elements which have been created outside the band */

			zlargv_(&nr, &ab[(j1 - 1) * ab_dim1 + 1], &inca, &
				work[j1], &kd1, &d__[j1], &kd1);

/*                    apply rotations from the right */


/*                    Depending on the number of diagonals, either */
/*                    ZLARTV or ZROT is used */

			if (nr >= (*kd << 1) - 1) {
			    i__2 = *kd - 1;
			    for (l = 1; l <= i__2; ++l) {
				zlartv_(&nr, &ab[l + 1 + (j1 - 1) * ab_dim1], 
					&inca, &ab[l + j1 * ab_dim1], &inca, &
					d__[j1], &work[j1], &kd1);
/* L10: */
			    }

			} else {
			    jend = j1 + (nr - 1) * kd1;
			    i__2 = jend;
			    i__3 = kd1;
			    for (jinc = j1; i__3 < 0 ? jinc >= i__2 : jinc <= 
				    i__2; jinc += i__3) {
				zrot_(&kdm1, &ab[(jinc - 1) * ab_dim1 + 2], &
					c__1, &ab[jinc * ab_dim1 + 1], &c__1, 
					&d__[jinc], &work[jinc]);
/* L20: */
			    }
			}
		    }


		    if (k > 2) {
			if (k <= *n - i__ + 1) {

/*                       generate plane rotation to annihilate a(i,i+k-1) */
/*                       within the band */

			    zlartg_(&ab[*kd - k + 3 + (i__ + k - 2) * ab_dim1]
, &ab[*kd - k + 2 + (i__ + k - 1) * 
				    ab_dim1], &d__[i__ + k - 1], &work[i__ + 
				    k - 1], &temp);
			    i__3 = *kd - k + 3 + (i__ + k - 2) * ab_dim1;
			    ab[i__3].r = temp.r, ab[i__3].i = temp.i;

/*                       apply rotation from the right */

			    i__3 = k - 3;
			    zrot_(&i__3, &ab[*kd - k + 4 + (i__ + k - 2) * 
				    ab_dim1], &c__1, &ab[*kd - k + 3 + (i__ + 
				    k - 1) * ab_dim1], &c__1, &d__[i__ + k - 
				    1], &work[i__ + k - 1]);
			}
			++nr;
			j1 = j1 - kdn - 1;
		    }

/*                 apply plane rotations from both sides to diagonal */
/*                 blocks */

		    if (nr > 0) {
			zlar2v_(&nr, &ab[kd1 + (j1 - 1) * ab_dim1], &ab[kd1 + 
				j1 * ab_dim1], &ab[*kd + j1 * ab_dim1], &inca, 
				 &d__[j1], &work[j1], &kd1);
		    }

/*                 apply plane rotations from the left */

		    if (nr > 0) {
			zlacgv_(&nr, &work[j1], &kd1);
			if ((*kd << 1) - 1 < nr) {

/*                    Depending on the number of diagonals, either */
/*                    ZLARTV or ZROT is used */

			    i__3 = *kd - 1;
			    for (l = 1; l <= i__3; ++l) {
				if (j2 + l > *n) {
				    nrt = nr - 1;
				} else {
				    nrt = nr;
				}
				if (nrt > 0) {
				    zlartv_(&nrt, &ab[*kd - l + (j1 + l) * 
					    ab_dim1], &inca, &ab[*kd - l + 1 
					    + (j1 + l) * ab_dim1], &inca, &
					    d__[j1], &work[j1], &kd1);
				}
/* L30: */
			    }
			} else {
			    j1end = j1 + kd1 * (nr - 2);
			    if (j1end >= j1) {
				i__3 = j1end;
				i__2 = kd1;
				for (jin = j1; i__2 < 0 ? jin >= i__3 : jin <=
					 i__3; jin += i__2) {
				    i__4 = *kd - 1;
				    zrot_(&i__4, &ab[*kd - 1 + (jin + 1) * 
					    ab_dim1], &incx, &ab[*kd + (jin + 
					    1) * ab_dim1], &incx, &d__[jin], &
					    work[jin]);
/* L40: */
				}
			    }
/* Computing MIN */
			    i__2 = kdm1, i__3 = *n - j2;
			    lend = min(i__2,i__3);
			    last = j1end + kd1;
			    if (lend > 0) {
				zrot_(&lend, &ab[*kd - 1 + (last + 1) * 
					ab_dim1], &incx, &ab[*kd + (last + 1) 
					* ab_dim1], &incx, &d__[last], &work[
					last]);
			    }
			}
		    }

		    if (wantq) {

/*                    accumulate product of plane rotations in Q */

			if (initq) {

/*                 take advantage of the fact that Q was */
/*                 initially the Identity matrix */

			    iqend = max(iqend,j2);
/* Computing MAX */
			    i__2 = 0, i__3 = k - 3;
			    i2 = max(i__2,i__3);
			    iqaend = i__ * *kd + 1;
			    if (k == 2) {
				iqaend += *kd;
			    }
			    iqaend = min(iqaend,iqend);
			    i__2 = j2;
			    i__3 = kd1;
			    for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j 
				    += i__3) {
				ibl = i__ - i2 / kdm1;
				++i2;
/* Computing MAX */
				i__4 = 1, i__5 = j - ibl;
				iqb = max(i__4,i__5);
				nq = iqaend + 1 - iqb;
/* Computing MIN */
				i__4 = iqaend + *kd;
				iqaend = min(i__4,iqend);
				d_cnjg(&z__1, &work[j]);
				zrot_(&nq, &q[iqb + (j - 1) * q_dim1], &c__1, 
					&q[iqb + j * q_dim1], &c__1, &d__[j], 
					&z__1);
/* L50: */
			    }
			} else {

			    i__3 = j2;
			    i__2 = kd1;
			    for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j 
				    += i__2) {
				d_cnjg(&z__1, &work[j]);
				zrot_(n, &q[(j - 1) * q_dim1 + 1], &c__1, &q[
					j * q_dim1 + 1], &c__1, &d__[j], &
					z__1);
/* L60: */
			    }
			}

		    }

		    if (j2 + kdn > *n) {

/*                    adjust J2 to keep within the bounds of the matrix */

			--nr;
			j2 = j2 - kdn - 1;
		    }

		    i__2 = j2;
		    i__3 = kd1;
		    for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j += i__3) 
			    {

/*                    create nonzero element a(j-1,j+kd) outside the band */
/*                    and store it in WORK */

			i__4 = j + *kd;
			i__5 = j;
			i__6 = (j + *kd) * ab_dim1 + 1;
			z__1.r = work[i__5].r * ab[i__6].r - work[i__5].i * 
				ab[i__6].i, z__1.i = work[i__5].r * ab[i__6]
				.i + work[i__5].i * ab[i__6].r;
			work[i__4].r = z__1.r, work[i__4].i = z__1.i;
			i__4 = (j + *kd) * ab_dim1 + 1;
			i__5 = j;
			i__6 = (j + *kd) * ab_dim1 + 1;
			z__1.r = d__[i__5] * ab[i__6].r, z__1.i = d__[i__5] * 
				ab[i__6].i;
			ab[i__4].r = z__1.r, ab[i__4].i = z__1.i;
/* L70: */
		    }
/* L80: */
		}
/* L90: */
	    }
	}

	if (*kd > 0) {

/*           make off-diagonal elements real and copy them to E */

	    i__1 = *n - 1;
	    for (i__ = 1; i__ <= i__1; ++i__) {
		i__3 = *kd + (i__ + 1) * ab_dim1;
		t.r = ab[i__3].r, t.i = ab[i__3].i;
		abst = z_abs(&t);
		i__3 = *kd + (i__ + 1) * ab_dim1;
		ab[i__3].r = abst, ab[i__3].i = 0.;
		e[i__] = abst;
		if (abst != 0.) {
		    z__1.r = t.r / abst, z__1.i = t.i / abst;
		    t.r = z__1.r, t.i = z__1.i;
		} else {
		    t.r = 1., t.i = 0.;
		}
		if (i__ < *n - 1) {
		    i__3 = *kd + (i__ + 2) * ab_dim1;
		    i__2 = *kd + (i__ + 2) * ab_dim1;
		    z__1.r = ab[i__2].r * t.r - ab[i__2].i * t.i, z__1.i = ab[
			    i__2].r * t.i + ab[i__2].i * t.r;
		    ab[i__3].r = z__1.r, ab[i__3].i = z__1.i;
		}
		if (wantq) {
		    d_cnjg(&z__1, &t);
		    zscal_(n, &z__1, &q[(i__ + 1) * q_dim1 + 1], &c__1);
		}
/* L100: */
	    }
	} else {

/*           set E to zero if original matrix was diagonal */

	    i__1 = *n - 1;
	    for (i__ = 1; i__ <= i__1; ++i__) {
		e[i__] = 0.;
/* L110: */
	    }
	}

/*        copy diagonal elements to D */

	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    i__3 = i__;
	    i__2 = kd1 + i__ * ab_dim1;
	    d__[i__3] = ab[i__2].r;
/* L120: */
	}

    } else {

	if (*kd > 1) {

/*           Reduce to complex Hermitian tridiagonal form, working with */
/*           the lower triangle */

	    nr = 0;
	    j1 = kdn + 2;
	    j2 = 1;

	    i__1 = ab_dim1 + 1;
	    i__3 = ab_dim1 + 1;
	    d__1 = ab[i__3].r;
	    ab[i__1].r = d__1, ab[i__1].i = 0.;
	    i__1 = *n - 2;
	    for (i__ = 1; i__ <= i__1; ++i__) {

/*              Reduce i-th column of matrix to tridiagonal form */

		for (k = kdn + 1; k >= 2; --k) {
		    j1 += kdn;
		    j2 += kdn;

		    if (nr > 0) {

/*                    generate plane rotations to annihilate nonzero */
/*                    elements which have been created outside the band */

			zlargv_(&nr, &ab[kd1 + (j1 - kd1) * ab_dim1], &inca, &
				work[j1], &kd1, &d__[j1], &kd1);

/*                    apply plane rotations from one side */


/*                    Depending on the number of diagonals, either */
/*                    ZLARTV or ZROT is used */

			if (nr > (*kd << 1) - 1) {
			    i__3 = *kd - 1;
			    for (l = 1; l <= i__3; ++l) {
				zlartv_(&nr, &ab[kd1 - l + (j1 - kd1 + l) * 
					ab_dim1], &inca, &ab[kd1 - l + 1 + (
					j1 - kd1 + l) * ab_dim1], &inca, &d__[
					j1], &work[j1], &kd1);
/* L130: */
			    }
			} else {
			    jend = j1 + kd1 * (nr - 1);
			    i__3 = jend;
			    i__2 = kd1;
			    for (jinc = j1; i__2 < 0 ? jinc >= i__3 : jinc <= 
				    i__3; jinc += i__2) {
				zrot_(&kdm1, &ab[*kd + (jinc - *kd) * ab_dim1],
					&incx, &ab[kd1 + (jinc - *kd) * ab_dim1],
					&incx, &d__[jinc], &work[jinc]);
/* L140: */
			    }
			}

		    }

		    if (k > 2) {
			if (k <= *n - i__ + 1) {

/*                       generate plane rotation to annihilate a(i+k-1,i) */
/*                       within the band */

			    zlartg_(&ab[k - 1 + i__ * ab_dim1], &ab[k + i__ * 
				    ab_dim1], &d__[i__ + k - 1], &work[i__ + 
				    k - 1], &temp);
			    i__2 = k - 1 + i__ * ab_dim1;
			    ab[i__2].r = temp.r, ab[i__2].i = temp.i;

/*                       apply rotation from the left */

			    i__2 = k - 3;
			    i__3 = *ldab - 1;
			    i__4 = *ldab - 1;
			    zrot_(&i__2, &ab[k - 2 + (i__ + 1) * ab_dim1], &
				    i__3, &ab[k - 1 + (i__ + 1) * ab_dim1], &
				    i__4, &d__[i__ + k - 1], &work[i__ + k - 
				    1]);
			}
			++nr;
			j1 = j1 - kdn - 1;
		    }

/*                 apply plane rotations from both sides to diagonal */
/*                 blocks */
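/*                 ZLAR2V applies each rotation as a unitary similarity */
/*                 to the 2 by 2 Hermitian block with diagonal entries */
/*                 AB(1,j-1), AB(1,j) and off-diagonal entry AB(2,j-1) */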

		    if (nr > 0) {
			zlar2v_(&nr, &ab[(j1 - 1) * ab_dim1 + 1], &ab[j1 * 
				ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 + 2], &
				inca, &d__[j1], &work[j1], &kd1);
		    }

/*                 apply plane rotations from the right */


/*                    Depending on the number of diagonals, either */
/*                    ZLARTV or ZROT is used */

		    if (nr > 0) {
			zlacgv_(&nr, &work[j1], &kd1);
			if (nr > (*kd << 1) - 1) {
			    i__2 = *kd - 1;
			    for (l = 1; l <= i__2; ++l) {
				if (j2 + l > *n) {
				    nrt = nr - 1;
				} else {
				    nrt = nr;
				}
				if (nrt > 0) {
				    zlartv_(&nrt, &ab[l + 2 + (j1 - 1) * 
					    ab_dim1], &inca, &ab[l + 1 + j1 * 
					    ab_dim1], &inca, &d__[j1], &work[
					    j1], &kd1);
				}
/* L150: */
			    }
			} else {
			    j1end = j1 + kd1 * (nr - 2);
			    if (j1end >= j1) {
				i__2 = j1end;
				i__3 = kd1;
				for (j1inc = j1; i__3 < 0 ? j1inc >= i__2 : 
					j1inc <= i__2; j1inc += i__3) {
				    zrot_(&kdm1, &ab[(j1inc - 1) * ab_dim1 + 
					    3], &c__1, &ab[j1inc * ab_dim1 + 
					    2], &c__1, &d__[j1inc], &work[
					    j1inc]);
/* L160: */
				}
			    }
/* Computing MIN */
			    i__3 = kdm1, i__2 = *n - j2;
			    lend = min(i__3,i__2);
			    last = j1end + kd1;
			    if (lend > 0) {
				zrot_(&lend, &ab[(last - 1) * ab_dim1 + 3], &
					c__1, &ab[last * ab_dim1 + 2], &c__1, 
					&d__[last], &work[last]);
			    }
			}
		    }



		    if (wantq) {

/*                    accumulate product of plane rotations in Q */

			if (initq) {

/*                    take advantage of the fact that Q was */
/*                    initially the Identity matrix */
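/*                    because Q started as the identity, the rotated */
/*                    columns have nonzeros only in rows IQB..IQAEND, so */
/*                    each ZROT below updates NQ = IQAEND+1-IQB rows */
/*                    rather than all N */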

			    iqend = max(iqend,j2);
/* Computing MAX */
			    i__3 = 0, i__2 = k - 3;
			    i2 = max(i__3,i__2);
			    iqaend = i__ * *kd + 1;
			    if (k == 2) {
				iqaend += *kd;
			    }
			    iqaend = min(iqaend,iqend);
			    i__3 = j2;
			    i__2 = kd1;
			    for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j 
				    += i__2) {
				ibl = i__ - i2 / kdm1;
				++i2;
/* Computing MAX */
				i__4 = 1, i__5 = j - ibl;
				iqb = max(i__4,i__5);
				nq = iqaend + 1 - iqb;
/* Computing MIN */
				i__4 = iqaend + *kd;
				iqaend = min(i__4,iqend);
				zrot_(&nq, &q[iqb + (j - 1) * q_dim1], &c__1, 
					&q[iqb + j * q_dim1], &c__1, &d__[j], 
					&work[j]);
/* L170: */
			    }
			} else {

			    i__2 = j2;
			    i__3 = kd1;
			    for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j 
				    += i__3) {
				zrot_(n, &q[(j - 1) * q_dim1 + 1], &c__1, &q[
					j * q_dim1 + 1], &c__1, &d__[j], &
					work[j]);
/* L180: */
			    }
			}
		    }

		    if (j2 + kdn > *n) {

/*                    adjust J2 to keep within the bounds of the matrix */

			--nr;
			j2 = j2 - kdn - 1;
		    }

		    i__3 = j2;
		    i__2 = kd1;
		    for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2) 
			    {

/*                    create nonzero element a(j+kd,j-1) outside the */
/*                    band and store it in WORK */
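/*                    applying the rotation (D(j), WORK(j)): the fill-in */
/*                    WORK(j)*AB(KD+1,j) is saved in WORK(j+kd) to be */
/*                    annihilated on a later pass, while D(j)*AB(KD+1,j) */
/*                    remains inside the band */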

			i__4 = j + *kd;
			i__5 = j;
			i__6 = kd1 + j * ab_dim1;
			z__1.r = work[i__5].r * ab[i__6].r - work[i__5].i * 
				ab[i__6].i, z__1.i = work[i__5].r * ab[i__6]
				.i + work[i__5].i * ab[i__6].r;
			work[i__4].r = z__1.r, work[i__4].i = z__1.i;
			i__4 = kd1 + j * ab_dim1;
			i__5 = j;
			i__6 = kd1 + j * ab_dim1;
			z__1.r = d__[i__5] * ab[i__6].r, z__1.i = d__[i__5] * 
				ab[i__6].i;
			ab[i__4].r = z__1.r, ab[i__4].i = z__1.i;
/* L190: */
		    }
/* L200: */
		}
/* L210: */
	    }
	}

	if (*kd > 0) {

/*           make off-diagonal elements real and copy them to E */
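/*           each subdiagonal entry T = AB(2,i) is replaced by its modulus */
/*           ABST, which is copied to E(i); the phase T/ABST is pushed */
/*           into the next subdiagonal entry and, if WANTQ, column i+1 of */
/*           Q is scaled by T/ABST */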

	    i__1 = *n - 1;
	    for (i__ = 1; i__ <= i__1; ++i__) {
		i__2 = i__ * ab_dim1 + 2;
		t.r = ab[i__2].r, t.i = ab[i__2].i;
		abst = z_abs(&t);
		i__2 = i__ * ab_dim1 + 2;
		ab[i__2].r = abst, ab[i__2].i = 0.;
		e[i__] = abst;
		if (abst != 0.) {
		    z__1.r = t.r / abst, z__1.i = t.i / abst;
		    t.r = z__1.r, t.i = z__1.i;
		} else {
		    t.r = 1., t.i = 0.;
		}
		if (i__ < *n - 1) {
		    i__2 = (i__ + 1) * ab_dim1 + 2;
		    i__3 = (i__ + 1) * ab_dim1 + 2;
		    z__1.r = ab[i__3].r * t.r - ab[i__3].i * t.i, z__1.i = ab[
			    i__3].r * t.i + ab[i__3].i * t.r;
		    ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
		}
		if (wantq) {
		    zscal_(n, &t, &q[(i__ + 1) * q_dim1 + 1], &c__1);
		}
/* L220: */
	    }
	} else {

/*           set E to zero if original matrix was diagonal */

	    i__1 = *n - 1;
	    for (i__ = 1; i__ <= i__1; ++i__) {
		e[i__] = 0.;
/* L230: */
	    }
	}

/*        copy diagonal elements to D */

	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    i__2 = i__;
	    i__3 = i__ * ab_dim1 + 1;
	    d__[i__2] = ab[i__3].r;
/* L240: */
	}
    }

    return 0;

/*     End of ZHBTRD */

} /* zhbtrd_ */
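
/* ---------------------------------------------------------------------- */
/*  Usage sketch (not part of the LAPACK source): a minimal, hypothetical */
/*  driver showing one way to call zhbtrd_ on a small Hermitian band      */
/*  matrix.  It assumes the f2c types from "f2c.h" and a prototype with   */
/*  trailing ftnlen arguments, as used elsewhere in this translation;     */
/*  check the clapack.h shipped with your build before relying on it.     */

#include <stdio.h>
#include "f2c.h"

extern int zhbtrd_(char *vect, char *uplo, integer *n, integer *kd,
	doublecomplex *ab, integer *ldab, doublereal *d__, doublereal *e,
	doublecomplex *q, integer *ldq, doublecomplex *work, integer *info,
	ftnlen vect_len, ftnlen uplo_len);

int main(void)
{
    /* 3 by 3 Hermitian band matrix with one subdiagonal, lower storage: */
    /* AB(1,j) holds A(j,j) and AB(2,j) holds A(j+1,j), column-major.    */
    integer n = 3, kd = 1, ldab = 2, ldq = 3, info = 0, j;
    doublecomplex ab[6], q[9], work[3];
    doublereal d[3], e[2];

    for (j = 0; j < 6; ++j) {
	ab[j].r = 0., ab[j].i = 0.;
    }
    ab[0].r = 2.;			/* A(1,1) */
    ab[1].r = 1., ab[1].i = 1.;		/* A(2,1) */
    ab[2].r = 3.;			/* A(2,2) */
    ab[3].r = 0., ab[3].i = -1.;	/* A(3,2) */
    ab[4].r = 4.;			/* A(3,3) */

    /* VECT = "V": form the unitary Q; UPLO = "L": lower triangle stored */
    zhbtrd_("V", "L", &n, &kd, ab, &ldab, d, e, q, &ldq, work, &info,
	    (ftnlen)1, (ftnlen)1);

    printf("info = %ld\n", (long) info);
    for (j = 0; j < n; ++j) {
	printf("d[%ld] = %g\n", (long) j, d[j]);
    }
    for (j = 0; j < n - 1; ++j) {
	printf("e[%ld] = %g\n", (long) j, e[j]);
    }
    return 0;
}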