Example #1
0
#include "f2c.h"

/* Common block used by the timing instrumentation (assumed layout, as in the
   standard f2c translation of the instrumented LAPACK sources). */
struct {
    doublereal ops, itcnt;
} latime_;

#define latime_1 latime_

/* Table of constant values (assumed, as generated by f2c for this file):
   c_b1/c_b2 are the zero/one passed to ZLASET to build the identity,
   c__1/c__2 are integer constants passed by reference. */
static doublecomplex c_b1 = {0.,0.};
static doublecomplex c_b2 = {1.,0.};
static integer c__1 = 1;
static integer c__2 = 2;

/* Subroutine */ int zhgeqz_(char *job, char *compq, char *compz, integer *n, 
	integer *ilo, integer *ihi, doublecomplex *a, integer *lda, 
	doublecomplex *b, integer *ldb, doublecomplex *alpha, doublecomplex *
	beta, doublecomplex *q, integer *ldq, doublecomplex *z__, integer *
	ldz, doublecomplex *work, integer *lwork, doublereal *rwork, integer *
	info)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
	    z_offset, i__1, i__2, i__3, i__4, i__5, i__6;
    doublereal d__1, d__2, d__3, d__4, d__5, d__6;
    doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;

    /* Builtin functions */
    double z_abs(doublecomplex *);
    void d_cnjg(doublecomplex *, doublecomplex *);
    double d_imag(doublecomplex *);
    void z_div(doublecomplex *, doublecomplex *, doublecomplex *), pow_zi(
	    doublecomplex *, doublecomplex *, integer *), z_sqrt(
	    doublecomplex *, doublecomplex *);

    /* Local variables */
    static doublereal absb, atol, btol, temp, opst;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    static doublereal temp2, c__;
    static integer j;
    static doublecomplex s, t;
    extern logical lsame_(char *, char *);
    static doublecomplex ctemp;
    static integer iiter, ilast, jiter;
    static doublereal anorm;
    static integer maxit;
    static doublereal bnorm;
    static doublecomplex shift;
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *);
    static doublereal tempr;
    static doublecomplex ctemp2, ctemp3;
    static logical ilazr2;
    static integer jc, in;
    static doublereal ascale, bscale;
    static doublecomplex u12;
    extern doublereal dlamch_(char *);
    static integer jr, nq;
    static doublecomplex signbc;
    static integer nz;
    static doublereal safmin;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    static doublecomplex eshift;
    static logical ilschr;
    static integer icompq, ilastm;
    static doublecomplex rtdisc;
    static integer ischur;
    extern doublereal zlanhs_(char *, integer *, doublecomplex *, integer *, 
	    doublereal *);
    static logical ilazro;
    static integer icompz, ifirst;
    extern /* Subroutine */ int zlartg_(doublecomplex *, doublecomplex *, 
	    doublereal *, doublecomplex *, doublecomplex *);
    static integer ifrstm;
    extern /* Subroutine */ int zlaset_(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, doublecomplex *, integer *);
    static integer istart;
    static logical lquery;
    static doublecomplex ad11, ad12, ad21, ad22;
    static integer jch;
    static logical ilq, ilz;
    static doublereal ulp;
    static doublecomplex abi22;


#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]
#define q_subscr(a_1,a_2) (a_2)*q_dim1 + a_1
#define q_ref(a_1,a_2) q[q_subscr(a_1,a_2)]
#define z___subscr(a_1,a_2) (a_2)*z_dim1 + a_1
#define z___ref(a_1,a_2) z__[z___subscr(a_1,a_2)]


/*  -- LAPACK routine (instrumented to count operations, version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   


       ----------------------- Begin Timing Code ------------------------   
       Common block to return operation count and iteration count   
       ITCNT is initialized to 0, OPS is only incremented   
       OPST is used to accumulate small contributions to OPS   
       to avoid roundoff error   
       ------------------------ End Timing Code -------------------------   


    Purpose   
    =======   

    ZHGEQZ implements a single-shift version of the QZ   
    method for finding the generalized eigenvalues w(i)=ALPHA(i)/BETA(i)   
    of the equation   

         det( A - w(i) B ) = 0   

    If JOB='S', then the pair (A,B) is simultaneously   
    reduced to Schur form (i.e., A and B are both upper triangular) by   
    applying one unitary transformation (usually called Q) on the left and   
    another (usually called Z) on the right.  The diagonal elements of   
    A are then ALPHA(1),...,ALPHA(N), and of B are BETA(1),...,BETA(N).   

    If JOB='S' and COMPQ and COMPZ are 'V' or 'I', then the unitary   
    transformations used to reduce (A,B) are accumulated into the arrays   
    Q and Z s.t.:   

         Q(in) A(in) Z(in)* = Q(out) A(out) Z(out)*   
         Q(in) B(in) Z(in)* = Q(out) B(out) Z(out)*   

    Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix   
         Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973),   
         pp. 241--256.   

    Arguments   
    =========   

    JOB     (input) CHARACTER*1   
            = 'E': compute only ALPHA and BETA.  A and B will not   
                   necessarily be put into generalized Schur form.   
            = 'S': put A and B into generalized Schur form, as well   
                   as computing ALPHA and BETA.   

    COMPQ   (input) CHARACTER*1   
            = 'N': do not modify Q.   
            = 'V': multiply the array Q on the right by the conjugate   
                   transpose of the unitary transformation that is   
                   applied to the left side of A and B to reduce them   
                   to Schur form.   
            = 'I': like COMPQ='V', except that Q will be initialized to   
                   the identity first.   

    COMPZ   (input) CHARACTER*1   
            = 'N': do not modify Z.   
            = 'V': multiply the array Z on the right by the unitary   
                   transformation that is applied to the right side of   
                   A and B to reduce them to Schur form.   
            = 'I': like COMPZ='V', except that Z will be initialized to   
                   the identity first.   

    N       (input) INTEGER   
            The order of the matrices A, B, Q, and Z.  N >= 0.   

    ILO     (input) INTEGER   
    IHI     (input) INTEGER   
            It is assumed that A is already upper triangular in rows and   
            columns 1:ILO-1 and IHI+1:N.   
            1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.   

    A       (input/output) COMPLEX*16 array, dimension (LDA, N)   
            On entry, the N-by-N upper Hessenberg matrix A.  Elements   
            below the subdiagonal must be zero.   
            If JOB='S', then on exit A and B will have been   
               simultaneously reduced to upper triangular form.   
            If JOB='E', then on exit A will have been destroyed.   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= max( 1, N ).   

    B       (input/output) COMPLEX*16 array, dimension (LDB, N)   
            On entry, the N-by-N upper triangular matrix B.  Elements   
            below the diagonal must be zero.   
            If JOB='S', then on exit A and B will have been   
               simultaneously reduced to upper triangular form.   
            If JOB='E', then on exit B will have been destroyed.   

    LDB     (input) INTEGER   
            The leading dimension of the array B.  LDB >= max( 1, N ).   

    ALPHA   (output) COMPLEX*16 array, dimension (N)   
            The diagonal elements of A when the pair (A,B) has been   
            reduced to Schur form.  ALPHA(i)/BETA(i) i=1,...,N   
            are the generalized eigenvalues.   

    BETA    (output) COMPLEX*16 array, dimension (N)   
            The diagonal elements of B when the pair (A,B) has been   
            reduced to Schur form.  ALPHA(i)/BETA(i) i=1,...,N   
            are the generalized eigenvalues.  A and B are normalized   
            so that BETA(1),...,BETA(N) are non-negative real numbers.   

    Q       (input/output) COMPLEX*16 array, dimension (LDQ, N)   
            If COMPQ='N', then Q will not be referenced.   
            If COMPQ='V' or 'I', then the conjugate transpose of the   
               unitary transformations which are applied to A and B on   
               the left will be applied to the array Q on the right.   

    LDQ     (input) INTEGER   
            The leading dimension of the array Q.  LDQ >= 1.   
            If COMPQ='V' or 'I', then LDQ >= N.   

    Z       (input/output) COMPLEX*16 array, dimension (LDZ, N)   
            If COMPZ='N', then Z will not be referenced.   
            If COMPZ='V' or 'I', then the unitary transformations which   
               are applied to A and B on the right will be applied to the   
               array Z on the right.   

    LDZ     (input) INTEGER   
            The leading dimension of the array Z.  LDZ >= 1.   
            If COMPZ='V' or 'I', then LDZ >= N.   

    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)   
            On exit, if INFO >= 0, WORK(1) returns the optimal LWORK.   

    LWORK   (input) INTEGER   
            The dimension of the array WORK.  LWORK >= max(1,N).   

            If LWORK = -1, then a workspace query is assumed; the routine   
            only calculates the optimal size of the WORK array, returns   
            this value as the first entry of the WORK array, and no error   
            message related to LWORK is issued by XERBLA.   

    RWORK   (workspace) DOUBLE PRECISION array, dimension (N)   

    INFO    (output) INTEGER   
            = 0: successful exit   
            < 0: if INFO = -i, the i-th argument had an illegal value   
            = 1,...,N: the QZ iteration did not converge.  (A,B) is not   
                       in Schur form, but ALPHA(i) and BETA(i),   
                       i=INFO+1,...,N should be correct.   
            = N+1,...,2*N: the shift calculation failed.  (A,B) is not   
                       in Schur form, but ALPHA(i) and BETA(i),   
                       i=INFO-N+1,...,N should be correct.   
            > 2*N:     various "impossible" errors.   

    Further Details   
    ===============   

    We assume that complex ABS works as long as its value is less than   
    overflow.   

    =====================================================================   

       ----------------------- Begin Timing Code ------------------------   
       Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    --alpha;
    --beta;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1 * 1;
    z__ -= z_offset;
    --work;
    --rwork;

    /* Function Body */
    latime_1.itcnt = 0.;
/*     ------------------------ End Timing Code -------------------------   

       Decode JOB, COMPQ, COMPZ */

    if (lsame_(job, "E")) {
	ilschr = FALSE_;
	ischur = 1;
    } else if (lsame_(job, "S")) {
	ilschr = TRUE_;
	ischur = 2;
    } else {
	ischur = 0;
    }

    if (lsame_(compq, "N")) {
	ilq = FALSE_;
	icompq = 1;
	nq = 0;
    } else if (lsame_(compq, "V")) {
	ilq = TRUE_;
	icompq = 2;
	nq = *n;
    } else if (lsame_(compq, "I")) {
	ilq = TRUE_;
	icompq = 3;
	nq = *n;
    } else {
	icompq = 0;
    }

    if (lsame_(compz, "N")) {
	ilz = FALSE_;
	icompz = 1;
	nz = 0;
    } else if (lsame_(compz, "V")) {
	ilz = TRUE_;
	icompz = 2;
	nz = *n;
    } else if (lsame_(compz, "I")) {
	ilz = TRUE_;
	icompz = 3;
	nz = *n;
    } else {
	icompz = 0;
    }

/*     Check Argument Values */

    *info = 0;
    i__1 = max(1,*n);
    work[1].r = (doublereal) i__1, work[1].i = 0.;
    lquery = *lwork == -1;
    if (ischur == 0) {
	*info = -1;
    } else if (icompq == 0) {
	*info = -2;
    } else if (icompz == 0) {
	*info = -3;
    } else if (*n < 0) {
	*info = -4;
    } else if (*ilo < 1) {
	*info = -5;
    } else if (*ihi > *n || *ihi < *ilo - 1) {
	*info = -6;
    } else if (*lda < *n) {
	*info = -8;
    } else if (*ldb < *n) {
	*info = -10;
    } else if (*ldq < 1 || ilq && *ldq < *n) {
	*info = -14;
    } else if (*ldz < 1 || ilz && *ldz < *n) {
	*info = -16;
    } else if (*lwork < max(1,*n) && ! lquery) {
	*info = -18;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZHGEQZ", &i__1);
	return 0;
    } else if (lquery) {
	return 0;
    }

/*     Quick return if possible   

       WORK( 1 ) = CMPLX( 1 ) */
    if (*n <= 0) {
	work[1].r = 1., work[1].i = 0.;
	return 0;
    }

/*     Initialize Q and Z */

    if (icompq == 3) {
	zlaset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
    }
    if (icompz == 3) {
	zlaset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
    }

/*     Machine Constants */

    in = *ihi + 1 - *ilo;
    safmin = dlamch_("S");
    ulp = dlamch_("E") * dlamch_("B");
    anorm = zlanhs_("F", &in, &a_ref(*ilo, *ilo), lda, &rwork[1]);
    bnorm = zlanhs_("F", &in, &b_ref(*ilo, *ilo), ldb, &rwork[1]);
/* Computing MAX */
    d__1 = safmin, d__2 = ulp * anorm;
    atol = max(d__1,d__2);
/* Computing MAX */
    d__1 = safmin, d__2 = ulp * bnorm;
    btol = max(d__1,d__2);
    ascale = 1. / max(safmin,anorm);
    bscale = 1. / max(safmin,bnorm);
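
/*     ATOL and BTOL are the deflation thresholds: a subdiagonal entry of A
       (or a diagonal entry of B) whose magnitude is at most
       max( SAFMIN, ULP*norm ) is treated as zero.  ASCALE and BSCALE rescale
       A and B so that intermediate quantities in the shift computation stay
       well away from overflow. */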

/*     ---------------------- Begin Timing Code -------------------------   
       Count ops for norms, etc. */
    opst = 0.;
/* Computing 2nd power */
    i__1 = *n;
    latime_1.ops += (doublereal) ((i__1 * i__1 << 2) + *n * 12 - 5);
/*     ----------------------- End Timing Code --------------------------   



       Set Eigenvalues IHI+1:N */

    i__1 = *n;
    for (j = *ihi + 1; j <= i__1; ++j) {
	absb = z_abs(&b_ref(j, j));
	if (absb > safmin) {
	    i__2 = b_subscr(j, j);
	    z__2.r = b[i__2].r / absb, z__2.i = b[i__2].i / absb;
	    d_cnjg(&z__1, &z__2);
	    signbc.r = z__1.r, signbc.i = z__1.i;
	    i__2 = b_subscr(j, j);
	    b[i__2].r = absb, b[i__2].i = 0.;
	    if (ilschr) {
		i__2 = j - 1;
		zscal_(&i__2, &signbc, &b_ref(1, j), &c__1);
		zscal_(&j, &signbc, &a_ref(1, j), &c__1);
/*              ----------------- Begin Timing Code --------------------- */
		opst += (doublereal) ((j - 1) * 12);
/*              ------------------ End Timing Code ---------------------- */
	    } else {
		i__2 = a_subscr(j, j);
		i__3 = a_subscr(j, j);
		z__1.r = a[i__3].r * signbc.r - a[i__3].i * signbc.i, z__1.i =
			 a[i__3].r * signbc.i + a[i__3].i * signbc.r;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
	    }
	    if (ilz) {
		zscal_(n, &signbc, &z___ref(1, j), &c__1);
	    }
/*           ------------------- Begin Timing Code ---------------------- */
	    opst += (doublereal) (nz * 6 + 13);
/*           -------------------- End Timing Code ----------------------- */
	} else {
	    i__2 = b_subscr(j, j);
	    b[i__2].r = 0., b[i__2].i = 0.;
	}
	i__2 = j;
	i__3 = a_subscr(j, j);
	alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
	i__2 = j;
	i__3 = b_subscr(j, j);
	beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;
/* L10: */
    }
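
/*     The loop above standardizes the trailing diagonal of B: each B(J,J) is
       made real and non-negative by the unit-modulus factor
       SIGNBC = conjg( B(J,J)/|B(J,J)| ), the factor is folded into A (and
       into column J of A, B and Z when the Schur form / Z are requested),
       and ALPHA(J), BETA(J) are set to the resulting diagonal entries. */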

/*     If IHI < ILO, skip QZ steps */

    if (*ihi < *ilo) {
	goto L190;
    }

/*     MAIN QZ ITERATION LOOP   

       Initialize dynamic indices   

       Eigenvalues ILAST+1:N have been found.   
          Column operations modify rows IFRSTM:whatever   
          Row operations modify columns whatever:ILASTM   

       If only eigenvalues are being computed, then   
          IFRSTM is the row of the last splitting row above row ILAST;   
          this is always at least ILO.   
       IITER counts iterations since the last eigenvalue was found,   
          to tell when to use an extraordinary shift.   
       MAXIT is the maximum number of QZ sweeps allowed. */

    ilast = *ihi;
    if (ilschr) {
	ifrstm = 1;
	ilastm = *n;
    } else {
	ifrstm = *ilo;
	ilastm = *ihi;
    }
    iiter = 0;
    eshift.r = 0., eshift.i = 0.;
    maxit = (*ihi - *ilo + 1) * 30;

    i__1 = maxit;
    for (jiter = 1; jiter <= i__1; ++jiter) {

/*        Check for too many iterations. */

	if (jiter > maxit) {
	    goto L180;
	}

/*        Split the matrix if possible.   

          Two tests:   
             1: A(j,j-1)=0  or  j=ILO   
             2: B(j,j)=0   

          Special case: j=ILAST */

	if (ilast == *ilo) {
	    goto L60;
	} else {
	    i__2 = a_subscr(ilast, ilast - 1);
	    if ((d__1 = a[i__2].r, abs(d__1)) + (d__2 = d_imag(&a_ref(ilast, 
		    ilast - 1)), abs(d__2)) <= atol) {
		i__2 = a_subscr(ilast, ilast - 1);
		a[i__2].r = 0., a[i__2].i = 0.;
		goto L60;
	    }
	}

	if (z_abs(&b_ref(ilast, ilast)) <= btol) {
	    i__2 = b_subscr(ilast, ilast);
	    b[i__2].r = 0., b[i__2].i = 0.;
	    goto L50;
	}

/*        General case: j<ILAST */

	i__2 = *ilo;
	for (j = ilast - 1; j >= i__2; --j) {

/*           Test 1: for A(j,j-1)=0 or j=ILO */

	    if (j == *ilo) {
		ilazro = TRUE_;
	    } else {
		i__3 = a_subscr(j, j - 1);
		if ((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a_ref(j, 
			j - 1)), abs(d__2)) <= atol) {
		    i__3 = a_subscr(j, j - 1);
		    a[i__3].r = 0., a[i__3].i = 0.;
		    ilazro = TRUE_;
		} else {
		    ilazro = FALSE_;
		}
	    }

/*           Test 2: for B(j,j)=0 */

	    if (z_abs(&b_ref(j, j)) < btol) {
		i__3 = b_subscr(j, j);
		b[i__3].r = 0., b[i__3].i = 0.;

/*              Test 1a: Check for 2 consecutive small subdiagonals in A */

		ilazr2 = FALSE_;
		if (! ilazro) {
		    i__3 = a_subscr(j, j - 1);
		    i__4 = a_subscr(j + 1, j);
		    i__5 = a_subscr(j, j);
		    if (((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&
			    a_ref(j, j - 1)), abs(d__2))) * (ascale * ((d__3 =
			     a[i__4].r, abs(d__3)) + (d__4 = d_imag(&a_ref(j 
			    + 1, j)), abs(d__4)))) <= ((d__5 = a[i__5].r, abs(
			    d__5)) + (d__6 = d_imag(&a_ref(j, j)), abs(d__6)))
			     * (ascale * atol)) {
			ilazr2 = TRUE_;
		    }
		}

/*              If both tests pass (1 & 2), i.e., the leading diagonal   
                element of B in the block is zero, split a 1x1 block off   
                at the top. (I.e., at the J-th row/column) The leading   
                diagonal element of the remainder can also be zero, so   
                this may have to be done repeatedly. */

		if (ilazro || ilazr2) {
		    i__3 = ilast - 1;
		    for (jch = j; jch <= i__3; ++jch) {
			i__4 = a_subscr(jch, jch);
			ctemp.r = a[i__4].r, ctemp.i = a[i__4].i;
			zlartg_(&ctemp, &a_ref(jch + 1, jch), &c__, &s, &
				a_ref(jch, jch));
			i__4 = a_subscr(jch + 1, jch);
			a[i__4].r = 0., a[i__4].i = 0.;
			i__4 = ilastm - jch;
			zrot_(&i__4, &a_ref(jch, jch + 1), lda, &a_ref(jch + 
				1, jch + 1), lda, &c__, &s);
			i__4 = ilastm - jch;
			zrot_(&i__4, &b_ref(jch, jch + 1), ldb, &b_ref(jch + 
				1, jch + 1), ldb, &c__, &s);
			if (ilq) {
			    d_cnjg(&z__1, &s);
			    zrot_(n, &q_ref(1, jch), &c__1, &q_ref(1, jch + 1)
				    , &c__1, &c__, &z__1);
			}
			if (ilazr2) {
			    i__4 = a_subscr(jch, jch - 1);
			    i__5 = a_subscr(jch, jch - 1);
			    z__1.r = c__ * a[i__5].r, z__1.i = c__ * a[i__5]
				    .i;
			    a[i__4].r = z__1.r, a[i__4].i = z__1.i;
			}
			ilazr2 = FALSE_;
/*                    --------------- Begin Timing Code ----------------- */
			opst += (doublereal) ((ilastm - jch) * 40 + 32 + nq * 
				20);
/*                    ---------------- End Timing Code ------------------ */
			i__4 = b_subscr(jch + 1, jch + 1);
			if ((d__1 = b[i__4].r, abs(d__1)) + (d__2 = d_imag(&
				b_ref(jch + 1, jch + 1)), abs(d__2)) >= btol) 
				{
			    if (jch + 1 >= ilast) {
				goto L60;
			    } else {
				ifirst = jch + 1;
				goto L70;
			    }
			}
			i__4 = b_subscr(jch + 1, jch + 1);
			b[i__4].r = 0., b[i__4].i = 0.;
/* L20: */
		    }
		    goto L50;
		} else {

/*                 Only test 2 passed -- chase the zero to B(ILAST,ILAST)   
                   Then process as in the case B(ILAST,ILAST)=0 */

		    i__3 = ilast - 1;
		    for (jch = j; jch <= i__3; ++jch) {
			i__4 = b_subscr(jch, jch + 1);
			ctemp.r = b[i__4].r, ctemp.i = b[i__4].i;
			zlartg_(&ctemp, &b_ref(jch + 1, jch + 1), &c__, &s, &
				b_ref(jch, jch + 1));
			i__4 = b_subscr(jch + 1, jch + 1);
			b[i__4].r = 0., b[i__4].i = 0.;
			if (jch < ilastm - 1) {
			    i__4 = ilastm - jch - 1;
			    zrot_(&i__4, &b_ref(jch, jch + 2), ldb, &b_ref(
				    jch + 1, jch + 2), ldb, &c__, &s);
			}
			i__4 = ilastm - jch + 2;
			zrot_(&i__4, &a_ref(jch, jch - 1), lda, &a_ref(jch + 
				1, jch - 1), lda, &c__, &s);
			if (ilq) {
			    d_cnjg(&z__1, &s);
			    zrot_(n, &q_ref(1, jch), &c__1, &q_ref(1, jch + 1)
				    , &c__1, &c__, &z__1);
			}
			i__4 = a_subscr(jch + 1, jch);
			ctemp.r = a[i__4].r, ctemp.i = a[i__4].i;
			zlartg_(&ctemp, &a_ref(jch + 1, jch - 1), &c__, &s, &
				a_ref(jch + 1, jch));
			i__4 = a_subscr(jch + 1, jch - 1);
			a[i__4].r = 0., a[i__4].i = 0.;
			i__4 = jch + 1 - ifrstm;
			zrot_(&i__4, &a_ref(ifrstm, jch), &c__1, &a_ref(
				ifrstm, jch - 1), &c__1, &c__, &s);
			i__4 = jch - ifrstm;
			zrot_(&i__4, &b_ref(ifrstm, jch), &c__1, &b_ref(
				ifrstm, jch - 1), &c__1, &c__, &s);
			if (ilz) {
			    zrot_(n, &z___ref(1, jch), &c__1, &z___ref(1, jch 
				    - 1), &c__1, &c__, &s);
			}
/* L30: */
		    }

/*                 ---------------- Begin Timing Code ------------------- */
		    opst += (doublereal) ((ilastm + 1 - ifrstm) * 40 + 64 + (
			    nq + nz) * 20) * (doublereal) (ilast - j);
/*                 ----------------- End Timing Code -------------------- */

		    goto L50;
		}
	    } else if (ilazro) {

/*              Only test 1 passed -- work on J:ILAST */

		ifirst = j;
		goto L70;
	    }

/*           Neither test passed -- try next J   

   L40: */
	}

/*        (Drop-through is "impossible") */

	*info = (*n << 1) + 1;
	goto L210;

/*        B(ILAST,ILAST)=0 -- clear A(ILAST,ILAST-1) to split off a   
          1x1 block. */

L50:
	i__2 = a_subscr(ilast, ilast);
	ctemp.r = a[i__2].r, ctemp.i = a[i__2].i;
	zlartg_(&ctemp, &a_ref(ilast, ilast - 1), &c__, &s, &a_ref(ilast, 
		ilast));
	i__2 = a_subscr(ilast, ilast - 1);
	a[i__2].r = 0., a[i__2].i = 0.;
	i__2 = ilast - ifrstm;
	zrot_(&i__2, &a_ref(ifrstm, ilast), &c__1, &a_ref(ifrstm, ilast - 1), 
		&c__1, &c__, &s);
	i__2 = ilast - ifrstm;
	zrot_(&i__2, &b_ref(ifrstm, ilast), &c__1, &b_ref(ifrstm, ilast - 1), 
		&c__1, &c__, &s);
	if (ilz) {
	    zrot_(n, &z___ref(1, ilast), &c__1, &z___ref(1, ilast - 1), &c__1,
		     &c__, &s);
	}
/*        --------------------- Begin Timing Code ----------------------- */
	opst += (doublereal) ((ilast - ifrstm) * 40 + 32 + nz * 20);
/*        ---------------------- End Timing Code ------------------------   

          A(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA */

L60:
	absb = z_abs(&b_ref(ilast, ilast));
	if (absb > safmin) {
	    i__2 = b_subscr(ilast, ilast);
	    z__2.r = b[i__2].r / absb, z__2.i = b[i__2].i / absb;
	    d_cnjg(&z__1, &z__2);
	    signbc.r = z__1.r, signbc.i = z__1.i;
	    i__2 = b_subscr(ilast, ilast);
	    b[i__2].r = absb, b[i__2].i = 0.;
	    if (ilschr) {
		i__2 = ilast - ifrstm;
		zscal_(&i__2, &signbc, &b_ref(ifrstm, ilast), &c__1);
		i__2 = ilast + 1 - ifrstm;
		zscal_(&i__2, &signbc, &a_ref(ifrstm, ilast), &c__1);
/*              ----------------- Begin Timing Code --------------------- */
		opst += (doublereal) ((ilast - ifrstm) * 12);
/*              ------------------ End Timing Code ---------------------- */
	    } else {
		i__2 = a_subscr(ilast, ilast);
		i__3 = a_subscr(ilast, ilast);
		z__1.r = a[i__3].r * signbc.r - a[i__3].i * signbc.i, z__1.i =
			 a[i__3].r * signbc.i + a[i__3].i * signbc.r;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
	    }
	    if (ilz) {
		zscal_(n, &signbc, &z___ref(1, ilast), &c__1);
	    }
/*           ------------------- Begin Timing Code ---------------------- */
	    opst += (doublereal) (nz * 6 + 13);
/*           -------------------- End Timing Code ----------------------- */
	} else {
	    i__2 = b_subscr(ilast, ilast);
	    b[i__2].r = 0., b[i__2].i = 0.;
	}
	i__2 = ilast;
	i__3 = a_subscr(ilast, ilast);
	alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
	i__2 = ilast;
	i__3 = b_subscr(ilast, ilast);
	beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;

/*        Go to next block -- exit if finished. */

	--ilast;
	if (ilast < *ilo) {
	    goto L190;
	}

/*        Reset counters */

	iiter = 0;
	eshift.r = 0., eshift.i = 0.;
	if (! ilschr) {
	    ilastm = ilast;
	    if (ifrstm > ilast) {
		ifrstm = *ilo;
	    }
	}
	goto L160;

/*        QZ step   

          This iteration only involves rows/columns IFIRST:ILAST.  We   
          assume IFIRST < ILAST, and that the diagonal of B is non-zero. */

L70:
	++iiter;
	if (! ilschr) {
	    ifrstm = ifirst;
	}

/*        Compute the Shift.   

          At this point, IFIRST < ILAST, and the diagonal elements of   
          B(IFIRST:ILAST,IFIRST:ILAST) are larger than BTOL (in   
          magnitude) */

	if (iiter / 10 * 10 != iiter) {

/*           The Wilkinson shift (AEP p.512), i.e., the eigenvalue of   
             the bottom-right 2x2 block of A inv(B) which is nearest to   
             the bottom-right element.   

             We factor B as U*D, where U has unit diagonals, and   
             compute (A*inv(D))*inv(U). */

	    i__2 = b_subscr(ilast - 1, ilast);
	    z__2.r = bscale * b[i__2].r, z__2.i = bscale * b[i__2].i;
	    i__3 = b_subscr(ilast, ilast);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    u12.r = z__1.r, u12.i = z__1.i;
	    i__2 = a_subscr(ilast - 1, ilast - 1);
	    z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast - 1, ilast - 1);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    ad11.r = z__1.r, ad11.i = z__1.i;
	    i__2 = a_subscr(ilast, ilast - 1);
	    z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast - 1, ilast - 1);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    ad21.r = z__1.r, ad21.i = z__1.i;
	    i__2 = a_subscr(ilast - 1, ilast);
	    z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast, ilast);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    ad12.r = z__1.r, ad12.i = z__1.i;
	    i__2 = a_subscr(ilast, ilast);
	    z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast, ilast);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    ad22.r = z__1.r, ad22.i = z__1.i;
	    z__2.r = u12.r * ad21.r - u12.i * ad21.i, z__2.i = u12.r * ad21.i 
		    + u12.i * ad21.r;
	    z__1.r = ad22.r - z__2.r, z__1.i = ad22.i - z__2.i;
	    abi22.r = z__1.r, abi22.i = z__1.i;

	    z__2.r = ad11.r + abi22.r, z__2.i = ad11.i + abi22.i;
	    z__1.r = z__2.r * .5, z__1.i = z__2.i * .5;
	    t.r = z__1.r, t.i = z__1.i;
	    pow_zi(&z__4, &t, &c__2);
	    z__5.r = ad12.r * ad21.r - ad12.i * ad21.i, z__5.i = ad12.r * 
		    ad21.i + ad12.i * ad21.r;
	    z__3.r = z__4.r + z__5.r, z__3.i = z__4.i + z__5.i;
	    z__6.r = ad11.r * ad22.r - ad11.i * ad22.i, z__6.i = ad11.r * 
		    ad22.i + ad11.i * ad22.r;
	    z__2.r = z__3.r - z__6.r, z__2.i = z__3.i - z__6.i;
	    z_sqrt(&z__1, &z__2);
	    rtdisc.r = z__1.r, rtdisc.i = z__1.i;
	    z__1.r = t.r - abi22.r, z__1.i = t.i - abi22.i;
	    z__2.r = t.r - abi22.r, z__2.i = t.i - abi22.i;
	    temp = z__1.r * rtdisc.r + d_imag(&z__2) * d_imag(&rtdisc);
	    if (temp <= 0.) {
		z__1.r = t.r + rtdisc.r, z__1.i = t.i + rtdisc.i;
		shift.r = z__1.r, shift.i = z__1.i;
	    } else {
		z__1.r = t.r - rtdisc.r, z__1.i = t.i - rtdisc.i;
		shift.r = z__1.r, shift.i = z__1.i;
	    }
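
/*           SHIFT is the Wilkinson shift: with T = (AD11+ABI22)/2 and
             RTDISC = sqrt( T**2 + AD12*AD21 - AD11*AD22 ), the eigenvalues
             of the trailing 2x2 block of (ASCALE*A)*inv(BSCALE*B) are
             T +/- RTDISC, and the sign test above picks the root closer to
             ABI22, the (2,2) entry of that block. */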

/*           ------------------- Begin Timing Code ---------------------- */
	    opst += 116.;
/*           -------------------- End Timing Code ----------------------- */

	} else {

/*           Exceptional shift.  Chosen for no particularly good reason. */

	    i__2 = a_subscr(ilast - 1, ilast);
	    z__4.r = ascale * a[i__2].r, z__4.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast - 1, ilast - 1);
	    z__5.r = bscale * b[i__3].r, z__5.i = bscale * b[i__3].i;
	    z_div(&z__3, &z__4, &z__5);
	    d_cnjg(&z__2, &z__3);
	    z__1.r = eshift.r + z__2.r, z__1.i = eshift.i + z__2.i;
	    eshift.r = z__1.r, eshift.i = z__1.i;
	    shift.r = eshift.r, shift.i = eshift.i;

/*           ------------------- Begin Timing Code ---------------------- */
	    opst += 15.;
/*           -------------------- End Timing Code ----------------------- */

	}

/*        Now check for two consecutive small subdiagonals. */

	i__2 = ifirst + 1;
	for (j = ilast - 1; j >= i__2; --j) {
	    istart = j;
	    i__3 = a_subscr(j, j);
	    z__2.r = ascale * a[i__3].r, z__2.i = ascale * a[i__3].i;
	    i__4 = b_subscr(j, j);
	    z__4.r = bscale * b[i__4].r, z__4.i = bscale * b[i__4].i;
	    z__3.r = shift.r * z__4.r - shift.i * z__4.i, z__3.i = shift.r * 
		    z__4.i + shift.i * z__4.r;
	    z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
	    ctemp.r = z__1.r, ctemp.i = z__1.i;
	    temp = (d__1 = ctemp.r, abs(d__1)) + (d__2 = d_imag(&ctemp), abs(
		    d__2));
	    i__3 = a_subscr(j + 1, j);
	    temp2 = ascale * ((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&
		    a_ref(j + 1, j)), abs(d__2)));
	    tempr = max(temp,temp2);
	    if (tempr < 1. && tempr != 0.) {
		temp /= tempr;
		temp2 /= tempr;
	    }
	    i__3 = a_subscr(j, j - 1);
	    if (((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a_ref(j, j - 
		    1)), abs(d__2))) * temp2 <= temp * atol) {
		goto L90;
	    }
/* L80: */
	}

	istart = ifirst;
	i__2 = a_subscr(ifirst, ifirst);
	z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	i__3 = b_subscr(ifirst, ifirst);
	z__4.r = bscale * b[i__3].r, z__4.i = bscale * b[i__3].i;
	z__3.r = shift.r * z__4.r - shift.i * z__4.i, z__3.i = shift.r * 
		z__4.i + shift.i * z__4.r;
	z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
	ctemp.r = z__1.r, ctemp.i = z__1.i;
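
/*        CTEMP = ASCALE*A(ISTART,ISTART) - SHIFT*( BSCALE*B(ISTART,ISTART) )
          is the leading entry of the shifted, scaled pencil; together with
          ASCALE*A(ISTART+1,ISTART) it determines the first rotation of the
          implicit-shift sweep below. */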

/*        --------------------- Begin Timing Code ----------------------- */
	opst += -6.;
/*        ---------------------- End Timing Code ------------------------ */

L90:

/*        Do an implicit-shift QZ sweep.   

          Initial Q */

	i__2 = a_subscr(istart + 1, istart);
	z__1.r = ascale * a[i__2].r, z__1.i = ascale * a[i__2].i;
	ctemp2.r = z__1.r, ctemp2.i = z__1.i;

/*        --------------------- Begin Timing Code ----------------------- */
	opst += (doublereal) ((ilast - istart) * 18 + 2);
/*        ---------------------- End Timing Code ------------------------ */

	zlartg_(&ctemp, &ctemp2, &c__, &s, &ctemp3);
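
/*        ZLARTG returns (C,S) so that the rotation annihilates CTEMP2 against
          CTEMP, i.e. it maps the first column of the shifted pencil,
          restricted to rows ISTART and ISTART+1, onto a multiple of e1;
          applying it from the left starts the chase carried out below. */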

/*        Sweep */

	i__2 = ilast - 1;
	for (j = istart; j <= i__2; ++j) {
	    if (j > istart) {
		i__3 = a_subscr(j, j - 1);
		ctemp.r = a[i__3].r, ctemp.i = a[i__3].i;
		zlartg_(&ctemp, &a_ref(j + 1, j - 1), &c__, &s, &a_ref(j, j - 
			1));
		i__3 = a_subscr(j + 1, j - 1);
		a[i__3].r = 0., a[i__3].i = 0.;
	    }

	    i__3 = ilastm;
	    for (jc = j; jc <= i__3; ++jc) {
		i__4 = a_subscr(j, jc);
		z__2.r = c__ * a[i__4].r, z__2.i = c__ * a[i__4].i;
		i__5 = a_subscr(j + 1, jc);
		z__3.r = s.r * a[i__5].r - s.i * a[i__5].i, z__3.i = s.r * a[
			i__5].i + s.i * a[i__5].r;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		ctemp.r = z__1.r, ctemp.i = z__1.i;
		i__4 = a_subscr(j + 1, jc);
		d_cnjg(&z__4, &s);
		z__3.r = -z__4.r, z__3.i = -z__4.i;
		i__5 = a_subscr(j, jc);
		z__2.r = z__3.r * a[i__5].r - z__3.i * a[i__5].i, z__2.i = 
			z__3.r * a[i__5].i + z__3.i * a[i__5].r;
		i__6 = a_subscr(j + 1, jc);
		z__5.r = c__ * a[i__6].r, z__5.i = c__ * a[i__6].i;
		z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		a[i__4].r = z__1.r, a[i__4].i = z__1.i;
		i__4 = a_subscr(j, jc);
		a[i__4].r = ctemp.r, a[i__4].i = ctemp.i;
		i__4 = b_subscr(j, jc);
		z__2.r = c__ * b[i__4].r, z__2.i = c__ * b[i__4].i;
		i__5 = b_subscr(j + 1, jc);
		z__3.r = s.r * b[i__5].r - s.i * b[i__5].i, z__3.i = s.r * b[
			i__5].i + s.i * b[i__5].r;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		ctemp2.r = z__1.r, ctemp2.i = z__1.i;
		i__4 = b_subscr(j + 1, jc);
		d_cnjg(&z__4, &s);
		z__3.r = -z__4.r, z__3.i = -z__4.i;
		i__5 = b_subscr(j, jc);
		z__2.r = z__3.r * b[i__5].r - z__3.i * b[i__5].i, z__2.i = 
			z__3.r * b[i__5].i + z__3.i * b[i__5].r;
		i__6 = b_subscr(j + 1, jc);
		z__5.r = c__ * b[i__6].r, z__5.i = c__ * b[i__6].i;
		z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		b[i__4].r = z__1.r, b[i__4].i = z__1.i;
		i__4 = b_subscr(j, jc);
		b[i__4].r = ctemp2.r, b[i__4].i = ctemp2.i;
/* L100: */
	    }
	    if (ilq) {
		i__3 = *n;
		for (jr = 1; jr <= i__3; ++jr) {
		    i__4 = q_subscr(jr, j);
		    z__2.r = c__ * q[i__4].r, z__2.i = c__ * q[i__4].i;
		    d_cnjg(&z__4, &s);
		    i__5 = q_subscr(jr, j + 1);
		    z__3.r = z__4.r * q[i__5].r - z__4.i * q[i__5].i, z__3.i =
			     z__4.r * q[i__5].i + z__4.i * q[i__5].r;
		    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		    ctemp.r = z__1.r, ctemp.i = z__1.i;
		    i__4 = q_subscr(jr, j + 1);
		    z__3.r = -s.r, z__3.i = -s.i;
		    i__5 = q_subscr(jr, j);
		    z__2.r = z__3.r * q[i__5].r - z__3.i * q[i__5].i, z__2.i =
			     z__3.r * q[i__5].i + z__3.i * q[i__5].r;
		    i__6 = q_subscr(jr, j + 1);
		    z__4.r = c__ * q[i__6].r, z__4.i = c__ * q[i__6].i;
		    z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
		    q[i__4].r = z__1.r, q[i__4].i = z__1.i;
		    i__4 = q_subscr(jr, j);
		    q[i__4].r = ctemp.r, q[i__4].i = ctemp.i;
/* L110: */
		}
	    }

	    i__3 = b_subscr(j + 1, j + 1);
	    ctemp.r = b[i__3].r, ctemp.i = b[i__3].i;
	    zlartg_(&ctemp, &b_ref(j + 1, j), &c__, &s, &b_ref(j + 1, j + 1));
	    i__3 = b_subscr(j + 1, j);
	    b[i__3].r = 0., b[i__3].i = 0.;

/* Computing MIN */
	    i__4 = j + 2;
	    i__3 = min(i__4,ilast);
	    for (jr = ifrstm; jr <= i__3; ++jr) {
		i__4 = a_subscr(jr, j + 1);
		z__2.r = c__ * a[i__4].r, z__2.i = c__ * a[i__4].i;
		i__5 = a_subscr(jr, j);
		z__3.r = s.r * a[i__5].r - s.i * a[i__5].i, z__3.i = s.r * a[
			i__5].i + s.i * a[i__5].r;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		ctemp.r = z__1.r, ctemp.i = z__1.i;
		i__4 = a_subscr(jr, j);
		d_cnjg(&z__4, &s);
		z__3.r = -z__4.r, z__3.i = -z__4.i;
		i__5 = a_subscr(jr, j + 1);
		z__2.r = z__3.r * a[i__5].r - z__3.i * a[i__5].i, z__2.i = 
			z__3.r * a[i__5].i + z__3.i * a[i__5].r;
		i__6 = a_subscr(jr, j);
		z__5.r = c__ * a[i__6].r, z__5.i = c__ * a[i__6].i;
		z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		a[i__4].r = z__1.r, a[i__4].i = z__1.i;
		i__4 = a_subscr(jr, j + 1);
		a[i__4].r = ctemp.r, a[i__4].i = ctemp.i;
/* L120: */
	    }
	    i__3 = j;
	    for (jr = ifrstm; jr <= i__3; ++jr) {
		i__4 = b_subscr(jr, j + 1);
		z__2.r = c__ * b[i__4].r, z__2.i = c__ * b[i__4].i;
		i__5 = b_subscr(jr, j);
		z__3.r = s.r * b[i__5].r - s.i * b[i__5].i, z__3.i = s.r * b[
			i__5].i + s.i * b[i__5].r;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		ctemp.r = z__1.r, ctemp.i = z__1.i;
		i__4 = b_subscr(jr, j);
		d_cnjg(&z__4, &s);
		z__3.r = -z__4.r, z__3.i = -z__4.i;
		i__5 = b_subscr(jr, j + 1);
		z__2.r = z__3.r * b[i__5].r - z__3.i * b[i__5].i, z__2.i = 
			z__3.r * b[i__5].i + z__3.i * b[i__5].r;
		i__6 = b_subscr(jr, j);
		z__5.r = c__ * b[i__6].r, z__5.i = c__ * b[i__6].i;
		z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		b[i__4].r = z__1.r, b[i__4].i = z__1.i;
		i__4 = b_subscr(jr, j + 1);
		b[i__4].r = ctemp.r, b[i__4].i = ctemp.i;
/* L130: */
	    }
	    if (ilz) {
		i__3 = *n;
		for (jr = 1; jr <= i__3; ++jr) {
		    i__4 = z___subscr(jr, j + 1);
		    z__2.r = c__ * z__[i__4].r, z__2.i = c__ * z__[i__4].i;
		    i__5 = z___subscr(jr, j);
		    z__3.r = s.r * z__[i__5].r - s.i * z__[i__5].i, z__3.i = 
			    s.r * z__[i__5].i + s.i * z__[i__5].r;
		    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		    ctemp.r = z__1.r, ctemp.i = z__1.i;
		    i__4 = z___subscr(jr, j);
		    d_cnjg(&z__4, &s);
		    z__3.r = -z__4.r, z__3.i = -z__4.i;
		    i__5 = z___subscr(jr, j + 1);
		    z__2.r = z__3.r * z__[i__5].r - z__3.i * z__[i__5].i, 
			    z__2.i = z__3.r * z__[i__5].i + z__3.i * z__[i__5]
			    .r;
		    i__6 = z___subscr(jr, j);
		    z__5.r = c__ * z__[i__6].r, z__5.i = c__ * z__[i__6].i;
		    z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		    z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
		    i__4 = z___subscr(jr, j + 1);
		    z__[i__4].r = ctemp.r, z__[i__4].i = ctemp.i;
/* L140: */
		}
	    }
/* L150: */
	}

/*        --------------------- Begin Timing Code ----------------------- */
	opst += (doublereal) (ilast - istart) * (doublereal) ((ilastm - 
		ifrstm) * 40 + 184 + (nq + nz) * 20) - 20;
/*        ---------------------- End Timing Code ------------------------ */

L160:

/*        --------------------- Begin Timing Code -----------------------   
          End of iteration -- add in "small" contributions. */
	latime_1.ops += opst;
	opst = 0.;
/*        ---------------------- End Timing Code ------------------------   


   L170: */
    }

/*     Drop-through = non-convergence */

L180:
    *info = ilast;

/*     ---------------------- Begin Timing Code ------------------------- */
    latime_1.ops += opst;
    opst = 0.;
/*     ----------------------- End Timing Code -------------------------- */

    goto L210;

/*     Successful completion of all QZ steps */

L190:

/*     Set Eigenvalues 1:ILO-1 */

    i__1 = *ilo - 1;
    for (j = 1; j <= i__1; ++j) {
	absb = z_abs(&b_ref(j, j));
	if (absb > safmin) {
	    i__2 = b_subscr(j, j);
	    z__2.r = b[i__2].r / absb, z__2.i = b[i__2].i / absb;
	    d_cnjg(&z__1, &z__2);
	    signbc.r = z__1.r, signbc.i = z__1.i;
	    i__2 = b_subscr(j, j);
	    b[i__2].r = absb, b[i__2].i = 0.;
	    if (ilschr) {
		i__2 = j - 1;
		zscal_(&i__2, &signbc, &b_ref(1, j), &c__1);
		zscal_(&j, &signbc, &a_ref(1, j), &c__1);
/*              ----------------- Begin Timing Code --------------------- */
		opst += (doublereal) ((j - 1) * 12);
/*              ------------------ End Timing Code ---------------------- */
	    } else {
		i__2 = a_subscr(j, j);
		i__3 = a_subscr(j, j);
		z__1.r = a[i__3].r * signbc.r - a[i__3].i * signbc.i, z__1.i =
			 a[i__3].r * signbc.i + a[i__3].i * signbc.r;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
	    }
	    if (ilz) {
		zscal_(n, &signbc, &z___ref(1, j), &c__1);
	    }
/*           ------------------- Begin Timing Code ---------------------- */
	    opst += (doublereal) (nz * 6 + 13);
/*           -------------------- End Timing Code ----------------------- */
	} else {
	    i__2 = b_subscr(j, j);
	    b[i__2].r = 0., b[i__2].i = 0.;
	}
	i__2 = j;
	i__3 = a_subscr(j, j);
	alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
	i__2 = j;
	i__3 = b_subscr(j, j);
	beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;
/* L200: */
    }

/*     Normal Termination */

    *info = 0;

/*     Exit (other than argument error) -- return optimal workspace size */

L210:

/*     ---------------------- Begin Timing Code ------------------------- */
    latime_1.ops += opst;
    opst = 0.;
    latime_1.itcnt = (doublereal) jiter;
/*     ----------------------- End Timing Code -------------------------- */

    z__1.r = (doublereal) (*n), z__1.i = 0.;
    work[1].r = z__1.r, work[1].i = z__1.i;
    return 0;

/*     End of ZHGEQZ */

} /* zhgeqz_ */
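
/*     The sketch below is illustrative only and not part of LAPACK: it shows
       the calling sequence suggested by the comments above, in which a
       Hessenberg-triangular pair produced by ZGGHRD is passed to ZHGEQZ with
       JOB='S' to obtain the generalized Schur form and the eigenvalue ratios
       ALPHA(i)/BETA(i).  The wrapper name, the shared leading dimension LD,
       and the assumption that B is already upper triangular on entry (as
       ZGGHRD requires) are hypothetical choices made for the example; f2c.h
       types are assumed. */

extern /* Subroutine */ int zgghrd_(char *compq, char *compz, integer *n,
	integer *ilo, integer *ihi, doublecomplex *a, integer *lda,
	doublecomplex *b, integer *ldb, doublecomplex *q, integer *ldq,
	doublecomplex *z__, integer *ldz, integer *info);

/* Subroutine */ int qz_example_(integer *n, doublecomplex *a, doublecomplex *
	b, doublecomplex *alpha, doublecomplex *beta, doublecomplex *q,
	doublecomplex *z__, integer *ld, doublecomplex *work, integer *lwork,
	doublereal *rwork, integer *info)
{
    integer ilo = 1, ihi = *n;

/*     Reduce (A,B) to generalized Hessenberg form, building Q and Z. */
    zgghrd_("I", "I", n, &ilo, &ihi, a, ld, b, ld, q, ld, z__, ld, info);
    if (*info != 0) {
	return 0;
    }

/*     QZ iteration: Schur form of (A,B) with eigenvalues ALPHA(i)/BETA(i);
       the rotations are accumulated into Q and Z.  LWORK >= max(1,N). */
    zhgeqz_("S", "V", "V", n, &ilo, &ihi, a, ld, b, ld, alpha, beta, q, ld,
	    z__, ld, work, lwork, rwork, info);
    return 0;
} /* qz_example_ */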
Example #2
0
#include "f2c.h"

/* Table of constant values (assumed, as generated by f2c for this file). */
static integer c__2 = 2;
static integer c__1 = 1;

/* Subroutine */ int ztgex2_(logical *wantq, logical *wantz, integer *n, 
	doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb, 
	doublecomplex *q, integer *ldq, doublecomplex *z__, integer *ldz, 
	integer *j1, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
	    z_offset, i__1, i__2, i__3;
    doublereal d__1;
    doublecomplex z__1, z__2, z__3;

    /* Builtin functions */
    double sqrt(doublereal), z_abs(doublecomplex *);
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    static doublecomplex f, g;
    static integer i__, m;
    static doublecomplex s[4]	/* was [2][2] */, t[4]	/* was [2][2] */;
    static doublereal cq, sa, sb, cz;
    static doublecomplex sq;
    static doublereal ss, ws;
    static doublecomplex sz;
    static doublereal eps, sum;
    static logical weak;
    static doublecomplex cdum, work[8];
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    static doublereal scale;
    extern doublereal dlamch_(char *, ftnlen);
    static logical dtrong;
    static doublereal thresh;
    extern /* Subroutine */ int zlacpy_(char *, integer *, integer *, 
	    doublecomplex *, integer *, doublecomplex *, integer *, ftnlen), 
	    zlartg_(doublecomplex *, doublecomplex *, doublereal *, 
	    doublecomplex *, doublecomplex *);
    static doublereal smlnum;
    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
	     doublereal *, doublereal *);


/*  -- LAPACK auxiliary routine (version 3.0) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */
/*     Courant Institute, Argonne National Lab, and Rice University */
/*     June 30, 1999 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZTGEX2 swaps adjacent diagonal 1 by 1 blocks (A11,B11) and (A22,B22) */
/*  in an upper triangular matrix pair (A, B) by a unitary equivalence */
/*  transformation. */

/*  (A, B) must be in generalized Schur canonical form, that is, A and */
/*  B are both upper triangular. */

/*  Optionally, the matrices Q and Z of generalized Schur vectors are */
/*  updated. */

/*         Q(in) * A(in) * Z(in)' = Q(out) * A(out) * Z(out)' */
/*         Q(in) * B(in) * Z(in)' = Q(out) * B(out) * Z(out)' */


/*  Arguments */
/*  ========= */

/*  WANTQ   (input) LOGICAL */
/*          .TRUE. : update the left transformation matrix Q; */
/*          .FALSE.: do not update Q. */

/*  WANTZ   (input) LOGICAL */
/*          .TRUE. : update the right transformation matrix Z; */
/*          .FALSE.: do not update Z. */

/*  N       (input) INTEGER */
/*          The order of the matrices A and B. N >= 0. */

/*  A       (input/output) COMPLEX*16 arrays, dimensions (LDA,N) */
/*          On entry, the matrix A in the pair (A, B). */
/*          On exit, the updated matrix A. */

/*  LDA     (input)  INTEGER */
/*          The leading dimension of the array A. LDA >= max(1,N). */

/*  B       (input/output) COMPLEX*16 arrays, dimensions (LDB,N) */
/*          On entry, the matrix B in the pair (A, B). */
/*          On exit, the updated matrix B. */

/*  LDB     (input)  INTEGER */
/*          The leading dimension of the array B. LDB >= max(1,N). */

/*  Q       (input/output) COMPLEX*16 array, dimension (LDZ,N) */
/*          If WANTQ = .TRUE., on entry, the unitary matrix Q. On exit, */
/*          the updated matrix Q. */
/*          Not referenced if WANTQ = .FALSE.. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. LDQ >= 1; */
/*          If WANTQ = .TRUE., LDQ >= N. */

/*  Z       (input/output) COMPLEX*16 array, dimension (LDZ,N) */
/*          If WANTZ = .TRUE., on entry, the unitary matrix Z. On exit, */
/*          the updated matrix Z. */
/*          Not referenced if WANTZ = .FALSE.. */

/*  LDZ     (input) INTEGER */
/*          The leading dimension of the array Z. LDZ >= 1; */
/*          If WANTZ = .TRUE., LDZ >= N. */

/*  J1      (input) INTEGER */
/*          The index to the first block (A11, B11). */

/*  INFO    (output) INTEGER */
/*           =0:  Successful exit. */
/*           =1:  The transformed matrix pair (A, B) would be too far */
/*                from generalized Schur form; the problem is ill- */
/*                conditioned. (A, B) may have been partially reordered, */
/*                and ILST points to the first row of the current */
/*                position of the block being moved. */


/*  Further Details */
/*  =============== */

/*  Based on contributions by */
/*     Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
/*     Umea University, S-901 87 Umea, Sweden. */

/*  In the current code both weak and strong stability tests are */
/*  performed. The user can omit the strong stability test by changing */
/*  the internal logical parameter WANDS to .FALSE.. See ref. [2] for */
/*  details. */

/*  [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
/*      Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
/*      M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
/*      Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */

/*  [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
/*      Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
/*      Estimation: Theory, Algorithms and Software, Report UMINF-94.04, */
/*      Department of Computing Science, Umea University, S-901 87 Umea, */
/*      Sweden, 1994. Also as LAPACK Working Note 87. To appear in */
/*      Numerical Algorithms, 1996. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;

    /* Function Body */
    *info = 0;

/*     Quick return if possible */

    if (*n <= 1) {
	return 0;
    }

    m = 2;
    weak = FALSE_;
    dtrong = FALSE_;

/*     Make a local copy of selected block in (A, B) */

    zlacpy_("Full", &m, &m, &a[*j1 + *j1 * a_dim1], lda, s, &c__2, (ftnlen)4);
    zlacpy_("Full", &m, &m, &b[*j1 + *j1 * b_dim1], ldb, t, &c__2, (ftnlen)4);

/*     Compute the threshold for testing the acceptance of swapping. */

    eps = dlamch_("P", (ftnlen)1);
    smlnum = dlamch_("S", (ftnlen)1) / eps;
    scale = 0.;
    sum = 1.;
    zlacpy_("Full", &m, &m, s, &c__2, work, &m, (ftnlen)4);
    zlacpy_("Full", &m, &m, t, &c__2, &work[m * m], &m, (ftnlen)4);
    i__1 = (m << 1) * m;
    zlassq_(&i__1, work, &c__1, &scale, &sum);
    sa = scale * sqrt(sum);
/* Computing MAX */
    d__1 = eps * 10. * sa;
    thresh = max(d__1,smlnum);
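
/*     THRESH = max( 10*EPS*||(S,T)||_F, SMLNUM ): the swap is accepted only
       if the residual tests below stay under this bound. */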

/*     Compute unitary QL and RQ that swap 1-by-1 and 1-by-1 blocks */
/*     using Givens rotations and perform the swap tentatively. */

    z__2.r = s[3].r * t[0].r - s[3].i * t[0].i, z__2.i = s[3].r * t[0].i + s[
	    3].i * t[0].r;
    z__3.r = t[3].r * s[0].r - t[3].i * s[0].i, z__3.i = t[3].r * s[0].i + t[
	    3].i * s[0].r;
    z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
    f.r = z__1.r, f.i = z__1.i;
    z__2.r = s[3].r * t[2].r - s[3].i * t[2].i, z__2.i = s[3].r * t[2].i + s[
	    3].i * t[2].r;
    z__3.r = t[3].r * s[2].r - t[3].i * s[2].i, z__3.i = t[3].r * s[2].i + t[
	    3].i * s[2].r;
    z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
    g.r = z__1.r, g.i = z__1.i;
    sa = z_abs(&s[3]);
    sb = z_abs(&t[3]);
    zlartg_(&g, &f, &cz, &sz, &cdum);
    z__1.r = -sz.r, z__1.i = -sz.i;
    sz.r = z__1.r, sz.i = z__1.i;
    d_cnjg(&z__1, &sz);
    zrot_(&c__2, s, &c__1, &s[2], &c__1, &cz, &z__1);
    d_cnjg(&z__1, &sz);
    zrot_(&c__2, t, &c__1, &t[2], &c__1, &cz, &z__1);
    if (sa >= sb) {
	zlartg_(s, &s[1], &cq, &sq, &cdum);
    } else {
	zlartg_(t, &t[1], &cq, &sq, &cdum);
    }
    zrot_(&c__2, s, &c__2, &s[1], &c__2, &cq, &sq);
    zrot_(&c__2, t, &c__2, &t[1], &c__2, &cq, &sq);
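
/*     (CZ,SZ) is the column rotation computed from
          F = S22*T11 - T22*S11,   G = S22*T12 - T22*S12
       (S and T are stored column-major in the 2x2 work arrays), and (CQ,SQ)
       is the row rotation taken from whichever of S or T has the larger
       (2,2) entry in modulus; together they perform the swap tentatively on
       the local copies. */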

/*     Weak stability test: |S21| + |T21| <= O(EPS F-norm((S, T))) */

    ws = z_abs(&s[1]) + z_abs(&t[1]);
    weak = ws <= thresh;
    if (! weak) {
	goto L20;
    }

    if (TRUE_) {

/*        Strong stability test: */
/*           F-norm((A-QL'*S*QR, B-QL'*T*QR)) <= O(EPS*F-norm((A, B))) */

	zlacpy_("Full", &m, &m, s, &c__2, work, &m, (ftnlen)4);
	zlacpy_("Full", &m, &m, t, &c__2, &work[m * m], &m, (ftnlen)4);
	d_cnjg(&z__2, &sz);
	z__1.r = -z__2.r, z__1.i = -z__2.i;
	zrot_(&c__2, work, &c__1, &work[2], &c__1, &cz, &z__1);
	d_cnjg(&z__2, &sz);
	z__1.r = -z__2.r, z__1.i = -z__2.i;
	zrot_(&c__2, &work[4], &c__1, &work[6], &c__1, &cz, &z__1);
	z__1.r = -sq.r, z__1.i = -sq.i;
	zrot_(&c__2, work, &c__2, &work[1], &c__2, &cq, &z__1);
	z__1.r = -sq.r, z__1.i = -sq.i;
	zrot_(&c__2, &work[4], &c__2, &work[5], &c__2, &cq, &z__1);
	for (i__ = 1; i__ <= 2; ++i__) {
	    i__1 = i__ - 1;
	    i__2 = i__ - 1;
	    i__3 = *j1 + i__ - 1 + *j1 * a_dim1;
	    z__1.r = work[i__2].r - a[i__3].r, z__1.i = work[i__2].i - a[i__3]
		    .i;
	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
	    i__1 = i__ + 1;
	    i__2 = i__ + 1;
	    i__3 = *j1 + i__ - 1 + (*j1 + 1) * a_dim1;
	    z__1.r = work[i__2].r - a[i__3].r, z__1.i = work[i__2].i - a[i__3]
		    .i;
	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
	    i__1 = i__ + 3;
	    i__2 = i__ + 3;
	    i__3 = *j1 + i__ - 1 + *j1 * b_dim1;
	    z__1.r = work[i__2].r - b[i__3].r, z__1.i = work[i__2].i - b[i__3]
		    .i;
	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
	    i__1 = i__ + 5;
	    i__2 = i__ + 5;
	    i__3 = *j1 + i__ - 1 + (*j1 + 1) * b_dim1;
	    z__1.r = work[i__2].r - b[i__3].r, z__1.i = work[i__2].i - b[i__3]
		    .i;
	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
/* L10: */
	}
	scale = 0.;
	sum = 1.;
	i__1 = (m << 1) * m;
	zlassq_(&i__1, work, &c__1, &scale, &sum);
	ss = scale * sqrt(sum);
	dtrong = ss <= thresh;
	if (! dtrong) {
	    goto L20;
	}
    }

/*     If the swap is accepted ("weakly" and "strongly"), apply the */
/*     equivalence transformations to the original matrix pair (A,B) */

    i__1 = *j1 + 1;
    d_cnjg(&z__1, &sz);
    zrot_(&i__1, &a[*j1 * a_dim1 + 1], &c__1, &a[(*j1 + 1) * a_dim1 + 1], &
	    c__1, &cz, &z__1);
    i__1 = *j1 + 1;
    d_cnjg(&z__1, &sz);
    zrot_(&i__1, &b[*j1 * b_dim1 + 1], &c__1, &b[(*j1 + 1) * b_dim1 + 1], &
	    c__1, &cz, &z__1);
    i__1 = *n - *j1 + 1;
    zrot_(&i__1, &a[*j1 + *j1 * a_dim1], lda, &a[*j1 + 1 + *j1 * a_dim1], lda,
	     &cq, &sq);
    i__1 = *n - *j1 + 1;
    zrot_(&i__1, &b[*j1 + *j1 * b_dim1], ldb, &b[*j1 + 1 + *j1 * b_dim1], ldb,
	     &cq, &sq);

/*     Set  N1 by N2 (2,1) blocks to 0 */

    i__1 = *j1 + 1 + *j1 * a_dim1;
    a[i__1].r = 0., a[i__1].i = 0.;
    i__1 = *j1 + 1 + *j1 * b_dim1;
    b[i__1].r = 0., b[i__1].i = 0.;

/*     Accumulate transformations into Q and Z if requested. */

    if (*wantz) {
	d_cnjg(&z__1, &sz);
	zrot_(n, &z__[*j1 * z_dim1 + 1], &c__1, &z__[(*j1 + 1) * z_dim1 + 1], 
		&c__1, &cz, &z__1);
    }
    if (*wantq) {
	d_cnjg(&z__1, &sq);
	zrot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[(*j1 + 1) * q_dim1 + 1], &
		c__1, &cq, &z__1);
    }

/*     Exit with INFO = 0 if swap was successfully performed. */

    return 0;

/*     Exit with INFO = 1 if swap was rejected. */

L20:
    *info = 1;
    return 0;

/*     End of ZTGEX2 */

} /* ztgex2_ */
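
/*     Illustrative sketch only, not part of LAPACK: ZTGEX2 is normally driven
       by ZTGEXC, but it can be called directly to swap the adjacent
       generalized eigenvalues in positions J1 and J1+1 of a pair (A,B) that
       is already in generalized Schur form, updating the Schur vectors Q and
       Z.  The wrapper name and the shared leading dimension LD are
       hypothetical choices made for the example; f2c.h types are assumed. */

/* Subroutine */ int swap_adjacent_(integer *n, doublecomplex *a,
	doublecomplex *b, doublecomplex *q, doublecomplex *z__, integer *ld,
	integer *j1, integer *info)
{
    logical wantq = TRUE_, wantz = TRUE_;

/*     Try the swap; on return INFO = 1 means it was rejected because the
       reordered pair would be too far from generalized Schur form. */
    ztgex2_(&wantq, &wantz, n, a, ld, b, ld, q, ld, z__, ld, j1, info);
    return 0;
} /* swap_adjacent_ */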
Example #3
0
#include "f2c.h"

/* Table of constant values (assumed, as generated by f2c for this file):
   ZLASET is called with off-diagonal value c_b2 = 0 and diagonal value
   c_b1 = 1 to initialize Q and Z to the identity. */
static doublecomplex c_b1 = {1.,0.};
static doublecomplex c_b2 = {0.,0.};
static integer c__1 = 1;

/* Subroutine */ int zgghrd_(char *compq, char *compz, integer *n, integer *
	ilo, integer *ihi, doublecomplex *a, integer *lda, doublecomplex *b, 
	integer *ldb, doublecomplex *q, integer *ldq, doublecomplex *z__, 
	integer *ldz, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
	    z_offset, i__1, i__2, i__3;
    doublecomplex z__1;

    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    doublereal c__;
    doublecomplex s;
    logical ilq, ilz;
    integer jcol, jrow;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    extern logical lsame_(char *, char *);
    doublecomplex ctemp;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    integer icompq, icompz;
    extern /* Subroutine */ int zlaset_(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, doublecomplex *, integer *), zlartg_(doublecomplex *, doublecomplex *, doublereal *, 
	    doublecomplex *, doublecomplex *);


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZGGHRD reduces a pair of complex matrices (A,B) to generalized upper */
/*  Hessenberg form using unitary transformations, where A is a */
/*  general matrix and B is upper triangular.  The form of the */
/*  generalized eigenvalue problem is */
/*     A*x = lambda*B*x, */
/*  and B is typically made upper triangular by computing its QR */
/*  factorization and moving the unitary matrix Q to the left side */
/*  of the equation. */

/*  This subroutine simultaneously reduces A to a Hessenberg matrix H: */
/*     Q**H*A*Z = H */
/*  and transforms B to another upper triangular matrix T: */
/*     Q**H*B*Z = T */
/*  in order to reduce the problem to its standard form */
/*     H*y = lambda*T*y */
/*  where y = Z**H*x. */

/*  The unitary matrices Q and Z are determined as products of Givens */
/*  rotations.  They may either be formed explicitly, or they may be */
/*  postmultiplied into input matrices Q1 and Z1, so that */
/*       Q1 * A * Z1**H = (Q1*Q) * H * (Z1*Z)**H */
/*       Q1 * B * Z1**H = (Q1*Q) * T * (Z1*Z)**H */
/*  If Q1 is the unitary matrix from the QR factorization of B in the */
/*  original equation A*x = lambda*B*x, then ZGGHRD reduces the original */
/*  problem to generalized Hessenberg form. */

/*  Arguments */
/*  ========= */

/*  COMPQ   (input) CHARACTER*1 */
/*          = 'N': do not compute Q; */
/*          = 'I': Q is initialized to the unit matrix, and the */
/*                 unitary matrix Q is returned; */
/*          = 'V': Q must contain a unitary matrix Q1 on entry, */
/*                 and the product Q1*Q is returned. */

/*  COMPZ   (input) CHARACTER*1 */
/*          = 'N': do not compute Z; */
/*          = 'I': Z is initialized to the unit matrix, and the */
/*                 unitary matrix Z is returned; */
/*          = 'V': Z must contain a unitary matrix Z1 on entry, */
/*                 and the product Z1*Z is returned. */

/*  N       (input) INTEGER */
/*          The order of the matrices A and B.  N >= 0. */

/*  ILO     (input) INTEGER */
/*  IHI     (input) INTEGER */
/*          ILO and IHI mark the rows and columns of A which are to be */
/*          reduced.  It is assumed that A is already upper triangular */
/*          in rows and columns 1:ILO-1 and IHI+1:N.  ILO and IHI are */
/*          normally set by a previous call to ZGGBAL; otherwise they */
/*          should be set to 1 and N respectively. */
/*          1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */

/*  A       (input/output) COMPLEX*16 array, dimension (LDA, N) */
/*          On entry, the N-by-N general matrix to be reduced. */
/*          On exit, the upper triangle and the first subdiagonal of A */
/*          are overwritten with the upper Hessenberg matrix H, and the */
/*          rest is set to zero. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,N). */

/*  B       (input/output) COMPLEX*16 array, dimension (LDB, N) */
/*          On entry, the N-by-N upper triangular matrix B. */
/*          On exit, the upper triangular matrix T = Q**H B Z.  The */
/*          elements below the diagonal are set to zero. */

/*  LDB     (input) INTEGER */
/*          The leading dimension of the array B.  LDB >= max(1,N). */

/*  Q       (input/output) COMPLEX*16 array, dimension (LDQ, N) */
/*          On entry, if COMPQ = 'V', the unitary matrix Q1, typically */
/*          from the QR factorization of B. */
/*          On exit, if COMPQ='I', the unitary matrix Q, and if */
/*          COMPQ = 'V', the product Q1*Q. */
/*          Not referenced if COMPQ='N'. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. */
/*          LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. */

/*  Z       (input/output) COMPLEX*16 array, dimension (LDZ, N) */
/*          On entry, if COMPZ = 'V', the unitary matrix Z1. */
/*          On exit, if COMPZ='I', the unitary matrix Z, and if */
/*          COMPZ = 'V', the product Z1*Z. */
/*          Not referenced if COMPZ='N'. */

/*  LDZ     (input) INTEGER */
/*          The leading dimension of the array Z. */
/*          LDZ >= N if COMPZ='V' or 'I'; LDZ >= 1 otherwise. */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit. */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */

/*  Further Details */
/*  =============== */

/*  This routine reduces A to Hessenberg and B to triangular form by */
/*  an unblocked reduction, as described in _Matrix_Computations_, */
/*  by Golub and van Loan (Johns Hopkins Press). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Decode COMPQ */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;

    /* Function Body */
    if (lsame_(compq, "N")) {
	ilq = FALSE_;
	icompq = 1;
    } else if (lsame_(compq, "V")) {
	ilq = TRUE_;
	icompq = 2;
    } else if (lsame_(compq, "I")) {
	ilq = TRUE_;
	icompq = 3;
    } else {
	icompq = 0;
    }

/*     Decode COMPZ */

    if (lsame_(compz, "N")) {
	ilz = FALSE_;
	icompz = 1;
    } else if (lsame_(compz, "V")) {
	ilz = TRUE_;
	icompz = 2;
    } else if (lsame_(compz, "I")) {
	ilz = TRUE_;
	icompz = 3;
    } else {
	icompz = 0;
    }

/*     Test the input parameters. */

    *info = 0;
    if (icompq <= 0) {
	*info = -1;
    } else if (icompz <= 0) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*ilo < 1) {
	*info = -4;
    } else if (*ihi > *n || *ihi < *ilo - 1) {
	*info = -5;
    } else if (*lda < max(1,*n)) {
	*info = -7;
    } else if (*ldb < max(1,*n)) {
	*info = -9;
    } else if ((ilq && *ldq < *n) || *ldq < 1) {
	*info = -11;
    } else if ((ilz && *ldz < *n) || *ldz < 1) {
	*info = -13;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZGGHRD", &i__1);
	return 0;
    }

/*     Initialize Q and Z if desired. */

    if (icompq == 3) {
	zlaset_("Full", n, n, &c_b2, &c_b1, &q[q_offset], ldq);
    }
    if (icompz == 3) {
	zlaset_("Full", n, n, &c_b2, &c_b1, &z__[z_offset], ldz);
    }

/*     Quick return if possible */

    if (*n <= 1) {
	return 0;
    }

/*     Zero out lower triangle of B */

    i__1 = *n - 1;
    for (jcol = 1; jcol <= i__1; ++jcol) {
	i__2 = *n;
	for (jrow = jcol + 1; jrow <= i__2; ++jrow) {
	    i__3 = jrow + jcol * b_dim1;
	    b[i__3].r = 0., b[i__3].i = 0.;
/* L10: */
	}
/* L20: */
    }

/*     Reduce A and B */

    i__1 = *ihi - 2;
    for (jcol = *ilo; jcol <= i__1; ++jcol) {

	i__2 = jcol + 2;
	for (jrow = *ihi; jrow >= i__2; --jrow) {

/*           Step 1: rotate rows JROW-1, JROW to kill A(JROW,JCOL) */

	    i__3 = jrow - 1 + jcol * a_dim1;
	    ctemp.r = a[i__3].r, ctemp.i = a[i__3].i;
	    zlartg_(&ctemp, &a[jrow + jcol * a_dim1], &c__, &s, &a[jrow - 1 + 
		    jcol * a_dim1]);
	    i__3 = jrow + jcol * a_dim1;
	    a[i__3].r = 0., a[i__3].i = 0.;
	    i__3 = *n - jcol;
	    zrot_(&i__3, &a[jrow - 1 + (jcol + 1) * a_dim1], lda, &a[jrow + (
		    jcol + 1) * a_dim1], lda, &c__, &s);
	    i__3 = *n + 2 - jrow;
	    zrot_(&i__3, &b[jrow - 1 + (jrow - 1) * b_dim1], ldb, &b[jrow + (
		    jrow - 1) * b_dim1], ldb, &c__, &s);
	    if (ilq) {
		d_cnjg(&z__1, &s);
		zrot_(n, &q[(jrow - 1) * q_dim1 + 1], &c__1, &q[jrow * q_dim1 
			+ 1], &c__1, &c__, &z__1);
	    }

/*           Step 2: rotate columns JROW, JROW-1 to kill B(JROW,JROW-1) */

	    i__3 = jrow + jrow * b_dim1;
	    ctemp.r = b[i__3].r, ctemp.i = b[i__3].i;
	    zlartg_(&ctemp, &b[jrow + (jrow - 1) * b_dim1], &c__, &s, &b[jrow 
		    + jrow * b_dim1]);
	    i__3 = jrow + (jrow - 1) * b_dim1;
	    b[i__3].r = 0., b[i__3].i = 0.;
	    zrot_(ihi, &a[jrow * a_dim1 + 1], &c__1, &a[(jrow - 1) * a_dim1 + 
		    1], &c__1, &c__, &s);
	    i__3 = jrow - 1;
	    zrot_(&i__3, &b[jrow * b_dim1 + 1], &c__1, &b[(jrow - 1) * b_dim1 
		    + 1], &c__1, &c__, &s);
	    if (ilz) {
		zrot_(n, &z__[jrow * z_dim1 + 1], &c__1, &z__[(jrow - 1) * 
			z_dim1 + 1], &c__1, &c__, &s);
	    }
/* L30: */
	}
/* L40: */
    }

    return 0;

/*     End of ZGGHRD */

} /* zgghrd_ */
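/* ------------------------------------------------------------------ */
/* Usage sketch (not part of the LAPACK source): a minimal, hypothetical */
/* driver showing how the ZGGHRD translation above is typically called.  */
/* It assumes an f2c/CLAPACK environment ("f2c.h" supplying integer,     */
/* doublereal and doublecomplex) and the 14-argument prototype without   */
/* trailing ftnlen lengths, as declared below.  Data values are made up. */

#include "f2c.h"

extern int zgghrd_(char *compq, char *compz, integer *n, integer *ilo,
	integer *ihi, doublecomplex *a, integer *lda, doublecomplex *b,
	integer *ldb, doublecomplex *q, integer *ldq, doublecomplex *z__,
	integer *ldz, integer *info);

static void example_zgghrd(void)
{
    integer n = 4, ilo = 1, ihi = 4, lda = 4, ldb = 4, ldq = 4, ldz = 4, info;
    doublecomplex a[16], b[16], q[16], z[16];
    integer i;

    /* Fill A with an arbitrary dense matrix and B with an upper */
    /* triangular matrix, both stored column-major.              */
    for (i = 0; i < 16; ++i) {
	a[i].r = (doublereal) (i + 1), a[i].i = 0.;
	b[i].r = (i % 4 <= i / 4) ? 1. : 0., b[i].i = 0.;
    }

    /* COMPQ = COMPZ = 'I': accumulate the Givens rotations in Q and Z. */
    zgghrd_("I", "I", &n, &ilo, &ihi, a, &lda, b, &ldb, q, &ldq, z, &ldz,
	    &info);

    /* On success info == 0; A now holds the upper Hessenberg H and B   */
    /* the upper triangular T, with Q**H*A_in*Z = H and Q**H*B_in*Z = T. */
}
/* ------------------------------------------------------------------ */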
Example #4
/* Subroutine */ int zgbbrd_(char *vect, integer *m, integer *n, integer *ncc, 
	 integer *kl, integer *ku, doublecomplex *ab, integer *ldab, 
	doublereal *d__, doublereal *e, doublecomplex *q, integer *ldq, 
	doublecomplex *pt, integer *ldpt, doublecomplex *c__, integer *ldc, 
	doublecomplex *work, doublereal *rwork, integer *info)
{
    /* System generated locals */
    integer ab_dim1, ab_offset, c_dim1, c_offset, pt_dim1, pt_offset, q_dim1, 
	    q_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
    doublecomplex z__1, z__2, z__3;

    /* Local variables */
    integer i__, j, l;
    doublecomplex t;
    integer j1, j2, kb;
    doublecomplex ra, rb;
    doublereal rc;
    integer kk, ml, nr, mu;
    doublecomplex rs;
    integer kb1, ml0, mu0, klm, kun, nrt, klu1, inca;
    doublereal abst;
    logical wantb, wantc;
    integer minmn;
    logical wantq;
    logical wantpt;

/*  -- LAPACK routine (version 3.2) -- */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  ZGBBRD reduces a complex general m-by-n band matrix A to real upper */
/*  bidiagonal form B by a unitary transformation: Q' * A * P = B. */

/*  The routine computes B, and optionally forms Q or P', or computes */
/*  Q'*C for a given matrix C. */

/*  Arguments */
/*  ========= */

/*  VECT    (input) CHARACTER*1 */
/*          Specifies whether or not the matrices Q and P' are to be */
/*          formed. */
/*          = 'N': do not form Q or P'; */
/*          = 'Q': form Q only; */
/*          = 'P': form P' only; */
/*          = 'B': form both. */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A.  M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A.  N >= 0. */

/*  NCC     (input) INTEGER */
/*          The number of columns of the matrix C.  NCC >= 0. */

/*  KL      (input) INTEGER */
/*          The number of subdiagonals of the matrix A. KL >= 0. */

/*  KU      (input) INTEGER */
/*          The number of superdiagonals of the matrix A. KU >= 0. */

/*  AB      (input/output) COMPLEX*16 array, dimension (LDAB,N) */
/*          On entry, the m-by-n band matrix A, stored in rows 1 to */
/*          KL+KU+1. The j-th column of A is stored in the j-th column of */
/*          the array AB as follows: */
/*          AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl). */
/*          On exit, A is overwritten by values generated during the */
/*          reduction. */

/*  LDAB    (input) INTEGER */
/*          The leading dimension of the array A. LDAB >= KL+KU+1. */

/*  D       (output) DOUBLE PRECISION array, dimension (min(M,N)) */
/*          The diagonal elements of the bidiagonal matrix B. */

/*  E       (output) DOUBLE PRECISION array, dimension (min(M,N)-1) */
/*          The superdiagonal elements of the bidiagonal matrix B. */

/*  Q       (output) COMPLEX*16 array, dimension (LDQ,M) */
/*          If VECT = 'Q' or 'B', the m-by-m unitary matrix Q. */
/*          If VECT = 'N' or 'P', the array Q is not referenced. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. */
/*          LDQ >= max(1,M) if VECT = 'Q' or 'B'; LDQ >= 1 otherwise. */

/*  PT      (output) COMPLEX*16 array, dimension (LDPT,N) */
/*          If VECT = 'P' or 'B', the n-by-n unitary matrix P'. */
/*          If VECT = 'N' or 'Q', the array PT is not referenced. */

/*  LDPT    (input) INTEGER */
/*          The leading dimension of the array PT. */
/*          LDPT >= max(1,N) if VECT = 'P' or 'B'; LDPT >= 1 otherwise. */

/*  C       (input/output) COMPLEX*16 array, dimension (LDC,NCC) */
/*          On entry, an m-by-ncc matrix C. */
/*          On exit, C is overwritten by Q'*C. */
/*          C is not referenced if NCC = 0. */

/*  LDC     (input) INTEGER */
/*          The leading dimension of the array C. */
/*          LDC >= max(1,M) if NCC > 0; LDC >= 1 if NCC = 0. */

/*  WORK    (workspace) COMPLEX*16 array, dimension (max(M,N)) */

/*  RWORK   (workspace) DOUBLE PRECISION array, dimension (max(M,N)) */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit. */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */

/*  ===================================================================== */

/*     Test the input parameters */

    /* Parameter adjustments */
    ab_dim1 = *ldab;
    ab_offset = 1 + ab_dim1;
    ab -= ab_offset;
    --d__;
    --e;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    pt_dim1 = *ldpt;
    pt_offset = 1 + pt_dim1;
    pt -= pt_offset;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1;
    c__ -= c_offset;
    --work;
    --rwork;

    /* Function Body */
    wantb = lsame_(vect, "B");
    wantq = lsame_(vect, "Q") || wantb;
    wantpt = lsame_(vect, "P") || wantb;
    wantc = *ncc > 0;
    klu1 = *kl + *ku + 1;
    *info = 0;
    if (! wantq && ! wantpt && ! lsame_(vect, "N")) {
	*info = -1;
    } else if (*m < 0) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*ncc < 0) {
	*info = -4;
    } else if (*kl < 0) {
	*info = -5;
    } else if (*ku < 0) {
	*info = -6;
    } else if (*ldab < klu1) {
	*info = -8;
    } else if (*ldq < 1 || wantq && *ldq < max(1,*m)) {
	*info = -12;
    } else if (*ldpt < 1 || wantpt && *ldpt < max(1,*n)) {
	*info = -14;
    } else if (*ldc < 1 || wantc && *ldc < max(1,*m)) {
	*info = -16;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZGBBRD", &i__1);
	return 0;
    }

/*     Initialize Q and P' to the unit matrix, if needed */

    if (wantq) {
	zlaset_("Full", m, m, &c_b1, &c_b2, &q[q_offset], ldq);
    }
    if (wantpt) {
	zlaset_("Full", n, n, &c_b1, &c_b2, &pt[pt_offset], ldpt);
    }

/*     Quick return if possible. */

    if (*m == 0 || *n == 0) {
	return 0;
    }

    minmn = min(*m,*n);

    if (*kl + *ku > 1) {

/*        Reduce to upper bidiagonal form if KU > 0; if KU = 0, reduce */
/*        first to lower bidiagonal form and then transform to upper */
/*        bidiagonal */

	if (*ku > 0) {
	    ml0 = 1;
	    mu0 = 2;
	} else {
	    ml0 = 2;
	    mu0 = 1;
	}

/*        Wherever possible, plane rotations are generated and applied in */
/*        vector operations of length NR over the index set J1:J2:KLU1. */

/*        The complex sines of the plane rotations are stored in WORK, */
/*        and the real cosines in RWORK. */

/* Computing MIN */
	i__1 = *m - 1;
	klm = min(i__1,*kl);
/* Computing MIN */
	i__1 = *n - 1;
	kun = min(i__1,*ku);
	kb = klm + kun;
	kb1 = kb + 1;
	inca = kb1 * *ldab;
	nr = 0;
	j1 = klm + 2;
	j2 = 1 - kun;

	i__1 = minmn;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Reduce i-th column and i-th row of matrix to bidiagonal form */

	    ml = klm + 1;
	    mu = kun + 1;
	    i__2 = kb;
	    for (kk = 1; kk <= i__2; ++kk) {
		j1 += kb;
		j2 += kb;

/*              generate plane rotations to annihilate nonzero elements */
/*              which have been created below the band */

		if (nr > 0) {
		    zlargv_(&nr, &ab[klu1 + (j1 - klm - 1) * ab_dim1], &inca, 
			    &work[j1], &kb1, &rwork[j1], &kb1);
		}

/*              apply plane rotations from the left */

		i__3 = kb;
		for (l = 1; l <= i__3; ++l) {
		    if (j2 - klm + l - 1 > *n) {
			nrt = nr - 1;
		    } else {
			nrt = nr;
		    }
		    if (nrt > 0) {
			zlartv_(&nrt, &ab[klu1 - l + (j1 - klm + l - 1) * 
				ab_dim1], &inca, &ab[klu1 - l + 1 + (j1 - klm 
				+ l - 1) * ab_dim1], &inca, &rwork[j1], &work[
				j1], &kb1);
		    }
		}

		if (ml > ml0) {
		    if (ml <= *m - i__ + 1) {

/*                    generate plane rotation to annihilate a(i+ml-1,i) */
/*                    within the band, and apply rotation from the left */

			zlartg_(&ab[*ku + ml - 1 + i__ * ab_dim1], &ab[*ku + 
				ml + i__ * ab_dim1], &rwork[i__ + ml - 1], &
				work[i__ + ml - 1], &ra);
			i__3 = *ku + ml - 1 + i__ * ab_dim1;
			ab[i__3].r = ra.r, ab[i__3].i = ra.i;
			if (i__ < *n) {
/* Computing MIN */
			    i__4 = *ku + ml - 2, i__5 = *n - i__;
			    i__3 = min(i__4,i__5);
			    i__6 = *ldab - 1;
			    i__7 = *ldab - 1;
			    zrot_(&i__3, &ab[*ku + ml - 2 + (i__ + 1) * 
				    ab_dim1], &i__6, &ab[*ku + ml - 1 + (i__ 
				    + 1) * ab_dim1], &i__7, &rwork[i__ + ml - 
				    1], &work[i__ + ml - 1]);
			}
		    }
		    ++nr;
		    j1 -= kb1;
		}

		if (wantq) {

/*                 accumulate product of plane rotations in Q */

		    i__3 = j2;
		    i__4 = kb1;
		    for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) 
			    {
			d_cnjg(&z__1, &work[j]);
			zrot_(m, &q[(j - 1) * q_dim1 + 1], &c__1, &q[j * 
				q_dim1 + 1], &c__1, &rwork[j], &z__1);
		    }
		}

		if (wantc) {

/*                 apply plane rotations to C */

		    i__4 = j2;
		    i__3 = kb1;
		    for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) 
			    {
			zrot_(ncc, &c__[j - 1 + c_dim1], ldc, 
				&c__[j + c_dim1], ldc, &rwork[j], &work[j]);
		    }
		}

		if (j2 + kun > *n) {

/*                 adjust J2 to keep within the bounds of the matrix */

		    --nr;
		    j2 -= kb1;
		}

		i__3 = j2;
		i__4 = kb1;
		for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {

/*                 create nonzero element a(j-1,j+ku) above the band */
/*                 and store it in WORK(n+1:2*n) */

		    i__5 = j + kun;
		    i__6 = j;
		    i__7 = (j + kun) * ab_dim1 + 1;
		    z__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[
			    i__7].i, z__1.i = work[i__6].r * ab[i__7].i + 
			    work[i__6].i * ab[i__7].r;
		    work[i__5].r = z__1.r, work[i__5].i = z__1.i;
		    i__5 = (j + kun) * ab_dim1 + 1;
		    i__6 = j;
		    i__7 = (j + kun) * ab_dim1 + 1;
		    z__1.r = rwork[i__6] * ab[i__7].r, z__1.i = rwork[i__6] * 
			    ab[i__7].i;
		    ab[i__5].r = z__1.r, ab[i__5].i = z__1.i;
		}

/*              generate plane rotations to annihilate nonzero elements */
/*              which have been generated above the band */

		if (nr > 0) {
		    zlargv_(&nr, &ab[(j1 + kun - 1) * ab_dim1 + 1], &inca, &
			    work[j1 + kun], &kb1, &rwork[j1 + kun], &kb1);
		}

/*              apply plane rotations from the right */

		i__4 = kb;
		for (l = 1; l <= i__4; ++l) {
		    if (j2 + l - 1 > *m) {
			nrt = nr - 1;
		    } else {
			nrt = nr;
		    }
		    if (nrt > 0) {
			zlartv_(&nrt, &ab[l + 1 + (j1 + kun - 1) * ab_dim1], &
				inca, &ab[l + (j1 + kun) * ab_dim1], &inca, &
				rwork[j1 + kun], &work[j1 + kun], &kb1);
		    }
		}

		if (ml == ml0 && mu > mu0) {
		    if (mu <= *n - i__ + 1) {

/*                    generate plane rotation to annihilate a(i,i+mu-1) */
/*                    within the band, and apply rotation from the right */

			zlartg_(&ab[*ku - mu + 3 + (i__ + mu - 2) * ab_dim1], 
				&ab[*ku - mu + 2 + (i__ + mu - 1) * ab_dim1], 
				&rwork[i__ + mu - 1], &work[i__ + mu - 1], &
				ra);
			i__4 = *ku - mu + 3 + (i__ + mu - 2) * ab_dim1;
			ab[i__4].r = ra.r, ab[i__4].i = ra.i;
/* Computing MIN */
			i__3 = *kl + mu - 2, i__5 = *m - i__;
			i__4 = min(i__3,i__5);
			zrot_(&i__4, &ab[*ku - mu + 4 + (i__ + mu - 2) * 
				ab_dim1], &c__1, &ab[*ku - mu + 3 + (i__ + mu 
				- 1) * ab_dim1], &c__1, &rwork[i__ + mu - 1], 
				&work[i__ + mu - 1]);
		    }
		    ++nr;
		    j1 -= kb1;
		}

		if (wantpt) {

/*                 accumulate product of plane rotations in P' */

		    i__4 = j2;
		    i__3 = kb1;
		    for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) 
			    {
			d_cnjg(&z__1, &work[j + kun]);
			zrot_(n, &pt[j + kun - 1 + pt_dim1], ldpt, &pt[j + 
				kun + pt_dim1], ldpt, &rwork[j + kun], &z__1);
		    }
		}

		if (j2 + kb > *m) {

/*                 adjust J2 to keep within the bounds of the matrix */

		    --nr;
		    j2 -= kb1;
		}

		i__3 = j2;
		i__4 = kb1;
		for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {

/*                 create nonzero element a(j+kl+ku,j+ku-1) below the */
/*                 band and store it in WORK(1:n) */

		    i__5 = j + kb;
		    i__6 = j + kun;
		    i__7 = klu1 + (j + kun) * ab_dim1;
		    z__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[
			    i__7].i, z__1.i = work[i__6].r * ab[i__7].i + 
			    work[i__6].i * ab[i__7].r;
		    work[i__5].r = z__1.r, work[i__5].i = z__1.i;
		    i__5 = klu1 + (j + kun) * ab_dim1;
		    i__6 = j + kun;
		    i__7 = klu1 + (j + kun) * ab_dim1;
		    z__1.r = rwork[i__6] * ab[i__7].r, z__1.i = rwork[i__6] * 
			    ab[i__7].i;
		    ab[i__5].r = z__1.r, ab[i__5].i = z__1.i;
		}

		if (ml > ml0) {
		    --ml;
		} else {
		    --mu;
		}
	    }
	}
    }

    if (*ku == 0 && *kl > 0) {

/*        A has been reduced to complex lower bidiagonal form */

/*        Transform lower bidiagonal form to upper bidiagonal by applying */
/*        plane rotations from the left, overwriting superdiagonal */
/*        elements on subdiagonal elements */

/* Computing MIN */
	i__2 = *m - 1;
	i__1 = min(i__2,*n);
	for (i__ = 1; i__ <= i__1; ++i__) {
	    zlartg_(&ab[i__ * ab_dim1 + 1], &ab[i__ * ab_dim1 + 2], &rc, &rs, 
		    &ra);
	    i__2 = i__ * ab_dim1 + 1;
	    ab[i__2].r = ra.r, ab[i__2].i = ra.i;
	    if (i__ < *n) {
		i__2 = i__ * ab_dim1 + 2;
		i__4 = (i__ + 1) * ab_dim1 + 1;
		z__1.r = rs.r * ab[i__4].r - rs.i * ab[i__4].i, z__1.i = rs.r 
			* ab[i__4].i + rs.i * ab[i__4].r;
		ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
		i__2 = (i__ + 1) * ab_dim1 + 1;
		i__4 = (i__ + 1) * ab_dim1 + 1;
		z__1.r = rc * ab[i__4].r, z__1.i = rc * ab[i__4].i;
		ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
	    }
	    if (wantq) {
		d_cnjg(&z__1, &rs);
		zrot_(m, &q[i__ * q_dim1 + 1], &c__1, &q[(i__ + 1) * q_dim1 + 
			1], &c__1, &rc, &z__1);
	    }
	    if (wantc) {
		zrot_(ncc, &c__[i__ + c_dim1], ldc, &c__[i__ + 1 + c_dim1], 
			ldc, &rc, &rs);
	    }
	}
    } else {

/*        A has been reduced to complex upper bidiagonal form or is */
/*        diagonal */

	if (*ku > 0 && *m < *n) {

/*           Annihilate a(m,m+1) by applying plane rotations from the */
/*           right */

	    i__1 = *ku + (*m + 1) * ab_dim1;
	    rb.r = ab[i__1].r, rb.i = ab[i__1].i;
	    for (i__ = *m; i__ >= 1; --i__) {
		zlartg_(&ab[*ku + 1 + i__ * ab_dim1], &rb, &rc, &rs, &ra);
		i__1 = *ku + 1 + i__ * ab_dim1;
		ab[i__1].r = ra.r, ab[i__1].i = ra.i;
		if (i__ > 1) {
		    d_cnjg(&z__3, &rs);
		    z__2.r = -z__3.r, z__2.i = -z__3.i;
		    i__1 = *ku + i__ * ab_dim1;
		    z__1.r = z__2.r * ab[i__1].r - z__2.i * ab[i__1].i, 
			    z__1.i = z__2.r * ab[i__1].i + z__2.i * ab[i__1]
			    .r;
		    rb.r = z__1.r, rb.i = z__1.i;
		    i__1 = *ku + i__ * ab_dim1;
		    i__2 = *ku + i__ * ab_dim1;
		    z__1.r = rc * ab[i__2].r, z__1.i = rc * ab[i__2].i;
		    ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
		}
		if (wantpt) {
		    d_cnjg(&z__1, &rs);
		    zrot_(n, &pt[i__ + pt_dim1], ldpt, &pt[*m + 1 + pt_dim1], 
			    ldpt, &rc, &z__1);
		}
	    }
	}
    }

/*     Make diagonal and superdiagonal elements real, storing them in D */
/*     and E */

    i__1 = *ku + 1 + ab_dim1;
    t.r = ab[i__1].r, t.i = ab[i__1].i;
    i__1 = minmn;
    for (i__ = 1; i__ <= i__1; ++i__) {
	abst = z_abs(&t);
	d__[i__] = abst;
	if (abst != 0.) {
	    z__1.r = t.r / abst, z__1.i = t.i / abst;
	    t.r = z__1.r, t.i = z__1.i;
	} else {
	    t.r = 1., t.i = 0.;
	}
	if (wantq) {
	    zscal_(m, &t, &q[i__ * q_dim1 + 1], &c__1);
	}
	if (wantc) {
	    d_cnjg(&z__1, &t);
	    zscal_(ncc, &z__1, &c__[i__ + c_dim1], ldc);
	}
	if (i__ < minmn) {
	    if (*ku == 0 && *kl == 0) {
		e[i__] = 0.;
		i__2 = (i__ + 1) * ab_dim1 + 1;
		t.r = ab[i__2].r, t.i = ab[i__2].i;
	    } else {
		if (*ku == 0) {
		    i__2 = i__ * ab_dim1 + 2;
		    d_cnjg(&z__2, &t);
		    z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, 
			    z__1.i = ab[i__2].r * z__2.i + ab[i__2].i * 
			    z__2.r;
		    t.r = z__1.r, t.i = z__1.i;
		} else {
		    i__2 = *ku + (i__ + 1) * ab_dim1;
		    d_cnjg(&z__2, &t);
		    z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, 
			    z__1.i = ab[i__2].r * z__2.i + ab[i__2].i * 
			    z__2.r;
		    t.r = z__1.r, t.i = z__1.i;
		}
		abst = z_abs(&t);
		e[i__] = abst;
		if (abst != 0.) {
		    z__1.r = t.r / abst, z__1.i = t.i / abst;
		    t.r = z__1.r, t.i = z__1.i;
		} else {
		    t.r = 1., t.i = 0.;
		}
		if (wantpt) {
		    zscal_(n, &t, &pt[i__ + 1 + pt_dim1], ldpt);
		}
		i__2 = *ku + 1 + (i__ + 1) * ab_dim1;
		d_cnjg(&z__2, &t);
		z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, z__1.i = 
			ab[i__2].r * z__2.i + ab[i__2].i * z__2.r;
		t.r = z__1.r, t.i = z__1.i;
	    }
	}
    }
    return 0;

/*     End of ZGBBRD */

} /* zgbbrd_ */
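/* ------------------------------------------------------------------ */
/* Usage sketch (not part of the LAPACK source): a hypothetical call    */
/* of the ZGBBRD translation above on a small complex tridiagonal band  */
/* matrix (KL = KU = 1), computing only the real bidiagonal B in D, E.  */
/* It assumes an f2c/CLAPACK environment ("f2c.h"); the band storage    */
/* follows the AB argument description above, and the data are made up. */

#include "f2c.h"

extern int zgbbrd_(char *vect, integer *m, integer *n, integer *ncc,
	integer *kl, integer *ku, doublecomplex *ab, integer *ldab,
	doublereal *d__, doublereal *e, doublecomplex *q, integer *ldq,
	doublecomplex *pt, integer *ldpt, doublecomplex *c__, integer *ldc,
	doublecomplex *work, doublereal *rwork, integer *info);

static void example_zgbbrd(void)
{
    integer m = 4, n = 4, ncc = 0, kl = 1, ku = 1, info;
    integer ldab = 3, ldq = 1, ldpt = 1, ldc = 1;	/* ldab = kl+ku+1 */
    doublecomplex ab[3 * 4], q[1], pt[1], c[1], work[4];
    doublereal d[4], e[3], rwork[4];
    integer i, j;

    /* Column j of A occupies AB(ku+1+i-j, j); fill the whole band with */
    /* simple nonzero values (unused corner entries are ignored).       */
    for (j = 0; j < 4; ++j) {
	for (i = 0; i < 3; ++i) {
	    ab[i + j * 3].r = 1. + i + j;
	    ab[i + j * 3].i = 0.5;
	}
    }

    /* VECT = 'N': form neither Q nor P'; with NCC = 0, Q, PT and C are */
    /* not referenced, so dummy 1-element arrays suffice.               */
    zgbbrd_("N", &m, &n, &ncc, &kl, &ku, ab, &ldab, d, e, q, &ldq,
	    pt, &ldpt, c, &ldc, work, rwork, &info);

    /* info == 0 on success; d[0..3] and e[0..2] then hold the real */
    /* bidiagonal matrix B with Q' * A * P = B.                     */
}
/* ------------------------------------------------------------------ */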
/*<    >*/
/* Subroutine */ int zgghrd_(char *compq, char *compz, integer *n, integer *
        ilo, integer *ihi, doublecomplex *a, integer *lda, doublecomplex *b,
        integer *ldb, doublecomplex *q, integer *ldq, doublecomplex *z__,
        integer *ldz, integer *info, ftnlen compq_len, ftnlen compz_len)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
            z_offset, i__1, i__2, i__3;
    doublecomplex z__1;

    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    doublereal c__;
    doublecomplex s;
    logical ilq, ilz;
    integer jcol, jrow;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *,
            doublecomplex *, integer *, doublereal *, doublecomplex *);
    extern logical lsame_(const char *, const char *, ftnlen, ftnlen);
    doublecomplex ctemp;
    extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);
    integer icompq, icompz;
    extern /* Subroutine */ int zlaset_(char *, integer *, integer *,
            doublecomplex *, doublecomplex *, doublecomplex *, integer *,
            ftnlen), zlartg_(doublecomplex *, doublecomplex *, doublereal *,
            doublecomplex *, doublecomplex *);
    (void)compq_len;
    (void)compz_len;

/*  -- LAPACK routine (version 3.2) -- */
/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*<       CHARACTER          COMPQ, COMPZ >*/
/*<       INTEGER            IHI, ILO, INFO, LDA, LDB, LDQ, LDZ, N >*/
/*     .. */
/*     .. Array Arguments .. */
/*<    >*/
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZGGHRD reduces a pair of complex matrices (A,B) to generalized upper */
/*  Hessenberg form using unitary transformations, where A is a */
/*  general matrix and B is upper triangular.  The form of the */
/*  generalized eigenvalue problem is */
/*     A*x = lambda*B*x, */
/*  and B is typically made upper triangular by computing its QR */
/*  factorization and moving the unitary matrix Q to the left side */
/*  of the equation. */

/*  This subroutine simultaneously reduces A to a Hessenberg matrix H: */
/*     Q**H*A*Z = H */
/*  and transforms B to another upper triangular matrix T: */
/*     Q**H*B*Z = T */
/*  in order to reduce the problem to its standard form */
/*     H*y = lambda*T*y */
/*  where y = Z**H*x. */

/*  The unitary matrices Q and Z are determined as products of Givens */
/*  rotations.  They may either be formed explicitly, or they may be */
/*  postmultiplied into input matrices Q1 and Z1, so that */
/*       Q1 * A * Z1**H = (Q1*Q) * H * (Z1*Z)**H */
/*       Q1 * B * Z1**H = (Q1*Q) * T * (Z1*Z)**H */
/*  If Q1 is the unitary matrix from the QR factorization of B in the */
/*  original equation A*x = lambda*B*x, then ZGGHRD reduces the original */
/*  problem to generalized Hessenberg form. */

/*  Arguments */
/*  ========= */

/*  COMPQ   (input) CHARACTER*1 */
/*          = 'N': do not compute Q; */
/*          = 'I': Q is initialized to the unit matrix, and the */
/*                 unitary matrix Q is returned; */
/*          = 'V': Q must contain a unitary matrix Q1 on entry, */
/*                 and the product Q1*Q is returned. */

/*  COMPZ   (input) CHARACTER*1 */
/*          = 'N': do not compute Z; */
/*          = 'I': Z is initialized to the unit matrix, and the */
/*                 unitary matrix Z is returned; */
/*          = 'V': Z must contain a unitary matrix Z1 on entry, */
/*                 and the product Z1*Z is returned. */

/*  N       (input) INTEGER */
/*          The order of the matrices A and B.  N >= 0. */

/*  ILO     (input) INTEGER */
/*  IHI     (input) INTEGER */
/*          ILO and IHI mark the rows and columns of A which are to be */
/*          reduced.  It is assumed that A is already upper triangular */
/*          in rows and columns 1:ILO-1 and IHI+1:N.  ILO and IHI are */
/*          normally set by a previous call to ZGGBAL; otherwise they */
/*          should be set to 1 and N respectively. */
/*          1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */

/*  A       (input/output) COMPLEX*16 array, dimension (LDA, N) */
/*          On entry, the N-by-N general matrix to be reduced. */
/*          On exit, the upper triangle and the first subdiagonal of A */
/*          are overwritten with the upper Hessenberg matrix H, and the */
/*          rest is set to zero. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,N). */

/*  B       (input/output) COMPLEX*16 array, dimension (LDB, N) */
/*          On entry, the N-by-N upper triangular matrix B. */
/*          On exit, the upper triangular matrix T = Q**H B Z.  The */
/*          elements below the diagonal are set to zero. */

/*  LDB     (input) INTEGER */
/*          The leading dimension of the array B.  LDB >= max(1,N). */

/*  Q       (input/output) COMPLEX*16 array, dimension (LDQ, N) */
/*          On entry, if COMPQ = 'V', the unitary matrix Q1, typically */
/*          from the QR factorization of B. */
/*          On exit, if COMPQ='I', the unitary matrix Q, and if */
/*          COMPQ = 'V', the product Q1*Q. */
/*          Not referenced if COMPQ='N'. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. */
/*          LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. */

/*  Z       (input/output) COMPLEX*16 array, dimension (LDZ, N) */
/*          On entry, if COMPZ = 'V', the unitary matrix Z1. */
/*          On exit, if COMPZ='I', the unitary matrix Z, and if */
/*          COMPZ = 'V', the product Z1*Z. */
/*          Not referenced if COMPZ='N'. */

/*  LDZ     (input) INTEGER */
/*          The leading dimension of the array Z. */
/*          LDZ >= N if COMPZ='V' or 'I'; LDZ >= 1 otherwise. */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit. */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */

/*  Further Details */
/*  =============== */

/*  This routine reduces A to Hessenberg and B to triangular form by */
/*  an unblocked reduction, as described in _Matrix_Computations_, */
/*  by Golub and van Loan (Johns Hopkins Press). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*<       COMPLEX*16         CONE, CZERO >*/
/*<    >*/
/*     .. */
/*     .. Local Scalars .. */
/*<       LOGICAL            ILQ, ILZ >*/
/*<       INTEGER            ICOMPQ, ICOMPZ, JCOL, JROW >*/
/*<       DOUBLE PRECISION   C >*/
/*<       COMPLEX*16         CTEMP, S >*/
/*     .. */
/*     .. External Functions .. */
/*<       LOGICAL            LSAME >*/
/*<       EXTERNAL           LSAME >*/
/*     .. */
/*     .. External Subroutines .. */
/*<       EXTERNAL           XERBLA, ZLARTG, ZLASET, ZROT >*/
/*     .. */
/*     .. Intrinsic Functions .. */
/*<       INTRINSIC          DCONJG, MAX >*/
/*     .. */
/*     .. Executable Statements .. */

/*     Decode COMPQ */

/*<       IF( LSAME( COMPQ, 'N' ) ) THEN >*/
    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;

    /* Function Body */
    if (lsame_(compq, "N", (ftnlen)1, (ftnlen)1)) {
/*<          ILQ = .FALSE. >*/
        ilq = FALSE_;
/*<          ICOMPQ = 1 >*/
        icompq = 1;
/*<       ELSE IF( LSAME( COMPQ, 'V' ) ) THEN >*/
    } else if (lsame_(compq, "V", (ftnlen)1, (ftnlen)1)) {
/*<          ILQ = .TRUE. >*/
        ilq = TRUE_;
/*<          ICOMPQ = 2 >*/
        icompq = 2;
/*<       ELSE IF( LSAME( COMPQ, 'I' ) ) THEN >*/
    } else if (lsame_(compq, "I", (ftnlen)1, (ftnlen)1)) {
/*<          ILQ = .TRUE. >*/
        ilq = TRUE_;
/*<          ICOMPQ = 3 >*/
        icompq = 3;
/*<       ELSE >*/
    } else {
/*<          ICOMPQ = 0 >*/
        icompq = 0;
/*<       END IF >*/
    }

/*     Decode COMPZ */

/*<       IF( LSAME( COMPZ, 'N' ) ) THEN >*/
    if (lsame_(compz, "N", (ftnlen)1, (ftnlen)1)) {
/*<          ILZ = .FALSE. >*/
        ilz = FALSE_;
/*<          ICOMPZ = 1 >*/
        icompz = 1;
/*<       ELSE IF( LSAME( COMPZ, 'V' ) ) THEN >*/
    } else if (lsame_(compz, "V", (ftnlen)1, (ftnlen)1)) {
/*<          ILZ = .TRUE. >*/
        ilz = TRUE_;
/*<          ICOMPZ = 2 >*/
        icompz = 2;
/*<       ELSE IF( LSAME( COMPZ, 'I' ) ) THEN >*/
    } else if (lsame_(compz, "I", (ftnlen)1, (ftnlen)1)) {
/*<          ILZ = .TRUE. >*/
        ilz = TRUE_;
/*<          ICOMPZ = 3 >*/
        icompz = 3;
/*<       ELSE >*/
    } else {
/*<          ICOMPZ = 0 >*/
        icompz = 0;
/*<       END IF >*/
    }

/*     Test the input parameters. */

/*<       INFO = 0 >*/
    *info = 0;
/*<       IF( ICOMPQ.LE.0 ) THEN >*/
    if (icompq <= 0) {
/*<          INFO = -1 >*/
        *info = -1;
/*<       ELSE IF( ICOMPZ.LE.0 ) THEN >*/
    } else if (icompz <= 0) {
/*<          INFO = -2 >*/
        *info = -2;
/*<       ELSE IF( N.LT.0 ) THEN >*/
    } else if (*n < 0) {
/*<          INFO = -3 >*/
        *info = -3;
/*<       ELSE IF( ILO.LT.1 ) THEN >*/
    } else if (*ilo < 1) {
/*<          INFO = -4 >*/
        *info = -4;
/*<       ELSE IF( IHI.GT.N .OR. IHI.LT.ILO-1 ) THEN >*/
    } else if (*ihi > *n || *ihi < *ilo - 1) {
/*<          INFO = -5 >*/
        *info = -5;
/*<       ELSE IF( LDA.LT.MAX( 1, N ) ) THEN >*/
    } else if (*lda < max(1,*n)) {
/*<          INFO = -7 >*/
        *info = -7;
/*<       ELSE IF( LDB.LT.MAX( 1, N ) ) THEN >*/
    } else if (*ldb < max(1,*n)) {
/*<          INFO = -9 >*/
        *info = -9;
/*<       ELSE IF( ( ILQ .AND. LDQ.LT.N ) .OR. LDQ.LT.1 ) THEN >*/
    } else if ((ilq && *ldq < *n) || *ldq < 1) {
/*<          INFO = -11 >*/
        *info = -11;
/*<       ELSE IF( ( ILZ .AND. LDZ.LT.N ) .OR. LDZ.LT.1 ) THEN >*/
    } else if ((ilz && *ldz < *n) || *ldz < 1) {
/*<          INFO = -13 >*/
        *info = -13;
/*<       END IF >*/
    }
/*<       IF( INFO.NE.0 ) THEN >*/
    if (*info != 0) {
/*<          CALL XERBLA( 'ZGGHRD', -INFO ) >*/
        i__1 = -(*info);
        xerbla_("ZGGHRD", &i__1, (ftnlen)6);
/*<          RETURN >*/
        return 0;
/*<       END IF >*/
    }

/*     Initialize Q and Z if desired. */

/*<    >*/
    if (icompq == 3) {
        zlaset_("Full", n, n, &c_b2, &c_b1, &q[q_offset], ldq, (ftnlen)4);
    }
/*<    >*/
    if (icompz == 3) {
        zlaset_("Full", n, n, &c_b2, &c_b1, &z__[z_offset], ldz, (ftnlen)4);
    }

/*     Quick return if possible */

/*<    >*/
    if (*n <= 1) {
        return 0;
    }

/*     Zero out lower triangle of B */

/*<       DO 20 JCOL = 1, N - 1 >*/
    i__1 = *n - 1;
    for (jcol = 1; jcol <= i__1; ++jcol) {
/*<          DO 10 JROW = JCOL + 1, N >*/
        i__2 = *n;
        for (jrow = jcol + 1; jrow <= i__2; ++jrow) {
/*<             B( JROW, JCOL ) = CZERO >*/
            i__3 = jrow + jcol * b_dim1;
            b[i__3].r = 0., b[i__3].i = 0.;
/*<    10    CONTINUE >*/
/* L10: */
        }
/*<    20 CONTINUE >*/
/* L20: */
    }

/*     Reduce A and B */

/*<       DO 40 JCOL = ILO, IHI - 2 >*/
    i__1 = *ihi - 2;
    for (jcol = *ilo; jcol <= i__1; ++jcol) {

/*<          DO 30 JROW = IHI, JCOL + 2, -1 >*/
        i__2 = jcol + 2;
        for (jrow = *ihi; jrow >= i__2; --jrow) {

/*           Step 1: rotate rows JROW-1, JROW to kill A(JROW,JCOL) */

/*<             CTEMP = A( JROW-1, JCOL ) >*/
            i__3 = jrow - 1 + jcol * a_dim1;
            ctemp.r = a[i__3].r, ctemp.i = a[i__3].i;
/*<    >*/
            zlartg_(&ctemp, &a[jrow + jcol * a_dim1], &c__, &s, &a[jrow - 1 +
                    jcol * a_dim1]);
/*<             A( JROW, JCOL ) = CZERO >*/
            i__3 = jrow + jcol * a_dim1;
            a[i__3].r = 0., a[i__3].i = 0.;
/*<    >*/
            i__3 = *n - jcol;
            zrot_(&i__3, &a[jrow - 1 + (jcol + 1) * a_dim1], lda, &a[jrow + (
                    jcol + 1) * a_dim1], lda, &c__, &s);
/*<    >*/
            i__3 = *n + 2 - jrow;
            zrot_(&i__3, &b[jrow - 1 + (jrow - 1) * b_dim1], ldb, &b[jrow + (
                    jrow - 1) * b_dim1], ldb, &c__, &s);
/*<    >*/
            if (ilq) {
                d_cnjg(&z__1, &s);
                zrot_(n, &q[(jrow - 1) * q_dim1 + 1], &c__1, &q[jrow * q_dim1
                        + 1], &c__1, &c__, &z__1);
            }

/*           Step 2: rotate columns JROW, JROW-1 to kill B(JROW,JROW-1) */

/*<             CTEMP = B( JROW, JROW ) >*/
            i__3 = jrow + jrow * b_dim1;
            ctemp.r = b[i__3].r, ctemp.i = b[i__3].i;
/*<    >*/
            zlartg_(&ctemp, &b[jrow + (jrow - 1) * b_dim1], &c__, &s, &b[jrow
                    + jrow * b_dim1]);
/*<             B( JROW, JROW-1 ) = CZERO >*/
            i__3 = jrow + (jrow - 1) * b_dim1;
            b[i__3].r = 0., b[i__3].i = 0.;
/*<             CALL ZROT( IHI, A( 1, JROW ), 1, A( 1, JROW-1 ), 1, C, S ) >*/
            zrot_(ihi, &a[jrow * a_dim1 + 1], &c__1, &a[(jrow - 1) * a_dim1 +
                    1], &c__1, &c__, &s);
/*<    >*/
            i__3 = jrow - 1;
            zrot_(&i__3, &b[jrow * b_dim1 + 1], &c__1, &b[(jrow - 1) * b_dim1
                    + 1], &c__1, &c__, &s);
/*<    >*/
            if (ilz) {
                zrot_(n, &z__[jrow * z_dim1 + 1], &c__1, &z__[(jrow - 1) *
                        z_dim1 + 1], &c__1, &c__, &s);
            }
/*<    30    CONTINUE >*/
/* L30: */
        }
/*<    40 CONTINUE >*/
/* L40: */
    }

/*<       RETURN >*/
    return 0;

/*     End of ZGGHRD */

/*<       END >*/
} /* zgghrd_ */
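/* ------------------------------------------------------------------ */
/* Usage note (not part of the LAPACK source): unlike the earlier      */
/* translation, this copy of ZGGHRD carries explicit ftnlen lengths    */
/* for the CHARACTER*1 arguments, so a C caller passes the string      */
/* lengths as trailing arguments.  A hypothetical call, with the data  */
/* prepared as in the earlier ZGGHRD usage sketch, would read:         */
/*                                                                     */
/*     zgghrd_("I", "I", &n, &ilo, &ihi, a, &lda, b, &ldb,             */
/*             q, &ldq, z, &ldz, &info, (ftnlen)1, (ftnlen)1);         */
/* ------------------------------------------------------------------ */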
Example #6
 int zlags2_(int *upper, double *a1, doublecomplex *
	a2, double *a3, double *b1, doublecomplex *b2, double *b3, 
	 double *csu, doublecomplex *snu, double *csv, doublecomplex *
	snv, double *csq, doublecomplex *snq)
{
    /* System generated locals */
    double d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8;
    doublecomplex z__1, z__2, z__3, z__4, z__5;

    /* Builtin functions */
    double z_abs(doublecomplex *), d_imag(doublecomplex *);
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    double a;
    doublecomplex b, c__;
    double d__;
    doublecomplex r__, d1;
    double s1, s2, fb, fc;
    doublecomplex ua11, ua12, ua21, ua22, vb11, vb12, vb21, vb22;
    double csl, csr, snl, snr, aua11, aua12, aua21, aua22, avb12, avb11, 
	    avb21, avb22, ua11r, ua22r, vb11r, vb22r;
    extern  int dlasv2_(double *, double *, 
	    double *, double *, double *, double *, 
	    double *, double *, double *), zlartg_(doublecomplex *
, doublecomplex *, double *, doublecomplex *, doublecomplex *)
	    ;


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZLAGS2 computes 2-by-2 unitary matrices U, V and Q, such */
/*  that if ( UPPER ) then */

/*            U'*A*Q = U'*( A1 A2 )*Q = ( x  0  ) */
/*                        ( 0  A3 )     ( x  x  ) */
/*  and */
/*            V'*B*Q = V'*( B1 B2 )*Q = ( x  0  ) */
/*                        ( 0  B3 )     ( x  x  ) */

/*  or if ( .NOT.UPPER ) then */

/*            U'*A*Q = U'*( A1 0  )*Q = ( x  x  ) */
/*                        ( A2 A3 )     ( 0  x  ) */
/*  and */
/*            V'*B*Q = V'*( B1 0  )*Q = ( x  x  ) */
/*                        ( B2 B3 )     ( 0  x  ) */
/*  where */

/*    U = (     CSU      SNU ), V = (     CSV     SNV ), */
/*        ( -CONJG(SNU)  CSU )      ( -CONJG(SNV) CSV ) */

/*    Q = (     CSQ      SNQ ) */
/*        ( -CONJG(SNQ)  CSQ ) */

/*  Z' denotes the conjugate transpose of Z. */

/*  The rows of the transformed A and B are parallel. Moreover, if the */
/*  input 2-by-2 matrix A is not zero, then the transformed (1,1) entry */
/*  of A is not zero. If the input matrices A and B are both not zero, */
/*  then the transformed (2,2) element of B is not zero, except when the */
/*  first rows of input A and B are parallel and the second rows are */
/*  zero. */

/*  Arguments */
/*  ========= */

/*  UPPER   (input) LOGICAL */
/*          = .TRUE.: the input matrices A and B are upper triangular. */
/*          = .FALSE.: the input matrices A and B are lower triangular. */

/*  A1      (input) DOUBLE PRECISION */
/*  A2      (input) COMPLEX*16 */
/*  A3      (input) DOUBLE PRECISION */
/*          On entry, A1, A2 and A3 are elements of the input 2-by-2 */
/*          upper (lower) triangular matrix A. */

/*  B1      (input) DOUBLE PRECISION */
/*  B2      (input) COMPLEX*16 */
/*  B3      (input) DOUBLE PRECISION */
/*          On entry, B1, B2 and B3 are elements of the input 2-by-2 */
/*          upper (lower) triangular matrix B. */

/*  CSU     (output) DOUBLE PRECISION */
/*  SNU     (output) COMPLEX*16 */
/*          The desired unitary matrix U. */

/*  CSV     (output) DOUBLE PRECISION */
/*  SNV     (output) COMPLEX*16 */
/*          The desired unitary matrix V. */

/*  CSQ     (output) DOUBLE PRECISION */
/*  SNQ     (output) COMPLEX*16 */
/*          The desired unitary matrix Q. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Statement Functions .. */
/*     .. */
/*     .. Statement Function definitions .. */
/*     .. */
/*     .. Executable Statements .. */

    if (*upper) {

/*        Input matrices A and B are upper triangular matrices */

/*        Form matrix C = A*adj(B) = ( a b ) */
/*                                   ( 0 d ) */

	a = *a1 * *b3;
	d__ = *a3 * *b1;
	z__2.r = *b1 * a2->r, z__2.i = *b1 * a2->i;
	z__3.r = *a1 * b2->r, z__3.i = *a1 * b2->i;
	z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
	b.r = z__1.r, b.i = z__1.i;
	fb = z_abs(&b);

/*        Transform complex 2-by-2 matrix C to real matrix by unitary */
/*        diagonal matrix diag(1,D1). */

	d1.r = 1., d1.i = 0.;
	if (fb != 0.) {
	    z__1.r = b.r / fb, z__1.i = b.i / fb;
	    d1.r = z__1.r, d1.i = z__1.i;
	}

/*        The SVD of real 2-by-2 triangular C */

/*         ( CSL -SNL )*( A B )*(  CSR  SNR ) = ( R 0 ) */
/*         ( SNL  CSL ) ( 0 D ) ( -SNR  CSR )   ( 0 T ) */

	dlasv2_(&a, &fb, &d__, &s1, &s2, &snr, &csr, &snl, &csl);

	if (ABS(csl) >= ABS(snl) || ABS(csr) >= ABS(snr)) {

/*           Compute the (1,1) and (1,2) elements of U'*A and V'*B, */
/*           and (1,2) element of |U|'*|A| and |V|'*|B|. */

	    ua11r = csl * *a1;
	    z__2.r = csl * a2->r, z__2.i = csl * a2->i;
	    z__4.r = snl * d1.r, z__4.i = snl * d1.i;
	    z__3.r = *a3 * z__4.r, z__3.i = *a3 * z__4.i;
	    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
	    ua12.r = z__1.r, ua12.i = z__1.i;

	    vb11r = csr * *b1;
	    z__2.r = csr * b2->r, z__2.i = csr * b2->i;
	    z__4.r = snr * d1.r, z__4.i = snr * d1.i;
	    z__3.r = *b3 * z__4.r, z__3.i = *b3 * z__4.i;
	    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
	    vb12.r = z__1.r, vb12.i = z__1.i;

	    aua12 = ABS(csl) * ((d__1 = a2->r, ABS(d__1)) + (d__2 = d_imag(a2)
		    , ABS(d__2))) + ABS(snl) * ABS(*a3);
	    avb12 = ABS(csr) * ((d__1 = b2->r, ABS(d__1)) + (d__2 = d_imag(b2)
		    , ABS(d__2))) + ABS(snr) * ABS(*b3);

/*           zero (1,2) elements of U'*A and V'*B */

	    if (ABS(ua11r) + ((d__1 = ua12.r, ABS(d__1)) + (d__2 = d_imag(&
		    ua12), ABS(d__2))) == 0.) {
		z__2.r = vb11r, z__2.i = 0.;
		z__1.r = -z__2.r, z__1.i = -z__2.i;
		d_cnjg(&z__3, &vb12);
		zlartg_(&z__1, &z__3, csq, snq, &r__);
	    } else if (ABS(vb11r) + ((d__1 = vb12.r, ABS(d__1)) + (d__2 = 
		    d_imag(&vb12), ABS(d__2))) == 0.) {
		z__2.r = ua11r, z__2.i = 0.;
		z__1.r = -z__2.r, z__1.i = -z__2.i;
		d_cnjg(&z__3, &ua12);
		zlartg_(&z__1, &z__3, csq, snq, &r__);
	    } else if (aua12 / (ABS(ua11r) + ((d__1 = ua12.r, ABS(d__1)) + (
		    d__2 = d_imag(&ua12), ABS(d__2)))) <= avb12 / (ABS(vb11r) 
		    + ((d__3 = vb12.r, ABS(d__3)) + (d__4 = d_imag(&vb12), 
		    ABS(d__4))))) {
		z__2.r = ua11r, z__2.i = 0.;
		z__1.r = -z__2.r, z__1.i = -z__2.i;
		d_cnjg(&z__3, &ua12);
		zlartg_(&z__1, &z__3, csq, snq, &r__);
	    } else {
		z__2.r = vb11r, z__2.i = 0.;
		z__1.r = -z__2.r, z__1.i = -z__2.i;
		d_cnjg(&z__3, &vb12);
		zlartg_(&z__1, &z__3, csq, snq, &r__);
	    }

	    *csu = csl;
	    z__2.r = -d1.r, z__2.i = -d1.i;
	    z__1.r = snl * z__2.r, z__1.i = snl * z__2.i;
	    snu->r = z__1.r, snu->i = z__1.i;
	    *csv = csr;
	    z__2.r = -d1.r, z__2.i = -d1.i;
	    z__1.r = snr * z__2.r, z__1.i = snr * z__2.i;
	    snv->r = z__1.r, snv->i = z__1.i;

	} else {

/*           Compute the (2,1) and (2,2) elements of U'*A and V'*B, */
/*           and (2,2) element of |U|'*|A| and |V|'*|B|. */

	    d_cnjg(&z__4, &d1);
	    z__3.r = -z__4.r, z__3.i = -z__4.i;
	    z__2.r = snl * z__3.r, z__2.i = snl * z__3.i;
	    z__1.r = *a1 * z__2.r, z__1.i = *a1 * z__2.i;
	    ua21.r = z__1.r, ua21.i = z__1.i;
	    d_cnjg(&z__5, &d1);
	    z__4.r = -z__5.r, z__4.i = -z__5.i;
	    z__3.r = snl * z__4.r, z__3.i = snl * z__4.i;
	    z__2.r = z__3.r * a2->r - z__3.i * a2->i, z__2.i = z__3.r * a2->i 
		    + z__3.i * a2->r;
	    d__1 = csl * *a3;
	    z__1.r = z__2.r + d__1, z__1.i = z__2.i;
	    ua22.r = z__1.r, ua22.i = z__1.i;

	    d_cnjg(&z__4, &d1);
	    z__3.r = -z__4.r, z__3.i = -z__4.i;
	    z__2.r = snr * z__3.r, z__2.i = snr * z__3.i;
	    z__1.r = *b1 * z__2.r, z__1.i = *b1 * z__2.i;
	    vb21.r = z__1.r, vb21.i = z__1.i;
	    d_cnjg(&z__5, &d1);
	    z__4.r = -z__5.r, z__4.i = -z__5.i;
	    z__3.r = snr * z__4.r, z__3.i = snr * z__4.i;
	    z__2.r = z__3.r * b2->r - z__3.i * b2->i, z__2.i = z__3.r * b2->i 
		    + z__3.i * b2->r;
	    d__1 = csr * *b3;
	    z__1.r = z__2.r + d__1, z__1.i = z__2.i;
	    vb22.r = z__1.r, vb22.i = z__1.i;

	    aua22 = ABS(snl) * ((d__1 = a2->r, ABS(d__1)) + (d__2 = d_imag(a2)
		    , ABS(d__2))) + ABS(csl) * ABS(*a3);
	    avb22 = ABS(snr) * ((d__1 = b2->r, ABS(d__1)) + (d__2 = d_imag(b2)
		    , ABS(d__2))) + ABS(csr) * ABS(*b3);

/*           zero (2,2) elements of U'*A and V'*B, and then swap. */

	    if ((d__1 = ua21.r, ABS(d__1)) + (d__2 = d_imag(&ua21), ABS(d__2))
		     + ((d__3 = ua22.r, ABS(d__3)) + (d__4 = d_imag(&ua22), 
		    ABS(d__4))) == 0.) {
		d_cnjg(&z__2, &vb21);
		z__1.r = -z__2.r, z__1.i = -z__2.i;
		d_cnjg(&z__3, &vb22);
		zlartg_(&z__1, &z__3, csq, snq, &r__);
	    } else if ((d__1 = vb21.r, ABS(d__1)) + (d__2 = d_imag(&vb21), 
		    ABS(d__2)) + z_abs(&vb22) == 0.) {
		d_cnjg(&z__2, &ua21);
		z__1.r = -z__2.r, z__1.i = -z__2.i;
		d_cnjg(&z__3, &ua22);
		zlartg_(&z__1, &z__3, csq, snq, &r__);
	    } else if (aua22 / ((d__1 = ua21.r, ABS(d__1)) + (d__2 = d_imag(&
		    ua21), ABS(d__2)) + ((d__3 = ua22.r, ABS(d__3)) + (d__4 = 
		    d_imag(&ua22), ABS(d__4)))) <= avb22 / ((d__5 = vb21.r, 
		    ABS(d__5)) + (d__6 = d_imag(&vb21), ABS(d__6)) + ((d__7 = 
		    vb22.r, ABS(d__7)) + (d__8 = d_imag(&vb22), ABS(d__8))))) 
		    {
		d_cnjg(&z__2, &ua21);
		z__1.r = -z__2.r, z__1.i = -z__2.i;
		d_cnjg(&z__3, &ua22);
		zlartg_(&z__1, &z__3, csq, snq, &r__);
	    } else {
		d_cnjg(&z__2, &vb21);
		z__1.r = -z__2.r, z__1.i = -z__2.i;
		d_cnjg(&z__3, &vb22);
		zlartg_(&z__1, &z__3, csq, snq, &r__);
	    }

	    *csu = snl;
	    z__1.r = csl * d1.r, z__1.i = csl * d1.i;
	    snu->r = z__1.r, snu->i = z__1.i;
	    *csv = snr;
	    z__1.r = csr * d1.r, z__1.i = csr * d1.i;
	    snv->r = z__1.r, snv->i = z__1.i;

	}

    } else {

/*        Input matrices A and B are lower triangular matrices */

/*        Form matrix C = A*adj(B) = ( a 0 ) */
/*                                   ( c d ) */

	a = *a1 * *b3;
	d__ = *a3 * *b1;
	z__2.r = *b3 * a2->r, z__2.i = *b3 * a2->i;
	z__3.r = *a3 * b2->r, z__3.i = *a3 * b2->i;
	z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
	c__.r = z__1.r, c__.i = z__1.i;
	fc = z_abs(&c__);

/*        Transform complex 2-by-2 matrix C to real matrix by unitary */
/*        diagonal matrix diag(d1,1). */

	d1.r = 1., d1.i = 0.;
	if (fc != 0.) {
	    z__1.r = c__.r / fc, z__1.i = c__.i / fc;
	    d1.r = z__1.r, d1.i = z__1.i;
	}

/*        The SVD of real 2-by-2 triangular C */

/*         ( CSL -SNL )*( A 0 )*(  CSR  SNR ) = ( R 0 ) */
/*         ( SNL  CSL ) ( C D ) ( -SNR  CSR )   ( 0 T ) */

	dlasv2_(&a, &fc, &d__, &s1, &s2, &snr, &csr, &snl, &csl);

	if (ABS(csr) >= ABS(snr) || ABS(csl) >= ABS(snl)) {

/*           Compute the (2,1) and (2,2) elements of U'*A and V'*B, */
/*           and (2,1) element of |U|'*|A| and |V|'*|B|. */

	    z__4.r = -d1.r, z__4.i = -d1.i;
	    z__3.r = snr * z__4.r, z__3.i = snr * z__4.i;
	    z__2.r = *a1 * z__3.r, z__2.i = *a1 * z__3.i;
	    z__5.r = csr * a2->r, z__5.i = csr * a2->i;
	    z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
	    ua21.r = z__1.r, ua21.i = z__1.i;
	    ua22r = csr * *a3;

	    z__4.r = -d1.r, z__4.i = -d1.i;
	    z__3.r = snl * z__4.r, z__3.i = snl * z__4.i;
	    z__2.r = *b1 * z__3.r, z__2.i = *b1 * z__3.i;
	    z__5.r = csl * b2->r, z__5.i = csl * b2->i;
	    z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
	    vb21.r = z__1.r, vb21.i = z__1.i;
	    vb22r = csl * *b3;

	    aua21 = ABS(snr) * ABS(*a1) + ABS(csr) * ((d__1 = a2->r, ABS(d__1)
		    ) + (d__2 = d_imag(a2), ABS(d__2)));
	    avb21 = ABS(snl) * ABS(*b1) + ABS(csl) * ((d__1 = b2->r, ABS(d__1)
		    ) + (d__2 = d_imag(b2), ABS(d__2)));

/*           zero (2,1) elements of U'*A and V'*B. */

	    if ((d__1 = ua21.r, ABS(d__1)) + (d__2 = d_imag(&ua21), ABS(d__2))
		     + ABS(ua22r) == 0.) {
		z__1.r = vb22r, z__1.i = 0.;
		zlartg_(&z__1, &vb21, csq, snq, &r__);
	    } else if ((d__1 = vb21.r, ABS(d__1)) + (d__2 = d_imag(&vb21), 
		    ABS(d__2)) + ABS(vb22r) == 0.) {
		z__1.r = ua22r, z__1.i = 0.;
		zlartg_(&z__1, &ua21, csq, snq, &r__);
	    } else if (aua21 / ((d__1 = ua21.r, ABS(d__1)) + (d__2 = d_imag(&
		    ua21), ABS(d__2)) + ABS(ua22r)) <= avb21 / ((d__3 = 
		    vb21.r, ABS(d__3)) + (d__4 = d_imag(&vb21), ABS(d__4)) + 
		    ABS(vb22r))) {
		z__1.r = ua22r, z__1.i = 0.;
		zlartg_(&z__1, &ua21, csq, snq, &r__);
	    } else {
		z__1.r = vb22r, z__1.i = 0.;
		zlartg_(&z__1, &vb21, csq, snq, &r__);
	    }

	    *csu = csr;
	    d_cnjg(&z__3, &d1);
	    z__2.r = -z__3.r, z__2.i = -z__3.i;
	    z__1.r = snr * z__2.r, z__1.i = snr * z__2.i;
	    snu->r = z__1.r, snu->i = z__1.i;
	    *csv = csl;
	    d_cnjg(&z__3, &d1);
	    z__2.r = -z__3.r, z__2.i = -z__3.i;
	    z__1.r = snl * z__2.r, z__1.i = snl * z__2.i;
	    snv->r = z__1.r, snv->i = z__1.i;

	} else {

/*           Compute the (1,1) and (1,2) elements of U'*A and V'*B, */
/*           and (1,1) element of |U|'*|A| and |V|'*|B|. */

	    d__1 = csr * *a1;
	    d_cnjg(&z__4, &d1);
	    z__3.r = snr * z__4.r, z__3.i = snr * z__4.i;
	    z__2.r = z__3.r * a2->r - z__3.i * a2->i, z__2.i = z__3.r * a2->i 
		    + z__3.i * a2->r;
	    z__1.r = d__1 + z__2.r, z__1.i = z__2.i;
	    ua11.r = z__1.r, ua11.i = z__1.i;
	    d_cnjg(&z__3, &d1);
	    z__2.r = snr * z__3.r, z__2.i = snr * z__3.i;
	    z__1.r = *a3 * z__2.r, z__1.i = *a3 * z__2.i;
	    ua12.r = z__1.r, ua12.i = z__1.i;

	    d__1 = csl * *b1;
	    d_cnjg(&z__4, &d1);
	    z__3.r = snl * z__4.r, z__3.i = snl * z__4.i;
	    z__2.r = z__3.r * b2->r - z__3.i * b2->i, z__2.i = z__3.r * b2->i 
		    + z__3.i * b2->r;
	    z__1.r = d__1 + z__2.r, z__1.i = z__2.i;
	    vb11.r = z__1.r, vb11.i = z__1.i;
	    d_cnjg(&z__3, &d1);
	    z__2.r = snl * z__3.r, z__2.i = snl * z__3.i;
	    z__1.r = *b3 * z__2.r, z__1.i = *b3 * z__2.i;
	    vb12.r = z__1.r, vb12.i = z__1.i;

	    aua11 = ABS(csr) * ABS(*a1) + ABS(snr) * ((d__1 = a2->r, ABS(d__1)
		    ) + (d__2 = d_imag(a2), ABS(d__2)));
	    avb11 = ABS(csl) * ABS(*b1) + ABS(snl) * ((d__1 = b2->r, ABS(d__1)
		    ) + (d__2 = d_imag(b2), ABS(d__2)));

/*           zero (1,1) elements of U'*A and V'*B, and then swap. */

	    if ((d__1 = ua11.r, ABS(d__1)) + (d__2 = d_imag(&ua11), ABS(d__2))
		     + ((d__3 = ua12.r, ABS(d__3)) + (d__4 = d_imag(&ua12), 
		    ABS(d__4))) == 0.) {
		zlartg_(&vb12, &vb11, csq, snq, &r__);
	    } else if ((d__1 = vb11.r, ABS(d__1)) + (d__2 = d_imag(&vb11), 
		    ABS(d__2)) + ((d__3 = vb12.r, ABS(d__3)) + (d__4 = d_imag(
		    &vb12), ABS(d__4))) == 0.) {
		zlartg_(&ua12, &ua11, csq, snq, &r__);
	    } else if (aua11 / ((d__1 = ua11.r, ABS(d__1)) + (d__2 = d_imag(&
		    ua11), ABS(d__2)) + ((d__3 = ua12.r, ABS(d__3)) + (d__4 = 
		    d_imag(&ua12), ABS(d__4)))) <= avb11 / ((d__5 = vb11.r, 
		    ABS(d__5)) + (d__6 = d_imag(&vb11), ABS(d__6)) + ((d__7 = 
		    vb12.r, ABS(d__7)) + (d__8 = d_imag(&vb12), ABS(d__8))))) 
		    {
		zlartg_(&ua12, &ua11, csq, snq, &r__);
	    } else {
		zlartg_(&vb12, &vb11, csq, snq, &r__);
	    }

	    *csu = snr;
	    d_cnjg(&z__2, &d1);
	    z__1.r = csr * z__2.r, z__1.i = csr * z__2.i;
	    snu->r = z__1.r, snu->i = z__1.i;
	    *csv = snl;
	    d_cnjg(&z__2, &d1);
	    z__1.r = csl * z__2.r, z__1.i = csl * z__2.i;
	    snv->r = z__1.r, snv->i = z__1.i;

	}

    }

    return 0;

/*     End of ZLAGS2 */

} /* zlags2_ */
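/* ------------------------------------------------------------------ */
/* Usage sketch (not part of the LAPACK source): a hypothetical call   */
/* of the ZLAGS2 translation above for an upper triangular 2-by-2 pair */
/* (A,B).  It uses the same types as this translation (plain int for   */
/* the LOGICAL argument, double for DOUBLE PRECISION, doublecomplex    */
/* from "f2c.h"); the matrix entries are made up.                      */

#include "f2c.h"

extern int zlags2_(int *upper, double *a1, doublecomplex *a2, double *a3,
	double *b1, doublecomplex *b2, double *b3, double *csu,
	doublecomplex *snu, double *csv, doublecomplex *snv,
	double *csq, doublecomplex *snq);

static void example_zlags2(void)
{
    int upper = 1;			/* .TRUE.: A and B upper triangular */
    double a1 = 2., a3 = 3., b1 = 1., b3 = 4.;
    doublecomplex a2, b2, snu, snv, snq;
    double csu, csv, csq;

    a2.r = 0.5, a2.i = -0.25;		/* A = ( a1  a2 ; 0  a3 ) */
    b2.r = 0.1, b2.i = 0.2;		/* B = ( b1  b2 ; 0  b3 ) */

    zlags2_(&upper, &a1, &a2, &a3, &b1, &b2, &b3,
	    &csu, &snu, &csv, &snv, &csq, &snq);

    /* (CSU,SNU), (CSV,SNV) and (CSQ,SNQ) now define the 2-by-2 unitary */
    /* U, V and Q with U'*A*Q and V'*B*Q in the form described above.   */
}
/* ------------------------------------------------------------------ */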
Example #7
/* Subroutine */ int ztrexc_(char *compq, integer *n, doublecomplex *t, 
	integer *ldt, doublecomplex *q, integer *ldq, integer *ifst, integer *
	ilst, integer *info)
{
/*  -- LAPACK routine (version 2.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       March 31, 1993   


    Purpose   
    =======   

    ZTREXC reorders the Schur factorization of a complex matrix   
    A = Q*T*Q**H, so that the diagonal element of T with row index IFST   
    is moved to row ILST.   

    The Schur form T is reordered by a unitary similarity transformation   
    Z**H*T*Z, and optionally the matrix Q of Schur vectors is updated by   
    postmultiplying it with Z.   

    Arguments   
    =========   

    COMPQ   (input) CHARACTER*1   
            = 'V':  update the matrix Q of Schur vectors;   
            = 'N':  do not update Q.   

    N       (input) INTEGER   
            The order of the matrix T. N >= 0.   

    T       (input/output) COMPLEX*16 array, dimension (LDT,N)   
            On entry, the upper triangular matrix T.   
            On exit, the reordered upper triangular matrix.   

    LDT     (input) INTEGER   
            The leading dimension of the array T. LDT >= max(1,N).   

    Q       (input/output) COMPLEX*16 array, dimension (LDQ,N)   
            On entry, if COMPQ = 'V', the matrix Q of Schur vectors.   
            On exit, if COMPQ = 'V', Q has been postmultiplied by the   
            unitary transformation matrix Z which reorders T.   
            If COMPQ = 'N', Q is not referenced.   

    LDQ     (input) INTEGER   
            The leading dimension of the array Q.  LDQ >= max(1,N).   

    IFST    (input) INTEGER   
    ILST    (input) INTEGER   
            Specify the reordering of the diagonal elements of T:   
            The element with row index IFST is moved to row ILST by a   
            sequence of transpositions between adjacent elements.   
            1 <= IFST <= N; 1 <= ILST <= N.   

    INFO    (output) INTEGER   
            = 0:  successful exit   
            < 0:  if INFO = -i, the i-th argument had an illegal value   

    =====================================================================   


       Decode and test the input parameters.   

       Parameter adjustments   
       Function Body */
    /* Table of constant values */
    static integer c__1 = 1;
    
    /* System generated locals */
    integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2, i__3;
    doublecomplex z__1;
    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);
    /* Local variables */
    static doublecomplex temp;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    static integer k;
    extern logical lsame_(char *, char *);
    static logical wantq;
    static integer m1, m2, m3;
    static doublereal cs;
    static doublecomplex t11, t22, sn;
    extern /* Subroutine */ int xerbla_(char *, integer *), zlartg_(
	    doublecomplex *, doublecomplex *, doublereal *, doublecomplex *, 
	    doublecomplex *);




#define T(I,J) t[(I)-1 + ((J)-1)* ( *ldt)]
#define Q(I,J) q[(I)-1 + ((J)-1)* ( *ldq)]

    *info = 0;
    wantq = lsame_(compq, "V");
    if (! lsame_(compq, "N") && ! wantq) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*ldt < max(1,*n)) {
	*info = -4;
    } else if (*ldq < 1 || wantq && *ldq < max(1,*n)) {
	*info = -6;
    } else if (*ifst < 1 || *ifst > *n) {
	*info = -7;
    } else if (*ilst < 1 || *ilst > *n) {
	*info = -8;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZTREXC", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 1 || *ifst == *ilst) {
	return 0;
    }

    if (*ifst < *ilst) {

/*        Move the IFST-th diagonal element forward down the diagonal. */

	m1 = 0;
	m2 = -1;
	m3 = 1;
    } else {

/*        Move the IFST-th diagonal element backward up the diagonal. */

	m1 = -1;
	m2 = 0;
	m3 = -1;
    }

    i__1 = *ilst + m2;
    i__2 = m3;
    for (k = *ifst + m1; m3 < 0 ? k >= *ilst+m2 : k <= *ilst+m2; k += m3) {

/*        Interchange the k-th and (k+1)-th diagonal elements. */

	i__3 = k + k * t_dim1;
	t11.r = T(k,k).r, t11.i = T(k,k).i;
	i__3 = k + 1 + (k + 1) * t_dim1;
	t22.r = T(k+1,k+1).r, t22.i = T(k+1,k+1).i;

/*        Determine the transformation to perform the interchange. */

	z__1.r = t22.r - t11.r, z__1.i = t22.i - t11.i;
	zlartg_(&T(k,k+1), &z__1, &cs, &sn, &temp);

/*        Apply transformation to the matrix T. */

	if (k + 2 <= *n) {
	    i__3 = *n - k - 1;
	    zrot_(&i__3, &T(k,k+2), ldt, &T(k+1,k+2), ldt, &cs, &sn);
	}
	i__3 = k - 1;
	d_cnjg(&z__1, &sn);
	zrot_(&i__3, &T(1,k), &c__1, &T(1,k+1), &
		c__1, &cs, &z__1);

	i__3 = k + k * t_dim1;
	T(k,k).r = t22.r, T(k,k).i = t22.i;
	i__3 = k + 1 + (k + 1) * t_dim1;
	T(k+1,k+1).r = t11.r, T(k+1,k+1).i = t11.i;

	if (wantq) {

/*           Accumulate transformation in the matrix Q. */

	    d_cnjg(&z__1, &sn);
	    zrot_(n, &Q(1,k), &c__1, &Q(1,k+1), &
		    c__1, &cs, &z__1);
	}

/* L10: */
    }

    return 0;

/*     End of ZTREXC */

} /* ztrexc_ */
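/* ------------------------------------------------------------------ */
/* Usage sketch (not part of the LAPACK source): a hypothetical call   */
/* of the ZTREXC translation above that moves the third diagonal       */
/* element of a 3-by-3 upper triangular Schur factor T to the first    */
/* position, without accumulating Schur vectors.  Assumes an           */
/* f2c/CLAPACK environment ("f2c.h"); the entries of T are made up.    */

#include "f2c.h"

extern int ztrexc_(char *compq, integer *n, doublecomplex *t, integer *ldt,
	doublecomplex *q, integer *ldq, integer *ifst, integer *ilst,
	integer *info);

static void example_ztrexc(void)
{
    integer n = 3, ldt = 3, ldq = 1, ifst = 3, ilst = 1, info;
    doublecomplex t[9], q[1];
    integer i;

    /* Upper triangular T (column-major) with diagonal 1, 2, 3. */
    for (i = 0; i < 9; ++i)
	t[i].r = 0., t[i].i = 0.;
    t[0].r = 1.;  t[4].r = 2.;  t[8].r = 3.;	/* T(1,1), T(2,2), T(3,3) */
    t[3].r = 0.5; t[6].r = -0.5; t[7].r = 0.25;	/* strictly upper part    */

    /* COMPQ = 'N': do not update Schur vectors; Q is not referenced. */
    ztrexc_("N", &n, t, &ldt, q, &ldq, &ifst, &ilst, &info);

    /* On exit T is again upper triangular, with the eigenvalue that */
    /* was at position IFST = 3 moved to position ILST = 1.          */
}
/* ------------------------------------------------------------------ */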
/* Subroutine */ int zgghrd_(char *compq, char *compz, integer *n, integer *
	ilo, integer *ihi, doublecomplex *a, integer *lda, doublecomplex *b, 
	integer *ldb, doublecomplex *q, integer *ldq, doublecomplex *z__, 
	integer *ldz, integer *info)
{
/*  -- LAPACK routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    ZGGHRD reduces a pair of complex matrices (A,B) to generalized upper   
    Hessenberg form using unitary transformations, where A is a   
    general matrix and B is upper triangular:  Q' * A * Z = H and   
    Q' * B * Z = T, where H is upper Hessenberg, T is upper triangular,   
    and Q and Z are unitary, and ' means conjugate transpose.   

    The unitary matrices Q and Z are determined as products of Givens   
    rotations.  They may either be formed explicitly, or they may be   
    postmultiplied into input matrices Q1 and Z1, so that   

         Q1 * A * Z1' = (Q1*Q) * H * (Z1*Z)'   
         Q1 * B * Z1' = (Q1*Q) * T * (Z1*Z)'   

    Arguments   
    =========   

    COMPQ   (input) CHARACTER*1   
            = 'N': do not compute Q;   
            = 'I': Q is initialized to the unit matrix, and the   
                   unitary matrix Q is returned;   
            = 'V': Q must contain a unitary matrix Q1 on entry,   
                   and the product Q1*Q is returned.   

    COMPZ   (input) CHARACTER*1   
            = 'N': do not compute Z;   
            = 'I': Z is initialized to the unit matrix, and the   
                   unitary matrix Z is returned;   
            = 'V': Z must contain a unitary matrix Z1 on entry,   
                   and the product Z1*Z is returned.   

    N       (input) INTEGER   
            The order of the matrices A and B.  N >= 0.   

    ILO     (input) INTEGER   
    IHI     (input) INTEGER   
            It is assumed that A is already upper triangular in rows and   
            columns 1:ILO-1 and IHI+1:N.  ILO and IHI are normally set   
            by a previous call to ZGGBAL; otherwise they should be set   
            to 1 and N respectively.   
            1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.   

    A       (input/output) COMPLEX*16 array, dimension (LDA, N)   
            On entry, the N-by-N general matrix to be reduced.   
            On exit, the upper triangle and the first subdiagonal of A   
            are overwritten with the upper Hessenberg matrix H, and the   
            rest is set to zero.   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= max(1,N).   

    B       (input/output) COMPLEX*16 array, dimension (LDB, N)   
            On entry, the N-by-N upper triangular matrix B.   
            On exit, the upper triangular matrix T = Q' B Z.  The   
            elements below the diagonal are set to zero.   

    LDB     (input) INTEGER   
            The leading dimension of the array B.  LDB >= max(1,N).   

    Q       (input/output) COMPLEX*16 array, dimension (LDQ, N)   
            If COMPQ='N':  Q is not referenced.   
            If COMPQ='I':  on entry, Q need not be set, and on exit it   
                           contains the unitary matrix Q, where Q'   
                           is the product of the Givens transformations   
                           which are applied to A and B on the left.   
            If COMPQ='V':  on entry, Q must contain a unitary matrix   
                           Q1, and on exit this is overwritten by Q1*Q.   

    LDQ     (input) INTEGER   
            The leading dimension of the array Q.   
            LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise.   

    Z       (input/output) COMPLEX*16 array, dimension (LDZ, N)   
            If COMPZ='N':  Z is not referenced.   
            If COMPZ='I':  on entry, Z need not be set, and on exit it   
                           contains the unitary matrix Z, which is   
                           the product of the Givens transformations   
                           which are applied to A and B on the right.   
            If COMPZ='V':  on entry, Z must contain a unitary matrix   
                           Z1, and on exit this is overwritten by Z1*Z.   

    LDZ     (input) INTEGER   
            The leading dimension of the array Z.   
            LDZ >= N if COMPZ='V' or 'I'; LDZ >= 1 otherwise.   

    INFO    (output) INTEGER   
            = 0:  successful exit.   
            < 0:  if INFO = -i, the i-th argument had an illegal value.   

    Further Details   
    ===============   

    This routine reduces A to Hessenberg and B to triangular form by   
    an unblocked reduction, as described in _Matrix_Computations_,   
    by Golub and van Loan (Johns Hopkins Press).   

    =====================================================================   


       Decode COMPQ   

       Parameter adjustments */
    /* Table of constant values */
    static doublecomplex c_b1 = {1.,0.};
    static doublecomplex c_b2 = {0.,0.};
    static integer c__1 = 1;
    
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
	    z_offset, i__1, i__2, i__3;
    doublecomplex z__1;
    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);
    /* Local variables */
    static integer jcol, jrow;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    static doublereal c__;
    static doublecomplex s;
    extern logical lsame_(char *, char *);
    static doublecomplex ctemp;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    static integer icompq, icompz;
    extern /* Subroutine */ int zlaset_(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, doublecomplex *, integer *), zlartg_(doublecomplex *, doublecomplex *, doublereal *, 
	    doublecomplex *, doublecomplex *);
    static logical ilq, ilz;
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]
#define q_subscr(a_1,a_2) (a_2)*q_dim1 + a_1
#define q_ref(a_1,a_2) q[q_subscr(a_1,a_2)]
#define z___subscr(a_1,a_2) (a_2)*z_dim1 + a_1
#define z___ref(a_1,a_2) z__[z___subscr(a_1,a_2)]


    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1 * 1;
    z__ -= z_offset;

    /* Function Body */
    if (lsame_(compq, "N")) {
	ilq = FALSE_;
	icompq = 1;
    } else if (lsame_(compq, "V")) {
	ilq = TRUE_;
	icompq = 2;
    } else if (lsame_(compq, "I")) {
	ilq = TRUE_;
	icompq = 3;
    } else {
	icompq = 0;
    }

/*     Decode COMPZ */

    if (lsame_(compz, "N")) {
	ilz = FALSE_;
	icompz = 1;
    } else if (lsame_(compz, "V")) {
	ilz = TRUE_;
	icompz = 2;
    } else if (lsame_(compz, "I")) {
	ilz = TRUE_;
	icompz = 3;
    } else {
	icompz = 0;
    }

/*     Test the input parameters. */

    *info = 0;
    if (icompq <= 0) {
	*info = -1;
    } else if (icompz <= 0) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*ilo < 1) {
	*info = -4;
    } else if (*ihi > *n || *ihi < *ilo - 1) {
	*info = -5;
    } else if (*lda < max(1,*n)) {
	*info = -7;
    } else if (*ldb < max(1,*n)) {
	*info = -9;
    } else if (ilq && *ldq < *n || *ldq < 1) {
	*info = -11;
    } else if (ilz && *ldz < *n || *ldz < 1) {
	*info = -13;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZGGHRD", &i__1);
	return 0;
    }

/*     Initialize Q and Z if desired. */

    if (icompq == 3) {
	zlaset_("Full", n, n, &c_b2, &c_b1, &q[q_offset], ldq);
    }
    if (icompz == 3) {
	zlaset_("Full", n, n, &c_b2, &c_b1, &z__[z_offset], ldz);
    }

/*     Quick return if possible */

    if (*n <= 1) {
	return 0;
    }

/*     Zero out lower triangle of B */

    i__1 = *n - 1;
    for (jcol = 1; jcol <= i__1; ++jcol) {
	i__2 = *n;
	for (jrow = jcol + 1; jrow <= i__2; ++jrow) {
	    i__3 = b_subscr(jrow, jcol);
	    b[i__3].r = 0., b[i__3].i = 0.;
/* L10: */
	}
/* L20: */
    }

/*     Reduce A and B */

    i__1 = *ihi - 2;
    for (jcol = *ilo; jcol <= i__1; ++jcol) {

	i__2 = jcol + 2;
	for (jrow = *ihi; jrow >= i__2; --jrow) {

/*           Step 1: rotate rows JROW-1, JROW to kill A(JROW,JCOL) */

	    i__3 = a_subscr(jrow - 1, jcol);
	    ctemp.r = a[i__3].r, ctemp.i = a[i__3].i;
	    zlartg_(&ctemp, &a_ref(jrow, jcol), &c__, &s, &a_ref(jrow - 1, 
		    jcol));
	    i__3 = a_subscr(jrow, jcol);
	    a[i__3].r = 0., a[i__3].i = 0.;
	    i__3 = *n - jcol;
	    zrot_(&i__3, &a_ref(jrow - 1, jcol + 1), lda, &a_ref(jrow, jcol + 
		    1), lda, &c__, &s);
	    i__3 = *n + 2 - jrow;
	    zrot_(&i__3, &b_ref(jrow - 1, jrow - 1), ldb, &b_ref(jrow, jrow - 
		    1), ldb, &c__, &s);
	    if (ilq) {
		d_cnjg(&z__1, &s);
		zrot_(n, &q_ref(1, jrow - 1), &c__1, &q_ref(1, jrow), &c__1, &
			c__, &z__1);
	    }

/*           Step 2: rotate columns JROW, JROW-1 to kill B(JROW,JROW-1) */

	    i__3 = b_subscr(jrow, jrow);
	    ctemp.r = b[i__3].r, ctemp.i = b[i__3].i;
	    zlartg_(&ctemp, &b_ref(jrow, jrow - 1), &c__, &s, &b_ref(jrow, 
		    jrow));
	    i__3 = b_subscr(jrow, jrow - 1);
	    b[i__3].r = 0., b[i__3].i = 0.;
	    zrot_(ihi, &a_ref(1, jrow), &c__1, &a_ref(1, jrow - 1), &c__1, &
		    c__, &s);
	    i__3 = jrow - 1;
	    zrot_(&i__3, &b_ref(1, jrow), &c__1, &b_ref(1, jrow - 1), &c__1, &
		    c__, &s);
	    if (ilz) {
		zrot_(n, &z___ref(1, jrow), &c__1, &z___ref(1, jrow - 1), &
			c__1, &c__, &s);
	    }
/* L30: */
	}
/* L40: */
    }

    return 0;

/*     End of ZGGHRD */

} /* zgghrd_ */
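
A minimal calling sketch for zgghrd_ follows (matrix values invented for illustration; it assumes f2c.h and linkage against this translation unit). With COMPQ = 'I' and COMPZ = 'I' the routine builds Q and Z itself, so the caller only supplies a general A, an upper triangular B, and ILO = 1, IHI = N:

#include <stdio.h>
#include "f2c.h"   /* f2c types: integer, doublecomplex */

extern int zgghrd_(char *compq, char *compz, integer *n, integer *ilo,
                   integer *ihi, doublecomplex *a, integer *lda,
                   doublecomplex *b, integer *ldb, doublecomplex *q,
                   integer *ldq, doublecomplex *z__, integer *ldz,
                   integer *info);

int main(void)
{
    integer n = 3, ilo = 1, ihi = 3, lda = 3, ldb = 3, ldq = 3, ldz = 3,
            info = 0;
    /* general A and upper triangular B, both stored column-major */
    doublecomplex a[9] = {
        {1.,0.}, {4.,1.}, {7.,0.},
        {2.,0.}, {5.,0.}, {8.,1.},
        {3.,1.}, {6.,0.}, {9.,0.}
    };
    doublecomplex b[9] = {
        {1.,0.}, {0.,0.}, {0.,0.},
        {1.,1.}, {2.,0.}, {0.,0.},
        {0.,1.}, {1.,0.}, {3.,0.}
    };
    doublecomplex q[9], z[9];   /* filled by the routine when COMPQ/COMPZ = 'I' */

    zgghrd_("I", "I", &n, &ilo, &ihi, a, &lda, b, &ldb, q, &ldq, z, &ldz,
            &info);

    /* A is now upper Hessenberg, so A(3,1) should be exactly zero */
    printf("info = %ld, H(3,1) = (%g,%g)\n", (long)info, a[2].r, a[2].i);
    return 0;
}
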
Example #9
/* Subroutine */ int zhbtrd_(char *vect, char *uplo, integer *n, integer *kd, 
	doublecomplex *ab, integer *ldab, doublereal *d__, doublereal *e, 
	doublecomplex *q, integer *ldq, doublecomplex *work, integer *info)
{
    /* System generated locals */
    integer ab_dim1, ab_offset, q_dim1, q_offset, i__1, i__2, i__3, i__4, 
	    i__5, i__6;
    doublereal d__1;
    doublecomplex z__1;

    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);
    double z_abs(doublecomplex *);

    /* Local variables */
    integer i__, j, k, l;
    doublecomplex t;
    integer i2, j1, j2, nq, nr, kd1, ibl, iqb, kdn, jin, nrt, kdm1, inca, 
	    jend, lend, jinc;
    doublereal abst;
    integer incx, last;
    doublecomplex temp;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    integer j1end, j1inc, iqend;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *);
    logical initq, wantq, upper;
    extern /* Subroutine */ int zlar2v_(integer *, doublecomplex *, 
	    doublecomplex *, doublecomplex *, integer *, doublereal *, 
	    doublecomplex *, integer *);
    integer iqaend;
    extern /* Subroutine */ int xerbla_(char *, integer *), zlacgv_(
	    integer *, doublecomplex *, integer *), zlaset_(char *, integer *, 
	     integer *, doublecomplex *, doublecomplex *, doublecomplex *, 
	    integer *), zlartg_(doublecomplex *, doublecomplex *, 
	    doublereal *, doublecomplex *, doublecomplex *), zlargv_(integer *
, doublecomplex *, integer *, doublecomplex *, integer *, 
	    doublereal *, integer *), zlartv_(integer *, doublecomplex *, 
	    integer *, doublecomplex *, integer *, doublereal *, 
	    doublecomplex *, integer *);


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZHBTRD reduces a complex Hermitian band matrix A to real symmetric */
/*  tridiagonal form T by a unitary similarity transformation: */
/*  Q**H * A * Q = T. */

/*  Arguments */
/*  ========= */

/*  VECT    (input) CHARACTER*1 */
/*          = 'N':  do not form Q; */
/*          = 'V':  form Q; */
/*          = 'U':  update a matrix X, by forming X*Q. */

/*  UPLO    (input) CHARACTER*1 */
/*          = 'U':  Upper triangle of A is stored; */
/*          = 'L':  Lower triangle of A is stored. */

/*  N       (input) INTEGER */
/*          The order of the matrix A.  N >= 0. */

/*  KD      (input) INTEGER */
/*          The number of superdiagonals of the matrix A if UPLO = 'U', */
/*          or the number of subdiagonals if UPLO = 'L'.  KD >= 0. */

/*  AB      (input/output) COMPLEX*16 array, dimension (LDAB,N) */
/*          On entry, the upper or lower triangle of the Hermitian band */
/*          matrix A, stored in the first KD+1 rows of the array.  The */
/*          j-th column of A is stored in the j-th column of the array AB */
/*          as follows: */
/*          if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
/*          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+kd). */
/*          On exit, the diagonal elements of AB are overwritten by the */
/*          diagonal elements of the tridiagonal matrix T; if KD > 0, the */
/*          elements on the first superdiagonal (if UPLO = 'U') or the */
/*          first subdiagonal (if UPLO = 'L') are overwritten by the */
/*          off-diagonal elements of T; the rest of AB is overwritten by */
/*          values generated during the reduction. */
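/*          As a concrete illustration of the 'U' storage formula (values */
/*          chosen only to make the indexing explicit): with N = 5, KD = 2 */
/*          and UPLO = 'U', column 4 of AB holds A(2,4), A(3,4), A(4,4), */
/*          since kd+1+i-j gives 2+1+2-4 = 1 for i = 2 (so AB(1,4) = A(2,4)), */
/*          2 for i = 3, and 3 for i = 4 (the diagonal entry). */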

/*  LDAB    (input) INTEGER */
/*          The leading dimension of the array AB.  LDAB >= KD+1. */

/*  D       (output) DOUBLE PRECISION array, dimension (N) */
/*          The diagonal elements of the tridiagonal matrix T. */

/*  E       (output) DOUBLE PRECISION array, dimension (N-1) */
/*          The off-diagonal elements of the tridiagonal matrix T: */
/*          E(i) = T(i,i+1) if UPLO = 'U'; E(i) = T(i+1,i) if UPLO = 'L'. */

/*  Q       (input/output) COMPLEX*16 array, dimension (LDQ,N) */
/*          On entry, if VECT = 'U', then Q must contain an N-by-N */
/*          matrix X; if VECT = 'N' or 'V', then Q need not be set. */

/*          On exit: */
/*          if VECT = 'V', Q contains the N-by-N unitary matrix Q; */
/*          if VECT = 'U', Q contains the product X*Q; */
/*          if VECT = 'N', the array Q is not referenced. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. */
/*          LDQ >= 1, and LDQ >= N if VECT = 'V' or 'U'. */

/*  WORK    (workspace) COMPLEX*16 array, dimension (N) */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value */

/*  Further Details */
/*  =============== */

/*  Modified by Linda Kaufman, Bell Labs. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input parameters */

    /* Parameter adjustments */
    ab_dim1 = *ldab;
    ab_offset = 1 + ab_dim1;
    ab -= ab_offset;
    --d__;
    --e;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    --work;

    /* Function Body */
    initq = lsame_(vect, "V");
    wantq = initq || lsame_(vect, "U");
    upper = lsame_(uplo, "U");
    kd1 = *kd + 1;
    kdm1 = *kd - 1;
    incx = *ldab - 1;
    iqend = 1;

    *info = 0;
    if (! wantq && ! lsame_(vect, "N")) {
	*info = -1;
    } else if (! upper && ! lsame_(uplo, "L")) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*kd < 0) {
	*info = -4;
    } else if (*ldab < kd1) {
	*info = -6;
    } else if (*ldq < max(1,*n) && wantq) {
	*info = -10;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZHBTRD", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }

/*     Initialize Q to the unit matrix, if needed */

    if (initq) {
	zlaset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
    }

/*     Wherever possible, plane rotations are generated and applied in */
/*     vector operations of length NR over the index set J1:J2:KD1. */

/*     The real cosines and complex sines of the plane rotations are */
/*     stored in the arrays D and WORK. */
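
/*     For example, with KD = 3 (so KD1 = 4) and J1 = 5, one sweep handles */
/*     the rotations indexed by j = 5, 9, 13, ... up to J2, with the real */
/*     cosine of rotation j kept in D(j) and its complex sine in WORK(j). */
/*     (Indices here are chosen only to make the stride concrete.) */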

    inca = kd1 * *ldab;
/* Computing MIN */
    i__1 = *n - 1;
    kdn = min(i__1,*kd);
    if (upper) {

	if (*kd > 1) {

/*           Reduce to complex Hermitian tridiagonal form, working with */
/*           the upper triangle */

	    nr = 0;
	    j1 = kdn + 2;
	    j2 = 1;

	    i__1 = kd1 + ab_dim1;
	    i__2 = kd1 + ab_dim1;
	    d__1 = ab[i__2].r;
	    ab[i__1].r = d__1, ab[i__1].i = 0.;
	    i__1 = *n - 2;
	    for (i__ = 1; i__ <= i__1; ++i__) {

/*              Reduce i-th row of matrix to tridiagonal form */

		for (k = kdn + 1; k >= 2; --k) {
		    j1 += kdn;
		    j2 += kdn;

		    if (nr > 0) {

/*                    generate plane rotations to annihilate nonzero */
/*                    elements which have been created outside the band */

			zlargv_(&nr, &ab[(j1 - 1) * ab_dim1 + 1], &inca, &
				work[j1], &kd1, &d__[j1], &kd1);

/*                    apply rotations from the right */


/*                    Depending on the number of diagonals, either */
/*                    ZLARTV or ZROT is used */

			if (nr >= (*kd << 1) - 1) {
			    i__2 = *kd - 1;
			    for (l = 1; l <= i__2; ++l) {
				zlartv_(&nr, &ab[l + 1 + (j1 - 1) * ab_dim1], 
					&inca, &ab[l + j1 * ab_dim1], &inca, &
					d__[j1], &work[j1], &kd1);
/* L10: */
			    }

			} else {
			    jend = j1 + (nr - 1) * kd1;
			    i__2 = jend;
			    i__3 = kd1;
			    for (jinc = j1; i__3 < 0 ? jinc >= i__2 : jinc <= 
				    i__2; jinc += i__3) {
				zrot_(&kdm1, &ab[(jinc - 1) * ab_dim1 + 2], &
					c__1, &ab[jinc * ab_dim1 + 1], &c__1, 
					&d__[jinc], &work[jinc]);
/* L20: */
			    }
			}
		    }


		    if (k > 2) {
			if (k <= *n - i__ + 1) {

/*                       generate plane rotation to annihilate a(i,i+k-1) */
/*                       within the band */

			    zlartg_(&ab[*kd - k + 3 + (i__ + k - 2) * ab_dim1]
, &ab[*kd - k + 2 + (i__ + k - 1) * 
				    ab_dim1], &d__[i__ + k - 1], &work[i__ + 
				    k - 1], &temp);
			    i__3 = *kd - k + 3 + (i__ + k - 2) * ab_dim1;
			    ab[i__3].r = temp.r, ab[i__3].i = temp.i;

/*                       apply rotation from the right */

			    i__3 = k - 3;
			    zrot_(&i__3, &ab[*kd - k + 4 + (i__ + k - 2) * 
				    ab_dim1], &c__1, &ab[*kd - k + 3 + (i__ + 
				    k - 1) * ab_dim1], &c__1, &d__[i__ + k - 
				    1], &work[i__ + k - 1]);
			}
			++nr;
			j1 = j1 - kdn - 1;
		    }

/*                 apply plane rotations from both sides to diagonal */
/*                 blocks */

		    if (nr > 0) {
			zlar2v_(&nr, &ab[kd1 + (j1 - 1) * ab_dim1], &ab[kd1 + 
				j1 * ab_dim1], &ab[*kd + j1 * ab_dim1], &inca, 
				 &d__[j1], &work[j1], &kd1);
		    }

/*                 apply plane rotations from the left */

		    if (nr > 0) {
			zlacgv_(&nr, &work[j1], &kd1);
			if ((*kd << 1) - 1 < nr) {

/*                    Depending on the number of diagonals, either */
/*                    ZLARTV or ZROT is used */

			    i__3 = *kd - 1;
			    for (l = 1; l <= i__3; ++l) {
				if (j2 + l > *n) {
				    nrt = nr - 1;
				} else {
				    nrt = nr;
				}
				if (nrt > 0) {
				    zlartv_(&nrt, &ab[*kd - l + (j1 + l) * 
					    ab_dim1], &inca, &ab[*kd - l + 1 
					    + (j1 + l) * ab_dim1], &inca, &
					    d__[j1], &work[j1], &kd1);
				}
/* L30: */
			    }
			} else {
			    j1end = j1 + kd1 * (nr - 2);
			    if (j1end >= j1) {
				i__3 = j1end;
				i__2 = kd1;
				for (jin = j1; i__2 < 0 ? jin >= i__3 : jin <=
					 i__3; jin += i__2) {
				    i__4 = *kd - 1;
				    zrot_(&i__4, &ab[*kd - 1 + (jin + 1) * 
					    ab_dim1], &incx, &ab[*kd + (jin + 
					    1) * ab_dim1], &incx, &d__[jin], &
					    work[jin]);
/* L40: */
				}
			    }
/* Computing MIN */
			    i__2 = kdm1, i__3 = *n - j2;
			    lend = min(i__2,i__3);
			    last = j1end + kd1;
			    if (lend > 0) {
				zrot_(&lend, &ab[*kd - 1 + (last + 1) * 
					ab_dim1], &incx, &ab[*kd + (last + 1) 
					* ab_dim1], &incx, &d__[last], &work[
					last]);
			    }
			}
		    }

		    if (wantq) {

/*                    accumulate product of plane rotations in Q */

			if (initq) {

/*                 take advantage of the fact that Q was */
/*                 initially the Identity matrix */

			    iqend = max(iqend,j2);
/* Computing MAX */
			    i__2 = 0, i__3 = k - 3;
			    i2 = max(i__2,i__3);
			    iqaend = i__ * *kd + 1;
			    if (k == 2) {
				iqaend += *kd;
			    }
			    iqaend = min(iqaend,iqend);
			    i__2 = j2;
			    i__3 = kd1;
			    for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j 
				    += i__3) {
				ibl = i__ - i2 / kdm1;
				++i2;
/* Computing MAX */
				i__4 = 1, i__5 = j - ibl;
				iqb = max(i__4,i__5);
				nq = iqaend + 1 - iqb;
/* Computing MIN */
				i__4 = iqaend + *kd;
				iqaend = min(i__4,iqend);
				d_cnjg(&z__1, &work[j]);
				zrot_(&nq, &q[iqb + (j - 1) * q_dim1], &c__1, 
					&q[iqb + j * q_dim1], &c__1, &d__[j], 
					&z__1);
/* L50: */
			    }
			} else {

			    i__3 = j2;
			    i__2 = kd1;
			    for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j 
				    += i__2) {
				d_cnjg(&z__1, &work[j]);
				zrot_(n, &q[(j - 1) * q_dim1 + 1], &c__1, &q[
					j * q_dim1 + 1], &c__1, &d__[j], &
					z__1);
/* L60: */
			    }
			}

		    }

		    if (j2 + kdn > *n) {

/*                    adjust J2 to keep within the bounds of the matrix */

			--nr;
			j2 = j2 - kdn - 1;
		    }

		    i__2 = j2;
		    i__3 = kd1;
		    for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j += i__3) 
			    {

/*                    create nonzero element a(j-1,j+kd) outside the band */
/*                    and store it in WORK */

			i__4 = j + *kd;
			i__5 = j;
			i__6 = (j + *kd) * ab_dim1 + 1;
			z__1.r = work[i__5].r * ab[i__6].r - work[i__5].i * 
				ab[i__6].i, z__1.i = work[i__5].r * ab[i__6]
				.i + work[i__5].i * ab[i__6].r;
			work[i__4].r = z__1.r, work[i__4].i = z__1.i;
			i__4 = (j + *kd) * ab_dim1 + 1;
			i__5 = j;
			i__6 = (j + *kd) * ab_dim1 + 1;
			z__1.r = d__[i__5] * ab[i__6].r, z__1.i = d__[i__5] * 
				ab[i__6].i;
			ab[i__4].r = z__1.r, ab[i__4].i = z__1.i;
/* L70: */
		    }
/* L80: */
		}
/* L90: */
	    }
	}

	if (*kd > 0) {

/*           make off-diagonal elements real and copy them to E */

	    i__1 = *n - 1;
	    for (i__ = 1; i__ <= i__1; ++i__) {
		i__3 = *kd + (i__ + 1) * ab_dim1;
		t.r = ab[i__3].r, t.i = ab[i__3].i;
		abst = z_abs(&t);
		i__3 = *kd + (i__ + 1) * ab_dim1;
		ab[i__3].r = abst, ab[i__3].i = 0.;
		e[i__] = abst;
		if (abst != 0.) {
		    z__1.r = t.r / abst, z__1.i = t.i / abst;
		    t.r = z__1.r, t.i = z__1.i;
		} else {
		    t.r = 1., t.i = 0.;
		}
		if (i__ < *n - 1) {
		    i__3 = *kd + (i__ + 2) * ab_dim1;
		    i__2 = *kd + (i__ + 2) * ab_dim1;
		    z__1.r = ab[i__2].r * t.r - ab[i__2].i * t.i, z__1.i = ab[
			    i__2].r * t.i + ab[i__2].i * t.r;
		    ab[i__3].r = z__1.r, ab[i__3].i = z__1.i;
		}
		if (wantq) {
		    d_cnjg(&z__1, &t);
		    zscal_(n, &z__1, &q[(i__ + 1) * q_dim1 + 1], &c__1);
		}
/* L100: */
	    }
	} else {

/*           set E to zero if original matrix was diagonal */

	    i__1 = *n - 1;
	    for (i__ = 1; i__ <= i__1; ++i__) {
		e[i__] = 0.;
/* L110: */
	    }
	}

/*        copy diagonal elements to D */

	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    i__3 = i__;
	    i__2 = kd1 + i__ * ab_dim1;
	    d__[i__3] = ab[i__2].r;
/* L120: */
	}

    } else {

	if (*kd > 1) {

/*           Reduce to complex Hermitian tridiagonal form, working with */
/*           the lower triangle */

	    nr = 0;
	    j1 = kdn + 2;
	    j2 = 1;

	    i__1 = ab_dim1 + 1;
	    i__3 = ab_dim1 + 1;
	    d__1 = ab[i__3].r;
	    ab[i__1].r = d__1, ab[i__1].i = 0.;
	    i__1 = *n - 2;
	    for (i__ = 1; i__ <= i__1; ++i__) {

/*              Reduce i-th column of matrix to tridiagonal form */

		for (k = kdn + 1; k >= 2; --k) {
		    j1 += kdn;
		    j2 += kdn;

		    if (nr > 0) {

/*                    generate plane rotations to annihilate nonzero */
/*                    elements which have been created outside the band */

			zlargv_(&nr, &ab[kd1 + (j1 - kd1) * ab_dim1], &inca, &
				work[j1], &kd1, &d__[j1], &kd1);

/*                    apply plane rotations from one side */


/*                    Depending on the number of diagonals, either */
/*                    ZLARTV or ZROT is used */

			if (nr > (*kd << 1) - 1) {
			    i__3 = *kd - 1;
			    for (l = 1; l <= i__3; ++l) {
				zlartv_(&nr, &ab[kd1 - l + (j1 - kd1 + l) * 
					ab_dim1], &inca, &ab[kd1 - l + 1 + (
					j1 - kd1 + l) * ab_dim1], &inca, &d__[
					j1], &work[j1], &kd1);
/* L130: */
			    }
			} else {
			    jend = j1 + kd1 * (nr - 1);
			    i__3 = jend;
			    i__2 = kd1;
			    for (jinc = j1; i__2 < 0 ? jinc >= i__3 : jinc <= 
				    i__3; jinc += i__2) {
				zrot_(&kdm1, &ab[*kd + (jinc - *kd) * ab_dim1]
, &incx, &ab[kd1 + (jinc - *kd) * 
					ab_dim1], &incx, &d__[jinc], &work[
					jinc]);
/* L140: */
			    }
			}

		    }

		    if (k > 2) {
			if (k <= *n - i__ + 1) {

/*                       generate plane rotation to annihilate a(i+k-1,i) */
/*                       within the band */

			    zlartg_(&ab[k - 1 + i__ * ab_dim1], &ab[k + i__ * 
				    ab_dim1], &d__[i__ + k - 1], &work[i__ + 
				    k - 1], &temp);
			    i__2 = k - 1 + i__ * ab_dim1;
			    ab[i__2].r = temp.r, ab[i__2].i = temp.i;

/*                       apply rotation from the left */

			    i__2 = k - 3;
			    i__3 = *ldab - 1;
			    i__4 = *ldab - 1;
			    zrot_(&i__2, &ab[k - 2 + (i__ + 1) * ab_dim1], &
				    i__3, &ab[k - 1 + (i__ + 1) * ab_dim1], &
				    i__4, &d__[i__ + k - 1], &work[i__ + k - 
				    1]);
			}
			++nr;
			j1 = j1 - kdn - 1;
		    }

/*                 apply plane rotations from both sides to diagonal */
/*                 blocks */

		    if (nr > 0) {
			zlar2v_(&nr, &ab[(j1 - 1) * ab_dim1 + 1], &ab[j1 * 
				ab_dim1 + 1], &ab[(j1 - 1) * ab_dim1 + 2], &
				inca, &d__[j1], &work[j1], &kd1);
		    }

/*                 apply plane rotations from the right */


/*                    Depending on the number of diagonals, either */
/*                    ZLARTV or ZROT is used */

		    if (nr > 0) {
			zlacgv_(&nr, &work[j1], &kd1);
			if (nr > (*kd << 1) - 1) {
			    i__2 = *kd - 1;
			    for (l = 1; l <= i__2; ++l) {
				if (j2 + l > *n) {
				    nrt = nr - 1;
				} else {
				    nrt = nr;
				}
				if (nrt > 0) {
				    zlartv_(&nrt, &ab[l + 2 + (j1 - 1) * 
					    ab_dim1], &inca, &ab[l + 1 + j1 * 
					    ab_dim1], &inca, &d__[j1], &work[
					    j1], &kd1);
				}
/* L150: */
			    }
			} else {
			    j1end = j1 + kd1 * (nr - 2);
			    if (j1end >= j1) {
				i__2 = j1end;
				i__3 = kd1;
				for (j1inc = j1; i__3 < 0 ? j1inc >= i__2 : 
					j1inc <= i__2; j1inc += i__3) {
				    zrot_(&kdm1, &ab[(j1inc - 1) * ab_dim1 + 
					    3], &c__1, &ab[j1inc * ab_dim1 + 
					    2], &c__1, &d__[j1inc], &work[
					    j1inc]);
/* L160: */
				}
			    }
/* Computing MIN */
			    i__3 = kdm1, i__2 = *n - j2;
			    lend = min(i__3,i__2);
			    last = j1end + kd1;
			    if (lend > 0) {
				zrot_(&lend, &ab[(last - 1) * ab_dim1 + 3], &
					c__1, &ab[last * ab_dim1 + 2], &c__1, 
					&d__[last], &work[last]);
			    }
			}
		    }



		    if (wantq) {

/*                    accumulate product of plane rotations in Q */

			if (initq) {

/*                 take advantage of the fact that Q was */
/*                 initially the Identity matrix */

			    iqend = max(iqend,j2);
/* Computing MAX */
			    i__3 = 0, i__2 = k - 3;
			    i2 = max(i__3,i__2);
			    iqaend = i__ * *kd + 1;
			    if (k == 2) {
				iqaend += *kd;
			    }
			    iqaend = min(iqaend,iqend);
			    i__3 = j2;
			    i__2 = kd1;
			    for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j 
				    += i__2) {
				ibl = i__ - i2 / kdm1;
				++i2;
/* Computing MAX */
				i__4 = 1, i__5 = j - ibl;
				iqb = max(i__4,i__5);
				nq = iqaend + 1 - iqb;
/* Computing MIN */
				i__4 = iqaend + *kd;
				iqaend = min(i__4,iqend);
				zrot_(&nq, &q[iqb + (j - 1) * q_dim1], &c__1, 
					&q[iqb + j * q_dim1], &c__1, &d__[j], 
					&work[j]);
/* L170: */
			    }
			} else {

			    i__2 = j2;
			    i__3 = kd1;
			    for (j = j1; i__3 < 0 ? j >= i__2 : j <= i__2; j 
				    += i__3) {
				zrot_(n, &q[(j - 1) * q_dim1 + 1], &c__1, &q[
					j * q_dim1 + 1], &c__1, &d__[j], &
					work[j]);
/* L180: */
			    }
			}
		    }

		    if (j2 + kdn > *n) {

/*                    adjust J2 to keep within the bounds of the matrix */

			--nr;
			j2 = j2 - kdn - 1;
		    }

		    i__3 = j2;
		    i__2 = kd1;
		    for (j = j1; i__2 < 0 ? j >= i__3 : j <= i__3; j += i__2) 
			    {

/*                    create nonzero element a(j+kd,j-1) outside the */
/*                    band and store it in WORK */

			i__4 = j + *kd;
			i__5 = j;
			i__6 = kd1 + j * ab_dim1;
			z__1.r = work[i__5].r * ab[i__6].r - work[i__5].i * 
				ab[i__6].i, z__1.i = work[i__5].r * ab[i__6]
				.i + work[i__5].i * ab[i__6].r;
			work[i__4].r = z__1.r, work[i__4].i = z__1.i;
			i__4 = kd1 + j * ab_dim1;
			i__5 = j;
			i__6 = kd1 + j * ab_dim1;
			z__1.r = d__[i__5] * ab[i__6].r, z__1.i = d__[i__5] * 
				ab[i__6].i;
			ab[i__4].r = z__1.r, ab[i__4].i = z__1.i;
/* L190: */
		    }
/* L200: */
		}
/* L210: */
	    }
	}

	if (*kd > 0) {

/*           make off-diagonal elements real and copy them to E */

	    i__1 = *n - 1;
	    for (i__ = 1; i__ <= i__1; ++i__) {
		i__2 = i__ * ab_dim1 + 2;
		t.r = ab[i__2].r, t.i = ab[i__2].i;
		abst = z_abs(&t);
		i__2 = i__ * ab_dim1 + 2;
		ab[i__2].r = abst, ab[i__2].i = 0.;
		e[i__] = abst;
		if (abst != 0.) {
		    z__1.r = t.r / abst, z__1.i = t.i / abst;
		    t.r = z__1.r, t.i = z__1.i;
		} else {
		    t.r = 1., t.i = 0.;
		}
		if (i__ < *n - 1) {
		    i__2 = (i__ + 1) * ab_dim1 + 2;
		    i__3 = (i__ + 1) * ab_dim1 + 2;
		    z__1.r = ab[i__3].r * t.r - ab[i__3].i * t.i, z__1.i = ab[
			    i__3].r * t.i + ab[i__3].i * t.r;
		    ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
		}
		if (wantq) {
		    zscal_(n, &t, &q[(i__ + 1) * q_dim1 + 1], &c__1);
		}
/* L220: */
	    }
	} else {

/*           set E to zero if original matrix was diagonal */

	    i__1 = *n - 1;
	    for (i__ = 1; i__ <= i__1; ++i__) {
		e[i__] = 0.;
/* L230: */
	    }
	}

/*        copy diagonal elements to D */

	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    i__2 = i__;
	    i__3 = i__ * ab_dim1 + 1;
	    d__[i__2] = ab[i__3].r;
/* L240: */
	}
    }

    return 0;

/*     End of ZHBTRD */

} /* zhbtrd_ */
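
A minimal calling sketch for zhbtrd_ (band entries invented for illustration; it assumes f2c.h and linkage against this translation unit). The example stores a 4-by-4 Hermitian matrix with one subdiagonal in lower band form (UPLO = 'L', LDAB = KD+1) and requests the unitary matrix Q:

#include <stdio.h>
#include "f2c.h"   /* f2c types: integer, doublereal, doublecomplex */

extern int zhbtrd_(char *vect, char *uplo, integer *n, integer *kd,
                   doublecomplex *ab, integer *ldab, doublereal *d__,
                   doublereal *e, doublecomplex *q, integer *ldq,
                   doublecomplex *work, integer *info);

int main(void)
{
    integer n = 4, kd = 1, ldab = 2, ldq = 4, info = 0;
    /* UPLO = 'L': AB(1,j) = A(j,j) (real), AB(2,j) = A(j+1,j); column-major */
    doublecomplex ab[8] = {
        {4.,0.}, {1., 1.},    /* A(1,1), A(2,1) */
        {3.,0.}, {0., 2.},    /* A(2,2), A(3,2) */
        {2.,0.}, {1.,-1.},    /* A(3,3), A(4,3) */
        {1.,0.}, {0., 0.}     /* A(4,4), unused  */
    };
    doublereal d[4], e[3];        /* diagonal and off-diagonal of T */
    doublecomplex q[16], work[4];

    zhbtrd_("V", "L", &n, &kd, ab, &ldab, d, e, q, &ldq, work, &info);

    printf("info = %ld, d[0] = %g, e[0] = %g\n", (long)info, d[0], e[0]);
    return 0;
}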