Example #1
/* Subroutine */ int clapll_(integer *n, complex *x, integer *incx, complex *
	y, integer *incy, real *ssmin)
{
/*  -- LAPACK auxiliary routine (version 2.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    Given two column vectors X and Y, let   

                         A = ( X Y ).   

    The subroutine first computes the QR factorization A = Q*R and then   
    computes the SVD of the 2-by-2 upper triangular matrix R.  The   
    smaller singular value of R is returned in SSMIN, which is used as a   
    measure of the linear dependence of the vectors X and Y.   

    Arguments   
    =========   

    N       (input) INTEGER   
            The length of the vectors X and Y.   

    X       (input/output) COMPLEX array, dimension (1+(N-1)*INCX)   
            On entry, X contains the N-vector X.   
            On exit, X is overwritten.   

    INCX    (input) INTEGER   
            The increment between successive elements of X. INCX > 0.   

    Y       (input/output) COMPLEX array, dimension (1+(N-1)*INCY)   
            On entry, Y contains the N-vector Y.   
            On exit, Y is overwritten.   

    INCY    (input) INTEGER   
            The increment between successive elements of Y. INCY > 0.   

    SSMIN   (output) REAL   
            The smallest singular value of the N-by-2 matrix A = ( X Y ). 
  

    ===================================================================== 
  


       Quick return if possible   

    
   Parameter adjustments   
       Function Body */
    /* System generated locals */
    integer i__1;
    real r__1, r__2, r__3;
    complex q__1, q__2, q__3, q__4;
    /* Builtin functions */
    void r_cnjg(complex *, complex *);
    double c_abs(complex *);
    /* Local variables */
    extern /* Subroutine */ int slas2_(real *, real *, real *, real *, real *)
	    ;
    static complex c;
    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer 
	    *, complex *, integer *);
    extern /* Subroutine */ int caxpy_(integer *, complex *, complex *, 
	    integer *, complex *, integer *);
    static real ssmax;
    static complex a11, a12, a22;
    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *, 
	    integer *, complex *);
    static complex tau;


#define Y(I) y[(I)-1]
#define X(I) x[(I)-1]


    if (*n <= 1) {
	*ssmin = 0.f;
	return 0;
    }

/*     Compute the QR factorization of the N-by-2 matrix ( X Y ) */

    clarfg_(n, &X(1), &X(*incx + 1), incx, &tau);
    a11.r = X(1).r, a11.i = X(1).i;
    X(1).r = 1.f, X(1).i = 0.f;

    r_cnjg(&q__3, &tau);
    q__2.r = -(doublereal)q__3.r, q__2.i = -(doublereal)q__3.i;
    cdotc_(&q__4, n, &X(1), incx, &Y(1), incy);
    q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r * q__4.i + 
	    q__2.i * q__4.r;
    c.r = q__1.r, c.i = q__1.i;
    caxpy_(n, &c, &X(1), incx, &Y(1), incy);

    i__1 = *n - 1;
    clarfg_(&i__1, &Y(*incy + 1), &Y((*incy << 1) + 1), incy, &tau);

    a12.r = Y(1).r, a12.i = Y(1).i;
    i__1 = *incy + 1;
    a22.r = Y(*incy+1).r, a22.i = Y(*incy+1).i;

/*     Compute the SVD of 2-by-2 Upper triangular matrix. */

    r__1 = c_abs(&a11);
    r__2 = c_abs(&a12);
    r__3 = c_abs(&a22);
    slas2_(&r__1, &r__2, &r__3, ssmin, &ssmax);

    return 0;

/*     End of CLAPLL */

} /* clapll_ */
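/*     A minimal usage sketch (not part of LAPACK): measuring how close two
       complex 3-vectors are to being linearly dependent by calling CLAPLL
       above.  It assumes the f2c types (integer, real, complex) from "f2c.h"
       and that the object is linked against clapll_; the sample vectors and
       the helper name are made up for illustration. */

#include <stdio.h>
#include "f2c.h"

extern int clapll_(integer *n, complex *x, integer *incx, complex *y,
	integer *incy, real *ssmin);

static int example_clapll(void)
{
    integer n = 3, incx = 1, incy = 1;
    real ssmin;
    /* y is chosen close to 2*x, so SSMIN should come out small. */
    complex x[3] = { {1.f, 0.f}, {0.f, 1.f}, {2.f, -1.f} };
    complex y[3] = { {2.f, 0.f}, {0.f, 2.f}, {4.f, -1.9f} };

    clapll_(&n, x, &incx, y, &incy, &ssmin);	/* X and Y are overwritten */
    printf("smallest singular value of ( X Y ) = %g\n", (double) ssmin);
    return 0;
}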
Example #2
/* Subroutine */ int cdrgev_(integer *nsizes, integer *nn, integer *ntypes, 
	logical *dotype, integer *iseed, real *thresh, integer *nounit, 
	complex *a, integer *lda, complex *b, complex *s, complex *t, complex 
	*q, integer *ldq, complex *z__, complex *qe, integer *ldqe, complex *
	alpha, complex *beta, complex *alpha1, complex *beta1, complex *work, 
	integer *lwork, real *rwork, real *result, integer *info)
{
    /* Initialized data */

    static integer kclass[26] = { 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,
	    2,2,2,3 };
    static integer kbmagn[26] = { 1,1,1,1,1,1,1,1,3,2,3,2,2,3,1,1,1,1,1,1,1,3,
	    2,3,2,1 };
    static integer ktrian[26] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
	    1,1,1,1 };
    static logical lasign[26] = { FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,
	    TRUE_,FALSE_,TRUE_,TRUE_,FALSE_,FALSE_,TRUE_,TRUE_,TRUE_,FALSE_,
	    TRUE_,FALSE_,FALSE_,FALSE_,TRUE_,TRUE_,TRUE_,TRUE_,TRUE_,FALSE_ };
    static logical lbsign[26] = { FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,
	    FALSE_,TRUE_,FALSE_,FALSE_,TRUE_,TRUE_,FALSE_,FALSE_,TRUE_,FALSE_,
	    TRUE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,
	    FALSE_ };
    static integer kz1[6] = { 0,1,2,1,3,3 };
    static integer kz2[6] = { 0,0,1,2,1,1 };
    static integer kadd[6] = { 0,0,0,0,3,2 };
    static integer katype[26] = { 0,1,0,1,2,3,4,1,4,4,1,1,4,4,4,2,4,5,8,7,9,4,
	    4,4,4,0 };
    static integer kbtype[26] = { 0,0,1,1,2,-3,1,4,1,1,4,4,1,1,-4,2,-4,8,8,8,
	    8,8,8,8,8,0 };
    static integer kazero[26] = { 1,1,1,1,1,1,2,1,2,2,1,1,2,2,3,1,3,5,5,5,5,3,
	    3,3,3,1 };
    static integer kbzero[26] = { 1,1,1,1,1,1,1,2,1,1,2,2,1,1,4,1,4,6,6,6,6,4,
	    4,4,4,1 };
    static integer kamagn[26] = { 1,1,1,1,1,1,1,1,2,3,2,3,2,3,1,1,1,1,1,1,1,2,
	    3,3,2,1 };

    /* Format strings */
    static char fmt_9999[] = "(\002 CDRGEV: \002,a,\002 returned INFO=\002,i"
	    "6,\002.\002,/3x,\002N=\002,i6,\002, JTYPE=\002,i6,\002, ISEED="
	    "(\002,3(i5,\002,\002),i5,\002)\002)";
    static char fmt_9998[] = "(\002 CDRGEV: \002,a,\002 Eigenvectors from"
	    " \002,a,\002 incorrectly \002,\002normalized.\002,/\002 Bits of "
	    "error=\002,0p,g10.3,\002,\002,3x,\002N=\002,i4,\002, JTYPE=\002,"
	    "i3,\002, ISEED=(\002,3(i4,\002,\002),i5,\002)\002)";
    static char fmt_9997[] = "(/1x,a3,\002 -- Complex Generalized eigenvalue"
	    " problem \002,\002driver\002)";
    static char fmt_9996[] = "(\002 Matrix types (see CDRGEV for details):"
	    " \002)";
    static char fmt_9995[] = "(\002 Special Matrices:\002,23x,\002(J'=transp"
	    "osed Jordan block)\002,/\002   1=(0,0)  2=(I,0)  3=(0,I)  4=(I,I"
	    ")  5=(J',J')  \002,\0026=(diag(J',I), diag(I,J'))\002,/\002 Diag"
	    "onal Matrices:  ( \002,\002D=diag(0,1,2,...) )\002,/\002   7=(D,"
	    "I)   9=(large*D, small*I\002,\002)  11=(large*I, small*D)  13=(l"
	    "arge*D, large*I)\002,/\002   8=(I,D)  10=(small*D, large*I)  12="
	    "(small*I, large*D) \002,\002 14=(small*D, small*I)\002,/\002  15"
	    "=(D, reversed D)\002)";
    static char fmt_9994[] = "(\002 Matrices Rotated by Random \002,a,\002 M"
	    "atrices U, V:\002,/\002  16=Transposed Jordan Blocks            "
	    " 19=geometric \002,\002alpha, beta=0,1\002,/\002  17=arithm. alp"
	    "ha&beta             \002,\002      20=arithmetic alpha, beta=0,"
	    "1\002,/\002  18=clustered \002,\002alpha, beta=0,1            21"
	    "=random alpha, beta=0,1\002,/\002 Large & Small Matrices:\002,"
	    "/\002  22=(large, small)   \002,\00223=(small,large)    24=(smal"
	    "l,small)    25=(large,large)\002,/\002  26=random O(1) matrices"
	    ".\002)";
    static char fmt_9993[] = "(/\002 Tests performed:    \002,/\002 1 = max "
	    "| ( b A - a B )'*l | / const.,\002,/\002 2 = | |VR(i)| - 1 | / u"
	    "lp,\002,/\002 3 = max | ( b A - a B )*r | / const.\002,/\002 4 ="
	    " | |VL(i)| - 1 | / ulp,\002,/\002 5 = 0 if W same no matter if r"
	    " or l computed,\002,/\002 6 = 0 if l same no matter if l compute"
	    "d,\002,/\002 7 = 0 if r same no matter if r computed,\002,/1x)";
    static char fmt_9992[] = "(\002 Matrix order=\002,i5,\002, type=\002,i2"
	    ",\002, seed=\002,4(i4,\002,\002),\002 result \002,i2,\002 is\002"
	    ",0p,f8.2)";
    static char fmt_9991[] = "(\002 Matrix order=\002,i5,\002, type=\002,i2"
	    ",\002, seed=\002,4(i4,\002,\002),\002 result \002,i2,\002 is\002"
	    ",1p,e10.3)";

    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, qe_dim1, 
	    qe_offset, s_dim1, s_offset, t_dim1, t_offset, z_dim1, z_offset, 
	    i__1, i__2, i__3, i__4, i__5, i__6, i__7;
    real r__1, r__2;
    complex q__1, q__2, q__3;

    /* Builtin functions */
    double r_sign(real *, real *), c_abs(complex *);
    void r_cnjg(complex *, complex *);
    integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void);

    /* Local variables */
    static integer iadd, ierr, nmax, i__, j, n;
    static logical badnn;
    extern /* Subroutine */ int cget52_(logical *, integer *, complex *, 
	    integer *, complex *, integer *, complex *, integer *, complex *, 
	    complex *, complex *, real *, real *), cggev_(char *, char *, 
	    integer *, complex *, integer *, complex *, integer *, complex *, 
	    complex *, complex *, integer *, complex *, integer *, complex *, 
	    integer *, real *, integer *);
    static real rmagn[4];
    static complex ctemp;
    static integer nmats, jsize, nerrs, jtype, n1;
    extern /* Subroutine */ int clatm4_(integer *, integer *, integer *, 
	    integer *, logical *, real *, real *, real *, integer *, integer *
	    , complex *, integer *), cunm2r_(char *, char *, integer *, 
	    integer *, integer *, complex *, integer *, complex *, complex *, 
	    integer *, complex *, integer *);
    static integer jc, nb, in;
    extern /* Subroutine */ int slabad_(real *, real *);
    static integer jr;
    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *, 
	    integer *, complex *);
    extern /* Complex */ VOID clarnd_(complex *, integer *, integer *);
    extern doublereal slamch_(char *);
    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex 
	    *, integer *, complex *, integer *), claset_(char *, 
	    integer *, integer *, complex *, complex *, complex *, integer *);
    static real safmin, safmax;
    static integer ioldsd[4];
    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
	    integer *, integer *, ftnlen, ftnlen);
    extern /* Subroutine */ int alasvm_(char *, integer *, integer *, integer 
	    *, integer *), xerbla_(char *, integer *);
    static integer minwrk, maxwrk;
    static real ulpinv;
    static integer mtypes, ntestt;
    static real ulp;

    /* Fortran I/O blocks */
    static cilist io___40 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___42 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___43 = { 0, 0, 0, fmt_9998, 0 };
    static cilist io___44 = { 0, 0, 0, fmt_9998, 0 };
    static cilist io___45 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___46 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___47 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___48 = { 0, 0, 0, fmt_9997, 0 };
    static cilist io___49 = { 0, 0, 0, fmt_9996, 0 };
    static cilist io___50 = { 0, 0, 0, fmt_9995, 0 };
    static cilist io___51 = { 0, 0, 0, fmt_9994, 0 };
    static cilist io___52 = { 0, 0, 0, fmt_9993, 0 };
    static cilist io___53 = { 0, 0, 0, fmt_9992, 0 };
    static cilist io___54 = { 0, 0, 0, fmt_9991, 0 };



#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]
#define q_subscr(a_1,a_2) (a_2)*q_dim1 + a_1
#define q_ref(a_1,a_2) q[q_subscr(a_1,a_2)]
#define z___subscr(a_1,a_2) (a_2)*z_dim1 + a_1
#define z___ref(a_1,a_2) z__[z___subscr(a_1,a_2)]
#define qe_subscr(a_1,a_2) (a_2)*qe_dim1 + a_1
#define qe_ref(a_1,a_2) qe[qe_subscr(a_1,a_2)]


/*  -- LAPACK test routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   


    Purpose   
    =======   

    CDRGEV checks the nonsymmetric generalized eigenvalue problem driver   
    routine CGGEV.   

    CGGEV computes for a pair of n-by-n nonsymmetric matrices (A,B) the   
    generalized eigenvalues and, optionally, the left and right   
    eigenvectors.   

    A generalized eigenvalue for a pair of matrices (A,B) is a scalar w   
    or a ratio  alpha/beta = w, such that A - w*B is singular.  It is   
    usually represented as the pair (alpha,beta), as there is a reasonable   
    interpretation for beta=0, and even for both being zero.   

    A right generalized eigenvector corresponding to a generalized   
    eigenvalue  w  for a pair of matrices (A,B) is a vector r  such that   
    (A - wB) * r = 0.  A left generalized eigenvector is a vector l such   
    that l**H * (A - wB) = 0, where l**H is the conjugate-transpose of l.   

    When CDRGEV is called, a number of matrix "sizes" ("n's") and a   
    number of matrix "types" are specified.  For each size ("n")   
    and each type of matrix, a pair of matrices (A, B) will be generated   
    and used for testing.  For each matrix pair, the following tests   
    will be performed and compared with the threshold THRESH.   

    Results from CGGEV:   

    (1)  max over all left eigenvalue/-vector pairs (alpha/beta,l) of   

         | VL**H * (beta A - alpha B) |/( ulp max(|beta A|, |alpha B|) )   

         where VL**H is the conjugate-transpose of VL.   

    (2)  | |VL(i)| - 1 | / ulp and whether largest component real   

         VL(i) denotes the i-th column of VL.   

    (3)  max over all right eigenvalue/-vector pairs (alpha/beta,r) of   

         | (beta A - alpha B) * VR | / ( ulp max(|beta A|, |alpha B|) )   

    (4)  | |VR(i)| - 1 | / ulp and whether largest component real   

         VR(i) denotes the i-th column of VR.   

    (5)  W(full) = W(partial)   
         W(full) denotes the eigenvalues computed when both l and r   
         are also computed, and W(partial) denotes the eigenvalues   
         computed when only W, only W and r, or only W and l are   
         computed.   

    (6)  VL(full) = VL(partial)   
         VL(full) denotes the left eigenvectors computed when both l   
         and r are computed, and VL(partial) denotes the result   
         when only l is computed.   

    (7)  VR(full) = VR(partial)   
         VR(full) denotes the right eigenvectors computed when both l   
         and r are also computed, and VR(partial) denotes the result   
         when only r is computed.   


    Test Matrices   
    ---- --------   

    The sizes of the test matrices are specified by an array   
    NN(1:NSIZES); the value of each element NN(j) specifies one size.   
    The "types" are specified by a logical array DOTYPE( 1:NTYPES ); if   
    DOTYPE(j) is .TRUE., then matrix type "j" will be generated.   
    Currently, the list of possible types is:   

    (1)  ( 0, 0 )         (a pair of zero matrices)   

    (2)  ( I, 0 )         (an identity and a zero matrix)   

    (3)  ( 0, I )         (a zero and an identity matrix)   

    (4)  ( I, I )         (a pair of identity matrices)   

    (5)  ( J**T, J**T )   (a pair of transposed Jordan blocks)   

    (6)  ( X, Y )         where  X = ( J**T  0 )  and  Y = ( I   0    )   
                                     ( 0     I )           ( 0   J**T )   
                          and I is a k x k identity and J a (k+1)x(k+1)   
                          Jordan block; k=(N-1)/2   

    (7)  ( D, I )         where D is diag( 0, 1,..., N-1 ) (a diagonal   
                          matrix with those diagonal entries.)   
    (8)  ( I, D )   

    (9)  ( big*D, small*I ) where "big" is near overflow and small=1/big   

    (10) ( small*D, big*I )   

    (11) ( big*I, small*D )   

    (12) ( small*I, big*D )   

    (13) ( big*D, big*I )   

    (14) ( small*D, small*I )   

    (15) ( D1, D2 )        where D1 is diag( 0, 0, 1, ..., N-3, 0 ) and   
                           D2 is diag( 0, N-3, N-4,..., 1, 0, 0 )   
    (16) Q ( J**T, J**T ) Z   where Q and Z are random orthogonal matrices.   

    (17) Q ( T1, T2 ) Z    where T1 and T2 are upper triangular matrices   
                           with random O(1) entries above the diagonal   
                           and diagonal entries diag(T1) =   
                           ( 0, 0, 1, ..., N-3, 0 ) and diag(T2) =   
                           ( 0, N-3, N-4,..., 1, 0, 0 )   

    (18) Q ( T1, T2 ) Z    diag(T1) = ( 0, 0, 1, 1, s, ..., s, 0 )   
                           diag(T2) = ( 0, 1, 0, 1,..., 1, 0 )   
                           s = machine precision.   

    (19) Q ( T1, T2 ) Z    diag(T1)=( 0,0,1,1, 1-d, ..., 1-(N-5)*d=s, 0 )   
                           diag(T2) = ( 0, 1, 0, 1, ..., 1, 0 )   

    (20) Q ( T1, T2 ) Z    diag(T1) = ( 0, 0, 1, 1, a, ..., a**(N-5) = s, 0 )   
                           diag(T2) = ( 0, 1, 0, 1, ..., 1, 0, 0 )   

    (21) Q ( T1, T2 ) Z    diag(T1)=( 0, 0, 1, r1, r2, ..., r(N-4), 0 )   
                           diag(T2) = ( 0, 1, 0, 1, ..., 1, 0, 0 )   
                           where r1,..., r(N-4) are random.   

    (22) Q ( big*T1, small*T2 ) Z    diag(T1) = ( 0, 0, 1, ..., N-3, 0 )   
                                     diag(T2) = ( 0, 1, ..., 1, 0, 0 )   

    (23) Q ( small*T1, big*T2 ) Z    diag(T1) = ( 0, 0, 1, ..., N-3, 0 )   
                                     diag(T2) = ( 0, 1, ..., 1, 0, 0 )   

    (24) Q ( small*T1, small*T2 ) Z  diag(T1) = ( 0, 0, 1, ..., N-3, 0 )   
                                     diag(T2) = ( 0, 1, ..., 1, 0, 0 )   

    (25) Q ( big*T1, big*T2 ) Z      diag(T1) = ( 0, 0, 1, ..., N-3, 0 )   
                                     diag(T2) = ( 0, 1, ..., 1, 0, 0 )   

    (26) Q ( T1, T2 ) Z     where T1 and T2 are random upper-triangular   
                            matrices.   


    Arguments   
    =========   

    NSIZES  (input) INTEGER   
            The number of sizes of matrices to use.  If it is zero,   
            CDRGEV does nothing.  NSIZES >= 0.   

    NN      (input) INTEGER array, dimension (NSIZES)   
            An array containing the sizes to be used for the matrices.   
            Zero values will be skipped.  NN >= 0.   

    NTYPES  (input) INTEGER   
            The number of elements in DOTYPE.   If it is zero, CDRGEV   
            does nothing.  It must be at least zero.  If it is MAXTYP+1   
            and NSIZES is 1, then an additional type, MAXTYP+1 is   
            defined, which is to use whatever matrix is in A.  This   
            is only useful if DOTYPE(1:MAXTYP) is .FALSE. and   
            DOTYPE(MAXTYP+1) is .TRUE. .   

    DOTYPE  (input) LOGICAL array, dimension (NTYPES)   
            If DOTYPE(j) is .TRUE., then for each size in NN a   
            matrix of that size and of type j will be generated.   
            If NTYPES is smaller than the maximum number of types   
            defined (PARAMETER MAXTYP), then types NTYPES+1 through   
            MAXTYP will not be generated. If NTYPES is larger   
            than MAXTYP, DOTYPE(MAXTYP+1) through DOTYPE(NTYPES)   
            will be ignored.   

    ISEED   (input/output) INTEGER array, dimension (4)   
            On entry ISEED specifies the seed of the random number   
            generator. The array elements should be between 0 and 4095;   
            if not they will be reduced mod 4096. Also, ISEED(4) must   
            be odd.  The random number generator uses a linear   
            congruential sequence limited to small integers, and so   
            should produce machine independent random numbers. The   
            values of ISEED are changed on exit, and can be used in the   
            next call to CDRGEV to continue the same random number   
            sequence.   

    THRESH  (input) REAL   
            A test will count as "failed" if the "error", computed as   
            described above, exceeds THRESH.  Note that the error is   
            scaled to be O(1), so THRESH should be a reasonably small   
            multiple of 1, e.g., 10 or 100.  In particular, it should   
            not depend on the precision (single vs. double) or the size   
            of the matrix.  It must be at least zero.   

    NOUNIT  (input) INTEGER   
            The FORTRAN unit number for printing out error messages   
            (e.g., if a routine returns IERR not equal to 0.)   

    A       (input/workspace) COMPLEX array, dimension(LDA, max(NN))   
            Used to hold the original A matrix.  Used as input only   
            if NTYPES=MAXTYP+1, DOTYPE(1:MAXTYP)=.FALSE., and   
            DOTYPE(MAXTYP+1)=.TRUE.   

    LDA     (input) INTEGER   
            The leading dimension of A, B, S, and T.   
            It must be at least 1 and at least max( NN ).   

    B       (input/workspace) COMPLEX array, dimension(LDA, max(NN))   
            Used to hold the original B matrix.  Used as input only   
            if NTYPES=MAXTYP+1, DOTYPE(1:MAXTYP)=.FALSE., and   
            DOTYPE(MAXTYP+1)=.TRUE.   

    S       (workspace) COMPLEX array, dimension (LDA, max(NN))   
            The Schur form matrix computed from A by CGGEV.  On exit, S   
            contains the Schur form matrix corresponding to the matrix   
            in A.   

    T       (workspace) COMPLEX array, dimension (LDA, max(NN))   
            The upper triangular matrix computed from B by CGGEV.   

    Q      (workspace) COMPLEX array, dimension (LDQ, max(NN))   
            The (left) eigenvectors matrix computed by CGGEV.   

    LDQ     (input) INTEGER   
            The leading dimension of Q and Z. It must   
            be at least 1 and at least max( NN ).   

    Z       (workspace) COMPLEX array, dimension( LDQ, max(NN) )   
            The (right) eigenvectors matrix computed by CGGEV.   

    QE      (workspace) COMPLEX array, dimension( LDQ, max(NN) )   
            QE holds the computed right or left eigenvectors.   

    LDQE    (input) INTEGER   
            The leading dimension of QE. LDQE >= max(1,max(NN)).   

    ALPHA   (workspace) COMPLEX array, dimension (max(NN))   
    BETA    (workspace) COMPLEX array, dimension (max(NN))   
            The generalized eigenvalues of (A,B) computed by CGGEV.   
            ALPHA(k) / BETA(k) is the k-th generalized eigenvalue of   
            A and B.   

    ALPHA1  (workspace) COMPLEX array, dimension (max(NN))   
    BETA1   (workspace) COMPLEX array, dimension (max(NN))   
            Like ALPHA and BETA, these arrays contain the eigenvalues   
            of A and B, but those computed when CGGEV only computes a   
            partial eigendecomposition, i.e. not also the left and   
            right eigenvectors.   

    WORK    (workspace) COMPLEX array, dimension (LWORK)   

    LWORK   (input) INTEGER   
            The number of entries in WORK.  LWORK >= N*(N+1)   

    RWORK   (workspace) REAL array, dimension (8*N)   
            Real workspace.   

    RESULT  (output) REAL array, dimension (7)   
            The values computed by the tests described above.   
            The values are currently limited to 1/ulp, to avoid overflow.   

    INFO    (output) INTEGER   
            = 0:  successful exit   
            < 0:  if INFO = -i, the i-th argument had an illegal value.   
            > 0:  A routine returned an error code.  INFO is the   
                  absolute value of the INFO value returned.   

    =====================================================================   

       Parameter adjustments */
    --nn;
    --dotype;
    --iseed;
    t_dim1 = *lda;
    t_offset = 1 + t_dim1 * 1;
    t -= t_offset;
    s_dim1 = *lda;
    s_offset = 1 + s_dim1 * 1;
    s -= s_offset;
    b_dim1 = *lda;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    z_dim1 = *ldq;
    z_offset = 1 + z_dim1 * 1;
    z__ -= z_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    qe_dim1 = *ldqe;
    qe_offset = 1 + qe_dim1 * 1;
    qe -= qe_offset;
    --alpha;
    --beta;
    --alpha1;
    --beta1;
    --work;
    --rwork;
    --result;

    /* Function Body   

       Check for errors */

    *info = 0;

    badnn = FALSE_;
    nmax = 1;
    i__1 = *nsizes;
    for (j = 1; j <= i__1; ++j) {
/* Computing MAX */
	i__2 = nmax, i__3 = nn[j];
	nmax = max(i__2,i__3);
	if (nn[j] < 0) {
	    badnn = TRUE_;
	}
/* L10: */
    }

    if (*nsizes < 0) {
	*info = -1;
    } else if (badnn) {
	*info = -2;
    } else if (*ntypes < 0) {
	*info = -3;
    } else if (*thresh < 0.f) {
	*info = -6;
    } else if (*lda <= 1 || *lda < nmax) {
	*info = -9;
    } else if (*ldq <= 1 || *ldq < nmax) {
	*info = -14;
    } else if (*ldqe <= 1 || *ldqe < nmax) {
	*info = -17;
    }

/*     Compute workspace   
        (Note: Comments in the code beginning "Workspace:" describe the   
         minimal amount of workspace needed at that point in the code,   
         as well as the preferred amount for good performance.   
         NB refers to the optimal block size for the immediately   
         following subroutine, as returned by ILAENV. */

    minwrk = 1;
    if (*info == 0 && *lwork >= 1) {
	minwrk = nmax * (nmax + 1);
/* Computing MAX */
	i__1 = 1, i__2 = ilaenv_(&c__1, "CGEQRF", " ", &nmax, &nmax, &c_n1, &
		c_n1, (ftnlen)6, (ftnlen)1), i__1 = max(i__1,i__2), i__2 = 
		ilaenv_(&c__1, "CUNMQR", "LC", &nmax, &nmax, &nmax, &c_n1, (
		ftnlen)6, (ftnlen)2), i__1 = max(i__1,i__2), i__2 = ilaenv_(&
		c__1, "CUNGQR", " ", &nmax, &nmax, &nmax, &c_n1, (ftnlen)6, (
		ftnlen)1);
	nb = max(i__1,i__2);
/* Computing MAX */
	i__1 = nmax << 1, i__2 = nmax * (nb + 1), i__1 = max(i__1,i__2), i__2 
		= nmax * (nmax + 1);
	maxwrk = max(i__1,i__2);
	work[1].r = (real) maxwrk, work[1].i = 0.f;
    }

    if (*lwork < minwrk) {
	*info = -23;
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CDRGEV", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*nsizes == 0 || *ntypes == 0) {
	return 0;
    }

    ulp = slamch_("Precision");
    safmin = slamch_("Safe minimum");
    safmin /= ulp;
    safmax = 1.f / safmin;
    slabad_(&safmin, &safmax);
    ulpinv = 1.f / ulp;

/*     The values RMAGN(2:3) depend on N, see below. */

    rmagn[0] = 0.f;
    rmagn[1] = 1.f;

/*     Loop over sizes, types */

    ntestt = 0;
    nerrs = 0;
    nmats = 0;

    i__1 = *nsizes;
    for (jsize = 1; jsize <= i__1; ++jsize) {
	n = nn[jsize];
	n1 = max(1,n);
	rmagn[2] = safmax * ulp / (real) n1;
	rmagn[3] = safmin * ulpinv * n1;

	if (*nsizes != 1) {
	    mtypes = min(26,*ntypes);
	} else {
	    mtypes = min(27,*ntypes);
	}

	i__2 = mtypes;
	for (jtype = 1; jtype <= i__2; ++jtype) {
	    if (! dotype[jtype]) {
		goto L210;
	    }
	    ++nmats;

/*           Save ISEED in case of an error. */

	    for (j = 1; j <= 4; ++j) {
		ioldsd[j - 1] = iseed[j];
/* L20: */
	    }

/*           Generate test matrices A and B   

             Description of control parameters:   

             KCLASS: =1 means w/o rotation, =2 means w/ rotation,   
                     =3 means random.   
             KATYPE: the "type" to be passed to CLATM4 for computing A.   
             KAZERO: the pattern of zeros on the diagonal for A:   
                     =1: ( xxx ), =2: (0, xxx ) =3: ( 0, 0, xxx, 0 ),   
                     =4: ( 0, xxx, 0, 0 ), =5: ( 0, 0, 1, xxx, 0 ),   
                     =6: ( 0, 1, 0, xxx, 0 ).  (xxx means a string of   
                     non-zero entries.)   
             KAMAGN: the magnitude of the matrix: =0: zero, =1: O(1),   
                     =2: large, =3: small.   
             LASIGN: .TRUE. if the diagonal elements of A are to be   
                     multiplied by a random magnitude 1 number.   
             KBTYPE, KBZERO, KBMAGN, LBSIGN: the same, but for B.   
             KTRIAN: =0: don't fill in the upper triangle, =1: do.   
             KZ1, KZ2, KADD: used to implement KAZERO and KBZERO.   
             RMAGN: used to implement KAMAGN and KBMAGN. */

	    if (mtypes > 26) {
		goto L100;
	    }
	    ierr = 0;
	    if (kclass[jtype - 1] < 3) {

/*              Generate A (w/o rotation) */

		if ((i__3 = katype[jtype - 1], abs(i__3)) == 3) {
		    in = ((n - 1) / 2 << 1) + 1;
		    if (in != n) {
			claset_("Full", &n, &n, &c_b1, &c_b1, &a[a_offset], 
				lda);
		    }
		} else {
		    in = n;
		}
		clatm4_(&katype[jtype - 1], &in, &kz1[kazero[jtype - 1] - 1], 
			&kz2[kazero[jtype - 1] - 1], &lasign[jtype - 1], &
			rmagn[kamagn[jtype - 1]], &ulp, &rmagn[ktrian[jtype - 
			1] * kamagn[jtype - 1]], &c__2, &iseed[1], &a[
			a_offset], lda);
		iadd = kadd[kazero[jtype - 1] - 1];
		if (iadd > 0 && iadd <= n) {
		    i__3 = a_subscr(iadd, iadd);
		    i__4 = kamagn[jtype - 1];
		    a[i__3].r = rmagn[i__4], a[i__3].i = 0.f;
		}

/*              Generate B (w/o rotation) */

		if ((i__3 = kbtype[jtype - 1], abs(i__3)) == 3) {
		    in = ((n - 1) / 2 << 1) + 1;
		    if (in != n) {
			claset_("Full", &n, &n, &c_b1, &c_b1, &b[b_offset], 
				lda);
		    }
		} else {
		    in = n;
		}
		clatm4_(&kbtype[jtype - 1], &in, &kz1[kbzero[jtype - 1] - 1], 
			&kz2[kbzero[jtype - 1] - 1], &lbsign[jtype - 1], &
			rmagn[kbmagn[jtype - 1]], &c_b28, &rmagn[ktrian[jtype 
			- 1] * kbmagn[jtype - 1]], &c__2, &iseed[1], &b[
			b_offset], lda);
		iadd = kadd[kbzero[jtype - 1] - 1];
		if (iadd != 0 && iadd <= n) {
		    i__3 = b_subscr(iadd, iadd);
		    i__4 = kbmagn[jtype - 1];
		    b[i__3].r = rmagn[i__4], b[i__3].i = 0.f;
		}

		if (kclass[jtype - 1] == 2 && n > 0) {

/*                 Include rotations   

                   Generate Q, Z as Householder transformations times   
                   a diagonal matrix. */

		    i__3 = n - 1;
		    for (jc = 1; jc <= i__3; ++jc) {
			i__4 = n;
			for (jr = jc; jr <= i__4; ++jr) {
			    i__5 = q_subscr(jr, jc);
			    clarnd_(&q__1, &c__3, &iseed[1]);
			    q[i__5].r = q__1.r, q[i__5].i = q__1.i;
			    i__5 = z___subscr(jr, jc);
			    clarnd_(&q__1, &c__3, &iseed[1]);
			    z__[i__5].r = q__1.r, z__[i__5].i = q__1.i;
/* L30: */
			}
			i__4 = n + 1 - jc;
			clarfg_(&i__4, &q_ref(jc, jc), &q_ref(jc + 1, jc), &
				c__1, &work[jc]);
			i__4 = (n << 1) + jc;
			i__5 = q_subscr(jc, jc);
			r__2 = q[i__5].r;
			r__1 = r_sign(&c_b28, &r__2);
			work[i__4].r = r__1, work[i__4].i = 0.f;
			i__4 = q_subscr(jc, jc);
			q[i__4].r = 1.f, q[i__4].i = 0.f;
			i__4 = n + 1 - jc;
			clarfg_(&i__4, &z___ref(jc, jc), &z___ref(jc + 1, jc),
				 &c__1, &work[n + jc]);
			i__4 = n * 3 + jc;
			i__5 = z___subscr(jc, jc);
			r__2 = z__[i__5].r;
			r__1 = r_sign(&c_b28, &r__2);
			work[i__4].r = r__1, work[i__4].i = 0.f;
			i__4 = z___subscr(jc, jc);
			z__[i__4].r = 1.f, z__[i__4].i = 0.f;
/* L40: */
		    }
		    clarnd_(&q__1, &c__3, &iseed[1]);
		    ctemp.r = q__1.r, ctemp.i = q__1.i;
		    i__3 = q_subscr(n, n);
		    q[i__3].r = 1.f, q[i__3].i = 0.f;
		    i__3 = n;
		    work[i__3].r = 0.f, work[i__3].i = 0.f;
		    i__3 = n * 3;
		    r__1 = c_abs(&ctemp);
		    q__1.r = ctemp.r / r__1, q__1.i = ctemp.i / r__1;
		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;
		    clarnd_(&q__1, &c__3, &iseed[1]);
		    ctemp.r = q__1.r, ctemp.i = q__1.i;
		    i__3 = z___subscr(n, n);
		    z__[i__3].r = 1.f, z__[i__3].i = 0.f;
		    i__3 = n << 1;
		    work[i__3].r = 0.f, work[i__3].i = 0.f;
		    i__3 = n << 2;
		    r__1 = c_abs(&ctemp);
		    q__1.r = ctemp.r / r__1, q__1.i = ctemp.i / r__1;
		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;

/*                 Apply the diagonal matrices */

		    i__3 = n;
		    for (jc = 1; jc <= i__3; ++jc) {
			i__4 = n;
			for (jr = 1; jr <= i__4; ++jr) {
			    i__5 = a_subscr(jr, jc);
			    i__6 = (n << 1) + jr;
			    r_cnjg(&q__3, &work[n * 3 + jc]);
			    q__2.r = work[i__6].r * q__3.r - work[i__6].i * 
				    q__3.i, q__2.i = work[i__6].r * q__3.i + 
				    work[i__6].i * q__3.r;
			    i__7 = a_subscr(jr, jc);
			    q__1.r = q__2.r * a[i__7].r - q__2.i * a[i__7].i, 
				    q__1.i = q__2.r * a[i__7].i + q__2.i * a[
				    i__7].r;
			    a[i__5].r = q__1.r, a[i__5].i = q__1.i;
			    i__5 = b_subscr(jr, jc);
			    i__6 = (n << 1) + jr;
			    r_cnjg(&q__3, &work[n * 3 + jc]);
			    q__2.r = work[i__6].r * q__3.r - work[i__6].i * 
				    q__3.i, q__2.i = work[i__6].r * q__3.i + 
				    work[i__6].i * q__3.r;
			    i__7 = b_subscr(jr, jc);
			    q__1.r = q__2.r * b[i__7].r - q__2.i * b[i__7].i, 
				    q__1.i = q__2.r * b[i__7].i + q__2.i * b[
				    i__7].r;
			    b[i__5].r = q__1.r, b[i__5].i = q__1.i;
/* L50: */
			}
/* L60: */
		    }
		    i__3 = n - 1;
		    cunm2r_("L", "N", &n, &n, &i__3, &q[q_offset], ldq, &work[
			    1], &a[a_offset], lda, &work[(n << 1) + 1], &ierr);
		    if (ierr != 0) {
			goto L90;
		    }
		    i__3 = n - 1;
		    cunm2r_("R", "C", &n, &n, &i__3, &z__[z_offset], ldq, &
			    work[n + 1], &a[a_offset], lda, &work[(n << 1) + 
			    1], &ierr);
		    if (ierr != 0) {
			goto L90;
		    }
		    i__3 = n - 1;
		    cunm2r_("L", "N", &n, &n, &i__3, &q[q_offset], ldq, &work[
			    1], &b[b_offset], lda, &work[(n << 1) + 1], &ierr);
		    if (ierr != 0) {
			goto L90;
		    }
		    i__3 = n - 1;
		    cunm2r_("R", "C", &n, &n, &i__3, &z__[z_offset], ldq, &
			    work[n + 1], &b[b_offset], lda, &work[(n << 1) + 
			    1], &ierr);
		    if (ierr != 0) {
			goto L90;
		    }
		}
	    } else {

/*              Random matrices */

		i__3 = n;
		for (jc = 1; jc <= i__3; ++jc) {
		    i__4 = n;
		    for (jr = 1; jr <= i__4; ++jr) {
			i__5 = a_subscr(jr, jc);
			i__6 = kamagn[jtype - 1];
			clarnd_(&q__2, &c__4, &iseed[1]);
			q__1.r = rmagn[i__6] * q__2.r, q__1.i = rmagn[i__6] * 
				q__2.i;
			a[i__5].r = q__1.r, a[i__5].i = q__1.i;
			i__5 = b_subscr(jr, jc);
			i__6 = kbmagn[jtype - 1];
			clarnd_(&q__2, &c__4, &iseed[1]);
			q__1.r = rmagn[i__6] * q__2.r, q__1.i = rmagn[i__6] * 
				q__2.i;
			b[i__5].r = q__1.r, b[i__5].i = q__1.i;
/* L70: */
		    }
/* L80: */
		}
	    }

L90:

	    if (ierr != 0) {
		io___40.ciunit = *nounit;
		s_wsfe(&io___40);
		do_fio(&c__1, "Generator", (ftnlen)9);
		do_fio(&c__1, (char *)&ierr, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(ierr);
		return 0;
	    }

L100:

	    for (i__ = 1; i__ <= 7; ++i__) {
		result[i__] = -1.f;
/* L110: */
	    }

/*           Call CGGEV to compute eigenvalues and eigenvectors. */

	    clacpy_(" ", &n, &n, &a[a_offset], lda, &s[s_offset], lda);
	    clacpy_(" ", &n, &n, &b[b_offset], lda, &t[t_offset], lda);
	    cggev_("V", "V", &n, &s[s_offset], lda, &t[t_offset], lda, &alpha[
		    1], &beta[1], &q[q_offset], ldq, &z__[z_offset], ldq, &
		    work[1], lwork, &rwork[1], &ierr);
	    if (ierr != 0 && ierr != n + 1) {
		result[1] = ulpinv;
		io___42.ciunit = *nounit;
		s_wsfe(&io___42);
		do_fio(&c__1, "CGGEV1", (ftnlen)6);
		do_fio(&c__1, (char *)&ierr, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(ierr);
		goto L190;
	    }

/*           Do the tests (1) and (2) */

	    cget52_(&c_true, &n, &a[a_offset], lda, &b[b_offset], lda, &q[
		    q_offset], ldq, &alpha[1], &beta[1], &work[1], &rwork[1], 
		    &result[1]);
	    if (result[2] > *thresh) {
		io___43.ciunit = *nounit;
		s_wsfe(&io___43);
		do_fio(&c__1, "Left", (ftnlen)4);
		do_fio(&c__1, "CGGEV1", (ftnlen)6);
		do_fio(&c__1, (char *)&result[2], (ftnlen)sizeof(real));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
	    }

/*           Do the tests (3) and (4) */

	    cget52_(&c_false, &n, &a[a_offset], lda, &b[b_offset], lda, &z__[
		    z_offset], ldq, &alpha[1], &beta[1], &work[1], &rwork[1], 
		    &result[3]);
	    if (result[4] > *thresh) {
		io___44.ciunit = *nounit;
		s_wsfe(&io___44);
		do_fio(&c__1, "Right", (ftnlen)5);
		do_fio(&c__1, "CGGEV1", (ftnlen)6);
		do_fio(&c__1, (char *)&result[4], (ftnlen)sizeof(real));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
	    }

/*           Do test (5) */

	    clacpy_(" ", &n, &n, &a[a_offset], lda, &s[s_offset], lda);
	    clacpy_(" ", &n, &n, &b[b_offset], lda, &t[t_offset], lda);
	    cggev_("N", "N", &n, &s[s_offset], lda, &t[t_offset], lda, &
		    alpha1[1], &beta1[1], &q[q_offset], ldq, &z__[z_offset], 
		    ldq, &work[1], lwork, &rwork[1], &ierr);
	    if (ierr != 0 && ierr != n + 1) {
		result[1] = ulpinv;
		io___45.ciunit = *nounit;
		s_wsfe(&io___45);
		do_fio(&c__1, "CGGEV2", (ftnlen)6);
		do_fio(&c__1, (char *)&ierr, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(ierr);
		goto L190;
	    }

	    i__3 = n;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = j;
		i__5 = j;
		i__6 = j;
		i__7 = j;
		if (alpha[i__4].r != alpha1[i__5].r || alpha[i__4].i != 
			alpha1[i__5].i || (beta[i__6].r != beta1[i__7].r || 
			beta[i__6].i != beta1[i__7].i)) {
		    result[5] = ulpinv;
		}
/* L120: */
	    }

/*           Do test (6): Compute eigenvalues and left eigenvectors,   
             and test them */

	    clacpy_(" ", &n, &n, &a[a_offset], lda, &s[s_offset], lda);
	    clacpy_(" ", &n, &n, &b[b_offset], lda, &t[t_offset], lda);
	    cggev_("V", "N", &n, &s[s_offset], lda, &t[t_offset], lda, &
		    alpha1[1], &beta1[1], &qe[qe_offset], ldqe, &z__[z_offset]
		    , ldq, &work[1], lwork, &rwork[1], &ierr);
	    if (ierr != 0 && ierr != n + 1) {
		result[1] = ulpinv;
		io___46.ciunit = *nounit;
		s_wsfe(&io___46);
		do_fio(&c__1, "CGGEV3", (ftnlen)6);
		do_fio(&c__1, (char *)&ierr, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(ierr);
		goto L190;
	    }

	    i__3 = n;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = j;
		i__5 = j;
		i__6 = j;
		i__7 = j;
		if (alpha[i__4].r != alpha1[i__5].r || alpha[i__4].i != 
			alpha1[i__5].i || (beta[i__6].r != beta1[i__7].r || 
			beta[i__6].i != beta1[i__7].i)) {
		    result[6] = ulpinv;
		}
/* L130: */
	    }

	    i__3 = n;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = n;
		for (jc = 1; jc <= i__4; ++jc) {
		    i__5 = q_subscr(j, jc);
		    i__6 = qe_subscr(j, jc);
		    if (q[i__5].r != qe[i__6].r || q[i__5].i != qe[i__6].i) {
			result[6] = ulpinv;
		    }
/* L140: */
		}
/* L150: */
	    }

/*           Do test (7): Compute eigenvalues and right eigenvectors,   
             and test them */

	    clacpy_(" ", &n, &n, &a[a_offset], lda, &s[s_offset], lda);
	    clacpy_(" ", &n, &n, &b[b_offset], lda, &t[t_offset], lda);
	    cggev_("N", "V", &n, &s[s_offset], lda, &t[t_offset], lda, &
		    alpha1[1], &beta1[1], &q[q_offset], ldq, &qe[qe_offset], 
		    ldqe, &work[1], lwork, &rwork[1], &ierr);
	    if (ierr != 0 && ierr != n + 1) {
		result[1] = ulpinv;
		io___47.ciunit = *nounit;
		s_wsfe(&io___47);
		do_fio(&c__1, "CGGEV4", (ftnlen)6);
		do_fio(&c__1, (char *)&ierr, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(ierr);
		goto L190;
	    }

	    i__3 = n;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = j;
		i__5 = j;
		i__6 = j;
		i__7 = j;
		if (alpha[i__4].r != alpha1[i__5].r || alpha[i__4].i != 
			alpha1[i__5].i || (beta[i__6].r != beta1[i__7].r || 
			beta[i__6].i != beta1[i__7].i)) {
		    result[7] = ulpinv;
		}
/* L160: */
	    }

	    i__3 = n;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = n;
		for (jc = 1; jc <= i__4; ++jc) {
		    i__5 = z___subscr(j, jc);
		    i__6 = qe_subscr(j, jc);
		    if (z__[i__5].r != qe[i__6].r || z__[i__5].i != qe[i__6]
			    .i) {
			result[7] = ulpinv;
		    }
/* L170: */
		}
/* L180: */
	    }

/*           End of Loop -- Check for RESULT(j) > THRESH */

L190:

	    ntestt += 7;

/*           Print out tests which fail. */

	    for (jr = 1; jr <= 7; ++jr) {
		if (result[jr] >= *thresh) {

/*                 If this is the first test to fail,   
                   print a header to the data file. */

		    if (nerrs == 0) {
			io___48.ciunit = *nounit;
			s_wsfe(&io___48);
			do_fio(&c__1, "CGV", (ftnlen)3);
			e_wsfe();

/*                    Matrix types */

			io___49.ciunit = *nounit;
			s_wsfe(&io___49);
			e_wsfe();
			io___50.ciunit = *nounit;
			s_wsfe(&io___50);
			e_wsfe();
			io___51.ciunit = *nounit;
			s_wsfe(&io___51);
			do_fio(&c__1, "Orthogonal", (ftnlen)10);
			e_wsfe();

/*                    Tests performed */

			io___52.ciunit = *nounit;
			s_wsfe(&io___52);
			e_wsfe();

		    }
		    ++nerrs;
		    if (result[jr] < 1e4f) {
			io___53.ciunit = *nounit;
			s_wsfe(&io___53);
			do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer))
				;
			do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(
				integer));
			do_fio(&c__1, (char *)&jr, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&result[jr], (ftnlen)sizeof(
				real));
			e_wsfe();
		    } else {
			io___54.ciunit = *nounit;
			s_wsfe(&io___54);
			do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer))
				;
			do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(
				integer));
			do_fio(&c__1, (char *)&jr, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&result[jr], (ftnlen)sizeof(
				real));
			e_wsfe();
		    }
		}
/* L200: */
	    }

L210:
	    ;
	}
/* L220: */
    }

/*     Summary */

    alasvm_("CGV", nounit, &nerrs, &ntestt, &c__0);

    work[1].r = (real) maxwrk, work[1].i = 0.f;

    return 0;







/*     End of CDRGEV */

} /* cdrgev_ */
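/*     Illustration only (not part of the test driver): tests (1) and (3)
       above measure the residual of an eigenpair, e.g. for a right pair
       | (beta A - alpha B) * r | / ( ulp max(|beta A|, |alpha B|) ).
       The sketch below computes the unscaled numerator in the 1-norm for one
       right eigenpair of an n-by-n column-major pair (A,B).  It uses C99
       complex arithmetic rather than the f2c types, and every name in it is
       hypothetical. */

#include <complex.h>

static float right_eigpair_residual(int n, const float complex *a, int lda,
	const float complex *b, int ldb, float complex alpha,
	float complex beta, const float complex *r)
{
    float res = 0.f;
    for (int i = 0; i < n; ++i) {
	float complex wi = 0.f;
	for (int j = 0; j < n; ++j)	/* w = (beta*A - alpha*B) * r */
	    wi += (beta * a[i + j * lda] - alpha * b[i + j * ldb]) * r[j];
	res += cabsf(wi);		/* accumulate the 1-norm of w */
    }
    /* The driver would divide this by ulp * max(|beta*A|, |alpha*B|). */
    return res;
}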
Example #3
/* Subroutine */
int cgelq2_(integer *m, integer *n, complex *a, integer *lda, complex *tau, complex *work, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    /* Local variables */
    integer i__, k;
    complex alpha;
    extern /* Subroutine */
    int clarf_(char *, integer *, integer *, complex * , integer *, complex *, complex *, integer *, complex *), clarfg_(integer *, complex *, complex *, integer *, complex *), clacgv_(integer *, complex *, integer *), xerbla_(char *, integer *);
    /* -- LAPACK computational routine (version 3.4.2) -- */
    /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
    /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
    /* September 2012 */
    /* .. Scalar Arguments .. */
    /* .. */
    /* .. Array Arguments .. */
    /* .. */
    /* ===================================================================== */
    /* .. Parameters .. */
    /* .. */
    /* .. Local Scalars .. */
    /* .. */
    /* .. External Subroutines .. */
    /* .. */
    /* .. Intrinsic Functions .. */
    /* .. */
    /* .. Executable Statements .. */
    /* Test the input arguments */
    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    *info = 0;
    if (*m < 0)
    {
        *info = -1;
    }
    else if (*n < 0)
    {
        *info = -2;
    }
    else if (*lda < max(1,*m))
    {
        *info = -4;
    }
    if (*info != 0)
    {
        i__1 = -(*info);
        xerbla_("CGELQ2", &i__1);
        return 0;
    }
    k = min(*m,*n);
    i__1 = k;
    for (i__ = 1;
            i__ <= i__1;
            ++i__)
    {
        /* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */
        i__2 = *n - i__ + 1;
        clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
        i__2 = i__ + i__ * a_dim1;
        alpha.r = a[i__2].r;
        alpha.i = a[i__2].i; // , expr subst
        i__2 = *n - i__ + 1;
        /* Computing MIN */
        i__3 = i__ + 1;
        clarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, &tau[i__] );
        if (i__ < *m)
        {
            /* Apply H(i) to A(i+1:m,i:n) from the right */
            i__2 = i__ + i__ * a_dim1;
            a[i__2].r = 1.f;
            a[i__2].i = 0.f; // , expr subst
            i__2 = *m - i__;
            i__3 = *n - i__ + 1;
            clarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[ i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
        }
        i__2 = i__ + i__ * a_dim1;
        a[i__2].r = alpha.r;
        a[i__2].i = alpha.i; // , expr subst
        i__2 = *n - i__ + 1;
        clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
        /* L10: */
    }
    return 0;
    /* End of CGELQ2 */
}
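/*     A minimal usage sketch (not part of LAPACK): LQ-factoring a 2-by-3
       complex matrix with the unblocked routine above, A = L*Q.  It assumes
       the f2c types from "f2c.h" and linkage against cgelq2_; on return the
       lower triangle of A holds L, while the strictly upper part and TAU
       encode the Householder reflectors that define Q.  The helper name and
       sample data are made up for illustration. */

#include "f2c.h"

extern int cgelq2_(integer *m, integer *n, complex *a, integer *lda,
	complex *tau, complex *work, integer *info);

static int example_cgelq2(void)
{
    integer m = 2, n = 3, lda = 2, info = 0;
    /* Column-major storage: a[i + j*lda] is A(i+1,j+1). */
    complex a[6] = { {1.f, 0.f}, {4.f, 1.f},	/* column 1 */
		     {2.f, 0.f}, {5.f, 0.f},	/* column 2 */
		     {3.f, -1.f}, {6.f, 0.f} };	/* column 3 */
    complex tau[2];		/* min(m,n) scalar factors */
    complex work[2];		/* workspace of length m */

    cgelq2_(&m, &n, a, &lda, tau, work, &info);
    return (int) info;		/* 0 on success */
}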
Example #4
/* Subroutine */ int clahrd_(integer *n, integer *k, integer *nb, complex *a, 
	integer *lda, complex *tau, complex *t, integer *ldt, complex *y, 
	integer *ldy)
{
    /* System generated locals */
    integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2, 
	    i__3;
    complex q__1;

    /* Local variables */
    integer i__;
    complex ei;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
	    integer *), cgemv_(char *, integer *, integer *, complex *, 
	    complex *, integer *, complex *, integer *, complex *, complex *, 
	    integer *), ccopy_(integer *, complex *, integer *, 
	    complex *, integer *), caxpy_(integer *, complex *, complex *, 
	    integer *, complex *, integer *), ctrmv_(char *, char *, char *, 
	    integer *, complex *, integer *, complex *, integer *), clarfg_(integer *, complex *, complex *, integer 
	    *, complex *), clacgv_(integer *, complex *, integer *);


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CLAHRD reduces the first NB columns of a complex general n-by-(n-k+1) */
/*  matrix A so that elements below the k-th subdiagonal are zero. The */
/*  reduction is performed by a unitary similarity transformation */
/*  Q' * A * Q. The routine returns the matrices V and T which determine */
/*  Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. */

/*  This is an OBSOLETE auxiliary routine. */
/*  This routine will be 'deprecated' in a  future release. */
/*  Please use the new routine CLAHR2 instead. */

/*  Arguments */
/*  ========= */

/*  N       (input) INTEGER */
/*          The order of the matrix A. */

/*  K       (input) INTEGER */
/*          The offset for the reduction. Elements below the k-th */
/*          subdiagonal in the first NB columns are reduced to zero. */

/*  NB      (input) INTEGER */
/*          The number of columns to be reduced. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N-K+1) */
/*          On entry, the n-by-(n-k+1) general matrix A. */
/*          On exit, the elements on and above the k-th subdiagonal in */
/*          the first NB columns are overwritten with the corresponding */
/*          elements of the reduced matrix; the elements below the k-th */
/*          subdiagonal, with the array TAU, represent the matrix Q as a */
/*          product of elementary reflectors. The other columns of A are */
/*          unchanged. See Further Details. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,N). */

/*  TAU     (output) COMPLEX array, dimension (NB) */
/*          The scalar factors of the elementary reflectors. See Further */
/*          Details. */

/*  T       (output) COMPLEX array, dimension (LDT,NB) */
/*          The upper triangular matrix T. */

/*  LDT     (input) INTEGER */
/*          The leading dimension of the array T.  LDT >= NB. */

/*  Y       (output) COMPLEX array, dimension (LDY,NB) */
/*          The n-by-nb matrix Y. */

/*  LDY     (input) INTEGER */
/*          The leading dimension of the array Y. LDY >= max(1,N). */

/*  Further Details */
/*  =============== */

/*  The matrix Q is represented as a product of nb elementary reflectors */

/*     Q = H(1) H(2) . . . H(nb). */

/*  Each H(i) has the form */

/*     H(i) = I - tau * v * v' */

/*  where tau is a complex scalar, and v is a complex vector with */
/*  v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in */
/*  A(i+k+1:n,i), and tau in TAU(i). */

/*  The elements of the vectors v together form the (n-k+1)-by-nb matrix */
/*  V which is needed, with T and Y, to apply the transformation to the */
/*  unreduced part of the matrix, using an update of the form: */
/*  A := (I - V*T*V') * (A - Y*V'). */

/*  The contents of A on exit are illustrated by the following example */
/*  with n = 7, k = 3 and nb = 2: */

/*     ( a   h   a   a   a ) */
/*     ( a   h   a   a   a ) */
/*     ( a   h   a   a   a ) */
/*     ( h   h   a   a   a ) */
/*     ( v1  h   a   a   a ) */
/*     ( v1  v2  a   a   a ) */
/*     ( v1  v2  a   a   a ) */

/*  where a denotes an element of the original matrix A, h denotes a */
/*  modified element of the upper Hessenberg matrix H, and vi denotes an */
/*  element of the vector defining H(i). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Quick return if possible */

    /* Parameter adjustments */
    --tau;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    t_dim1 = *ldt;
    t_offset = 1 + t_dim1;
    t -= t_offset;
    y_dim1 = *ldy;
    y_offset = 1 + y_dim1;
    y -= y_offset;

    /* Function Body */
    if (*n <= 1) {
	return 0;
    }

    i__1 = *nb;
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (i__ > 1) {

/*           Update A(1:n,i) */

/*           Compute i-th column of A - Y * V' */

	    i__2 = i__ - 1;
	    clacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
	    i__2 = i__ - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("No transpose", n, &i__2, &q__1, &y[y_offset], ldy, &a[*k 
		    + i__ - 1 + a_dim1], lda, &c_b2, &a[i__ * a_dim1 + 1], &
		    c__1);
	    i__2 = i__ - 1;
	    clacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);

/*           Apply I - V * T' * V' to this column (call it b) from the */
/*           left, using the last column of T as workspace */

/*           Let  V = ( V1 )   and   b = ( b1 )   (first I-1 rows) */
/*                    ( V2 )             ( b2 ) */

/*           where V1 is unit lower triangular */

/*           w := V1' * b1 */

	    i__2 = i__ - 1;
	    ccopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 + 
		    1], &c__1);
	    i__2 = i__ - 1;
	    ctrmv_("Lower", "Conjugate transpose", "Unit", &i__2, &a[*k + 1 + 
		    a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1);

/*           w := w + V2'*b2 */

	    i__2 = *n - *k - i__ + 1;
	    i__3 = i__ - 1;
	    cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ + 
		    a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b2, &
		    t[*nb * t_dim1 + 1], &c__1);

/*           w := T'*w */

	    i__2 = i__ - 1;
	    ctrmv_("Upper", "Conjugate transpose", "Non-unit", &i__2, &t[
		    t_offset], ldt, &t[*nb * t_dim1 + 1], &c__1);

/*           b2 := b2 - V2*w */

	    i__2 = *n - *k - i__ + 1;
	    i__3 = i__ - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("No transpose", &i__2, &i__3, &q__1, &a[*k + i__ + a_dim1], 
		     lda, &t[*nb * t_dim1 + 1], &c__1, &c_b2, &a[*k + i__ + 
		    i__ * a_dim1], &c__1);

/*           b1 := b1 - V1*w */

	    i__2 = i__ - 1;
	    ctrmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1]
, lda, &t[*nb * t_dim1 + 1], &c__1);
	    i__2 = i__ - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    caxpy_(&i__2, &q__1, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__ 
		    * a_dim1], &c__1);

	    i__2 = *k + i__ - 1 + (i__ - 1) * a_dim1;
	    a[i__2].r = ei.r, a[i__2].i = ei.i;
	}

/*        Generate the elementary reflector H(i) to annihilate */
/*        A(k+i+1:n,i) */

	i__2 = *k + i__ + i__ * a_dim1;
	ei.r = a[i__2].r, ei.i = a[i__2].i;
	i__2 = *n - *k - i__ + 1;
/* Computing MIN */
	i__3 = *k + i__ + 1;
	clarfg_(&i__2, &ei, &a[min(i__3, *n)+ i__ * a_dim1], &c__1, &tau[i__])
		;
	i__2 = *k + i__ + i__ * a_dim1;
	a[i__2].r = 1.f, a[i__2].i = 0.f;

/*        Compute  Y(1:n,i) */

	i__2 = *n - *k - i__ + 1;
	cgemv_("No transpose", n, &i__2, &c_b2, &a[(i__ + 1) * a_dim1 + 1], 
		lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &y[i__ * 
		y_dim1 + 1], &c__1);
	i__2 = *n - *k - i__ + 1;
	i__3 = i__ - 1;
	cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ + 
		a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &t[
		i__ * t_dim1 + 1], &c__1);
	i__2 = i__ - 1;
	q__1.r = -1.f, q__1.i = -0.f;
	cgemv_("No transpose", n, &i__2, &q__1, &y[y_offset], ldy, &t[i__ * 
		t_dim1 + 1], &c__1, &c_b2, &y[i__ * y_dim1 + 1], &c__1);
	cscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1);

/*        Compute T(1:i,i) */

	i__2 = i__ - 1;
	i__3 = i__;
	q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
	cscal_(&i__2, &q__1, &t[i__ * t_dim1 + 1], &c__1);
	i__2 = i__ - 1;
	ctrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt, 
		&t[i__ * t_dim1 + 1], &c__1)
		;
	i__2 = i__ + i__ * t_dim1;
	i__3 = i__;
	t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;

/* L10: */
    }
    i__1 = *k + *nb + *nb * a_dim1;
    a[i__1].r = ei.r, a[i__1].i = ei.i;

    return 0;

/*     End of CLAHRD */

} /* clahrd_ */
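/*     Illustration only: each reflector generated above has the form
       H(i) = I - tau * v * v', with v(1:k+i-1) = 0, v(k+i) = 1 and the tail
       of v stored in A(k+i+1:n,i).  The sketch below applies one such
       reflector to a single vector, x := x - tau * (v' * x) * v, using C99
       complex arithmetic; the function name is hypothetical and not part of
       the routine. */

#include <complex.h>

static void apply_reflector(int n, const float complex *v, float complex tau,
	float complex *x)
{
    float complex dot = 0.f;
    int i;

    for (i = 0; i < n; ++i)		/* dot = v' * x (v conjugated) */
	dot += conjf(v[i]) * x[i];
    for (i = 0; i < n; ++i)		/* x := x - tau * dot * v */
	x[i] -= tau * dot * v[i];
}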
Example #5
/* Subroutine */ int chseqr_(char *job, char *compz, integer *n, integer *ilo,
	 integer *ihi, complex *h__, integer *ldh, complex *w, complex *z__, 
	integer *ldz, complex *work, integer *lwork, integer *info)
{
    /* System generated locals */
    address a__1[2];
    integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4[2], 
	    i__5, i__6;
    real r__1, r__2, r__3, r__4;
    complex q__1;
    char ch__1[2];

    /* Builtin functions */
    double r_imag(complex *);
    void r_cnjg(complex *, complex *);
    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);

    /* Local variables */
    static integer maxb, ierr;
    static real unfl;
    static complex temp;
    static real ovfl, opst;
    static integer i__, j, k, l;
    static complex s[225]	/* was [15][15] */;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
	    integer *);
    static complex v[16];
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
	    , complex *, integer *, complex *, integer *, complex *, complex *
	    , integer *), ccopy_(integer *, complex *, integer *, 
	    complex *, integer *);
    static integer itemp;
    static real rtemp;
    static integer i1, i2;
    static logical initz, wantt, wantz;
    static real rwork[1];
    extern doublereal slapy2_(real *, real *);
    static integer ii, nh;
    extern /* Subroutine */ int slabad_(real *, real *), clarfg_(integer *, 
	    complex *, complex *, integer *, complex *);
    static integer nr, ns;
    extern integer icamax_(integer *, complex *, integer *);
    static integer nv;
    extern doublereal slamch_(char *), clanhs_(char *, integer *, 
	    complex *, integer *, real *);
    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
	    *), clahqr_(logical *, logical *, integer *, integer *, integer *,
	     complex *, integer *, complex *, integer *, integer *, complex *,
	     integer *, integer *), clacpy_(char *, integer *, integer *, 
	    complex *, integer *, complex *, integer *);
    static complex vv[16];
    extern /* Subroutine */ int claset_(char *, integer *, integer *, complex 
	    *, complex *, complex *, integer *);
    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
	    integer *, integer *, ftnlen, ftnlen);
    extern /* Subroutine */ int clarfx_(char *, integer *, integer *, complex 
	    *, complex *, complex *, integer *, complex *), xerbla_(
	    char *, integer *);
    static real smlnum;
    static logical lquery;
    static integer itn;
    static complex tau;
    static integer its;
    static real ulp, tst1;


#define h___subscr(a_1,a_2) (a_2)*h_dim1 + a_1
#define h___ref(a_1,a_2) h__[h___subscr(a_1,a_2)]
#define s_subscr(a_1,a_2) (a_2)*15 + a_1 - 16
#define s_ref(a_1,a_2) s[s_subscr(a_1,a_2)]
#define z___subscr(a_1,a_2) (a_2)*z_dim1 + a_1
#define z___ref(a_1,a_2) z__[z___subscr(a_1,a_2)]


/*  -- LAPACK routine (instrumented to count operations, version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   

       Common block to return operation count.   

    Purpose   
    =======   

    CHSEQR computes the eigenvalues of a complex upper Hessenberg   
    matrix H, and, optionally, the matrices T and Z from the Schur   
    decomposition H = Z T Z**H, where T is an upper triangular matrix   
    (the Schur form), and Z is the unitary matrix of Schur vectors.   

    Optionally Z may be postmultiplied into an input unitary matrix Q,   
    so that this routine can give the Schur factorization of a matrix A   
    which has been reduced to the Hessenberg form H by the unitary   
    matrix Q:  A = Q*H*Q**H = (QZ)*T*(QZ)**H.   

    Arguments   
    =========   

    JOB     (input) CHARACTER*1   
            = 'E': compute eigenvalues only;   
            = 'S': compute eigenvalues and the Schur form T.   

    COMPZ   (input) CHARACTER*1   
            = 'N': no Schur vectors are computed;   
            = 'I': Z is initialized to the unit matrix and the matrix Z   
                   of Schur vectors of H is returned;   
            = 'V': Z must contain a unitary matrix Q on entry, and   
                   the product Q*Z is returned.   

    N       (input) INTEGER   
            The order of the matrix H.  N >= 0.   

    ILO     (input) INTEGER   
    IHI     (input) INTEGER   
            It is assumed that H is already upper triangular in rows   
            and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally   
            set by a previous call to CGEBAL, and then passed to CGEHRD   
            when the matrix output by CGEBAL is reduced to Hessenberg   
            form. Otherwise ILO and IHI should be set to 1 and N   
            respectively.   
            1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.   

    H       (input/output) COMPLEX array, dimension (LDH,N)   
            On entry, the upper Hessenberg matrix H.   
            On exit, if JOB = 'S', H contains the upper triangular matrix   
            T from the Schur decomposition (the Schur form). If   
            JOB = 'E', the contents of H are unspecified on exit.   

    LDH     (input) INTEGER   
            The leading dimension of the array H. LDH >= max(1,N).   

    W       (output) COMPLEX array, dimension (N)   
            The computed eigenvalues. If JOB = 'S', the eigenvalues are   
            stored in the same order as on the diagonal of the Schur form   
            returned in H, with W(i) = H(i,i).   

    Z       (input/output) COMPLEX array, dimension (LDZ,N)   
            If COMPZ = 'N': Z is not referenced.   
            If COMPZ = 'I': on entry, Z need not be set, and on exit, Z   
            contains the unitary matrix Z of the Schur vectors of H.   
            If COMPZ = 'V': on entry Z must contain an N-by-N matrix Q,   
            which is assumed to be equal to the unit matrix except for   
            the submatrix Z(ILO:IHI,ILO:IHI); on exit Z contains Q*Z.   
            Normally Q is the unitary matrix generated by CUNGHR after   
            the call to CGEHRD which formed the Hessenberg matrix H.   

    LDZ     (input) INTEGER   
            The leading dimension of the array Z.   
            LDZ >= max(1,N) if COMPZ = 'I' or 'V'; LDZ >= 1 otherwise.   

    WORK    (workspace/output) COMPLEX array, dimension (LWORK)   
            On exit, if INFO = 0, WORK(1) returns the optimal LWORK.   

    LWORK   (input) INTEGER   
            The dimension of the array WORK.  LWORK >= max(1,N).   

            If LWORK = -1, then a workspace query is assumed; the routine   
            only calculates the optimal size of the WORK array, returns   
            this value as the first entry of the WORK array, and no error   
            message related to LWORK is issued by XERBLA.   

    INFO    (output) INTEGER   
            = 0:  successful exit   
            < 0:  if INFO = -i, the i-th argument had an illegal value   
            > 0:  if INFO = i, CHSEQR failed to compute all the   
                  eigenvalues in a total of 30*(IHI-ILO+1) iterations;   
                  elements 1:ilo-1 and i+1:n of W contain those   
                  eigenvalues which have been successfully computed.   

    =====================================================================   


       Decode and test the input parameters   

       Parameter adjustments */
    h_dim1 = *ldh;
    h_offset = 1 + h_dim1 * 1;
    h__ -= h_offset;
    --w;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1 * 1;
    z__ -= z_offset;
    --work;

    /* Function Body */
    wantt = lsame_(job, "S");
    initz = lsame_(compz, "I");
    wantz = initz || lsame_(compz, "V");

    *info = 0;
    i__1 = max(1,*n);
    work[1].r = (real) i__1, work[1].i = 0.f;
    lquery = *lwork == -1;
    if (! lsame_(job, "E") && ! wantt) {
	*info = -1;
    } else if (! lsame_(compz, "N") && ! wantz) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*ilo < 1 || *ilo > max(1,*n)) {
	*info = -4;
    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
	*info = -5;
    } else if (*ldh < max(1,*n)) {
	*info = -7;
    } else if (*ldz < 1 || wantz && *ldz < max(1,*n)) {
	*info = -10;
    } else if (*lwork < max(1,*n) && ! lquery) {
	*info = -12;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CHSEQR", &i__1);
	return 0;
    } else if (lquery) {
	return 0;
    }
/* **   
       Initialize */
    opst = 0.f;
/* **   

       Initialize Z, if necessary */

    if (initz) {
	claset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
    }

/*     Store the eigenvalues isolated by CGEBAL. */

    i__1 = *ilo - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
	i__2 = i__;
	i__3 = h___subscr(i__, i__);
	w[i__2].r = h__[i__3].r, w[i__2].i = h__[i__3].i;
/* L10: */
    }
    i__1 = *n;
    for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
	i__2 = i__;
	i__3 = h___subscr(i__, i__);
	w[i__2].r = h__[i__3].r, w[i__2].i = h__[i__3].i;
/* L20: */
    }

/*     Quick return if possible. */

    if (*n == 0) {
	return 0;
    }
    if (*ilo == *ihi) {
	i__1 = *ilo;
	i__2 = h___subscr(*ilo, *ilo);
	w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
	return 0;
    }

/*     Set rows and columns ILO to IHI to zero below the first   
       subdiagonal. */

    i__1 = *ihi - 2;
    for (j = *ilo; j <= i__1; ++j) {
	i__2 = *n;
	for (i__ = j + 2; i__ <= i__2; ++i__) {
	    i__3 = h___subscr(i__, j);
	    h__[i__3].r = 0.f, h__[i__3].i = 0.f;
/* L30: */
	}
/* L40: */
    }
    nh = *ihi - *ilo + 1;

/*     I1 and I2 are the indices of the first row and last column of H   
       to which transformations must be applied. If eigenvalues only are   
       being computed, I1 and I2 are re-set inside the main loop. */

    if (wantt) {
	i1 = 1;
	i2 = *n;
    } else {
	i1 = *ilo;
	i2 = *ihi;
    }

/*     Ensure that the subdiagonal elements are real. */

    i__1 = *ihi;
    for (i__ = *ilo + 1; i__ <= i__1; ++i__) {
	i__2 = h___subscr(i__, i__ - 1);
	temp.r = h__[i__2].r, temp.i = h__[i__2].i;
	if (r_imag(&temp) != 0.f) {
	    r__1 = temp.r;
	    r__2 = r_imag(&temp);
	    rtemp = slapy2_(&r__1, &r__2);
	    i__2 = h___subscr(i__, i__ - 1);
	    h__[i__2].r = rtemp, h__[i__2].i = 0.f;
	    q__1.r = temp.r / rtemp, q__1.i = temp.i / rtemp;
	    temp.r = q__1.r, temp.i = q__1.i;
	    if (i2 > i__) {
		i__2 = i2 - i__;
		r_cnjg(&q__1, &temp);
		cscal_(&i__2, &q__1, &h___ref(i__, i__ + 1), ldh);
	    }
	    i__2 = i__ - i1;
	    cscal_(&i__2, &temp, &h___ref(i1, i__), &c__1);
	    if (i__ < *ihi) {
		i__2 = h___subscr(i__ + 1, i__);
		i__3 = h___subscr(i__ + 1, i__);
		q__1.r = temp.r * h__[i__3].r - temp.i * h__[i__3].i, q__1.i =
			 temp.r * h__[i__3].i + temp.i * h__[i__3].r;
		h__[i__2].r = q__1.r, h__[i__2].i = q__1.i;
	    }
/* **   
             Increment op count */
	    opst += (i2 - i1 + 2) * 6;
/* ** */
	    if (wantz) {
		cscal_(&nh, &temp, &z___ref(*ilo, i__), &c__1);
/* **   
                Increment op count */
		opst += nh * 6;
/* ** */
	    }
	}
/* L50: */
    }

/*     Determine the order of the multi-shift QR algorithm to be used.   

   Writing concatenation */
    i__4[0] = 1, a__1[0] = job;
    i__4[1] = 1, a__1[1] = compz;
    s_cat(ch__1, a__1, i__4, &c__2, (ftnlen)2);
    ns = ilaenv_(&c__4, "CHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
	    ftnlen)2);
/* Writing concatenation */
    i__4[0] = 1, a__1[0] = job;
    i__4[1] = 1, a__1[1] = compz;
    s_cat(ch__1, a__1, i__4, &c__2, (ftnlen)2);
    maxb = ilaenv_(&c__8, "CHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
	    ftnlen)2);
    if (ns <= 1 || ns > nh || maxb >= nh) {

/*        Use the standard double-shift algorithm */

	clahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1], ilo, 
		ihi, &z__[z_offset], ldz, info);
	return 0;
    }
    maxb = max(2,maxb);
/* Computing MIN */
    i__1 = min(ns,maxb);
    ns = min(i__1,15);

/*     Now 1 < NS <= MAXB < NH.   

       Set machine-dependent constants for the stopping criterion.   
       If norm(H) <= sqrt(OVFL), overflow should not occur. */

    unfl = slamch_("Safe minimum");
    ovfl = 1.f / unfl;
    slabad_(&unfl, &ovfl);
    ulp = slamch_("Precision");
    smlnum = unfl * (nh / ulp);

/*     ITN is the total number of multiple-shift QR iterations allowed. */

    itn = nh * 30;

/*     The main loop begins here. I is the loop index and decreases from   
       IHI to ILO in steps of at most MAXB. Each iteration of the loop   
       works with the active submatrix in rows and columns L to I.   
       Eigenvalues I+1 to IHI have already converged. Either L = ILO, or   
       H(L,L-1) is negligible so that the matrix splits. */

    i__ = *ihi;
L60:
    if (i__ < *ilo) {
	goto L180;
    }

/*     Perform multiple-shift QR iterations on rows and columns ILO to I   
       until a submatrix of order at most MAXB splits off at the bottom   
       because a subdiagonal element has become negligible. */

    l = *ilo;
    i__1 = itn;
    for (its = 0; its <= i__1; ++its) {

/*        Look for a single small subdiagonal element. */

	i__2 = l + 1;
	for (k = i__; k >= i__2; --k) {
	    i__3 = h___subscr(k - 1, k - 1);
	    i__5 = h___subscr(k, k);
	    tst1 = (r__1 = h__[i__3].r, dabs(r__1)) + (r__2 = r_imag(&h___ref(
		    k - 1, k - 1)), dabs(r__2)) + ((r__3 = h__[i__5].r, dabs(
		    r__3)) + (r__4 = r_imag(&h___ref(k, k)), dabs(r__4)));
	    if (tst1 == 0.f) {
		i__3 = i__ - l + 1;
		tst1 = clanhs_("1", &i__3, &h___ref(l, l), ldh, rwork);
/* **   
                Increment op count */
		latime_1.ops += (i__ - l + 1) * 5 * (i__ - l) / 2;
/* ** */
	    }
	    i__3 = h___subscr(k, k - 1);
/* Computing MAX */
	    r__2 = ulp * tst1;
	    if ((r__1 = h__[i__3].r, dabs(r__1)) <= dmax(r__2,smlnum)) {
		goto L80;
	    }
/* L70: */
	}
L80:
	l = k;
/* **   
          Increment op count */
	opst += (i__ - l + 1) * 5;
/* ** */
	if (l > *ilo) {

/*           H(L,L-1) is negligible. */

	    i__2 = h___subscr(l, l - 1);
	    h__[i__2].r = 0.f, h__[i__2].i = 0.f;
	}

/*        Exit from loop if a submatrix of order <= MAXB has split off. */

	if (l >= i__ - maxb + 1) {
	    goto L170;
	}

/*        Now the active submatrix is in rows and columns L to I. If   
          eigenvalues only are being computed, only the active submatrix   
          need be transformed. */

	if (! wantt) {
	    i1 = l;
	    i2 = i__;
	}

	if (its == 20 || its == 30) {

/*           Exceptional shifts. */

	    i__2 = i__;
	    for (ii = i__ - ns + 1; ii <= i__2; ++ii) {
		i__3 = ii;
		i__5 = h___subscr(ii, ii - 1);
		i__6 = h___subscr(ii, ii);
		r__3 = ((r__1 = h__[i__5].r, dabs(r__1)) + (r__2 = h__[i__6]
			.r, dabs(r__2))) * 1.5f;
		w[i__3].r = r__3, w[i__3].i = 0.f;
/* L90: */
	    }
/* **   
             Increment op count */
	    opst += ns << 1;
/* ** */
	} else {

/*           Use eigenvalues of trailing submatrix of order NS as shifts. */

	    clacpy_("Full", &ns, &ns, &h___ref(i__ - ns + 1, i__ - ns + 1), 
		    ldh, s, &c__15);
	    clahqr_(&c_false, &c_false, &ns, &c__1, &ns, s, &c__15, &w[i__ - 
		    ns + 1], &c__1, &ns, &z__[z_offset], ldz, &ierr);
	    if (ierr > 0) {

/*              If CLAHQR failed to compute all NS eigenvalues, use the   
                unconverged diagonal elements as the remaining shifts. */

		i__2 = ierr;
		for (ii = 1; ii <= i__2; ++ii) {
		    i__3 = i__ - ns + ii;
		    i__5 = s_subscr(ii, ii);
		    w[i__3].r = s[i__5].r, w[i__3].i = s[i__5].i;
/* L100: */
		}
	    }
	}

/*        Form the first column of (G-w(1)) (G-w(2)) . . . (G-w(ns))   
          where G is the Hessenberg submatrix H(L:I,L:I) and w is   
          the vector of shifts (stored in W). The result is   
          stored in the local array V. */

	v[0].r = 1.f, v[0].i = 0.f;
	i__2 = ns + 1;
	for (ii = 2; ii <= i__2; ++ii) {
	    i__3 = ii - 1;
	    v[i__3].r = 0.f, v[i__3].i = 0.f;
/* L110: */
	}
	nv = 1;
	i__2 = i__;
	for (j = i__ - ns + 1; j <= i__2; ++j) {
	    i__3 = nv + 1;
	    ccopy_(&i__3, v, &c__1, vv, &c__1);
	    i__3 = nv + 1;
	    i__5 = j;
	    q__1.r = -w[i__5].r, q__1.i = -w[i__5].i;
	    cgemv_("No transpose", &i__3, &nv, &c_b2, &h___ref(l, l), ldh, vv,
		     &c__1, &q__1, v, &c__1);
	    ++nv;
/* **   
             Increment op count */
	    opst = opst + (nv << 3) * (*n + 1) + (nv + 1) * 6;
/* **   

             Scale V(1:NV) so that max(abs(V(i))) = 1. If V is zero,   
             reset it to the unit vector. */

	    itemp = icamax_(&nv, v, &c__1);
/* **   
             Increment op count */
	    opst += nv << 1;
/* ** */
	    i__3 = itemp - 1;
	    rtemp = (r__1 = v[i__3].r, dabs(r__1)) + (r__2 = r_imag(&v[itemp 
		    - 1]), dabs(r__2));
	    if (rtemp == 0.f) {
		v[0].r = 1.f, v[0].i = 0.f;
		i__3 = nv;
		for (ii = 2; ii <= i__3; ++ii) {
		    i__5 = ii - 1;
		    v[i__5].r = 0.f, v[i__5].i = 0.f;
/* L120: */
		}
	    } else {
		rtemp = dmax(rtemp,smlnum);
		r__1 = 1.f / rtemp;
		csscal_(&nv, &r__1, v, &c__1);
/* **   
                Increment op count */
		opst += nv << 1;
/* ** */
	    }
/* L130: */
	}

/*        Multiple-shift QR step */

	i__2 = i__ - 1;
	for (k = l; k <= i__2; ++k) {

/*           The first iteration of this loop determines a reflection G   
             from the vector V and applies it from left and right to H,   
             thus creating a nonzero bulge below the subdiagonal.   

             Each subsequent iteration determines a reflection G to   
             restore the Hessenberg form in the (K-1)th column, and thus   
             chases the bulge one step toward the bottom of the active   
             submatrix. NR is the order of G.   

   Computing MIN */
	    i__3 = ns + 1, i__5 = i__ - k + 1;
	    nr = min(i__3,i__5);
	    if (k > l) {
		ccopy_(&nr, &h___ref(k, k - 1), &c__1, v, &c__1);
	    }
	    clarfg_(&nr, v, &v[1], &c__1, &tau);
/* **   
             Increment op count */
	    opst = opst + nr * 10 + 12;
/* ** */
	    if (k > l) {
		i__3 = h___subscr(k, k - 1);
		h__[i__3].r = v[0].r, h__[i__3].i = v[0].i;
		i__3 = i__;
		for (ii = k + 1; ii <= i__3; ++ii) {
		    i__5 = h___subscr(ii, k - 1);
		    h__[i__5].r = 0.f, h__[i__5].i = 0.f;
/* L140: */
		}
	    }
	    v[0].r = 1.f, v[0].i = 0.f;

/*           Apply G' from the left to transform the rows of the matrix   
             in columns K to I2. */

	    i__3 = i2 - k + 1;
	    r_cnjg(&q__1, &tau);
	    clarfx_("Left", &nr, &i__3, v, &q__1, &h___ref(k, k), ldh, &work[
		    1]);

/*           Apply G from the right to transform the columns of the   
             matrix in rows I1 to min(K+NR,I).   

   Computing MIN */
	    i__5 = k + nr;
	    i__3 = min(i__5,i__) - i1 + 1;
	    clarfx_("Right", &i__3, &nr, v, &tau, &h___ref(i1, k), ldh, &work[
		    1]);
/* **   
             Increment op count   
   Computing MIN */
	    i__3 = nr, i__5 = i__ - k;
	    latime_1.ops += ((nr << 2) - 2 << 2) * (i2 - i1 + 2 + min(i__3,
		    i__5));
/* ** */

	    if (wantz) {

/*              Accumulate transformations in the matrix Z */

		clarfx_("Right", &nh, &nr, v, &tau, &z___ref(*ilo, k), ldz, &
			work[1]);
/* **   
                Increment op count */
		latime_1.ops += ((nr << 2) - 2 << 2) * nh;
/* ** */
	    }
/* L150: */
	}

/*        Ensure that H(I,I-1) is real. */

	i__2 = h___subscr(i__, i__ - 1);
	temp.r = h__[i__2].r, temp.i = h__[i__2].i;
	if (r_imag(&temp) != 0.f) {
	    r__1 = temp.r;
	    r__2 = r_imag(&temp);
	    rtemp = slapy2_(&r__1, &r__2);
	    i__2 = h___subscr(i__, i__ - 1);
	    h__[i__2].r = rtemp, h__[i__2].i = 0.f;
	    q__1.r = temp.r / rtemp, q__1.i = temp.i / rtemp;
	    temp.r = q__1.r, temp.i = q__1.i;
	    if (i2 > i__) {
		i__2 = i2 - i__;
		r_cnjg(&q__1, &temp);
		cscal_(&i__2, &q__1, &h___ref(i__, i__ + 1), ldh);
	    }
	    i__2 = i__ - i1;
	    cscal_(&i__2, &temp, &h___ref(i1, i__), &c__1);
/* **   
             Increment op count */
	    opst += (i2 - i1 + 1) * 6;
/* ** */
	    if (wantz) {
		cscal_(&nh, &temp, &z___ref(*ilo, i__), &c__1);
/* **   
                Increment op count */
		opst += nh * 6;
/* ** */
	    }
	}

/* L160: */
    }

/*     Failure to converge in remaining number of iterations */

    *info = i__;
    return 0;

L170:

/*     A submatrix of order <= MAXB in rows and columns L to I has split   
       off. Use the double-shift QR algorithm to handle it. */

    clahqr_(&wantt, &wantz, n, &l, &i__, &h__[h_offset], ldh, &w[1], ilo, ihi,
	     &z__[z_offset], ldz, info);
    if (*info > 0) {
	return 0;
    }

/*     Decrement number of remaining iterations, and return to start of   
       the main loop with a new value of I. */

    itn -= its;
    i__ = l - 1;
    goto L60;

L180:
/* **   
       Compute final op count */
    latime_1.ops += opst;
/* ** */
    i__1 = max(1,*n);
    work[1].r = (real) i__1, work[1].i = 0.f;
    return 0;

/*     End of CHSEQR */

} /* chseqr_ */
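/* -------------------------------------------------------------------------
   Added usage sketch (not part of the original f2c output).  It shows one
   plausible calling sequence for chseqr_ as documented above: a workspace
   query with LWORK = -1, followed by the actual call with JOB = 'S' and
   COMPZ = 'I' to obtain the Schur form, the eigenvalues and the Schur
   vectors.  The 3-by-3 Hessenberg matrix, the helper name
   example_chseqr_usage and the assumption that "f2c.h" supplies the
   integer/real/complex typedefs are all illustrative, not taken from the
   original source.
   ------------------------------------------------------------------------- */
#include <stdio.h>
#include <stdlib.h>
#include "f2c.h"

extern int chseqr_(char *job, char *compz, integer *n, integer *ilo,
	integer *ihi, complex *h__, integer *ldh, complex *w, complex *z__,
	integer *ldz, complex *work, integer *lwork, integer *info);

int example_chseqr_usage(void)
{
    integer n = 3, ilo = 1, ihi = 3, ldh = 3, ldz = 3, lwork, info;
    complex h[9], w[3], z[9], wkopt, *work;
    /* Column-major upper Hessenberg input: entries below the first
       subdiagonal are zero, as CHSEQR requires. */
    static const real hr[9] = { 4.f, 2.f, 0.f,  1.f, 3.f, 1.f,  0.f, 2.f, 5.f };
    int i;

    for (i = 0; i < 9; ++i) {
	h[i].r = hr[i], h[i].i = 0.f;
    }

    /* Workspace query: with LWORK = -1 only the optimal workspace size is
       computed and returned in WORK(1). */
    lwork = -1;
    chseqr_("S", "I", &n, &ilo, &ihi, h, &ldh, w, z, &ldz, &wkopt, &lwork,
	    &info);
    lwork = (integer) wkopt.r;
    work = (complex *) malloc((size_t) lwork * sizeof(complex));

    /* Actual call: H is overwritten by the Schur form T, W receives the
       eigenvalues, and Z the Schur vectors (COMPZ = 'I' initializes Z). */
    chseqr_("S", "I", &n, &ilo, &ihi, h, &ldh, w, z, &ldz, work, &lwork,
	    &info);

    for (i = 0; i < (int) n; ++i) {
	printf("w(%d) = (%g, %g)\n", i + 1, (double) w[i].r,
		(double) w[i].i);
    }

    free(work);
    return (int) info;
}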
/* Subroutine */ int cgebd2_(integer *m, integer *n, complex *a, integer *lda,
	 real *d__, real *e, complex *tauq, complex *taup, complex *work, 
	integer *info)
{
/*  -- LAPACK routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    CGEBD2 reduces a complex general m by n matrix A to upper or lower   
    real bidiagonal form B by a unitary transformation: Q' * A * P = B.   

    If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.   

    Arguments   
    =========   

    M       (input) INTEGER   
            The number of rows in the matrix A.  M >= 0.   

    N       (input) INTEGER   
            The number of columns in the matrix A.  N >= 0.   

    A       (input/output) COMPLEX array, dimension (LDA,N)   
            On entry, the m by n general matrix to be reduced.   
            On exit,   
            if m >= n, the diagonal and the first superdiagonal are   
              overwritten with the upper bidiagonal matrix B; the   
              elements below the diagonal, with the array TAUQ, represent   
              the unitary matrix Q as a product of elementary   
              reflectors, and the elements above the first superdiagonal,   
              with the array TAUP, represent the unitary matrix P as   
              a product of elementary reflectors;   
            if m < n, the diagonal and the first subdiagonal are   
              overwritten with the lower bidiagonal matrix B; the   
              elements below the first subdiagonal, with the array TAUQ,   
              represent the unitary matrix Q as a product of   
              elementary reflectors, and the elements above the diagonal,   
              with the array TAUP, represent the unitary matrix P as   
              a product of elementary reflectors.   
            See Further Details.   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= max(1,M).   

    D       (output) REAL array, dimension (min(M,N))   
            The diagonal elements of the bidiagonal matrix B:   
            D(i) = A(i,i).   

    E       (output) REAL array, dimension (min(M,N)-1)   
            The off-diagonal elements of the bidiagonal matrix B:   
            if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;   
            if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.   

    TAUQ    (output) COMPLEX array, dimension (min(M,N))   
            The scalar factors of the elementary reflectors which   
            represent the unitary matrix Q. See Further Details.   

    TAUP    (output) COMPLEX array, dimension (min(M,N))   
            The scalar factors of the elementary reflectors which   
            represent the unitary matrix P. See Further Details.   

    WORK    (workspace) COMPLEX array, dimension (max(M,N))   

    INFO    (output) INTEGER   
            = 0: successful exit   
            < 0: if INFO = -i, the i-th argument had an illegal value.   

    Further Details   
    ===============   

    The matrices Q and P are represented as products of elementary   
    reflectors:   

    If m >= n,   

       Q = H(1) H(2) . . . H(n)  and  P = G(1) G(2) . . . G(n-1)   

    Each H(i) and G(i) has the form:   

       H(i) = I - tauq * v * v'  and G(i) = I - taup * u * u'   

    where tauq and taup are complex scalars, and v and u are complex   
    vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in   
    A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in   
    A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i).   

    If m < n,   

       Q = H(1) H(2) . . . H(m-1)  and  P = G(1) G(2) . . . G(m)   

    Each H(i) and G(i) has the form:   

       H(i) = I - tauq * v * v'  and G(i) = I - taup * u * u'   

    where tauq and taup are complex scalars, v and u are complex vectors;   
    v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i);   
    u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n);   
    tauq is stored in TAUQ(i) and taup in TAUP(i).   

    The contents of A on exit are illustrated by the following examples:   

    m = 6 and n = 5 (m > n):          m = 5 and n = 6 (m < n):   

      (  d   e   u1  u1  u1 )           (  d   u1  u1  u1  u1  u1 )   
      (  v1  d   e   u2  u2 )           (  e   d   u2  u2  u2  u2 )   
      (  v1  v2  d   e   u3 )           (  v1  e   d   u3  u3  u3 )   
      (  v1  v2  v3  d   e  )           (  v1  v2  e   d   u4  u4 )   
      (  v1  v2  v3  v4  d  )           (  v1  v2  v3  e   d   u5 )   
      (  v1  v2  v3  v4  v5 )   

    where d and e denote diagonal and off-diagonal elements of B, vi   
    denotes an element of the vector defining H(i), and ui an element of   
    the vector defining G(i).   

    =====================================================================   


       Test the input parameters   

       Parameter adjustments */
    /* Table of constant values */
    static integer c__1 = 1;
    
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
    complex q__1;
    /* Builtin functions */
    void r_cnjg(complex *, complex *);
    /* Local variables */
    static integer i__;
    static complex alpha;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
	    , integer *, complex *, complex *, integer *, complex *), 
	    clarfg_(integer *, complex *, complex *, integer *, complex *), 
	    clacgv_(integer *, complex *, integer *), xerbla_(char *, integer 
	    *);
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]


    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --d__;
    --e;
    --tauq;
    --taup;
    --work;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*m)) {
	*info = -4;
    }
    if (*info < 0) {
	i__1 = -(*info);
	xerbla_("CGEBD2", &i__1);
	return 0;
    }

    if (*m >= *n) {

/*        Reduce to upper bidiagonal form */

	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Generate elementary reflector H(i) to annihilate A(i+1:m,i) */

	    i__2 = a_subscr(i__, i__);
	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
/* Computing MIN */
	    i__2 = i__ + 1;
	    i__3 = *m - i__ + 1;
	    clarfg_(&i__3, &alpha, &a_ref(min(i__2,*m), i__), &c__1, &tauq[
		    i__]);
	    i__2 = i__;
	    d__[i__2] = alpha.r;
	    i__2 = a_subscr(i__, i__);
	    a[i__2].r = 1.f, a[i__2].i = 0.f;

/*           Apply H(i)' to A(i:m,i+1:n) from the left */

	    i__2 = *m - i__ + 1;
	    i__3 = *n - i__;
	    r_cnjg(&q__1, &tauq[i__]);
	    clarf_("Left", &i__2, &i__3, &a_ref(i__, i__), &c__1, &q__1, &
		    a_ref(i__, i__ + 1), lda, &work[1]);
	    i__2 = a_subscr(i__, i__);
	    i__3 = i__;
	    a[i__2].r = d__[i__3], a[i__2].i = 0.f;

	    if (i__ < *n) {

/*              Generate elementary reflector G(i) to annihilate   
                A(i,i+2:n) */

		i__2 = *n - i__;
		clacgv_(&i__2, &a_ref(i__, i__ + 1), lda);
		i__2 = a_subscr(i__, i__ + 1);
		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
/* Computing MIN */
		i__2 = i__ + 2;
		i__3 = *n - i__;
		clarfg_(&i__3, &alpha, &a_ref(i__, min(i__2,*n)), lda, &taup[
			i__]);
		i__2 = i__;
		e[i__2] = alpha.r;
		i__2 = a_subscr(i__, i__ + 1);
		a[i__2].r = 1.f, a[i__2].i = 0.f;

/*              Apply G(i) to A(i+1:m,i+1:n) from the right */

		i__2 = *m - i__;
		i__3 = *n - i__;
		clarf_("Right", &i__2, &i__3, &a_ref(i__, i__ + 1), lda, &
			taup[i__], &a_ref(i__ + 1, i__ + 1), lda, &work[1]);
		i__2 = *n - i__;
		clacgv_(&i__2, &a_ref(i__, i__ + 1), lda);
		i__2 = a_subscr(i__, i__ + 1);
		i__3 = i__;
		a[i__2].r = e[i__3], a[i__2].i = 0.f;
	    } else {
		i__2 = i__;
		taup[i__2].r = 0.f, taup[i__2].i = 0.f;
	    }
/* L10: */
	}
    } else {

/*        Reduce to lower bidiagonal form */

	i__1 = *m;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Generate elementary reflector G(i) to annihilate A(i,i+1:n) */

	    i__2 = *n - i__ + 1;
	    clacgv_(&i__2, &a_ref(i__, i__), lda);
	    i__2 = a_subscr(i__, i__);
	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
/* Computing MIN */
	    i__2 = i__ + 1;
	    i__3 = *n - i__ + 1;
	    clarfg_(&i__3, &alpha, &a_ref(i__, min(i__2,*n)), lda, &taup[i__]);
	    i__2 = i__;
	    d__[i__2] = alpha.r;
	    i__2 = a_subscr(i__, i__);
	    a[i__2].r = 1.f, a[i__2].i = 0.f;

/*           Apply G(i) to A(i+1:m,i:n) from the right   

   Computing MIN */
	    i__2 = i__ + 1;
	    i__3 = *m - i__;
	    i__4 = *n - i__ + 1;
	    clarf_("Right", &i__3, &i__4, &a_ref(i__, i__), lda, &taup[i__], &
		    a_ref(min(i__2,*m), i__), lda, &work[1]);
	    i__2 = *n - i__ + 1;
	    clacgv_(&i__2, &a_ref(i__, i__), lda);
	    i__2 = a_subscr(i__, i__);
	    i__3 = i__;
	    a[i__2].r = d__[i__3], a[i__2].i = 0.f;

	    if (i__ < *m) {

/*              Generate elementary reflector H(i) to annihilate   
                A(i+2:m,i) */

		i__2 = a_subscr(i__ + 1, i__);
		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
/* Computing MIN */
		i__2 = i__ + 2;
		i__3 = *m - i__;
		clarfg_(&i__3, &alpha, &a_ref(min(i__2,*m), i__), &c__1, &
			tauq[i__]);
		i__2 = i__;
		e[i__2] = alpha.r;
		i__2 = a_subscr(i__ + 1, i__);
		a[i__2].r = 1.f, a[i__2].i = 0.f;

/*              Apply H(i)' to A(i+1:m,i+1:n) from the left */

		i__2 = *m - i__;
		i__3 = *n - i__;
		r_cnjg(&q__1, &tauq[i__]);
		clarf_("Left", &i__2, &i__3, &a_ref(i__ + 1, i__), &c__1, &
			q__1, &a_ref(i__ + 1, i__ + 1), lda, &work[1]);
		i__2 = a_subscr(i__ + 1, i__);
		i__3 = i__;
		a[i__2].r = e[i__3], a[i__2].i = 0.f;
	    } else {
		i__2 = i__;
		tauq[i__2].r = 0.f, tauq[i__2].i = 0.f;
	    }
/* L20: */
	}
    }
    return 0;

/*     End of CGEBD2 */

} /* cgebd2_ */
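/* -------------------------------------------------------------------------
   Added usage sketch (not part of the original f2c output).  It calls
   cgebd2_ on a small m >= n matrix and prints the diagonal D and
   superdiagonal E of the upper bidiagonal factor B, matching the layout
   described in the comments above (D(i) = A(i,i), E(i) = A(i,i+1)).  The
   test matrix, the helper name example_cgebd2_usage and the reliance on
   "f2c.h" for the integer/real/complex typedefs are illustrative
   assumptions.
   ------------------------------------------------------------------------- */
#include <stdio.h>
#include "f2c.h"

extern int cgebd2_(integer *m, integer *n, complex *a, integer *lda,
	real *d__, real *e, complex *tauq, complex *taup, complex *work,
	integer *info);

int example_cgebd2_usage(void)
{
    integer m = 4, n = 3, lda = 4, info;
    complex a[12], tauq[3], taup[3], work[4];	/* WORK needs max(M,N) entries */
    real d[3], e[2];				/* D(min(M,N)), E(min(M,N)-1) */
    int i;

    /* Arbitrary column-major 4-by-3 complex matrix. */
    for (i = 0; i < 12; ++i) {
	a[i].r = (real) (i + 1), a[i].i = (real) (i % 3);
    }

    cgebd2_(&m, &n, a, &lda, d, e, tauq, taup, work, &info);

    if (info == 0) {
	/* Since m >= n, B is upper bidiagonal; the reflectors defining Q
	   and P remain packed in A together with TAUQ and TAUP. */
	for (i = 0; i < (int) n; ++i) {
	    printf("d(%d) = %g\n", i + 1, (double) d[i]);
	}
	for (i = 0; i < (int) n - 1; ++i) {
	    printf("e(%d) = %g\n", i + 1, (double) e[i]);
	}
    }
    return (int) info;
}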
Example #7
0
/* Subroutine */ int clahqr_(logical *wantt, logical *wantz, integer *n, 
	integer *ilo, integer *ihi, complex *h__, integer *ldh, complex *w, 
	integer *iloz, integer *ihiz, complex *z__, integer *ldz, integer *
	info)
{
    /* System generated locals */
    integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
    real r__1, r__2, r__3, r__4, r__5, r__6;
    complex q__1, q__2, q__3, q__4, q__5, q__6, q__7;

    /* Builtin functions */
    double r_imag(complex *);
    void r_cnjg(complex *, complex *);
    double c_abs(complex *);
    void c_sqrt(complex *, complex *), pow_ci(complex *, complex *, integer *)
	    ;

    /* Local variables */
    integer i__, j, k, l, m;
    real s;
    complex t, u, v[2], x, y;
    integer i1, i2;
    complex t1;
    real t2;
    complex v2;
    real aa, ab, ba, bb, h10;
    complex h11;
    real h21;
    complex h22, sc;
    integer nh, nz;
    real sx;
    integer jhi;
    complex h11s;
    integer jlo, its;
    real ulp;
    complex sum;
    real tst;
    complex temp;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
	    integer *), ccopy_(integer *, complex *, integer *, complex *, 
	    integer *);
    real rtemp;
    extern /* Subroutine */ int slabad_(real *, real *), clarfg_(integer *, 
	    complex *, complex *, integer *, complex *);
    extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
    extern doublereal slamch_(char *);
    real safmin, safmax, smlnum;


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*     Purpose */
/*     ======= */

/*     CLAHQR is an auxiliary routine called by CHSEQR to update the */
/*     eigenvalues and Schur decomposition already computed by CHSEQR, by */
/*     dealing with the Hessenberg submatrix in rows and columns ILO to */
/*     IHI. */

/*     Arguments */
/*     ========= */

/*     WANTT   (input) LOGICAL */
/*          = .TRUE. : the full Schur form T is required; */
/*          = .FALSE.: only eigenvalues are required. */

/*     WANTZ   (input) LOGICAL */
/*          = .TRUE. : the matrix of Schur vectors Z is required; */
/*          = .FALSE.: Schur vectors are not required. */

/*     N       (input) INTEGER */
/*          The order of the matrix H.  N >= 0. */

/*     ILO     (input) INTEGER */
/*     IHI     (input) INTEGER */
/*          It is assumed that H is already upper triangular in rows and */
/*          columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless ILO = 1). */
/*          CLAHQR works primarily with the Hessenberg submatrix in rows */
/*          and columns ILO to IHI, but applies transformations to all of */
/*          H if WANTT is .TRUE.. */
/*          1 <= ILO <= max(1,IHI); IHI <= N. */

/*     H       (input/output) COMPLEX array, dimension (LDH,N) */
/*          On entry, the upper Hessenberg matrix H. */
/*          On exit, if INFO is zero and if WANTT is .TRUE., then H */
/*          is upper triangular in rows and columns ILO:IHI.  If INFO */
/*          is zero and if WANTT is .FALSE., then the contents of H */
/*          are unspecified on exit.  The output state of H in case */
/*          INFO is positive is given below under the description of INFO. */

/*     LDH     (input) INTEGER */
/*          The leading dimension of the array H. LDH >= max(1,N). */

/*     W       (output) COMPLEX array, dimension (N) */
/*          The computed eigenvalues ILO to IHI are stored in the */
/*          corresponding elements of W. If WANTT is .TRUE., the */
/*          eigenvalues are stored in the same order as on the diagonal */
/*          of the Schur form returned in H, with W(i) = H(i,i). */

/*     ILOZ    (input) INTEGER */
/*     IHIZ    (input) INTEGER */
/*          Specify the rows of Z to which transformations must be */
/*          applied if WANTZ is .TRUE.. */
/*          1 <= ILOZ <= ILO; IHI <= IHIZ <= N. */

/*     Z       (input/output) COMPLEX array, dimension (LDZ,N) */
/*          If WANTZ is .TRUE., on entry Z must contain the current */
/*          matrix Z of transformations accumulated by CHSEQR, and on */
/*          exit Z has been updated; transformations are applied only to */
/*          the submatrix Z(ILOZ:IHIZ,ILO:IHI). */
/*          If WANTZ is .FALSE., Z is not referenced. */

/*     LDZ     (input) INTEGER */
/*          The leading dimension of the array Z. LDZ >= max(1,N). */

/*     INFO    (output) INTEGER */
/*           =   0: successful exit */
/*          .GT. 0: if INFO = i, CLAHQR failed to compute all the */
/*                  eigenvalues ILO to IHI in a total of 30 iterations */
/*                  per eigenvalue; elements i+1:ihi of W contain */
/*                  those eigenvalues which have been successfully */
/*                  computed. */

/*                  If INFO .GT. 0 and WANTT is .FALSE., then on exit, */
/*                  the remaining unconverged eigenvalues are the */
/*                  eigenvalues of the upper Hessenberg matrix */
/*                  rows and columns ILO through INFO of the final, */
/*                  output value of H. */

/*                  If INFO .GT. 0 and WANTT is .TRUE., then on exit */
/*          (*)       (initial value of H)*U  = U*(final value of H) */
/*                  where U is an orthogonal matrix.    The final */
/*                  value of H is upper Hessenberg and triangular in */
/*                  rows and columns INFO+1 through IHI. */

/*                  If INFO .GT. 0 and WANTZ is .TRUE., then on exit */
/*                      (final value of Z)  = (initial value of Z)*U */
/*                  where U is the orthogonal matrix in (*) */
/*                  (regardless of the value of WANTT.) */

/*     Further Details */
/*     =============== */

/*     02-96 Based on modifications by */
/*     David Day, Sandia National Laboratory, USA */

/*     12-04 Further modifications by */
/*     Ralph Byers, University of Kansas, USA */
/*     This is a modified version of CLAHQR from LAPACK version 3.0. */
/*     It is (1) more robust against overflow and underflow and */
/*     (2) adopts the more conservative Ahues & Tisseur stopping */
/*     criterion (LAWN 122, 1997). */

/*     ========================================================= */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Statement Functions .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Statement Function definitions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    h_dim1 = *ldh;
    h_offset = 1 + h_dim1;
    h__ -= h_offset;
    --w;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;

    /* Function Body */
    *info = 0;

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }
    if (*ilo == *ihi) {
	i__1 = *ilo;
	i__2 = *ilo + *ilo * h_dim1;
	w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
	return 0;
    }

/*     ==== clear out the trash ==== */
    i__1 = *ihi - 3;
    for (j = *ilo; j <= i__1; ++j) {
	i__2 = j + 2 + j * h_dim1;
	h__[i__2].r = 0.f, h__[i__2].i = 0.f;
	i__2 = j + 3 + j * h_dim1;
	h__[i__2].r = 0.f, h__[i__2].i = 0.f;
/* L10: */
    }
    if (*ilo <= *ihi - 2) {
	i__1 = *ihi + (*ihi - 2) * h_dim1;
	h__[i__1].r = 0.f, h__[i__1].i = 0.f;
    }
/*     ==== ensure that subdiagonal entries are real ==== */
    if (*wantt) {
	jlo = 1;
	jhi = *n;
    } else {
	jlo = *ilo;
	jhi = *ihi;
    }
    i__1 = *ihi;
    for (i__ = *ilo + 1; i__ <= i__1; ++i__) {
	if (r_imag(&h__[i__ + (i__ - 1) * h_dim1]) != 0.f) {
/*           ==== The following redundant normalization */
/*           .    avoids problems with both gradual and */
/*           .    sudden underflow in ABS(H(I,I-1)) ==== */
	    i__2 = i__ + (i__ - 1) * h_dim1;
	    i__3 = i__ + (i__ - 1) * h_dim1;
	    r__3 = (r__1 = h__[i__3].r, dabs(r__1)) + (r__2 = r_imag(&h__[i__ 
		    + (i__ - 1) * h_dim1]), dabs(r__2));
	    q__1.r = h__[i__2].r / r__3, q__1.i = h__[i__2].i / r__3;
	    sc.r = q__1.r, sc.i = q__1.i;
	    r_cnjg(&q__2, &sc);
	    r__1 = c_abs(&sc);
	    q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
	    sc.r = q__1.r, sc.i = q__1.i;
	    i__2 = i__ + (i__ - 1) * h_dim1;
	    r__1 = c_abs(&h__[i__ + (i__ - 1) * h_dim1]);
	    h__[i__2].r = r__1, h__[i__2].i = 0.f;
	    i__2 = jhi - i__ + 1;
	    cscal_(&i__2, &sc, &h__[i__ + i__ * h_dim1], ldh);
/* Computing MIN */
	    i__3 = jhi, i__4 = i__ + 1;
	    i__2 = min(i__3,i__4) - jlo + 1;
	    r_cnjg(&q__1, &sc);
	    cscal_(&i__2, &q__1, &h__[jlo + i__ * h_dim1], &c__1);
	    if (*wantz) {
		i__2 = *ihiz - *iloz + 1;
		r_cnjg(&q__1, &sc);
		cscal_(&i__2, &q__1, &z__[*iloz + i__ * z_dim1], &c__1);
	    }
	}
/* L20: */
    }

    nh = *ihi - *ilo + 1;
    nz = *ihiz - *iloz + 1;

/*     Set machine-dependent constants for the stopping criterion. */

    safmin = slamch_("SAFE MINIMUM");
    safmax = 1.f / safmin;
    slabad_(&safmin, &safmax);
    ulp = slamch_("PRECISION");
    smlnum = safmin * ((real) nh / ulp);

/*     I1 and I2 are the indices of the first row and last column of H */
/*     to which transformations must be applied. If eigenvalues only are */
/*     being computed, I1 and I2 are set inside the main loop. */

    if (*wantt) {
	i1 = 1;
	i2 = *n;
    }

/*     The main loop begins here. I is the loop index and decreases from */
/*     IHI to ILO in steps of 1. Each iteration of the loop works */
/*     with the active submatrix in rows and columns L to I. */
/*     Eigenvalues I+1 to IHI have already converged. Either L = ILO, or */
/*     H(L,L-1) is negligible so that the matrix splits. */

    i__ = *ihi;
L30:
    if (i__ < *ilo) {
	goto L150;
    }

/*     Perform QR iterations on rows and columns ILO to I until a */
/*     submatrix of order 1 splits off at the bottom because a */
/*     subdiagonal element has become negligible. */

    l = *ilo;
    for (its = 0; its <= 30; ++its) {

/*        Look for a single small subdiagonal element. */

	i__1 = l + 1;
	for (k = i__; k >= i__1; --k) {
	    i__2 = k + (k - 1) * h_dim1;
	    if ((r__1 = h__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&h__[k + (k 
		    - 1) * h_dim1]), dabs(r__2)) <= smlnum) {
		goto L50;
	    }
	    i__2 = k - 1 + (k - 1) * h_dim1;
	    i__3 = k + k * h_dim1;
	    tst = (r__1 = h__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&h__[k - 
		    1 + (k - 1) * h_dim1]), dabs(r__2)) + ((r__3 = h__[i__3]
		    .r, dabs(r__3)) + (r__4 = r_imag(&h__[k + k * h_dim1]), 
		    dabs(r__4)));
	    if (tst == 0.f) {
		if (k - 2 >= *ilo) {
		    i__2 = k - 1 + (k - 2) * h_dim1;
		    tst += (r__1 = h__[i__2].r, dabs(r__1));
		}
		if (k + 1 <= *ihi) {
		    i__2 = k + 1 + k * h_dim1;
		    tst += (r__1 = h__[i__2].r, dabs(r__1));
		}
	    }
/*           ==== The following is a conservative small subdiagonal */
/*           .    deflation criterion due to Ahues & Tisseur (LAWN 122, */
/*           .    1997). It has better mathematical foundation and */
/*           .    improves accuracy in some examples.  ==== */
	    i__2 = k + (k - 1) * h_dim1;
	    if ((r__1 = h__[i__2].r, dabs(r__1)) <= ulp * tst) {
/* Computing MAX */
		i__2 = k + (k - 1) * h_dim1;
		i__3 = k - 1 + k * h_dim1;
		r__5 = (r__1 = h__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&h__[
			k + (k - 1) * h_dim1]), dabs(r__2)), r__6 = (r__3 = 
			h__[i__3].r, dabs(r__3)) + (r__4 = r_imag(&h__[k - 1 
			+ k * h_dim1]), dabs(r__4));
		ab = dmax(r__5,r__6);
/* Computing MIN */
		i__2 = k + (k - 1) * h_dim1;
		i__3 = k - 1 + k * h_dim1;
		r__5 = (r__1 = h__[i__2].r, dabs(r__1)) + (r__2 = r_imag(&h__[
			k + (k - 1) * h_dim1]), dabs(r__2)), r__6 = (r__3 = 
			h__[i__3].r, dabs(r__3)) + (r__4 = r_imag(&h__[k - 1 
			+ k * h_dim1]), dabs(r__4));
		ba = dmin(r__5,r__6);
		i__2 = k - 1 + (k - 1) * h_dim1;
		i__3 = k + k * h_dim1;
		q__2.r = h__[i__2].r - h__[i__3].r, q__2.i = h__[i__2].i - 
			h__[i__3].i;
		q__1.r = q__2.r, q__1.i = q__2.i;
/* Computing MAX */
		i__4 = k + k * h_dim1;
		r__5 = (r__1 = h__[i__4].r, dabs(r__1)) + (r__2 = r_imag(&h__[
			k + k * h_dim1]), dabs(r__2)), r__6 = (r__3 = q__1.r, 
			dabs(r__3)) + (r__4 = r_imag(&q__1), dabs(r__4));
		aa = dmax(r__5,r__6);
		i__2 = k - 1 + (k - 1) * h_dim1;
		i__3 = k + k * h_dim1;
		q__2.r = h__[i__2].r - h__[i__3].r, q__2.i = h__[i__2].i - 
			h__[i__3].i;
		q__1.r = q__2.r, q__1.i = q__2.i;
/* Computing MIN */
		i__4 = k + k * h_dim1;
		r__5 = (r__1 = h__[i__4].r, dabs(r__1)) + (r__2 = r_imag(&h__[
			k + k * h_dim1]), dabs(r__2)), r__6 = (r__3 = q__1.r, 
			dabs(r__3)) + (r__4 = r_imag(&q__1), dabs(r__4));
		bb = dmin(r__5,r__6);
		s = aa + ab;
/* Computing MAX */
		r__1 = smlnum, r__2 = ulp * (bb * (aa / s));
		if (ba * (ab / s) <= dmax(r__1,r__2)) {
		    goto L50;
		}
	    }
/* L40: */
	}
L50:
	l = k;
	if (l > *ilo) {

/*           H(L,L-1) is negligible */

	    i__1 = l + (l - 1) * h_dim1;
	    h__[i__1].r = 0.f, h__[i__1].i = 0.f;
	}

/*        Exit from loop if a submatrix of order 1 has split off. */

	if (l >= i__) {
	    goto L140;
	}

/*        Now the active submatrix is in rows and columns L to I. If */
/*        eigenvalues only are being computed, only the active submatrix */
/*        need be transformed. */

	if (! (*wantt)) {
	    i1 = l;
	    i2 = i__;
	}

	if (its == 10) {

/*           Exceptional shift. */

	    i__1 = l + 1 + l * h_dim1;
	    s = (r__1 = h__[i__1].r, dabs(r__1)) * .75f;
	    i__1 = l + l * h_dim1;
	    q__1.r = s + h__[i__1].r, q__1.i = h__[i__1].i;
	    t.r = q__1.r, t.i = q__1.i;
	} else if (its == 20) {

/*           Exceptional shift. */

	    i__1 = i__ + (i__ - 1) * h_dim1;
	    s = (r__1 = h__[i__1].r, dabs(r__1)) * .75f;
	    i__1 = i__ + i__ * h_dim1;
	    q__1.r = s + h__[i__1].r, q__1.i = h__[i__1].i;
	    t.r = q__1.r, t.i = q__1.i;
	} else {

/*           Wilkinson's shift. */

	    i__1 = i__ + i__ * h_dim1;
	    t.r = h__[i__1].r, t.i = h__[i__1].i;
	    c_sqrt(&q__2, &h__[i__ - 1 + i__ * h_dim1]);
	    c_sqrt(&q__3, &h__[i__ + (i__ - 1) * h_dim1]);
	    q__1.r = q__2.r * q__3.r - q__2.i * q__3.i, q__1.i = q__2.r * 
		    q__3.i + q__2.i * q__3.r;
	    u.r = q__1.r, u.i = q__1.i;
	    s = (r__1 = u.r, dabs(r__1)) + (r__2 = r_imag(&u), dabs(r__2));
	    if (s != 0.f) {
		i__1 = i__ - 1 + (i__ - 1) * h_dim1;
		q__2.r = h__[i__1].r - t.r, q__2.i = h__[i__1].i - t.i;
		q__1.r = q__2.r * .5f, q__1.i = q__2.i * .5f;
		x.r = q__1.r, x.i = q__1.i;
		sx = (r__1 = x.r, dabs(r__1)) + (r__2 = r_imag(&x), dabs(r__2)
			);
/* Computing MAX */
		r__3 = s, r__4 = (r__1 = x.r, dabs(r__1)) + (r__2 = r_imag(&x)
			, dabs(r__2));
		s = dmax(r__3,r__4);
		q__5.r = x.r / s, q__5.i = x.i / s;
		pow_ci(&q__4, &q__5, &c__2);
		q__7.r = u.r / s, q__7.i = u.i / s;
		pow_ci(&q__6, &q__7, &c__2);
		q__3.r = q__4.r + q__6.r, q__3.i = q__4.i + q__6.i;
		c_sqrt(&q__2, &q__3);
		q__1.r = s * q__2.r, q__1.i = s * q__2.i;
		y.r = q__1.r, y.i = q__1.i;
		if (sx > 0.f) {
		    q__1.r = x.r / sx, q__1.i = x.i / sx;
		    q__2.r = x.r / sx, q__2.i = x.i / sx;
		    if (q__1.r * y.r + r_imag(&q__2) * r_imag(&y) < 0.f) {
			q__3.r = -y.r, q__3.i = -y.i;
			y.r = q__3.r, y.i = q__3.i;
		    }
		}
		q__4.r = x.r + y.r, q__4.i = x.i + y.i;
		cladiv_(&q__3, &u, &q__4);
		q__2.r = u.r * q__3.r - u.i * q__3.i, q__2.i = u.r * q__3.i + 
			u.i * q__3.r;
		q__1.r = t.r - q__2.r, q__1.i = t.i - q__2.i;
		t.r = q__1.r, t.i = q__1.i;
	    }
	}

/*        Look for two consecutive small subdiagonal elements. */

	i__1 = l + 1;
	for (m = i__ - 1; m >= i__1; --m) {

/*           Determine the effect of starting the single-shift QR */
/*           iteration at row M, and see if this would make H(M,M-1) */
/*           negligible. */

	    i__2 = m + m * h_dim1;
	    h11.r = h__[i__2].r, h11.i = h__[i__2].i;
	    i__2 = m + 1 + (m + 1) * h_dim1;
	    h22.r = h__[i__2].r, h22.i = h__[i__2].i;
	    q__1.r = h11.r - t.r, q__1.i = h11.i - t.i;
	    h11s.r = q__1.r, h11s.i = q__1.i;
	    i__2 = m + 1 + m * h_dim1;
	    h21 = h__[i__2].r;
	    s = (r__1 = h11s.r, dabs(r__1)) + (r__2 = r_imag(&h11s), dabs(
		    r__2)) + dabs(h21);
	    q__1.r = h11s.r / s, q__1.i = h11s.i / s;
	    h11s.r = q__1.r, h11s.i = q__1.i;
	    h21 /= s;
	    v[0].r = h11s.r, v[0].i = h11s.i;
	    v[1].r = h21, v[1].i = 0.f;
	    i__2 = m + (m - 1) * h_dim1;
	    h10 = h__[i__2].r;
	    if (dabs(h10) * dabs(h21) <= ulp * (((r__1 = h11s.r, dabs(r__1)) 
		    + (r__2 = r_imag(&h11s), dabs(r__2))) * ((r__3 = h11.r, 
		    dabs(r__3)) + (r__4 = r_imag(&h11), dabs(r__4)) + ((r__5 =
		     h22.r, dabs(r__5)) + (r__6 = r_imag(&h22), dabs(r__6)))))
		    ) {
		goto L70;
	    }
/* L60: */
	}
	i__1 = l + l * h_dim1;
	h11.r = h__[i__1].r, h11.i = h__[i__1].i;
	i__1 = l + 1 + (l + 1) * h_dim1;
	h22.r = h__[i__1].r, h22.i = h__[i__1].i;
	q__1.r = h11.r - t.r, q__1.i = h11.i - t.i;
	h11s.r = q__1.r, h11s.i = q__1.i;
	i__1 = l + 1 + l * h_dim1;
	h21 = h__[i__1].r;
	s = (r__1 = h11s.r, dabs(r__1)) + (r__2 = r_imag(&h11s), dabs(r__2)) 
		+ dabs(h21);
	q__1.r = h11s.r / s, q__1.i = h11s.i / s;
	h11s.r = q__1.r, h11s.i = q__1.i;
	h21 /= s;
	v[0].r = h11s.r, v[0].i = h11s.i;
	v[1].r = h21, v[1].i = 0.f;
L70:

/*        Single-shift QR step */

	i__1 = i__ - 1;
	for (k = m; k <= i__1; ++k) {

/*           The first iteration of this loop determines a reflection G */
/*           from the vector V and applies it from left and right to H, */
/*           thus creating a nonzero bulge below the subdiagonal. */

/*           Each subsequent iteration determines a reflection G to */
/*           restore the Hessenberg form in the (K-1)th column, and thus */
/*           chases the bulge one step toward the bottom of the active */
/*           submatrix. */

/*           V(2) is always real before the call to CLARFG, and hence */
/*           after the call T2 ( = T1*V(2) ) is also real. */

	    if (k > m) {
		ccopy_(&c__2, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
	    }
	    clarfg_(&c__2, v, &v[1], &c__1, &t1);
	    if (k > m) {
		i__2 = k + (k - 1) * h_dim1;
		h__[i__2].r = v[0].r, h__[i__2].i = v[0].i;
		i__2 = k + 1 + (k - 1) * h_dim1;
		h__[i__2].r = 0.f, h__[i__2].i = 0.f;
	    }
	    v2.r = v[1].r, v2.i = v[1].i;
	    q__1.r = t1.r * v2.r - t1.i * v2.i, q__1.i = t1.r * v2.i + t1.i * 
		    v2.r;
	    t2 = q__1.r;

/*           Apply G from the left to transform the rows of the matrix */
/*           in columns K to I2. */

	    i__2 = i2;
	    for (j = k; j <= i__2; ++j) {
		r_cnjg(&q__3, &t1);
		i__3 = k + j * h_dim1;
		q__2.r = q__3.r * h__[i__3].r - q__3.i * h__[i__3].i, q__2.i =
			 q__3.r * h__[i__3].i + q__3.i * h__[i__3].r;
		i__4 = k + 1 + j * h_dim1;
		q__4.r = t2 * h__[i__4].r, q__4.i = t2 * h__[i__4].i;
		q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
		sum.r = q__1.r, sum.i = q__1.i;
		i__3 = k + j * h_dim1;
		i__4 = k + j * h_dim1;
		q__1.r = h__[i__4].r - sum.r, q__1.i = h__[i__4].i - sum.i;
		h__[i__3].r = q__1.r, h__[i__3].i = q__1.i;
		i__3 = k + 1 + j * h_dim1;
		i__4 = k + 1 + j * h_dim1;
		q__2.r = sum.r * v2.r - sum.i * v2.i, q__2.i = sum.r * v2.i + 
			sum.i * v2.r;
		q__1.r = h__[i__4].r - q__2.r, q__1.i = h__[i__4].i - q__2.i;
		h__[i__3].r = q__1.r, h__[i__3].i = q__1.i;
/* L80: */
	    }

/*           Apply G from the right to transform the columns of the */
/*           matrix in rows I1 to min(K+2,I). */

/* Computing MIN */
	    i__3 = k + 2;
	    i__2 = min(i__3,i__);
	    for (j = i1; j <= i__2; ++j) {
		i__3 = j + k * h_dim1;
		q__2.r = t1.r * h__[i__3].r - t1.i * h__[i__3].i, q__2.i = 
			t1.r * h__[i__3].i + t1.i * h__[i__3].r;
		i__4 = j + (k + 1) * h_dim1;
		q__3.r = t2 * h__[i__4].r, q__3.i = t2 * h__[i__4].i;
		q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
		sum.r = q__1.r, sum.i = q__1.i;
		i__3 = j + k * h_dim1;
		i__4 = j + k * h_dim1;
		q__1.r = h__[i__4].r - sum.r, q__1.i = h__[i__4].i - sum.i;
		h__[i__3].r = q__1.r, h__[i__3].i = q__1.i;
		i__3 = j + (k + 1) * h_dim1;
		i__4 = j + (k + 1) * h_dim1;
		r_cnjg(&q__3, &v2);
		q__2.r = sum.r * q__3.r - sum.i * q__3.i, q__2.i = sum.r * 
			q__3.i + sum.i * q__3.r;
		q__1.r = h__[i__4].r - q__2.r, q__1.i = h__[i__4].i - q__2.i;
		h__[i__3].r = q__1.r, h__[i__3].i = q__1.i;
/* L90: */
	    }

	    if (*wantz) {

/*              Accumulate transformations in the matrix Z */

		i__2 = *ihiz;
		for (j = *iloz; j <= i__2; ++j) {
		    i__3 = j + k * z_dim1;
		    q__2.r = t1.r * z__[i__3].r - t1.i * z__[i__3].i, q__2.i =
			     t1.r * z__[i__3].i + t1.i * z__[i__3].r;
		    i__4 = j + (k + 1) * z_dim1;
		    q__3.r = t2 * z__[i__4].r, q__3.i = t2 * z__[i__4].i;
		    q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
		    sum.r = q__1.r, sum.i = q__1.i;
		    i__3 = j + k * z_dim1;
		    i__4 = j + k * z_dim1;
		    q__1.r = z__[i__4].r - sum.r, q__1.i = z__[i__4].i - 
			    sum.i;
		    z__[i__3].r = q__1.r, z__[i__3].i = q__1.i;
		    i__3 = j + (k + 1) * z_dim1;
		    i__4 = j + (k + 1) * z_dim1;
		    r_cnjg(&q__3, &v2);
		    q__2.r = sum.r * q__3.r - sum.i * q__3.i, q__2.i = sum.r *
			     q__3.i + sum.i * q__3.r;
		    q__1.r = z__[i__4].r - q__2.r, q__1.i = z__[i__4].i - 
			    q__2.i;
		    z__[i__3].r = q__1.r, z__[i__3].i = q__1.i;
/* L100: */
		}
	    }

	    if (k == m && m > l) {

/*              If the QR step was started at row M > L because two */
/*              consecutive small subdiagonals were found, then extra */
/*              scaling must be performed to ensure that H(M,M-1) remains */
/*              real. */

		q__1.r = 1.f - t1.r, q__1.i = 0.f - t1.i;
		temp.r = q__1.r, temp.i = q__1.i;
		r__1 = c_abs(&temp);
		q__1.r = temp.r / r__1, q__1.i = temp.i / r__1;
		temp.r = q__1.r, temp.i = q__1.i;
		i__2 = m + 1 + m * h_dim1;
		i__3 = m + 1 + m * h_dim1;
		r_cnjg(&q__2, &temp);
		q__1.r = h__[i__3].r * q__2.r - h__[i__3].i * q__2.i, q__1.i =
			 h__[i__3].r * q__2.i + h__[i__3].i * q__2.r;
		h__[i__2].r = q__1.r, h__[i__2].i = q__1.i;
		if (m + 2 <= i__) {
		    i__2 = m + 2 + (m + 1) * h_dim1;
		    i__3 = m + 2 + (m + 1) * h_dim1;
		    q__1.r = h__[i__3].r * temp.r - h__[i__3].i * temp.i, 
			    q__1.i = h__[i__3].r * temp.i + h__[i__3].i * 
			    temp.r;
		    h__[i__2].r = q__1.r, h__[i__2].i = q__1.i;
		}
		i__2 = i__;
		for (j = m; j <= i__2; ++j) {
		    if (j != m + 1) {
			if (i2 > j) {
			    i__3 = i2 - j;
			    cscal_(&i__3, &temp, &h__[j + (j + 1) * h_dim1], 
				    ldh);
			}
			i__3 = j - i1;
			r_cnjg(&q__1, &temp);
			cscal_(&i__3, &q__1, &h__[i1 + j * h_dim1], &c__1);
			if (*wantz) {
			    r_cnjg(&q__1, &temp);
			    cscal_(&nz, &q__1, &z__[*iloz + j * z_dim1], &
				    c__1);
			}
		    }
/* L110: */
		}
	    }
/* L120: */
	}

/*        Ensure that H(I,I-1) is real. */

	i__1 = i__ + (i__ - 1) * h_dim1;
	temp.r = h__[i__1].r, temp.i = h__[i__1].i;
	if (r_imag(&temp) != 0.f) {
	    rtemp = c_abs(&temp);
	    i__1 = i__ + (i__ - 1) * h_dim1;
	    h__[i__1].r = rtemp, h__[i__1].i = 0.f;
	    q__1.r = temp.r / rtemp, q__1.i = temp.i / rtemp;
	    temp.r = q__1.r, temp.i = q__1.i;
	    if (i2 > i__) {
		i__1 = i2 - i__;
		r_cnjg(&q__1, &temp);
		cscal_(&i__1, &q__1, &h__[i__ + (i__ + 1) * h_dim1], ldh);
	    }
	    i__1 = i__ - i1;
	    cscal_(&i__1, &temp, &h__[i1 + i__ * h_dim1], &c__1);
	    if (*wantz) {
		cscal_(&nz, &temp, &z__[*iloz + i__ * z_dim1], &c__1);
	    }
	}

/* L130: */
    }

/*     Failure to converge in remaining number of iterations */

    *info = i__;
    return 0;

L140:

/*     H(I,I-1) is negligible: one eigenvalue has converged. */

    i__1 = i__;
    i__2 = i__ + i__ * h_dim1;
    w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;

/*     return to start of the main loop with new value of I. */

    i__ = l - 1;
    goto L30;

L150:
    return 0;

/*     End of CLAHQR */

} /* clahqr_ */
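/* -------------------------------------------------------------------------
   Added illustration (not part of the original f2c output).  The branch
   labelled "Wilkinson's shift" above picks the shift for the single-shift
   QR step from the trailing 2-by-2 block of the active submatrix.  The
   sketch below restates that computation with C99 <complex.h> arithmetic
   for readability; the function name wilkinson_shift and the use of double
   complex are illustrative choices, and the 1-norm scaling the original
   performs to guard against overflow/underflow is omitted here.
   ------------------------------------------------------------------------- */
#include <complex.h>

/* Shift for the trailing 2-by-2 block [ h11 h12 ; h21 h22 ] of H. */
double complex wilkinson_shift(double complex h11, double complex h12,
	double complex h21, double complex h22)
{
    double complex t = h22;
    double complex u = csqrt(h12) * csqrt(h21);

    if (cabs(u) != 0.0) {
	double complex x = 0.5 * (h11 - h22);
	double complex y = csqrt(x * x + u * u);
	/* Choose the sign of y that avoids cancellation in x + y,
	   mirroring the sign test performed on x and y above. */
	if (creal(conj(x) * y) < 0.0) {
	    y = -y;
	}
	t -= u * (u / (x + y));
    }
    return t;
}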
Example #8
0
/* Subroutine */ int cdrvgg_(integer *nsizes, integer *nn, integer *ntypes, 
	logical *dotype, integer *iseed, real *thresh, real *thrshn, integer *
	nounit, complex *a, integer *lda, complex *b, complex *s, complex *t, 
	complex *s2, complex *t2, complex *q, integer *ldq, complex *z__, 
	complex *alpha1, complex *beta1, complex *alpha2, complex *beta2, 
	complex *vl, complex *vr, complex *work, integer *lwork, real *rwork, 
	real *result, integer *info)
{
    /* Initialized data */

    static integer kclass[26] = { 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,
	    2,2,2,3 };
    static integer kbmagn[26] = { 1,1,1,1,1,1,1,1,3,2,3,2,2,3,1,1,1,1,1,1,1,3,
	    2,3,2,1 };
    static integer ktrian[26] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
	    1,1,1,1 };
    static logical lasign[26] = { FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,
	    TRUE_,FALSE_,TRUE_,TRUE_,FALSE_,FALSE_,TRUE_,TRUE_,TRUE_,FALSE_,
	    TRUE_,FALSE_,FALSE_,FALSE_,TRUE_,TRUE_,TRUE_,TRUE_,TRUE_,FALSE_ };
    static logical lbsign[26] = { FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,
	    FALSE_,TRUE_,FALSE_,FALSE_,TRUE_,TRUE_,FALSE_,FALSE_,TRUE_,FALSE_,
	    TRUE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,
	    FALSE_ };
    static integer kz1[6] = { 0,1,2,1,3,3 };
    static integer kz2[6] = { 0,0,1,2,1,1 };
    static integer kadd[6] = { 0,0,0,0,3,2 };
    static integer katype[26] = { 0,1,0,1,2,3,4,1,4,4,1,1,4,4,4,2,4,5,8,7,9,4,
	    4,4,4,0 };
    static integer kbtype[26] = { 0,0,1,1,2,-3,1,4,1,1,4,4,1,1,-4,2,-4,8,8,8,
	    8,8,8,8,8,0 };
    static integer kazero[26] = { 1,1,1,1,1,1,2,1,2,2,1,1,2,2,3,1,3,5,5,5,5,3,
	    3,3,3,1 };
    static integer kbzero[26] = { 1,1,1,1,1,1,1,2,1,1,2,2,1,1,4,1,4,6,6,6,6,4,
	    4,4,4,1 };
    static integer kamagn[26] = { 1,1,1,1,1,1,1,1,2,3,2,3,2,3,1,1,1,1,1,1,1,2,
	    3,3,2,1 };

    /* Format strings */
    static char fmt_9999[] = "(\002 CDRVGG: \002,a,\002 returned INFO=\002,i"
	    "6,\002.\002,/9x,\002N=\002,i6,\002, JTYPE=\002,i6,\002, ISEED="
	    "(\002,3(i5,\002,\002),i5,\002)\002)";
    static char fmt_9998[] = "(\002 CDRVGG: \002,a,\002 Eigenvectors from"
	    " \002,a,\002 incorrectly \002,\002normalized.\002,/\002 Bits of "
	    "error=\002,0p,g10.3,\002,\002,9x,\002N=\002,i6,\002, JTYPE=\002,"
	    "i6,\002, ISEED=(\002,3(i5,\002,\002),i5,\002)\002)";
    static char fmt_9997[] = "(/1x,a3,\002 -- Complex Generalized eigenvalue"
	    " problem driver\002)";
    static char fmt_9996[] = "(\002 Matrix types (see CDRVGG for details):"
	    " \002)";
    static char fmt_9995[] = "(\002 Special Matrices:\002,23x,\002(J'=transp"
	    "osed Jordan block)\002,/\002   1=(0,0)  2=(I,0)  3=(0,I)  4=(I,I"
	    ")  5=(J',J')  \002,\0026=(diag(J',I), diag(I,J'))\002,/\002 Diag"
	    "onal Matrices:  ( \002,\002D=diag(0,1,2,...) )\002,/\002   7=(D,"
	    "I)   9=(large*D, small*I\002,\002)  11=(large*I, small*D)  13=(l"
	    "arge*D, large*I)\002,/\002   8=(I,D)  10=(small*D, large*I)  12="
	    "(small*I, large*D) \002,\002 14=(small*D, small*I)\002,/\002  15"
	    "=(D, reversed D)\002)";
    static char fmt_9994[] = "(\002 Matrices Rotated by Random \002,a,\002 M"
	    "atrices U, V:\002,/\002  16=Transposed Jordan Blocks            "
	    " 19=geometric \002,\002alpha, beta=0,1\002,/\002  17=arithm. alp"
	    "ha&beta             \002,\002      20=arithmetic alpha, beta=0,"
	    "1\002,/\002  18=clustered \002,\002alpha, beta=0,1            21"
	    "=random alpha, beta=0,1\002,/\002 Large & Small Matrices:\002,"
	    "/\002  22=(large, small)   \002,\00223=(small,large)    24=(smal"
	    "l,small)    25=(large,large)\002,/\002  26=random O(1) matrices"
	    ".\002)";
    static char fmt_9993[] = "(/\002 Tests performed:  (S is Schur, T is tri"
	    "angular, \002,\002Q and Z are \002,a,\002,\002,/20x,\002l and r "
	    "are the appropriate left and right\002,/19x,\002eigenvectors, re"
	    "sp., a is alpha, b is beta, and\002,/19x,a,\002 means \002,a,"
	    "\002.)\002,/\002 1 = | A - Q S Z\002,a,\002 | / ( |A| n ulp )   "
	    "   2 = | B - Q T Z\002,a,\002 | / ( |B| n ulp )\002,/\002 3 = | "
	    "I - QQ\002,a,\002 | / ( n ulp )             4 = | I - ZZ\002,a"
	    ",\002 | / ( n ulp )\002,/\002 5 = difference between (alpha,beta"
	    ") and diagonals of\002,\002 (S,T)\002,/\002 6 = max | ( b A - a "
	    "B )\002,a,\002 l | / const.   7 = max | ( b A - a B ) r | / cons"
	    "t.\002,/1x)";
    static char fmt_9992[] = "(\002 Matrix order=\002,i5,\002, type=\002,i2"
	    ",\002, seed=\002,4(i4,\002,\002),\002 result \002,i3,\002 is\002"
	    ",0p,f8.2)";
    static char fmt_9991[] = "(\002 Matrix order=\002,i5,\002, type=\002,i2"
	    ",\002, seed=\002,4(i4,\002,\002),\002 result \002,i3,\002 is\002"
	    ",1p,e10.3)";

    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, s_dim1, 
	    s_offset, s2_dim1, s2_offset, t_dim1, t_offset, t2_dim1, 
	    t2_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, z_dim1, 
	    z_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8, i__9, 
	    i__10, i__11;
    real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8, r__9, r__10, r__11, 
	    r__12, r__13, r__14, r__15, r__16;
    complex q__1, q__2, q__3, q__4;

    /* Builtin functions */
    double r_sign(real *, real *), c_abs(complex *);
    void r_cnjg(complex *, complex *);
    integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void);
    double r_imag(complex *);

    /* Local variables */
    static integer iadd, nmax;
    static real temp1, temp2;
    static integer j, n;
    static logical badnn;
    extern /* Subroutine */ int cgegs_(char *, char *, integer *, complex *, 
	    integer *, complex *, integer *, complex *, complex *, complex *, 
	    integer *, complex *, integer *, complex *, integer *, real *, 
	    integer *), cgegv_(char *, char *, integer *, 
	    complex *, integer *, complex *, integer *, complex *, complex *, 
	    complex *, integer *, complex *, integer *, complex *, integer *, 
	    real *, integer *), cget51_(integer *, integer *, 
	    complex *, integer *, complex *, integer *, complex *, integer *, 
	    complex *, integer *, complex *, real *, real *), cget52_(logical 
	    *, integer *, complex *, integer *, complex *, integer *, complex 
	    *, integer *, complex *, complex *, complex *, real *, real *);
    static real dumma[4];
    static integer iinfo;
    static real rmagn[4];
    static complex ctemp;
    static integer nmats, jsize, nerrs, i1, jtype, ntest, n1;
    extern /* Subroutine */ int clatm4_(integer *, integer *, integer *, 
	    integer *, logical *, real *, real *, real *, integer *, integer *
	    , complex *, integer *), cunm2r_(char *, char *, integer *, 
	    integer *, integer *, complex *, integer *, complex *, complex *, 
	    integer *, complex *, integer *);
    static integer jc, nb;
    extern /* Subroutine */ int slabad_(real *, real *);
    static integer in, jr;
    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *, 
	    integer *, complex *);
    static integer ns;
    extern /* Complex */ VOID clarnd_(complex *, integer *, integer *);
    extern doublereal slamch_(char *);
    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex 
	    *, integer *, complex *, integer *);
    static real safmin, safmax;
    static integer ioldsd[4];
    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
	    integer *, integer *, ftnlen, ftnlen);
    extern /* Subroutine */ int alasvm_(char *, integer *, integer *, integer 
	    *, integer *), claset_(char *, integer *, integer *, 
	    complex *, complex *, complex *, integer *), xerbla_(char 
	    *, integer *);
    static real ulpinv;
    static integer lwkopt, mtypes, ntestt, nbz;
    static real ulp;

    /* Fortran I/O blocks */
    static cilist io___43 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___44 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___47 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___49 = { 0, 0, 0, fmt_9998, 0 };
    static cilist io___50 = { 0, 0, 0, fmt_9998, 0 };
    static cilist io___51 = { 0, 0, 0, fmt_9997, 0 };
    static cilist io___52 = { 0, 0, 0, fmt_9996, 0 };
    static cilist io___53 = { 0, 0, 0, fmt_9995, 0 };
    static cilist io___54 = { 0, 0, 0, fmt_9994, 0 };
    static cilist io___55 = { 0, 0, 0, fmt_9993, 0 };
    static cilist io___56 = { 0, 0, 0, fmt_9992, 0 };
    static cilist io___57 = { 0, 0, 0, fmt_9991, 0 };



#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]
#define q_subscr(a_1,a_2) (a_2)*q_dim1 + a_1
#define q_ref(a_1,a_2) q[q_subscr(a_1,a_2)]
#define s_subscr(a_1,a_2) (a_2)*s_dim1 + a_1
#define s_ref(a_1,a_2) s[s_subscr(a_1,a_2)]
#define t_subscr(a_1,a_2) (a_2)*t_dim1 + a_1
#define t_ref(a_1,a_2) t[t_subscr(a_1,a_2)]
#define z___subscr(a_1,a_2) (a_2)*z_dim1 + a_1
#define z___ref(a_1,a_2) z__[z___subscr(a_1,a_2)]


/*  -- LAPACK test routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    CDRVGG  checks the nonsymmetric generalized eigenvalue driver   
    routines.   
                                  T          T        T   
    CGEGS factors A and B as Q S Z  and Q T Z , where   means   
    transpose, T is upper triangular, S is in generalized Schur form   
    (upper triangular), and Q and Z are unitary.  It also   
    computes the generalized eigenvalues (alpha(1),beta(1)), ...,   
    (alpha(n),beta(n)), where alpha(j)=S(j,j) and beta(j)=T(j,j) --   
    thus, w(j) = alpha(j)/beta(j) is a root of the generalized   
    eigenvalue problem   

        det( A - w(j) B ) = 0   

    and m(j) = beta(j)/alpha(j) is a root of the essentially equivalent   
    problem   

        det( m(j) A - B ) = 0   

    CGEGV computes the generalized eigenvalues (alpha(1),beta(1)), ...,   
    (alpha(n),beta(n)), the matrix L whose columns contain the   
    generalized left eigenvectors l, and the matrix R whose columns   
    contain the generalized right eigenvectors r for the pair (A,B).   

    When CDRVGG is called, a number of matrix "sizes" ("n's") and a   
    number of matrix "types" are specified.  For each size ("n")   
    and each type of matrix, one matrix will be generated and used   
    to test the nonsymmetric eigenroutines.  For each matrix, 7   
    tests will be performed and compared with the threshold THRESH:   

    Results from CGEGS:   

                     H   
    (1)   | A - Q S Z  | / ( |A| n ulp )   

                     H   
    (2)   | B - Q T Z  | / ( |B| n ulp )   

                  H   
    (3)   | I - QQ  | / ( n ulp )   

                  H   
    (4)   | I - ZZ  | / ( n ulp )   

    (5)   maximum over j of D(j)  where:   

                        |alpha(j) - S(j,j)|        |beta(j) - T(j,j)|   
              D(j) = ------------------------ + -----------------------   
                     max(|alpha(j)|,|S(j,j)|)   max(|beta(j)|,|T(j,j)|)   

    Results from CGEGV:   

    (6)   max over all left eigenvalue/-vector pairs (beta/alpha,l) of   

       | l**H * (beta A - alpha B) | / ( ulp max( |beta A|, |alpha B| ) )   

          where l**H is the conjugate transpose of l.   

    (7)   max over all right eigenvalue/-vector pairs (beta/alpha,r) of   

          | (beta A - alpha B) r | / ( ulp max( |beta A|, |alpha B| ) )   

    Test Matrices   
    ---- --------   

    The sizes of the test matrices are specified by an array   
    NN(1:NSIZES); the value of each element NN(j) specifies one size.   
    The "types" are specified by a logical array DOTYPE( 1:NTYPES ); if   
    DOTYPE(j) is .TRUE., then matrix type "j" will be generated.   
    Currently, the list of possible types is:   

    (1)  ( 0, 0 )         (a pair of zero matrices)   

    (2)  ( I, 0 )         (an identity and a zero matrix)   

    (3)  ( 0, I )         (a zero matrix and an identity)   

    (4)  ( I, I )         (a pair of identity matrices)   

            t   t   
    (5)  ( J , J  )       (a pair of transposed Jordan blocks)   

                                        t                ( I   0  )   
    (6)  ( X, Y )         where  X = ( J   0  )  and Y = (      t )   
                                     ( 0   I  )          ( 0   J  )   
                          and I is a k x k identity and J a (k+1)x(k+1)   
                          Jordan block; k=(N-1)/2   

    (7)  ( D, I )         where D is diag( 0, 1,..., N-1 ) (a diagonal   
                          matrix with those diagonal entries.)   
    (8)  ( I, D )   

    (9)  ( big*D, small*I ) where "big" is near overflow and small=1/big   

    (10) ( small*D, big*I )   

    (11) ( big*I, small*D )   

    (12) ( small*I, big*D )   

    (13) ( big*D, big*I )   

    (14) ( small*D, small*I )   

    (15) ( D1, D2 )        where D1 is diag( 0, 0, 1, ..., N-3, 0 ) and   
                           D2 is diag( 0, N-3, N-4,..., 1, 0, 0 )   
              t   t   
    (16) Q ( J , J ) Z     where Q and Z are random unitary matrices.   

    (17) Q ( T1, T2 ) Z    where T1 and T2 are upper triangular matrices   
                           with random O(1) entries above the diagonal   
                           and diagonal entries diag(T1) =   
                           ( 0, 0, 1, ..., N-3, 0 ) and diag(T2) =   
                           ( 0, N-3, N-4,..., 1, 0, 0 )   

    (18) Q ( T1, T2 ) Z    diag(T1) = ( 0, 0, 1, 1, s, ..., s, 0 )   
                           diag(T2) = ( 0, 1, 0, 1,..., 1, 0 )   
                           s = machine precision.   

    (19) Q ( T1, T2 ) Z    diag(T1)=( 0,0,1,1, 1-d, ..., 1-(N-5)*d=s, 0 )   
                           diag(T2) = ( 0, 1, 0, 1, ..., 1, 0 )   

                                                           N-5   
    (20) Q ( T1, T2 ) Z    diag(T1)=( 0, 0, 1, 1, a, ..., a   =s, 0 )   
                           diag(T2) = ( 0, 1, 0, 1, ..., 1, 0, 0 )   

    (21) Q ( T1, T2 ) Z    diag(T1)=( 0, 0, 1, r1, r2, ..., r(N-4), 0 )   
                           diag(T2) = ( 0, 1, 0, 1, ..., 1, 0, 0 )   
                           where r1,..., r(N-4) are random.   

    (22) Q ( big*T1, small*T2 ) Z    diag(T1) = ( 0, 0, 1, ..., N-3, 0 )   
                                     diag(T2) = ( 0, 1, ..., 1, 0, 0 )   

    (23) Q ( small*T1, big*T2 ) Z    diag(T1) = ( 0, 0, 1, ..., N-3, 0 )   
                                     diag(T2) = ( 0, 1, ..., 1, 0, 0 )   

    (24) Q ( small*T1, small*T2 ) Z  diag(T1) = ( 0, 0, 1, ..., N-3, 0 )   
                                     diag(T2) = ( 0, 1, ..., 1, 0, 0 )   

    (25) Q ( big*T1, big*T2 ) Z      diag(T1) = ( 0, 0, 1, ..., N-3, 0 )   
                                     diag(T2) = ( 0, 1, ..., 1, 0, 0 )   

    (26) Q ( T1, T2 ) Z     where T1 and T2 are random upper-triangular   
                            matrices.   

    Arguments   
    =========   

    NSIZES  (input) INTEGER   
            The number of sizes of matrices to use.  If it is zero,   
            CDRVGG does nothing.  It must be at least zero.   

    NN      (input) INTEGER array, dimension (NSIZES)   
            An array containing the sizes to be used for the matrices.   
            Zero values will be skipped.  The values must be at least   
            zero.   

    NTYPES  (input) INTEGER   
            The number of elements in DOTYPE.   If it is zero, CDRVGG   
            does nothing.  It must be at least zero.  If it is MAXTYP+1   
            and NSIZES is 1, then an additional type, MAXTYP+1, is   
            defined, which uses whatever matrix is already in A.  This   
            is only useful if DOTYPE(1:MAXTYP) is .FALSE. and   
            DOTYPE(MAXTYP+1) is .TRUE. .   

    DOTYPE  (input) LOGICAL array, dimension (NTYPES)   
            If DOTYPE(j) is .TRUE., then for each size in NN a   
            matrix of that size and of type j will be generated.   
            If NTYPES is smaller than the maximum number of types   
            defined (PARAMETER MAXTYP), then types NTYPES+1 through   
            MAXTYP will not be generated.  If NTYPES is larger   
            than MAXTYP, DOTYPE(MAXTYP+1) through DOTYPE(NTYPES)   
            will be ignored.   

    ISEED   (input/output) INTEGER array, dimension (4)   
            On entry ISEED specifies the seed of the random number   
            generator. The array elements should be between 0 and 4095;   
            if not, they will be reduced mod 4096.  Also, ISEED(4) must   
            be odd.  The random number generator uses a linear   
            congruential sequence limited to small integers, and so   
            should produce machine independent random numbers. The   
            values of ISEED are changed on exit, and can be used in the   
            next call to CDRVGG to continue the same random number   
            sequence.   

    THRESH  (input) REAL   
            A test will count as "failed" if the "error", computed as   
            described above, exceeds THRESH.  Note that the error is   
            scaled to be O(1), so THRESH should be a reasonably small   
            multiple of 1, e.g., 10 or 100.  In particular, it should   
            not depend on the precision (single vs. double) or the size   
            of the matrix.  It must be at least zero.   

    THRSHN  (input) REAL   
            Threshold for reporting eigenvector normalization error.   
            If the normalization of any eigenvector differs from 1 by   
            more than THRSHN*ulp, then a special error message will be   
            printed.  (This is handled separately from the other tests,   
            since only a compiler or programming error should cause an   
            error message, at least if THRSHN is at least 5--10.)   

    NOUNIT  (input) INTEGER   
            The FORTRAN unit number for printing out error messages   
            (e.g., if a routine returns IINFO not equal to 0.)   

    A       (input/workspace) COMPLEX array, dimension (LDA, max(NN))   
            Used to hold the original A matrix.  Used as input only   
            if NTYPES=MAXTYP+1, DOTYPE(1:MAXTYP)=.FALSE., and   
            DOTYPE(MAXTYP+1)=.TRUE.   

    LDA     (input) INTEGER   
            The leading dimension of A, B, S, T, S2, and T2.   
            It must be at least 1 and at least max( NN ).   

    B       (input/workspace) COMPLEX array, dimension (LDA, max(NN))   
            Used to hold the original B matrix.  Used as input only   
            if NTYPES=MAXTYP+1, DOTYPE(1:MAXTYP)=.FALSE., and   
            DOTYPE(MAXTYP+1)=.TRUE.   

    S       (workspace) COMPLEX array, dimension (LDA, max(NN))   
            The upper triangular matrix computed from A by CGEGS.   

    T       (workspace) COMPLEX array, dimension (LDA, max(NN))   
            The upper triangular matrix computed from B by CGEGS.   

    S2      (workspace) COMPLEX array, dimension (LDA, max(NN))   
            The matrix computed from A by CGEGV.  This will be the   
            Schur (upper triangular) form of some matrix related to A,   
            but will not, in general, be the same as S.   

    T2      (workspace) COMPLEX array, dimension (LDA, max(NN))   
            The matrix computed from B by CGEGV.  This will be the   
            Schur form of some matrix related to B, but will not, in   
            general, be the same as T.   

    Q       (workspace) COMPLEX array, dimension (LDQ, max(NN))   
            The (left) unitary matrix computed by CGEGS.   

    LDQ     (input) INTEGER   
            The leading dimension of Q, Z, VL, and VR.  It must   
            be at least 1 and at least max( NN ).   

    Z       (workspace) COMPLEX array, dimension (LDQ, max(NN))   
            The (right) unitary matrix computed by CGEGS.   

    ALPHA1  (workspace) COMPLEX array, dimension (max(NN))   
    BETA1   (workspace) COMPLEX array, dimension (max(NN))   
            The generalized eigenvalues of (A,B) computed by CGEGS.   
            ALPHA1(k) / BETA1(k)  is the k-th generalized eigenvalue of   
            the matrices in A and B.   

    ALPHA2  (workspace) COMPLEX array, dimension (max(NN))   
    BETA2   (workspace) COMPLEX array, dimension (max(NN))   
            The generalized eigenvalues of (A,B) computed by CGEGV.   
            ALPHA2(k) / BETA2(k)  is the k-th generalized eigenvalue of   
            the matrices in A and B.   

    VL      (workspace) COMPLEX array, dimension (LDQ, max(NN))   
            The (lower triangular) left eigenvector matrix for the   
            matrices in A and B.   

    VR      (workspace) COMPLEX array, dimension (LDQ, max(NN))   
            The (upper triangular) right eigenvector matrix for the   
            matrices in A and B.   

    WORK    (workspace) COMPLEX array, dimension (LWORK)   

    LWORK   (input) INTEGER   
            The number of entries in WORK.  This must be at least   
            MAX( 2*N, N*(NB+1), (k+1)*(2*k+N+1) ), where "k" is the   
            sum of the blocksize and number-of-shifts for CHGEQZ, and   
            NB is the greatest of the blocksizes for CGEQRF, CUNMQR,   
            and CUNGQR.  (The blocksizes and the number-of-shifts are   
            retrieved through calls to ILAENV.)   
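            As a purely illustrative (hypothetical) instance: with N = 100,   
            NB = 32, and k = 40, this bound is   
               MAX( 2*100, 100*(32+1), (40+1)*(2*40+100+1) )   
                 = MAX( 200, 3300, 7421 ) = 7421.   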

    RWORK   (workspace) REAL array, dimension (8*N)   

    RESULT  (output) REAL array, dimension (7)   
            The values computed by the tests described above.   
            The values are currently limited to 1/ulp, to avoid   
            overflow.   

    INFO    (output) INTEGER   
            = 0:  successful exit   
            < 0:  if INFO = -i, the i-th argument had an illegal value.   
            > 0:  A routine returned an error code.  INFO is the   
                  absolute value of the INFO value returned.   

    =====================================================================   

       Parameter adjustments */
    --nn;
    --dotype;
    --iseed;
    t2_dim1 = *lda;
    t2_offset = 1 + t2_dim1 * 1;
    t2 -= t2_offset;
    s2_dim1 = *lda;
    s2_offset = 1 + s2_dim1 * 1;
    s2 -= s2_offset;
    t_dim1 = *lda;
    t_offset = 1 + t_dim1 * 1;
    t -= t_offset;
    s_dim1 = *lda;
    s_offset = 1 + s_dim1 * 1;
    s -= s_offset;
    b_dim1 = *lda;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    vr_dim1 = *ldq;
    vr_offset = 1 + vr_dim1 * 1;
    vr -= vr_offset;
    vl_dim1 = *ldq;
    vl_offset = 1 + vl_dim1 * 1;
    vl -= vl_offset;
    z_dim1 = *ldq;
    z_offset = 1 + z_dim1 * 1;
    z__ -= z_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    --alpha1;
    --beta1;
    --alpha2;
    --beta2;
    --work;
    --rwork;
    --result;

    /* Function Body   

       Check for errors */

    *info = 0;

    badnn = FALSE_;
    nmax = 1;
    i__1 = *nsizes;
    for (j = 1; j <= i__1; ++j) {
/* Computing MAX */
	i__2 = nmax, i__3 = nn[j];
	nmax = max(i__2,i__3);
	if (nn[j] < 0) {
	    badnn = TRUE_;
	}
/* L10: */
    }

/*     Maximum blocksize and shift -- we assume that blocksize and number   
       of shifts are monotone increasing functions of N.   

   Computing MAX */
    i__1 = 1, i__2 = ilaenv_(&c__1, "CGEQRF", " ", &nmax, &nmax, &c_n1, &c_n1,
	     (ftnlen)6, (ftnlen)1), i__1 = max(i__1,i__2), i__2 = ilaenv_(&
	    c__1, "CUNMQR", "LC", &nmax, &nmax, &nmax, &c_n1, (ftnlen)6, (
	    ftnlen)2), i__1 = max(i__1,i__2), i__2 = ilaenv_(&c__1, "CUNGQR", 
	    " ", &nmax, &nmax, &nmax, &c_n1, (ftnlen)6, (ftnlen)1);
    nb = max(i__1,i__2);
    nbz = ilaenv_(&c__1, "CHGEQZ", "SII", &nmax, &c__1, &nmax, &c__0, (ftnlen)
	    6, (ftnlen)3);
    ns = ilaenv_(&c__4, "CHGEQZ", "SII", &nmax, &c__1, &nmax, &c__0, (ftnlen)
	    6, (ftnlen)3);
    i1 = nbz + ns;
/* Computing MAX */
    i__1 = nmax << 1, i__2 = nmax * (nb + 1), i__1 = max(i__1,i__2), i__2 = ((
	    i1 << 1) + nmax + 1) * (i1 + 1);
    lwkopt = max(i__1,i__2);

/*     Check for errors */

    if (*nsizes < 0) {
	*info = -1;
    } else if (badnn) {
	*info = -2;
    } else if (*ntypes < 0) {
	*info = -3;
    } else if (*thresh < 0.f) {
	*info = -6;
    } else if (*lda <= 1 || *lda < nmax) {
	*info = -10;
    } else if (*ldq <= 1 || *ldq < nmax) {
	*info = -19;
    } else if (lwkopt > *lwork) {
	*info = -30;
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CDRVGG", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*nsizes == 0 || *ntypes == 0) {
	return 0;
    }

    ulp = slamch_("Precision");
    safmin = slamch_("Safe minimum");
    safmin /= ulp;
    safmax = 1.f / safmin;
    slabad_(&safmin, &safmax);
    ulpinv = 1.f / ulp;

/*     The values RMAGN(2:3) depend on N, see below. */

    rmagn[0] = 0.f;
    rmagn[1] = 1.f;

/*     Loop over sizes, types */

    ntestt = 0;
    nerrs = 0;
    nmats = 0;

    i__1 = *nsizes;
    for (jsize = 1; jsize <= i__1; ++jsize) {
	n = nn[jsize];
	n1 = max(1,n);
	rmagn[2] = safmax * ulp / (real) n1;
	rmagn[3] = safmin * ulpinv * n1;

	if (*nsizes != 1) {
	    mtypes = min(26,*ntypes);
	} else {
	    mtypes = min(27,*ntypes);
	}

	i__2 = mtypes;
	for (jtype = 1; jtype <= i__2; ++jtype) {
	    if (! dotype[jtype]) {
		goto L150;
	    }
	    ++nmats;
	    ntest = 0;

/*           Save ISEED in case of an error. */

	    for (j = 1; j <= 4; ++j) {
		ioldsd[j - 1] = iseed[j];
/* L20: */
	    }

/*           Initialize RESULT */

	    for (j = 1; j <= 7; ++j) {
		result[j] = 0.f;
/* L30: */
	    }

/*           Compute A and B   

             Description of control parameters:   

             KCLASS: =1 means w/o rotation, =2 means w/ rotation,   
                     =3 means random.   
             KATYPE: the "type" to be passed to CLATM4 for computing A.   
             KAZERO: the pattern of zeros on the diagonal for A:   
                     =1: ( xxx ), =2: (0, xxx ) =3: ( 0, 0, xxx, 0 ),   
                     =4: ( 0, xxx, 0, 0 ), =5: ( 0, 0, 1, xxx, 0 ),   
                     =6: ( 0, 1, 0, xxx, 0 ).  (xxx means a string of   
                     non-zero entries.)   
             KAMAGN: the magnitude of the matrix: =0: zero, =1: O(1),   
                     =2: large, =3: small.   
             LASIGN: .TRUE. if the diagonal elements of A are to be   
                     multiplied by a random magnitude 1 number.   
             KBTYPE, KBZERO, KBMAGN, IBSIGN: the same, but for B.   
             KTRIAN: =0: don't fill in the upper triangle, =1: do.   
             KZ1, KZ2, KADD: used to implement KAZERO and KBZERO.   
             RMAGN:  used to implement KAMAGN and KBMAGN. */
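
/*           For example (illustrative only): KAZERO = 3 with N = 6 gives   
             the diagonal pattern ( 0, 0, x, x, x, 0 ), i.e. two leading   
             zeros and one trailing zero around the nonzero entries. */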

	    if (mtypes > 26) {
		goto L110;
	    }
	    iinfo = 0;
	    if (kclass[jtype - 1] < 3) {

/*              Generate A (w/o rotation) */

		if ((i__3 = katype[jtype - 1], abs(i__3)) == 3) {
		    in = ((n - 1) / 2 << 1) + 1;
		    if (in != n) {
			claset_("Full", &n, &n, &c_b1, &c_b1, &a[a_offset], 
				lda);
		    }
		} else {
		    in = n;
		}
		clatm4_(&katype[jtype - 1], &in, &kz1[kazero[jtype - 1] - 1], 
			&kz2[kazero[jtype - 1] - 1], &lasign[jtype - 1], &
			rmagn[kamagn[jtype - 1]], &ulp, &rmagn[ktrian[jtype - 
			1] * kamagn[jtype - 1]], &c__2, &iseed[1], &a[
			a_offset], lda);
		iadd = kadd[kazero[jtype - 1] - 1];
		if (iadd > 0 && iadd <= n) {
		    i__3 = a_subscr(iadd, iadd);
		    i__4 = kamagn[jtype - 1];
		    a[i__3].r = rmagn[i__4], a[i__3].i = 0.f;
		}

/*              Generate B (w/o rotation) */

		if ((i__3 = kbtype[jtype - 1], abs(i__3)) == 3) {
		    in = ((n - 1) / 2 << 1) + 1;
		    if (in != n) {
			claset_("Full", &n, &n, &c_b1, &c_b1, &b[b_offset], 
				lda);
		    }
		} else {
		    in = n;
		}
		clatm4_(&kbtype[jtype - 1], &in, &kz1[kbzero[jtype - 1] - 1], 
			&kz2[kbzero[jtype - 1] - 1], &lbsign[jtype - 1], &
			rmagn[kbmagn[jtype - 1]], &c_b39, &rmagn[ktrian[jtype 
			- 1] * kbmagn[jtype - 1]], &c__2, &iseed[1], &b[
			b_offset], lda);
		iadd = kadd[kbzero[jtype - 1] - 1];
		if (iadd != 0 && iadd <= n) {
		    i__3 = b_subscr(iadd, iadd);
		    i__4 = kbmagn[jtype - 1];
		    b[i__3].r = rmagn[i__4], b[i__3].i = 0.f;
		}

		if (kclass[jtype - 1] == 2 && n > 0) {

/*                 Include rotations   

                   Generate Q, Z as Householder transformations times   
                   a diagonal matrix. */

		    i__3 = n - 1;
		    for (jc = 1; jc <= i__3; ++jc) {
			i__4 = n;
			for (jr = jc; jr <= i__4; ++jr) {
			    i__5 = q_subscr(jr, jc);
			    clarnd_(&q__1, &c__3, &iseed[1]);
			    q[i__5].r = q__1.r, q[i__5].i = q__1.i;
			    i__5 = z___subscr(jr, jc);
			    clarnd_(&q__1, &c__3, &iseed[1]);
			    z__[i__5].r = q__1.r, z__[i__5].i = q__1.i;
/* L40: */
			}
			i__4 = n + 1 - jc;
			clarfg_(&i__4, &q_ref(jc, jc), &q_ref(jc + 1, jc), &
				c__1, &work[jc]);
			i__4 = (n << 1) + jc;
			i__5 = q_subscr(jc, jc);
			r__2 = q[i__5].r;
			r__1 = r_sign(&c_b39, &r__2);
			work[i__4].r = r__1, work[i__4].i = 0.f;
			i__4 = q_subscr(jc, jc);
			q[i__4].r = 1.f, q[i__4].i = 0.f;
			i__4 = n + 1 - jc;
			clarfg_(&i__4, &z___ref(jc, jc), &z___ref(jc + 1, jc),
				 &c__1, &work[n + jc]);
			i__4 = n * 3 + jc;
			i__5 = z___subscr(jc, jc);
			r__2 = z__[i__5].r;
			r__1 = r_sign(&c_b39, &r__2);
			work[i__4].r = r__1, work[i__4].i = 0.f;
			i__4 = z___subscr(jc, jc);
			z__[i__4].r = 1.f, z__[i__4].i = 0.f;
/* L50: */
		    }
		    clarnd_(&q__1, &c__3, &iseed[1]);
		    ctemp.r = q__1.r, ctemp.i = q__1.i;
		    i__3 = q_subscr(n, n);
		    q[i__3].r = 1.f, q[i__3].i = 0.f;
		    i__3 = n;
		    work[i__3].r = 0.f, work[i__3].i = 0.f;
		    i__3 = n * 3;
		    r__1 = c_abs(&ctemp);
		    q__1.r = ctemp.r / r__1, q__1.i = ctemp.i / r__1;
		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;
		    clarnd_(&q__1, &c__3, &iseed[1]);
		    ctemp.r = q__1.r, ctemp.i = q__1.i;
		    i__3 = z___subscr(n, n);
		    z__[i__3].r = 1.f, z__[i__3].i = 0.f;
		    i__3 = n << 1;
		    work[i__3].r = 0.f, work[i__3].i = 0.f;
		    i__3 = n << 2;
		    r__1 = c_abs(&ctemp);
		    q__1.r = ctemp.r / r__1, q__1.i = ctemp.i / r__1;
		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;

/*                 Apply the diagonal matrices */

		    i__3 = n;
		    for (jc = 1; jc <= i__3; ++jc) {
			i__4 = n;
			for (jr = 1; jr <= i__4; ++jr) {
			    i__5 = a_subscr(jr, jc);
			    i__6 = (n << 1) + jr;
			    r_cnjg(&q__3, &work[n * 3 + jc]);
			    q__2.r = work[i__6].r * q__3.r - work[i__6].i * 
				    q__3.i, q__2.i = work[i__6].r * q__3.i + 
				    work[i__6].i * q__3.r;
			    i__7 = a_subscr(jr, jc);
			    q__1.r = q__2.r * a[i__7].r - q__2.i * a[i__7].i, 
				    q__1.i = q__2.r * a[i__7].i + q__2.i * a[
				    i__7].r;
			    a[i__5].r = q__1.r, a[i__5].i = q__1.i;
			    i__5 = b_subscr(jr, jc);
			    i__6 = (n << 1) + jr;
			    r_cnjg(&q__3, &work[n * 3 + jc]);
			    q__2.r = work[i__6].r * q__3.r - work[i__6].i * 
				    q__3.i, q__2.i = work[i__6].r * q__3.i + 
				    work[i__6].i * q__3.r;
			    i__7 = b_subscr(jr, jc);
			    q__1.r = q__2.r * b[i__7].r - q__2.i * b[i__7].i, 
				    q__1.i = q__2.r * b[i__7].i + q__2.i * b[
				    i__7].r;
			    b[i__5].r = q__1.r, b[i__5].i = q__1.i;
/* L60: */
			}
/* L70: */
		    }
		    i__3 = n - 1;
		    cunm2r_("L", "N", &n, &n, &i__3, &q[q_offset], ldq, &work[
			    1], &a[a_offset], lda, &work[(n << 1) + 1], &
			    iinfo);
		    if (iinfo != 0) {
			goto L100;
		    }
		    i__3 = n - 1;
		    cunm2r_("R", "C", &n, &n, &i__3, &z__[z_offset], ldq, &
			    work[n + 1], &a[a_offset], lda, &work[(n << 1) + 
			    1], &iinfo);
		    if (iinfo != 0) {
			goto L100;
		    }
		    i__3 = n - 1;
		    cunm2r_("L", "N", &n, &n, &i__3, &q[q_offset], ldq, &work[
			    1], &b[b_offset], lda, &work[(n << 1) + 1], &
			    iinfo);
		    if (iinfo != 0) {
			goto L100;
		    }
		    i__3 = n - 1;
		    cunm2r_("R", "C", &n, &n, &i__3, &z__[z_offset], ldq, &
			    work[n + 1], &b[b_offset], lda, &work[(n << 1) + 
			    1], &iinfo);
		    if (iinfo != 0) {
			goto L100;
		    }
		}
	    } else {

/*              Random matrices */

		i__3 = n;
		for (jc = 1; jc <= i__3; ++jc) {
		    i__4 = n;
		    for (jr = 1; jr <= i__4; ++jr) {
			i__5 = a_subscr(jr, jc);
			i__6 = kamagn[jtype - 1];
			clarnd_(&q__2, &c__4, &iseed[1]);
			q__1.r = rmagn[i__6] * q__2.r, q__1.i = rmagn[i__6] * 
				q__2.i;
			a[i__5].r = q__1.r, a[i__5].i = q__1.i;
			i__5 = b_subscr(jr, jc);
			i__6 = kbmagn[jtype - 1];
			clarnd_(&q__2, &c__4, &iseed[1]);
			q__1.r = rmagn[i__6] * q__2.r, q__1.i = rmagn[i__6] * 
				q__2.i;
			b[i__5].r = q__1.r, b[i__5].i = q__1.i;
/* L80: */
		    }
/* L90: */
		}
	    }

L100:

	    if (iinfo != 0) {
		io___43.ciunit = *nounit;
		s_wsfe(&io___43);
		do_fio(&c__1, "Generator", (ftnlen)9);
		do_fio(&c__1, (char *)&iinfo, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(iinfo);
		return 0;
	    }

L110:

/*           Call CGEGS to compute S, T, Q, Z, alpha, and beta. */

	    clacpy_(" ", &n, &n, &a[a_offset], lda, &s[s_offset], lda);
	    clacpy_(" ", &n, &n, &b[b_offset], lda, &t[t_offset], lda);
	    ntest = 1;
	    result[1] = ulpinv;

	    cgegs_("V", "V", &n, &s[s_offset], lda, &t[t_offset], lda, &
		    alpha1[1], &beta1[1], &q[q_offset], ldq, &z__[z_offset], 
		    ldq, &work[1], lwork, &rwork[1], &iinfo);
	    if (iinfo != 0) {
		io___44.ciunit = *nounit;
		s_wsfe(&io___44);
		do_fio(&c__1, "CGEGS", (ftnlen)5);
		do_fio(&c__1, (char *)&iinfo, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(iinfo);
		goto L130;
	    }

	    ntest = 4;

/*           Do tests 1--4 */

	    cget51_(&c__1, &n, &a[a_offset], lda, &s[s_offset], lda, &q[
		    q_offset], ldq, &z__[z_offset], ldq, &work[1], &rwork[1], 
		    &result[1]);
	    cget51_(&c__1, &n, &b[b_offset], lda, &t[t_offset], lda, &q[
		    q_offset], ldq, &z__[z_offset], ldq, &work[1], &rwork[1], 
		    &result[2]);
	    cget51_(&c__3, &n, &b[b_offset], lda, &t[t_offset], lda, &q[
		    q_offset], ldq, &q[q_offset], ldq, &work[1], &rwork[1], &
		    result[3]);
	    cget51_(&c__3, &n, &b[b_offset], lda, &t[t_offset], lda, &z__[
		    z_offset], ldq, &z__[z_offset], ldq, &work[1], &rwork[1], 
		    &result[4]);

/*           Do test 5: compare eigenvalues with diagonals. */

	    temp1 = 0.f;

	    i__3 = n;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = j;
		i__5 = s_subscr(j, j);
		q__2.r = alpha1[i__4].r - s[i__5].r, q__2.i = alpha1[i__4].i 
			- s[i__5].i;
		q__1.r = q__2.r, q__1.i = q__2.i;
		i__6 = j;
		i__7 = t_subscr(j, j);
		q__4.r = beta1[i__6].r - t[i__7].r, q__4.i = beta1[i__6].i - 
			t[i__7].i;
		q__3.r = q__4.r, q__3.i = q__4.i;
/* Computing MAX */
		i__8 = j;
		i__9 = s_subscr(j, j);
		r__13 = safmin, r__14 = (r__1 = alpha1[i__8].r, dabs(r__1)) + 
			(r__2 = r_imag(&alpha1[j]), dabs(r__2)), r__13 = max(
			r__13,r__14), r__14 = (r__3 = s[i__9].r, dabs(r__3)) 
			+ (r__4 = r_imag(&s_ref(j, j)), dabs(r__4));
/* Computing MAX */
		i__10 = j;
		i__11 = t_subscr(j, j);
		r__15 = safmin, r__16 = (r__5 = beta1[i__10].r, dabs(r__5)) + 
			(r__6 = r_imag(&beta1[j]), dabs(r__6)), r__15 = max(
			r__15,r__16), r__16 = (r__7 = t[i__11].r, dabs(r__7)) 
			+ (r__8 = r_imag(&t_ref(j, j)), dabs(r__8));
		temp2 = (((r__9 = q__1.r, dabs(r__9)) + (r__10 = r_imag(&q__1)
			, dabs(r__10))) / dmax(r__13,r__14) + ((r__11 = 
			q__3.r, dabs(r__11)) + (r__12 = r_imag(&q__3), dabs(
			r__12))) / dmax(r__15,r__16)) / ulp;
		temp1 = dmax(temp1,temp2);
/* L120: */
	    }
	    result[5] = temp1;

/*           Call CGEGV to compute S2, T2, VL, and VR, do tests.   

             Eigenvalues and Eigenvectors */

	    clacpy_(" ", &n, &n, &a[a_offset], lda, &s2[s2_offset], lda);
	    clacpy_(" ", &n, &n, &b[b_offset], lda, &t2[t2_offset], lda);
	    ntest = 6;
	    result[6] = ulpinv;

	    cgegv_("V", "V", &n, &s2[s2_offset], lda, &t2[t2_offset], lda, &
		    alpha2[1], &beta2[1], &vl[vl_offset], ldq, &vr[vr_offset],
		     ldq, &work[1], lwork, &rwork[1], &iinfo);
	    if (iinfo != 0) {
		io___47.ciunit = *nounit;
		s_wsfe(&io___47);
		do_fio(&c__1, "CGEGV", (ftnlen)5);
		do_fio(&c__1, (char *)&iinfo, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(iinfo);
		goto L130;
	    }

	    ntest = 7;

/*           Do Tests 6 and 7 */

	    cget52_(&c_true, &n, &a[a_offset], lda, &b[b_offset], lda, &vl[
		    vl_offset], ldq, &alpha2[1], &beta2[1], &work[1], &rwork[
		    1], dumma);
	    result[6] = dumma[0];
	    if (dumma[1] > *thrshn) {
		io___49.ciunit = *nounit;
		s_wsfe(&io___49);
		do_fio(&c__1, "Left", (ftnlen)4);
		do_fio(&c__1, "CGEGV", (ftnlen)5);
		do_fio(&c__1, (char *)&dumma[1], (ftnlen)sizeof(real));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
	    }

	    cget52_(&c_false, &n, &a[a_offset], lda, &b[b_offset], lda, &vr[
		    vr_offset], ldq, &alpha2[1], &beta2[1], &work[1], &rwork[
		    1], dumma);
	    result[7] = dumma[0];
	    if (dumma[1] > *thresh) {
		io___50.ciunit = *nounit;
		s_wsfe(&io___50);
		do_fio(&c__1, "Right", (ftnlen)5);
		do_fio(&c__1, "CGEGV", (ftnlen)5);
		do_fio(&c__1, (char *)&dumma[1], (ftnlen)sizeof(real));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
	    }

/*           End of Loop -- Check for RESULT(j) > THRESH */

L130:

	    ntestt += ntest;

/*           Print out tests which fail. */

	    i__3 = ntest;
	    for (jr = 1; jr <= i__3; ++jr) {
		if (result[jr] >= *thresh) {

/*                 If this is the first test to fail,   
                   print a header to the data file. */

		    if (nerrs == 0) {
			io___51.ciunit = *nounit;
			s_wsfe(&io___51);
			do_fio(&c__1, "CGG", (ftnlen)3);
			e_wsfe();

/*                    Matrix types */

			io___52.ciunit = *nounit;
			s_wsfe(&io___52);
			e_wsfe();
			io___53.ciunit = *nounit;
			s_wsfe(&io___53);
			e_wsfe();
			io___54.ciunit = *nounit;
			s_wsfe(&io___54);
			do_fio(&c__1, "Unitary", (ftnlen)7);
			e_wsfe();

/*                    Tests performed */

			io___55.ciunit = *nounit;
			s_wsfe(&io___55);
			do_fio(&c__1, "unitary", (ftnlen)7);
			do_fio(&c__1, "*", (ftnlen)1);
			do_fio(&c__1, "conjugate transpose", (ftnlen)19);
			for (j = 1; j <= 5; ++j) {
			    do_fio(&c__1, "*", (ftnlen)1);
			}
			e_wsfe();

		    }
		    ++nerrs;
		    if (result[jr] < 1e4f) {
			io___56.ciunit = *nounit;
			s_wsfe(&io___56);
			do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer))
				;
			do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(
				integer));
			do_fio(&c__1, (char *)&jr, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&result[jr], (ftnlen)sizeof(
				real));
			e_wsfe();
		    } else {
			io___57.ciunit = *nounit;
			s_wsfe(&io___57);
			do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer))
				;
			do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(
				integer));
			do_fio(&c__1, (char *)&jr, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&result[jr], (ftnlen)sizeof(
				real));
			e_wsfe();
		    }
		}
/* L140: */
	    }

L150:
	    ;
	}
/* L160: */
    }

/*     Summary */

    alasvm_("CGG", nounit, &nerrs, &ntestt, &c__0);
    return 0;







/*     End of CDRVGG */

} /* cdrvgg_ */
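
/*     The following is an illustrative sketch only -- it is not part of
       CDRVGG or of LAPACK.  It restates the per-eigenvalue difference D(j)
       of test (5) above using plain float pairs (re,im) instead of the f2c
       `complex` struct; every name below is an assumption made for clarity. */

#include <math.h>

static float eig_diff5(float ar, float ai, float sr, float si,
                       float br, float bi, float tr, float ti, float safmin)
{
    /* 1-norm |x| = |Re x| + |Im x|, with SAFMIN guarding the denominators,
       as in the loop over j above; CDRVGG further divides the result by ulp. */
    float na = fmaxf(safmin,
                     fmaxf(fabsf(ar) + fabsf(ai), fabsf(sr) + fabsf(si)));
    float nb = fmaxf(safmin,
                     fmaxf(fabsf(br) + fabsf(bi), fabsf(tr) + fabsf(ti)));
    float da = fabsf(ar - sr) + fabsf(ai - si);    /* |alpha(j) - S(j,j)| */
    float db = fabsf(br - tr) + fabsf(bi - ti);    /* |beta(j)  - T(j,j)| */
    return da / na + db / nb;
}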
Example #9
0
/* Subroutine */ int chptrd_(char *uplo, integer *n, complex *ap, real *d__, 
	real *e, complex *tau, integer *info)
{
    /* System generated locals */
    integer i__1, i__2, i__3;
    real r__1;
    complex q__1, q__2, q__3, q__4;

    /* Local variables */
    integer i__, i1, ii, i1i1;
    complex taui;
    extern /* Subroutine */ int chpr2_(char *, integer *, complex *, complex *
, integer *, complex *, integer *, complex *);
    complex alpha;
    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer 
	    *, complex *, integer *);
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int chpmv_(char *, integer *, complex *, complex *
, complex *, integer *, complex *, complex *, integer *), 
	    caxpy_(integer *, complex *, complex *, integer *, complex *, 
	    integer *);
    logical upper;
    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *, 
	    integer *, complex *), xerbla_(char *, integer *);


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CHPTRD reduces a complex Hermitian matrix A stored in packed form to */
/*  real symmetric tridiagonal form T by a unitary similarity */
/*  transformation: Q**H * A * Q = T. */

/*  Arguments */
/*  ========= */

/*  UPLO    (input) CHARACTER*1 */
/*          = 'U':  Upper triangle of A is stored; */
/*          = 'L':  Lower triangle of A is stored. */

/*  N       (input) INTEGER */
/*          The order of the matrix A.  N >= 0. */

/*  AP      (input/output) COMPLEX array, dimension (N*(N+1)/2) */
/*          On entry, the upper or lower triangle of the Hermitian matrix */
/*          A, packed columnwise in a linear array.  The j-th column of A */
/*          is stored in the array AP as follows: */
/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
/*          if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
/*          On exit, if UPLO = 'U', the diagonal and first superdiagonal */
/*          of A are overwritten by the corresponding elements of the */
/*          tridiagonal matrix T, and the elements above the first */
/*          superdiagonal, with the array TAU, represent the unitary */
/*          matrix Q as a product of elementary reflectors; if UPLO */
/*          = 'L', the diagonal and first subdiagonal of A are over- */
/*          written by the corresponding elements of the tridiagonal */
/*          matrix T, and the elements below the first subdiagonal, with */
/*          the array TAU, represent the unitary matrix Q as a product */
/*          of elementary reflectors. See Further Details. */

/*  D       (output) REAL array, dimension (N) */
/*          The diagonal elements of the tridiagonal matrix T: */
/*          D(i) = A(i,i). */

/*  E       (output) REAL array, dimension (N-1) */
/*          The off-diagonal elements of the tridiagonal matrix T: */
/*          E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. */

/*  TAU     (output) COMPLEX array, dimension (N-1) */
/*          The scalar factors of the elementary reflectors (see Further */
/*          Details). */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value */

/*  Further Details */
/*  =============== */

/*  If UPLO = 'U', the matrix Q is represented as a product of elementary */
/*  reflectors */

/*     Q = H(n-1) . . . H(2) H(1). */

/*  Each H(i) has the form */

/*     H(i) = I - tau * v * v' */

/*  where tau is a complex scalar, and v is a complex vector with */
/*  v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in AP, */
/*  overwriting A(1:i-1,i+1), and tau is stored in TAU(i). */

/*  If UPLO = 'L', the matrix Q is represented as a product of elementary */
/*  reflectors */

/*     Q = H(1) H(2) . . . H(n-1). */

/*  Each H(i) has the form */

/*     H(i) = I - tau * v * v' */

/*  where tau is a complex scalar, and v is a complex vector with */
/*  v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in AP, */
/*  overwriting A(i+2:n,i), and tau is stored in TAU(i). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input parameters */

    /* Parameter adjustments */
    --tau;
    --e;
    --d__;
    --ap;

    /* Function Body */
    *info = 0;
    upper = lsame_(uplo, "U");
    if (! upper && ! lsame_(uplo, "L")) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CHPTRD", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n <= 0) {
	return 0;
    }

    if (upper) {

/*        Reduce the upper triangle of A. */
/*        I1 is the index in AP of A(1,I+1). */

	i1 = *n * (*n - 1) / 2 + 1;
	i__1 = i1 + *n - 1;
	i__2 = i1 + *n - 1;
	r__1 = ap[i__2].r;
	ap[i__1].r = r__1, ap[i__1].i = 0.f;
	for (i__ = *n - 1; i__ >= 1; --i__) {

/*           Generate elementary reflector H(i) = I - tau * v * v' */
/*           to annihilate A(1:i-1,i+1) */

	    i__1 = i1 + i__ - 1;
	    alpha.r = ap[i__1].r, alpha.i = ap[i__1].i;
	    clarfg_(&i__, &alpha, &ap[i1], &c__1, &taui);
	    i__1 = i__;
	    e[i__1] = alpha.r;

	    if (taui.r != 0.f || taui.i != 0.f) {

/*              Apply H(i) from both sides to A(1:i,1:i) */

		i__1 = i1 + i__ - 1;
		ap[i__1].r = 1.f, ap[i__1].i = 0.f;

/*              Compute  y := tau * A * v  storing y in TAU(1:i) */

		chpmv_(uplo, &i__, &taui, &ap[1], &ap[i1], &c__1, &c_b2, &tau[
			1], &c__1);

/*              Compute  w := y - 1/2 * tau * (y'*v) * v */

		q__3.r = -.5f, q__3.i = -0.f;
		q__2.r = q__3.r * taui.r - q__3.i * taui.i, q__2.i = q__3.r * 
			taui.i + q__3.i * taui.r;
		cdotc_(&q__4, &i__, &tau[1], &c__1, &ap[i1], &c__1);
		q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r * 
			q__4.i + q__2.i * q__4.r;
		alpha.r = q__1.r, alpha.i = q__1.i;
		caxpy_(&i__, &alpha, &ap[i1], &c__1, &tau[1], &c__1);

/*              Apply the transformation as a rank-2 update: */
/*                 A := A - v * w' - w * v' */

		q__1.r = -1.f, q__1.i = -0.f;
		chpr2_(uplo, &i__, &q__1, &ap[i1], &c__1, &tau[1], &c__1, &ap[
			1]);

	    }
	    i__1 = i1 + i__ - 1;
	    i__2 = i__;
	    ap[i__1].r = e[i__2], ap[i__1].i = 0.f;
	    i__1 = i__ + 1;
	    i__2 = i1 + i__;
	    d__[i__1] = ap[i__2].r;
	    i__1 = i__;
	    tau[i__1].r = taui.r, tau[i__1].i = taui.i;
	    i1 -= i__;
/* L10: */
	}
	d__[1] = ap[1].r;
    } else {

/*        Reduce the lower triangle of A. II is the index in AP of */
/*        A(i,i) and I1I1 is the index of A(i+1,i+1). */

	ii = 1;
	r__1 = ap[1].r;
	ap[1].r = r__1, ap[1].i = 0.f;
	i__1 = *n - 1;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    i1i1 = ii + *n - i__ + 1;

/*           Generate elementary reflector H(i) = I - tau * v * v' */
/*           to annihilate A(i+2:n,i) */

	    i__2 = ii + 1;
	    alpha.r = ap[i__2].r, alpha.i = ap[i__2].i;
	    i__2 = *n - i__;
	    clarfg_(&i__2, &alpha, &ap[ii + 2], &c__1, &taui);
	    i__2 = i__;
	    e[i__2] = alpha.r;

	    if (taui.r != 0.f || taui.i != 0.f) {

/*              Apply H(i) from both sides to A(i+1:n,i+1:n) */

		i__2 = ii + 1;
		ap[i__2].r = 1.f, ap[i__2].i = 0.f;

/*              Compute  y := tau * A * v  storing y in TAU(i:n-1) */

		i__2 = *n - i__;
		chpmv_(uplo, &i__2, &taui, &ap[i1i1], &ap[ii + 1], &c__1, &
			c_b2, &tau[i__], &c__1);

/*              Compute  w := y - 1/2 * tau * (y'*v) * v */

		q__3.r = -.5f, q__3.i = -0.f;
		q__2.r = q__3.r * taui.r - q__3.i * taui.i, q__2.i = q__3.r * 
			taui.i + q__3.i * taui.r;
		i__2 = *n - i__;
		cdotc_(&q__4, &i__2, &tau[i__], &c__1, &ap[ii + 1], &c__1);
		q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r * 
			q__4.i + q__2.i * q__4.r;
		alpha.r = q__1.r, alpha.i = q__1.i;
		i__2 = *n - i__;
		caxpy_(&i__2, &alpha, &ap[ii + 1], &c__1, &tau[i__], &c__1);

/*              Apply the transformation as a rank-2 update: */
/*                 A := A - v * w' - w * v' */

		i__2 = *n - i__;
		q__1.r = -1.f, q__1.i = -0.f;
		chpr2_(uplo, &i__2, &q__1, &ap[ii + 1], &c__1, &tau[i__], &
			c__1, &ap[i1i1]);

	    }
	    i__2 = ii + 1;
	    i__3 = i__;
	    ap[i__2].r = e[i__3], ap[i__2].i = 0.f;
	    i__2 = i__;
	    i__3 = ii;
	    d__[i__2] = ap[i__3].r;
	    i__2 = i__;
	    tau[i__2].r = taui.r, tau[i__2].i = taui.i;
	    ii = i1i1;
/* L20: */
	}
	i__1 = *n;
	i__2 = ii;
	d__[i__1] = ap[i__2].r;
    }

    return 0;

/*     End of CHPTRD */

} /* chptrd_ */
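
/*     Illustrative sketch only (not part of CHPTRD or LAPACK): the packed
       column-wise storage mapping documented above, returning the 1-based
       position of A(i,j) inside the linear array AP.  The helper names are
       assumptions made for clarity. */

static int ap_index_upper(int i, int j)            /* UPLO='U': 1 <= i <= j   */
{
    return i + (j - 1) * j / 2;                    /* AP(i + (j-1)*j/2)       */
}

static int ap_index_lower(int i, int j, int n)     /* UPLO='L': j <= i <= n   */
{
    return i + (j - 1) * (2 * n - j) / 2;          /* AP(i + (j-1)*(2*n-j)/2) */
}

/*     For example, ap_index_upper(2, 3) == 5, so A(2,3) is stored in AP(5)
       when the upper triangle is packed columnwise. */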
Example #10
0
/* Subroutine */ int clatrd_(char *uplo, integer *n, integer *nb, complex *a, 
	integer *lda, real *e, complex *tau, complex *w, integer *ldw, ftnlen 
	uplo_len)
{
    /* System generated locals */
    integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
    real r__1;
    complex q__1, q__2, q__3, q__4;

    /* Local variables */
    static integer i__, iw;
    static complex alpha;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
	    integer *);
    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer 
	    *, complex *, integer *);
    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
	    , complex *, integer *, complex *, integer *, complex *, complex *
	    , integer *, ftnlen), chemv_(char *, integer *, complex *, 
	    complex *, integer *, complex *, integer *, complex *, complex *, 
	    integer *, ftnlen);
    extern logical lsame_(char *, char *, ftnlen, ftnlen);
    extern /* Subroutine */ int caxpy_(integer *, complex *, complex *, 
	    integer *, complex *, integer *), clarfg_(integer *, complex *, 
	    complex *, integer *, complex *), clacgv_(integer *, complex *, 
	    integer *);


/*  -- LAPACK auxiliary routine (version 3.0) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */
/*     Courant Institute, Argonne National Lab, and Rice University */
/*     September 30, 1994 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CLATRD reduces NB rows and columns of a complex Hermitian matrix A to */
/*  Hermitian tridiagonal form by a unitary similarity */
/*  transformation Q' * A * Q, and returns the matrices V and W which are */
/*  needed to apply the transformation to the unreduced part of A. */

/*  If UPLO = 'U', CLATRD reduces the last NB rows and columns of a */
/*  matrix, of which the upper triangle is supplied; */
/*  if UPLO = 'L', CLATRD reduces the first NB rows and columns of a */
/*  matrix, of which the lower triangle is supplied. */

/*  This is an auxiliary routine called by CHETRD. */

/*  Arguments */
/*  ========= */

/*  UPLO    (input) CHARACTER */
/*          Specifies whether the upper or lower triangular part of the */
/*          Hermitian matrix A is stored: */
/*          = 'U': Upper triangular */
/*          = 'L': Lower triangular */

/*  N       (input) INTEGER */
/*          The order of the matrix A. */

/*  NB      (input) INTEGER */
/*          The number of rows and columns to be reduced. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the Hermitian matrix A.  If UPLO = 'U', the leading */
/*          n-by-n upper triangular part of A contains the upper */
/*          triangular part of the matrix A, and the strictly lower */
/*          triangular part of A is not referenced.  If UPLO = 'L', the */
/*          leading n-by-n lower triangular part of A contains the lower */
/*          triangular part of the matrix A, and the strictly upper */
/*          triangular part of A is not referenced. */
/*          On exit: */
/*          if UPLO = 'U', the last NB columns have been reduced to */
/*            tridiagonal form, with the diagonal elements overwriting */
/*            the diagonal elements of A; the elements above the diagonal */
/*            with the array TAU, represent the unitary matrix Q as a */
/*            product of elementary reflectors; */
/*          if UPLO = 'L', the first NB columns have been reduced to */
/*            tridiagonal form, with the diagonal elements overwriting */
/*            the diagonal elements of A; the elements below the diagonal */
/*            with the array TAU, represent the  unitary matrix Q as a */
/*            product of elementary reflectors. */
/*          See Further Details. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,N). */

/*  E       (output) REAL array, dimension (N-1) */
/*          If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal */
/*          elements of the last NB columns of the reduced matrix; */
/*          if UPLO = 'L', E(1:nb) contains the subdiagonal elements of */
/*          the first NB columns of the reduced matrix. */

/*  TAU     (output) COMPLEX array, dimension (N-1) */
/*          The scalar factors of the elementary reflectors, stored in */
/*          TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'. */
/*          See Further Details. */

/*  W       (output) COMPLEX array, dimension (LDW,NB) */
/*          The n-by-nb matrix W required to update the unreduced part */
/*          of A. */

/*  LDW     (input) INTEGER */
/*          The leading dimension of the array W. LDW >= max(1,N). */

/*  Further Details */
/*  =============== */

/*  If UPLO = 'U', the matrix Q is represented as a product of elementary */
/*  reflectors */

/*     Q = H(n) H(n-1) . . . H(n-nb+1). */

/*  Each H(i) has the form */

/*     H(i) = I - tau * v * v' */

/*  where tau is a complex scalar, and v is a complex vector with */
/*  v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), */
/*  and tau in TAU(i-1). */

/*  If UPLO = 'L', the matrix Q is represented as a product of elementary */
/*  reflectors */

/*     Q = H(1) H(2) . . . H(nb). */

/*  Each H(i) has the form */

/*     H(i) = I - tau * v * v' */

/*  where tau is a complex scalar, and v is a complex vector with */
/*  v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), */
/*  and tau in TAU(i). */

/*  The elements of the vectors v together form the n-by-nb matrix V */
/*  which is needed, with W, to apply the transformation to the unreduced */
/*  part of the matrix, using a Hermitian rank-2k update of the form: */
/*  A := A - V*W' - W*V'. */

/*  The contents of A on exit are illustrated by the following examples */
/*  with n = 5 and nb = 2: */

/*  if UPLO = 'U':                       if UPLO = 'L': */

/*    (  a   a   a   v4  v5 )              (  d                  ) */
/*    (      a   a   v4  v5 )              (  1   d              ) */
/*    (          a   1   v5 )              (  v1  1   a          ) */
/*    (              d   1  )              (  v1  v2  a   a      ) */
/*    (                  d  )              (  v1  v2  a   a   a  ) */

/*  where d denotes a diagonal element of the reduced matrix, a denotes */
/*  an element of the original matrix that is unchanged, and vi denotes */
/*  an element of the vector defining H(i). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Quick return if possible */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --e;
    --tau;
    w_dim1 = *ldw;
    w_offset = 1 + w_dim1;
    w -= w_offset;

    /* Function Body */
    if (*n <= 0) {
	return 0;
    }

    if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) {

/*        Reduce last NB columns of upper triangle */

	i__1 = *n - *nb + 1;
	for (i__ = *n; i__ >= i__1; --i__) {
	    iw = i__ - *n + *nb;
	    if (i__ < *n) {

/*              Update A(1:i,i) */

		i__2 = i__ + i__ * a_dim1;
		i__3 = i__ + i__ * a_dim1;
		r__1 = a[i__3].r;
		a[i__2].r = r__1, a[i__2].i = 0.f;
		i__2 = *n - i__;
		clacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
		i__2 = *n - i__;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__, &i__2, &q__1, &a[(i__ + 1) * 
			a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, &
			c_b2, &a[i__ * a_dim1 + 1], &c__1, (ftnlen)12);
		i__2 = *n - i__;
		clacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
		i__2 = *n - i__;
		clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
		i__2 = *n - i__;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__, &i__2, &q__1, &w[(iw + 1) * 
			w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, &
			c_b2, &a[i__ * a_dim1 + 1], &c__1, (ftnlen)12);
		i__2 = *n - i__;
		clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
		i__2 = i__ + i__ * a_dim1;
		i__3 = i__ + i__ * a_dim1;
		r__1 = a[i__3].r;
		a[i__2].r = r__1, a[i__2].i = 0.f;
	    }
	    if (i__ > 1) {

/*              Generate elementary reflector H(i) to annihilate */
/*              A(1:i-2,i) */

		i__2 = i__ - 1 + i__ * a_dim1;
		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
		i__2 = i__ - 1;
		clarfg_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &tau[i__ 
			- 1]);
		i__2 = i__ - 1;
		e[i__2] = alpha.r;
		i__2 = i__ - 1 + i__ * a_dim1;
		a[i__2].r = 1.f, a[i__2].i = 0.f;

/*              Compute W(1:i-1,i) */

		i__2 = i__ - 1;
		chemv_("Upper", &i__2, &c_b2, &a[a_offset], lda, &a[i__ * 
			a_dim1 + 1], &c__1, &c_b1, &w[iw * w_dim1 + 1], &c__1,
			 (ftnlen)5);
		if (i__ < *n) {
		    i__2 = i__ - 1;
		    i__3 = *n - i__;
		    cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[(iw 
			    + 1) * w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &
			    c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1, (
			    ftnlen)19);
		    i__2 = i__ - 1;
		    i__3 = *n - i__;
		    q__1.r = -1.f, q__1.i = -0.f;
		    cgemv_("No transpose", &i__2, &i__3, &q__1, &a[(i__ + 1) *
			     a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], &
			    c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1, (ftnlen)
			    12);
		    i__2 = i__ - 1;
		    i__3 = *n - i__;
		    cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[(
			    i__ + 1) * a_dim1 + 1], lda, &a[i__ * a_dim1 + 1],
			     &c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1, (
			    ftnlen)19);
		    i__2 = i__ - 1;
		    i__3 = *n - i__;
		    q__1.r = -1.f, q__1.i = -0.f;
		    cgemv_("No transpose", &i__2, &i__3, &q__1, &w[(iw + 1) * 
			    w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], &
			    c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1, (ftnlen)
			    12);
		}
		i__2 = i__ - 1;
		cscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
		q__3.r = -.5f, q__3.i = -0.f;
		i__2 = i__ - 1;
		q__2.r = q__3.r * tau[i__2].r - q__3.i * tau[i__2].i, q__2.i =
			 q__3.r * tau[i__2].i + q__3.i * tau[i__2].r;
		i__3 = i__ - 1;
		cdotc_(&q__4, &i__3, &w[iw * w_dim1 + 1], &c__1, &a[i__ * 
			a_dim1 + 1], &c__1);
		q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r * 
			q__4.i + q__2.i * q__4.r;
		alpha.r = q__1.r, alpha.i = q__1.i;
		i__2 = i__ - 1;
		caxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw * 
			w_dim1 + 1], &c__1);
	    }

/* L10: */
	}
    } else {

/*        Reduce first NB columns of lower triangle */

	i__1 = *nb;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Update A(i:n,i) */

	    i__2 = i__ + i__ * a_dim1;
	    i__3 = i__ + i__ * a_dim1;
	    r__1 = a[i__3].r;
	    a[i__2].r = r__1, a[i__2].i = 0.f;
	    i__2 = i__ - 1;
	    clacgv_(&i__2, &w[i__ + w_dim1], ldw);
	    i__2 = *n - i__ + 1;
	    i__3 = i__ - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + a_dim1], lda,
		     &w[i__ + w_dim1], ldw, &c_b2, &a[i__ + i__ * a_dim1], &
		    c__1, (ftnlen)12);
	    i__2 = i__ - 1;
	    clacgv_(&i__2, &w[i__ + w_dim1], ldw);
	    i__2 = i__ - 1;
	    clacgv_(&i__2, &a[i__ + a_dim1], lda);
	    i__2 = *n - i__ + 1;
	    i__3 = i__ - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("No transpose", &i__2, &i__3, &q__1, &w[i__ + w_dim1], ldw,
		     &a[i__ + a_dim1], lda, &c_b2, &a[i__ + i__ * a_dim1], &
		    c__1, (ftnlen)12);
	    i__2 = i__ - 1;
	    clacgv_(&i__2, &a[i__ + a_dim1], lda);
	    i__2 = i__ + i__ * a_dim1;
	    i__3 = i__ + i__ * a_dim1;
	    r__1 = a[i__3].r;
	    a[i__2].r = r__1, a[i__2].i = 0.f;
	    if (i__ < *n) {

/*              Generate elementary reflector H(i) to annihilate */
/*              A(i+2:n,i) */

		i__2 = i__ + 1 + i__ * a_dim1;
		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
		i__2 = *n - i__;
/* Computing MIN */
		i__3 = i__ + 2;
		clarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1,
			 &tau[i__]);
		i__2 = i__;
		e[i__2] = alpha.r;
		i__2 = i__ + 1 + i__ * a_dim1;
		a[i__2].r = 1.f, a[i__2].i = 0.f;

/*              Compute W(i+1:n,i) */

		i__2 = *n - i__;
		chemv_("Lower", &i__2, &c_b2, &a[i__ + 1 + (i__ + 1) * a_dim1]
			, lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b1, &w[
			i__ + 1 + i__ * w_dim1], &c__1, (ftnlen)5);
		i__2 = *n - i__;
		i__3 = i__ - 1;
		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[i__ + 1 
			+ w_dim1], ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &
			c_b1, &w[i__ * w_dim1 + 1], &c__1, (ftnlen)19);
		i__2 = *n - i__;
		i__3 = i__ - 1;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + 1 + 
			a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[
			i__ + 1 + i__ * w_dim1], &c__1, (ftnlen)12);
		i__2 = *n - i__;
		i__3 = i__ - 1;
		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 
			+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
			c_b1, &w[i__ * w_dim1 + 1], &c__1, (ftnlen)19);
		i__2 = *n - i__;
		i__3 = i__ - 1;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__2, &i__3, &q__1, &w[i__ + 1 + 
			w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[
			i__ + 1 + i__ * w_dim1], &c__1, (ftnlen)12);
		i__2 = *n - i__;
		cscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
		q__3.r = -.5f, q__3.i = -0.f;
		i__2 = i__;
		q__2.r = q__3.r * tau[i__2].r - q__3.i * tau[i__2].i, q__2.i =
			 q__3.r * tau[i__2].i + q__3.i * tau[i__2].r;
		i__3 = *n - i__;
		cdotc_(&q__4, &i__3, &w[i__ + 1 + i__ * w_dim1], &c__1, &a[
			i__ + 1 + i__ * a_dim1], &c__1);
		q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r * 
			q__4.i + q__2.i * q__4.r;
		alpha.r = q__1.r, alpha.i = q__1.i;
		i__2 = *n - i__;
		caxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[
			i__ + 1 + i__ * w_dim1], &c__1);
	    }

/* L20: */
	}
    }

    return 0;

/*     End of CLATRD */

} /* clatrd_ */
Example #11
0
/* Subroutine */ int cgeql2_(integer *m, integer *n, complex *a, integer *lda, 
	 complex *tau, complex *work, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2;
    complex q__1;

    /* Builtin functions */
    void r_cnjg(complex *, complex *);

    /* Local variables */
    integer i__, k;
    complex alpha;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
, integer *, complex *, complex *, integer *, complex *), 
	    clarfg_(integer *, complex *, complex *, integer *, complex *), 
	    xerbla_(char *, integer *);


/*  -- LAPACK routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CGEQL2 computes a QL factorization of a complex m by n matrix A: */
/*  A = Q * L. */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A.  M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A.  N >= 0. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the m by n matrix A. */
/*          On exit, if m >= n, the lower triangle of the subarray */
/*          A(m-n+1:m,1:n) contains the n by n lower triangular matrix L; */
/*          if m <= n, the elements on and below the (n-m)-th */
/*          superdiagonal contain the m by n lower trapezoidal matrix L; */
/*          the remaining elements, with the array TAU, represent the */
/*          unitary matrix Q as a product of elementary reflectors */
/*          (see Further Details). */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,M). */

/*  TAU     (output) COMPLEX array, dimension (min(M,N)) */
/*          The scalar factors of the elementary reflectors (see Further */
/*          Details). */

/*  WORK    (workspace) COMPLEX array, dimension (N) */

/*  INFO    (output) INTEGER */
/*          = 0: successful exit */
/*          < 0: if INFO = -i, the i-th argument had an illegal value */

/*  Further Details */
/*  =============== */

/*  The matrix Q is represented as a product of elementary reflectors */

/*     Q = H(k) . . . H(2) H(1), where k = min(m,n). */

/*  Each H(i) has the form */

/*     H(i) = I - tau * v * v' */

/*  where tau is a complex scalar, and v is a complex vector with */
/*  v(m-k+i+1:m) = 0 and v(m-k+i) = 1; v(1:m-k+i-1) is stored on exit in */
/*  A(1:m-k+i-1,n-k+i), and tau in TAU(i). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input arguments */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --tau;
    --work;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*m)) {
	*info = -4;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGEQL2", &i__1);
	return 0;
    }

    k = min(*m,*n);

    for (i__ = k; i__ >= 1; --i__) {

/*        Generate elementary reflector H(i) to annihilate */
/*        A(1:m-k+i-1,n-k+i) */

	i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
	alpha.r = a[i__1].r, alpha.i = a[i__1].i;
	i__1 = *m - k + i__;
	clarfg_(&i__1, &alpha, &a[(*n - k + i__) * a_dim1 + 1], &c__1, &tau[
		i__]);

/*        Apply H(i)' to A(1:m-k+i,1:n-k+i-1) from the left */

	i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
	a[i__1].r = 1.f, a[i__1].i = 0.f;
	i__1 = *m - k + i__;
	i__2 = *n - k + i__ - 1;
	r_cnjg(&q__1, &tau[i__]);
	clarf_("Left", &i__1, &i__2, &a[(*n - k + i__) * a_dim1 + 1], &c__1, &
		q__1, &a[a_offset], lda, &work[1]);
	i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
	a[i__1].r = alpha.r, a[i__1].i = alpha.i;
/* L10: */
    }
    return 0;

/*     End of CGEQL2 */

} /* cgeql2_ */
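Calling the routine above is a plain C call once the f2c types are in scope. The driver below is a minimal sketch, not part of the LAPACK sources: it assumes "f2c.h" supplies integer, real and complex, and that cgeql2_ (together with the CLARF/CLARFG routines it uses) is linked in.

#include <stdio.h>
#include "f2c.h"   /* assumed to provide integer, real, complex */

extern int cgeql2_(integer *m, integer *n, complex *a, integer *lda,
                   complex *tau, complex *work, integer *info);

int main(void)
{
    /* 3-by-2 matrix stored column by column, as the routine expects */
    complex a[6] = { {1.f,0.f}, {2.f,0.f}, {3.f,0.f},
                     {0.f,1.f}, {1.f,1.f}, {2.f,0.f} };
    complex tau[2], work[2];            /* TAU: min(M,N), WORK: N */
    integer m = 3, n = 2, lda = 3, info;

    cgeql2_(&m, &n, a, &lda, tau, work, &info);

    /* With m >= n, L occupies the lower triangle of A(m-n+1:m,1:n) */
    printf("info = %ld, L(1,1) = %g%+gi\n",
           (long)info, (double)a[1].r, (double)a[1].i);
    return 0;
}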
Example #12
0
/* Subroutine */ int cgelq2_(integer *m, integer *n, complex *a, integer *lda,
	 complex *tau, complex *work, integer *info)
{
/*  -- LAPACK routine (version 2.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    CGELQ2 computes an LQ factorization of a complex m by n matrix A:   
    A = L * Q.   

    Arguments   
    =========   

    M       (input) INTEGER   
            The number of rows of the matrix A.  M >= 0.   

    N       (input) INTEGER   
            The number of columns of the matrix A.  N >= 0.   

    A       (input/output) COMPLEX array, dimension (LDA,N)   
            On entry, the m by n matrix A.   
            On exit, the elements on and below the diagonal of the array 
  
            contain the m by min(m,n) lower trapezoidal matrix L (L is   
            lower triangular if m <= n); the elements above the diagonal, 
  
            with the array TAU, represent the unitary matrix Q as a   
            product of elementary reflectors (see Further Details).   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= max(1,M).   

    TAU     (output) COMPLEX array, dimension (min(M,N))   
            The scalar factors of the elementary reflectors (see Further 
  
            Details).   

    WORK    (workspace) COMPLEX array, dimension (M)   

    INFO    (output) INTEGER   
            = 0: successful exit   
            < 0: if INFO = -i, the i-th argument had an illegal value   

    Further Details   
    ===============   

    The matrix Q is represented as a product of elementary reflectors   

       Q = H(k)' . . . H(2)' H(1)', where k = min(m,n).   

    Each H(i) has the form   

       H(i) = I - tau * v * v'   

    where tau is a complex scalar, and v is a complex vector with   
    v(1:i-1) = 0 and v(i) = 1; conjg(v(i+1:n)) is stored on exit in   
    A(i,i+1:n), and tau in TAU(i).   

    ===================================================================== 
  


       Test the input arguments   

    
   Parameter adjustments   
       Function Body */
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    /* Local variables */
    static integer i, k;
    static complex alpha;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
	    , integer *, complex *, complex *, integer *, complex *), 
	    clarfg_(integer *, complex *, complex *, integer *, complex *), 
	    clacgv_(integer *, complex *, integer *), xerbla_(char *, integer 
	    *);


#define TAU(I) tau[(I)-1]
#define WORK(I) work[(I)-1]

#define A(I,J) a[(I)-1 + ((J)-1)* ( *lda)]

    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*m)) {
	*info = -4;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGELQ2", &i__1);
	return 0;
    }

    k = min(*m,*n);

    i__1 = k;
    for (i = 1; i <= k; ++i) {

/*        Generate elementary reflector H(i) to annihilate A(i,i+1:n) 
*/

	i__2 = *n - i + 1;
	clacgv_(&i__2, &A(i,i), lda);
	i__2 = i + i * a_dim1;
	alpha.r = A(i,i).r, alpha.i = A(i,i).i;
	i__2 = *n - i + 1;
/* Computing MIN */
	i__3 = i + 1;
	clarfg_(&i__2, &alpha, &A(i,min(i+1,*n)), lda, &TAU(i));
	if (i < *m) {

/*           Apply H(i) to A(i+1:m,i:n) from the right */

	    i__2 = i + i * a_dim1;
	    A(i,i).r = 1.f, A(i,i).i = 0.f;
	    i__2 = *m - i;
	    i__3 = *n - i + 1;
	    clarf_("Right", &i__2, &i__3, &A(i,i), lda, &TAU(i), &
		    A(i+1,i), lda, &WORK(1));
	}
	i__2 = i + i * a_dim1;
	A(i,i).r = alpha.r, A(i,i).i = alpha.i;
	i__2 = *n - i + 1;
	clacgv_(&i__2, &A(i,i), lda);
/* L10: */
    }
    return 0;

/*     End of CGELQ2 */

} /* cgelq2_ */
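The "Further Details" block above writes each reflector as H(i) = I - tau * v * v'. Applying such an H to a vector x reduces to x := x - tau * (v' * x) * v; the helper below is only an illustration of that formula in terms of the f2c complex struct, not a LAPACK routine.

#include <stdio.h>
#include "f2c.h"   /* assumed to provide integer, real, complex */

/* Apply H = I - tau*v*v' to the n-vector x (illustrative sketch). */
static void apply_reflector(integer n, complex tau, const complex *v, complex *x)
{
    integer i;
    complex s = {0.f, 0.f};

    /* s = v' * x  (conjugate the reflector vector) */
    for (i = 0; i < n; ++i) {
        s.r += v[i].r * x[i].r + v[i].i * x[i].i;
        s.i += v[i].r * x[i].i - v[i].i * x[i].r;
    }
    /* x = x - (tau*s) * v */
    for (i = 0; i < n; ++i) {
        real tr = tau.r * s.r - tau.i * s.i;
        real ti = tau.r * s.i + tau.i * s.r;
        x[i].r -= tr * v[i].r - ti * v[i].i;
        x[i].i -= tr * v[i].i + ti * v[i].r;
    }
}

int main(void)
{
    complex v[2] = { {1.f, 0.f}, {0.f, 1.f} };
    complex x[2] = { {2.f, 0.f}, {1.f, 1.f} };
    complex tau  = { .5f, 0.f };

    apply_reflector(2, tau, v, x);
    printf("x = (%g%+gi, %g%+gi)\n",
           (double)x[0].r, (double)x[0].i, (double)x[1].r, (double)x[1].i);
    return 0;
}

Note that for CGELQ2 the documentation says conjg(v(i+1:n)) is what is stored in A(i,i+1:n), so a caller extracting v from A would conjugate those entries back before using a helper like this.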
Example #13
0
/* Subroutine */ int cgehd2_(integer *n, integer *ilo, integer *ihi, complex *
	a, integer *lda, complex *tau, complex *work, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    complex q__1;

    /* Builtin functions */
    void r_cnjg(complex *, complex *);

    /* Local variables */
    integer i__;
    complex alpha;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *,
	    integer *, complex *, complex *, integer *, complex *),
	    clarfg_(integer *, complex *, complex *, integer *, complex *),
	    xerbla_(char *, integer *);

/*  -- LAPACK routine (version 3.2) -- */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  CGEHD2 reduces a complex general matrix A to upper Hessenberg form H */
/*  by a unitary similarity transformation:  Q' * A * Q = H . */

/*  Arguments */
/*  ========= */

/*  N       (input) INTEGER */
/*          The order of the matrix A.  N >= 0. */

/*  ILO     (input) INTEGER */
/*  IHI     (input) INTEGER */
/*          It is assumed that A is already upper triangular in rows */
/*          and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally */
/*          set by a previous call to CGEBAL; otherwise they should be */
/*          set to 1 and N respectively. See Further Details. */
/*          1 <= ILO <= IHI <= max(1,N). */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the n by n general matrix to be reduced. */
/*          On exit, the upper triangle and the first subdiagonal of A */
/*          are overwritten with the upper Hessenberg matrix H, and the */
/*          elements below the first subdiagonal, with the array TAU, */
/*          represent the unitary matrix Q as a product of elementary */
/*          reflectors. See Further Details. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,N). */

/*  TAU     (output) COMPLEX array, dimension (N-1) */
/*          The scalar factors of the elementary reflectors (see Further */
/*          Details). */

/*  WORK    (workspace) COMPLEX array, dimension (N) */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */

/*  Further Details */
/*  =============== */

/*  The matrix Q is represented as a product of (ihi-ilo) elementary */
/*  reflectors */

/*     Q = H(ilo) H(ilo+1) . . . H(ihi-1). */

/*  Each H(i) has the form */

/*     H(i) = I - tau * v * v' */

/*  where tau is a complex scalar, and v is a complex vector with */
/*  v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on */
/*  exit in A(i+2:ihi,i), and tau in TAU(i). */

/*  The contents of A are illustrated by the following example, with */
/*  n = 7, ilo = 2 and ihi = 6: */

/*  on entry,                        on exit, */

/*  ( a   a   a   a   a   a   a )    (  a   a   h   h   h   h   a ) */
/*  (     a   a   a   a   a   a )    (      a   h   h   h   h   a ) */
/*  (     a   a   a   a   a   a )    (      h   h   h   h   h   h ) */
/*  (     a   a   a   a   a   a )    (      v2  h   h   h   h   h ) */
/*  (     a   a   a   a   a   a )    (      v2  v3  h   h   h   h ) */
/*  (     a   a   a   a   a   a )    (      v2  v3  v4  h   h   h ) */
/*  (                         a )    (                          a ) */

/*  where a denotes an element of the original matrix A, h denotes a */
/*  modified element of the upper Hessenberg matrix H, and vi denotes an */
/*  element of the vector defining H(i). */

/*  ===================================================================== */

/*     Test the input parameters */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --tau;
    --work;

    /* Function Body */
    *info = 0;
    if (*n < 0) {
	*info = -1;
    } else if (*ilo < 1 || *ilo > max(1,*n)) {
	*info = -2;
    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
	*info = -3;
    } else if (*lda < max(1,*n)) {
	*info = -5;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGEHD2", &i__1);
	return 0;
    }

    i__1 = *ihi - 1;
    for (i__ = *ilo; i__ <= i__1; ++i__) {

/*        Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */

	i__2 = i__ + 1 + i__ * a_dim1;
	alpha.r = a[i__2].r, alpha.i = a[i__2].i;
	i__2 = *ihi - i__;
/* Computing MIN */
	i__3 = i__ + 2;
	clarfg_(&i__2, &alpha, &a[min(i__3, *n)+ i__ * a_dim1], &c__1, &tau[
		i__]);
	i__2 = i__ + 1 + i__ * a_dim1;
	a[i__2].r = 1.f, a[i__2].i = 0.f;

/*        Apply H(i) to A(1:ihi,i+1:ihi) from the right */

	i__2 = *ihi - i__;
	clarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
		i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]);

/*        Apply H(i)' to A(i+1:ihi,i+1:n) from the left */

	i__2 = *ihi - i__;
	i__3 = *n - i__;
	r_cnjg(&q__1, &tau[i__]);
	clarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &q__1, 
		 &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]);

	i__2 = i__ + 1 + i__ * a_dim1;
	a[i__2].r = alpha.r, a[i__2].i = alpha.i;
    }

    return 0;

/*     End of CGEHD2 */

} /* cgehd2_ */
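As the Arguments section notes, ILO and IHI default to 1 and N when no balancing has been done. The sketch below is a hypothetical driver, assuming "f2c.h" and a link against cgehd2_ and its dependencies; it reduces a small dense matrix to Hessenberg form.

#include <stdio.h>
#include "f2c.h"   /* assumed to provide integer, real, complex */

extern int cgehd2_(integer *n, integer *ilo, integer *ihi, complex *a,
                   integer *lda, complex *tau, complex *work, integer *info);

int main(void)
{
    /* 4-by-4 matrix, column-major; no balancing, so ILO = 1, IHI = N */
    complex a[16];
    complex tau[3], work[4];            /* TAU: N-1, WORK: N */
    integer n = 4, ilo = 1, ihi = 4, lda = 4, info;
    integer i, j;

    for (j = 0; j < 4; ++j)
        for (i = 0; i < 4; ++i) {
            a[i + 4 * j].r = (real)(i + j + 1);
            a[i + 4 * j].i = (real)(i - j);
        }

    cgehd2_(&n, &ilo, &ihi, a, &lda, tau, work, &info);

    /* A(3:4,1) and A(4,2) now hold reflector entries v, per Further Details */
    printf("info = %ld, H(2,1) = %g%+gi\n",
           (long)info, (double)a[1].r, (double)a[1].i);
    return 0;
}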
Example #14
0
/* Subroutine */
int clatrz_(integer *m, integer *n, integer *l, complex *a, integer *lda, complex *tau, complex *work)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2;
    complex q__1;
    /* Builtin functions */
    void r_cnjg(complex *, complex *);
    /* Local variables */
    integer i__;
    complex alpha;
    extern /* Subroutine */
    int clarz_(char *, integer *, integer *, integer * , complex *, integer *, complex *, complex *, integer *, complex * ), clarfg_(integer *, complex *, complex *, integer *, complex *), clacgv_(integer *, complex *, integer *);
    /* -- LAPACK computational routine (version 3.4.2) -- */
    /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
    /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
    /* September 2012 */
    /* .. Scalar Arguments .. */
    /* .. */
    /* .. Array Arguments .. */
    /* .. */
    /* ===================================================================== */
    /* .. Parameters .. */
    /* .. */
    /* .. Local Scalars .. */
    /* .. */
    /* .. External Subroutines .. */
    /* .. */
    /* .. Intrinsic Functions .. */
    /* .. */
    /* .. Executable Statements .. */
    /* Quick return if possible */
    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --tau;
    --work;
    /* Function Body */
    if (*m == 0)
    {
        return 0;
    }
    else if (*m == *n)
    {
        i__1 = *n;
        for (i__ = 1;
                i__ <= i__1;
                ++i__)
        {
            i__2 = i__;
            tau[i__2].r = 0.f;
            tau[i__2].i = 0.f; // , expr subst
            /* L10: */
        }
        return 0;
    }
    for (i__ = *m;
            i__ >= 1;
            --i__)
    {
        /* Generate elementary reflector H(i) to annihilate */
        /* [ A(i,i) A(i,n-l+1:n) ] */
        clacgv_(l, &a[i__ + (*n - *l + 1) * a_dim1], lda);
        r_cnjg(&q__1, &a[i__ + i__ * a_dim1]);
        alpha.r = q__1.r;
        alpha.i = q__1.i; // , expr subst
        i__1 = *l + 1;
        clarfg_(&i__1, &alpha, &a[i__ + (*n - *l + 1) * a_dim1], lda, &tau[ i__]);
        i__1 = i__;
        r_cnjg(&q__1, &tau[i__]);
        tau[i__1].r = q__1.r;
        tau[i__1].i = q__1.i; // , expr subst
        /* Apply H(i) to A(1:i-1,i:n) from the right */
        i__1 = i__ - 1;
        i__2 = *n - i__ + 1;
        r_cnjg(&q__1, &tau[i__]);
        clarz_("Right", &i__1, &i__2, l, &a[i__ + (*n - *l + 1) * a_dim1], lda, &q__1, &a[i__ * a_dim1 + 1], lda, &work[1]);
        i__1 = i__ + i__ * a_dim1;
        r_cnjg(&q__1, &alpha);
        a[i__1].r = q__1.r;
        a[i__1].i = q__1.i; // , expr subst
        /* L20: */
    }
    return 0;
    /* End of CLATRZ */
}
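The header comments of CLATRZ are stripped in this listing; the routine factors the upper trapezoidal matrix [ A(1:M,1:M)  A(1:M,N-L+1:N) ] as ( R 0 ) * Z using the reflectors built in the loop above. A minimal call sketch, assuming "f2c.h" types and that clatrz_ and the CLARZ/CLARFG/CLACGV routines it calls are linked in:

#include <stdio.h>
#include "f2c.h"   /* assumed to provide integer, real, complex */

extern int clatrz_(integer *m, integer *n, integer *l, complex *a,
                   integer *lda, complex *tau, complex *work);

int main(void)
{
    /* 2-by-4 upper trapezoidal matrix, column-major; L = N - M = 2 */
    complex a[8] = {
        {2.f, 0.f}, {0.f, 0.f},     /* column 1 */
        {1.f, 1.f}, {3.f, 0.f},     /* column 2 */
        {0.f, 1.f}, {1.f, 0.f},     /* column 3 */
        {1.f, 0.f}, {2.f, 1.f}      /* column 4 */
    };
    complex tau[2], work[2];        /* TAU: M, WORK: M */
    integer m = 2, n = 4, l = 2, lda = 2;

    clatrz_(&m, &n, &l, a, &lda, tau, work);

    /* A(1:2,1:2) now holds the upper triangular factor R */
    printf("R(1,1) = %g%+gi, R(2,2) = %g%+gi\n",
           (double)a[0].r, (double)a[0].i, (double)a[3].r, (double)a[3].i);
    return 0;
}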
Example #15
0
/* Subroutine */ int cgeqr2_(integer *m, integer *n, complex *a, integer *lda,
	 complex *tau, complex *work, integer *info)
{
/*  -- LAPACK routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    CGEQR2 computes a QR factorization of a complex m by n matrix A:   
    A = Q * R.   

    Arguments   
    =========   

    M       (input) INTEGER   
            The number of rows of the matrix A.  M >= 0.   

    N       (input) INTEGER   
            The number of columns of the matrix A.  N >= 0.   

    A       (input/output) COMPLEX array, dimension (LDA,N)   
            On entry, the m by n matrix A.   
            On exit, the elements on and above the diagonal of the array   
            contain the min(m,n) by n upper trapezoidal matrix R (R is   
            upper triangular if m >= n); the elements below the diagonal,   
            with the array TAU, represent the unitary matrix Q as a   
            product of elementary reflectors (see Further Details).   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= max(1,M).   

    TAU     (output) COMPLEX array, dimension (min(M,N))   
            The scalar factors of the elementary reflectors (see Further   
            Details).   

    WORK    (workspace) COMPLEX array, dimension (N)   

    INFO    (output) INTEGER   
            = 0: successful exit   
            < 0: if INFO = -i, the i-th argument had an illegal value   

    Further Details   
    ===============   

    The matrix Q is represented as a product of elementary reflectors   

       Q = H(1) H(2) . . . H(k), where k = min(m,n).   

    Each H(i) has the form   

       H(i) = I - tau * v * v'   

    where tau is a complex scalar, and v is a complex vector with   
    v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),   
    and tau in TAU(i).   

    =====================================================================   


       Test the input arguments   

       Parameter adjustments */
    /* Table of constant values */
    static integer c__1 = 1;
    
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    complex q__1;
    /* Builtin functions */
    void r_cnjg(complex *, complex *);
    /* Local variables */
    static integer i__, k;
    static complex alpha;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
	    , integer *, complex *, complex *, integer *, complex *), 
	    clarfg_(integer *, complex *, complex *, integer *, complex *), 
	    xerbla_(char *, integer *);
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]


    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*m)) {
	*info = -4;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGEQR2", &i__1);
	return 0;
    }

    k = min(*m,*n);

    i__1 = k;
    for (i__ = 1; i__ <= i__1; ++i__) {

/*        Generate elementary reflector H(i) to annihilate A(i+1:m,i)   

   Computing MIN */
	i__2 = i__ + 1;
	i__3 = *m - i__ + 1;
	clarfg_(&i__3, &a_ref(i__, i__), &a_ref(min(i__2,*m), i__), &c__1, &
		tau[i__]);
	if (i__ < *n) {

/*           Apply H(i)' to A(i:m,i+1:n) from the left */

	    i__2 = a_subscr(i__, i__);
	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
	    i__2 = a_subscr(i__, i__);
	    a[i__2].r = 1.f, a[i__2].i = 0.f;
	    i__2 = *m - i__ + 1;
	    i__3 = *n - i__;
	    r_cnjg(&q__1, &tau[i__]);
	    clarf_("Left", &i__2, &i__3, &a_ref(i__, i__), &c__1, &q__1, &
		    a_ref(i__, i__ + 1), lda, &work[1]);
	    i__2 = a_subscr(i__, i__);
	    a[i__2].r = alpha.r, a[i__2].i = alpha.i;
	}
/* L10: */
    }
    return 0;

/*     End of CGEQR2 */

} /* cgeqr2_ */
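Because Q is unitary, the first column of R produced above has the same 2-norm as the first column of A, so |R(1,1)| equals that column norm. The driver below is a hypothetical sketch (assuming "f2c.h" and linking against cgeqr2_) that checks this property numerically.

#include <math.h>
#include <stdio.h>
#include "f2c.h"   /* assumed to provide integer, real, complex */

extern int cgeqr2_(integer *m, integer *n, complex *a, integer *lda,
                   complex *tau, complex *work, integer *info);

int main(void)
{
    /* 4-by-3 matrix, column-major */
    complex a[12];
    complex tau[3], work[3];            /* TAU: min(M,N), WORK: N */
    integer m = 4, n = 3, lda = 4, info;
    integer i, j;
    double colnorm = 0.0;

    for (j = 0; j < 3; ++j)
        for (i = 0; i < 4; ++i) {
            a[i + 4 * j].r = (real)(1 + i + 2 * j);
            a[i + 4 * j].i = (real)(j - i);
        }

    /* 2-norm of the first column before factorization */
    for (i = 0; i < 4; ++i)
        colnorm += (double)a[i].r * a[i].r + (double)a[i].i * a[i].i;
    colnorm = sqrt(colnorm);

    cgeqr2_(&m, &n, a, &lda, tau, work, &info);

    /* R sits on and above the diagonal of A; below it are the v(i+1:m) */
    printf("info = %ld, |R(1,1)| = %g, ||A(:,1)|| = %g\n", (long)info,
           sqrt((double)a[0].r * a[0].r + (double)a[0].i * a[0].i), colnorm);
    return 0;
}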
Example #16
0
/* Subroutine */ int clatme_(integer *n, char *dist, integer *iseed, complex *
	d__, integer *mode, real *cond, complex *dmax__, char *ei, char *
	rsign, char *upper, char *sim, real *ds, integer *modes, real *conds, 
	integer *kl, integer *ku, real *anorm, complex *a, integer *lda, 
	complex *work, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2;
    real r__1, r__2;
    complex q__1, q__2;

    /* Builtin functions */
    double c_abs(complex *);
    void r_cnjg(complex *, complex *);

    /* Local variables */
    integer i__, j, ic, jc, ir, jcr;
    complex tau;
    logical bads;
    integer isim;
    real temp;
    extern /* Subroutine */ int cgerc_(integer *, integer *, complex *, 
	    complex *, integer *, complex *, integer *, complex *, integer *);
    complex alpha;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
	    integer *);
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
, complex *, integer *, complex *, integer *, complex *, complex *
, integer *);
    integer iinfo;
    real tempa[1];
    integer icols, idist;
    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *, 
	    complex *, integer *);
    integer irows;
    extern /* Subroutine */ int clatm1_(integer *, real *, integer *, integer 
	    *, integer *, complex *, integer *, integer *), slatm1_(integer *, 
	     real *, integer *, integer *, integer *, real *, integer *, 
	    integer *);
    extern doublereal clange_(char *, integer *, integer *, complex *, 
	    integer *, real *);
    extern /* Subroutine */ int clarge_(integer *, complex *, integer *, 
	    integer *, complex *, integer *), clarfg_(integer *, complex *, 
	    complex *, integer *, complex *), clacgv_(integer *, complex *, 
	    integer *);
    extern /* Complex */ VOID clarnd_(complex *, integer *, integer *);
    real ralpha;
    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
	    *), claset_(char *, integer *, integer *, complex *, complex *, 
	    complex *, integer *), xerbla_(char *, integer *),
	     clarnv_(integer *, integer *, integer *, complex *);
    integer irsign, iupper;
    complex xnorms;


/*  -- LAPACK test routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*     CLATME generates random non-symmetric square matrices with */
/*     specified eigenvalues for testing LAPACK programs. */

/*     CLATME operates by applying the following sequence of */
/*     operations: */

/*     1. Set the diagonal to D, where D may be input or */
/*          computed according to MODE, COND, DMAX, and RSIGN */
/*          as described below. */

/*     2. If UPPER='T', the upper triangle of A is set to random values */
/*          out of distribution DIST. */

/*     3. If SIM='T', A is multiplied on the left by a random matrix */
/*          X, whose singular values are specified by DS, MODES, and */
/*          CONDS, and on the right by X inverse. */

/*     4. If KL < N-1, the lower bandwidth is reduced to KL using */
/*          Householder transformations.  If KU < N-1, the upper */
/*          bandwidth is reduced to KU. */

/*     5. If ANORM is not negative, the matrix is scaled to have */
/*          maximum-element-norm ANORM. */

/*     (Note: since the matrix cannot be reduced beyond Hessenberg form, */
/*      no packing options are available.) */

/*  Arguments */
/*  ========= */

/*  N      - INTEGER */
/*           The number of columns (or rows) of A. Not modified. */

/*  DIST   - CHARACTER*1 */
/*           On entry, DIST specifies the type of distribution to be used */
/*           to generate the random eigen-/singular values, and on the */
/*           upper triangle (see UPPER). */
/*           'U' => UNIFORM( 0, 1 )  ( 'U' for uniform ) */
/*           'S' => UNIFORM( -1, 1 ) ( 'S' for symmetric ) */
/*           'N' => NORMAL( 0, 1 )   ( 'N' for normal ) */
/*           'D' => uniform on the complex disc |z| < 1. */
/*           Not modified. */

/*  ISEED  - INTEGER array, dimension ( 4 ) */
/*           On entry ISEED specifies the seed of the random number */
/*           generator. They should lie between 0 and 4095 inclusive, */
/*           and ISEED(4) should be odd. The random number generator */
/*           uses a linear congruential sequence limited to small */
/*           integers, and so should produce machine independent */
/*           random numbers. The values of ISEED are changed on */
/*           exit, and can be used in the next call to CLATME */
/*           to continue the same random number sequence. */
/*           Changed on exit. */

/*  D      - COMPLEX array, dimension ( N ) */
/*           This array is used to specify the eigenvalues of A.  If */
/*           MODE=0, then D is assumed to contain the eigenvalues */
/*           otherwise they will be computed according to MODE, COND, */
/*           DMAX, and RSIGN and placed in D. */
/*           Modified if MODE is nonzero. */

/*  MODE   - INTEGER */
/*           On entry this describes how the eigenvalues are to */
/*           be specified: */
/*           MODE = 0 means use D as input */
/*           MODE = 1 sets D(1)=1 and D(2:N)=1.0/COND */
/*           MODE = 2 sets D(1:N-1)=1 and D(N)=1.0/COND */
/*           MODE = 3 sets D(I)=COND**(-(I-1)/(N-1)) */
/*           MODE = 4 sets D(i)=1 - (i-1)/(N-1)*(1 - 1/COND) */
/*           MODE = 5 sets D to random numbers in the range */
/*                    ( 1/COND , 1 ) such that their logarithms */
/*                    are uniformly distributed. */
/*           MODE = 6 set D to random numbers from same distribution */
/*                    as the rest of the matrix. */
/*           MODE < 0 has the same meaning as ABS(MODE), except that */
/*              the order of the elements of D is reversed. */
/*           Thus if MODE is between 1 and 4, D has entries ranging */
/*              from 1 to 1/COND; if between -1 and -4, D has entries */
/*              ranging from 1/COND to 1. */
/*           Not modified. */

/*  COND   - REAL */
/*           On entry, this is used as described under MODE above. */
/*           If used, it must be >= 1. Not modified. */

/*  DMAX   - COMPLEX */
/*           If MODE is neither -6, 0 nor 6, the contents of D, as */
/*           computed according to MODE and COND, will be scaled by */
/*           DMAX / max(abs(D(i))).  Note that DMAX need not be */
/*           positive or real: if DMAX is negative or complex (or zero), */
/*           D will be scaled by a negative or complex number (or zero). */
/*           If RSIGN='F' then the largest (absolute) eigenvalue will be */
/*           equal to DMAX. */
/*           Not modified. */

/*  EI     - CHARACTER*1 (ignored) */
/*           Not modified. */

/*  RSIGN  - CHARACTER*1 */
/*           If MODE is not 0, 6, or -6, and RSIGN='T', then the */
/*           elements of D, as computed according to MODE and COND, will */
/*           be multiplied by a random complex number from the unit */
/*           circle |z| = 1.  If RSIGN='F', they will not be.  RSIGN may */
/*           only have the values 'T' or 'F'. */
/*           Not modified. */

/*  UPPER  - CHARACTER*1 */
/*           If UPPER='T', then the elements of A above the diagonal */
/*           will be set to random numbers out of DIST.  If UPPER='F', */
/*           they will not.  UPPER may only have the values 'T' or 'F'. */
/*           Not modified. */

/*  SIM    - CHARACTER*1 */
/*           If SIM='T', then A will be operated on by a "similarity */
/*           transform", i.e., multiplied on the left by a matrix X and */
/*           on the right by X inverse.  X = U S V, where U and V are */
/*           random unitary matrices and S is a (diagonal) matrix of */
/*           singular values specified by DS, MODES, and CONDS.  If */
/*           SIM='F', then A will not be transformed. */
/*           Not modified. */

/*  DS     - REAL array, dimension ( N ) */
/*           This array is used to specify the singular values of X, */
/*           in the same way that D specifies the eigenvalues of A. */
/*           If MODE=0, then DS contains the singular values, which */
/*           may not be zero. */
/*           Modified if MODE is nonzero. */

/*  MODES  - INTEGER */
/*  CONDS  - REAL */
/*           Similar to MODE and COND, but for specifying the diagonal */
/*           of S.  MODES=-6 and +6 are not allowed (since they would */
/*           result in randomly ill-conditioned eigenvalues.) */

/*  KL     - INTEGER */
/*           This specifies the lower bandwidth of the  matrix.  KL=1 */
/*           specifies upper Hessenberg form.  If KL is at least N-1, */
/*           then A will have full lower bandwidth. */
/*           Not modified. */

/*  KU     - INTEGER */
/*           This specifies the upper bandwidth of the  matrix.  KU=1 */
/*           specifies lower Hessenberg form.  If KU is at least N-1, */
/*           then A will have full upper bandwidth; if KU and KL */
/*           are both at least N-1, then A will be dense.  Only one of */
/*           KU and KL may be less than N-1. */
/*           Not modified. */

/*  ANORM  - REAL */
/*           If ANORM is not negative, then A will be scaled by a non- */
/*           negative real number to make the maximum-element-norm of A */
/*           to be ANORM. */
/*           Not modified. */

/*  A      - COMPLEX array, dimension ( LDA, N ) */
/*           On exit A is the desired test matrix. */
/*           Modified. */

/*  LDA    - INTEGER */
/*           LDA specifies the first dimension of A as declared in the */
/*           calling program.  LDA must be at least M. */
/*           Not modified. */

/*  WORK   - COMPLEX array, dimension ( 3*N ) */
/*           Workspace. */
/*           Modified. */

/*  INFO   - INTEGER */
/*           Error code.  On exit, INFO will be set to one of the */
/*           following values: */
/*             0 => normal return */
/*            -1 => N negative */
/*            -2 => DIST illegal string */
/*            -5 => MODE not in range -6 to 6 */
/*            -6 => COND less than 1.0, and MODE neither -6, 0 nor 6 */
/*            -9 => RSIGN is not 'T' or 'F' */
/*           -10 => UPPER is not 'T' or 'F' */
/*           -11 => SIM   is not 'T' or 'F' */
/*           -12 => MODES=0 and DS has a zero singular value. */
/*           -13 => MODES is not in the range -5 to 5. */
/*           -14 => MODES is nonzero and CONDS is less than 1. */
/*           -15 => KL is less than 1. */
/*           -16 => KU is less than 1, or KL and KU are both less than */
/*                  N-1. */
/*           -19 => LDA is less than M. */
/*            1  => Error return from CLATM1 (computing D) */
/*            2  => Cannot scale to DMAX (max. eigenvalue is 0) */
/*            3  => Error return from SLATM1 (computing DS) */
/*            4  => Error return from CLARGE */
/*            5  => Zero singular value from SLATM1. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     1)      Decode and Test the input parameters. */
/*             Initialize flags & seed. */

    /* Parameter adjustments */
    --iseed;
    --d__;
    --ds;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --work;

    /* Function Body */
    *info = 0;

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }

/*     Decode DIST */

    if (lsame_(dist, "U")) {
	idist = 1;
    } else if (lsame_(dist, "S")) {
	idist = 2;
    } else if (lsame_(dist, "N")) {
	idist = 3;
    } else if (lsame_(dist, "D")) {
	idist = 4;
    } else {
	idist = -1;
    }

/*     Decode RSIGN */

    if (lsame_(rsign, "T")) {
	irsign = 1;
    } else if (lsame_(rsign, "F")) {
	irsign = 0;
    } else {
	irsign = -1;
    }

/*     Decode UPPER */

    if (lsame_(upper, "T")) {
	iupper = 1;
    } else if (lsame_(upper, "F")) {
	iupper = 0;
    } else {
	iupper = -1;
    }

/*     Decode SIM */

    if (lsame_(sim, "T")) {
	isim = 1;
    } else if (lsame_(sim, "F")) {
	isim = 0;
    } else {
	isim = -1;
    }

/*     Check DS, if MODES=0 and ISIM=1 */

    bads = FALSE_;
    if (*modes == 0 && isim == 1) {
	i__1 = *n;
	for (j = 1; j <= i__1; ++j) {
	    if (ds[j] == 0.f) {
		bads = TRUE_;
	    }
/* L10: */
	}
    }

/*     Set INFO if an error */

    if (*n < 0) {
	*info = -1;
    } else if (idist == -1) {
	*info = -2;
    } else if (abs(*mode) > 6) {
	*info = -5;
    } else if (*mode != 0 && abs(*mode) != 6 && *cond < 1.f) {
	*info = -6;
    } else if (irsign == -1) {
	*info = -9;
    } else if (iupper == -1) {
	*info = -10;
    } else if (isim == -1) {
	*info = -11;
    } else if (bads) {
	*info = -12;
    } else if (isim == 1 && abs(*modes) > 5) {
	*info = -13;
    } else if (isim == 1 && *modes != 0 && *conds < 1.f) {
	*info = -14;
    } else if (*kl < 1) {
	*info = -15;
    } else if (*ku < 1 || *ku < *n - 1 && *kl < *n - 1) {
	*info = -16;
    } else if (*lda < max(1,*n)) {
	*info = -19;
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CLATME", &i__1);
	return 0;
    }

/*     Initialize random number generator */

    for (i__ = 1; i__ <= 4; ++i__) {
	iseed[i__] = (i__1 = iseed[i__], abs(i__1)) % 4096;
/* L20: */
    }

    if (iseed[4] % 2 != 1) {
	++iseed[4];
    }

/*     2)      Set up diagonal of A */

/*             Compute D according to COND and MODE */

    clatm1_(mode, cond, &irsign, &idist, &iseed[1], &d__[1], n, &iinfo);
    if (iinfo != 0) {
	*info = 1;
	return 0;
    }
    if (*mode != 0 && abs(*mode) != 6) {

/*        Scale by DMAX */

	temp = c_abs(&d__[1]);
	i__1 = *n;
	for (i__ = 2; i__ <= i__1; ++i__) {
/* Computing MAX */
	    r__1 = temp, r__2 = c_abs(&d__[i__]);
	    temp = dmax(r__1,r__2);
/* L30: */
	}

	if (temp > 0.f) {
	    q__1.r = dmax__->r / temp, q__1.i = dmax__->i / temp;
	    alpha.r = q__1.r, alpha.i = q__1.i;
	} else {
	    *info = 2;
	    return 0;
	}

	cscal_(n, &alpha, &d__[1], &c__1);

    }

    claset_("Full", n, n, &c_b1, &c_b1, &a[a_offset], lda);
    i__1 = *lda + 1;
    ccopy_(n, &d__[1], &c__1, &a[a_offset], &i__1);

/*     3)      If UPPER='T', set upper triangle of A to random numbers. */

    if (iupper != 0) {
	i__1 = *n;
	for (jc = 2; jc <= i__1; ++jc) {
	    i__2 = jc - 1;
	    clarnv_(&idist, &iseed[1], &i__2, &a[jc * a_dim1 + 1]);
/* L40: */
	}
    }

/*     4)      If SIM='T', apply similarity transformation. */

/*                                -1 */
/*             Transform is  X A X  , where X = U S V, thus */

/*             it is  U S V A V' (1/S) U' */

    if (isim != 0) {

/*        Compute S (singular values of the eigenvector matrix) */
/*        according to CONDS and MODES */

	slatm1_(modes, conds, &c__0, &c__0, &iseed[1], &ds[1], n, &iinfo);
	if (iinfo != 0) {
	    *info = 3;
	    return 0;
	}

/*        Multiply by V and V' */

	clarge_(n, &a[a_offset], lda, &iseed[1], &work[1], &iinfo);
	if (iinfo != 0) {
	    *info = 4;
	    return 0;
	}

/*        Multiply by S and (1/S) */

	i__1 = *n;
	for (j = 1; j <= i__1; ++j) {
	    csscal_(n, &ds[j], &a[j + a_dim1], lda);
	    if (ds[j] != 0.f) {
		r__1 = 1.f / ds[j];
		csscal_(n, &r__1, &a[j * a_dim1 + 1], &c__1);
	    } else {
		*info = 5;
		return 0;
	    }
/* L50: */
	}

/*        Multiply by U and U' */

	clarge_(n, &a[a_offset], lda, &iseed[1], &work[1], &iinfo);
	if (iinfo != 0) {
	    *info = 4;
	    return 0;
	}
    }

/*     5)      Reduce the bandwidth. */

    if (*kl < *n - 1) {

/*        Reduce bandwidth -- kill column */

	i__1 = *n - 1;
	for (jcr = *kl + 1; jcr <= i__1; ++jcr) {
	    ic = jcr - *kl;
	    irows = *n + 1 - jcr;
	    icols = *n + *kl - jcr;

	    ccopy_(&irows, &a[jcr + ic * a_dim1], &c__1, &work[1], &c__1);
	    xnorms.r = work[1].r, xnorms.i = work[1].i;
	    clarfg_(&irows, &xnorms, &work[2], &c__1, &tau);
	    r_cnjg(&q__1, &tau);
	    tau.r = q__1.r, tau.i = q__1.i;
	    work[1].r = 1.f, work[1].i = 0.f;
	    clarnd_(&q__1, &c__5, &iseed[1]);
	    alpha.r = q__1.r, alpha.i = q__1.i;

	    cgemv_("C", &irows, &icols, &c_b2, &a[jcr + (ic + 1) * a_dim1], 
		    lda, &work[1], &c__1, &c_b1, &work[irows + 1], &c__1);
	    q__1.r = -tau.r, q__1.i = -tau.i;
	    cgerc_(&irows, &icols, &q__1, &work[1], &c__1, &work[irows + 1], &
		    c__1, &a[jcr + (ic + 1) * a_dim1], lda);

	    cgemv_("N", n, &irows, &c_b2, &a[jcr * a_dim1 + 1], lda, &work[1], 
		     &c__1, &c_b1, &work[irows + 1], &c__1);
	    r_cnjg(&q__2, &tau);
	    q__1.r = -q__2.r, q__1.i = -q__2.i;
	    cgerc_(n, &irows, &q__1, &work[irows + 1], &c__1, &work[1], &c__1, 
		     &a[jcr * a_dim1 + 1], lda);

	    i__2 = jcr + ic * a_dim1;
	    a[i__2].r = xnorms.r, a[i__2].i = xnorms.i;
	    i__2 = irows - 1;
	    claset_("Full", &i__2, &c__1, &c_b1, &c_b1, &a[jcr + 1 + ic * 
		    a_dim1], lda);

	    i__2 = icols + 1;
	    cscal_(&i__2, &alpha, &a[jcr + ic * a_dim1], lda);
	    r_cnjg(&q__1, &alpha);
	    cscal_(n, &q__1, &a[jcr * a_dim1 + 1], &c__1);
/* L60: */
	}
    } else if (*ku < *n - 1) {

/*        Reduce upper bandwidth -- kill a row at a time. */

	i__1 = *n - 1;
	for (jcr = *ku + 1; jcr <= i__1; ++jcr) {
	    ir = jcr - *ku;
	    irows = *n + *ku - jcr;
	    icols = *n + 1 - jcr;

	    ccopy_(&icols, &a[ir + jcr * a_dim1], lda, &work[1], &c__1);
	    xnorms.r = work[1].r, xnorms.i = work[1].i;
	    clarfg_(&icols, &xnorms, &work[2], &c__1, &tau);
	    r_cnjg(&q__1, &tau);
	    tau.r = q__1.r, tau.i = q__1.i;
	    work[1].r = 1.f, work[1].i = 0.f;
	    i__2 = icols - 1;
	    clacgv_(&i__2, &work[2], &c__1);
	    clarnd_(&q__1, &c__5, &iseed[1]);
	    alpha.r = q__1.r, alpha.i = q__1.i;

	    cgemv_("N", &irows, &icols, &c_b2, &a[ir + 1 + jcr * a_dim1], lda, 
		     &work[1], &c__1, &c_b1, &work[icols + 1], &c__1);
	    q__1.r = -tau.r, q__1.i = -tau.i;
	    cgerc_(&irows, &icols, &q__1, &work[icols + 1], &c__1, &work[1], &
		    c__1, &a[ir + 1 + jcr * a_dim1], lda);

	    cgemv_("C", &icols, n, &c_b2, &a[jcr + a_dim1], lda, &work[1], &
		    c__1, &c_b1, &work[icols + 1], &c__1);
	    r_cnjg(&q__2, &tau);
	    q__1.r = -q__2.r, q__1.i = -q__2.i;
	    cgerc_(&icols, n, &q__1, &work[1], &c__1, &work[icols + 1], &c__1, 
		     &a[jcr + a_dim1], lda);

	    i__2 = ir + jcr * a_dim1;
	    a[i__2].r = xnorms.r, a[i__2].i = xnorms.i;
	    i__2 = icols - 1;
	    claset_("Full", &c__1, &i__2, &c_b1, &c_b1, &a[ir + (jcr + 1) * 
		    a_dim1], lda);

	    i__2 = irows + 1;
	    cscal_(&i__2, &alpha, &a[ir + jcr * a_dim1], &c__1);
	    r_cnjg(&q__1, &alpha);
	    cscal_(n, &q__1, &a[jcr + a_dim1], lda);
/* L70: */
	}
    }

/*     Scale the matrix to have norm ANORM */

    if (*anorm >= 0.f) {
	temp = clange_("M", n, n, &a[a_offset], lda, tempa);
	if (temp > 0.f) {
	    ralpha = *anorm / temp;
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		csscal_(n, &ralpha, &a[j * a_dim1 + 1], &c__1);
/* L80: */
	    }
	}
    }

    return 0;

/*     End of CLATME */

} /* clatme_ */
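CLATME has a long argument list, so a concrete call helps map the Arguments section above onto C. The sketch below is hypothetical: it assumes "f2c.h" and a link against clatme_ and its TMG dependencies (CLATM1, CLARGE, etc.), and it asks for a dense 4-by-4 matrix with geometrically graded eigenvalues (MODE=3, COND=10), a random upper triangle, no similarity transform, and no norm scaling (ANORM < 0).

#include <stdio.h>
#include "f2c.h"   /* assumed to provide integer, real, complex */

extern int clatme_(integer *n, char *dist, integer *iseed, complex *d__,
                   integer *mode, real *cond, complex *dmax__, char *ei,
                   char *rsign, char *upper, char *sim, real *ds,
                   integer *modes, real *conds, integer *kl, integer *ku,
                   real *anorm, complex *a, integer *lda, complex *work,
                   integer *info);

int main(void)
{
    integer n = 4, iseed[4] = {1, 2, 3, 5};   /* ISEED(4) must be odd */
    complex d[4], dmax = {1.f, 0.f};
    real ds[4] = {0.f}, cond = 10.f, conds = 1.f;
    real anorm = -1.f;                        /* negative: skip scaling */
    integer mode = 3, modes = 0, kl = 3, ku = 3;  /* KL = KU = N-1: dense */
    complex a[16], work[12];                  /* WORK: 3*N */
    integer lda = 4, info;

    /* EI ("N" here) is documented as ignored; SIM='F' leaves DS unused */
    clatme_(&n, "S", iseed, d, &mode, &cond, &dmax, "N", "F", "T", "F",
            ds, &modes, &conds, &kl, &ku, &anorm, a, &lda, work, &info);

    printf("info = %ld, A(1,1) = %g%+gi\n",
           (long)info, (double)a[0].r, (double)a[0].i);
    return 0;
}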
Example #17
0
/* Subroutine */ int cgebd2_(integer *m, integer *n, complex *a, integer *lda, 
	 real *d__, real *e, complex *tauq, complex *taup, complex *work, 
	integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    complex q__1;

    /* Builtin functions */
    void r_cnjg(complex *, complex *);

    /* Local variables */
    integer i__;
    complex alpha;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
, integer *, complex *, complex *, integer *, complex *), 
	    clarfg_(integer *, complex *, complex *, integer *, complex *), 
	    clacgv_(integer *, complex *, integer *), xerbla_(char *, integer 
	    *);


/*  -- LAPACK routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CGEBD2 reduces a complex general m by n matrix A to upper or lower */
/*  real bidiagonal form B by a unitary transformation: Q' * A * P = B. */

/*  If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows in the matrix A.  M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns in the matrix A.  N >= 0. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the m by n general matrix to be reduced. */
/*          On exit, */
/*          if m >= n, the diagonal and the first superdiagonal are */
/*            overwritten with the upper bidiagonal matrix B; the */
/*            elements below the diagonal, with the array TAUQ, represent */
/*            the unitary matrix Q as a product of elementary */
/*            reflectors, and the elements above the first superdiagonal, */
/*            with the array TAUP, represent the unitary matrix P as */
/*            a product of elementary reflectors; */
/*          if m < n, the diagonal and the first subdiagonal are */
/*            overwritten with the lower bidiagonal matrix B; the */
/*            elements below the first subdiagonal, with the array TAUQ, */
/*            represent the unitary matrix Q as a product of */
/*            elementary reflectors, and the elements above the diagonal, */
/*            with the array TAUP, represent the unitary matrix P as */
/*            a product of elementary reflectors. */
/*          See Further Details. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,M). */

/*  D       (output) REAL array, dimension (min(M,N)) */
/*          The diagonal elements of the bidiagonal matrix B: */
/*          D(i) = A(i,i). */

/*  E       (output) REAL array, dimension (min(M,N)-1) */
/*          The off-diagonal elements of the bidiagonal matrix B: */
/*          if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; */
/*          if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. */

/*  TAUQ    (output) COMPLEX array dimension (min(M,N)) */
/*          The scalar factors of the elementary reflectors which */
/*          represent the unitary matrix Q. See Further Details. */

/*  TAUP    (output) COMPLEX array, dimension (min(M,N)) */
/*          The scalar factors of the elementary reflectors which */
/*          represent the unitary matrix P. See Further Details. */

/*  WORK    (workspace) COMPLEX array, dimension (max(M,N)) */

/*  INFO    (output) INTEGER */
/*          = 0: successful exit */
/*          < 0: if INFO = -i, the i-th argument had an illegal value. */

/*  Further Details */
/*  =============== */

/*  The matrices Q and P are represented as products of elementary */
/*  reflectors: */

/*  If m >= n, */

/*     Q = H(1) H(2) . . . H(n)  and  P = G(1) G(2) . . . G(n-1) */

/*  Each H(i) and G(i) has the form: */

/*     H(i) = I - tauq * v * v'  and G(i) = I - taup * u * u' */

/*  where tauq and taup are complex scalars, and v and u are complex */
/*  vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in */
/*  A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in */
/*  A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */

/*  If m < n, */

/*     Q = H(1) H(2) . . . H(m-1)  and  P = G(1) G(2) . . . G(m) */

/*  Each H(i) and G(i) has the form: */

/*     H(i) = I - tauq * v * v'  and G(i) = I - taup * u * u' */

/*  where tauq and taup are complex scalars, v and u are complex vectors; */
/*  v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); */
/*  u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); */
/*  tauq is stored in TAUQ(i) and taup in TAUP(i). */

/*  The contents of A on exit are illustrated by the following examples: */

/*  m = 6 and n = 5 (m > n):          m = 5 and n = 6 (m < n): */

/*    (  d   e   u1  u1  u1 )           (  d   u1  u1  u1  u1  u1 ) */
/*    (  v1  d   e   u2  u2 )           (  e   d   u2  u2  u2  u2 ) */
/*    (  v1  v2  d   e   u3 )           (  v1  e   d   u3  u3  u3 ) */
/*    (  v1  v2  v3  d   e  )           (  v1  v2  e   d   u4  u4 ) */
/*    (  v1  v2  v3  v4  d  )           (  v1  v2  v3  e   d   u5 ) */
/*    (  v1  v2  v3  v4  v5 ) */

/*  where d and e denote diagonal and off-diagonal elements of B, vi */
/*  denotes an element of the vector defining H(i), and ui an element of */
/*  the vector defining G(i). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input parameters */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --d__;
    --e;
    --tauq;
    --taup;
    --work;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*m)) {
	*info = -4;
    }
    if (*info < 0) {
	i__1 = -(*info);
	xerbla_("CGEBD2", &i__1);
	return 0;
    }

    if (*m >= *n) {

/*        Reduce to upper bidiagonal form */

	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Generate elementary reflector H(i) to annihilate A(i+1:m,i) */

	    i__2 = i__ + i__ * a_dim1;
	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
	    i__2 = *m - i__ + 1;
/* Computing MIN */
	    i__3 = i__ + 1;
	    clarfg_(&i__2, &alpha, &a[min(i__3, *m)+ i__ * a_dim1], &c__1, &
		    tauq[i__]);
	    i__2 = i__;
	    d__[i__2] = alpha.r;
	    i__2 = i__ + i__ * a_dim1;
	    a[i__2].r = 1.f, a[i__2].i = 0.f;

/*           Apply H(i)' to A(i:m,i+1:n) from the left */

	    if (i__ < *n) {
		i__2 = *m - i__ + 1;
		i__3 = *n - i__;
		r_cnjg(&q__1, &tauq[i__]);
		clarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &
			q__1, &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
	    }
	    i__2 = i__ + i__ * a_dim1;
	    i__3 = i__;
	    a[i__2].r = d__[i__3], a[i__2].i = 0.f;

	    if (i__ < *n) {

/*              Generate elementary reflector G(i) to annihilate */
/*              A(i,i+2:n) */

		i__2 = *n - i__;
		clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
		i__2 = i__ + (i__ + 1) * a_dim1;
		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
		i__2 = *n - i__;
/* Computing MIN */
		i__3 = i__ + 2;
		clarfg_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &
			taup[i__]);
		i__2 = i__;
		e[i__2] = alpha.r;
		i__2 = i__ + (i__ + 1) * a_dim1;
		a[i__2].r = 1.f, a[i__2].i = 0.f;

/*              Apply G(i) to A(i+1:m,i+1:n) from the right */

		i__2 = *m - i__;
		i__3 = *n - i__;
		clarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1], 
			lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1], 
			lda, &work[1]);
		i__2 = *n - i__;
		clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
		i__2 = i__ + (i__ + 1) * a_dim1;
		i__3 = i__;
		a[i__2].r = e[i__3], a[i__2].i = 0.f;
	    } else {
		i__2 = i__;
		taup[i__2].r = 0.f, taup[i__2].i = 0.f;
	    }
/* L10: */
	}
    } else {

/*        Reduce to lower bidiagonal form */

	i__1 = *m;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Generate elementary reflector G(i) to annihilate A(i,i+1:n) */

	    i__2 = *n - i__ + 1;
	    clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
	    i__2 = i__ + i__ * a_dim1;
	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
	    i__2 = *n - i__ + 1;
/* Computing MIN */
	    i__3 = i__ + 1;
	    clarfg_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &
		    taup[i__]);
	    i__2 = i__;
	    d__[i__2] = alpha.r;
	    i__2 = i__ + i__ * a_dim1;
	    a[i__2].r = 1.f, a[i__2].i = 0.f;

/*           Apply G(i) to A(i+1:m,i:n) from the right */

	    if (i__ < *m) {
		i__2 = *m - i__;
		i__3 = *n - i__ + 1;
		clarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &
			taup[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
	    }
	    i__2 = *n - i__ + 1;
	    clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
	    i__2 = i__ + i__ * a_dim1;
	    i__3 = i__;
	    a[i__2].r = d__[i__3], a[i__2].i = 0.f;

	    if (i__ < *m) {

/*              Generate elementary reflector H(i) to annihilate */
/*              A(i+2:m,i) */

		i__2 = i__ + 1 + i__ * a_dim1;
		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
		i__2 = *m - i__;
/* Computing MIN */
		i__3 = i__ + 2;
		clarfg_(&i__2, &alpha, &a[min(i__3, *m)+ i__ * a_dim1], &c__1, 
			 &tauq[i__]);
		i__2 = i__;
		e[i__2] = alpha.r;
		i__2 = i__ + 1 + i__ * a_dim1;
		a[i__2].r = 1.f, a[i__2].i = 0.f;

/*              Apply H(i)' to A(i+1:m,i+1:n) from the left */

		i__2 = *m - i__;
		i__3 = *n - i__;
		r_cnjg(&q__1, &tauq[i__]);
		clarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &
			c__1, &q__1, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &
			work[1]);
		i__2 = i__ + 1 + i__ * a_dim1;
		i__3 = i__;
		a[i__2].r = e[i__3], a[i__2].i = 0.f;
	    } else {
		i__2 = i__;
		tauq[i__2].r = 0.f, tauq[i__2].i = 0.f;
	    }
/* L20: */
	}
    }
    return 0;

/*     End of CGEBD2 */

} /* cgebd2_ */
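Since m >= n in the example below, the output B is upper bidiagonal and entirely real, with its diagonal in D and its superdiagonal in E, as described above. This driver is a minimal sketch, assuming "f2c.h" and a link against cgebd2_ and its dependencies.

#include <stdio.h>
#include "f2c.h"   /* assumed to provide integer, real, complex */

extern int cgebd2_(integer *m, integer *n, complex *a, integer *lda,
                   real *d__, real *e, complex *tauq, complex *taup,
                   complex *work, integer *info);

int main(void)
{
    /* 4-by-3 matrix, column-major: m >= n gives an upper bidiagonal B */
    complex a[12];
    real d[3], e[2];                     /* D: min(M,N), E: min(M,N)-1 */
    complex tauq[3], taup[3], work[4];   /* WORK: max(M,N) */
    integer m = 4, n = 3, lda = 4, info;
    integer i, j;

    for (j = 0; j < 3; ++j)
        for (i = 0; i < 4; ++i) {
            a[i + 4 * j].r = (real)(i == j ? 2 : 1);
            a[i + 4 * j].i = (real)(i + j);
        }

    cgebd2_(&m, &n, a, &lda, d, e, tauq, taup, work, &info);

    printf("info = %ld\n", (long)info);
    printf("diag(B):      %g %g %g\n", (double)d[0], (double)d[1], (double)d[2]);
    printf("superdiag(B): %g %g\n", (double)e[0], (double)e[1]);
    return 0;
}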
Example #18
0
/* Subroutine */
int chetd2_(char *uplo, integer *n, complex *a, integer *lda, real *d__, real *e, complex *tau, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    real r__1;
    complex q__1, q__2, q__3, q__4;
    /* Local variables */
    integer i__;
    complex taui;
    extern /* Subroutine */
    int cher2_(char *, integer *, complex *, complex * , integer *, complex *, integer *, complex *, integer *);
    complex alpha;
    extern /* Complex */
    VOID cdotc_f2c_(complex *, integer *, complex *, integer *, complex *, integer *);
    extern logical lsame_(char *, char *);
    extern /* Subroutine */
    int chemv_(char *, integer *, complex *, complex * , integer *, complex *, integer *, complex *, complex *, integer * ), caxpy_(integer *, complex *, complex *, integer *, complex *, integer *);
    logical upper;
    extern /* Subroutine */
    int clarfg_(integer *, complex *, complex *, integer *, complex *), xerbla_(char *, integer *);
    /* -- LAPACK computational routine (version 3.4.2) -- */
    /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
    /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
    /* September 2012 */
    /* .. Scalar Arguments .. */
    /* .. */
    /* .. Array Arguments .. */
    /* .. */
    /* ===================================================================== */
    /* .. Parameters .. */
    /* .. */
    /* .. Local Scalars .. */
    /* .. */
    /* .. External Subroutines .. */
    /* .. */
    /* .. External Functions .. */
    /* .. */
    /* .. Intrinsic Functions .. */
    /* .. */
    /* .. Executable Statements .. */
    /* Test the input parameters */
    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --d__;
    --e;
    --tau;
    /* Function Body */
    *info = 0;
    upper = lsame_(uplo, "U");
    if (! upper && ! lsame_(uplo, "L"))
    {
        *info = -1;
    }
    else if (*n < 0)
    {
        *info = -2;
    }
    else if (*lda < max(1,*n))
    {
        *info = -4;
    }
    if (*info != 0)
    {
        i__1 = -(*info);
        xerbla_("CHETD2", &i__1);
        return 0;
    }
    /* Quick return if possible */
    if (*n <= 0)
    {
        return 0;
    }
    if (upper)
    {
        /* Reduce the upper triangle of A */
        i__1 = *n + *n * a_dim1;
        i__2 = *n + *n * a_dim1;
        r__1 = a[i__2].r;
        a[i__1].r = r__1;
        a[i__1].i = 0.f; // , expr subst
        for (i__ = *n - 1;
                i__ >= 1;
                --i__)
        {
            /* Generate elementary reflector H(i) = I - tau * v * v**H */
            /* to annihilate A(1:i-1,i+1) */
            i__1 = i__ + (i__ + 1) * a_dim1;
            alpha.r = a[i__1].r;
            alpha.i = a[i__1].i; // , expr subst
            clarfg_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &taui);
            i__1 = i__;
            e[i__1] = alpha.r;
            if (taui.r != 0.f || taui.i != 0.f)
            {
                /* Apply H(i) from both sides to A(1:i,1:i) */
                i__1 = i__ + (i__ + 1) * a_dim1;
                a[i__1].r = 1.f;
                a[i__1].i = 0.f; // , expr subst
                /* Compute x := tau * A * v storing x in TAU(1:i) */
                chemv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) * a_dim1 + 1], &c__1, &c_b2, &tau[1], &c__1);
                /* Compute w := x - 1/2 * tau * (x**H * v) * v */
                q__3.r = -.5f;
                q__3.i = -0.f; // , expr subst
                q__2.r = q__3.r * taui.r - q__3.i * taui.i;
                q__2.i = q__3.r * taui.i + q__3.i * taui.r; // , expr subst
                cdotc_f2c_(&q__4, &i__, &tau[1], &c__1, &a[(i__ + 1) * a_dim1 + 1] , &c__1);
                q__1.r = q__2.r * q__4.r - q__2.i * q__4.i;
                q__1.i = q__2.r * q__4.i + q__2.i * q__4.r; // , expr subst
                alpha.r = q__1.r;
                alpha.i = q__1.i; // , expr subst
                caxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[ 1], &c__1);
                /* Apply the transformation as a rank-2 update: */
                /* A := A - v * w**H - w * v**H */
                q__1.r = -1.f;
                q__1.i = -0.f; // , expr subst
                cher2_(uplo, &i__, &q__1, &a[(i__ + 1) * a_dim1 + 1], &c__1, & tau[1], &c__1, &a[a_offset], lda);
            }
            else
            {
                i__1 = i__ + i__ * a_dim1;
                i__2 = i__ + i__ * a_dim1;
                r__1 = a[i__2].r;
                a[i__1].r = r__1;
                a[i__1].i = 0.f; // , expr subst
            }
            i__1 = i__ + (i__ + 1) * a_dim1;
            i__2 = i__;
            a[i__1].r = e[i__2];
            a[i__1].i = 0.f; // , expr subst
            i__1 = i__ + 1;
            i__2 = i__ + 1 + (i__ + 1) * a_dim1;
            d__[i__1] = a[i__2].r;
            i__1 = i__;
            tau[i__1].r = taui.r;
            tau[i__1].i = taui.i; // , expr subst
            /* L10: */
        }
        i__1 = a_dim1 + 1;
        d__[1] = a[i__1].r;
    }
    else
    {
        /* Reduce the lower triangle of A */
        i__1 = a_dim1 + 1;
        i__2 = a_dim1 + 1;
        r__1 = a[i__2].r;
        a[i__1].r = r__1;
        a[i__1].i = 0.f; // , expr subst
        i__1 = *n - 1;
        for (i__ = 1;
                i__ <= i__1;
                ++i__)
        {
            /* Generate elementary reflector H(i) = I - tau * v * v**H */
            /* to annihilate A(i+2:n,i) */
            i__2 = i__ + 1 + i__ * a_dim1;
            alpha.r = a[i__2].r;
            alpha.i = a[i__2].i; // , expr subst
            i__2 = *n - i__;
            /* Computing MIN */
            i__3 = i__ + 2;
            clarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1, & taui);
            i__2 = i__;
            e[i__2] = alpha.r;
            if (taui.r != 0.f || taui.i != 0.f)
            {
                /* Apply H(i) from both sides to A(i+1:n,i+1:n) */
                i__2 = i__ + 1 + i__ * a_dim1;
                a[i__2].r = 1.f;
                a[i__2].i = 0.f; // , expr subst
                /* Compute x := tau * A * v storing x in TAU(i:n-1) */
                i__2 = *n - i__;
                chemv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b2, &tau[ i__], &c__1);
                /* Compute w := x - 1/2 * tau * (x**H * v) * v */
                q__3.r = -.5f;
                q__3.i = -0.f; // , expr subst
                q__2.r = q__3.r * taui.r - q__3.i * taui.i;
                q__2.i = q__3.r * taui.i + q__3.i * taui.r; // , expr subst
                i__2 = *n - i__;
                cdotc_f2c_(&q__4, &i__2, &tau[i__], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
                q__1.r = q__2.r * q__4.r - q__2.i * q__4.i;
                q__1.i = q__2.r * q__4.i + q__2.i * q__4.r; // , expr subst
                alpha.r = q__1.r;
                alpha.i = q__1.i; // , expr subst
                i__2 = *n - i__;
                caxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ i__], &c__1);
                /* Apply the transformation as a rank-2 update: */
                /* A := A - v * w**H - w * v**H */
                i__2 = *n - i__;
                q__1.r = -1.f;
                q__1.i = -0.f; // , expr subst
                cher2_(uplo, &i__2, &q__1, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) * a_dim1], lda);
            }
            else
            {
                i__2 = i__ + 1 + (i__ + 1) * a_dim1;
                i__3 = i__ + 1 + (i__ + 1) * a_dim1;
                r__1 = a[i__3].r;
                a[i__2].r = r__1;
                a[i__2].i = 0.f; // , expr subst
            }
            i__2 = i__ + 1 + i__ * a_dim1;
            i__3 = i__;
            a[i__2].r = e[i__3];
            a[i__2].i = 0.f; // , expr subst
            i__2 = i__;
            i__3 = i__ + i__ * a_dim1;
            d__[i__2] = a[i__3].r;
            i__2 = i__;
            tau[i__2].r = taui.r;
            tau[i__2].i = taui.i; // , expr subst
            /* L20: */
        }
        i__1 = *n;
        i__2 = *n + *n * a_dim1;
        d__[i__1] = a[i__2].r;
    }
    return 0;
    /* End of CHETD2 */
}
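/* --- Hedged usage sketch (an addition to this example, not LAPACK source):
   a minimal driver that reduces a small Hermitian matrix to real
   tridiagonal form with CHETD2.  It assumes the usual f2c types from
   "f2c.h" and the standard CHETD2 argument list (UPLO, N, A, LDA, D, E,
   TAU, INFO); the exact prototype, including any trailing ftnlen
   arguments for the character string, may differ between builds. --- */
#include <stdio.h>
#include "f2c.h"

extern int chetd2_(char *uplo, integer *n, complex *a, integer *lda,
                   real *d__, real *e, complex *tau, integer *info);

int chetd2_demo(void)
{
    /* 3-by-3 Hermitian matrix, column-major; only the upper triangle is
       referenced when UPLO = "U".  Values are illustrative. */
    complex a[9] = {
        {4.f, 0.f}, {0.f, 0.f}, {0.f, 0.f},   /* column 1 */
        {1.f, 1.f}, {3.f, 0.f}, {0.f, 0.f},   /* column 2 */
        {0.f, 2.f}, {1.f,-1.f}, {2.f, 0.f}    /* column 3 */
    };
    integer n = 3, lda = 3, info = 0;
    real d[3], e[2];
    complex tau[2];

    chetd2_("U", &n, a, &lda, d, e, tau, &info);
    printf("info = %ld, diag = %f %f %f\n", (long) info, d[0], d[1], d[2]);
    return (int) info;
}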
Example #19
0
/* Subroutine */ int claqr2_(logical *wantt, logical *wantz, integer *n, 
	integer *ktop, integer *kbot, integer *nw, complex *h__, integer *ldh, 
	 integer *iloz, integer *ihiz, complex *z__, integer *ldz, integer *
	ns, integer *nd, complex *sh, complex *v, integer *ldv, integer *nh, 
	complex *t, integer *ldt, integer *nv, complex *wv, integer *ldwv, 
	complex *work, integer *lwork)
{
    /* System generated locals */
    integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1, 
	    wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
    real r__1, r__2, r__3, r__4, r__5, r__6;
    complex q__1, q__2;

    /* Builtin functions */
    double r_imag(complex *);
    void r_cnjg(complex *, complex *);

    /* Local variables */
    integer i__, j;
    complex s;
    integer jw;
    real foo;
    integer kln;
    complex tau;
    integer knt;
    real ulp;
    integer lwk1, lwk2;
    complex beta;
    integer kcol, info, ifst, ilst, ltop, krow;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
, integer *, complex *, complex *, integer *, complex *), 
	    cgemm_(char *, char *, integer *, integer *, integer *, complex *, 
	     complex *, integer *, complex *, integer *, complex *, complex *, 
	     integer *), ccopy_(integer *, complex *, integer 
	    *, complex *, integer *);
    integer infqr, kwtop;
    extern /* Subroutine */ int slabad_(real *, real *), cgehrd_(integer *, 
	    integer *, integer *, complex *, integer *, complex *, complex *, 
	    integer *, integer *), clarfg_(integer *, complex *, complex *, 
	    integer *, complex *);
    extern doublereal slamch_(char *);
    extern /* Subroutine */ int clahqr_(logical *, logical *, integer *, 
	    integer *, integer *, complex *, integer *, complex *, integer *, 
	    integer *, complex *, integer *, integer *), clacpy_(char *, 
	    integer *, integer *, complex *, integer *, complex *, integer *), claset_(char *, integer *, integer *, complex *, complex 
	    *, complex *, integer *);
    real safmin, safmax;
    extern /* Subroutine */ int ctrexc_(char *, integer *, complex *, integer 
	    *, complex *, integer *, integer *, integer *, integer *),
	     cunmhr_(char *, char *, integer *, integer *, integer *, integer 
	    *, complex *, integer *, complex *, complex *, integer *, complex 
	    *, integer *, integer *);
    real smlnum;
    integer lwkopt;


/*  -- LAPACK auxiliary routine (version 3.2.1)                        -- */
/*     Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd.. */
/*  -- April 2009                                                      -- */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*     This subroutine is identical to CLAQR3 except that it avoids */
/*     recursion by calling CLAHQR instead of CLAQR4. */


/*     ****************************************************************** */
/*     Aggressive early deflation: */

/*     This subroutine accepts as input an upper Hessenberg matrix */
/*     H and performs a unitary similarity transformation */
/*     designed to detect and deflate fully converged eigenvalues from */
/*     a trailing principal submatrix.  On output H has been over- */
/*     written by a new Hessenberg matrix that is a perturbation of */
/*     a unitary similarity transformation of H.  It is to be */
/*     hoped that the final version of H has many zero subdiagonal */
/*     entries. */

/*     ****************************************************************** */
/*     WANTT   (input) LOGICAL */
/*          If .TRUE., then the Hessenberg matrix H is fully updated */
/*          so that the triangular Schur factor may be */
/*          computed (in cooperation with the calling subroutine). */
/*          If .FALSE., then only enough of H is updated to preserve */
/*          the eigenvalues. */

/*     WANTZ   (input) LOGICAL */
/*          If .TRUE., then the unitary matrix Z is updated so */
/*          that the unitary Schur factor may be computed */
/*          (in cooperation with the calling subroutine). */
/*          If .FALSE., then Z is not referenced. */

/*     N       (input) INTEGER */
/*          The order of the matrix H and (if WANTZ is .TRUE.) the */
/*          order of the unitary matrix Z. */

/*     KTOP    (input) INTEGER */
/*          It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. */
/*          KBOT and KTOP together determine an isolated block */
/*          along the diagonal of the Hessenberg matrix. */

/*     KBOT    (input) INTEGER */
/*          It is assumed without a check that either */
/*          KBOT = N or H(KBOT+1,KBOT)=0.  KBOT and KTOP together */
/*          determine an isolated block along the diagonal of the */
/*          Hessenberg matrix. */

/*     NW      (input) INTEGER */
/*          Deflation window size.  1 .LE. NW .LE. (KBOT-KTOP+1). */

/*     H       (input/output) COMPLEX array, dimension (LDH,N) */
/*          On input the initial N-by-N section of H stores the */
/*          Hessenberg matrix undergoing aggressive early deflation. */
/*          On output H has been transformed by a unitary */
/*          similarity transformation, perturbed, and then returned */
/*          to Hessenberg form that (it is to be hoped) has some */
/*          zero subdiagonal entries. */

/*     LDH     (input) integer */
/*          Leading dimension of H just as declared in the calling */
/*          subroutine.  N .LE. LDH */

/*     ILOZ    (input) INTEGER */
/*     IHIZ    (input) INTEGER */
/*          Specify the rows of Z to which transformations must be */
/*          applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. */

/*     Z       (input/output) COMPLEX array, dimension (LDZ,N) */
/*          IF WANTZ is .TRUE., then on output, the unitary */
/*          similarity transformation mentioned above has been */
/*          accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. */
/*          If WANTZ is .FALSE., then Z is unreferenced. */

/*     LDZ     (input) integer */
/*          The leading dimension of Z just as declared in the */
/*          calling subroutine.  1 .LE. LDZ. */

/*     NS      (output) integer */
/*          The number of unconverged (i.e., approximate) eigenvalues */
/*          returned in SH that may be used as shifts by the */
/*          calling subroutine. */

/*     ND      (output) integer */
/*          The number of converged eigenvalues uncovered by this */
/*          subroutine. */

/*     SH      (output) COMPLEX array, dimension KBOT */
/*          On output, approximate eigenvalues that may */
/*          be used for shifts are stored in SH(KBOT-ND-NS+1) */
/*          through SH(KBOT-ND).  Converged eigenvalues are */
/*          stored in SH(KBOT-ND+1) through SH(KBOT). */

/*     V       (workspace) COMPLEX array, dimension (LDV,NW) */
/*          An NW-by-NW work array. */

/*     LDV     (input) integer scalar */
/*          The leading dimension of V just as declared in the */
/*          calling subroutine.  NW .LE. LDV */

/*     NH      (input) integer scalar */
/*          The number of columns of T.  NH.GE.NW. */

/*     T       (workspace) COMPLEX array, dimension (LDT,NW) */

/*     LDT     (input) integer */
/*          The leading dimension of T just as declared in the */
/*          calling subroutine.  NW .LE. LDT */

/*     NV      (input) integer */
/*          The number of rows of work array WV available for */
/*          workspace.  NV.GE.NW. */

/*     WV      (workspace) COMPLEX array, dimension (LDWV,NW) */

/*     LDWV    (input) integer */
/*          The leading dimension of WV just as declared in the */
/*          calling subroutine.  NW .LE. LDWV */

/*     WORK    (workspace) COMPLEX array, dimension LWORK. */
/*          On exit, WORK(1) is set to an estimate of the optimal value */
/*          of LWORK for the given values of N, NW, KTOP and KBOT. */

/*     LWORK   (input) integer */
/*          The dimension of the work array WORK.  LWORK = 2*NW */
/*          suffices, but greater efficiency may result from larger */
/*          values of LWORK. */

/*          If LWORK = -1, then a workspace query is assumed; CLAQR2 */
/*          only estimates the optimal workspace size for the given */
/*          values of N, NW, KTOP and KBOT.  The estimate is returned */
/*          in WORK(1).  No error message related to LWORK is issued */
/*          by XERBLA.  Neither H nor Z are accessed. */

/*     ================================================================ */
/*     Based on contributions by */
/*        Karen Braman and Ralph Byers, Department of Mathematics, */
/*        University of Kansas, USA */

/*     ================================================================ */
/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Statement Functions .. */
/*     .. */
/*     .. Statement Function definitions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     ==== Estimate optimal workspace. ==== */

    /* Parameter adjustments */
    h_dim1 = *ldh;
    h_offset = 1 + h_dim1;
    h__ -= h_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;
    --sh;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;
    t_dim1 = *ldt;
    t_offset = 1 + t_dim1;
    t -= t_offset;
    wv_dim1 = *ldwv;
    wv_offset = 1 + wv_dim1;
    wv -= wv_offset;
    --work;

    /* Function Body */
/* Computing MIN */
    i__1 = *nw, i__2 = *kbot - *ktop + 1;
    jw = min(i__1,i__2);
    if (jw <= 2) {
	lwkopt = 1;
    } else {

/*        ==== Workspace query call to CGEHRD ==== */

	i__1 = jw - 1;
	cgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], &
		c_n1, &info);
	lwk1 = (integer) work[1].r;

/*        ==== Workspace query call to CUNMHR ==== */

	i__1 = jw - 1;
	cunmhr_("R", "N", &jw, &jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], 
		 &v[v_offset], ldv, &work[1], &c_n1, &info);
	lwk2 = (integer) work[1].r;

/*        ==== Optimal workspace ==== */

	lwkopt = jw + max(lwk1,lwk2);
    }

/*     ==== Quick return in case of workspace query. ==== */

    if (*lwork == -1) {
	r__1 = (real) lwkopt;
	q__1.r = r__1, q__1.i = 0.f;
	work[1].r = q__1.r, work[1].i = q__1.i;
	return 0;
    }

/*     ==== Nothing to do ... */
/*     ... for an empty active block ... ==== */
    *ns = 0;
    *nd = 0;
    work[1].r = 1.f, work[1].i = 0.f;
    if (*ktop > *kbot) {
	return 0;
    }
/*     ... nor for an empty deflation window. ==== */
    if (*nw < 1) {
	return 0;
    }

/*     ==== Machine constants ==== */

    safmin = slamch_("SAFE MINIMUM");
    safmax = 1.f / safmin;
    slabad_(&safmin, &safmax);
    ulp = slamch_("PRECISION");
    smlnum = safmin * ((real) (*n) / ulp);

/*     ==== Setup deflation window ==== */

/* Computing MIN */
    i__1 = *nw, i__2 = *kbot - *ktop + 1;
    jw = min(i__1,i__2);
    kwtop = *kbot - jw + 1;
    if (kwtop == *ktop) {
	s.r = 0.f, s.i = 0.f;
    } else {
	i__1 = kwtop + (kwtop - 1) * h_dim1;
	s.r = h__[i__1].r, s.i = h__[i__1].i;
    }

    if (*kbot == kwtop) {

/*        ==== 1-by-1 deflation window: not much to do ==== */

	i__1 = kwtop;
	i__2 = kwtop + kwtop * h_dim1;
	sh[i__1].r = h__[i__2].r, sh[i__1].i = h__[i__2].i;
	*ns = 1;
	*nd = 0;
/* Computing MAX */
	i__1 = kwtop + kwtop * h_dim1;
	r__5 = smlnum, r__6 = ulp * ((r__1 = h__[i__1].r, dabs(r__1)) + (r__2 
		= r_imag(&h__[kwtop + kwtop * h_dim1]), dabs(r__2)));
	if ((r__3 = s.r, dabs(r__3)) + (r__4 = r_imag(&s), dabs(r__4)) <= 
		dmax(r__5,r__6)) {
	    *ns = 0;
	    *nd = 1;
	    if (kwtop > *ktop) {
		i__1 = kwtop + (kwtop - 1) * h_dim1;
		h__[i__1].r = 0.f, h__[i__1].i = 0.f;
	    }
	}
	work[1].r = 1.f, work[1].i = 0.f;
	return 0;
    }

/*     ==== Convert to spike-triangular form.  (In case of a */
/*     .    rare QR failure, this routine continues to do */
/*     .    aggressive early deflation using that part of */
/*     .    the deflation window that converged using INFQR */
/*     .    here and there to keep track.) ==== */

    clacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset], 
	    ldt);
    i__1 = jw - 1;
    i__2 = *ldh + 1;
    i__3 = *ldt + 1;
    ccopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], &
	    i__3);

    claset_("A", &jw, &jw, &c_b1, &c_b2, &v[v_offset], ldv);
    clahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sh[kwtop], 
	    &c__1, &jw, &v[v_offset], ldv, &infqr);

/*     ==== Deflation detection loop ==== */

    *ns = jw;
    ilst = infqr + 1;
    i__1 = jw;
    for (knt = infqr + 1; knt <= i__1; ++knt) {

/*        ==== Small spike tip deflation test ==== */

	i__2 = *ns + *ns * t_dim1;
	foo = (r__1 = t[i__2].r, dabs(r__1)) + (r__2 = r_imag(&t[*ns + *ns * 
		t_dim1]), dabs(r__2));
	if (foo == 0.f) {
	    foo = (r__1 = s.r, dabs(r__1)) + (r__2 = r_imag(&s), dabs(r__2));
	}
	i__2 = *ns * v_dim1 + 1;
/* Computing MAX */
	r__5 = smlnum, r__6 = ulp * foo;
	if (((r__1 = s.r, dabs(r__1)) + (r__2 = r_imag(&s), dabs(r__2))) * ((
		r__3 = v[i__2].r, dabs(r__3)) + (r__4 = r_imag(&v[*ns * 
		v_dim1 + 1]), dabs(r__4))) <= dmax(r__5,r__6)) {

/*           ==== One more converged eigenvalue ==== */

	    --(*ns);
	} else {

/*           ==== One undeflatable eigenvalue.  Move it up out of the */
/*           .    way.   (CTREXC can not fail in this case.) ==== */

	    ifst = *ns;
	    ctrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &
		    ilst, &info);
	    ++ilst;
	}
/* L10: */
    }

/*        ==== Return to Hessenberg form ==== */

    if (*ns == 0) {
	s.r = 0.f, s.i = 0.f;
    }

    if (*ns < jw) {

/*        ==== sorting the diagonal of T improves accuracy for */
/*        .    graded matrices.  ==== */

	i__1 = *ns;
	for (i__ = infqr + 1; i__ <= i__1; ++i__) {
	    ifst = i__;
	    i__2 = *ns;
	    for (j = i__ + 1; j <= i__2; ++j) {
		i__3 = j + j * t_dim1;
		i__4 = ifst + ifst * t_dim1;
		if ((r__1 = t[i__3].r, dabs(r__1)) + (r__2 = r_imag(&t[j + j *
			 t_dim1]), dabs(r__2)) > (r__3 = t[i__4].r, dabs(r__3)
			) + (r__4 = r_imag(&t[ifst + ifst * t_dim1]), dabs(
			r__4))) {
		    ifst = j;
		}
/* L20: */
	    }
	    ilst = i__;
	    if (ifst != ilst) {
		ctrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, 
			 &ilst, &info);
	    }
/* L30: */
	}
    }

/*     ==== Restore shift/eigenvalue array from T ==== */

    i__1 = jw;
    for (i__ = infqr + 1; i__ <= i__1; ++i__) {
	i__2 = kwtop + i__ - 1;
	i__3 = i__ + i__ * t_dim1;
	sh[i__2].r = t[i__3].r, sh[i__2].i = t[i__3].i;
/* L40: */
    }


    if (*ns < jw || s.r == 0.f && s.i == 0.f) {
	if (*ns > 1 && (s.r != 0.f || s.i != 0.f)) {

/*           ==== Reflect spike back into lower triangle ==== */

	    ccopy_(ns, &v[v_offset], ldv, &work[1], &c__1);
	    i__1 = *ns;
	    for (i__ = 1; i__ <= i__1; ++i__) {
		i__2 = i__;
		r_cnjg(&q__1, &work[i__]);
		work[i__2].r = q__1.r, work[i__2].i = q__1.i;
/* L50: */
	    }
	    beta.r = work[1].r, beta.i = work[1].i;
	    clarfg_(ns, &beta, &work[2], &c__1, &tau);
	    work[1].r = 1.f, work[1].i = 0.f;

	    i__1 = jw - 2;
	    i__2 = jw - 2;
	    claset_("L", &i__1, &i__2, &c_b1, &c_b1, &t[t_dim1 + 3], ldt);

	    r_cnjg(&q__1, &tau);
	    clarf_("L", ns, &jw, &work[1], &c__1, &q__1, &t[t_offset], ldt, &
		    work[jw + 1]);
	    clarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, &
		    work[jw + 1]);
	    clarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, &
		    work[jw + 1]);

	    i__1 = *lwork - jw;
	    cgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1]
, &i__1, &info);
	}

/*        ==== Copy updated reduced window into place ==== */

	if (kwtop > 1) {
	    i__1 = kwtop + (kwtop - 1) * h_dim1;
	    r_cnjg(&q__2, &v[v_dim1 + 1]);
	    q__1.r = s.r * q__2.r - s.i * q__2.i, q__1.i = s.r * q__2.i + s.i 
		    * q__2.r;
	    h__[i__1].r = q__1.r, h__[i__1].i = q__1.i;
	}
	clacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1]
, ldh);
	i__1 = jw - 1;
	i__2 = *ldt + 1;
	i__3 = *ldh + 1;
	ccopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1], 
		 &i__3);

/*        ==== Accumulate orthogonal matrix in order to update */
/*        .    H and Z, if requested.  ==== */

	if (*ns > 1 && (s.r != 0.f || s.i != 0.f)) {
	    i__1 = *lwork - jw;
	    cunmhr_("R", "N", &jw, ns, &c__1, ns, &t[t_offset], ldt, &work[1], 
		     &v[v_offset], ldv, &work[jw + 1], &i__1, &info);
	}

/*        ==== Update vertical slab in H ==== */

	if (*wantt) {
	    ltop = 1;
	} else {
	    ltop = *ktop;
	}
	i__1 = kwtop - 1;
	i__2 = *nv;
	for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow += 
		i__2) {
/* Computing MIN */
	    i__3 = *nv, i__4 = kwtop - krow;
	    kln = min(i__3,i__4);
	    cgemm_("N", "N", &kln, &jw, &jw, &c_b2, &h__[krow + kwtop * 
		    h_dim1], ldh, &v[v_offset], ldv, &c_b1, &wv[wv_offset], 
		    ldwv);
	    clacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop * 
		    h_dim1], ldh);
/* L60: */
	}

/*        ==== Update horizontal slab in H ==== */

	if (*wantt) {
	    i__2 = *n;
	    i__1 = *nh;
	    for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2; 
		    kcol += i__1) {
/* Computing MIN */
		i__3 = *nh, i__4 = *n - kcol + 1;
		kln = min(i__3,i__4);
		cgemm_("C", "N", &jw, &kln, &jw, &c_b2, &v[v_offset], ldv, &
			h__[kwtop + kcol * h_dim1], ldh, &c_b1, &t[t_offset], 
			ldt);
		clacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol *
			 h_dim1], ldh);
/* L70: */
	    }
	}

/*        ==== Update vertical slab in Z ==== */

	if (*wantz) {
	    i__1 = *ihiz;
	    i__2 = *nv;
	    for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow +=
		     i__2) {
/* Computing MIN */
		i__3 = *nv, i__4 = *ihiz - krow + 1;
		kln = min(i__3,i__4);
		cgemm_("N", "N", &kln, &jw, &jw, &c_b2, &z__[krow + kwtop * 
			z_dim1], ldz, &v[v_offset], ldv, &c_b1, &wv[wv_offset]
, ldwv);
		clacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow + 
			kwtop * z_dim1], ldz);
/* L80: */
	    }
	}
    }

/*     ==== Return the number of deflations ... ==== */

    *nd = jw - *ns;

/*     ==== ... and the number of shifts. (Subtracting */
/*     .    INFQR from the spike length takes care */
/*     .    of the case of a rare QR failure while */
/*     .    calculating eigenvalues of the deflation */
/*     .    window.)  ==== */

    *ns -= infqr;

/*      ==== Return optimal workspace. ==== */

    r__1 = (real) lwkopt;
    q__1.r = r__1, q__1.i = 0.f;
    work[1].r = q__1.r, work[1].i = q__1.i;

/*     ==== End of CLAQR2 ==== */

    return 0;
} /* claqr2_ */
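/* --- Hedged sketch (an addition, not LAPACK source): the LWORK = -1
   workspace-query convention documented above for CLAQR2.  In query mode
   only WORK(1) is written, so a single complex suffices for WORK; the
   wrapper name and argument plumbing are illustrative. --- */
#include "f2c.h"

extern int claqr2_(logical *wantt, logical *wantz, integer *n,
                   integer *ktop, integer *kbot, integer *nw,
                   complex *h, integer *ldh, integer *iloz, integer *ihiz,
                   complex *z, integer *ldz, integer *ns, integer *nd,
                   complex *sh, complex *v, integer *ldv, integer *nh,
                   complex *t, integer *ldt, integer *nv, complex *wv,
                   integer *ldwv, complex *work, integer *lwork);

integer claqr2_optimal_lwork(logical *wantt, logical *wantz, integer *n,
                             integer *ktop, integer *kbot, integer *nw,
                             complex *h, integer *ldh, integer *iloz,
                             integer *ihiz, complex *z, integer *ldz,
                             complex *sh, complex *v, integer *ldv,
                             integer *nh, complex *t, integer *ldt,
                             integer *nv, complex *wv, integer *ldwv)
{
    complex wkopt;
    integer ns = 0, nd = 0, lwork = -1;

    /* Workspace query: neither H nor Z is accessed, and WORK(1) receives
       the optimal LWORK estimate for the given N, NW, KTOP and KBOT. */
    claqr2_(wantt, wantz, n, ktop, kbot, nw, h, ldh, iloz, ihiz, z, ldz,
            &ns, &nd, sh, v, ldv, nh, t, ldt, nv, wv, ldwv, &wkopt, &lwork);
    return (integer) wkopt.r;
}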
Example #20
0
/* Subroutine */ int clatrz_(integer *m, integer *n, integer *l, complex *a, 
	integer *lda, complex *tau, complex *work)
{
/*  -- LAPACK routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   


    Purpose   
    =======   

    CLATRZ factors the M-by-(M+L) complex upper trapezoidal matrix   
    [ A1 A2 ] = [ A(1:M,1:M) A(1:M,N-L+1:N) ] as ( R  0 ) * Z by means   
    of unitary transformations, where  Z is an (M+L)-by-(M+L) unitary   
    matrix and R and A1 are M-by-M upper triangular matrices.   

    Arguments   
    =========   

    M       (input) INTEGER   
            The number of rows of the matrix A.  M >= 0.   

    N       (input) INTEGER   
            The number of columns of the matrix A.  N >= 0.   

    L       (input) INTEGER   
            The number of columns of the matrix A containing the   
            meaningful part of the Householder vectors. N-M >= L >= 0.   

    A       (input/output) COMPLEX array, dimension (LDA,N)   
            On entry, the leading M-by-N upper trapezoidal part of the   
            array A must contain the matrix to be factorized.   
            On exit, the leading M-by-M upper triangular part of A   
            contains the upper triangular matrix R, and elements N-L+1 to   
            N of the first M rows of A, with the array TAU, represent the   
            unitary matrix Z as a product of M elementary reflectors.   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= max(1,M).   

    TAU     (output) COMPLEX array, dimension (M)   
            The scalar factors of the elementary reflectors.   

    WORK    (workspace) COMPLEX array, dimension (M)   

    Further Details   
    ===============   

    Based on contributions by   
      A. Petitet, Computer Science Dept., Univ. of Tenn., Knoxville, USA   

    The factorization is obtained by Householder's method.  The kth   
    transformation matrix, Z( k ), which is used to introduce zeros into   
    the ( m - k + 1 )th row of A, is given in the form   

       Z( k ) = ( I     0   ),   
                ( 0  T( k ) )   

    where   

       T( k ) = I - tau*u( k )*u( k )',   u( k ) = (   1    ),   
                                                   (   0    )   
                                                   ( z( k ) )   

    tau is a scalar and z( k ) is an l element vector. tau and z( k )   
    are chosen to annihilate the elements of the kth row of A2.   

    The scalar tau is returned in the kth element of TAU and the vector   
    u( k ) in the kth row of A2, such that the elements of z( k ) are   
    in  a( k, n - l + 1 ), ..., a( k, n ). The elements of R are returned in   
    the upper triangular part of A1.   

    Z is given by   

       Z =  Z( 1 ) * Z( 2 ) * ... * Z( m ).   

    =====================================================================   


       Quick return if possible   

       Parameter adjustments */
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2;
    complex q__1;
    /* Builtin functions */
    void r_cnjg(complex *, complex *);
    /* Local variables */
    static integer i__;
    static complex alpha;
    extern /* Subroutine */ int clarz_(char *, integer *, integer *, integer *
	    , complex *, integer *, complex *, complex *, integer *, complex *
	    ), clarfg_(integer *, complex *, complex *, integer *, 
	    complex *), clacgv_(integer *, complex *, integer *);
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]

    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;

    /* Function Body */
    if (*m == 0) {
	return 0;
    } else if (*m == *n) {
	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    i__2 = i__;
	    tau[i__2].r = 0.f, tau[i__2].i = 0.f;
/* L10: */
	}
	return 0;
    }

    for (i__ = *m; i__ >= 1; --i__) {

/*        Generate elementary reflector H(i) to annihilate   
          [ A(i,i) A(i,n-l+1:n) ] */

	clacgv_(l, &a_ref(i__, *n - *l + 1), lda);
	r_cnjg(&q__1, &a_ref(i__, i__));
	alpha.r = q__1.r, alpha.i = q__1.i;
	i__1 = *l + 1;
	clarfg_(&i__1, &alpha, &a_ref(i__, *n - *l + 1), lda, &tau[i__]);
	i__1 = i__;
	r_cnjg(&q__1, &tau[i__]);
	tau[i__1].r = q__1.r, tau[i__1].i = q__1.i;

/*        Apply H(i) to A(1:i-1,i:n) from the right */

	i__1 = i__ - 1;
	i__2 = *n - i__ + 1;
	r_cnjg(&q__1, &tau[i__]);
	clarz_("Right", &i__1, &i__2, l, &a_ref(i__, *n - *l + 1), lda, &q__1,
		 &a_ref(1, i__), lda, &work[1]);
	i__1 = a_subscr(i__, i__);
	r_cnjg(&q__1, &alpha);
	a[i__1].r = q__1.r, a[i__1].i = q__1.i;

/* L20: */
    }

    return 0;

/*     End of CLATRZ */

} /* clatrz_ */
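/* --- Hedged usage sketch (an addition, not LAPACK source): factor a
   2-by-4 upper trapezoidal matrix as ( R 0 ) * Z with CLATRZ.  Here
   M = 2, N = 4, L = N - M = 2; storage is column-major and all numeric
   values are illustrative. --- */
#include "f2c.h"

extern int clatrz_(integer *m, integer *n, integer *l, complex *a,
                   integer *lda, complex *tau, complex *work);

void clatrz_demo(void)
{
    integer m = 2, n = 4, l = 2, lda = 2;
    complex a[8] = {
        {2.f, 0.f}, {0.f, 0.f},    /* column 1 */
        {1.f, 1.f}, {3.f, 0.f},    /* column 2 */
        {0.f, 1.f}, {1.f, 0.f},    /* column 3 */
        {1.f, 0.f}, {0.f, 2.f}     /* column 4 */
    };
    complex tau[2], work[2];

    clatrz_(&m, &n, &l, a, &lda, tau, work);
    /* On return, A(1:2,1:2) holds R and the last L columns of A, together
       with TAU, encode the unitary matrix Z as described above. */
}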
Example #21
0
/* Subroutine */ int cgerq2_(integer *m, integer *n, complex *a, integer *lda,
	 complex *tau, complex *work, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2;

    /* Local variables */
    static integer i__, k;
    static complex alpha;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
	    , integer *, complex *, complex *, integer *, complex *, ftnlen), 
	    clarfg_(integer *, complex *, complex *, integer *, complex *), 
	    clacgv_(integer *, complex *, integer *), xerbla_(char *, integer 
	    *, ftnlen);


/*  -- LAPACK routine (version 3.0) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */
/*     Courant Institute, Argonne National Lab, and Rice University */
/*     September 30, 1994 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CGERQ2 computes an RQ factorization of a complex m by n matrix A: */
/*  A = R * Q. */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A.  M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A.  N >= 0. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the m by n matrix A. */
/*          On exit, if m <= n, the upper triangle of the subarray */
/*          A(1:m,n-m+1:n) contains the m by m upper triangular matrix R; */
/*          if m >= n, the elements on and above the (m-n)-th subdiagonal */
/*          contain the m by n upper trapezoidal matrix R; the remaining */
/*          elements, with the array TAU, represent the unitary matrix */
/*          Q as a product of elementary reflectors (see Further */
/*          Details). */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,M). */

/*  TAU     (output) COMPLEX array, dimension (min(M,N)) */
/*          The scalar factors of the elementary reflectors (see Further */
/*          Details). */

/*  WORK    (workspace) COMPLEX array, dimension (M) */

/*  INFO    (output) INTEGER */
/*          = 0: successful exit */
/*          < 0: if INFO = -i, the i-th argument had an illegal value */

/*  Further Details */
/*  =============== */

/*  The matrix Q is represented as a product of elementary reflectors */

/*     Q = H(1)' H(2)' . . . H(k)', where k = min(m,n). */

/*  Each H(i) has the form */

/*     H(i) = I - tau * v * v' */

/*  where tau is a complex scalar, and v is a complex vector with */
/*  v(n-k+i+1:n) = 0 and v(n-k+i) = 1; conjg(v(1:n-k+i-1)) is stored on */
/*  exit in A(m-k+i,1:n-k+i-1), and tau in TAU(i). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input arguments */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --tau;
    --work;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*m)) {
	*info = -4;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGERQ2", &i__1, (ftnlen)6);
	return 0;
    }

    k = min(*m,*n);

    for (i__ = k; i__ >= 1; --i__) {

/*        Generate elementary reflector H(i) to annihilate */
/*        A(m-k+i,1:n-k+i-1) */

	i__1 = *n - k + i__;
	clacgv_(&i__1, &a[*m - k + i__ + a_dim1], lda);
	i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
	alpha.r = a[i__1].r, alpha.i = a[i__1].i;
	i__1 = *n - k + i__;
	clarfg_(&i__1, &alpha, &a[*m - k + i__ + a_dim1], lda, &tau[i__]);

/*        Apply H(i) to A(1:m-k+i-1,1:n-k+i) from the right */

	i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
	a[i__1].r = 1.f, a[i__1].i = 0.f;
	i__1 = *m - k + i__ - 1;
	i__2 = *n - k + i__;
	clarf_("Right", &i__1, &i__2, &a[*m - k + i__ + a_dim1], lda, &tau[
		i__], &a[a_offset], lda, &work[1], (ftnlen)5);
	i__1 = *m - k + i__ + (*n - k + i__) * a_dim1;
	a[i__1].r = alpha.r, a[i__1].i = alpha.i;
	i__1 = *n - k + i__ - 1;
	clacgv_(&i__1, &a[*m - k + i__ + a_dim1], lda);
/* L10: */
    }
    return 0;

/*     End of CGERQ2 */

} /* cgerq2_ */
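/* --- Hedged usage sketch (an addition, not LAPACK source): RQ
   factorization of a 2-by-3 complex matrix with CGERQ2.  Storage is
   column-major; the matrix entries are illustrative. --- */
#include "f2c.h"

extern int cgerq2_(integer *m, integer *n, complex *a, integer *lda,
                   complex *tau, complex *work, integer *info);

integer cgerq2_demo(void)
{
    integer m = 2, n = 3, lda = 2, info = 0;
    complex a[6] = {
        {1.f, 0.f}, {0.f, 1.f},   /* column 1 */
        {2.f, 1.f}, {1.f, 0.f},   /* column 2 */
        {0.f, 0.f}, {3.f,-1.f}    /* column 3 */
    };
    complex tau[2], work[2];

    cgerq2_(&m, &n, a, &lda, tau, work, &info);
    /* With m <= n, the upper triangle of A(1:2,2:3) now holds R; the
       remaining elements and TAU represent Q (see the comments above). */
    return info;
}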
Example #22
0
/* Subroutine */
int clatrd_(char *uplo, integer *n, integer *nb, complex *a, integer *lda, real *e, complex *tau, complex *w, integer *ldw)
{
    /* System generated locals */
    integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
    real r__1;
    complex q__1, q__2, q__3, q__4;
    /* Local variables */
    integer i__, iw;
    complex alpha;
    extern /* Subroutine */
    int cscal_(integer *, complex *, complex *, integer *);
    extern /* Complex */
    VOID cdotc_f2c_(complex *, integer *, complex *, integer *, complex *, integer *);
    extern /* Subroutine */
    int cgemv_(char *, integer *, integer *, complex * , complex *, integer *, complex *, integer *, complex *, complex * , integer *), chemv_(char *, integer *, complex *, complex *, integer *, complex *, integer *, complex *, complex *, integer *);
    extern logical lsame_(char *, char *);
    extern /* Subroutine */
    int caxpy_(integer *, complex *, complex *, integer *, complex *, integer *), clarfg_(integer *, complex *, complex *, integer *, complex *), clacgv_(integer *, complex *, integer *);
    /* -- LAPACK auxiliary routine (version 3.4.2) -- */
    /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
    /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
    /* September 2012 */
    /* .. Scalar Arguments .. */
    /* .. */
    /* .. Array Arguments .. */
    /* .. */
    /* ===================================================================== */
    /* .. Parameters .. */
    /* .. */
    /* .. Local Scalars .. */
    /* .. */
    /* .. External Subroutines .. */
    /* .. */
    /* .. External Functions .. */
    /* .. */
    /* .. Intrinsic Functions .. */
    /* .. */
    /* .. Executable Statements .. */
    /* Quick return if possible */
    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --e;
    --tau;
    w_dim1 = *ldw;
    w_offset = 1 + w_dim1;
    w -= w_offset;
    /* Function Body */
    if (*n <= 0)
    {
        return 0;
    }
    if (lsame_(uplo, "U"))
    {
        /* Reduce last NB columns of upper triangle */
        i__1 = *n - *nb + 1;
        for (i__ = *n;
                i__ >= i__1;
                --i__)
        {
            iw = i__ - *n + *nb;
            if (i__ < *n)
            {
                /* Update A(1:i,i) */
                i__2 = i__ + i__ * a_dim1;
                i__3 = i__ + i__ * a_dim1;
                r__1 = a[i__3].r;
                a[i__2].r = r__1;
                a[i__2].i = 0.f; // , expr subst
                i__2 = *n - i__;
                clacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
                i__2 = *n - i__;
                q__1.r = -1.f;
                q__1.i = -0.f; // , expr subst
                cgemv_("No transpose", &i__, &i__2, &q__1, &a[(i__ + 1) * a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, & c_b2, &a[i__ * a_dim1 + 1], &c__1);
                i__2 = *n - i__;
                clacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
                i__2 = *n - i__;
                clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
                i__2 = *n - i__;
                q__1.r = -1.f;
                q__1.i = -0.f; // , expr subst
                cgemv_("No transpose", &i__, &i__2, &q__1, &w[(iw + 1) * w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, & c_b2, &a[i__ * a_dim1 + 1], &c__1);
                i__2 = *n - i__;
                clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
                i__2 = i__ + i__ * a_dim1;
                i__3 = i__ + i__ * a_dim1;
                r__1 = a[i__3].r;
                a[i__2].r = r__1;
                a[i__2].i = 0.f; // , expr subst
            }
            if (i__ > 1)
            {
                /* Generate elementary reflector H(i) to annihilate */
                /* A(1:i-2,i) */
                i__2 = i__ - 1 + i__ * a_dim1;
                alpha.r = a[i__2].r;
                alpha.i = a[i__2].i; // , expr subst
                i__2 = i__ - 1;
                clarfg_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &tau[i__ - 1]);
                i__2 = i__ - 1;
                e[i__2] = alpha.r;
                i__2 = i__ - 1 + i__ * a_dim1;
                a[i__2].r = 1.f;
                a[i__2].i = 0.f; // , expr subst
                /* Compute W(1:i-1,i) */
                i__2 = i__ - 1;
                chemv_("Upper", &i__2, &c_b2, &a[a_offset], lda, &a[i__ * a_dim1 + 1], &c__1, &c_b1, &w[iw * w_dim1 + 1], &c__1);
                if (i__ < *n)
                {
                    i__2 = i__ - 1;
                    i__3 = *n - i__;
                    cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[(iw + 1) * w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], & c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1);
                    i__2 = i__ - 1;
                    i__3 = *n - i__;
                    q__1.r = -1.f;
                    q__1.i = -0.f; // , expr subst
                    cgemv_("No transpose", &i__2, &i__3, &q__1, &a[(i__ + 1) * a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], & c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1);
                    i__2 = i__ - 1;
                    i__3 = *n - i__;
                    cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[( i__ + 1) * a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], &c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1);
                    i__2 = i__ - 1;
                    i__3 = *n - i__;
                    q__1.r = -1.f;
                    q__1.i = -0.f; // , expr subst
                    cgemv_("No transpose", &i__2, &i__3, &q__1, &w[(iw + 1) * w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], & c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1);
                }
                i__2 = i__ - 1;
                cscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
                q__3.r = -.5f;
                q__3.i = -0.f; // , expr subst
                i__2 = i__ - 1;
                q__2.r = q__3.r * tau[i__2].r - q__3.i * tau[i__2].i;
                q__2.i = q__3.r * tau[i__2].i + q__3.i * tau[i__2].r; // , expr subst
                i__3 = i__ - 1;
                cdotc_f2c_(&q__4, &i__3, &w[iw * w_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &c__1);
                q__1.r = q__2.r * q__4.r - q__2.i * q__4.i;
                q__1.i = q__2.r * q__4.i + q__2.i * q__4.r; // , expr subst
                alpha.r = q__1.r;
                alpha.i = q__1.i; // , expr subst
                i__2 = i__ - 1;
                caxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw * w_dim1 + 1], &c__1);
            }
            /* L10: */
        }
    }
    else
    {
        /* Reduce first NB columns of lower triangle */
        i__1 = *nb;
        for (i__ = 1;
                i__ <= i__1;
                ++i__)
        {
            /* Update A(i:n,i) */
            i__2 = i__ + i__ * a_dim1;
            i__3 = i__ + i__ * a_dim1;
            r__1 = a[i__3].r;
            a[i__2].r = r__1;
            a[i__2].i = 0.f; // , expr subst
            i__2 = i__ - 1;
            clacgv_(&i__2, &w[i__ + w_dim1], ldw);
            i__2 = *n - i__ + 1;
            i__3 = i__ - 1;
            q__1.r = -1.f;
            q__1.i = -0.f; // , expr subst
            cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + a_dim1], lda, &w[i__ + w_dim1], ldw, &c_b2, &a[i__ + i__ * a_dim1], & c__1);
            i__2 = i__ - 1;
            clacgv_(&i__2, &w[i__ + w_dim1], ldw);
            i__2 = i__ - 1;
            clacgv_(&i__2, &a[i__ + a_dim1], lda);
            i__2 = *n - i__ + 1;
            i__3 = i__ - 1;
            q__1.r = -1.f;
            q__1.i = -0.f; // , expr subst
            cgemv_("No transpose", &i__2, &i__3, &q__1, &w[i__ + w_dim1], ldw, &a[i__ + a_dim1], lda, &c_b2, &a[i__ + i__ * a_dim1], & c__1);
            i__2 = i__ - 1;
            clacgv_(&i__2, &a[i__ + a_dim1], lda);
            i__2 = i__ + i__ * a_dim1;
            i__3 = i__ + i__ * a_dim1;
            r__1 = a[i__3].r;
            a[i__2].r = r__1;
            a[i__2].i = 0.f; // , expr subst
            if (i__ < *n)
            {
                /* Generate elementary reflector H(i) to annihilate */
                /* A(i+2:n,i) */
                i__2 = i__ + 1 + i__ * a_dim1;
                alpha.r = a[i__2].r;
                alpha.i = a[i__2].i; // , expr subst
                i__2 = *n - i__;
                /* Computing MIN */
                i__3 = i__ + 2;
                clarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[i__]);
                i__2 = i__;
                e[i__2] = alpha.r;
                i__2 = i__ + 1 + i__ * a_dim1;
                a[i__2].r = 1.f;
                a[i__2].i = 0.f; // , expr subst
                /* Compute W(i+1:n,i) */
                i__2 = *n - i__;
                chemv_("Lower", &i__2, &c_b2, &a[i__ + 1 + (i__ + 1) * a_dim1] , lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b1, &w[ i__ + 1 + i__ * w_dim1], &c__1);
                i__2 = *n - i__;
                i__3 = i__ - 1;
                cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[i__ + 1 + w_dim1], ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, & c_b1, &w[i__ * w_dim1 + 1], &c__1);
                i__2 = *n - i__;
                i__3 = i__ - 1;
                q__1.r = -1.f;
                q__1.i = -0.f; // , expr subst
                cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + 1 + a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[ i__ + 1 + i__ * w_dim1], &c__1);
                i__2 = *n - i__;
                i__3 = i__ - 1;
                cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, & c_b1, &w[i__ * w_dim1 + 1], &c__1);
                i__2 = *n - i__;
                i__3 = i__ - 1;
                q__1.r = -1.f;
                q__1.i = -0.f; // , expr subst
                cgemv_("No transpose", &i__2, &i__3, &q__1, &w[i__ + 1 + w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[ i__ + 1 + i__ * w_dim1], &c__1);
                i__2 = *n - i__;
                cscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
                q__3.r = -.5f;
                q__3.i = -0.f; // , expr subst
                i__2 = i__;
                q__2.r = q__3.r * tau[i__2].r - q__3.i * tau[i__2].i;
                q__2.i = q__3.r * tau[i__2].i + q__3.i * tau[i__2].r; // , expr subst
                i__3 = *n - i__;
                cdotc_f2c_(&q__4, &i__3, &w[i__ + 1 + i__ * w_dim1], &c__1, &a[ i__ + 1 + i__ * a_dim1], &c__1);
                q__1.r = q__2.r * q__4.r - q__2.i * q__4.i;
                q__1.i = q__2.r * q__4.i + q__2.i * q__4.r; // , expr subst
                alpha.r = q__1.r;
                alpha.i = q__1.i; // , expr subst
                i__2 = *n - i__;
                caxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[ i__ + 1 + i__ * w_dim1], &c__1);
            }
            /* L20: */
        }
    }
    return 0;
    /* End of CLATRD */
}
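/* --- Hedged sketch (an addition, not LAPACK source): how a blocked
   CHETRD-style reduction would typically consume CLATRD's output --
   reduce the first NB columns, then apply the accumulated rank-2*NB
   update A := A - V*W**H - W*V**H to the trailing block with the BLAS
   routine CHER2K.  The "Lower" choice, block size, and leading
   dimensions are illustrative, and the CHER2K prototype is assumed. --- */
#include "f2c.h"

extern int clatrd_(char *uplo, integer *n, integer *nb, complex *a,
                   integer *lda, real *e, complex *tau, complex *w,
                   integer *ldw);
extern int cher2k_(char *uplo, char *trans, integer *n, integer *k,
                   complex *alpha, complex *a, integer *lda, complex *b,
                   integer *ldb, real *beta, complex *c__, integer *ldc);

void clatrd_block_step(integer n, integer nb, complex *a, integer lda,
                       real *e, complex *tau, complex *w, integer ldw)
{
    complex neg_one = {-1.f, 0.f};
    real one = 1.f;
    integer ntrail = n - nb;

    /* Reduce the first NB columns of the lower triangle and form W. */
    clatrd_("Lower", &n, &nb, a, &lda, e, tau, w, &ldw);

    /* Trailing update: A(nb+1:n,nb+1:n) := A - V*W**H - W*V**H, where
       V = A(nb+1:n,1:nb) holds the Householder vectors and
       W = W(nb+1:n,1:nb) was returned by CLATRD (0-based C indexing). */
    if (ntrail > 0) {
        cher2k_("Lower", "No transpose", &ntrail, &nb, &neg_one,
                &a[nb], &lda, &w[nb], &ldw, &one, &a[nb + nb * lda], &lda);
    }
}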
Example #23
0
/* Subroutine */ int claqp2_(integer *m, integer *n, integer *offset, complex 
	*a, integer *lda, integer *jpvt, complex *tau, real *vn1, real *vn2, 
	complex *work)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    real r__1;
    complex q__1;

    /* Builtin functions */
    void r_cnjg(complex *, complex *);
    double c_abs(complex *), sqrt(doublereal);

    /* Local variables */
    static integer i__, j, mn;
    static complex aii;
    static integer pvt;
    static real temp, temp2;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
	    , integer *, complex *, complex *, integer *, complex *, ftnlen);
    static integer offpi;
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *, 
	    complex *, integer *);
    static integer itemp;
    extern doublereal scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *, 
	    integer *, complex *);
    extern integer isamax_(integer *, real *, integer *);


/*  -- LAPACK auxiliary routine (version 3.0) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */
/*     Courant Institute, Argonne National Lab, and Rice University */
/*     June 30, 1999 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CLAQP2 computes a QR factorization with column pivoting of */
/*  the block A(OFFSET+1:M,1:N). */
/*  The block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A. M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A. N >= 0. */

/*  OFFSET  (input) INTEGER */
/*          The number of rows of the matrix A that must be pivoted */
/*          but not factorized. OFFSET >= 0. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the M-by-N matrix A. */
/*          On exit, the upper triangle of block A(OFFSET+1:M,1:N) is */
/*          the triangular factor obtained; the elements in block */
/*          A(OFFSET+1:M,1:N) below the diagonal, together with the */
/*          array TAU, represent the unitary matrix Q as a product of */
/*          elementary reflectors. Block A(1:OFFSET,1:N) has been */
/*          accordingly pivoted, but not factorized. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A. LDA >= max(1,M). */

/*  JPVT    (input/output) INTEGER array, dimension (N) */
/*          On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
/*          to the front of A*P (a leading column); if JPVT(i) = 0, */
/*          the i-th column of A is a free column. */
/*          On exit, if JPVT(i) = k, then the i-th column of A*P */
/*          was the k-th column of A. */

/*  TAU     (output) COMPLEX array, dimension (min(M,N)) */
/*          The scalar factors of the elementary reflectors. */

/*  VN1     (input/output) REAL array, dimension (N) */
/*          The vector with the partial column norms. */

/*  VN2     (input/output) REAL array, dimension (N) */
/*          The vector with the exact column norms. */

/*  WORK    (workspace) COMPLEX array, dimension (N) */

/*  Further Details */
/*  =============== */

/*  Based on contributions by */
/*    G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
/*    X. Sun, Computer Science Dept., Duke University, USA */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --jpvt;
    --tau;
    --vn1;
    --vn2;
    --work;

    /* Function Body */
/* Computing MIN */
    i__1 = *m - *offset;
    mn = min(i__1,*n);

/*     Compute factorization. */

    i__1 = mn;
    for (i__ = 1; i__ <= i__1; ++i__) {

	offpi = *offset + i__;

/*        Determine ith pivot column and swap if necessary. */

	i__2 = *n - i__ + 1;
	pvt = i__ - 1 + isamax_(&i__2, &vn1[i__], &c__1);

	if (pvt != i__) {
	    cswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
		    c__1);
	    itemp = jpvt[pvt];
	    jpvt[pvt] = jpvt[i__];
	    jpvt[i__] = itemp;
	    vn1[pvt] = vn1[i__];
	    vn2[pvt] = vn2[i__];
	}

/*        Generate elementary reflector H(i). */

	if (offpi < *m) {
	    i__2 = *m - offpi + 1;
	    clarfg_(&i__2, &a[offpi + i__ * a_dim1], &a[offpi + 1 + i__ * 
		    a_dim1], &c__1, &tau[i__]);
	} else {
	    clarfg_(&c__1, &a[*m + i__ * a_dim1], &a[*m + i__ * a_dim1], &
		    c__1, &tau[i__]);
	}

	if (i__ < *n) {

/*           Apply H(i)' to A(offset+i:m,i+1:n) from the left. */

	    i__2 = offpi + i__ * a_dim1;
	    aii.r = a[i__2].r, aii.i = a[i__2].i;
	    i__2 = offpi + i__ * a_dim1;
	    a[i__2].r = 1.f, a[i__2].i = 0.f;
	    i__2 = *m - offpi + 1;
	    i__3 = *n - i__;
	    r_cnjg(&q__1, &tau[i__]);
	    clarf_("Left", &i__2, &i__3, &a[offpi + i__ * a_dim1], &c__1, &
		    q__1, &a[offpi + (i__ + 1) * a_dim1], lda, &work[1], (
		    ftnlen)4);
	    i__2 = offpi + i__ * a_dim1;
	    a[i__2].r = aii.r, a[i__2].i = aii.i;
	}

/*        Update partial column norms. */
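/*        (The running estimate vn1(j) is downdated by the factor */
/*        sqrt(1 - (|A(offset+i,j)|/vn1(j))**2); when the safeguard temp2 */
/*        indicates possible cancellation, the column norm is recomputed */
/*        with SCNRM2 and the reference value vn2(j) is refreshed.) */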

	i__2 = *n;
	for (j = i__ + 1; j <= i__2; ++j) {
	    if (vn1[j] != 0.f) {
/* Computing 2nd power */
		r__1 = c_abs(&a[offpi + j * a_dim1]) / vn1[j];
		temp = 1.f - r__1 * r__1;
		temp = dmax(temp,0.f);
/* Computing 2nd power */
		r__1 = vn1[j] / vn2[j];
		temp2 = temp * .05f * (r__1 * r__1) + 1.f;
		if (temp2 == 1.f) {
		    if (offpi < *m) {
			i__3 = *m - offpi;
			vn1[j] = scnrm2_(&i__3, &a[offpi + 1 + j * a_dim1], &
				c__1);
			vn2[j] = vn1[j];
		    } else {
			vn1[j] = 0.f;
			vn2[j] = 0.f;
		    }
		} else {
		    vn1[j] *= sqrt(temp);
		}
	    }
/* L10: */
	}

/* L20: */
    }

    return 0;

/*     End of CLAQP2 */

} /* claqp2_ */
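/* --- Hedged usage sketch (an addition, not LAPACK source): a standalone
   pivoted-QR call to CLAQP2 with OFFSET = 0.  VN1/VN2 are seeded with the
   exact column norms via SCNRM2, and JPVT is seeded with the identity
   permutation so that on exit it records the column pivoting.  The fixed
   bound of 64 columns and the helper name are illustrative. --- */
#include "f2c.h"

extern int claqp2_(integer *m, integer *n, integer *offset, complex *a,
                   integer *lda, integer *jpvt, complex *tau, real *vn1,
                   real *vn2, complex *work);
extern doublereal scnrm2_(integer *n, complex *x, integer *incx);

void claqp2_demo(complex *a, integer m, integer n, integer lda,
                 integer *jpvt, complex *tau, complex *work)
{
    integer offset = 0, c1 = 1, j;
    real vn1[64], vn2[64];               /* assumes n <= 64 in this sketch */

    for (j = 0; j < n; ++j) {
        jpvt[j] = j + 1;                 /* identity permutation on entry */
        vn1[j] = (real) scnrm2_(&m, &a[j * lda], &c1);   /* partial norms */
        vn2[j] = vn1[j];                                 /* exact norms   */
    }
    claqp2_(&m, &n, &offset, a, &lda, jpvt, tau, vn1, vn2, work);
    /* WORK must have at least N elements; TAU at least min(M,N). */
}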
Example #24
0
/* Subroutine */ int cgeqpf_(integer *m, integer *n, complex *a, integer *lda,
	 integer *jpvt, complex *tau, complex *work, real *rwork, integer *
	info)
{
/*  -- LAPACK auxiliary routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   


    Purpose   
    =======   

    This routine is deprecated and has been replaced by routine CGEQP3.   

    CGEQPF computes a QR factorization with column pivoting of a   
    complex M-by-N matrix A: A*P = Q*R.   

    Arguments   
    =========   

    M       (input) INTEGER   
            The number of rows of the matrix A. M >= 0.   

    N       (input) INTEGER   
            The number of columns of the matrix A. N >= 0   

    A       (input/output) COMPLEX array, dimension (LDA,N)   
            On entry, the M-by-N matrix A.   
            On exit, the upper triangle of the array contains the   
            min(M,N)-by-N upper triangular matrix R; the elements   
            below the diagonal, together with the array TAU,   
            represent the unitary matrix Q as a product of   
            min(m,n) elementary reflectors.   

    LDA     (input) INTEGER   
            The leading dimension of the array A. LDA >= max(1,M).   

    JPVT    (input/output) INTEGER array, dimension (N)   
            On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted   
            to the front of A*P (a leading column); if JPVT(i) = 0,   
            the i-th column of A is a free column.   
            On exit, if JPVT(i) = k, then the i-th column of A*P   
            was the k-th column of A.   

    TAU     (output) COMPLEX array, dimension (min(M,N))   
            The scalar factors of the elementary reflectors.   

    WORK    (workspace) COMPLEX array, dimension (N)   

    RWORK   (workspace) REAL array, dimension (2*N)   

    INFO    (output) INTEGER   
            = 0:  successful exit   
            < 0:  if INFO = -i, the i-th argument had an illegal value   

    Further Details   
    ===============   

    The matrix Q is represented as a product of elementary reflectors   

       Q = H(1) H(2) . . . H(n)   

    Each H(i) has the form   

       H = I - tau * v * v'   

    where tau is a complex scalar, and v is a complex vector with   
    v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i).   

    The matrix P is represented in jpvt as follows: If   
       jpvt(j) = i   
    then the jth column of P is the ith canonical unit vector.   

    =====================================================================   


       Test the input arguments   

       Parameter adjustments */
    /* Table of constant values */
    static integer c__1 = 1;
    
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    real r__1;
    complex q__1;
    /* Builtin functions */
    void r_cnjg(complex *, complex *);
    double c_abs(complex *), sqrt(doublereal);
    /* Local variables */
    static real temp, temp2;
    static integer i__, j;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
	    , integer *, complex *, complex *, integer *, complex *), 
	    cswap_(integer *, complex *, integer *, complex *, integer *);
    static integer itemp;
    extern /* Subroutine */ int cgeqr2_(integer *, integer *, complex *, 
	    integer *, complex *, complex *, integer *);
    extern doublereal scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */ int cunm2r_(char *, char *, integer *, integer *, 
	    integer *, complex *, integer *, complex *, complex *, integer *, 
	    complex *, integer *);
    static integer ma, mn;
    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *, 
	    integer *, complex *), xerbla_(char *, integer *);
    extern integer isamax_(integer *, real *, integer *);
    static complex aii;
    static integer pvt;
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]


    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --jpvt;
    --tau;
    --work;
    --rwork;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*m)) {
	*info = -4;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGEQPF", &i__1);
	return 0;
    }

    mn = min(*m,*n);

/*     Move initial columns up front */

    itemp = 1;
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (jpvt[i__] != 0) {
	    if (i__ != itemp) {
		cswap_(m, &a_ref(1, i__), &c__1, &a_ref(1, itemp), &c__1);
		jpvt[i__] = jpvt[itemp];
		jpvt[itemp] = i__;
	    } else {
		jpvt[i__] = i__;
	    }
	    ++itemp;
	} else {
	    jpvt[i__] = i__;
	}
/* L10: */
    }
    --itemp;

/*     Compute the QR factorization and update remaining columns */

    if (itemp > 0) {
	ma = min(itemp,*m);
	cgeqr2_(m, &ma, &a[a_offset], lda, &tau[1], &work[1], info);
	if (ma < *n) {
	    i__1 = *n - ma;
	    cunm2r_("Left", "Conjugate transpose", m, &i__1, &ma, &a[a_offset]
		    , lda, &tau[1], &a_ref(1, ma + 1), lda, &work[1], info);
	}
    }

    if (itemp < mn) {

/*        Initialize partial column norms. The first n elements of   
          work store the exact column norms. */

	i__1 = *n;
	for (i__ = itemp + 1; i__ <= i__1; ++i__) {
	    i__2 = *m - itemp;
	    rwork[i__] = scnrm2_(&i__2, &a_ref(itemp + 1, i__), &c__1);
	    rwork[*n + i__] = rwork[i__];
/* L20: */
	}

/*        Compute factorization */

	i__1 = mn;
	for (i__ = itemp + 1; i__ <= i__1; ++i__) {

/*           Determine ith pivot column and swap if necessary */

	    i__2 = *n - i__ + 1;
	    pvt = i__ - 1 + isamax_(&i__2, &rwork[i__], &c__1);

	    if (pvt != i__) {
		cswap_(m, &a_ref(1, pvt), &c__1, &a_ref(1, i__), &c__1);
		itemp = jpvt[pvt];
		jpvt[pvt] = jpvt[i__];
		jpvt[i__] = itemp;
		rwork[pvt] = rwork[i__];
		rwork[*n + pvt] = rwork[*n + i__];
	    }

/*           Generate elementary reflector H(i) */

	    i__2 = a_subscr(i__, i__);
	    aii.r = a[i__2].r, aii.i = a[i__2].i;
/* Computing MIN */
	    i__2 = i__ + 1;
	    i__3 = *m - i__ + 1;
	    clarfg_(&i__3, &aii, &a_ref(min(i__2,*m), i__), &c__1, &tau[i__]);
	    i__2 = a_subscr(i__, i__);
	    a[i__2].r = aii.r, a[i__2].i = aii.i;

	    if (i__ < *n) {

/*              Apply H(i) to A(i:m,i+1:n) from the left */

		i__2 = a_subscr(i__, i__);
		aii.r = a[i__2].r, aii.i = a[i__2].i;
		i__2 = a_subscr(i__, i__);
		a[i__2].r = 1.f, a[i__2].i = 0.f;
		i__2 = *m - i__ + 1;
		i__3 = *n - i__;
		r_cnjg(&q__1, &tau[i__]);
		clarf_("Left", &i__2, &i__3, &a_ref(i__, i__), &c__1, &q__1, &
			a_ref(i__, i__ + 1), lda, &work[1]);
		i__2 = a_subscr(i__, i__);
		a[i__2].r = aii.r, a[i__2].i = aii.i;
	    }

/*           Update partial column norms */

	    i__2 = *n;
	    for (j = i__ + 1; j <= i__2; ++j) {
		if (rwork[j] != 0.f) {
/* Computing 2nd power */
		    r__1 = c_abs(&a_ref(i__, j)) / rwork[j];
		    temp = 1.f - r__1 * r__1;
		    temp = dmax(temp,0.f);
/* Computing 2nd power */
		    r__1 = rwork[j] / rwork[*n + j];
		    temp2 = temp * .05f * (r__1 * r__1) + 1.f;
		    if (temp2 == 1.f) {
			if (*m - i__ > 0) {
			    i__3 = *m - i__;
			    rwork[j] = scnrm2_(&i__3, &a_ref(i__ + 1, j), &
				    c__1);
			    rwork[*n + j] = rwork[j];
			} else {
			    rwork[j] = 0.f;
			    rwork[*n + j] = 0.f;
			}
		    } else {
			rwork[j] *= sqrt(temp);
		    }
		}
/* L30: */
	    }

/* L40: */
	}
    }
    return 0;

/*     End of CGEQPF */

} /* cgeqpf_ */
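/* --- Hedged usage sketch (an addition, not LAPACK source): column-pivoted
   QR of an m-by-n complex matrix through the (deprecated) CGEQPF
   interface.  Setting JPVT to zero marks every column as free; CGEQP3
   would normally be preferred, as noted above.  Allocation sizes follow
   the workspace documentation; error handling is omitted in this sketch. --- */
#include <stdlib.h>
#include "f2c.h"

extern int cgeqpf_(integer *m, integer *n, complex *a, integer *lda,
                   integer *jpvt, complex *tau, complex *work,
                   real *rwork, integer *info);

integer cgeqpf_demo(complex *a, integer m, integer n, integer lda)
{
    integer info = 0, j, mn = (m < n) ? m : n;
    integer *jpvt = (integer *) malloc((size_t) n * sizeof *jpvt);
    complex *tau = (complex *) malloc((size_t) mn * sizeof *tau);
    complex *work = (complex *) malloc((size_t) n * sizeof *work);
    real *rwork = (real *) malloc((size_t) (2 * n) * sizeof *rwork);

    for (j = 0; j < n; ++j) {
        jpvt[j] = 0;                      /* every column is a free column */
    }
    cgeqpf_(&m, &n, a, &lda, jpvt, tau, work, rwork, &info);
    /* On exit, jpvt[j] = k means column j+1 of A*P was column k of A. */

    free(jpvt); free(tau); free(work); free(rwork);
    return info;
}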
Example #25
0
/* Subroutine */ int clabrd_(integer *m, integer *n, integer *nb, complex *a, 
	integer *lda, real *d__, real *e, complex *tauq, complex *taup, 
	complex *x, integer *ldx, complex *y, integer *ldy)
{
    /* System generated locals */
    integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2, 
	    i__3;
    complex q__1;

    /* Local variables */
    integer i__;
    complex alpha;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
	    integer *), cgemv_(char *, integer *, integer *, complex *, 
	    complex *, integer *, complex *, integer *, complex *, complex *, 
	    integer *), clarfg_(integer *, complex *, complex *, 
	    integer *, complex *), clacgv_(integer *, complex *, integer *);


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CLABRD reduces the first NB rows and columns of a complex general */
/*  m by n matrix A to upper or lower real bidiagonal form by a unitary */
/*  transformation Q' * A * P, and returns the matrices X and Y which */
/*  are needed to apply the transformation to the unreduced part of A. */

/*  If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower */
/*  bidiagonal form. */

/*  This is an auxiliary routine called by CGEBRD */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows in the matrix A. */

/*  N       (input) INTEGER */
/*          The number of columns in the matrix A. */

/*  NB      (input) INTEGER */
/*          The number of leading rows and columns of A to be reduced. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the m by n general matrix to be reduced. */
/*          On exit, the first NB rows and columns of the matrix are */
/*          overwritten; the rest of the array is unchanged. */
/*          If m >= n, elements on and below the diagonal in the first NB */
/*            columns, with the array TAUQ, represent the unitary */
/*            matrix Q as a product of elementary reflectors; and */
/*            elements above the diagonal in the first NB rows, with the */
/*            array TAUP, represent the unitary matrix P as a product */
/*            of elementary reflectors. */
/*          If m < n, elements below the diagonal in the first NB */
/*            columns, with the array TAUQ, represent the unitary */
/*            matrix Q as a product of elementary reflectors, and */
/*            elements on and above the diagonal in the first NB rows, */
/*            with the array TAUP, represent the unitary matrix P as */
/*            a product of elementary reflectors. */
/*          See Further Details. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,M). */

/*  D       (output) REAL array, dimension (NB) */
/*          The diagonal elements of the first NB rows and columns of */
/*          the reduced matrix.  D(i) = A(i,i). */

/*  E       (output) REAL array, dimension (NB) */
/*          The off-diagonal elements of the first NB rows and columns of */
/*          the reduced matrix. */

/*  TAUQ    (output) COMPLEX array, dimension (NB) */
/*          The scalar factors of the elementary reflectors which */
/*          represent the unitary matrix Q. See Further Details. */

/*  TAUP    (output) COMPLEX array, dimension (NB) */
/*          The scalar factors of the elementary reflectors which */
/*          represent the unitary matrix P. See Further Details. */

/*  X       (output) COMPLEX array, dimension (LDX,NB) */
/*          The m-by-nb matrix X required to update the unreduced part */
/*          of A. */

/*  LDX     (input) INTEGER */
/*          The leading dimension of the array X. LDX >= max(1,M). */

/*  Y       (output) COMPLEX array, dimension (LDY,NB) */
/*          The n-by-nb matrix Y required to update the unreduced part */
/*          of A. */

/*  LDY     (input) INTEGER */
/*          The leading dimension of the array Y. LDY >= max(1,N). */

/*  Further Details */
/*  =============== */

/*  The matrices Q and P are represented as products of elementary */
/*  reflectors: */

/*     Q = H(1) H(2) . . . H(nb)  and  P = G(1) G(2) . . . G(nb) */

/*  Each H(i) and G(i) has the form: */

/*     H(i) = I - tauq * v * v'  and G(i) = I - taup * u * u' */

/*  where tauq and taup are complex scalars, and v and u are complex */
/*  vectors. */

/*  If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in */
/*  A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in */
/*  A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */

/*  If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in */
/*  A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in */
/*  A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */

/*  The elements of the vectors v and u together form the m-by-nb matrix */
/*  V and the nb-by-n matrix U' which are needed, with X and Y, to apply */
/*  the transformation to the unreduced part of the matrix, using a block */
/*  update of the form:  A := A - V*Y' - X*U'. */

/*  The contents of A on exit are illustrated by the following examples */
/*  with nb = 2: */

/*  m = 6 and n = 5 (m > n):          m = 5 and n = 6 (m < n): */

/*    (  1   1   u1  u1  u1 )           (  1   u1  u1  u1  u1  u1 ) */
/*    (  v1  1   1   u2  u2 )           (  1   1   u2  u2  u2  u2 ) */
/*    (  v1  v2  a   a   a  )           (  v1  1   a   a   a   a  ) */
/*    (  v1  v2  a   a   a  )           (  v1  v2  a   a   a   a  ) */
/*    (  v1  v2  a   a   a  )           (  v1  v2  a   a   a   a  ) */
/*    (  v1  v2  a   a   a  ) */

/*  where a denotes an element of the original matrix which is unchanged, */
/*  vi denotes an element of the vector defining H(i), and ui an element */
/*  of the vector defining G(i). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Quick return if possible */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --d__;
    --e;
    --tauq;
    --taup;
    x_dim1 = *ldx;
    x_offset = 1 + x_dim1;
    x -= x_offset;
    y_dim1 = *ldy;
    y_offset = 1 + y_dim1;
    y -= y_offset;

    /* Function Body */
    if (*m <= 0 || *n <= 0) {
	return 0;
    }

    if (*m >= *n) {

/*        Reduce to upper bidiagonal form */

	i__1 = *nb;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Update A(i:m,i) */

	    i__2 = i__ - 1;
	    clacgv_(&i__2, &y[i__ + y_dim1], ldy);
	    i__2 = *m - i__ + 1;
	    i__3 = i__ - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + a_dim1], lda, 
		     &y[i__ + y_dim1], ldy, &c_b2, &a[i__ + i__ * a_dim1], &
		    c__1);
	    i__2 = i__ - 1;
	    clacgv_(&i__2, &y[i__ + y_dim1], ldy);
	    i__2 = *m - i__ + 1;
	    i__3 = i__ - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("No transpose", &i__2, &i__3, &q__1, &x[i__ + x_dim1], ldx, 
		     &a[i__ * a_dim1 + 1], &c__1, &c_b2, &a[i__ + i__ * 
		    a_dim1], &c__1);

/*           Generate reflection Q(i) to annihilate A(i+1:m,i) */

	    i__2 = i__ + i__ * a_dim1;
	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
	    i__2 = *m - i__ + 1;
/* Computing MIN */
	    i__3 = i__ + 1;
	    clarfg_(&i__2, &alpha, &a[min(i__3, *m)+ i__ * a_dim1], &c__1, &
		    tauq[i__]);
	    i__2 = i__;
	    d__[i__2] = alpha.r;
	    if (i__ < *n) {
		i__2 = i__ + i__ * a_dim1;
		a[i__2].r = 1.f, a[i__2].i = 0.f;

/*              Compute Y(i+1:n,i) */

		i__2 = *m - i__ + 1;
		i__3 = *n - i__;
		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + (
			i__ + 1) * a_dim1], lda, &a[i__ + i__ * a_dim1], &
			c__1, &c_b1, &y[i__ + 1 + i__ * y_dim1], &c__1);
		i__2 = *m - i__ + 1;
		i__3 = i__ - 1;
		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 
			a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b1, &
			y[i__ * y_dim1 + 1], &c__1);
		i__2 = *n - i__;
		i__3 = i__ - 1;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__2, &i__3, &q__1, &y[i__ + 1 + 
			y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b2, &y[
			i__ + 1 + i__ * y_dim1], &c__1);
		i__2 = *m - i__ + 1;
		i__3 = i__ - 1;
		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &x[i__ + 
			x_dim1], ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b1, &
			y[i__ * y_dim1 + 1], &c__1);
		i__2 = i__ - 1;
		i__3 = *n - i__;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("Conjugate transpose", &i__2, &i__3, &q__1, &a[(i__ + 
			1) * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
			c_b2, &y[i__ + 1 + i__ * y_dim1], &c__1);
		i__2 = *n - i__;
		cscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);

/*              Update A(i,i+1:n) */

		i__2 = *n - i__;
		clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
		clacgv_(&i__, &a[i__ + a_dim1], lda);
		i__2 = *n - i__;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__2, &i__, &q__1, &y[i__ + 1 + 
			y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b2, &a[i__ + (
			i__ + 1) * a_dim1], lda);
		clacgv_(&i__, &a[i__ + a_dim1], lda);
		i__2 = i__ - 1;
		clacgv_(&i__2, &x[i__ + x_dim1], ldx);
		i__2 = i__ - 1;
		i__3 = *n - i__;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("Conjugate transpose", &i__2, &i__3, &q__1, &a[(i__ + 
			1) * a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b2, &
			a[i__ + (i__ + 1) * a_dim1], lda);
		i__2 = i__ - 1;
		clacgv_(&i__2, &x[i__ + x_dim1], ldx);

/*              Generate reflection P(i) to annihilate A(i,i+2:n) */

		i__2 = i__ + (i__ + 1) * a_dim1;
		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
		i__2 = *n - i__;
/* Computing MIN */
		i__3 = i__ + 2;
		clarfg_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &
			taup[i__]);
		i__2 = i__;
		e[i__2] = alpha.r;
		i__2 = i__ + (i__ + 1) * a_dim1;
		a[i__2].r = 1.f, a[i__2].i = 0.f;

/*              Compute X(i+1:m,i) */

		i__2 = *m - i__;
		i__3 = *n - i__;
		cgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + (i__ 
			+ 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1], 
			lda, &c_b1, &x[i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = *n - i__;
		cgemv_("Conjugate transpose", &i__2, &i__, &c_b2, &y[i__ + 1 
			+ y_dim1], ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &
			c_b1, &x[i__ * x_dim1 + 1], &c__1);
		i__2 = *m - i__;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__2, &i__, &q__1, &a[i__ + 1 + 
			a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
			i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = i__ - 1;
		i__3 = *n - i__;
		cgemv_("No transpose", &i__2, &i__3, &c_b2, &a[(i__ + 1) * 
			a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
			c_b1, &x[i__ * x_dim1 + 1], &c__1);
		i__2 = *m - i__;
		i__3 = i__ - 1;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__2, &i__3, &q__1, &x[i__ + 1 + 
			x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
			i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = *m - i__;
		cscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = *n - i__;
		clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
	    }
/* L10: */
	}
    } else {

/*        Reduce to lower bidiagonal form */

	i__1 = *nb;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Update A(i,i:n) */

	    i__2 = *n - i__ + 1;
	    clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
	    i__2 = i__ - 1;
	    clacgv_(&i__2, &a[i__ + a_dim1], lda);
	    i__2 = *n - i__ + 1;
	    i__3 = i__ - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("No transpose", &i__2, &i__3, &q__1, &y[i__ + y_dim1], ldy, 
		     &a[i__ + a_dim1], lda, &c_b2, &a[i__ + i__ * a_dim1], 
		    lda);
	    i__2 = i__ - 1;
	    clacgv_(&i__2, &a[i__ + a_dim1], lda);
	    i__2 = i__ - 1;
	    clacgv_(&i__2, &x[i__ + x_dim1], ldx);
	    i__2 = i__ - 1;
	    i__3 = *n - i__ + 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("Conjugate transpose", &i__2, &i__3, &q__1, &a[i__ * 
		    a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b2, &a[i__ + 
		    i__ * a_dim1], lda);
	    i__2 = i__ - 1;
	    clacgv_(&i__2, &x[i__ + x_dim1], ldx);

/*           Generate reflection P(i) to annihilate A(i,i+1:n) */

	    i__2 = i__ + i__ * a_dim1;
	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
	    i__2 = *n - i__ + 1;
/* Computing MIN */
	    i__3 = i__ + 1;
	    clarfg_(&i__2, &alpha, &a[i__ + min(i__3, *n)* a_dim1], lda, &
		    taup[i__]);
	    i__2 = i__;
	    d__[i__2] = alpha.r;
	    if (i__ < *m) {
		i__2 = i__ + i__ * a_dim1;
		a[i__2].r = 1.f, a[i__2].i = 0.f;

/*              Compute X(i+1:m,i) */

		i__2 = *m - i__;
		i__3 = *n - i__ + 1;
		cgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + i__ *
			 a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b1, &x[
			i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = *n - i__ + 1;
		i__3 = i__ - 1;
		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &y[i__ + 
			y_dim1], ldy, &a[i__ + i__ * a_dim1], lda, &c_b1, &x[
			i__ * x_dim1 + 1], &c__1);
		i__2 = *m - i__;
		i__3 = i__ - 1;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + 1 + 
			a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
			i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = i__ - 1;
		i__3 = *n - i__ + 1;
		cgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ * a_dim1 + 
			1], lda, &a[i__ + i__ * a_dim1], lda, &c_b1, &x[i__ * 
			x_dim1 + 1], &c__1);
		i__2 = *m - i__;
		i__3 = i__ - 1;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__2, &i__3, &q__1, &x[i__ + 1 + 
			x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
			i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = *m - i__;
		cscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = *n - i__ + 1;
		clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);

/*              Update A(i+1:m,i) */

		i__2 = i__ - 1;
		clacgv_(&i__2, &y[i__ + y_dim1], ldy);
		i__2 = *m - i__;
		i__3 = i__ - 1;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + 1 + 
			a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b2, &a[i__ + 
			1 + i__ * a_dim1], &c__1);
		i__2 = i__ - 1;
		clacgv_(&i__2, &y[i__ + y_dim1], ldy);
		i__2 = *m - i__;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__2, &i__, &q__1, &x[i__ + 1 + 
			x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b2, &a[
			i__ + 1 + i__ * a_dim1], &c__1);

/*              Generate reflection Q(i) to annihilate A(i+2:m,i) */

		i__2 = i__ + 1 + i__ * a_dim1;
		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
		i__2 = *m - i__;
/* Computing MIN */
		i__3 = i__ + 2;
		clarfg_(&i__2, &alpha, &a[min(i__3, *m)+ i__ * a_dim1], &c__1, 
			 &tauq[i__]);
		i__2 = i__;
		e[i__2] = alpha.r;
		i__2 = i__ + 1 + i__ * a_dim1;
		a[i__2].r = 1.f, a[i__2].i = 0.f;

/*              Compute Y(i+1:n,i) */

		i__2 = *m - i__;
		i__3 = *n - i__;
		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 
			+ (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1]
, &c__1, &c_b1, &y[i__ + 1 + i__ * y_dim1], &c__1);
		i__2 = *m - i__;
		i__3 = i__ - 1;
		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 
			+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
			c_b1, &y[i__ * y_dim1 + 1], &c__1);
		i__2 = *n - i__;
		i__3 = i__ - 1;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", &i__2, &i__3, &q__1, &y[i__ + 1 + 
			y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b2, &y[
			i__ + 1 + i__ * y_dim1], &c__1);
		i__2 = *m - i__;
		cgemv_("Conjugate transpose", &i__2, &i__, &c_b2, &x[i__ + 1 
			+ x_dim1], ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &
			c_b1, &y[i__ * y_dim1 + 1], &c__1);
		i__2 = *n - i__;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("Conjugate transpose", &i__, &i__2, &q__1, &a[(i__ + 1)
			 * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
			c_b2, &y[i__ + 1 + i__ * y_dim1], &c__1);
		i__2 = *n - i__;
		cscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
	    } else {
		i__2 = *n - i__ + 1;
		clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
	    }
/* L20: */
	}
    }
    return 0;

/*     End of CLABRD */

} /* clabrd_ */
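
/* Illustrative sketch, not part of CLABRD: how a blocked driver such as
   CGEBRD consumes the panel produced above.  For the m >= n case, V lives
   in A(nb+1:m,1:nb), U' in A(1:nb,nb+1:n), and the trailing block
   A(nb+1:m,nb+1:n) is updated as  A := A - V*Y' - X*U'  with two cgemm_
   calls.  Assumes the f2c.h types used throughout these examples, 0-based
   column-major storage with leading dimension m, nb <= min(m,n), and
   caller-allocated d/e/tauq/taup of length nb, x of size m*nb and y of
   size n*nb.  The wrapper name is hypothetical. */
extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
        integer *, complex *, complex *, integer *, complex *, integer *,
        complex *, integer *, complex *, integer *);

static void apply_clabrd_update(integer m, integer n, integer nb, complex *a,
        complex *x, complex *y, real *d, real *e, complex *tauq,
        complex *taup)
{
    complex one = {1.f, 0.f};
    complex neg_one = {-1.f, 0.f};
    integer lda = m, ldx = m, ldy = n;
    integer m2 = m - nb, n2 = n - nb;

    /* Reduce the leading nb rows and columns; fills d, e, tauq, taup,
       overwrites the panel of a, and returns the update matrices x, y. */
    clabrd_(&m, &n, &nb, a, &lda, d, e, tauq, taup, x, &ldx, y, &ldy);

    if (m2 > 0 && n2 > 0) {
        /* A(nb+1:m,nb+1:n) -= V * Y(nb+1:n,1:nb)', V stored in A(nb+1:m,1:nb) */
        cgemm_("No transpose", "Conjugate transpose", &m2, &n2, &nb,
                &neg_one, &a[nb], &lda, &y[nb], &ldy, &one,
                &a[nb + nb * lda], &lda);
        /* A(nb+1:m,nb+1:n) -= X(nb+1:m,1:nb) * U', U' stored in A(1:nb,nb+1:n) */
        cgemm_("No transpose", "No transpose", &m2, &n2, &nb,
                &neg_one, &x[nb], &ldx, &a[nb * lda], &lda, &one,
                &a[nb + nb * lda], &lda);
    }
}
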
Example #26
0
/* Subroutine */ int clahrd_(integer *n, integer *k, integer *nb, complex *a, 
	integer *lda, complex *tau, complex *t, integer *ldt, complex *y, 
	integer *ldy)
{
/*  -- LAPACK auxiliary routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   


    Purpose   
    =======   

    CLAHRD reduces the first NB columns of a complex general n-by-(n-k+1)   
    matrix A so that elements below the k-th subdiagonal are zero. The   
    reduction is performed by a unitary similarity transformation   
    Q' * A * Q. The routine returns the matrices V and T which determine   
    Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T.   

    This is an auxiliary routine called by CGEHRD.   

    Arguments   
    =========   

    N       (input) INTEGER   
            The order of the matrix A.   

    K       (input) INTEGER   
            The offset for the reduction. Elements below the k-th   
            subdiagonal in the first NB columns are reduced to zero.   

    NB      (input) INTEGER   
            The number of columns to be reduced.   

    A       (input/output) COMPLEX array, dimension (LDA,N-K+1)   
            On entry, the n-by-(n-k+1) general matrix A.   
            On exit, the elements on and above the k-th subdiagonal in   
            the first NB columns are overwritten with the corresponding   
            elements of the reduced matrix; the elements below the k-th   
            subdiagonal, with the array TAU, represent the matrix Q as a   
            product of elementary reflectors. The other columns of A are   
            unchanged. See Further Details.   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= max(1,N).   

    TAU     (output) COMPLEX array, dimension (NB)   
            The scalar factors of the elementary reflectors. See Further   
            Details.   

    T       (output) COMPLEX array, dimension (LDT,NB)   
            The upper triangular matrix T.   

    LDT     (input) INTEGER   
            The leading dimension of the array T.  LDT >= NB.   

    Y       (output) COMPLEX array, dimension (LDY,NB)   
            The n-by-nb matrix Y.   

    LDY     (input) INTEGER   
            The leading dimension of the array Y. LDY >= max(1,N).   

    Further Details   
    ===============   

    The matrix Q is represented as a product of nb elementary reflectors   

       Q = H(1) H(2) . . . H(nb).   

    Each H(i) has the form   

       H(i) = I - tau * v * v'   

    where tau is a complex scalar, and v is a complex vector with   
    v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in   
    A(i+k+1:n,i), and tau in TAU(i).   

    The elements of the vectors v together form the (n-k+1)-by-nb matrix   
    V which is needed, with T and Y, to apply the transformation to the   
    unreduced part of the matrix, using an update of the form:   
    A := (I - V*T*V') * (A - Y*V').   

    The contents of A on exit are illustrated by the following example   
    with n = 7, k = 3 and nb = 2:   

       ( a   h   a   a   a )   
       ( a   h   a   a   a )   
       ( a   h   a   a   a )   
       ( h   h   a   a   a )   
       ( v1  h   a   a   a )   
       ( v1  v2  a   a   a )   
       ( v1  v2  a   a   a )   

    where a denotes an element of the original matrix A, h denotes a   
    modified element of the upper Hessenberg matrix H, and vi denotes an   
    element of the vector defining H(i).   

    =====================================================================   


       Quick return if possible   

       Parameter adjustments */
    /* Table of constant values */
    static complex c_b1 = {0.f,0.f};
    static complex c_b2 = {1.f,0.f};
    static integer c__1 = 1;
    
    /* System generated locals */
    integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2, 
	    i__3;
    complex q__1;
    /* Local variables */
    static integer i__;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
	    integer *), cgemv_(char *, integer *, integer *, complex *, 
	    complex *, integer *, complex *, integer *, complex *, complex *, 
	    integer *), ccopy_(integer *, complex *, integer *, 
	    complex *, integer *), caxpy_(integer *, complex *, complex *, 
	    integer *, complex *, integer *), ctrmv_(char *, char *, char *, 
	    integer *, complex *, integer *, complex *, integer *);
    static complex ei;
    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *, 
	    integer *, complex *), clacgv_(integer *, complex *, integer *);
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define t_subscr(a_1,a_2) (a_2)*t_dim1 + a_1
#define t_ref(a_1,a_2) t[t_subscr(a_1,a_2)]
#define y_subscr(a_1,a_2) (a_2)*y_dim1 + a_1
#define y_ref(a_1,a_2) y[y_subscr(a_1,a_2)]


    --tau;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    t_dim1 = *ldt;
    t_offset = 1 + t_dim1 * 1;
    t -= t_offset;
    y_dim1 = *ldy;
    y_offset = 1 + y_dim1 * 1;
    y -= y_offset;

    /* Function Body */
    if (*n <= 1) {
	return 0;
    }

    i__1 = *nb;
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (i__ > 1) {

/*           Update A(1:n,i)   

             Compute i-th column of A - Y * V' */

	    i__2 = i__ - 1;
	    clacgv_(&i__2, &a_ref(*k + i__ - 1, 1), lda);
	    i__2 = i__ - 1;
	    q__1.r = -1.f, q__1.i = 0.f;
	    cgemv_("No transpose", n, &i__2, &q__1, &y[y_offset], ldy, &a_ref(
		    *k + i__ - 1, 1), lda, &c_b2, &a_ref(1, i__), &c__1);
	    i__2 = i__ - 1;
	    clacgv_(&i__2, &a_ref(*k + i__ - 1, 1), lda);

/*           Apply I - V * T' * V' to this column (call it b) from the   
             left, using the last column of T as workspace   

             Let  V = ( V1 )   and   b = ( b1 )   (first I-1 rows)   
                      ( V2 )             ( b2 )   

             where V1 is unit lower triangular   

             w := V1' * b1 */

	    i__2 = i__ - 1;
	    ccopy_(&i__2, &a_ref(*k + 1, i__), &c__1, &t_ref(1, *nb), &c__1);
	    i__2 = i__ - 1;
	    ctrmv_("Lower", "Conjugate transpose", "Unit", &i__2, &a_ref(*k + 
		    1, 1), lda, &t_ref(1, *nb), &c__1);

/*           w := w + V2'*b2 */

	    i__2 = *n - *k - i__ + 1;
	    i__3 = i__ - 1;
	    cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a_ref(*k + 
		    i__, 1), lda, &a_ref(*k + i__, i__), &c__1, &c_b2, &t_ref(
		    1, *nb), &c__1);

/*           w := T'*w */

	    i__2 = i__ - 1;
	    ctrmv_("Upper", "Conjugate transpose", "Non-unit", &i__2, &t[
		    t_offset], ldt, &t_ref(1, *nb), &c__1);

/*           b2 := b2 - V2*w */

	    i__2 = *n - *k - i__ + 1;
	    i__3 = i__ - 1;
	    q__1.r = -1.f, q__1.i = 0.f;
	    cgemv_("No transpose", &i__2, &i__3, &q__1, &a_ref(*k + i__, 1), 
		    lda, &t_ref(1, *nb), &c__1, &c_b2, &a_ref(*k + i__, i__), 
		    &c__1);

/*           b1 := b1 - V1*w */

	    i__2 = i__ - 1;
	    ctrmv_("Lower", "No transpose", "Unit", &i__2, &a_ref(*k + 1, 1), 
		    lda, &t_ref(1, *nb), &c__1);
	    i__2 = i__ - 1;
	    q__1.r = -1.f, q__1.i = 0.f;
	    caxpy_(&i__2, &q__1, &t_ref(1, *nb), &c__1, &a_ref(*k + 1, i__), &
		    c__1);

	    i__2 = a_subscr(*k + i__ - 1, i__ - 1);
	    a[i__2].r = ei.r, a[i__2].i = ei.i;
	}

/*        Generate the elementary reflector H(i) to annihilate   
          A(k+i+1:n,i) */

	i__2 = a_subscr(*k + i__, i__);
	ei.r = a[i__2].r, ei.i = a[i__2].i;
/* Computing MIN */
	i__2 = *k + i__ + 1;
	i__3 = *n - *k - i__ + 1;
	clarfg_(&i__3, &ei, &a_ref(min(i__2,*n), i__), &c__1, &tau[i__]);
	i__2 = a_subscr(*k + i__, i__);
	a[i__2].r = 1.f, a[i__2].i = 0.f;

/*        Compute  Y(1:n,i) */

	i__2 = *n - *k - i__ + 1;
	cgemv_("No transpose", n, &i__2, &c_b2, &a_ref(1, i__ + 1), lda, &
		a_ref(*k + i__, i__), &c__1, &c_b1, &y_ref(1, i__), &c__1);
	i__2 = *n - *k - i__ + 1;
	i__3 = i__ - 1;
	cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a_ref(*k + i__, 1)
		, lda, &a_ref(*k + i__, i__), &c__1, &c_b1, &t_ref(1, i__), &
		c__1);
	i__2 = i__ - 1;
	q__1.r = -1.f, q__1.i = 0.f;
	cgemv_("No transpose", n, &i__2, &q__1, &y[y_offset], ldy, &t_ref(1, 
		i__), &c__1, &c_b2, &y_ref(1, i__), &c__1);
	cscal_(n, &tau[i__], &y_ref(1, i__), &c__1);

/*        Compute T(1:i,i) */

	i__2 = i__ - 1;
	i__3 = i__;
	q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
	cscal_(&i__2, &q__1, &t_ref(1, i__), &c__1);
	i__2 = i__ - 1;
	ctrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt, 
		&t_ref(1, i__), &c__1);
	i__2 = t_subscr(i__, i__);
	i__3 = i__;
	t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;

/* L10: */
    }
    i__1 = a_subscr(*k + *nb, *nb);
    a[i__1].r = ei.r, a[i__1].i = ei.i;

    return 0;

/*     End of CLAHRD */

} /* clahrd_ */
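
/* Illustrative sketch, not part of CLAHRD: a minimal call of the routine
   above.  CGEHRD passes A(1,K) of the full matrix as the A argument, i.e.
   a_panel below would be &a_full[(k - 1) * lda] in 0-based column-major
   storage; the returned V (in a_panel), T and Y then feed the documented
   update A := (I - V*T*V') * (A - Y*V') on the unreduced part.  Assumes
   the f2c.h types used in these examples; the wrapper name and buffer
   names are hypothetical. */
#include <stdlib.h>

static void reduce_panel_sketch(integer n, integer k, integer nb,
        complex *a_panel, integer lda)
{
    integer ldt = nb;   /* T is nb-by-nb upper triangular, LDT >= NB        */
    integer ldy = n;    /* Y is n-by-nb,                   LDY >= max(1,N)  */
    complex *tau = (complex *) malloc(sizeof(complex) * (size_t) nb);
    complex *t = (complex *) malloc(sizeof(complex) * (size_t) (ldt * nb));
    complex *y = (complex *) malloc(sizeof(complex) * (size_t) (ldy * nb));

    clahrd_(&n, &k, &nb, a_panel, &lda, tau, t, &ldt, y, &ldy);

    /* ... apply the block reflector to the remaining columns here,
       e.g. with cgemm_ and clarfb_ as CGEHRD does ... */

    free(tau);
    free(t);
    free(y);
}
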
Example #27
0
/* Subroutine */ int cgeql2_(integer *m, integer *n, complex *a, integer *lda,
	 complex *tau, complex *work, integer *info)
{
/*  -- LAPACK routine (version 2.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    CGEQL2 computes a QL factorization of a complex m by n matrix A:   
    A = Q * L.   

    Arguments   
    =========   

    M       (input) INTEGER   
            The number of rows of the matrix A.  M >= 0.   

    N       (input) INTEGER   
            The number of columns of the matrix A.  N >= 0.   

    A       (input/output) COMPLEX array, dimension (LDA,N)   
            On entry, the m by n matrix A.   
            On exit, if m >= n, the lower triangle of the subarray   
            A(m-n+1:m,1:n) contains the n by n lower triangular matrix L;   
            if m <= n, the elements on and below the (n-m)-th   
            superdiagonal contain the m by n lower trapezoidal matrix L;   
            the remaining elements, with the array TAU, represent the   
            unitary matrix Q as a product of elementary reflectors   
            (see Further Details).   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= max(1,M).   

    TAU     (output) COMPLEX array, dimension (min(M,N))   
            The scalar factors of the elementary reflectors (see Further   
            Details).   

    WORK    (workspace) COMPLEX array, dimension (N)   

    INFO    (output) INTEGER   
            = 0: successful exit   
            < 0: if INFO = -i, the i-th argument had an illegal value   

    Further Details   
    ===============   

    The matrix Q is represented as a product of elementary reflectors   

       Q = H(k) . . . H(2) H(1), where k = min(m,n).   

    Each H(i) has the form   

       H(i) = I - tau * v * v'   

    where tau is a complex scalar, and v is a complex vector with   
    v(m-k+i+1:m) = 0 and v(m-k+i) = 1; v(1:m-k+i-1) is stored on exit in   
    A(1:m-k+i-1,n-k+i), and tau in TAU(i).   

    ===================================================================== 
  


       Test the input arguments   

    
   Parameter adjustments   
       Function Body */
    /* Table of constant values */
    static integer c__1 = 1;
    
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2;
    complex q__1;
    /* Builtin functions */
    void r_cnjg(complex *, complex *);
    /* Local variables */
    static integer i, k;
    static complex alpha;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
	    , integer *, complex *, complex *, integer *, complex *), 
	    clarfg_(integer *, complex *, complex *, integer *, complex *), 
	    xerbla_(char *, integer *);



#define TAU(I) tau[(I)-1]
#define WORK(I) work[(I)-1]

#define A(I,J) a[(I)-1 + ((J)-1)* ( *lda)]

    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*m)) {
	*info = -4;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGEQL2", &i__1);
	return 0;
    }

    k = min(*m,*n);

    for (i = k; i >= 1; --i) {

/*        Generate elementary reflector H(i) to annihilate   
          A(1:m-k+i-1,n-k+i) */

	i__1 = *m - k + i + (*n - k + i) * a_dim1;
	alpha.r = A(*m-k+i,*n-k+i).r, alpha.i = A(*m-k+i,*n-k+i).i;
	i__1 = *m - k + i;
	clarfg_(&i__1, &alpha, &A(1,*n-k+i), &c__1, &TAU(i));

/*        Apply H(i)' to A(1:m-k+i,1:n-k+i-1) from the left */

	i__1 = *m - k + i + (*n - k + i) * a_dim1;
	A(*m-k+i,*n-k+i).r = 1.f, A(*m-k+i,*n-k+i).i = 0.f;
	i__1 = *m - k + i;
	i__2 = *n - k + i - 1;
	r_cnjg(&q__1, &TAU(i));
	clarf_("Left", &i__1, &i__2, &A(1,*n-k+i), &c__1, &
		q__1, &A(1,1), lda, &WORK(1));
	i__1 = *m - k + i + (*n - k + i) * a_dim1;
	A(*m-k+i,*n-k+i).r = alpha.r, A(*m-k+i,*n-k+i).i = alpha.i;
/* L10: */
    }
    return 0;

/*     End of CGEQL2 */

} /* cgeql2_ */
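
/* Illustrative sketch, not part of CGEQL2: a minimal call of the routine
   above on a column-major m-by-n matrix held in a with leading dimension
   lda.  Assumes the f2c.h types used in these examples; the wrapper name
   is hypothetical. */
#include <stdio.h>
#include <stdlib.h>

static int ql_factor_sketch(integer m, integer n, complex *a, integer lda)
{
    integer info = 0;
    integer k = m < n ? m : n;
    complex *tau = (complex *) malloc(sizeof(complex) * (size_t) k);  /* min(M,N) */
    complex *work = (complex *) malloc(sizeof(complex) * (size_t) n); /* N */

    cgeql2_(&m, &n, a, &lda, tau, work, &info);
    if (info < 0)
        fprintf(stderr, "cgeql2_: argument %ld had an illegal value\n",
                (long) -info);

    /* On success, L sits in the lower triangle/trapezoid of a and the
       reflectors defining Q are stored above it, with scales in tau. */
    free(tau);
    free(work);
    return (int) info;
}
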
Example #28
0
/* Subroutine */
int cgeqrt2_(integer *m, integer *n, complex *a, integer * lda, complex *t, integer *ldt, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, t_dim1, t_offset, i__1, i__2, i__3;
    complex q__1, q__2;
    /* Builtin functions */
    void r_cnjg(complex *, complex *);
    /* Local variables */
    integer i__, k;
    complex aii;
    extern /* Subroutine */
    int cgerc_(integer *, integer *, complex *, complex *, integer *, complex *, integer *, complex *, integer *);
    complex alpha;
    extern /* Subroutine */
    int cgemv_(char *, integer *, integer *, complex * , complex *, integer *, complex *, integer *, complex *, complex * , integer *), ctrmv_(char *, char *, char *, integer *, complex *, integer *, complex *, integer *), clarfg_(integer *, complex *, complex *, integer *, complex *), xerbla_(char *, integer *);
    /* -- LAPACK computational routine (version 3.4.2) -- */
    /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
    /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
    /* September 2012 */
    /* .. Scalar Arguments .. */
    /* .. */
    /* .. Array Arguments .. */
    /* .. */
    /* ===================================================================== */
    /* .. Parameters .. */
    /* .. */
    /* .. Local Scalars .. */
    /* .. */
    /* .. External Subroutines .. */
    /* .. */
    /* .. Executable Statements .. */
    /* Test the input arguments */
    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    t_dim1 = *ldt;
    t_offset = 1 + t_dim1;
    t -= t_offset;
    /* Function Body */
    *info = 0;
    if (*m < 0)
    {
        *info = -1;
    }
    else if (*n < 0)
    {
        *info = -2;
    }
    else if (*lda < max(1,*m))
    {
        *info = -4;
    }
    else if (*ldt < max(1,*n))
    {
        *info = -6;
    }
    if (*info != 0)
    {
        i__1 = -(*info);
        xerbla_("CGEQRT2", &i__1);
        return 0;
    }
    k = min(*m,*n);
    i__1 = k;
    for (i__ = 1;
            i__ <= i__1;
            ++i__)
    {
        /* Generate elem. refl. H(i) to annihilate A(i+1:m,i), tau(I) -> T(I,1) */
        i__2 = *m - i__ + 1;
        /* Computing MIN */
        i__3 = i__ + 1;
        clarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1], &c__1, &t[i__ + t_dim1]);
        if (i__ < *n)
        {
            /* Apply H(i) to A(I:M,I+1:N) from the left */
            i__2 = i__ + i__ * a_dim1;
            aii.r = a[i__2].r;
            aii.i = a[i__2].i; // , expr subst
            i__2 = i__ + i__ * a_dim1;
            a[i__2].r = 1.f;
            a[i__2].i = 0.f; // , expr subst
            /* W(1:N-I) := A(I:M,I+1:N)**H * A(I:M,I) [W = T(:,N)] */
            i__2 = *m - i__ + 1;
            i__3 = *n - i__;
            cgemv_("C", &i__2, &i__3, &c_b1, &a[i__ + (i__ + 1) * a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b2, &t[*n * t_dim1 + 1], &c__1);
            /* A(I:M,I+1:N) = A(I:M,I+1:N) + alpha*A(I:M,I)*W(1:N-I)**H */
            r_cnjg(&q__2, &t[i__ + t_dim1]);
            q__1.r = -q__2.r;
            q__1.i = -q__2.i; // , expr subst
            alpha.r = q__1.r;
            alpha.i = q__1.i; // , expr subst
            i__2 = *m - i__ + 1;
            i__3 = *n - i__;
            cgerc_(&i__2, &i__3, &alpha, &a[i__ + i__ * a_dim1], &c__1, &t[*n * t_dim1 + 1], &c__1, &a[i__ + (i__ + 1) * a_dim1], lda);
            i__2 = i__ + i__ * a_dim1;
            a[i__2].r = aii.r;
            a[i__2].i = aii.i; // , expr subst
        }
    }
    i__1 = *n;
    for (i__ = 2;
            i__ <= i__1;
            ++i__)
    {
        i__2 = i__ + i__ * a_dim1;
        aii.r = a[i__2].r;
        aii.i = a[i__2].i; // , expr subst
        i__2 = i__ + i__ * a_dim1;
        a[i__2].r = 1.f;
        a[i__2].i = 0.f; // , expr subst
        /* T(1:I-1,I) := alpha * A(I:M,1:I-1)**H * A(I:M,I) */
        i__2 = i__ + t_dim1;
        q__1.r = -t[i__2].r;
        q__1.i = -t[i__2].i; // , expr subst
        alpha.r = q__1.r;
        alpha.i = q__1.i; // , expr subst
        i__2 = *m - i__ + 1;
        i__3 = i__ - 1;
        cgemv_("C", &i__2, &i__3, &alpha, &a[i__ + a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b2, &t[i__ * t_dim1 + 1], &c__1);
        i__2 = i__ + i__ * a_dim1;
        a[i__2].r = aii.r;
        a[i__2].i = aii.i; // , expr subst
        /* T(1:I-1,I) := T(1:I-1,1:I-1) * T(1:I-1,I) */
        i__2 = i__ - 1;
        ctrmv_("U", "N", "N", &i__2, &t[t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1);
        /* T(I,I) = tau(I) */
        i__2 = i__ + i__ * t_dim1;
        i__3 = i__ + t_dim1;
        t[i__2].r = t[i__3].r;
        t[i__2].i = t[i__3].i; // , expr subst
        i__2 = i__ + t_dim1;
        t[i__2].r = 0.f;
        t[i__2].i = 0.f; // , expr subst
    }
    /* End of CGEQRT2 */
    return 0;
}
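
/* Illustrative sketch, not part of CGEQRT2: a minimal call of the routine
   above for the usual m >= n case, producing R in the upper triangle of a,
   the Householder vectors V below it, and the block-reflector factor T
   such that Q = I - V*T*V'.  Assumes the f2c.h types used in these
   examples; the wrapper name is hypothetical. */
#include <stdio.h>
#include <stdlib.h>

static int qr_compact_wy_sketch(integer m, integer n, complex *a, integer lda)
{
    integer info = 0;
    integer ldt = n > 1 ? n : 1;        /* LDT >= max(1,N) */
    complex *t = (complex *) malloc(sizeof(complex) * (size_t) (ldt * n));

    cgeqrt2_(&m, &n, a, &lda, t, &ldt, &info);
    if (info != 0)
        fprintf(stderr, "cgeqrt2_: argument %ld had an illegal value\n",
                (long) -info);

    free(t);
    return (int) info;
}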