Example #1
CAMLprim value LFUN(sqr_nrm2_stub)(
  value vSTABLE, value vN, value vOFSX, value vINCX, value vX)
{
  CAMLparam1(vX);

  integer GET_INT(N), GET_INT(INCX);
  REAL res;

  VEC_PARAMS(X);

  caml_enter_blocking_section();  /* Allow other threads */
  if (Bool_val(vSTABLE)) {
#ifndef LACAML_DOUBLE
    res = scnrm2_(&N, X_data, &INCX);
#else
    res = dznrm2_(&N, X_data, &INCX);
#endif
    res *= res;
  } else {
    COMPLEX cres = FUN(dotc)(&N, X_data, &INCX, X_data, &INCX);
    res = cres.r;
  }
  caml_leave_blocking_section();  /* Disallow other threads */

  CAMLreturn(caml_copy_double(res));
}
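
The stub above switches between two ways of obtaining the squared 2-norm of a complex vector: squaring the BLAS norm (scnrm2_/dznrm2_) when vSTABLE is set, and taking the real part of the conjugate dot product otherwise. Below is a minimal standalone C sketch of those two computations, not part of the stub itself: the scnrm2_ prototype is assumed to return float (gfortran-convention BLAS; an f2c-built library returns double instead), and the dot-product route is written out by hand rather than calling cdotc_, whose complex return convention differs across ABIs.

#include <stdio.h>

typedef int integer;
typedef struct { float r, i; } complexf;

/* Assumed prototype: single-precision complex 2-norm from the BLAS.
   (An f2c-built BLAS would declare this as returning double.) */
extern float scnrm2_(integer *n, complexf *x, integer *incx);

int main(void) {
    complexf x[3] = {{3.f, 4.f}, {0.f, 1.f}, {2.f, -2.f}};
    integer n = 3, incx = 1;

    /* "Stable" route, as in the vSTABLE branch: norm first, then square. */
    float nrm = scnrm2_(&n, x, &incx);
    float sqr_stable = nrm * nrm;

    /* Dot-product route: Re(x^H x), written out instead of calling cdotc_. */
    float sqr_dot = 0.f;
    for (integer i = 0; i < n; ++i)
        sqr_dot += x[i].r * x[i].r + x[i].i * x[i].i;

    printf("stable: %g  dotc-style: %g\n", sqr_stable, sqr_dot);
    return 0;
}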
Example #2
int toScalarQ(int code, KQVEC(x), FVEC(r)) {
    REQUIRES(rn==1,BAD_SIZE);
    DEBUGMSG("toScalarQ");
    float res;
    integer one = 1;
    integer n = xn;
    switch(code) {
        case 0: { res = scnrm2_(&n,xp,&one); break; }
        case 1: { res = scasum_(&n,xp,&one);  break; }
        default: ERROR(BAD_CODE);
    }
    rp[0] = res;
    OK
}
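
For context, code 0 selects scnrm2_ (Euclidean norm) and code 1 selects scasum_ (sum of |Re| + |Im| over the elements); KQVEC/FVEC, REQUIRES, ERROR and OK are hmatrix helper macros that evidently expand to length/pointer argument pairs (xn/xp, rn/rp) and error handling. A hedged, macro-free rendering of the same dispatch, under the same assumed Fortran BLAS symbols and float return convention, would look like this:

typedef int integer;
typedef struct { float r, i; } complexf;

extern float scnrm2_(integer *n, complexf *x, integer *incx);
extern float scasum_(integer *n, complexf *x, integer *incx);

/* code 0 -> 2-norm, code 1 -> asum; nonzero return stands in for ERROR(BAD_CODE). */
static int to_scalar_q(int code, complexf *xp, int xn, float *rp) {
    integer one = 1, n = xn;
    switch (code) {
        case 0: rp[0] = scnrm2_(&n, xp, &one); return 0;
        case 1: rp[0] = scasum_(&n, xp, &one); return 0;
        default: return -1;
    }
}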
Example #3
/* Subroutine */ int ctgsna_(char *job, char *howmny, logical *select, 
	integer *n, complex *a, integer *lda, complex *b, integer *ldb, 
	complex *vl, integer *ldvl, complex *vr, integer *ldvr, real *s, real 
	*dif, integer *mm, integer *m, complex *work, integer *lwork, integer 
	*iwork, integer *info)
{
/*  -- LAPACK routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   


    Purpose   
    =======   

    CTGSNA estimates reciprocal condition numbers for specified   
    eigenvalues and/or eigenvectors of a matrix pair (A, B).   

    (A, B) must be in generalized Schur canonical form, that is, A and   
    B are both upper triangular.   

    Arguments   
    =========   

    JOB     (input) CHARACTER*1   
            Specifies whether condition numbers are required for   
            eigenvalues (S) or eigenvectors (DIF):   
            = 'E': for eigenvalues only (S);   
            = 'V': for eigenvectors only (DIF);   
            = 'B': for both eigenvalues and eigenvectors (S and DIF).   

    HOWMNY  (input) CHARACTER*1   
            = 'A': compute condition numbers for all eigenpairs;   
            = 'S': compute condition numbers for selected eigenpairs   
                   specified by the array SELECT.   

    SELECT  (input) LOGICAL array, dimension (N)   
            If HOWMNY = 'S', SELECT specifies the eigenpairs for which   
            condition numbers are required. To select condition numbers   
            for the corresponding j-th eigenvalue and/or eigenvector,   
            SELECT(j) must be set to .TRUE..   
            If HOWMNY = 'A', SELECT is not referenced.   

    N       (input) INTEGER   
            The order of the square matrix pair (A, B). N >= 0.   

    A       (input) COMPLEX array, dimension (LDA,N)   
            The upper triangular matrix A in the pair (A,B).   

    LDA     (input) INTEGER   
            The leading dimension of the array A. LDA >= max(1,N).   

    B       (input) COMPLEX array, dimension (LDB,N)   
            The upper triangular matrix B in the pair (A, B).   

    LDB     (input) INTEGER   
            The leading dimension of the array B. LDB >= max(1,N).   

    VL      (input) COMPLEX array, dimension (LDVL,M)   
            If JOB = 'E' or 'B', VL must contain left eigenvectors of   
            (A, B), corresponding to the eigenpairs specified by HOWMNY   
            and SELECT.  The eigenvectors must be stored in consecutive   
            columns of VL, as returned by CTGEVC.   
            If JOB = 'V', VL is not referenced.   

    LDVL    (input) INTEGER   
            The leading dimension of the array VL. LDVL >= 1; and   
            If JOB = 'E' or 'B', LDVL >= N.   

    VR      (input) COMPLEX array, dimension (LDVR,M)   
            If JOB = 'E' or 'B', VR must contain right eigenvectors of   
            (A, B), corresponding to the eigenpairs specified by HOWMNY   
            and SELECT.  The eigenvectors must be stored in consecutive   
            columns of VR, as returned by CTGEVC.   
            If JOB = 'V', VR is not referenced.   

    LDVR    (input) INTEGER   
            The leading dimension of the array VR. LDVR >= 1;   
            If JOB = 'E' or 'B', LDVR >= N.   

    S       (output) REAL array, dimension (MM)   
            If JOB = 'E' or 'B', the reciprocal condition numbers of the   
            selected eigenvalues, stored in consecutive elements of the   
            array.   
            If JOB = 'V', S is not referenced.   

    DIF     (output) REAL array, dimension (MM)   
            If JOB = 'V' or 'B', the estimated reciprocal condition   
            numbers of the selected eigenvectors, stored in consecutive   
            elements of the array.   
            If the eigenvalues cannot be reordered to compute DIF(j),   
            DIF(j) is set to 0; this can only occur when the true value   
            would be very small anyway.   
            For each eigenvalue/vector specified by SELECT, DIF stores   
            a Frobenius norm-based estimate of Difl.   
            If JOB = 'E', DIF is not referenced.   

    MM      (input) INTEGER   
            The number of elements in the arrays S and DIF. MM >= M.   

    M       (output) INTEGER   
            The number of elements of the arrays S and DIF used to store   
            the specified condition numbers; for each selected eigenvalue   
            one element is used. If HOWMNY = 'A', M is set to N.   

    WORK    (workspace/output) COMPLEX array, dimension (LWORK)   
            If JOB = 'E', WORK is not referenced.  Otherwise,   
            on exit, if INFO = 0, WORK(1) returns the optimal LWORK.   

    LWORK  (input) INTEGER   
            The dimension of the array WORK. LWORK >= 1.   
            If JOB = 'V' or 'B', LWORK >= 2*N*N.   

    IWORK   (workspace) INTEGER array, dimension (N+2)   
            If JOB = 'E', IWORK is not referenced.   

    INFO    (output) INTEGER   
            = 0: Successful exit   
            < 0: If INFO = -i, the i-th argument had an illegal value   

    Further Details   
    ===============   

    The reciprocal of the condition number of the i-th generalized   
    eigenvalue w = (a, b) is defined as   

            S(I) = (|v'Au|**2 + |v'Bu|**2)**(1/2) / (norm(u)*norm(v))   

    where u and v are the right and left eigenvectors of (A, B)   
    corresponding to w; |z| denotes the absolute value of the complex   
    number, and norm(u) denotes the 2-norm of the vector u. The pair   
    (a, b) corresponds to an eigenvalue w = a/b (= v'Au/v'Bu) of the   
    matrix pair (A, B). If both a and b equal zero, then (A,B) is   
    singular and S(I) = -1 is returned.   

    An approximate error bound on the chordal distance between the i-th   
    computed generalized eigenvalue w and the corresponding exact   
    eigenvalue lambda is   

            chord(w, lambda) <=   EPS * norm(A, B) / S(I),   

    where EPS is the machine precision.   

    The reciprocal of the condition number of the right eigenvector u   
    and left eigenvector v corresponding to the generalized eigenvalue w   
    is defined as follows. Suppose   

                     (A, B) = ( a   *  ) ( b  *  )  1   
                              ( 0  A22 ),( 0 B22 )  n-1   
                                1  n-1     1 n-1   

    Then the reciprocal condition number DIF(I) is   

            Difl[(a, b), (A22, B22)]  = sigma-min( Zl )   

    where sigma-min(Zl) denotes the smallest singular value of   

           Zl = [ kron(a, In-1) -kron(1, A22) ]   
                [ kron(b, In-1) -kron(1, B22) ].   

    Here In-1 is the identity matrix of size n-1 and X' is the conjugate   
    transpose of X. kron(X, Y) is the Kronecker product between the   
    matrices X and Y.   

    We approximate the smallest singular value of Zl with an upper   
    bound. This is done by CLATDF.   

    An approximate error bound for a computed eigenvector VL(i) or   
    VR(i) is given by   

                        EPS * norm(A, B) / DIF(i).   

    See ref. [2-3] for more details and further references.   

    Based on contributions by   
       Bo Kagstrom and Peter Poromaa, Department of Computing Science,   
       Umea University, S-901 87 Umea, Sweden.   

    References   
    ==========   

    [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the   
        Generalized Real Schur Form of a Regular Matrix Pair (A, B), in   
        M.S. Moonen et al (eds), Linear Algebra for Large Scale and   
        Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218.   

    [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified   
        Eigenvalues of a Regular Matrix Pair (A, B) and Condition   
        Estimation: Theory, Algorithms and Software, Report   
        UMINF - 94.04, Department of Computing Science, Umea University,   
        S-901 87 Umea, Sweden, 1994. Also as LAPACK Working Note 87.   
        To appear in Numerical Algorithms, 1996.   

    [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software   
        for Solving the Generalized Sylvester Equation and Estimating the   
        Separation between Regular Matrix Pairs, Report UMINF - 93.23,   
        Department of Computing Science, Umea University, S-901 87 Umea,   
        Sweden, December 1993, Revised April 1994, Also as LAPACK Working   
        Note 75.   
        To appear in ACM Trans. on Math. Software, Vol 22, No 1, 1996.   

    =====================================================================   


       Decode and test the input parameters   

       Parameter adjustments */
    /* Table of constant values */
    static integer c__1 = 1;
    static complex c_b19 = {1.f,0.f};
    static complex c_b20 = {0.f,0.f};
    static logical c_false = FALSE_;
    static integer c__3 = 3;
    
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1, 
	    vr_offset, i__1, i__2;
    real r__1, r__2;
    complex q__1;
    /* Builtin functions */
    double c_abs(complex *);
    /* Local variables */
    static real cond;
    static integer ierr, ifst;
    static real lnrm;
    static complex yhax, yhbx;
    static integer ilst;
    static real rnrm;
    static integer i__, k;
    static real scale;
    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer 
	    *, complex *, integer *);
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
	    , complex *, integer *, complex *, integer *, complex *, complex *
	    , integer *);
    static integer lwmin;
    static logical wants;
    static integer llwrk, n1, n2;
    static complex dummy[1];
    extern doublereal scnrm2_(integer *, complex *, integer *), slapy2_(real *
	    , real *);
    static complex dummy1[1];
    extern /* Subroutine */ int slabad_(real *, real *);
    static integer ks;
    extern doublereal slamch_(char *);
    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex 
	    *, integer *, complex *, integer *), ctgexc_(logical *, 
	    logical *, integer *, complex *, integer *, complex *, integer *, 
	    complex *, integer *, complex *, integer *, integer *, integer *, 
	    integer *), xerbla_(char *, integer *);
    static real bignum;
    static logical wantbh, wantdf, somcon;
    extern /* Subroutine */ int ctgsyl_(char *, integer *, integer *, integer 
	    *, complex *, integer *, complex *, integer *, complex *, integer 
	    *, complex *, integer *, complex *, integer *, complex *, integer 
	    *, real *, real *, complex *, integer *, integer *, integer *);
    static real smlnum;
    static logical lquery;
    static real eps;
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]
#define vl_subscr(a_1,a_2) (a_2)*vl_dim1 + a_1
#define vl_ref(a_1,a_2) vl[vl_subscr(a_1,a_2)]
#define vr_subscr(a_1,a_2) (a_2)*vr_dim1 + a_1
#define vr_ref(a_1,a_2) vr[vr_subscr(a_1,a_2)]


    --select;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    vl_dim1 = *ldvl;
    vl_offset = 1 + vl_dim1 * 1;
    vl -= vl_offset;
    vr_dim1 = *ldvr;
    vr_offset = 1 + vr_dim1 * 1;
    vr -= vr_offset;
    --s;
    --dif;
    --work;
    --iwork;

    /* Function Body */
    wantbh = lsame_(job, "B");
    wants = lsame_(job, "E") || wantbh;
    wantdf = lsame_(job, "V") || wantbh;

    somcon = lsame_(howmny, "S");

    *info = 0;
    lquery = *lwork == -1;

    if (lsame_(job, "V") || lsame_(job, "B")) {
/* Computing MAX */
	i__1 = 1, i__2 = (*n << 1) * *n;
	lwmin = max(i__1,i__2);
    } else {
	lwmin = 1;
    }

    if (! wants && ! wantdf) {
	*info = -1;
    } else if (! lsame_(howmny, "A") && ! somcon) {
	*info = -2;
    } else if (*n < 0) {
	*info = -4;
    } else if (*lda < max(1,*n)) {
	*info = -6;
    } else if (*ldb < max(1,*n)) {
	*info = -8;
    } else if (wants && *ldvl < *n) {
	*info = -10;
    } else if (wants && *ldvr < *n) {
	*info = -12;
    } else {

/*        Set M to the number of eigenpairs for which condition numbers   
          are required, and test MM. */

	if (somcon) {
	    *m = 0;
	    i__1 = *n;
	    for (k = 1; k <= i__1; ++k) {
		if (select[k]) {
		    ++(*m);
		}
/* L10: */
	    }
	} else {
	    *m = *n;
	}

	if (*mm < *m) {
	    *info = -15;
	} else if (*lwork < lwmin && ! lquery) {
	    *info = -18;
	}
    }

    if (*info == 0) {
	work[1].r = (real) lwmin, work[1].i = 0.f;
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CTGSNA", &i__1);
	return 0;
    } else if (lquery) {
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }

/*     Get machine constants */

    eps = slamch_("P");
    smlnum = slamch_("S") / eps;
    bignum = 1.f / smlnum;
    slabad_(&smlnum, &bignum);
    llwrk = *lwork - (*n << 1) * *n;
    ks = 0;
    i__1 = *n;
    for (k = 1; k <= i__1; ++k) {

/*        Determine whether condition numbers are required for the k-th   
          eigenpair. */

	if (somcon) {
	    if (! select[k]) {
		goto L20;
	    }
	}

	++ks;

	if (wants) {

/*           Compute the reciprocal condition number of the k-th   
             eigenvalue. */
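/*           In the notation of Further Details above: RNRM = norm(u),
             LNRM = norm(v), YHAX = v'*A*u and YHBX = v'*B*u, so that
             S(ks) = sqrt(|v'Au|**2 + |v'Bu|**2) / (norm(u)*norm(v)),
             with SLAPY2 forming the square root of the sum of squares
             without unnecessary overflow. */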

	    rnrm = scnrm2_(n, &vr_ref(1, ks), &c__1);
	    lnrm = scnrm2_(n, &vl_ref(1, ks), &c__1);
	    cgemv_("N", n, n, &c_b19, &a[a_offset], lda, &vr_ref(1, ks), &
		    c__1, &c_b20, &work[1], &c__1);
	    cdotc_(&q__1, n, &work[1], &c__1, &vl_ref(1, ks), &c__1);
	    yhax.r = q__1.r, yhax.i = q__1.i;
	    cgemv_("N", n, n, &c_b19, &b[b_offset], ldb, &vr_ref(1, ks), &
		    c__1, &c_b20, &work[1], &c__1);
	    cdotc_(&q__1, n, &work[1], &c__1, &vl_ref(1, ks), &c__1);
	    yhbx.r = q__1.r, yhbx.i = q__1.i;
	    r__1 = c_abs(&yhax);
	    r__2 = c_abs(&yhbx);
	    cond = slapy2_(&r__1, &r__2);
	    if (cond == 0.f) {
		s[ks] = -1.f;
	    } else {
		s[ks] = cond / (rnrm * lnrm);
	    }
	}

	if (wantdf) {
	    if (*n == 1) {
		r__1 = c_abs(&a_ref(1, 1));
		r__2 = c_abs(&b_ref(1, 1));
		dif[ks] = slapy2_(&r__1, &r__2);
		goto L20;
	    }

/*           Estimate the reciprocal condition number of the k-th   
             eigenvectors.   

             Copy the matrix (A, B) to the array WORK and move the   
             (k,k)th pair to the (1,1) position. */

	    clacpy_("Full", n, n, &a[a_offset], lda, &work[1], n);
	    clacpy_("Full", n, n, &b[b_offset], ldb, &work[*n * *n + 1], n);
	    ifst = k;
	    ilst = 1;

	    ctgexc_(&c_false, &c_false, n, &work[1], n, &work[*n * *n + 1], n,
		     dummy, &c__1, dummy1, &c__1, &ifst, &ilst, &ierr);

	    if (ierr > 0) {

/*              Ill-conditioned problem - swap rejected. */

		dif[ks] = 0.f;
	    } else {

/*              Reordering successful, solve generalized Sylvester   
                equation for R and L,   
                           A22 * R - L * A11 = A12   
                           B22 * R - L * B11 = B12,   
                and compute estimate of Difl[(A11,B11), (A22, B22)]. */

		n1 = 1;
		n2 = *n - n1;
		i__ = *n * *n + 1;
		ctgsyl_("N", &c__3, &n2, &n1, &work[*n * n1 + n1 + 1], n, &
			work[1], n, &work[n1 + 1], n, &work[*n * n1 + n1 + 
			i__], n, &work[i__], n, &work[n1 + i__], n, &scale, &
			dif[ks], &work[(*n * *n << 1) + 1], &llwrk, &iwork[1],
			 &ierr);
	    }
	}

L20:
	;
    }
    work[1].r = (real) lwmin, work[1].i = 0.f;
    return 0;

/*     End of CTGSNA */

} /* ctgsna_ */
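
A hypothetical standalone caller is sketched below (it is not part of the translated LAPACK source): it asks only for eigenvector condition numbers (JOB = 'V', HOWMNY = 'A'), so VL, VR, S and SELECT are never referenced. The f2c-style typedefs stand in for f2c.h, the 2-by-2 upper triangular pair is made up, and the workspace is sized slightly above the documented 2*N*N minimum.

#include <stdio.h>

typedef int integer;
typedef int logical;
typedef float real;
typedef struct { real r, i; } complex;

extern int ctgsna_(char *job, char *howmny, logical *select, integer *n,
                   complex *a, integer *lda, complex *b, integer *ldb,
                   complex *vl, integer *ldvl, complex *vr, integer *ldvr,
                   real *s, real *dif, integer *mm, integer *m,
                   complex *work, integer *lwork, integer *iwork,
                   integer *info);

int main(void) {
    integer n = 2, lda = 2, ldb = 2, ldvl = 1, ldvr = 1;
    integer mm = 2, m = 0, lwork = 16, info = 0;
    /* (A, B) already in generalized Schur form (both upper triangular),
       stored column-major as in Fortran. */
    complex a[4] = {{2.f,0.f}, {0.f,0.f}, {1.f,0.f}, {3.f,0.f}};
    complex b[4] = {{1.f,0.f}, {0.f,0.f}, {0.f,0.f}, {1.f,0.f}};
    complex vl[1], vr[1];          /* not referenced when JOB = 'V' */
    complex work[16];              /* LWORK >= 2*N*N for JOB = 'V' */
    real s[2], dif[2];
    integer iwork[4];              /* dimension N+2 */
    logical select[2] = {1, 1};    /* not referenced when HOWMNY = 'A' */

    ctgsna_("V", "A", select, &n, a, &lda, b, &ldb, vl, &ldvl, vr, &ldvr,
            s, dif, &mm, &m, work, &lwork, iwork, &info);

    if (info == 0)
        printf("m = %d, DIF = (%g, %g)\n", (int) m, dif[0], dif[1]);
    return 0;
}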
Example #4
/* Subroutine */ int cgeevx_(char *balanc, char *jobvl, char *jobvr, char *
	sense, integer *n, complex *a, integer *lda, complex *w, complex *vl, 
	integer *ldvl, complex *vr, integer *ldvr, integer *ilo, integer *ihi, 
	 real *scale, real *abnrm, real *rconde, real *rcondv, complex *work, 
	integer *lwork, real *rwork, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1, 
	    i__2, i__3;
    real r__1, r__2;
    complex q__1, q__2;

    /* Local variables */
    integer i__, k;
    char job[1];
    real scl, dum[1], eps;
    complex tmp;
    char side[1];
    real anrm;
    integer ierr, itau, iwrk, nout;
    integer icond;
    logical scalea;
    real cscale;
    logical select[1];
    real bignum;
    integer minwrk, maxwrk;
    logical wantvl, wntsnb;
    integer hswork;
    logical wntsne;
    real smlnum;
    logical lquery, wantvr, wntsnn, wntsnv;

/*  -- LAPACK driver routine (version 3.2) -- */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  CGEEVX computes for an N-by-N complex nonsymmetric matrix A, the */
/*  eigenvalues and, optionally, the left and/or right eigenvectors. */

/*  Optionally also, it computes a balancing transformation to improve */
/*  the conditioning of the eigenvalues and eigenvectors (ILO, IHI, */
/*  SCALE, and ABNRM), reciprocal condition numbers for the eigenvalues */
/*  (RCONDE), and reciprocal condition numbers for the right */
/*  eigenvectors (RCONDV). */

/*  The right eigenvector v(j) of A satisfies */
/*                   A * v(j) = lambda(j) * v(j) */
/*  where lambda(j) is its eigenvalue. */
/*  The left eigenvector u(j) of A satisfies */
/*                u(j)**H * A = lambda(j) * u(j)**H */
/*  where u(j)**H denotes the conjugate transpose of u(j). */

/*  The computed eigenvectors are normalized to have Euclidean norm */
/*  equal to 1 and largest component real. */

/*  Balancing a matrix means permuting the rows and columns to make it */
/*  more nearly upper triangular, and applying a diagonal similarity */
/*  transformation D * A * D**(-1), where D is a diagonal matrix, to */
/*  make its rows and columns closer in norm and the condition numbers */
/*  of its eigenvalues and eigenvectors smaller.  The computed */
/*  reciprocal condition numbers correspond to the balanced matrix. */
/*  Permuting rows and columns will not change the condition numbers */
/*  (in exact arithmetic) but diagonal scaling will.  For further */
/*  explanation of balancing, see section 4.10.2 of the LAPACK */
/*  Users' Guide. */

/*  Arguments */
/*  ========= */

/*  BALANC  (input) CHARACTER*1 */
/*          Indicates how the input matrix should be diagonally scaled */
/*          and/or permuted to improve the conditioning of its */
/*          eigenvalues. */
/*          = 'N': Do not diagonally scale or permute; */
/*          = 'P': Perform permutations to make the matrix more nearly */
/*                 upper triangular. Do not diagonally scale; */
/*          = 'S': Diagonally scale the matrix, ie. replace A by */
/*                 D*A*D**(-1), where D is a diagonal matrix chosen */
/*                 to make the rows and columns of A more equal in */
/*                 norm. Do not permute; */
/*          = 'B': Both diagonally scale and permute A. */

/*          Computed reciprocal condition numbers will be for the matrix */
/*          after balancing and/or permuting. Permuting does not change */
/*          condition numbers (in exact arithmetic), but balancing does. */

/*  JOBVL   (input) CHARACTER*1 */
/*          = 'N': left eigenvectors of A are not computed; */
/*          = 'V': left eigenvectors of A are computed. */
/*          If SENSE = 'E' or 'B', JOBVL must = 'V'. */

/*  JOBVR   (input) CHARACTER*1 */
/*          = 'N': right eigenvectors of A are not computed; */
/*          = 'V': right eigenvectors of A are computed. */
/*          If SENSE = 'E' or 'B', JOBVR must = 'V'. */

/*  SENSE   (input) CHARACTER*1 */
/*          Determines which reciprocal condition numbers are computed. */
/*          = 'N': None are computed; */
/*          = 'E': Computed for eigenvalues only; */
/*          = 'V': Computed for right eigenvectors only; */
/*          = 'B': Computed for eigenvalues and right eigenvectors. */

/*          If SENSE = 'E' or 'B', both left and right eigenvectors */
/*          must also be computed (JOBVL = 'V' and JOBVR = 'V'). */

/*  N       (input) INTEGER */
/*          The order of the matrix A. N >= 0. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the N-by-N matrix A. */
/*          On exit, A has been overwritten.  If JOBVL = 'V' or */
/*          JOBVR = 'V', A contains the Schur form of the balanced */
/*          version of the matrix A. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,N). */

/*  W       (output) COMPLEX array, dimension (N) */
/*          W contains the computed eigenvalues. */

/*  VL      (output) COMPLEX array, dimension (LDVL,N) */
/*          If JOBVL = 'V', the left eigenvectors u(j) are stored one */
/*          after another in the columns of VL, in the same order */
/*          as their eigenvalues. */
/*          If JOBVL = 'N', VL is not referenced. */
/*          u(j) = VL(:,j), the j-th column of VL. */

/*  LDVL    (input) INTEGER */
/*          The leading dimension of the array VL.  LDVL >= 1; if */
/*          JOBVL = 'V', LDVL >= N. */

/*  VR      (output) COMPLEX array, dimension (LDVR,N) */
/*          If JOBVR = 'V', the right eigenvectors v(j) are stored one */
/*          after another in the columns of VR, in the same order */
/*          as their eigenvalues. */
/*          If JOBVR = 'N', VR is not referenced. */
/*          v(j) = VR(:,j), the j-th column of VR. */

/*  LDVR    (input) INTEGER */
/*          The leading dimension of the array VR.  LDVR >= 1; if */
/*          JOBVR = 'V', LDVR >= N. */

/*  ILO     (output) INTEGER */
/*  IHI     (output) INTEGER */
/*          ILO and IHI are integer values determined when A was */
/*          balanced.  The balanced A(i,j) = 0 if I > J and */
/*          J = 1,...,ILO-1 or I = IHI+1,...,N. */

/*  SCALE   (output) REAL array, dimension (N) */
/*          Details of the permutations and scaling factors applied */
/*          when balancing A.  If P(j) is the index of the row and column */
/*          interchanged with row and column j, and D(j) is the scaling */
/*          factor applied to row and column j, then */
/*          SCALE(J) = P(J),    for J = 1,...,ILO-1 */
/*                   = D(J),    for J = ILO,...,IHI */
/*                   = P(J)     for J = IHI+1,...,N. */
/*          The order in which the interchanges are made is N to IHI+1, */
/*          then 1 to ILO-1. */

/*  ABNRM   (output) REAL */
/*          The one-norm of the balanced matrix (the maximum */
/*          of the sum of absolute values of elements of any column). */

/*  RCONDE  (output) REAL array, dimension (N) */
/*          RCONDE(j) is the reciprocal condition number of the j-th */
/*          eigenvalue. */

/*  RCONDV  (output) REAL array, dimension (N) */
/*          RCONDV(j) is the reciprocal condition number of the j-th */
/*          right eigenvector. */

/*  WORK    (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */

/*  LWORK   (input) INTEGER */
/*          The dimension of the array WORK.  If SENSE = 'N' or 'E', */
/*          LWORK >= max(1,2*N), and if SENSE = 'V' or 'B', */
/*          LWORK >= N*N+2*N. */
/*          For good performance, LWORK must generally be larger. */

/*          If LWORK = -1, then a workspace query is assumed; the routine */
/*          only calculates the optimal size of the WORK array, returns */
/*          this value as the first entry of the WORK array, and no error */
/*          message related to LWORK is issued by XERBLA. */

/*  RWORK   (workspace) REAL array, dimension (2*N) */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
/*          > 0:  if INFO = i, the QR algorithm failed to compute all the */
/*                eigenvalues, and no eigenvectors or condition numbers */
/*                have been computed; elements 1:ILO-1 and i+1:N of W */
/*                contain eigenvalues which have converged. */

/*  ===================================================================== */

/*     Test the input arguments */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --w;
    vl_dim1 = *ldvl;
    vl_offset = 1 + vl_dim1;
    vl -= vl_offset;
    vr_dim1 = *ldvr;
    vr_offset = 1 + vr_dim1;
    vr -= vr_offset;
    --scale;
    --rconde;
    --rcondv;
    --work;
    --rwork;

    /* Function Body */
    *info = 0;
    lquery = *lwork == -1;
    wantvl = lsame_(jobvl, "V");
    wantvr = lsame_(jobvr, "V");
    wntsnn = lsame_(sense, "N");
    wntsne = lsame_(sense, "E");
    wntsnv = lsame_(sense, "V");
    wntsnb = lsame_(sense, "B");
    if (! (lsame_(balanc, "N") || lsame_(balanc, "S") || lsame_(balanc, "P") 
	    || lsame_(balanc, "B"))) {
	*info = -1;
    } else if (! wantvl && ! lsame_(jobvl, "N")) {
	*info = -2;
    } else if (! wantvr && ! lsame_(jobvr, "N")) {
	*info = -3;
    } else if (! (wntsnn || wntsne || wntsnb || wntsnv) || (wntsne || wntsnb) 
	    && ! (wantvl && wantvr)) {
	*info = -4;
    } else if (*n < 0) {
	*info = -5;
    } else if (*lda < max(1,*n)) {
	*info = -7;
    } else if (*ldvl < 1 || wantvl && *ldvl < *n) {
	*info = -10;
    } else if (*ldvr < 1 || wantvr && *ldvr < *n) {
	*info = -12;
    }

/*     Compute workspace */
/*      (Note: Comments in the code beginning "Workspace:" describe the */
/*       minimal amount of workspace needed at that point in the code, */
/*       as well as the preferred amount for good performance. */
/*       CWorkspace refers to complex workspace, and RWorkspace to real */
/*       workspace. NB refers to the optimal block size for the */
/*       immediately following subroutine, as returned by ILAENV. */
/*       HSWORK refers to the workspace preferred by CHSEQR, as */
/*       calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
/*       the worst case.) */

    if (*info == 0) {
	if (*n == 0) {
	    minwrk = 1;
	    maxwrk = 1;
	} else {
	    maxwrk = *n + *n * ilaenv_(&c__1, "CGEHRD", " ", n, &c__1, n, &
		    c__0);

	    if (wantvl) {
		chseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &w[1], &vl[
			vl_offset], ldvl, &work[1], &c_n1, info);
	    } else if (wantvr) {
		chseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &w[1], &vr[
			vr_offset], ldvr, &work[1], &c_n1, info);
	    } else {
		if (wntsnn) {
		    chseqr_("E", "N", n, &c__1, n, &a[a_offset], lda, &w[1], &
			    vr[vr_offset], ldvr, &work[1], &c_n1, info);
		} else {
		    chseqr_("S", "N", n, &c__1, n, &a[a_offset], lda, &w[1], &
			    vr[vr_offset], ldvr, &work[1], &c_n1, info);
		}
	    }
	    hswork = work[1].r;

	    if (! wantvl && ! wantvr) {
		minwrk = *n << 1;
		if (! (wntsnn || wntsne)) {
/* Computing MAX */
		    i__1 = minwrk, i__2 = *n * *n + (*n << 1);
		    minwrk = max(i__1,i__2);
		}
		maxwrk = max(maxwrk,hswork);
		if (! (wntsnn || wntsne)) {
/* Computing MAX */
		    i__1 = maxwrk, i__2 = *n * *n + (*n << 1);
		    maxwrk = max(i__1,i__2);
		}
	    } else {
		minwrk = *n << 1;
		if (! (wntsnn || wntsne)) {
/* Computing MAX */
		    i__1 = minwrk, i__2 = *n * *n + (*n << 1);
		    minwrk = max(i__1,i__2);
		}
		maxwrk = max(maxwrk,hswork);
/* Computing MAX */
		i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "CUNGHR", 
			 " ", n, &c__1, n, &c_n1);
		maxwrk = max(i__1,i__2);
		if (! (wntsnn || wntsne)) {
/* Computing MAX */
		    i__1 = maxwrk, i__2 = *n * *n + (*n << 1);
		    maxwrk = max(i__1,i__2);
		}
/* Computing MAX */
		i__1 = maxwrk, i__2 = *n << 1;
		maxwrk = max(i__1,i__2);
	    }
	    maxwrk = max(maxwrk,minwrk);
	}
	work[1].r = (real) maxwrk, work[1].i = 0.f;

	if (*lwork < minwrk && ! lquery) {
	    *info = -20;
	}
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGEEVX", &i__1);
	return 0;
    } else if (lquery) {
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }

/*     Get machine constants */

    eps = slamch_("P");
    smlnum = slamch_("S");
    bignum = 1.f / smlnum;
    slabad_(&smlnum, &bignum);
    smlnum = sqrt(smlnum) / eps;
    bignum = 1.f / smlnum;

/*     Scale A if max element outside range [SMLNUM,BIGNUM] */

    icond = 0;
    anrm = clange_("M", n, n, &a[a_offset], lda, dum);
    scalea = FALSE_;
    if (anrm > 0.f && anrm < smlnum) {
	scalea = TRUE_;
	cscale = smlnum;
    } else if (anrm > bignum) {
	scalea = TRUE_;
	cscale = bignum;
    }
    if (scalea) {
	clascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
		ierr);
    }

/*     Balance the matrix and compute ABNRM */

    cgebal_(balanc, n, &a[a_offset], lda, ilo, ihi, &scale[1], &ierr);
    *abnrm = clange_("1", n, n, &a[a_offset], lda, dum);
    if (scalea) {
	dum[0] = *abnrm;
	slascl_("G", &c__0, &c__0, &cscale, &anrm, &c__1, &c__1, dum, &c__1, &
		ierr);
	*abnrm = dum[0];
    }

/*     Reduce to upper Hessenberg form */
/*     (CWorkspace: need 2*N, prefer N+N*NB) */
/*     (RWorkspace: none) */

    itau = 1;
    iwrk = itau + *n;
    i__1 = *lwork - iwrk + 1;
    cgehrd_(n, ilo, ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, &
	    ierr);

    if (wantvl) {

/*        Want left eigenvectors */
/*        Copy Householder vectors to VL */

	*(unsigned char *)side = 'L';
	clacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl);

/*        Generate unitary matrix in VL */
/*        (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
/*        (RWorkspace: none) */

	i__1 = *lwork - iwrk + 1;
	cunghr_(n, ilo, ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk], &
		i__1, &ierr);

/*        Perform QR iteration, accumulating Schur vectors in VL */
/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
/*        (RWorkspace: none) */

	iwrk = itau;
	i__1 = *lwork - iwrk + 1;
	chseqr_("S", "V", n, ilo, ihi, &a[a_offset], lda, &w[1], &vl[
		vl_offset], ldvl, &work[iwrk], &i__1, info);

	if (wantvr) {

/*           Want left and right eigenvectors */
/*           Copy Schur vectors to VR */

	    *(unsigned char *)side = 'B';
	    clacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
	}

    } else if (wantvr) {

/*        Want right eigenvectors */
/*        Copy Householder vectors to VR */

	*(unsigned char *)side = 'R';
	clacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr);

/*        Generate unitary matrix in VR */
/*        (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
/*        (RWorkspace: none) */

	i__1 = *lwork - iwrk + 1;
	cunghr_(n, ilo, ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk], &
		i__1, &ierr);

/*        Perform QR iteration, accumulating Schur vectors in VR */
/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
/*        (RWorkspace: none) */

	iwrk = itau;
	i__1 = *lwork - iwrk + 1;
	chseqr_("S", "V", n, ilo, ihi, &a[a_offset], lda, &w[1], &vr[
		vr_offset], ldvr, &work[iwrk], &i__1, info);

    } else {

/*        Compute eigenvalues only */
/*        If condition numbers desired, compute Schur form */

	if (wntsnn) {
	    *(unsigned char *)job = 'E';
	} else {
	    *(unsigned char *)job = 'S';
	}

/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
/*        (RWorkspace: none) */

	iwrk = itau;
	i__1 = *lwork - iwrk + 1;
	chseqr_(job, "N", n, ilo, ihi, &a[a_offset], lda, &w[1], &vr[
		vr_offset], ldvr, &work[iwrk], &i__1, info);
    }

/*     If INFO > 0 from CHSEQR, then quit */

    if (*info > 0) {
	goto L50;
    }

    if (wantvl || wantvr) {

/*        Compute left and/or right eigenvectors */
/*        (CWorkspace: need 2*N) */
/*        (RWorkspace: need N) */

	ctrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl, 
		 &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &rwork[1], &
		ierr);
    }

/*     Compute condition numbers if desired */
/*     (CWorkspace: need N*N+2*N unless SENSE = 'E') */
/*     (RWorkspace: need 2*N unless SENSE = 'E') */

    if (! wntsnn) {
	ctrsna_(sense, "A", select, n, &a[a_offset], lda, &vl[vl_offset], 
		ldvl, &vr[vr_offset], ldvr, &rconde[1], &rcondv[1], n, &nout, 
		&work[iwrk], n, &rwork[1], &icond);
    }

    if (wantvl) {

/*        Undo balancing of left eigenvectors */

	cgebak_(balanc, "L", n, ilo, ihi, &scale[1], n, &vl[vl_offset], ldvl, 
		&ierr);

/*        Normalize left eigenvectors and make largest component real */
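/*        (Each column is scaled to unit Euclidean norm by CSSCAL; ISAMAX */
/*        then locates its entry of largest modulus, and CSCAL multiplies */
/*        the column by conj(v(k))/|v(k)| so that entry becomes real and */
/*        nonnegative.  The right-eigenvector loop below does the same.) */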

	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    scl = 1.f / scnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
	    csscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
	    i__2 = *n;
	    for (k = 1; k <= i__2; ++k) {
		i__3 = k + i__ * vl_dim1;
/* Computing 2nd power */
		r__1 = vl[i__3].r;
/* Computing 2nd power */
		r__2 = r_imag(&vl[k + i__ * vl_dim1]);
		rwork[k] = r__1 * r__1 + r__2 * r__2;
	    }
	    k = isamax_(n, &rwork[1], &c__1);
	    r_cnjg(&q__2, &vl[k + i__ * vl_dim1]);
	    r__1 = sqrt(rwork[k]);
	    q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
	    tmp.r = q__1.r, tmp.i = q__1.i;
	    cscal_(n, &tmp, &vl[i__ * vl_dim1 + 1], &c__1);
	    i__2 = k + i__ * vl_dim1;
	    i__3 = k + i__ * vl_dim1;
	    r__1 = vl[i__3].r;
	    q__1.r = r__1, q__1.i = 0.f;
	    vl[i__2].r = q__1.r, vl[i__2].i = q__1.i;
	}
    }

    if (wantvr) {

/*        Undo balancing of right eigenvectors */

	cgebak_(balanc, "R", n, ilo, ihi, &scale[1], n, &vr[vr_offset], ldvr, 
		&ierr);

/*        Normalize right eigenvectors and make largest component real */

	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    scl = 1.f / scnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
	    csscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
	    i__2 = *n;
	    for (k = 1; k <= i__2; ++k) {
		i__3 = k + i__ * vr_dim1;
/* Computing 2nd power */
		r__1 = vr[i__3].r;
/* Computing 2nd power */
		r__2 = r_imag(&vr[k + i__ * vr_dim1]);
		rwork[k] = r__1 * r__1 + r__2 * r__2;
	    }
	    k = isamax_(n, &rwork[1], &c__1);
	    r_cnjg(&q__2, &vr[k + i__ * vr_dim1]);
	    r__1 = sqrt(rwork[k]);
	    q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
	    tmp.r = q__1.r, tmp.i = q__1.i;
	    cscal_(n, &tmp, &vr[i__ * vr_dim1 + 1], &c__1);
	    i__2 = k + i__ * vr_dim1;
	    i__3 = k + i__ * vr_dim1;
	    r__1 = vr[i__3].r;
	    q__1.r = r__1, q__1.i = 0.f;
	    vr[i__2].r = q__1.r, vr[i__2].i = q__1.i;
	}
    }

/*     Undo scaling if necessary */

L50:
    if (scalea) {
	i__1 = *n - *info;
/* Computing MAX */
	i__3 = *n - *info;
	i__2 = max(i__3,1);
	clascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[*info + 1],
		&i__2, &ierr);
	if (*info == 0) {
	    if ((wntsnv || wntsnb) && icond == 0) {
		slascl_("G", &c__0, &c__0, &cscale, &anrm, n, &c__1, &rcondv[
			1], n, &ierr);
	    }
	} else {
	    i__1 = *ilo - 1;
	    clascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[1], n, 
		     &ierr);
	}
    }

    work[1].r = (real) maxwrk, work[1].i = 0.f;
    return 0;

/*     End of CGEEVX */

} /* cgeevx_ */
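
A hypothetical caller is sketched below (again, not part of the translated source): it performs the LWORK = -1 workspace query described above, then calls cgeevx_ on a made-up 2-by-2 matrix with eigenvalues +i and -i, requesting balancing and both condition-number families (SENSE = 'B' requires JOBVL = JOBVR = 'V'). The f2c-style typedefs are assumed stand-ins for f2c.h; real code would allocate WORK to the queried size instead of capping it against a static buffer.

#include <stdio.h>

typedef int integer;
typedef float real;
typedef struct { real r, i; } complex;

extern int cgeevx_(char *balanc, char *jobvl, char *jobvr, char *sense,
                   integer *n, complex *a, integer *lda, complex *w,
                   complex *vl, integer *ldvl, complex *vr, integer *ldvr,
                   integer *ilo, integer *ihi, real *scale, real *abnrm,
                   real *rconde, real *rcondv, complex *work, integer *lwork,
                   real *rwork, integer *info);

int main(void) {
    integer n = 2, lda = 2, ldvl = 2, ldvr = 2, ilo, ihi, info, lwork;
    /* Column-major [[0, -1], [1, 0]], eigenvalues +i and -i. */
    complex a[4] = {{0.f,0.f}, {1.f,0.f}, {-1.f,0.f}, {0.f,0.f}};
    complex w[2], vl[4], vr[4], work[64], wkopt;
    real scale[2], abnrm, rconde[2], rcondv[2], rwork[4];   /* RWORK: 2*N */

    /* Workspace query: LWORK = -1 returns the optimal size in WORK(1). */
    lwork = -1;
    cgeevx_("B", "V", "V", "B", &n, a, &lda, w, vl, &ldvl, vr, &ldvr,
            &ilo, &ihi, scale, &abnrm, rconde, rcondv, &wkopt, &lwork,
            rwork, &info);

    lwork = (integer) wkopt.r;
    if (lwork > 64) lwork = 64;   /* keep the static buffer; malloc in real code */

    cgeevx_("B", "V", "V", "B", &n, a, &lda, w, vl, &ldvl, vr, &ldvr,
            &ilo, &ihi, scale, &abnrm, rconde, rcondv, work, &lwork,
            rwork, &info);

    if (info == 0)
        printf("lambda(1) = %g%+gi  rconde(1) = %g  rcondv(1) = %g\n",
               w[0].r, w[0].i, rconde[0], rcondv[0]);
    return 0;
}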
Example #5
/* DECK CDSTP */
/* Subroutine */ int cdstp_(real *eps, S_fp f, U_fp fa, real *hmax, integer *
	impl, integer *ierror, U_fp jacobn, integer *matdim, integer *maxord, 
	integer *mint, integer *miter, integer *ml, integer *mu, integer *n, 
	integer *nde, complex *ywt, real *uround, U_fp users, real *avgh, 
	real *avgord, real *h__, real *hused, integer *jtask, integer *mntold,
	 integer *mtrold, integer *nfe, integer *nje, integer *nqused, 
	integer *nstep, real *t, complex *y, complex *yh, complex *a, logical 
	*convrg, complex *dfdy, real *el, complex *fac, real *hold, integer *
	ipvt, integer *jstate, integer *jstepl, integer *nq, integer *nwait, 
	real *rc, real *rmax, complex *save1, complex *save2, real *tq, real *
	trend, integer *iswflg, integer *mtrsv, integer *mxrdsv)
{
    /* Initialized data */

    static logical ier = FALSE_;

    /* System generated locals */
    integer a_dim1, a_offset, dfdy_dim1, dfdy_offset, yh_dim1, yh_offset, 
	    i__1, i__2, i__3, i__4, i__5, i__6;
    real r__1, r__2, r__3;
    doublereal d__1, d__2;
    complex q__1, q__2;

    /* Local variables */
    static real d__;
    static integer i__, j;
    static real d1, hn, rh, hs, rh1, rh2, rh3, bnd;
    static integer nsv;
    static real erdn, told;
    static integer iter;
    static real erup;
    static integer ntry;
    static real y0nrm;
    extern /* Subroutine */ int cdscl_(real *, integer *, integer *, real *, 
	    real *, real *, real *, complex *);
    static integer nfail;
    extern /* Subroutine */ int cdcor_(complex *, real *, U_fp, real *, 
	    integer *, integer *, integer *, integer *, integer *, integer *, 
	    integer *, integer *, integer *, integer *, real *, U_fp, complex 
	    *, complex *, complex *, logical *, complex *, complex *, complex 
	    *, real *, integer *), cdpsc_(integer *, integer *, integer *, 
	    complex *), cdcst_(integer *, integer *, integer *, real *, real *
	    );
    static real denom;
    extern /* Subroutine */ int cdntl_(real *, S_fp, U_fp, real *, real *, 
	    integer *, integer *, integer *, integer *, integer *, integer *, 
	    integer *, integer *, integer *, integer *, complex *, real *, 
	    real *, U_fp, complex *, complex *, real *, integer *, integer *, 
	    integer *, real *, complex *, complex *, logical *, real *, 
	    complex *, logical *, integer *, integer *, integer *, real *, 
	    real *, complex *, real *, real *, integer *, integer *), cdpst_(
	    real *, S_fp, U_fp, real *, integer *, U_fp, integer *, integer *,
	     integer *, integer *, integer *, integer *, integer *, complex *,
	     real *, U_fp, complex *, complex *, complex *, real *, integer *,
	     integer *, complex *, complex *, complex *, logical *, integer *,
	     complex *, integer *, real *, integer *);
    static real ctest, etest, numer;
    extern doublereal scnrm2_(integer *, complex *, integer *);
    static logical evalfa, evaljc, switch__;

/* ***BEGIN PROLOGUE  CDSTP */
/* ***SUBSIDIARY */
/* ***PURPOSE  CDSTP performs one step of the integration of an initial */
/*            value problem for a system of ordinary differential */
/*            equations. */
/* ***LIBRARY   SLATEC (SDRIVE) */
/* ***TYPE      COMPLEX (SDSTP-S, DDSTP-D, CDSTP-C) */
/* ***AUTHOR  Kahaner, D. K., (NIST) */
/*             National Institute of Standards and Technology */
/*             Gaithersburg, MD  20899 */
/*           Sutherland, C. D., (LANL) */
/*             Mail Stop D466 */
/*             Los Alamos National Laboratory */
/*             Los Alamos, NM  87545 */
/* ***DESCRIPTION */

/*  Communication with CDSTP is done with the following variables: */

/*    YH      An N by MAXORD+1 array containing the dependent variables */
/*              and their scaled derivatives.  MAXORD, the maximum order */
/*              used, is currently 12 for the Adams methods and 5 for the */
/*              Gear methods.  YH(I,J+1) contains the J-th derivative of */
/*              Y(I), scaled by H**J/factorial(J).  Only Y(I), */
/*              1 .LE. I .LE. N, need be set by the calling program on */
/*              the first entry.  The YH array should not be altered by */
/*              the calling program.  When referencing YH as a */
/*              2-dimensional array, use a column length of N, as this is */
/*              the value used in CDSTP. */
/*    DFDY    A block of locations used for partial derivatives if MITER */
/*              is not 0.  If MITER is 1 or 2 its length must be at least */
/*              N*N.  If MITER is 4 or 5 its length must be at least */
/*              (2*ML+MU+1)*N. */
/*    YWT     An array of N locations used in convergence and error tests */
/*    SAVE1 */
/*    SAVE2   Arrays of length N used for temporary storage. */
/*    IPVT    An integer array of length N used by the linear system */
/*              solvers for the storage of row interchange information. */
/*    A       A block of locations used to store the matrix A, when using */
/*              the implicit method.  If IMPL is 1, A is a MATDIM by N */
/*              array.  If MITER is 1 or 2 MATDIM is N, and if MITER is 4 */
/*              or 5 MATDIM is 2*ML+MU+1.  If IMPL is 2 its length is N. */
/*              If IMPL is 3, A is a MATDIM by NDE array. */
/*    JTASK   An integer used on input. */
/*              It has the following values and meanings: */
/*                 .EQ. 0  Perform the first step.  This value enables */
/*                         the subroutine to initialize itself. */
/*                .GT. 0  Take a new step continuing from the last. */
/*                         Assumes the last step was successful and */
/*                         user has not changed any parameters. */
/*                 .LT. 0  Take a new step with a new value of H and/or */
/*                         MINT and/or MITER. */
/*    JSTATE  A completion code with the following meanings: */
/*                1  The step was successful. */
/*                2  A solution could not be obtained with H .NE. 0. */
/*                3  A solution was not obtained in MXTRY attempts. */
/*                4  For IMPL .NE. 0, the matrix A is singular. */
/*              On a return with JSTATE .GT. 1, the values of T and */
/*              the YH array are as of the beginning of the last */
/*              step, and H is the last step size attempted. */

/* ***ROUTINES CALLED  CDCOR, CDCST, CDNTL, CDPSC, CDPST, CDSCL, SCNRM2 */
/* ***REVISION HISTORY  (YYMMDD) */
/*   790601  DATE WRITTEN */
/*   900329  Initial submission to SLATEC. */
/* ***END PROLOGUE  CDSTP */
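/*  For reference, per the description above: column J+1 of YH holds */
/*  (H**J/factorial(J)) times the J-th derivative of Y, i.e. YH is a */
/*  Nordsieck-style history array; the prediction step (CDPSC) and the */
/*  corrector updates below operate directly on these scaled derivatives. */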
    /* Parameter adjustments */
    dfdy_dim1 = *matdim;
    dfdy_offset = 1 + dfdy_dim1;
    dfdy -= dfdy_offset;
    a_dim1 = *matdim;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    yh_dim1 = *n;
    yh_offset = 1 + yh_dim1;
    yh -= yh_offset;
    --ywt;
    --y;
    el -= 14;
    --fac;
    --ipvt;
    --save1;
    --save2;
    tq -= 4;

    /* Function Body */
/* ***FIRST EXECUTABLE STATEMENT  CDSTP */
    nsv = *n;
    bnd = 0.f;
    switch__ = FALSE_;
    ntry = 0;
    told = *t;
    nfail = 0;
    if (*jtask <= 0) {
	cdntl_(eps, (S_fp)f, (U_fp)fa, hmax, hold, impl, jtask, matdim, 
		maxord, mint, miter, ml, mu, n, nde, &save1[1], t, uround, (
		U_fp)users, &y[1], &ywt[1], h__, mntold, mtrold, nfe, rc, &yh[
		yh_offset], &a[a_offset], convrg, &el[14], &fac[1], &ier, &
		ipvt[1], nq, nwait, &rh, rmax, &save2[1], &tq[4], trend, 
		iswflg, jstate);
	if (*n == 0) {
	    goto L440;
	}
	if (*h__ == 0.f) {
	    goto L400;
	}
	if (ier) {
	    goto L420;
	}
    }
L100:
    ++ntry;
    if (ntry > 50) {
	goto L410;
    }
    *t += *h__;
    cdpsc_(&c__1, n, nq, &yh[yh_offset]);
    evaljc = ((r__1 = *rc - 1.f, dabs(r__1)) > .3f || *nstep >= *jstepl + 10) 
	    && *miter != 0;
    evalfa = ! evaljc;

L110:
    iter = 0;
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
/* L115: */
	i__2 = i__;
	i__3 = i__ + yh_dim1;
	y[i__2].r = yh[i__3].r, y[i__2].i = yh[i__3].i;
    }
    (*f)(n, t, &y[1], &save2[1]);
    if (*n == 0) {
	*jstate = 6;
	goto L430;
    }
    ++(*nfe);
    if (evaljc || ier) {
	cdpst_(&el[14], (S_fp)f, (U_fp)fa, h__, impl, (U_fp)jacobn, matdim, 
		miter, ml, mu, n, nde, nq, &save2[1], t, (U_fp)users, &y[1], &
		yh[yh_offset], &ywt[1], uround, nfe, nje, &a[a_offset], &dfdy[
		dfdy_offset], &fac[1], &ier, &ipvt[1], &save1[1], iswflg, &
		bnd, jstate);
	if (*n == 0) {
	    goto L430;
	}
	if (ier) {
	    goto L160;
	}
	*convrg = FALSE_;
	*rc = 1.f;
	*jstepl = *nstep;
    }
    i__2 = *n;
    for (i__ = 1; i__ <= i__2; ++i__) {
/* L125: */
	i__3 = i__;
	save1[i__3].r = 0.f, save1[i__3].i = 0.f;
    }
/*                      Up to MXITER corrector iterations are taken. */
/*                      Convergence is tested by requiring the r.m.s. */
/*                      norm of changes to be less than EPS.  The sum of */
/*                      the corrections is accumulated in the vector */
/*                      SAVE1(I).  It is approximately equal to the L-th */
/*                      derivative of Y multiplied by */
/*                      H**L/(factorial(L-1)*EL(L,NQ)), and is thus */
/*                      proportional to the actual errors to the lowest */
/*                      power of H present (H**L).  The YH array is not */
/*                      altered in the correction loop.  The norm of the */
/*                      iterate difference is stored in D.  If */
/*                      ITER .GT. 0, an estimate of the convergence rate */
/*                      constant is stored in TREND, and this is used in */
/*                      the convergence test. */

L130:
    cdcor_(&dfdy[dfdy_offset], &el[14], (U_fp)fa, h__, ierror, impl, &ipvt[1],
	     matdim, miter, ml, mu, n, nde, nq, t, (U_fp)users, &y[1], &yh[
	    yh_offset], &ywt[1], &evalfa, &save1[1], &save2[1], &a[a_offset], 
	    &d__, jstate);
    if (*n == 0) {
	goto L430;
    }
    if (*iswflg == 3 && *mint == 1) {
	if (iter == 0) {
	    numer = scnrm2_(n, &save1[1], &c__1);
	    i__3 = *n;
	    for (i__ = 1; i__ <= i__3; ++i__) {
/* L132: */
		i__2 = i__ * dfdy_dim1 + 1;
		i__1 = i__;
		dfdy[i__2].r = save1[i__1].r, dfdy[i__2].i = save1[i__1].i;
	    }
	    y0nrm = scnrm2_(n, &yh[yh_offset], &c__1);
	} else {
	    denom = numer;
	    i__2 = *n;
	    for (i__ = 1; i__ <= i__2; ++i__) {
/* L134: */
		i__1 = i__ * dfdy_dim1 + 1;
		i__3 = i__;
		i__4 = i__ * dfdy_dim1 + 1;
		q__1.r = save1[i__3].r - dfdy[i__4].r, q__1.i = save1[i__3].i 
			- dfdy[i__4].i;
		dfdy[i__1].r = q__1.r, dfdy[i__1].i = q__1.i;
	    }
	    numer = scnrm2_(n, &dfdy[dfdy_offset], matdim);
	    if (el[*nq * 13 + 1] * numer <= *uround * 100.f * y0nrm) {
		if (*rmax == 2.f) {
		    switch__ = TRUE_;
		    goto L170;
		}
	    }
	    i__1 = *n;
	    for (i__ = 1; i__ <= i__1; ++i__) {
/* L136: */
		i__3 = i__ * dfdy_dim1 + 1;
		i__4 = i__;
		dfdy[i__3].r = save1[i__4].r, dfdy[i__3].i = save1[i__4].i;
	    }
	    if (denom != 0.f) {
/* Computing MAX */
		r__1 = bnd, r__2 = numer / (denom * dabs(*h__) * el[*nq * 13 
			+ 1]);
		bnd = dmax(r__1,r__2);
	    }
	}
    }
    if (iter > 0) {
/* Computing MAX */
	r__1 = *trend * .9f, r__2 = d__ / d1;
	*trend = dmax(r__1,r__2);
    }
    d1 = d__;
/* Computing MIN */
    r__1 = *trend * 2.f;
    ctest = dmin(r__1,1.f) * d__;
    if (ctest <= *eps) {
	goto L170;
    }
    ++iter;
    if (iter < 3) {
	i__3 = *n;
	for (i__ = 1; i__ <= i__3; ++i__) {
/* L140: */
	    i__4 = i__;
	    i__1 = i__ + yh_dim1;
	    i__2 = *nq * 13 + 1;
	    i__5 = i__;
	    q__2.r = el[i__2] * save1[i__5].r, q__2.i = el[i__2] * save1[i__5]
		    .i;
	    q__1.r = yh[i__1].r + q__2.r, q__1.i = yh[i__1].i + q__2.i;
	    y[i__4].r = q__1.r, y[i__4].i = q__1.i;
	}
	(*f)(n, t, &y[1], &save2[1]);
	if (*n == 0) {
	    *jstate = 6;
	    goto L430;
	}
	++(*nfe);
	goto L130;
    }
/*                     The corrector iteration failed to converge in */
/*                     MXITER tries.  If partials are involved but are */
/*                     not up to date, they are reevaluated for the next */
/*                     try.  Otherwise the YH array is retracted to its */
/*                     values before prediction, and H is reduced, if */
/*                     possible.  If not, a no-convergence exit is taken. */
    if (*convrg) {
	evaljc = TRUE_;
	evalfa = FALSE_;
	goto L110;
    }
L160:
    *t = told;
    cdpsc_(&c_n1, n, nq, &yh[yh_offset]);
    *nwait = *nq + 2;
    if (*jtask != 0 && *jtask != 2) {
	*rmax = 2.f;
    }
    if (iter == 0) {
	rh = .3f;
    } else {
	d__1 = (doublereal) (*eps / ctest);
	rh = pow_dd(&d__1, &c_b22) * .9f;
    }
    if (rh * *h__ == 0.f) {
	goto L400;
    }
    cdscl_(hmax, n, nq, rmax, h__, rc, &rh, &yh[yh_offset]);
    goto L100;
/*                          The corrector has converged.  CONVRG is set */
/*                          to .TRUE. if partial derivatives were used, */
/*                          to indicate that they may need updating on */
/*                          subsequent steps.  The error test is made. */
L170:
    *convrg = *miter != 0;
    if (*ierror == 1 || *ierror == 5) {
	i__4 = *nde;
	for (i__ = 1; i__ <= i__4; ++i__) {
/* L180: */
	    i__1 = i__;
	    c_div(&q__1, &save1[i__], &ywt[i__]);
	    save2[i__1].r = q__1.r, save2[i__1].i = q__1.i;
	}
    } else {
	i__1 = *nde;
	for (i__ = 1; i__ <= i__1; ++i__) {
/* L185: */
	    i__4 = i__;
	    i__2 = i__;
/* Computing MAX */
	    r__2 = c_abs(&y[i__]), r__3 = c_abs(&ywt[i__]);
	    r__1 = dmax(r__2,r__3);
	    q__1.r = save1[i__2].r / r__1, q__1.i = save1[i__2].i / r__1;
	    save2[i__4].r = q__1.r, save2[i__4].i = q__1.i;
	}
    }
    etest = scnrm2_(nde, &save2[1], &c__1) / (tq[*nq * 3 + 2] * sqrt((real) (*
	    nde)));
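/*                           ETEST is the weighted r.m.s. norm of the */
/*                           accumulated correction (the local error */
/*                           estimate), scaled by the order-dependent */
/*                           test constant TQ(2,NQ). */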

/*                           The error test failed.  NFAIL keeps track of */
/*                           multiple failures.  Restore T and the YH */
/*                           array to their previous values, and prepare */
/*                           to try the step again.  Compute the optimum */
/*                           step size for this or one lower order. */
    if (etest > *eps) {
	*t = told;
	cdpsc_(&c_n1, n, nq, &yh[yh_offset]);
	++nfail;
	if (nfail < 3 || *nq == 1) {
	    if (*jtask != 0 && *jtask != 2) {
		*rmax = 2.f;
	    }
	    d__1 = (doublereal) (etest / *eps);
	    d__2 = (doublereal) (1.f / (*nq + 1));
	    rh2 = 1.f / (pow_dd(&d__1, &d__2) * 1.2f);
	    if (*nq > 1) {
		if (*ierror == 1 || *ierror == 5) {
		    i__4 = *nde;
		    for (i__ = 1; i__ <= i__4; ++i__) {
/* L190: */
			i__2 = i__;
			c_div(&q__1, &yh[i__ + (*nq + 1) * yh_dim1], &ywt[i__]
				);
			save2[i__2].r = q__1.r, save2[i__2].i = q__1.i;
		    }
		} else {
		    i__2 = *nde;
		    for (i__ = 1; i__ <= i__2; ++i__) {
/* L195: */
			i__4 = i__;
			i__1 = i__ + (*nq + 1) * yh_dim1;
/* Computing MAX */
			r__2 = c_abs(&y[i__]), r__3 = c_abs(&ywt[i__]);
			r__1 = dmax(r__2,r__3);
			q__1.r = yh[i__1].r / r__1, q__1.i = yh[i__1].i / 
				r__1;
			save2[i__4].r = q__1.r, save2[i__4].i = q__1.i;
		    }
		}
		erdn = scnrm2_(nde, &save2[1], &c__1) / (tq[*nq * 3 + 1] * 
			sqrt((real) (*nde)));
/* Computing MAX */
		d__1 = (doublereal) (erdn / *eps);
		d__2 = (doublereal) (1.f / *nq);
		r__1 = 1.f, r__2 = pow_dd(&d__1, &d__2) * 1.3f;
		rh1 = 1.f / dmax(r__1,r__2);
		if (rh2 < rh1) {
		    --(*nq);
		    *rc = *rc * el[*nq * 13 + 1] / el[(*nq + 1) * 13 + 1];
		    rh = rh1;
		} else {
		    rh = rh2;
		}
	    } else {
		rh = rh2;
	    }
	    *nwait = *nq + 2;
	    if (rh * *h__ == 0.f) {
		goto L400;
	    }
	    cdscl_(hmax, n, nq, rmax, h__, rc, &rh, &yh[yh_offset]);
	    goto L100;
	}
/*                Control reaches this section if the error test has */
/*                failed MXFAIL or more times.  It is assumed that the */
/*                derivatives that have accumulated in the YH array have */
/*                errors of the wrong order.  Hence the first derivative */
/*                is recomputed, the order is set to 1, and the step is */
/*                retried. */
	nfail = 0;
	*jtask = 2;
	i__4 = *n;
	for (i__ = 1; i__ <= i__4; ++i__) {
/* L215: */
	    i__1 = i__;
	    i__2 = i__ + yh_dim1;
	    y[i__1].r = yh[i__2].r, y[i__1].i = yh[i__2].i;
	}
	cdntl_(eps, (S_fp)f, (U_fp)fa, hmax, hold, impl, jtask, matdim, 
		maxord, mint, miter, ml, mu, n, nde, &save1[1], t, uround, (
		U_fp)users, &y[1], &ywt[1], h__, mntold, mtrold, nfe, rc, &yh[
		yh_offset], &a[a_offset], convrg, &el[14], &fac[1], &ier, &
		ipvt[1], nq, nwait, &rh, rmax, &save2[1], &tq[4], trend, 
		iswflg, jstate);
	*rmax = 10.f;
	if (*n == 0) {
	    goto L440;
	}
	if (*h__ == 0.f) {
	    goto L400;
	}
	if (ier) {
	    goto L420;
	}
	goto L100;
    }
/*                          After a successful step, update the YH array. */
    ++(*nstep);
    *hused = *h__;
    *nqused = *nq;
    *avgh = ((*nstep - 1) * *avgh + *h__) / *nstep;
    *avgord = ((*nstep - 1) * *avgord + *nq) / *nstep;
    i__1 = *nq + 1;
    for (j = 1; j <= i__1; ++j) {
	i__2 = *n;
	for (i__ = 1; i__ <= i__2; ++i__) {
/* L230: */
	    i__4 = i__ + j * yh_dim1;
	    i__5 = i__ + j * yh_dim1;
	    i__3 = j + *nq * 13;
	    i__6 = i__;
	    q__2.r = el[i__3] * save1[i__6].r, q__2.i = el[i__3] * save1[i__6]
		    .i;
	    q__1.r = yh[i__5].r + q__2.r, q__1.i = yh[i__5].i + q__2.i;
	    yh[i__4].r = q__1.r, yh[i__4].i = q__1.i;
	}
    }
    i__4 = *n;
    for (i__ = 1; i__ <= i__4; ++i__) {
/* L235: */
	i__5 = i__;
	i__3 = i__ + yh_dim1;
	y[i__5].r = yh[i__3].r, y[i__5].i = yh[i__3].i;
    }
/*                                          If ISWFLG is 3, consider */
/*                                          changing integration methods. */
    if (*iswflg == 3) {
	if (bnd != 0.f) {
	    if (*mint == 1 && *nq <= 5) {
/* Computing MAX */
		d__1 = (doublereal) (etest / *eps);
		d__2 = (doublereal) (1.f / (*nq + 1));
		r__1 = *uround, r__2 = pow_dd(&d__1, &d__2);
		hn = dabs(*h__) / dmax(r__1,r__2);
/* Computing MIN */
		r__1 = hn, r__2 = 1.f / (el[*nq * 13 + 1] * 2.f * bnd);
		hn = dmin(r__1,r__2);
/* Computing MAX */
		d__1 = (doublereal) (etest / (*eps * el[*nq + 14]));
		d__2 = (doublereal) (1.f / (*nq + 1));
		r__1 = *uround, r__2 = pow_dd(&d__1, &d__2);
		hs = dabs(*h__) / dmax(r__1,r__2);
		if (hs > hn * 1.2f) {
		    *mint = 2;
		    *mntold = *mint;
		    *miter = *mtrsv;
		    *mtrold = *miter;
		    *maxord = min(*mxrdsv,5);
		    *rc = 0.f;
		    *rmax = 10.f;
		    *trend = 1.f;
		    cdcst_(maxord, mint, iswflg, &el[14], &tq[4]);
		    *nwait = *nq + 2;
		}
	    } else if (*mint == 2) {
/* Computing MAX */
		d__1 = (doublereal) (etest / *eps);
		d__2 = (doublereal) (1.f / (*nq + 1));
		r__1 = *uround, r__2 = pow_dd(&d__1, &d__2);
		hs = dabs(*h__) / dmax(r__1,r__2);
/* Computing MAX */
		d__1 = (doublereal) (etest * el[*nq + 14] / *eps);
		d__2 = (doublereal) (1.f / (*nq + 1));
		r__1 = *uround, r__2 = pow_dd(&d__1, &d__2);
		hn = dabs(*h__) / dmax(r__1,r__2);
/* Computing MIN */
		r__1 = hn, r__2 = 1.f / (el[*nq * 13 + 1] * 2.f * bnd);
		hn = dmin(r__1,r__2);
		if (hn >= hs) {
		    *mint = 1;
		    *mntold = *mint;
		    *miter = 0;
		    *mtrold = *miter;
		    *maxord = min(*mxrdsv,12);
		    *rmax = 10.f;
		    *trend = 1.f;
		    *convrg = FALSE_;
		    cdcst_(maxord, mint, iswflg, &el[14], &tq[4]);
		    *nwait = *nq + 2;
		}
	    }
	}
    }
    if (switch__) {
	*mint = 2;
	*mntold = *mint;
	*miter = *mtrsv;
	*mtrold = *miter;
	*maxord = min(*mxrdsv,5);
	*nq = min(*nq,*maxord);
	*rc = 0.f;
	*rmax = 10.f;
	*trend = 1.f;
	cdcst_(maxord, mint, iswflg, &el[14], &tq[4]);
	*nwait = *nq + 2;
    }
/*                           Consider changing H if NWAIT = 1.  Otherwise */
/*                           decrease NWAIT by 1.  If NWAIT is then 1 and */
/*                           NQ.LT.MAXORD, then SAVE1 is saved for use in */
/*                           a possible order increase on the next step. */

    if (*jtask == 0 || *jtask == 2) {
/* Computing MAX */
	d__1 = (doublereal) (etest / *eps);
	d__2 = (doublereal) (1.f / (*nq + 1));
	r__1 = *uround, r__2 = pow_dd(&d__1, &d__2) * 1.2f;
	rh = 1.f / dmax(r__1,r__2);
	if (rh > 1.f) {
	    cdscl_(hmax, n, nq, rmax, h__, rc, &rh, &yh[yh_offset]);
	}
    } else if (*nwait > 1) {
	--(*nwait);
	if (*nwait == 1 && *nq < *maxord) {
	    i__5 = *nde;
	    for (i__ = 1; i__ <= i__5; ++i__) {
/* L250: */
		i__3 = i__ + (*maxord + 1) * yh_dim1;
		i__4 = i__;
		yh[i__3].r = save1[i__4].r, yh[i__3].i = save1[i__4].i;
	    }
	}
/*             If a change in H is considered, an increase or decrease in */
/*             order by one is considered also.  A change in H is made */
/*             only if it is by a factor of at least TRSHLD.  Factors */
/*             RH1, RH2, and RH3 are computed, by which H could be */
/*             multiplied at order NQ - 1, order NQ, or order NQ + 1, */
/*             respectively.  The largest of these is determined and the */
/*             new order chosen accordingly.  If the order is to be */
/*             increased, we compute one additional scaled derivative. */
/*             If there is a change of order, reset NQ and the */
/*             coefficients.  In any case H is reset according to RH and */
/*             the YH array is rescaled. */
    } else {
	if (*nq == 1) {
	    rh1 = 0.f;
	} else {
	    if (*ierror == 1 || *ierror == 5) {
		i__3 = *nde;
		for (i__ = 1; i__ <= i__3; ++i__) {
/* L270: */
		    i__4 = i__;
		    c_div(&q__1, &yh[i__ + (*nq + 1) * yh_dim1], &ywt[i__]);
		    save2[i__4].r = q__1.r, save2[i__4].i = q__1.i;
		}
	    } else {
		i__4 = *nde;
		for (i__ = 1; i__ <= i__4; ++i__) {
/* L275: */
		    i__3 = i__;
		    i__5 = i__ + (*nq + 1) * yh_dim1;
/* Computing MAX */
		    r__2 = c_abs(&y[i__]), r__3 = c_abs(&ywt[i__]);
		    r__1 = dmax(r__2,r__3);
		    q__1.r = yh[i__5].r / r__1, q__1.i = yh[i__5].i / r__1;
		    save2[i__3].r = q__1.r, save2[i__3].i = q__1.i;
		}
	    }
	    erdn = scnrm2_(nde, &save2[1], &c__1) / (tq[*nq * 3 + 1] * sqrt((
		    real) (*nde)));
/* Computing MAX */
	    d__1 = (doublereal) (erdn / *eps);
	    d__2 = (doublereal) (1.f / *nq);
	    r__1 = *uround, r__2 = pow_dd(&d__1, &d__2) * 1.3f;
	    rh1 = 1.f / dmax(r__1,r__2);
	}
/* Computing MAX */
	d__1 = (doublereal) (etest / *eps);
	d__2 = (doublereal) (1.f / (*nq + 1));
	r__1 = *uround, r__2 = pow_dd(&d__1, &d__2) * 1.2f;
	rh2 = 1.f / dmax(r__1,r__2);
	if (*nq == *maxord) {
	    rh3 = 0.f;
	} else {
	    if (*ierror == 1 || *ierror == 5) {
		i__3 = *nde;
		for (i__ = 1; i__ <= i__3; ++i__) {
/* L290: */
		    i__5 = i__;
		    i__4 = i__;
		    i__6 = i__ + (*maxord + 1) * yh_dim1;
		    q__2.r = save1[i__4].r - yh[i__6].r, q__2.i = save1[i__4]
			    .i - yh[i__6].i;
		    c_div(&q__1, &q__2, &ywt[i__]);
		    save2[i__5].r = q__1.r, save2[i__5].i = q__1.i;
		}
	    } else {
		i__5 = *nde;
		for (i__ = 1; i__ <= i__5; ++i__) {
		    i__4 = i__;
		    i__6 = i__;
		    i__3 = i__ + (*maxord + 1) * yh_dim1;
		    q__2.r = save1[i__6].r - yh[i__3].r, q__2.i = save1[i__6]
			    .i - yh[i__3].i;
/* Computing MAX */
		    r__2 = c_abs(&y[i__]), r__3 = c_abs(&ywt[i__]);
		    r__1 = dmax(r__2,r__3);
		    q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
		    save2[i__4].r = q__1.r, save2[i__4].i = q__1.i;
/* L295: */
		}
	    }
	    erup = scnrm2_(nde, &save2[1], &c__1) / (tq[*nq * 3 + 3] * sqrt((
		    real) (*nde)));
/* Computing MAX */
	    d__1 = (doublereal) (erup / *eps);
	    d__2 = (doublereal) (1.f / (*nq + 2));
	    r__1 = *uround, r__2 = pow_dd(&d__1, &d__2) * 1.4f;
	    rh3 = 1.f / dmax(r__1,r__2);
	}
	if (rh1 > rh2 && rh1 >= rh3) {
	    rh = rh1;
	    if (rh <= 1.f) {
		goto L380;
	    }
	    --(*nq);
	    *rc = *rc * el[*nq * 13 + 1] / el[(*nq + 1) * 13 + 1];
	} else if (rh2 >= rh1 && rh2 >= rh3) {
	    rh = rh2;
	    if (rh <= 1.f) {
		goto L380;
	    }
	} else {
	    rh = rh3;
	    if (rh <= 1.f) {
		goto L380;
	    }
	    i__5 = *n;
	    for (i__ = 1; i__ <= i__5; ++i__) {
/* L360: */
		i__4 = i__ + (*nq + 2) * yh_dim1;
		i__6 = i__;
		i__3 = *nq + 1 + *nq * 13;
		q__2.r = el[i__3] * save1[i__6].r, q__2.i = el[i__3] * save1[
			i__6].i;
		i__2 = *nq + 1;
		d__1 = (doublereal) i__2;
		q__1.r = q__2.r / d__1, q__1.i = q__2.i / d__1;
		yh[i__4].r = q__1.r, yh[i__4].i = q__1.i;
	    }
	    ++(*nq);
	    *rc = *rc * el[*nq * 13 + 1] / el[(*nq - 1) * 13 + 1];
	}
	if (*iswflg == 3 && *mint == 1) {
	    if (bnd != 0.f) {
/* Computing MIN */
		r__1 = rh, r__2 = 1.f / (el[*nq * 13 + 1] * 2.f * bnd * dabs(*
			h__));
		rh = dmin(r__1,r__2);
	    }
	}
	cdscl_(hmax, n, nq, rmax, h__, rc, &rh, &yh[yh_offset]);
	*rmax = 10.f;
L380:
	*nwait = *nq + 2;
    }
/*               All returns are made through this section.  H is saved */
/*               in HOLD to allow the caller to change H on the next step */
    *jstate = 1;
    *hold = *h__;
    return 0;

L400:
    *jstate = 2;
    *hold = *h__;
    i__4 = *n;
    for (i__ = 1; i__ <= i__4; ++i__) {
/* L405: */
	i__6 = i__;
	i__3 = i__ + yh_dim1;
	y[i__6].r = yh[i__3].r, y[i__6].i = yh[i__3].i;
    }
    return 0;

L410:
    *jstate = 3;
    *hold = *h__;
    return 0;

L420:
    *jstate = 4;
    *hold = *h__;
    return 0;

L430:
    *t = told;
    cdpsc_(&c_n1, &nsv, nq, &yh[yh_offset]);
    i__6 = nsv;
    for (i__ = 1; i__ <= i__6; ++i__) {
/* L435: */
	i__3 = i__;
	i__4 = i__ + yh_dim1;
	y[i__3].r = yh[i__4].r, y[i__3].i = yh[i__4].i;
    }
L440:
    *hold = *h__;
    return 0;
} /* cdstp_ */
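The order/step-size selection in cdstp_ above reduces to comparing three candidate factors by which H could be multiplied at orders NQ-1, NQ and NQ+1. The following standalone sketch is not part of the original routine; the error norms erdn, etest, erup and the tolerance eps are made-up illustrative values, and only the comparison logic mirrors the code above.

/* Hedged sketch: cdstp_-style order/step-size decision for assumed error norms. */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double uround = 1.2e-7;          /* single-precision unit roundoff       */
    double eps = 1e-4;               /* requested error tolerance            */
    int    nq  = 3;                  /* current integration order            */
    double erdn = 3e-5, etest = 2e-5, erup = 5e-5;   /* assumed error norms  */

    /* Candidate factors for H at orders NQ-1, NQ, NQ+1; the bias factors
     * 1.3, 1.2 and 1.4 mirror the ones used in cdstp_. */
    double rh1 = 1.0 / fmax(uround, pow(erdn  / eps, 1.0 / nq)       * 1.3);
    double rh2 = 1.0 / fmax(uround, pow(etest / eps, 1.0 / (nq + 1)) * 1.2);
    double rh3 = 1.0 / fmax(uround, pow(erup  / eps, 1.0 / (nq + 2)) * 1.4);

    if (rh1 > rh2 && rh1 >= rh3)
        printf("decrease order to %d, rh = %g\n", nq - 1, rh1);
    else if (rh2 >= rh1 && rh2 >= rh3)
        printf("keep order %d, rh = %g\n", nq, rh2);
    else
        printf("increase order to %d, rh = %g\n", nq + 1, rh3);
    return 0;
}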
Example #6
0
/*! \brief
 * <pre>
 * Purpose
 * =======
 *    ilu_cdrop_row() - Drop some small rows from the previous 
 *    supernode (L-part only).
 * </pre>
 */
int ilu_cdrop_row(
	superlu_options_t *options, /* options */
	int    first,	    /* index of the first column in the supernode */
	int    last,	    /* index of the last column in the supernode */
	double drop_tol,    /* dropping parameter */
	int    quota,	    /* maximum nonzero entries allowed */
	int    *nnzLj,	    /* in/out number of nonzeros in L(:, 1:last) */
	double *fill_tol,   /* in/out - on exit, fill_tol=-num_zero_pivots,
			     * does not change if options->ILU_MILU != SMILU1 */
	GlobalLU_t *Glu,    /* modified */
	float swork[],   /* working space
	                     * the length of swork[] should be no less than
			     * the number of rows in the supernode */
	float swork2[], /* working space with the same size as swork[],
			     * used only by the second dropping rule */
	int    lastc	    /* if lastc == 0, there is nothing after the
			     * working supernode [first:last];
			     * if lastc == 1, there is one more column after
			     * the working supernode. */ )
{
    register int i, j, k, m1;
    register int nzlc; /* number of nonzeros in column last+1 */
    register int xlusup_first, xlsub_first;
    int m, n; /* m x n is the size of the supernode */
    int r = 0; /* number of dropped rows */
    register float *temp;
    register complex *lusup = (complex *) Glu->lusup;
    register int *lsub = Glu->lsub;
    register int *xlsub = Glu->xlsub;
    register int *xlusup = Glu->xlusup;
    register float d_max = 0.0, d_min = 1.0;
    int    drop_rule = options->ILU_DropRule;
    milu_t milu = options->ILU_MILU;
    norm_t nrm = options->ILU_Norm;
    complex zero = {0.0, 0.0};
    complex one = {1.0, 0.0};
    complex none = {-1.0, 0.0};
    int i_1 = 1;
    int inc_diag; /* inc_diag = m + 1 */
    int nzp = 0;  /* number of zero pivots */
    float alpha = pow((double)(Glu->n), -1.0 / options->ILU_MILU_Dim);

    xlusup_first = xlusup[first];
    xlsub_first = xlsub[first];
    m = xlusup[first + 1] - xlusup_first;
    n = last - first + 1;
    m1 = m - 1;
    inc_diag = m + 1;
    nzlc = lastc ? (xlusup[last + 2] - xlusup[last + 1]) : 0;
    temp = swork - n;

    /* Quick return if nothing to do. */
    if (m == 0 || m == n || drop_rule == NODROP)
    {
	*nnzLj += m * n;
	return 0;
    }

    /* basic dropping: ILU(tau) */
    for (i = n; i <= m1; )
    {
	/* the average abs value of ith row */
	switch (nrm)
	{
	    case ONE_NORM:
		temp[i] = scasum_(&n, &lusup[xlusup_first + i], &m) / (double)n;
		break;
	    case TWO_NORM:
		temp[i] = scnrm2_(&n, &lusup[xlusup_first + i], &m)
		    / sqrt((double)n);
		break;
	    case INF_NORM:
	    default:
		k = icamax_(&n, &lusup[xlusup_first + i], &m) - 1;
		temp[i] = c_abs1(&lusup[xlusup_first + i + m * k]);
		break;
	}

	/* drop small entries due to drop_tol */
	if (drop_rule & DROP_BASIC && temp[i] < drop_tol)
	{
	    r++;
	    /* drop the current row and move the last undropped row here */
	    if (r > 1) /* add to last row */
	    {
		/* accumulate the sum (for MILU) */
		switch (milu)
		{
		    case SMILU_1:
		    case SMILU_2:
			caxpy_(&n, &one, &lusup[xlusup_first + i], &m,
				&lusup[xlusup_first + m - 1], &m);
			break;
		    case SMILU_3:
			for (j = 0; j < n; j++)
			    lusup[xlusup_first + (m - 1) + j * m].r +=
				    c_abs1(&lusup[xlusup_first + i + j * m]);
			break;
		    case SILU:
		    default:
			break;
		}
		ccopy_(&n, &lusup[xlusup_first + m1], &m,
                       &lusup[xlusup_first + i], &m);
	    } /* if (r > 1) */
	    else /* move to last row */
	    {
		cswap_(&n, &lusup[xlusup_first + m1], &m,
			&lusup[xlusup_first + i], &m);
		if (milu == SMILU_3)
		    for (j = 0; j < n; j++) {
			lusup[xlusup_first + m1 + j * m].r =
				c_abs1(&lusup[xlusup_first + m1 + j * m]);
			lusup[xlusup_first + m1 + j * m].i = 0.0;
                    }
	    }
	    lsub[xlsub_first + i] = lsub[xlsub_first + m1];
	    m1--;
	    continue;
	} /* if dropping */
	else
	{
	    if (temp[i] > d_max) d_max = temp[i];
	    if (temp[i] < d_min) d_min = temp[i];
	}
	i++;
    } /* for */

    /* Secondary dropping: drop more rows according to the quota. */
    quota = ceil((double)quota / (double)n);
    if (drop_rule & DROP_SECONDARY && m - r > quota)
    {
	register double tol = d_max;

	/* Calculate the second dropping tolerance */
	if (quota > n)
	{
	    if (drop_rule & DROP_INTERP) /* by interpolation */
	    {
		d_max = 1.0 / d_max; d_min = 1.0 / d_min;
		tol = 1.0 / (d_max + (d_min - d_max) * quota / (m - n - r));
	    }
	    else /* by quick select */
	    {
		int len = m1 - n + 1;
		scopy_(&len, swork, &i_1, swork2, &i_1);
		tol = sqselect(len, swork2, quota - n);
#if 0
		register int *itemp = iwork - n;
		A = temp;
		for (i = n; i <= m1; i++) itemp[i] = i;
		qsort(iwork, m1 - n + 1, sizeof(int), _compare_);
		tol = temp[itemp[quota]];
#endif
	    }
	}

	for (i = n; i <= m1; )
	{
	    if (temp[i] <= tol)
	    {
		register int j;
		r++;
		/* drop the current row and move the last undropped row here */
		if (r > 1) /* add to last row */
		{
		    /* accumulate the sum (for MILU) */
		    switch (milu)
		    {
			case SMILU_1:
			case SMILU_2:
			    caxpy_(&n, &one, &lusup[xlusup_first + i], &m,
				    &lusup[xlusup_first + m - 1], &m);
			    break;
			case SMILU_3:
			    for (j = 0; j < n; j++)
				lusup[xlusup_first + (m - 1) + j * m].r +=
   				  c_abs1(&lusup[xlusup_first + i + j * m]);
			    break;
			case SILU:
			default:
			    break;
		    }
		    ccopy_(&n, &lusup[xlusup_first + m1], &m,
			    &lusup[xlusup_first + i], &m);
		} /* if (r > 1) */
		else /* move to last row */
		{
		    cswap_(&n, &lusup[xlusup_first + m1], &m,
			    &lusup[xlusup_first + i], &m);
		    if (milu == SMILU_3)
			for (j = 0; j < n; j++) {
			    lusup[xlusup_first + m1 + j * m].r =
				    c_abs1(&lusup[xlusup_first + m1 + j * m]);
			    lusup[xlusup_first + m1 + j * m].i = 0.0;
                        }
		}
		lsub[xlsub_first + i] = lsub[xlsub_first + m1];
		m1--;
		temp[i] = temp[m1];

		continue;
	    }
	    i++;

	} /* for */

    } /* if secondary dropping */

    for (i = n; i < m; i++) temp[i] = 0.0;

    if (r == 0)
    {
	*nnzLj += m * n;
	return 0;
    }

    /* add dropped entries to the diagonal */
    if (milu != SILU)
    {
	register int j;
	complex t;
	float omega;
	for (j = 0; j < n; j++)
	{
	    t = lusup[xlusup_first + (m - 1) + j * m];
            if (t.r == 0.0 && t.i == 0.0) continue;
            omega = SUPERLU_MIN(2.0 * (1.0 - alpha) / c_abs1(&t), 1.0);
	    cs_mult(&t, &t, omega);

 	    switch (milu)
	    {
		case SMILU_1:
		    if ( !(c_eq(&t, &none)) ) {
                        c_add(&t, &t, &one);
                        cc_mult(&lusup[xlusup_first + j * inc_diag],
			                  &lusup[xlusup_first + j * inc_diag],
                                          &t);
                    }
		    else
		    {
                        cs_mult(
                                &lusup[xlusup_first + j * inc_diag],
			        &lusup[xlusup_first + j * inc_diag],
                                *fill_tol);
#ifdef DEBUG
			printf("[1] ZERO PIVOT: FILL col %d.\n", first + j);
			fflush(stdout);
#endif
			nzp++;
		    }
		    break;
		case SMILU_2:
                    cs_mult(&lusup[xlusup_first + j * inc_diag],
                                          &lusup[xlusup_first + j * inc_diag],
                                          1.0 + c_abs1(&t));
		    break;
		case SMILU_3:
                    c_add(&t, &t, &one);
                    cc_mult(&lusup[xlusup_first + j * inc_diag],
	                              &lusup[xlusup_first + j * inc_diag],
                                      &t);
		    break;
		case SILU:
		default:
		    break;
	    }
	}
	if (nzp > 0) *fill_tol = -nzp;
    }

    /* Remove dropped entries from the memory and fix the pointers. */
    m1 = m - r;
    for (j = 1; j < n; j++)
    {
	register int tmp1, tmp2;
	tmp1 = xlusup_first + j * m1;
	tmp2 = xlusup_first + j * m;
	for (i = 0; i < m1; i++)
	    lusup[i + tmp1] = lusup[i + tmp2];
    }
    for (i = 0; i < nzlc; i++)
	lusup[xlusup_first + i + n * m1] = lusup[xlusup_first + i + n * m];
    for (i = 0; i < nzlc; i++)
	lsub[xlsub[last + 1] - r + i] = lsub[xlsub[last + 1] + i];
    for (i = first + 1; i <= last + 1; i++)
    {
	xlusup[i] -= r * (i - first);
	xlsub[i] -= r;
    }
    if (lastc)
    {
	xlusup[last + 2] -= r * n;
	xlsub[last + 2] -= r;
    }

    *nnzLj += (m - r) * n;
    return r;
}
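As a rough illustration of the per-row measure that drives the basic dropping rule above, the sketch below recomputes the ONE_NORM / TWO_NORM / INF_NORM variants directly for one row of a small column-major panel. The `cpx` type and the panel values are stand-ins, not SuperLU code; the real routine uses scasum_, scnrm2_ and icamax_ with stride m.

/* Hedged sketch: row measure for dropping, for a column-major m x n panel. */
#include <math.h>
#include <stdio.h>

typedef struct { float r, i; } cpx;   /* stand-in for SuperLU's complex */

static float row_measure(const cpx *panel, int m, int n, int row, int nrm)
{
    float s = 0.f;
    for (int j = 0; j < n; j++) {
        const cpx *x = &panel[row + j * m];        /* stride m between columns */
        float v = fabsf(x->r) + fabsf(x->i);       /* c_abs1-style magnitude   */
        if (nrm == 0)      s += v;                         /* ONE_NORM sum     */
        else if (nrm == 1) s += x->r * x->r + x->i * x->i; /* TWO_NORM sum sq  */
        else if (v > s)    s = v;                          /* INF_NORM max     */
    }
    if (nrm == 0) return s / (float)n;                 /* average 1-norm       */
    if (nrm == 1) return sqrtf(s) / sqrtf((float)n);   /* RMS 2-norm           */
    return s;                                          /* largest magnitude    */
}

int main(void)
{
    cpx panel[6] = {{1,0},{0,2},{3,0},{0,1},{2,2},{1,1}};   /* 3 x 2 panel */
    printf("row 1: one=%g two=%g inf=%g\n",
           row_measure(panel, 3, 2, 1, 0),
           row_measure(panel, 3, 2, 1, 1),
           row_measure(panel, 3, 2, 1, 2));
    return 0;
}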
Example #7
0
/* Subroutine */ int claqp2_(integer *m, integer *n, integer *offset, complex 
	*a, integer *lda, integer *jpvt, complex *tau, real *vn1, real *vn2, 
	complex *work)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    real r__1;
    complex q__1;

    /* Builtin functions */
    void r_cnjg(complex *, complex *);
    double c_abs(complex *), sqrt(doublereal);

    /* Local variables */
    static integer i__, j, mn;
    static complex aii;
    static integer pvt;
    static real temp, temp2;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
	    , integer *, complex *, complex *, integer *, complex *, ftnlen);
    static integer offpi;
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *, 
	    complex *, integer *);
    static integer itemp;
    extern doublereal scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *, 
	    integer *, complex *);
    extern integer isamax_(integer *, real *, integer *);


/*  -- LAPACK auxiliary routine (version 3.0) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */
/*     Courant Institute, Argonne National Lab, and Rice University */
/*     June 30, 1999 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CLAQP2 computes a QR factorization with column pivoting of */
/*  the block A(OFFSET+1:M,1:N). */
/*  The block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A. M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A. N >= 0. */

/*  OFFSET  (input) INTEGER */
/*          The number of rows of the matrix A that must be pivoted */
/*          but not factorized. OFFSET >= 0. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the M-by-N matrix A. */
/*          On exit, the upper triangle of block A(OFFSET+1:M,1:N) is */
/*          the triangular factor obtained; the elements in block */
/*          A(OFFSET+1:M,1:N) below the diagonal, together with the */
/*          array TAU, represent the orthogonal matrix Q as a product of */
/*          elementary reflectors. Block A(1:OFFSET,1:N) has been */
/*          accordingly pivoted, but not factorized. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A. LDA >= max(1,M). */

/*  JPVT    (input/output) INTEGER array, dimension (N) */
/*          On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
/*          to the front of A*P (a leading column); if JPVT(i) = 0, */
/*          the i-th column of A is a free column. */
/*          On exit, if JPVT(i) = k, then the i-th column of A*P */
/*          was the k-th column of A. */

/*  TAU     (output) COMPLEX array, dimension (min(M,N)) */
/*          The scalar factors of the elementary reflectors. */

/*  VN1     (input/output) REAL array, dimension (N) */
/*          The vector with the partial column norms. */

/*  VN2     (input/output) REAL array, dimension (N) */
/*          The vector with the exact column norms. */

/*  WORK    (workspace) COMPLEX array, dimension (N) */

/*  Further Details */
/*  =============== */

/*  Based on contributions by */
/*    G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
/*    X. Sun, Computer Science Dept., Duke University, USA */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --jpvt;
    --tau;
    --vn1;
    --vn2;
    --work;

    /* Function Body */
/* Computing MIN */
    i__1 = *m - *offset;
    mn = min(i__1,*n);

/*     Compute factorization. */

    i__1 = mn;
    for (i__ = 1; i__ <= i__1; ++i__) {

	offpi = *offset + i__;

/*        Determine ith pivot column and swap if necessary. */

	i__2 = *n - i__ + 1;
	pvt = i__ - 1 + isamax_(&i__2, &vn1[i__], &c__1);

	if (pvt != i__) {
	    cswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
		    c__1);
	    itemp = jpvt[pvt];
	    jpvt[pvt] = jpvt[i__];
	    jpvt[i__] = itemp;
	    vn1[pvt] = vn1[i__];
	    vn2[pvt] = vn2[i__];
	}

/*        Generate elementary reflector H(i). */

	if (offpi < *m) {
	    i__2 = *m - offpi + 1;
	    clarfg_(&i__2, &a[offpi + i__ * a_dim1], &a[offpi + 1 + i__ * 
		    a_dim1], &c__1, &tau[i__]);
	} else {
	    clarfg_(&c__1, &a[*m + i__ * a_dim1], &a[*m + i__ * a_dim1], &
		    c__1, &tau[i__]);
	}

	if (i__ < *n) {

/*           Apply H(i)' to A(offset+i:m,i+1:n) from the left. */

	    i__2 = offpi + i__ * a_dim1;
	    aii.r = a[i__2].r, aii.i = a[i__2].i;
	    i__2 = offpi + i__ * a_dim1;
	    a[i__2].r = 1.f, a[i__2].i = 0.f;
	    i__2 = *m - offpi + 1;
	    i__3 = *n - i__;
	    r_cnjg(&q__1, &tau[i__]);
	    clarf_("Left", &i__2, &i__3, &a[offpi + i__ * a_dim1], &c__1, &
		    q__1, &a[offpi + (i__ + 1) * a_dim1], lda, &work[1], (
		    ftnlen)4);
	    i__2 = offpi + i__ * a_dim1;
	    a[i__2].r = aii.r, a[i__2].i = aii.i;
	}

/*        Update partial column norms. */

	i__2 = *n;
	for (j = i__ + 1; j <= i__2; ++j) {
	    if (vn1[j] != 0.f) {
/* Computing 2nd power */
		r__1 = c_abs(&a[offpi + j * a_dim1]) / vn1[j];
		temp = 1.f - r__1 * r__1;
		temp = dmax(temp,0.f);
/* Computing 2nd power */
		r__1 = vn1[j] / vn2[j];
		temp2 = temp * .05f * (r__1 * r__1) + 1.f;
		if (temp2 == 1.f) {
		    if (offpi < *m) {
			i__3 = *m - offpi;
			vn1[j] = scnrm2_(&i__3, &a[offpi + 1 + j * a_dim1], &
				c__1);
			vn2[j] = vn1[j];
		    } else {
			vn1[j] = 0.f;
			vn2[j] = 0.f;
		    }
		} else {
		    vn1[j] *= sqrt(temp);
		}
	    }
/* L10: */
	}

/* L20: */
    }

    return 0;

/*     End of CLAQP2 */

} /* claqp2_ */
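The partial column-norm update at the end of CLAQP2 is the classical downdate-or-recompute test: after a Householder step the trailing norm of column j shrinks, and it is cheaper to downdate it than to recompute unless cancellation is detected. The sketch below is illustrative only (made-up values, not LAPACK code).

/* Hedged sketch: downdate-or-recompute decision for one column norm. */
#include <math.h>
#include <stdio.h>

int main(void)
{
    float vn1 = 5.0f;      /* current partial norm of column j              */
    float vn2 = 5.0f;      /* norm at the last explicit recomputation       */
    float aij = 3.0f;      /* |A(offpi, j)| eliminated by this step         */

    float r     = aij / vn1;
    float temp  = fmaxf(1.f - r * r, 0.f);
    float temp2 = 1.f + 0.05f * temp * (vn1 / vn2) * (vn1 / vn2);

    if (temp2 == 1.f)
        printf("cancellation: recompute with scnrm2_ on the trailing part\n");
    else
        printf("safe downdate: vn1 <- %g\n", vn1 * sqrtf(temp));
    return 0;
}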
Example #8
0
/* Subroutine */ int clagsy_(integer *n, integer *k, real *d, complex *a, 
	integer *lda, integer *iseed, complex *work, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8, 
	    i__9;
    doublereal d__1;
    complex q__1, q__2, q__3, q__4;

    /* Builtin functions */
    double c_abs(complex *);
    void c_div(complex *, complex *, complex *);

    /* Local variables */
    static integer i, j;
    extern /* Subroutine */ int cgerc_(integer *, integer *, complex *, 
	    complex *, integer *, complex *, integer *, complex *, integer *);
    static complex alpha;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
	    integer *);
    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer 
	    *, complex *, integer *);
    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
	    , complex *, integer *, complex *, integer *, complex *, complex *
	    , integer *), caxpy_(integer *, complex *, complex *, 
	    integer *, complex *, integer *), csymv_(char *, integer *, 
	    complex *, complex *, integer *, complex *, integer *, complex *, 
	    complex *, integer *);
    extern real scnrm2_(integer *, complex *, integer *);
    static integer ii, jj;
    static complex wa, wb;
    extern /* Subroutine */ int clacgv_(integer *, complex *, integer *);
    static real wn;
    extern /* Subroutine */ int xerbla_(char *, integer *), clarnv_(
	    integer *, integer *, integer *, complex *);
    static complex tau;


/*  -- LAPACK auxiliary test routine (version 2.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    CLAGSY generates a complex symmetric matrix A, by pre- and post-   
    multiplying a real diagonal matrix D with a random unitary matrix:   
    A = U*D*U**T. The semi-bandwidth may then be reduced to k by   
    additional unitary transformations.   

    Arguments   
    =========   

    N       (input) INTEGER   
            The order of the matrix A.  N >= 0.   

    K       (input) INTEGER   
            The number of nonzero subdiagonals within the band of A.   
            0 <= K <= N-1.   

    D       (input) REAL array, dimension (N)   
            The diagonal elements of the diagonal matrix D.   

    A       (output) COMPLEX array, dimension (LDA,N)   
            The generated n by n symmetric matrix A (the full matrix is   
            stored).   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= N.   

    ISEED   (input/output) INTEGER array, dimension (4)   
            On entry, the seed of the random number generator; the array 
  
            elements must be between 0 and 4095, and ISEED(4) must be   
            odd.   
            On exit, the seed is updated.   

    WORK    (workspace) COMPLEX array, dimension (2*N)   

    INFO    (output) INTEGER   
            = 0: successful exit   
            < 0: if INFO = -i, the i-th argument had an illegal value   

    ===================================================================== 
  


       Test the input arguments   

       Parameter adjustments */
    --d;
    a_dim1 = *lda;
    a_offset = a_dim1 + 1;
    a -= a_offset;
    --iseed;
    --work;

    /* Function Body */
    *info = 0;
    if (*n < 0) {
	*info = -1;
    } else if (*k < 0 || *k > *n - 1) {
	*info = -2;
    } else if (*lda < max(1,*n)) {
	*info = -5;
    }
    if (*info < 0) {
	i__1 = -(*info);
	xerbla_("CLAGSY", &i__1);
	return 0;
    }

/*     initialize lower triangle of A to diagonal matrix */

    i__1 = *n;
    for (j = 1; j <= i__1; ++j) {
	i__2 = *n;
	for (i = j + 1; i <= i__2; ++i) {
	    i__3 = i + j * a_dim1;
	    a[i__3].r = 0.f, a[i__3].i = 0.f;
/* L10: */
	}
/* L20: */
    }
    i__1 = *n;
    for (i = 1; i <= i__1; ++i) {
	i__2 = i + i * a_dim1;
	i__3 = i;
	a[i__2].r = d[i__3], a[i__2].i = 0.f;
/* L30: */
    }

/*     Generate lower triangle of symmetric matrix */

    for (i = *n - 1; i >= 1; --i) {

/*        generate random reflection */

	i__1 = *n - i + 1;
	clarnv_(&c__3, &iseed[1], &i__1, &work[1]);
	i__1 = *n - i + 1;
	wn = scnrm2_(&i__1, &work[1], &c__1);
	d__1 = wn / c_abs(&work[1]);
	q__1.r = d__1 * work[1].r, q__1.i = d__1 * work[1].i;
	wa.r = q__1.r, wa.i = q__1.i;
	if (wn == 0.f) {
	    tau.r = 0.f, tau.i = 0.f;
	} else {
	    q__1.r = work[1].r + wa.r, q__1.i = work[1].i + wa.i;
	    wb.r = q__1.r, wb.i = q__1.i;
	    i__1 = *n - i;
	    c_div(&q__1, &c_b2, &wb);
	    cscal_(&i__1, &q__1, &work[2], &c__1);
	    work[1].r = 1.f, work[1].i = 0.f;
	    c_div(&q__1, &wb, &wa);
	    d__1 = q__1.r;
	    tau.r = d__1, tau.i = 0.f;
	}

/*        apply random reflection to A(i:n,i:n) from the left   
          and the right   

          compute  y := tau * A * conjg(u) */

	i__1 = *n - i + 1;
	clacgv_(&i__1, &work[1], &c__1);
	i__1 = *n - i + 1;
	csymv_("Lower", &i__1, &tau, &a[i + i * a_dim1], lda, &work[1], &c__1,
		 &c_b1, &work[*n + 1], &c__1);
	i__1 = *n - i + 1;
	clacgv_(&i__1, &work[1], &c__1);

/*        compute  v := y - 1/2 * tau * ( u, y ) * u */

	q__3.r = -.5f, q__3.i = 0.f;
	q__2.r = q__3.r * tau.r - q__3.i * tau.i, q__2.i = q__3.r * tau.i + 
		q__3.i * tau.r;
	i__1 = *n - i + 1;
	cdotc_(&q__4, &i__1, &work[1], &c__1, &work[*n + 1], &c__1);
	q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r * q__4.i 
		+ q__2.i * q__4.r;
	alpha.r = q__1.r, alpha.i = q__1.i;
	i__1 = *n - i + 1;
	caxpy_(&i__1, &alpha, &work[1], &c__1, &work[*n + 1], &c__1);

/*        apply the transformation as a rank-2 update to A(i:n,i:n)   

          CALL CSYR2( 'Lower', N-I+1, -ONE, WORK, 1, WORK( N+1 ), 1, A( I, I ), LDA ) */

	i__1 = *n;
	for (jj = i; jj <= i__1; ++jj) {
	    i__2 = *n;
	    for (ii = jj; ii <= i__2; ++ii) {
		i__3 = ii + jj * a_dim1;
		i__4 = ii + jj * a_dim1;
		i__5 = ii - i + 1;
		i__6 = *n + jj - i + 1;
		q__3.r = work[i__5].r * work[i__6].r - work[i__5].i * work[
			i__6].i, q__3.i = work[i__5].r * work[i__6].i + work[
			i__5].i * work[i__6].r;
		q__2.r = a[i__4].r - q__3.r, q__2.i = a[i__4].i - q__3.i;
		i__7 = *n + ii - i + 1;
		i__8 = jj - i + 1;
		q__4.r = work[i__7].r * work[i__8].r - work[i__7].i * work[
			i__8].i, q__4.i = work[i__7].r * work[i__8].i + work[
			i__7].i * work[i__8].r;
		q__1.r = q__2.r - q__4.r, q__1.i = q__2.i - q__4.i;
		a[i__3].r = q__1.r, a[i__3].i = q__1.i;
/* L40: */
	    }
/* L50: */
	}
/* L60: */
    }

/*     Reduce number of subdiagonals to K */

    i__1 = *n - 1 - *k;
    for (i = 1; i <= i__1; ++i) {

/*        generate reflection to annihilate A(k+i+1:n,i) */

	i__2 = *n - *k - i + 1;
	wn = scnrm2_(&i__2, &a[*k + i + i * a_dim1], &c__1);
	d__1 = wn / c_abs(&a[*k + i + i * a_dim1]);
	i__2 = *k + i + i * a_dim1;
	q__1.r = d__1 * a[i__2].r, q__1.i = d__1 * a[i__2].i;
	wa.r = q__1.r, wa.i = q__1.i;
	if (wn == 0.f) {
	    tau.r = 0.f, tau.i = 0.f;
	} else {
	    i__2 = *k + i + i * a_dim1;
	    q__1.r = a[i__2].r + wa.r, q__1.i = a[i__2].i + wa.i;
	    wb.r = q__1.r, wb.i = q__1.i;
	    i__2 = *n - *k - i;
	    c_div(&q__1, &c_b2, &wb);
	    cscal_(&i__2, &q__1, &a[*k + i + 1 + i * a_dim1], &c__1);
	    i__2 = *k + i + i * a_dim1;
	    a[i__2].r = 1.f, a[i__2].i = 0.f;
	    c_div(&q__1, &wb, &wa);
	    d__1 = q__1.r;
	    tau.r = d__1, tau.i = 0.f;
	}

/*        apply reflection to A(k+i:n,i+1:k+i-1) from the left */

	i__2 = *n - *k - i + 1;
	i__3 = *k - 1;
	cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i + (i + 1)
		 * a_dim1], lda, &a[*k + i + i * a_dim1], &c__1, &c_b1, &work[
		1], &c__1);
	i__2 = *n - *k - i + 1;
	i__3 = *k - 1;
	q__1.r = -(doublereal)tau.r, q__1.i = -(doublereal)tau.i;
	cgerc_(&i__2, &i__3, &q__1, &a[*k + i + i * a_dim1], &c__1, &work[1], 
		&c__1, &a[*k + i + (i + 1) * a_dim1], lda);

/*        apply reflection to A(k+i:n,k+i:n) from the left and the right

          compute  y := tau * A * conjg(u) */

	i__2 = *n - *k - i + 1;
	clacgv_(&i__2, &a[*k + i + i * a_dim1], &c__1);
	i__2 = *n - *k - i + 1;
	csymv_("Lower", &i__2, &tau, &a[*k + i + (*k + i) * a_dim1], lda, &a[*
		k + i + i * a_dim1], &c__1, &c_b1, &work[1], &c__1);
	i__2 = *n - *k - i + 1;
	clacgv_(&i__2, &a[*k + i + i * a_dim1], &c__1);

/*        compute  v := y - 1/2 * tau * ( u, y ) * u */

	q__3.r = -.5f, q__3.i = 0.f;
	q__2.r = q__3.r * tau.r - q__3.i * tau.i, q__2.i = q__3.r * tau.i + 
		q__3.i * tau.r;
	i__2 = *n - *k - i + 1;
	cdotc_(&q__4, &i__2, &a[*k + i + i * a_dim1], &c__1, &work[1], &c__1);
	q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r * q__4.i 
		+ q__2.i * q__4.r;
	alpha.r = q__1.r, alpha.i = q__1.i;
	i__2 = *n - *k - i + 1;
	caxpy_(&i__2, &alpha, &a[*k + i + i * a_dim1], &c__1, &work[1], &c__1)
		;

/*        apply symmetric rank-2 update to A(k+i:n,k+i:n)   

          CALL CSYR2( 'Lower', N-K-I+1, -ONE, A( K+I, I ), 1, WORK, 1, A( K+I, K+I ), LDA ) */

	i__2 = *n;
	for (jj = *k + i; jj <= i__2; ++jj) {
	    i__3 = *n;
	    for (ii = jj; ii <= i__3; ++ii) {
		i__4 = ii + jj * a_dim1;
		i__5 = ii + jj * a_dim1;
		i__6 = ii + i * a_dim1;
		i__7 = jj - *k - i + 1;
		q__3.r = a[i__6].r * work[i__7].r - a[i__6].i * work[i__7].i, 
			q__3.i = a[i__6].r * work[i__7].i + a[i__6].i * work[
			i__7].r;
		q__2.r = a[i__5].r - q__3.r, q__2.i = a[i__5].i - q__3.i;
		i__8 = ii - *k - i + 1;
		i__9 = jj + i * a_dim1;
		q__4.r = work[i__8].r * a[i__9].r - work[i__8].i * a[i__9].i, 
			q__4.i = work[i__8].r * a[i__9].i + work[i__8].i * a[
			i__9].r;
		q__1.r = q__2.r - q__4.r, q__1.i = q__2.i - q__4.i;
		a[i__4].r = q__1.r, a[i__4].i = q__1.i;
/* L70: */
	    }
/* L80: */
	}

	i__2 = *k + i + i * a_dim1;
	q__1.r = -(doublereal)wa.r, q__1.i = -(doublereal)wa.i;
	a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	i__2 = *n;
	for (j = *k + i + 1; j <= i__2; ++j) {
	    i__3 = j + i * a_dim1;
	    a[i__3].r = 0.f, a[i__3].i = 0.f;
/* L90: */
	}
/* L100: */
    }

/*     Store full symmetric matrix */

    i__1 = *n;
    for (j = 1; j <= i__1; ++j) {
	i__2 = *n;
	for (i = j + 1; i <= i__2; ++i) {
	    i__3 = j + i * a_dim1;
	    i__4 = i + j * a_dim1;
	    a[i__3].r = a[i__4].r, a[i__3].i = a[i__4].i;
/* L110: */
	}
/* L120: */
    }
    return 0;

/*     End of CLAGSY */

} /* clagsy_ */
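The random reflection generated repeatedly in CLAGSY follows a fixed recipe: wa = (||w||/|w1|)*w1, wb = w1 + wa, scale the tail by 1/wb, set w1 = 1 and tau = Re(wb/wa), so that H = I - tau*w*w' maps w onto a multiple of e1. A small C99 sketch with a made-up vector (standard complex type instead of the f2c `complex` struct) looks roughly like this:

/* Hedged sketch: building the complex Householder reflector used above. */
#include <complex.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    float complex w[3] = {1.0f + 1.0f*I, 2.0f, -1.0f*I};   /* made-up vector */
    int n = 3;

    float wn = 0.f;                              /* scnrm2_-style 2-norm     */
    for (int k = 0; k < n; k++) wn += crealf(w[k] * conjf(w[k]));
    wn = sqrtf(wn);

    float complex wa = (wn / cabsf(w[0])) * w[0];
    float complex wb = w[0] + wa;
    for (int k = 1; k < n; k++) w[k] /= wb;      /* cscal_ of the tail by 1/wb */
    w[0] = 1.0f;
    float tau = crealf(wb / wa);

    printf("tau = %g, ||w|| = %g\n", tau, wn);
    return 0;
}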
Example #9
0
File: clarge.c  Project: zangel/uquad
/* Subroutine */ int clarge_(integer *n, complex *a, integer *lda, integer *
                             iseed, complex *work, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1;
    real r__1;
    complex q__1;

    /* Builtin functions */
    double c_abs(complex *);
    void c_div(complex *, complex *, complex *);

    /* Local variables */
    static integer i__;
    extern /* Subroutine */ int cgerc_(integer *, integer *, complex *,
                                       complex *, integer *, complex *, integer *, complex *, integer *),
                                               cscal_(integer *, complex *, complex *, integer *), cgemv_(char *
                                                       , integer *, integer *, complex *, complex *, integer *, complex *
                                                       , integer *, complex *, complex *, integer *);
    extern doublereal scnrm2_(integer *, complex *, integer *);
    static complex wa, wb;
    static real wn;
    extern /* Subroutine */ int xerbla_(char *, integer *), clarnv_(
        integer *, integer *, integer *, complex *);
    static complex tau;


#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]


    /*  -- LAPACK auxiliary test routine (version 3.0) --
           Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
           Courant Institute, Argonne National Lab, and Rice University
           September 30, 1994


        Purpose
        =======

        CLARGE pre- and post-multiplies a complex general n by n matrix A
        with a random unitary matrix: A = U*A*U'.

        Arguments
        =========

        N       (input) INTEGER
                The order of the matrix A.  N >= 0.

        A       (input/output) COMPLEX array, dimension (LDA,N)
                On entry, the original n by n matrix A.
                On exit, A is overwritten by U*A*U' for some random
                unitary matrix U.

        LDA     (input) INTEGER
                The leading dimension of the array A.  LDA >= N.

        ISEED   (input/output) INTEGER array, dimension (4)
                On entry, the seed of the random number generator; the array
                elements must be between 0 and 4095, and ISEED(4) must be
                odd.
                On exit, the seed is updated.

        WORK    (workspace) COMPLEX array, dimension (2*N)

        INFO    (output) INTEGER
                = 0: successful exit
                < 0: if INFO = -i, the i-th argument had an illegal value

        =====================================================================


           Test the input arguments

           Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --iseed;
    --work;

    /* Function Body */
    *info = 0;
    if (*n < 0) {
        *info = -1;
    } else if (*lda < max(1,*n)) {
        *info = -3;
    }
    if (*info < 0) {
        i__1 = -(*info);
        xerbla_("CLARGE", &i__1);
        return 0;
    }

    /*     pre- and post-multiply A by random unitary matrix */

    for (i__ = *n; i__ >= 1; --i__) {

        /*        generate random reflection */

        i__1 = *n - i__ + 1;
        clarnv_(&c__3, &iseed[1], &i__1, &work[1]);
        i__1 = *n - i__ + 1;
        wn = scnrm2_(&i__1, &work[1], &c__1);
        r__1 = wn / c_abs(&work[1]);
        q__1.r = r__1 * work[1].r, q__1.i = r__1 * work[1].i;
        wa.r = q__1.r, wa.i = q__1.i;
        if (wn == 0.f) {
            tau.r = 0.f, tau.i = 0.f;
        } else {
            q__1.r = work[1].r + wa.r, q__1.i = work[1].i + wa.i;
            wb.r = q__1.r, wb.i = q__1.i;
            i__1 = *n - i__;
            c_div(&q__1, &c_b2, &wb);
            cscal_(&i__1, &q__1, &work[2], &c__1);
            work[1].r = 1.f, work[1].i = 0.f;
            c_div(&q__1, &wb, &wa);
            r__1 = q__1.r;
            tau.r = r__1, tau.i = 0.f;
        }

        /*        multiply A(i:n,1:n) by random reflection from the left */

        i__1 = *n - i__ + 1;
        cgemv_("Conjugate transpose", &i__1, n, &c_b2, &a_ref(i__, 1), lda, &
               work[1], &c__1, &c_b1, &work[*n + 1], &c__1);
        i__1 = *n - i__ + 1;
        q__1.r = -tau.r, q__1.i = -tau.i;
        cgerc_(&i__1, n, &q__1, &work[1], &c__1, &work[*n + 1], &c__1, &a_ref(
                   i__, 1), lda);

        /*        multiply A(1:n,i:n) by random reflection from the right */

        i__1 = *n - i__ + 1;
        cgemv_("No transpose", n, &i__1, &c_b2, &a_ref(1, i__), lda, &work[1],
               &c__1, &c_b1, &work[*n + 1], &c__1);
        i__1 = *n - i__ + 1;
        q__1.r = -tau.r, q__1.i = -tau.i;
        cgerc_(n, &i__1, &q__1, &work[*n + 1], &c__1, &work[1], &c__1, &a_ref(
                   1, i__), lda);
        /* L10: */
    }
    return 0;

    /*     End of CLARGE */

} /* clarge_ */
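In CLARGE the left-side application of the reflector H = I - tau*u*u^H is done by the cgemv_/cgerc_ pair: first y = A^H*u, then A = A - tau*u*y^H. A minimal dense sketch of that pair follows; the 2x2 data and tau are made up, and C99 complex is used instead of the f2c struct.

/* Hedged sketch: applying H = I - tau*u*u^H from the left to a small matrix. */
#include <complex.h>
#include <stdio.h>

int main(void)
{
    enum { N = 2 };
    float complex A[N*N] = {1, 2, 3, 4};        /* column-major 2x2, made up */
    float complex u[N]   = {1, 0.5f*I};
    float complex y[N];
    float tau = 1.2f;

    /* y = A^H * u   (the cgemv_ "Conjugate transpose" call) */
    for (int j = 0; j < N; j++) {
        y[j] = 0;
        for (int i = 0; i < N; i++) y[j] += conjf(A[i + j*N]) * u[i];
    }
    /* A = A - tau * u * y^H   (the cgerc_ call with alpha = -tau) */
    for (int j = 0; j < N; j++)
        for (int i = 0; i < N; i++)
            A[i + j*N] -= tau * u[i] * conjf(y[j]);

    printf("A(1,1) = %g%+gi\n", crealf(A[0]), cimagf(A[0]));
    return 0;
}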
Example #10
0
/* Subroutine */ int cqrt15_(integer *scale, integer *rksel, integer *m, 
	integer *n, integer *nrhs, complex *a, integer *lda, complex *b, 
	integer *ldb, real *s, integer *rank, real *norma, real *normb, 
	integer *iseed, complex *work, integer *lwork)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
    real r__1;

    /* Local variables */
    integer j, mn;
    real eps;
    integer info;
    real temp;
    extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *, 
	    integer *, complex *, complex *, integer *, complex *, integer *, 
	    complex *, complex *, integer *), clarf_(char *, 
	    integer *, integer *, complex *, integer *, complex *, complex *, 
	    integer *, complex *);
    extern doublereal sasum_(integer *, real *, integer *);
    real dummy[1];
    extern doublereal scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */ int slabad_(real *, real *);
    extern doublereal clange_(char *, integer *, integer *, complex *, 
	    integer *, real *);
    extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *, 
	    real *, integer *, integer *, complex *, integer *, integer *);
    extern doublereal slamch_(char *);
    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
	    *), claset_(char *, integer *, integer *, complex *, complex *, 
	    complex *, integer *), xerbla_(char *, integer *);
    real bignum;
    extern /* Subroutine */ int claror_(char *, char *, integer *, integer *, 
	    complex *, integer *, integer *, complex *, integer *);
    extern doublereal slarnd_(integer *, integer *);
    extern /* Subroutine */ int slaord_(char *, integer *, real *, integer *), clarnv_(integer *, integer *, integer *, complex *), 
	    slascl_(char *, integer *, integer *, real *, real *, integer *, 
	    integer *, real *, integer *, integer *);
    real smlnum;


/*  -- LAPACK test routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CQRT15 generates a matrix with full or deficient rank and of various */
/*  norms. */

/*  Arguments */
/*  ========= */

/*  SCALE   (input) INTEGER */
/*          SCALE = 1: normally scaled matrix */
/*          SCALE = 2: matrix scaled up */
/*          SCALE = 3: matrix scaled down */

/*  RKSEL   (input) INTEGER */
/*          RKSEL = 1: full rank matrix */
/*          RKSEL = 2: rank-deficient matrix */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A. */

/*  N       (input) INTEGER */
/*          The number of columns of A. */

/*  NRHS    (input) INTEGER */
/*          The number of columns of B. */

/*  A       (output) COMPLEX array, dimension (LDA,N) */
/*          The M-by-N matrix A. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A. */

/*  B       (output) COMPLEX array, dimension (LDB, NRHS) */
/*          A matrix that is in the range space of matrix A. */

/*  LDB     (input) INTEGER */
/*          The leading dimension of the array B. */

/*  S       (output) REAL array, dimension MIN(M,N) */
/*          Singular values of A. */

/*  RANK    (output) INTEGER */
/*          number of nonzero singular values of A. */

/*  NORMA   (output) REAL */
/*          one-norm of A. */

/*  NORMB   (output) REAL */
/*          one-norm of B. */

/*  ISEED   (input/output) integer array, dimension (4) */
/*          seed for random number generator. */

/*  WORK    (workspace) COMPLEX array, dimension (LWORK) */

/*  LWORK   (input) INTEGER */
/*          length of work space required. */
/*          LWORK >= MAX(M+MIN(M,N),NRHS*MIN(M,N),2*N+M) */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    --s;
    --iseed;
    --work;

    /* Function Body */
    mn = min(*m,*n);
/* Computing MAX */
    i__1 = *m + mn, i__2 = mn * *nrhs, i__1 = max(i__1,i__2), i__2 = (*n << 1)
	     + *m;
    if (*lwork < max(i__1,i__2)) {
	xerbla_("CQRT15", &c__16);
	return 0;
    }

    smlnum = slamch_("Safe minimum");
    bignum = 1.f / smlnum;
    slabad_(&smlnum, &bignum);
    eps = slamch_("Epsilon");
    smlnum = smlnum / eps / eps;
    bignum = 1.f / smlnum;

/*     Determine rank and (unscaled) singular values */

    if (*rksel == 1) {
	*rank = mn;
    } else if (*rksel == 2) {
	*rank = mn * 3 / 4;
	i__1 = mn;
	for (j = *rank + 1; j <= i__1; ++j) {
	    s[j] = 0.f;
/* L10: */
	}
    } else {
	xerbla_("CQRT15", &c__2);
    }

    if (*rank > 0) {

/*        Nontrivial case */

	s[1] = 1.f;
	i__1 = *rank;
	for (j = 2; j <= i__1; ++j) {
L20:
	    temp = slarnd_(&c__1, &iseed[1]);
	    if (temp > .1f) {
		s[j] = dabs(temp);
	    } else {
		goto L20;
	    }
/* L30: */
	}
	slaord_("Decreasing", rank, &s[1], &c__1);

/*        Generate 'rank' columns of a random orthogonal matrix in A */

	clarnv_(&c__2, &iseed[1], m, &work[1]);
	r__1 = 1.f / scnrm2_(m, &work[1], &c__1);
	csscal_(m, &r__1, &work[1], &c__1);
	claset_("Full", m, rank, &c_b1, &c_b2, &a[a_offset], lda);
	clarf_("Left", m, rank, &work[1], &c__1, &c_b22, &a[a_offset], lda, &
		work[*m + 1]);

/*        workspace used: m+mn */

/*        Generate consistent rhs in the range space of A */

	i__1 = *rank * *nrhs;
	clarnv_(&c__2, &iseed[1], &i__1, &work[1]);
	cgemm_("No transpose", "No transpose", m, nrhs, rank, &c_b2, &a[
		a_offset], lda, &work[1], rank, &c_b1, &b[b_offset], ldb);

/*        work space used: <= mn *nrhs */

/*        generate (unscaled) matrix A */

	i__1 = *rank;
	for (j = 1; j <= i__1; ++j) {
	    csscal_(m, &s[j], &a[j * a_dim1 + 1], &c__1);
/* L40: */
	}
	if (*rank < *n) {
	    i__1 = *n - *rank;
	    claset_("Full", m, &i__1, &c_b1, &c_b1, &a[(*rank + 1) * a_dim1 + 
		    1], lda);
	}
	claror_("Right", "No initialization", m, n, &a[a_offset], lda, &iseed[
		1], &work[1], &info);

    } else {

/*        work space used 2*n+m */

/*        Generate null matrix and rhs */

	i__1 = mn;
	for (j = 1; j <= i__1; ++j) {
	    s[j] = 0.f;
/* L50: */
	}
	claset_("Full", m, n, &c_b1, &c_b1, &a[a_offset], lda);
	claset_("Full", m, nrhs, &c_b1, &c_b1, &b[b_offset], ldb);

    }

/*     Scale the matrix */

    if (*scale != 1) {
	*norma = clange_("Max", m, n, &a[a_offset], lda, dummy);
	if (*norma != 0.f) {
	    if (*scale == 2) {

/*              matrix scaled up */

		clascl_("General", &c__0, &c__0, norma, &bignum, m, n, &a[
			a_offset], lda, &info);
		slascl_("General", &c__0, &c__0, norma, &bignum, &mn, &c__1, &
			s[1], &mn, &info);
		clascl_("General", &c__0, &c__0, norma, &bignum, m, nrhs, &b[
			b_offset], ldb, &info);
	    } else if (*scale == 3) {

/*              matrix scaled down */

		clascl_("General", &c__0, &c__0, norma, &smlnum, m, n, &a[
			a_offset], lda, &info);
		slascl_("General", &c__0, &c__0, norma, &smlnum, &mn, &c__1, &
			s[1], &mn, &info);
		clascl_("General", &c__0, &c__0, norma, &smlnum, m, nrhs, &b[
			b_offset], ldb, &info);
	    } else {
		xerbla_("CQRT15", &c__1);
		return 0;
	    }
	}
    }

    *norma = sasum_(&mn, &s[1], &c__1);
    *normb = clange_("One-norm", m, nrhs, &b[b_offset], ldb, dummy);

    return 0;

/*     End of CQRT15 */

} /* cqrt15_ */
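For the rank-deficient branch of CQRT15 the singular-value spectrum is: one leading value of 1, random values kept above 0.1 and sorted decreasingly, and zeros past RANK. The sketch below is only an approximation of that construction, with rand() standing in for SLARND and qsort standing in for SLAORD.

/* Hedged sketch: rank-deficient singular-value spectrum, cqrt15_-style. */
#include <stdio.h>
#include <stdlib.h>

static int cmp_desc(const void *a, const void *b)
{
    float x = *(const float *)a, y = *(const float *)b;
    return (x < y) - (x > y);               /* descending order */
}

int main(void)
{
    enum { MN = 8 };
    float s[MN];
    int rank = MN * 3 / 4;                  /* 3/4 of min(m,n), as above */

    s[0] = 1.f;
    for (int j = 1; j < rank; j++) {
        float t;
        do { t = (float)rand() / (float)RAND_MAX; } while (t <= 0.1f);
        s[j] = t;                           /* keep only values above 0.1 */
    }
    for (int j = rank; j < MN; j++) s[j] = 0.f;
    qsort(s, rank, sizeof s[0], cmp_desc);  /* decreasing, like SLAORD */

    for (int j = 0; j < MN; j++) printf("s[%d] = %g\n", j, s[j]);
    return 0;
}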
Example #11
0
/* Subroutine */ int ctrsna_(char *job, char *howmny, logical *select, 
	integer *n, complex *t, integer *ldt, complex *vl, integer *ldvl, 
	complex *vr, integer *ldvr, real *s, real *sep, integer *mm, integer *
	m, complex *work, integer *ldwork, real *rwork, integer *info)
{
    /* System generated locals */
    integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, 
	    work_dim1, work_offset, i__1, i__2, i__3, i__4, i__5;
    real r__1, r__2;
    complex q__1;

    /* Builtin functions */
    double c_abs(complex *), r_imag(complex *);

    /* Local variables */
    integer i__, j, k, ks, ix;
    real eps, est;
    integer kase, ierr;
    complex prod;
    real lnrm, rnrm, scale;
    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer 
	    *, complex *, integer *);
    extern logical lsame_(char *, char *);
    integer isave[3];
    complex dummy[1];
    logical wants;
    extern /* Subroutine */ int clacn2_(integer *, complex *, complex *, real 
	    *, integer *, integer *);
    real xnorm;
    extern doublereal scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */ int slabad_(real *, real *);
    extern integer icamax_(integer *, complex *, integer *);
    extern doublereal slamch_(char *);
    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex 
	    *, integer *, complex *, integer *), xerbla_(char *, 
	    integer *);
    real bignum;
    logical wantbh;
    extern /* Subroutine */ int clatrs_(char *, char *, char *, char *, 
	    integer *, complex *, integer *, complex *, real *, real *, 
	    integer *), csrscl_(integer *, 
	    real *, complex *, integer *), ctrexc_(char *, integer *, complex 
	    *, integer *, complex *, integer *, integer *, integer *, integer 
	    *);
    logical somcon;
    char normin[1];
    real smlnum;
    logical wantsp;


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     Modified to call CLACN2 in place of CLACON, 10 Feb 03, SJH. */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CTRSNA estimates reciprocal condition numbers for specified */
/*  eigenvalues and/or right eigenvectors of a complex upper triangular */
/*  matrix T (or of any matrix Q*T*Q**H with Q unitary). */

/*  Arguments */
/*  ========= */

/*  JOB     (input) CHARACTER*1 */
/*          Specifies whether condition numbers are required for */
/*          eigenvalues (S) or eigenvectors (SEP): */
/*          = 'E': for eigenvalues only (S); */
/*          = 'V': for eigenvectors only (SEP); */
/*          = 'B': for both eigenvalues and eigenvectors (S and SEP). */

/*  HOWMNY  (input) CHARACTER*1 */
/*          = 'A': compute condition numbers for all eigenpairs; */
/*          = 'S': compute condition numbers for selected eigenpairs */
/*                 specified by the array SELECT. */

/*  SELECT  (input) LOGICAL array, dimension (N) */
/*          If HOWMNY = 'S', SELECT specifies the eigenpairs for which */
/*          condition numbers are required. To select condition numbers */
/*          for the j-th eigenpair, SELECT(j) must be set to .TRUE.. */
/*          If HOWMNY = 'A', SELECT is not referenced. */

/*  N       (input) INTEGER */
/*          The order of the matrix T. N >= 0. */

/*  T       (input) COMPLEX array, dimension (LDT,N) */
/*          The upper triangular matrix T. */

/*  LDT     (input) INTEGER */
/*          The leading dimension of the array T. LDT >= max(1,N). */

/*  VL      (input) COMPLEX array, dimension (LDVL,M) */
/*          If JOB = 'E' or 'B', VL must contain left eigenvectors of T */
/*          (or of any Q*T*Q**H with Q unitary), corresponding to the */
/*          eigenpairs specified by HOWMNY and SELECT. The eigenvectors */
/*          must be stored in consecutive columns of VL, as returned by */
/*          CHSEIN or CTREVC. */
/*          If JOB = 'V', VL is not referenced. */

/*  LDVL    (input) INTEGER */
/*          The leading dimension of the array VL. */
/*          LDVL >= 1; and if JOB = 'E' or 'B', LDVL >= N. */

/*  VR      (input) COMPLEX array, dimension (LDVR,M) */
/*          If JOB = 'E' or 'B', VR must contain right eigenvectors of T */
/*          (or of any Q*T*Q**H with Q unitary), corresponding to the */
/*          eigenpairs specified by HOWMNY and SELECT. The eigenvectors */
/*          must be stored in consecutive columns of VR, as returned by */
/*          CHSEIN or CTREVC. */
/*          If JOB = 'V', VR is not referenced. */

/*  LDVR    (input) INTEGER */
/*          The leading dimension of the array VR. */
/*          LDVR >= 1; and if JOB = 'E' or 'B', LDVR >= N. */

/*  S       (output) REAL array, dimension (MM) */
/*          If JOB = 'E' or 'B', the reciprocal condition numbers of the */
/*          selected eigenvalues, stored in consecutive elements of the */
/*          array. Thus S(j), SEP(j), and the j-th columns of VL and VR */
/*          all correspond to the same eigenpair (but not in general the */
/*          j-th eigenpair, unless all eigenpairs are selected). */
/*          If JOB = 'V', S is not referenced. */

/*  SEP     (output) REAL array, dimension (MM) */
/*          If JOB = 'V' or 'B', the estimated reciprocal condition */
/*          numbers of the selected eigenvectors, stored in consecutive */
/*          elements of the array. */
/*          If JOB = 'E', SEP is not referenced. */

/*  MM      (input) INTEGER */
/*          The number of elements in the arrays S (if JOB = 'E' or 'B') */
/*           and/or SEP (if JOB = 'V' or 'B'). MM >= M. */

/*  M       (output) INTEGER */
/*          The number of elements of the arrays S and/or SEP actually */
/*          used to store the estimated condition numbers. */
/*          If HOWMNY = 'A', M is set to N. */

/*  WORK    (workspace) COMPLEX array, dimension (LDWORK,N+6) */
/*          If JOB = 'E', WORK is not referenced. */

/*  LDWORK  (input) INTEGER */
/*          The leading dimension of the array WORK. */
/*          LDWORK >= 1; and if JOB = 'V' or 'B', LDWORK >= N. */

/*  RWORK   (workspace) REAL array, dimension (N) */
/*          If JOB = 'E', RWORK is not referenced. */

/*  INFO    (output) INTEGER */
/*          = 0: successful exit */
/*          < 0: if INFO = -i, the i-th argument had an illegal value */

/*  Further Details */
/*  =============== */

/*  The reciprocal of the condition number of an eigenvalue lambda is */
/*  defined as */

/*          S(lambda) = |v'*u| / (norm(u)*norm(v)) */

/*  where u and v are the right and left eigenvectors of T corresponding */
/*  to lambda; v' denotes the conjugate transpose of v, and norm(u) */
/*  denotes the Euclidean norm. These reciprocal condition numbers always */
/*  lie between zero (very badly conditioned) and one (very well */
/*  conditioned). If n = 1, S(lambda) is defined to be 1. */

/*  An approximate error bound for a computed eigenvalue W(i) is given by */

/*                      EPS * norm(T) / S(i) */

/*  where EPS is the machine precision. */

/*  The reciprocal of the condition number of the right eigenvector u */
/*  corresponding to lambda is defined as follows. Suppose */

/*              T = ( lambda  c  ) */
/*                  (   0    T22 ) */

/*  Then the reciprocal condition number is */

/*          SEP( lambda, T22 ) = sigma-min( T22 - lambda*I ) */

/*  where sigma-min denotes the smallest singular value. We approximate */
/*  the smallest singular value by the reciprocal of an estimate of the */
/*  one-norm of the inverse of T22 - lambda*I. If n = 1, SEP(1) is */
/*  defined to be abs(T(1,1)). */

/*  An approximate error bound for a computed right eigenvector VR(i) */
/*  is given by */

/*                      EPS * norm(T) / SEP(i) */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Statement Functions .. */
/*     .. */
/*     .. Statement Function definitions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Decode and test the input parameters */

    /* Parameter adjustments */
    --select;
    t_dim1 = *ldt;
    t_offset = 1 + t_dim1;
    t -= t_offset;
    vl_dim1 = *ldvl;
    vl_offset = 1 + vl_dim1;
    vl -= vl_offset;
    vr_dim1 = *ldvr;
    vr_offset = 1 + vr_dim1;
    vr -= vr_offset;
    --s;
    --sep;
    work_dim1 = *ldwork;
    work_offset = 1 + work_dim1;
    work -= work_offset;
    --rwork;

    /* Function Body */
    wantbh = lsame_(job, "B");
    wants = lsame_(job, "E") || wantbh;
    wantsp = lsame_(job, "V") || wantbh;

    somcon = lsame_(howmny, "S");

/*     Set M to the number of eigenpairs for which condition numbers are */
/*     to be computed. */

    if (somcon) {
	*m = 0;
	i__1 = *n;
	for (j = 1; j <= i__1; ++j) {
	    if (select[j]) {
		++(*m);
	    }
/* L10: */
	}
    } else {
	*m = *n;
    }

    *info = 0;
    if (! wants && ! wantsp) {
	*info = -1;
    } else if (! lsame_(howmny, "A") && ! somcon) {
	*info = -2;
    } else if (*n < 0) {
	*info = -4;
    } else if (*ldt < max(1,*n)) {
	*info = -6;
    } else if (*ldvl < 1 || wants && *ldvl < *n) {
	*info = -8;
    } else if (*ldvr < 1 || wants && *ldvr < *n) {
	*info = -10;
    } else if (*mm < *m) {
	*info = -13;
    } else if (*ldwork < 1 || wantsp && *ldwork < *n) {
	*info = -16;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CTRSNA", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }

    if (*n == 1) {
	if (somcon) {
	    if (! select[1]) {
		return 0;
	    }
	}
	if (wants) {
	    s[1] = 1.f;
	}
	if (wantsp) {
	    sep[1] = c_abs(&t[t_dim1 + 1]);
	}
	return 0;
    }

/*     Get machine constants */

    eps = slamch_("P");
    smlnum = slamch_("S") / eps;
    bignum = 1.f / smlnum;
    slabad_(&smlnum, &bignum);

    ks = 1;
    i__1 = *n;
    for (k = 1; k <= i__1; ++k) {

	if (somcon) {
	    if (! select[k]) {
		goto L50;
	    }
	}

	if (wants) {

/*           Compute the reciprocal condition number of the k-th */
/*           eigenvalue. */

	    cdotc_(&q__1, n, &vr[ks * vr_dim1 + 1], &c__1, &vl[ks * vl_dim1 + 
		    1], &c__1);
	    prod.r = q__1.r, prod.i = q__1.i;
	    rnrm = scnrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
	    lnrm = scnrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
	    s[ks] = c_abs(&prod) / (rnrm * lnrm);

	}

	if (wantsp) {

/*           Estimate the reciprocal condition number of the k-th */
/*           eigenvector. */

/*           Copy the matrix T to the array WORK and swap the k-th */
/*           diagonal element to the (1,1) position. */

	    clacpy_("Full", n, n, &t[t_offset], ldt, &work[work_offset], 
		    ldwork);
	    ctrexc_("No Q", n, &work[work_offset], ldwork, dummy, &c__1, &k, &
		    c__1, &ierr);

/*           Form  C = T22 - lambda*I in WORK(2:N,2:N). */

	    i__2 = *n;
	    for (i__ = 2; i__ <= i__2; ++i__) {
		i__3 = i__ + i__ * work_dim1;
		i__4 = i__ + i__ * work_dim1;
		i__5 = work_dim1 + 1;
		q__1.r = work[i__4].r - work[i__5].r, q__1.i = work[i__4].i - 
			work[i__5].i;
		work[i__3].r = q__1.r, work[i__3].i = q__1.i;
/* L20: */
	    }

/*           Estimate a lower bound for the 1-norm of inv(C'). The 1st */
/*           and (N+1)th columns of WORK are used to store work vectors. */

	    sep[ks] = 0.f;
	    est = 0.f;
	    kase = 0;
	    *(unsigned char *)normin = 'N';
L30:
	    i__2 = *n - 1;
	    clacn2_(&i__2, &work[(*n + 1) * work_dim1 + 1], &work[work_offset],
		    &est, &kase, isave);

	    if (kase != 0) {
		if (kase == 1) {

/*                 Solve C'*x = scale*b */

		    i__2 = *n - 1;
		    clatrs_("Upper", "Conjugate transpose", "Nonunit", normin, 
			     &i__2, &work[(work_dim1 << 1) + 2], ldwork, &
			    work[work_offset], &scale, &rwork[1], &ierr);
		} else {

/*                 Solve C*x = scale*b */

		    i__2 = *n - 1;
		    clatrs_("Upper", "No transpose", "Nonunit", normin, &i__2, 
			     &work[(work_dim1 << 1) + 2], ldwork, &work[
			    work_offset], &scale, &rwork[1], &ierr);
		}
		*(unsigned char *)normin = 'Y';
		if (scale != 1.f) {

/*                 Multiply by 1/SCALE if doing so will not cause */
/*                 overflow. */

		    i__2 = *n - 1;
		    ix = icamax_(&i__2, &work[work_offset], &c__1);
		    i__2 = ix + work_dim1;
		    xnorm = (r__1 = work[i__2].r, dabs(r__1)) + (r__2 = 
			    r_imag(&work[ix + work_dim1]), dabs(r__2));
		    if (scale < xnorm * smlnum || scale == 0.f) {
			goto L40;
		    }
		    csrscl_(n, &scale, &work[work_offset], &c__1);
		}
		goto L30;
	    }

	    sep[ks] = 1.f / dmax(est,smlnum);
	}

L40:
	++ks;
L50:
	;
    }
    return 0;

/*     End of CTRSNA */

} /* ctrsna_ */
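The eigenvalue condition number computed in the `wants` branch of CTRSNA is simply S(lambda) = |v^H*u| / (||u||*||v||), where u and v are the right and left eigenvectors. A self-contained sketch with made-up eigenvectors (C99 complex instead of the f2c struct):

/* Hedged sketch: reciprocal eigenvalue condition number from u and v. */
#include <complex.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    enum { N = 3 };
    float complex u[N] = {1, 0.5f*I, 0.25f};   /* assumed right eigenvector */
    float complex v[N] = {1, -0.5f*I, 0};      /* assumed left eigenvector  */
    float complex dot = 0;
    float nu = 0.f, nv = 0.f;

    for (int i = 0; i < N; i++) {
        dot += conjf(v[i]) * u[i];             /* cdotc_-style inner product */
        nu  += crealf(u[i] * conjf(u[i]));     /* squared scnrm2_ of u       */
        nv  += crealf(v[i] * conjf(v[i]));     /* squared scnrm2_ of v       */
    }
    printf("S(lambda) = %g\n", cabsf(dot) / (sqrtf(nu) * sqrtf(nv)));
    return 0;
}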
Example #12
0
/* Subroutine */ int clagge_(integer *m, integer *n, integer *kl, integer *ku, 
	 real *d__, complex *a, integer *lda, integer *iseed, complex *work, 
	integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    real r__1;
    complex q__1;

    /* Local variables */
    integer i__, j;
    complex wa, wb;
    real wn;
    complex tau;

/*  -- LAPACK auxiliary test routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CLAGGE generates a complex general m by n matrix A, by pre- and post- */
/*  multiplying a real diagonal matrix D with random unitary matrices: */
/*  A = U*D*V. The lower and upper bandwidths may then be reduced to */
/*  kl and ku by additional unitary transformations. */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A.  M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A.  N >= 0. */

/*  KL      (input) INTEGER */
/*          The number of nonzero subdiagonals within the band of A. */
/*          0 <= KL <= M-1. */

/*  KU      (input) INTEGER */
/*          The number of nonzero superdiagonals within the band of A. */
/*          0 <= KU <= N-1. */

/*  D       (input) REAL array, dimension (min(M,N)) */
/*          The diagonal elements of the diagonal matrix D. */

/*  A       (output) COMPLEX array, dimension (LDA,N) */
/*          The generated m by n matrix A. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= M. */

/*  ISEED   (input/output) INTEGER array, dimension (4) */
/*          On entry, the seed of the random number generator; the array */
/*          elements must be between 0 and 4095, and ISEED(4) must be */
/*          odd. */
/*          On exit, the seed is updated. */

/*  WORK    (workspace) COMPLEX array, dimension (M+N) */

/*  INFO    (output) INTEGER */
/*          = 0: successful exit */
/*          < 0: if INFO = -i, the i-th argument had an illegal value */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input arguments */

    /* Parameter adjustments */
    --d__;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --iseed;
    --work;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*kl < 0 || *kl > *m - 1) {
	*info = -3;
    } else if (*ku < 0 || *ku > *n - 1) {
	*info = -4;
    } else if (*lda < max(1,*m)) {
	*info = -7;
    }
    if (*info < 0) {
	i__1 = -(*info);
	xerbla_("CLAGGE", &i__1);
	return 0;
    }

/*     initialize A to diagonal matrix */

    i__1 = *n;
    for (j = 1; j <= i__1; ++j) {
	i__2 = *m;
	for (i__ = 1; i__ <= i__2; ++i__) {
	    i__3 = i__ + j * a_dim1;
	    a[i__3].r = 0.f, a[i__3].i = 0.f;
/* L10: */
	}
/* L20: */
    }
    i__1 = min(*m,*n);
    for (i__ = 1; i__ <= i__1; ++i__) {
	i__2 = i__ + i__ * a_dim1;
	i__3 = i__;
	a[i__2].r = d__[i__3], a[i__2].i = 0.f;
/* L30: */
    }

/*     pre- and post-multiply A by random unitary matrices */

    for (i__ = min(*m,*n); i__ >= 1; --i__) {
	if (i__ < *m) {

/*           generate random reflection */

	    i__1 = *m - i__ + 1;
	    clarnv_(&c__3, &iseed[1], &i__1, &work[1]);
	    i__1 = *m - i__ + 1;
	    wn = scnrm2_(&i__1, &work[1], &c__1);
	    r__1 = wn / c_abs(&work[1]);
	    q__1.r = r__1 * work[1].r, q__1.i = r__1 * work[1].i;
	    wa.r = q__1.r, wa.i = q__1.i;
	    if (wn == 0.f) {
		tau.r = 0.f, tau.i = 0.f;
	    } else {
		q__1.r = work[1].r + wa.r, q__1.i = work[1].i + wa.i;
		wb.r = q__1.r, wb.i = q__1.i;
		i__1 = *m - i__;
		c_div(&q__1, &c_b2, &wb);
		cscal_(&i__1, &q__1, &work[2], &c__1);
		work[1].r = 1.f, work[1].i = 0.f;
		c_div(&q__1, &wb, &wa);
		r__1 = q__1.r;
		tau.r = r__1, tau.i = 0.f;
	    }

/*           multiply A(i:m,i:n) by random reflection from the left */

	    i__1 = *m - i__ + 1;
	    i__2 = *n - i__ + 1;
	    cgemv_("Conjugate transpose", &i__1, &i__2, &c_b2, &a[i__ + i__ * 
		    a_dim1], lda, &work[1], &c__1, &c_b1, &work[*m + 1], &
		    c__1);
	    i__1 = *m - i__ + 1;
	    i__2 = *n - i__ + 1;
	    q__1.r = -tau.r, q__1.i = -tau.i;
	    cgerc_(&i__1, &i__2, &q__1, &work[1], &c__1, &work[*m + 1], &c__1, 
		     &a[i__ + i__ * a_dim1], lda);
	}
	if (i__ < *n) {

/*           generate random reflection */

	    i__1 = *n - i__ + 1;
	    clarnv_(&c__3, &iseed[1], &i__1, &work[1]);
	    i__1 = *n - i__ + 1;
	    wn = scnrm2_(&i__1, &work[1], &c__1);
	    r__1 = wn / c_abs(&work[1]);
	    q__1.r = r__1 * work[1].r, q__1.i = r__1 * work[1].i;
	    wa.r = q__1.r, wa.i = q__1.i;
	    if (wn == 0.f) {
		tau.r = 0.f, tau.i = 0.f;
	    } else {
		q__1.r = work[1].r + wa.r, q__1.i = work[1].i + wa.i;
		wb.r = q__1.r, wb.i = q__1.i;
		i__1 = *n - i__;
		c_div(&q__1, &c_b2, &wb);
		cscal_(&i__1, &q__1, &work[2], &c__1);
		work[1].r = 1.f, work[1].i = 0.f;
		c_div(&q__1, &wb, &wa);
		r__1 = q__1.r;
		tau.r = r__1, tau.i = 0.f;
	    }

/*           multiply A(i:m,i:n) by random reflection from the right */

	    i__1 = *m - i__ + 1;
	    i__2 = *n - i__ + 1;
	    cgemv_("No transpose", &i__1, &i__2, &c_b2, &a[i__ + i__ * a_dim1]
, lda, &work[1], &c__1, &c_b1, &work[*n + 1], &c__1);
	    i__1 = *m - i__ + 1;
	    i__2 = *n - i__ + 1;
	    q__1.r = -tau.r, q__1.i = -tau.i;
	    cgerc_(&i__1, &i__2, &q__1, &work[*n + 1], &c__1, &work[1], &c__1, 
		     &a[i__ + i__ * a_dim1], lda);
	}
/* L40: */
    }

/*     Reduce number of subdiagonals to KL and number of superdiagonals */
/*     to KU */

/* Computing MAX */
    i__2 = *m - 1 - *kl, i__3 = *n - 1 - *ku;
    i__1 = max(i__2,i__3);
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (*kl <= *ku) {

/*           annihilate subdiagonal elements first (necessary if KL = 0) */

/* Computing MIN */
	    i__2 = *m - 1 - *kl;
	    if (i__ <= min(i__2,*n)) {

/*              generate reflection to annihilate A(kl+i+1:m,i) */

		i__2 = *m - *kl - i__ + 1;
		wn = scnrm2_(&i__2, &a[*kl + i__ + i__ * a_dim1], &c__1);
		r__1 = wn / c_abs(&a[*kl + i__ + i__ * a_dim1]);
		i__2 = *kl + i__ + i__ * a_dim1;
		q__1.r = r__1 * a[i__2].r, q__1.i = r__1 * a[i__2].i;
		wa.r = q__1.r, wa.i = q__1.i;
		if (wn == 0.f) {
		    tau.r = 0.f, tau.i = 0.f;
		} else {
		    i__2 = *kl + i__ + i__ * a_dim1;
		    q__1.r = a[i__2].r + wa.r, q__1.i = a[i__2].i + wa.i;
		    wb.r = q__1.r, wb.i = q__1.i;
		    i__2 = *m - *kl - i__;
		    c_div(&q__1, &c_b2, &wb);
		    cscal_(&i__2, &q__1, &a[*kl + i__ + 1 + i__ * a_dim1], &
			    c__1);
		    i__2 = *kl + i__ + i__ * a_dim1;
		    a[i__2].r = 1.f, a[i__2].i = 0.f;
		    c_div(&q__1, &wb, &wa);
		    r__1 = q__1.r;
		    tau.r = r__1, tau.i = 0.f;
		}

/*              apply reflection to A(kl+i:m,i+1:n) from the left */

		i__2 = *m - *kl - i__ + 1;
		i__3 = *n - i__;
		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*kl + 
			i__ + (i__ + 1) * a_dim1], lda, &a[*kl + i__ + i__ * 
			a_dim1], &c__1, &c_b1, &work[1], &c__1);
		i__2 = *m - *kl - i__ + 1;
		i__3 = *n - i__;
		q__1.r = -tau.r, q__1.i = -tau.i;
		cgerc_(&i__2, &i__3, &q__1, &a[*kl + i__ + i__ * a_dim1], &
			c__1, &work[1], &c__1, &a[*kl + i__ + (i__ + 1) * 
			a_dim1], lda);
		i__2 = *kl + i__ + i__ * a_dim1;
		q__1.r = -wa.r, q__1.i = -wa.i;
		a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	    }

/* Computing MIN */
	    i__2 = *n - 1 - *ku;
	    if (i__ <= min(i__2,*m)) {

/*              generate reflection to annihilate A(i,ku+i+1:n) */

		i__2 = *n - *ku - i__ + 1;
		wn = scnrm2_(&i__2, &a[i__ + (*ku + i__) * a_dim1], lda);
		r__1 = wn / c_abs(&a[i__ + (*ku + i__) * a_dim1]);
		i__2 = i__ + (*ku + i__) * a_dim1;
		q__1.r = r__1 * a[i__2].r, q__1.i = r__1 * a[i__2].i;
		wa.r = q__1.r, wa.i = q__1.i;
		if (wn == 0.f) {
		    tau.r = 0.f, tau.i = 0.f;
		} else {
		    i__2 = i__ + (*ku + i__) * a_dim1;
		    q__1.r = a[i__2].r + wa.r, q__1.i = a[i__2].i + wa.i;
		    wb.r = q__1.r, wb.i = q__1.i;
		    i__2 = *n - *ku - i__;
		    c_div(&q__1, &c_b2, &wb);
		    cscal_(&i__2, &q__1, &a[i__ + (*ku + i__ + 1) * a_dim1], 
			    lda);
		    i__2 = i__ + (*ku + i__) * a_dim1;
		    a[i__2].r = 1.f, a[i__2].i = 0.f;
		    c_div(&q__1, &wb, &wa);
		    r__1 = q__1.r;
		    tau.r = r__1, tau.i = 0.f;
		}

/*              apply reflection to A(i+1:m,ku+i:n) from the right */

		i__2 = *n - *ku - i__ + 1;
		clacgv_(&i__2, &a[i__ + (*ku + i__) * a_dim1], lda);
		i__2 = *m - i__;
		i__3 = *n - *ku - i__ + 1;
		cgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + (*ku 
			+ i__) * a_dim1], lda, &a[i__ + (*ku + i__) * a_dim1], 
			 lda, &c_b1, &work[1], &c__1);
		i__2 = *m - i__;
		i__3 = *n - *ku - i__ + 1;
		q__1.r = -tau.r, q__1.i = -tau.i;
		cgerc_(&i__2, &i__3, &q__1, &work[1], &c__1, &a[i__ + (*ku + 
			i__) * a_dim1], lda, &a[i__ + 1 + (*ku + i__) * 
			a_dim1], lda);
		i__2 = i__ + (*ku + i__) * a_dim1;
		q__1.r = -wa.r, q__1.i = -wa.i;
		a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	    }
	} else {

/*           annihilate superdiagonal elements first (necessary if */
/*           KU = 0) */

/* Computing MIN */
	    i__2 = *n - 1 - *ku;
	    if (i__ <= min(i__2,*m)) {

/*              generate reflection to annihilate A(i,ku+i+1:n) */

		i__2 = *n - *ku - i__ + 1;
		wn = scnrm2_(&i__2, &a[i__ + (*ku + i__) * a_dim1], lda);
		r__1 = wn / c_abs(&a[i__ + (*ku + i__) * a_dim1]);
		i__2 = i__ + (*ku + i__) * a_dim1;
		q__1.r = r__1 * a[i__2].r, q__1.i = r__1 * a[i__2].i;
		wa.r = q__1.r, wa.i = q__1.i;
		if (wn == 0.f) {
		    tau.r = 0.f, tau.i = 0.f;
		} else {
		    i__2 = i__ + (*ku + i__) * a_dim1;
		    q__1.r = a[i__2].r + wa.r, q__1.i = a[i__2].i + wa.i;
		    wb.r = q__1.r, wb.i = q__1.i;
		    i__2 = *n - *ku - i__;
		    c_div(&q__1, &c_b2, &wb);
		    cscal_(&i__2, &q__1, &a[i__ + (*ku + i__ + 1) * a_dim1], 
			    lda);
		    i__2 = i__ + (*ku + i__) * a_dim1;
		    a[i__2].r = 1.f, a[i__2].i = 0.f;
		    c_div(&q__1, &wb, &wa);
		    r__1 = q__1.r;
		    tau.r = r__1, tau.i = 0.f;
		}

/*              apply reflection to A(i+1:m,ku+i:n) from the right */

		i__2 = *n - *ku - i__ + 1;
		clacgv_(&i__2, &a[i__ + (*ku + i__) * a_dim1], lda);
		i__2 = *m - i__;
		i__3 = *n - *ku - i__ + 1;
		cgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + (*ku 
			+ i__) * a_dim1], lda, &a[i__ + (*ku + i__) * a_dim1], 
			 lda, &c_b1, &work[1], &c__1);
		i__2 = *m - i__;
		i__3 = *n - *ku - i__ + 1;
		q__1.r = -tau.r, q__1.i = -tau.i;
		cgerc_(&i__2, &i__3, &q__1, &work[1], &c__1, &a[i__ + (*ku + 
			i__) * a_dim1], lda, &a[i__ + 1 + (*ku + i__) * 
			a_dim1], lda);
		i__2 = i__ + (*ku + i__) * a_dim1;
		q__1.r = -wa.r, q__1.i = -wa.i;
		a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	    }

/* Computing MIN */
	    i__2 = *m - 1 - *kl;
	    if (i__ <= min(i__2,*n)) {

/*              generate reflection to annihilate A(kl+i+1:m,i) */

		i__2 = *m - *kl - i__ + 1;
		wn = scnrm2_(&i__2, &a[*kl + i__ + i__ * a_dim1], &c__1);
		r__1 = wn / c_abs(&a[*kl + i__ + i__ * a_dim1]);
		i__2 = *kl + i__ + i__ * a_dim1;
		q__1.r = r__1 * a[i__2].r, q__1.i = r__1 * a[i__2].i;
		wa.r = q__1.r, wa.i = q__1.i;
		if (wn == 0.f) {
		    tau.r = 0.f, tau.i = 0.f;
		} else {
		    i__2 = *kl + i__ + i__ * a_dim1;
		    q__1.r = a[i__2].r + wa.r, q__1.i = a[i__2].i + wa.i;
		    wb.r = q__1.r, wb.i = q__1.i;
		    i__2 = *m - *kl - i__;
		    c_div(&q__1, &c_b2, &wb);
		    cscal_(&i__2, &q__1, &a[*kl + i__ + 1 + i__ * a_dim1], &
			    c__1);
		    i__2 = *kl + i__ + i__ * a_dim1;
		    a[i__2].r = 1.f, a[i__2].i = 0.f;
		    c_div(&q__1, &wb, &wa);
		    r__1 = q__1.r;
		    tau.r = r__1, tau.i = 0.f;
		}

/*              apply reflection to A(kl+i:m,i+1:n) from the left */

		i__2 = *m - *kl - i__ + 1;
		i__3 = *n - i__;
		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*kl + 
			i__ + (i__ + 1) * a_dim1], lda, &a[*kl + i__ + i__ * 
			a_dim1], &c__1, &c_b1, &work[1], &c__1);
		i__2 = *m - *kl - i__ + 1;
		i__3 = *n - i__;
		q__1.r = -tau.r, q__1.i = -tau.i;
		cgerc_(&i__2, &i__3, &q__1, &a[*kl + i__ + i__ * a_dim1], &
			c__1, &work[1], &c__1, &a[*kl + i__ + (i__ + 1) * 
			a_dim1], lda);
		i__2 = *kl + i__ + i__ * a_dim1;
		q__1.r = -wa.r, q__1.i = -wa.i;
		a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	    }
	}

	i__2 = *m;
	for (j = *kl + i__ + 1; j <= i__2; ++j) {
	    i__3 = j + i__ * a_dim1;
	    a[i__3].r = 0.f, a[i__3].i = 0.f;
/* L50: */
	}

	i__2 = *n;
	for (j = *ku + i__ + 1; j <= i__2; ++j) {
	    i__3 = i__ + j * a_dim1;
	    a[i__3].r = 0.f, a[i__3].i = 0.f;
/* L60: */
	}
/* L70: */
    }
    return 0;

/*     End of CLAGGE */

} /* clagge_ */
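
For reference, a minimal sketch of how clagge_ might be called to produce a random banded test matrix, assuming the f2c types and prototype shown above; the dimensions, bandwidths and diagonal values in d[] are arbitrary test data, not taken from any particular driver.

/* Sketch: generate a random 6x4 complex test matrix with kl = 1 subdiagonal
 * and ku = 2 superdiagonals using CLAGGE.  Types come from f2c.h; the
 * diagonal entries in d[] are arbitrary. */
#include <stdlib.h>
#include "f2c.h"

extern int clagge_(integer *m, integer *n, integer *kl, integer *ku,
                   real *d__, complex *a, integer *lda, integer *iseed,
                   complex *work, integer *info);

int main(void)
{
    integer m = 6, n = 4, kl = 1, ku = 2, lda = m, info;
    integer iseed[4] = {0, 1, 2, 3};            /* ISEED(4) must be odd      */
    real d[4] = {4.f, 3.f, 2.f, 1.f};           /* min(m,n) diagonal entries */
    complex *a    = malloc(lda * n * sizeof *a);    /* column-major, LDA >= M */
    complex *work = malloc((m + n) * sizeof *work); /* WORK needs M+N         */

    clagge_(&m, &n, &kl, &ku, d, a, &lda, iseed, work, &info);
    /* info == 0 on success; A now holds U*D*V reduced to bandwidth (kl,ku) */

    free(a);
    free(work);
    return (int) info;
}
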
Example #13
0
/* Subroutine */
int cgeqp3_(integer *m, integer *n, complex *a, integer *lda, integer *jpvt, complex *tau, complex *work, integer *lwork, real * rwork, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    /* Local variables */
    integer j, jb, na, nb, sm, sn, nx, fjb, iws, nfxd, nbmin;
    extern /* Subroutine */
    int cswap_(integer *, complex *, integer *, complex *, integer *);
    integer minmn, minws;
    extern /* Subroutine */
    int claqp2_(integer *, integer *, integer *, complex *, integer *, integer *, complex *, real *, real *, complex *);
    extern real scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */
    int cgeqrf_(integer *, integer *, complex *, integer *, complex *, complex *, integer *, integer *), xerbla_( char *, integer *);
    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *);
    extern /* Subroutine */
    int claqps_(integer *, integer *, integer *, integer *, integer *, complex *, integer *, integer *, complex *, real *, real *, complex *, complex *, integer *);
    integer topbmn, sminmn;
    extern /* Subroutine */
    int cunmqr_(char *, char *, integer *, integer *, integer *, complex *, integer *, complex *, complex *, integer *, complex *, integer *, integer *);
    integer lwkopt;
    logical lquery;
    /* -- LAPACK computational routine (version 3.4.2) -- */
    /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
    /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
    /* September 2012 */
    /* .. Scalar Arguments .. */
    /* .. */
    /* .. Array Arguments .. */
    /* .. */
    /* ===================================================================== */
    /* .. Parameters .. */
    /* .. */
    /* .. Local Scalars .. */
    /* .. */
    /* .. External Subroutines .. */
    /* .. */
    /* .. External Functions .. */
    /* .. */
    /* .. Intrinsic Functions .. */
    /* .. */
    /* .. Executable Statements .. */
    /* Test input arguments */
    /* ==================== */
    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --jpvt;
    --tau;
    --work;
    --rwork;
    /* Function Body */
    *info = 0;
    lquery = *lwork == -1;
    if (*m < 0)
    {
        *info = -1;
    }
    else if (*n < 0)
    {
        *info = -2;
    }
    else if (*lda < max(1,*m))
    {
        *info = -4;
    }
    if (*info == 0)
    {
        minmn = min(*m,*n);
        if (minmn == 0)
        {
            iws = 1;
            lwkopt = 1;
        }
        else
        {
            iws = *n + 1;
            nb = ilaenv_(&c__1, "CGEQRF", " ", m, n, &c_n1, &c_n1);
            lwkopt = (*n + 1) * nb;
        }
        work[1].r = (real) lwkopt;
        work[1].i = 0.f; // , expr subst
        if (*lwork < iws && ! lquery)
        {
            *info = -8;
        }
    }
    if (*info != 0)
    {
        i__1 = -(*info);
        xerbla_("CGEQP3", &i__1);
        return 0;
    }
    else if (lquery)
    {
        return 0;
    }
    /* Quick return if possible. */
    if (minmn == 0)
    {
        return 0;
    }
    /* Move initial columns up front. */
    nfxd = 1;
    i__1 = *n;
    for (j = 1;
            j <= i__1;
            ++j)
    {
        if (jpvt[j] != 0)
        {
            if (j != nfxd)
            {
                cswap_(m, &a[j * a_dim1 + 1], &c__1, &a[nfxd * a_dim1 + 1], & c__1);
                jpvt[j] = jpvt[nfxd];
                jpvt[nfxd] = j;
            }
            else
            {
                jpvt[j] = j;
            }
            ++nfxd;
        }
        else
        {
            jpvt[j] = j;
        }
        /* L10: */
    }
    --nfxd;
    /* Factorize fixed columns */
    /* ======================= */
    /* Compute the QR factorization of fixed columns and update */
    /* remaining columns. */
    if (nfxd > 0)
    {
        na = min(*m,nfxd);
        /* CC CALL CGEQR2( M, NA, A, LDA, TAU, WORK, INFO ) */
        cgeqrf_(m, &na, &a[a_offset], lda, &tau[1], &work[1], lwork, info);
        /* Computing MAX */
        i__1 = iws;
        i__2 = (integer) work[1].r; // , expr subst
        iws = max(i__1,i__2);
        if (na < *n)
        {
            /* CC CALL CUNM2R( 'Left', 'Conjugate Transpose', M, N-NA, */
            /* CC $ NA, A, LDA, TAU, A( 1, NA+1 ), LDA, WORK, */
            /* CC $ INFO ) */
            i__1 = *n - na;
            cunmqr_("Left", "Conjugate Transpose", m, &i__1, &na, &a[a_offset] , lda, &tau[1], &a[(na + 1) * a_dim1 + 1], lda, &work[1], lwork, info);
            /* Computing MAX */
            i__1 = iws;
            i__2 = (integer) work[1].r; // , expr subst
            iws = max(i__1,i__2);
        }
    }
    /* Factorize free columns */
    /* ====================== */
    if (nfxd < minmn)
    {
        sm = *m - nfxd;
        sn = *n - nfxd;
        sminmn = minmn - nfxd;
        /* Determine the block size. */
        nb = ilaenv_(&c__1, "CGEQRF", " ", &sm, &sn, &c_n1, &c_n1);
        nbmin = 2;
        nx = 0;
        if (nb > 1 && nb < sminmn)
        {
            /* Determine when to cross over from blocked to unblocked code. */
            /* Computing MAX */
            i__1 = 0;
            i__2 = ilaenv_(&c__3, "CGEQRF", " ", &sm, &sn, &c_n1, & c_n1); // , expr subst
            nx = max(i__1,i__2);
            if (nx < sminmn)
            {
                /* Determine if workspace is large enough for blocked code. */
                minws = (sn + 1) * nb;
                iws = max(iws,minws);
                if (*lwork < minws)
                {
                    /* Not enough workspace to use optimal NB: Reduce NB and */
                    /* determine the minimum value of NB. */
                    nb = *lwork / (sn + 1);
                    /* Computing MAX */
                    i__1 = 2;
                    i__2 = ilaenv_(&c__2, "CGEQRF", " ", &sm, &sn, & c_n1, &c_n1); // , expr subst
                    nbmin = max(i__1,i__2);
                }
            }
        }
        /* Initialize partial column norms. The first N elements of work */
        /* store the exact column norms. */
        i__1 = *n;
        for (j = nfxd + 1;
                j <= i__1;
                ++j)
        {
            rwork[j] = scnrm2_(&sm, &a[nfxd + 1 + j * a_dim1], &c__1);
            rwork[*n + j] = rwork[j];
            /* L20: */
        }
        if (nb >= nbmin && nb < sminmn && nx < sminmn)
        {
            /* Use blocked code initially. */
            j = nfxd + 1;
            /* Compute factorization: while loop. */
            topbmn = minmn - nx;
L30:
            if (j <= topbmn)
            {
                /* Computing MIN */
                i__1 = nb;
                i__2 = topbmn - j + 1; // , expr subst
                jb = min(i__1,i__2);
                /* Factorize JB columns among columns J:N. */
                i__1 = *n - j + 1;
                i__2 = j - 1;
                i__3 = *n - j + 1;
                claqps_(m, &i__1, &i__2, &jb, &fjb, &a[j * a_dim1 + 1], lda, & jpvt[j], &tau[j], &rwork[j], &rwork[*n + j], &work[1], &work[jb + 1], &i__3);
                j += fjb;
                goto L30;
            }
        }
        else
        {
            j = nfxd + 1;
        }
        /* Use unblocked code to factor the last or only block. */
        if (j <= minmn)
        {
            i__1 = *n - j + 1;
            i__2 = j - 1;
            claqp2_(m, &i__1, &i__2, &a[j * a_dim1 + 1], lda, &jpvt[j], &tau[ j], &rwork[j], &rwork[*n + j], &work[1]);
        }
    }
    work[1].r = (real) iws;
    work[1].i = 0.f; // , expr subst
    return 0;
    /* End of CGEQP3 */
}
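
A minimal sketch of the two-phase calling convention implied by the LWORK handling above: first a workspace query with LWORK = -1 (the optimal size comes back in WORK(1)), then the actual factorization. The f2c prototype is assumed and the 4-by-3 matrix contents are arbitrary test data.

/* Sketch: QR with column pivoting via CGEQP3 using a workspace query.
 * Assumes f2c types; the matrix contents are arbitrary. */
#include <stdlib.h>
#include "f2c.h"

extern int cgeqp3_(integer *m, integer *n, complex *a, integer *lda,
                   integer *jpvt, complex *tau, complex *work,
                   integer *lwork, real *rwork, integer *info);

int main(void)
{
    integer m = 4, n = 3, lda = m, info, lwork;
    integer jpvt[3] = {0, 0, 0};       /* all columns are free columns     */
    complex a[4 * 3], tau[3], wkopt;
    real rwork[2 * 3];                 /* RWORK needs 2*N                  */
    int i;

    for (i = 0; i < m * n; ++i) {      /* arbitrary column-major test data */
        a[i].r = (real)(i + 1);
        a[i].i = (real)(m * n - i);
    }

    lwork = -1;                        /* workspace query                  */
    cgeqp3_(&m, &n, a, &lda, jpvt, tau, &wkopt, &lwork, rwork, &info);

    lwork = (integer) wkopt.r;         /* optimal LWORK returned in WORK(1) */
    complex *work = malloc(lwork * sizeof *work);
    cgeqp3_(&m, &n, a, &lda, jpvt, tau, work, &lwork, rwork, &info);
    /* On exit: R in the upper triangle, Householder vectors below it,
     * tau[] holds the reflector scalars, jpvt[] the column permutation. */

    free(work);
    return (int) info;
}
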
Example #14
0
File: clagge.c Project: petsc/superlu
/* Subroutine */ int clagge_slu(integer *m, integer *n, integer *kl, integer *ku,
	 real *d, complex *a, integer *lda, integer *iseed, complex *work, 
	integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    doublereal d__1;
    complex q__1;

    /* Builtin functions */
    double c_abs(complex *);
    void c_div(complex *, complex *, complex *);

    /* Local variables */
    static integer i, j;
    extern /* Subroutine */ int cgerc_(integer *, integer *, complex *, 
	    complex *, integer *, complex *, integer *, complex *, integer *),
	     cscal_(integer *, complex *, complex *, integer *), cgemv_(char *
	    , integer *, integer *, complex *, complex *, integer *, complex *
	    , integer *, complex *, complex *, integer *);
    extern real scnrm2_(integer *, complex *, integer *);
    static complex wa, wb;
    extern /* Subroutine */ int clacgv_slu(integer *, complex *, integer *);
    static real wn;
    extern /* Subroutine */ int clarnv_slu(integer *, integer *, integer *, complex *);
    extern int input_error(char *, int *);
    static complex tau;


/*  -- LAPACK auxiliary test routine (version 2.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    CLAGGE generates a complex general m by n matrix A, by pre- and post-
    multiplying a real diagonal matrix D with random unitary matrices:   
    A = U*D*V. The lower and upper bandwidths may then be reduced to   
    kl and ku by additional unitary transformations.   

    Arguments   
    =========   

    M       (input) INTEGER   
            The number of rows of the matrix A.  M >= 0.   

    N       (input) INTEGER   
            The number of columns of the matrix A.  N >= 0.   

    KL      (input) INTEGER   
            The number of nonzero subdiagonals within the band of A.   
            0 <= KL <= M-1.   

    KU      (input) INTEGER   
            The number of nonzero superdiagonals within the band of A.   
            0 <= KU <= N-1.   

    D       (input) REAL array, dimension (min(M,N))   
            The diagonal elements of the diagonal matrix D.   

    A       (output) COMPLEX array, dimension (LDA,N)   
            The generated m by n matrix A.   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= M.   

    ISEED   (input/output) INTEGER array, dimension (4)   
            On entry, the seed of the random number generator; the array
            elements must be between 0 and 4095, and ISEED(4) must be   
            odd.   
            On exit, the seed is updated.   

    WORK    (workspace) COMPLEX array, dimension (M+N)   

    INFO    (output) INTEGER   
            = 0: successful exit   
            < 0: if INFO = -i, the i-th argument had an illegal value   

    ===================================================================== 
  


       Test the input arguments   

       Parameter adjustments */
    --d;
    a_dim1 = *lda;
    a_offset = a_dim1 + 1;
    a -= a_offset;
    --iseed;
    --work;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*kl < 0 || *kl > *m - 1) {
	*info = -3;
    } else if (*ku < 0 || *ku > *n - 1) {
	*info = -4;
    } else if (*lda < max(1,*m)) {
	*info = -7;
    }
    if (*info < 0) {
	i__1 = -(*info);
	input_error("CLAGGE", &i__1);
	return 0;
    }

/*     initialize A to diagonal matrix */

    i__1 = *n;
    for (j = 1; j <= i__1; ++j) {
	i__2 = *m;
	for (i = 1; i <= i__2; ++i) {
	    i__3 = i + j * a_dim1;
	    a[i__3].r = 0.f, a[i__3].i = 0.f;
/* L10: */
	}
/* L20: */
    }
    i__1 = min(*m,*n);
    for (i = 1; i <= i__1; ++i) {
	i__2 = i + i * a_dim1;
	i__3 = i;
	a[i__2].r = d[i__3], a[i__2].i = 0.f;
/* L30: */
    }

/*     pre- and post-multiply A by random unitary matrices */

    for (i = min(*m,*n); i >= 1; --i) {
	if (i < *m) {

/*           generate random reflection */

	    i__1 = *m - i + 1;
	    clarnv_slu(&c__3, &iseed[1], &i__1, &work[1]);
	    i__1 = *m - i + 1;
	    wn = scnrm2_(&i__1, &work[1], &c__1);
	    d__1 = wn / c_abs(&work[1]);
	    q__1.r = d__1 * work[1].r, q__1.i = d__1 * work[1].i;
	    wa.r = q__1.r, wa.i = q__1.i;
	    if (wn == 0.f) {
		tau.r = 0.f, tau.i = 0.f;
	    } else {
		q__1.r = work[1].r + wa.r, q__1.i = work[1].i + wa.i;
		wb.r = q__1.r, wb.i = q__1.i;
		i__1 = *m - i;
		c_div(&q__1, &c_b2, &wb);
		cscal_(&i__1, &q__1, &work[2], &c__1);
		work[1].r = 1.f, work[1].i = 0.f;
		c_div(&q__1, &wb, &wa);
		d__1 = q__1.r;
		tau.r = d__1, tau.i = 0.f;
	    }

/*           multiply A(i:m,i:n) by random reflection from the left */

	    i__1 = *m - i + 1;
	    i__2 = *n - i + 1;
	    cgemv_("Conjugate transpose", &i__1, &i__2, &c_b2, &a[i + i * 
		    a_dim1], lda, &work[1], &c__1, &c_b1, &work[*m + 1], &
		    c__1);
	    i__1 = *m - i + 1;
	    i__2 = *n - i + 1;
	    q__1.r = -(doublereal)tau.r, q__1.i = -(doublereal)tau.i;
	    cgerc_(&i__1, &i__2, &q__1, &work[1], &c__1, &work[*m + 1], &c__1,
		     &a[i + i * a_dim1], lda);
	}
	if (i < *n) {

/*           generate random reflection */

	    i__1 = *n - i + 1;
	    clarnv_slu(&c__3, &iseed[1], &i__1, &work[1]);
	    i__1 = *n - i + 1;
	    wn = scnrm2_(&i__1, &work[1], &c__1);
	    d__1 = wn / c_abs(&work[1]);
	    q__1.r = d__1 * work[1].r, q__1.i = d__1 * work[1].i;
	    wa.r = q__1.r, wa.i = q__1.i;
	    if (wn == 0.f) {
		tau.r = 0.f, tau.i = 0.f;
	    } else {
		q__1.r = work[1].r + wa.r, q__1.i = work[1].i + wa.i;
		wb.r = q__1.r, wb.i = q__1.i;
		i__1 = *n - i;
		c_div(&q__1, &c_b2, &wb);
		cscal_(&i__1, &q__1, &work[2], &c__1);
		work[1].r = 1.f, work[1].i = 0.f;
		c_div(&q__1, &wb, &wa);
		d__1 = q__1.r;
		tau.r = d__1, tau.i = 0.f;
	    }

/*           multiply A(i:m,i:n) by random reflection from the right */

	    i__1 = *m - i + 1;
	    i__2 = *n - i + 1;
	    cgemv_("No transpose", &i__1, &i__2, &c_b2, &a[i + i * a_dim1], 
		    lda, &work[1], &c__1, &c_b1, &work[*n + 1], &c__1);
	    i__1 = *m - i + 1;
	    i__2 = *n - i + 1;
	    q__1.r = -(doublereal)tau.r, q__1.i = -(doublereal)tau.i;
	    cgerc_(&i__1, &i__2, &q__1, &work[*n + 1], &c__1, &work[1], &c__1,
		     &a[i + i * a_dim1], lda);
	}
/* L40: */
    }

/*     Reduce number of subdiagonals to KL and number of superdiagonals   
       to KU   

   Computing MAX */
    i__2 = *m - 1 - *kl, i__3 = *n - 1 - *ku;
    i__1 = max(i__2,i__3);
    for (i = 1; i <= i__1; ++i) {
	if (*kl <= *ku) {

/*           annihilate subdiagonal elements first (necessary if KL = 0)

   Computing MIN */
	    i__2 = *m - 1 - *kl;
	    if (i <= min(i__2,*n)) {

/*              generate reflection to annihilate A(kl+i+1:m,i) */

		i__2 = *m - *kl - i + 1;
		wn = scnrm2_(&i__2, &a[*kl + i + i * a_dim1], &c__1);
		d__1 = wn / c_abs(&a[*kl + i + i * a_dim1]);
		i__2 = *kl + i + i * a_dim1;
		q__1.r = d__1 * a[i__2].r, q__1.i = d__1 * a[i__2].i;
		wa.r = q__1.r, wa.i = q__1.i;
		if (wn == 0.f) {
		    tau.r = 0.f, tau.i = 0.f;
		} else {
		    i__2 = *kl + i + i * a_dim1;
		    q__1.r = a[i__2].r + wa.r, q__1.i = a[i__2].i + wa.i;
		    wb.r = q__1.r, wb.i = q__1.i;
		    i__2 = *m - *kl - i;
		    c_div(&q__1, &c_b2, &wb);
		    cscal_(&i__2, &q__1, &a[*kl + i + 1 + i * a_dim1], &c__1);
		    i__2 = *kl + i + i * a_dim1;
		    a[i__2].r = 1.f, a[i__2].i = 0.f;
		    c_div(&q__1, &wb, &wa);
		    d__1 = q__1.r;
		    tau.r = d__1, tau.i = 0.f;
		}

/*              apply reflection to A(kl+i:m,i+1:n) from the left */

		i__2 = *m - *kl - i + 1;
		i__3 = *n - i;
		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*kl + i 
			+ (i + 1) * a_dim1], lda, &a[*kl + i + i * a_dim1], &
			c__1, &c_b1, &work[1], &c__1);
		i__2 = *m - *kl - i + 1;
		i__3 = *n - i;
		q__1.r = -(doublereal)tau.r, q__1.i = -(doublereal)tau.i;
		cgerc_(&i__2, &i__3, &q__1, &a[*kl + i + i * a_dim1], &c__1, &
			work[1], &c__1, &a[*kl + i + (i + 1) * a_dim1], lda);
		i__2 = *kl + i + i * a_dim1;
		q__1.r = -(doublereal)wa.r, q__1.i = -(doublereal)wa.i;
		a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	    }

/* Computing MIN */
	    i__2 = *n - 1 - *ku;
	    if (i <= min(i__2,*m)) {

/*              generate reflection to annihilate A(i,ku+i+1:n) */

		i__2 = *n - *ku - i + 1;
		wn = scnrm2_(&i__2, &a[i + (*ku + i) * a_dim1], lda);
		d__1 = wn / c_abs(&a[i + (*ku + i) * a_dim1]);
		i__2 = i + (*ku + i) * a_dim1;
		q__1.r = d__1 * a[i__2].r, q__1.i = d__1 * a[i__2].i;
		wa.r = q__1.r, wa.i = q__1.i;
		if (wn == 0.f) {
		    tau.r = 0.f, tau.i = 0.f;
		} else {
		    i__2 = i + (*ku + i) * a_dim1;
		    q__1.r = a[i__2].r + wa.r, q__1.i = a[i__2].i + wa.i;
		    wb.r = q__1.r, wb.i = q__1.i;
		    i__2 = *n - *ku - i;
		    c_div(&q__1, &c_b2, &wb);
		    cscal_(&i__2, &q__1, &a[i + (*ku + i + 1) * a_dim1], lda);
		    i__2 = i + (*ku + i) * a_dim1;
		    a[i__2].r = 1.f, a[i__2].i = 0.f;
		    c_div(&q__1, &wb, &wa);
		    d__1 = q__1.r;
		    tau.r = d__1, tau.i = 0.f;
		}

/*              apply reflection to A(i+1:m,ku+i:n) from the right */

		i__2 = *n - *ku - i + 1;
		clacgv_slu(&i__2, &a[i + (*ku + i) * a_dim1], lda);
		i__2 = *m - i;
		i__3 = *n - *ku - i + 1;
		cgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i + 1 + (*ku + 
			i) * a_dim1], lda, &a[i + (*ku + i) * a_dim1], lda, &
			c_b1, &work[1], &c__1);
		i__2 = *m - i;
		i__3 = *n - *ku - i + 1;
		q__1.r = -(doublereal)tau.r, q__1.i = -(doublereal)tau.i;
		cgerc_(&i__2, &i__3, &q__1, &work[1], &c__1, &a[i + (*ku + i) 
			* a_dim1], lda, &a[i + 1 + (*ku + i) * a_dim1], lda);
		i__2 = i + (*ku + i) * a_dim1;
		q__1.r = -(doublereal)wa.r, q__1.i = -(doublereal)wa.i;
		a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	    }
	} else {

/*           annihilate superdiagonal elements first (necessary if KU = 0)

   Computing MIN */
	    i__2 = *n - 1 - *ku;
	    if (i <= min(i__2,*m)) {

/*              generate reflection to annihilate A(i,ku+i+1:n) */

		i__2 = *n - *ku - i + 1;
		wn = scnrm2_(&i__2, &a[i + (*ku + i) * a_dim1], lda);
		d__1 = wn / c_abs(&a[i + (*ku + i) * a_dim1]);
		i__2 = i + (*ku + i) * a_dim1;
		q__1.r = d__1 * a[i__2].r, q__1.i = d__1 * a[i__2].i;
		wa.r = q__1.r, wa.i = q__1.i;
		if (wn == 0.f) {
		    tau.r = 0.f, tau.i = 0.f;
		} else {
		    i__2 = i + (*ku + i) * a_dim1;
		    q__1.r = a[i__2].r + wa.r, q__1.i = a[i__2].i + wa.i;
		    wb.r = q__1.r, wb.i = q__1.i;
		    i__2 = *n - *ku - i;
		    c_div(&q__1, &c_b2, &wb);
		    cscal_(&i__2, &q__1, &a[i + (*ku + i + 1) * a_dim1], lda);
		    i__2 = i + (*ku + i) * a_dim1;
		    a[i__2].r = 1.f, a[i__2].i = 0.f;
		    c_div(&q__1, &wb, &wa);
		    d__1 = q__1.r;
		    tau.r = d__1, tau.i = 0.f;
		}

/*              apply reflection to A(i+1:m,ku+i:n) from the right */

		i__2 = *n - *ku - i + 1;
		clacgv_slu(&i__2, &a[i + (*ku + i) * a_dim1], lda);
		i__2 = *m - i;
		i__3 = *n - *ku - i + 1;
		cgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i + 1 + (*ku + 
			i) * a_dim1], lda, &a[i + (*ku + i) * a_dim1], lda, &
			c_b1, &work[1], &c__1);
		i__2 = *m - i;
		i__3 = *n - *ku - i + 1;
		q__1.r = -(doublereal)tau.r, q__1.i = -(doublereal)tau.i;
		cgerc_(&i__2, &i__3, &q__1, &work[1], &c__1, &a[i + (*ku + i) 
			* a_dim1], lda, &a[i + 1 + (*ku + i) * a_dim1], lda);
		i__2 = i + (*ku + i) * a_dim1;
		q__1.r = -(doublereal)wa.r, q__1.i = -(doublereal)wa.i;
		a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	    }

/* Computing MIN */
	    i__2 = *m - 1 - *kl;
	    if (i <= min(i__2,*n)) {

/*              generate reflection to annihilate A(kl+i+1:m,i) */

		i__2 = *m - *kl - i + 1;
		wn = scnrm2_(&i__2, &a[*kl + i + i * a_dim1], &c__1);
		d__1 = wn / c_abs(&a[*kl + i + i * a_dim1]);
		i__2 = *kl + i + i * a_dim1;
		q__1.r = d__1 * a[i__2].r, q__1.i = d__1 * a[i__2].i;
		wa.r = q__1.r, wa.i = q__1.i;
		if (wn == 0.f) {
		    tau.r = 0.f, tau.i = 0.f;
		} else {
		    i__2 = *kl + i + i * a_dim1;
		    q__1.r = a[i__2].r + wa.r, q__1.i = a[i__2].i + wa.i;
		    wb.r = q__1.r, wb.i = q__1.i;
		    i__2 = *m - *kl - i;
		    c_div(&q__1, &c_b2, &wb);
		    cscal_(&i__2, &q__1, &a[*kl + i + 1 + i * a_dim1], &c__1);
		    i__2 = *kl + i + i * a_dim1;
		    a[i__2].r = 1.f, a[i__2].i = 0.f;
		    c_div(&q__1, &wb, &wa);
		    d__1 = q__1.r;
		    tau.r = d__1, tau.i = 0.f;
		}

/*              apply reflection to A(kl+i:m,i+1:n) from the left */

		i__2 = *m - *kl - i + 1;
		i__3 = *n - i;
		cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*kl + i 
			+ (i + 1) * a_dim1], lda, &a[*kl + i + i * a_dim1], &
			c__1, &c_b1, &work[1], &c__1);
		i__2 = *m - *kl - i + 1;
		i__3 = *n - i;
		q__1.r = -(doublereal)tau.r, q__1.i = -(doublereal)tau.i;
		cgerc_(&i__2, &i__3, &q__1, &a[*kl + i + i * a_dim1], &c__1, &
			work[1], &c__1, &a[*kl + i + (i + 1) * a_dim1], lda);
		i__2 = *kl + i + i * a_dim1;
		q__1.r = -(doublereal)wa.r, q__1.i = -(doublereal)wa.i;
		a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	    }
	}

	i__2 = *m;
	for (j = *kl + i + 1; j <= i__2; ++j) {
	    i__3 = j + i * a_dim1;
	    a[i__3].r = 0.f, a[i__3].i = 0.f;
/* L50: */
	}

	i__2 = *n;
	for (j = *ku + i + 1; j <= i__2; ++j) {
	    i__3 = i + j * a_dim1;
	    a[i__3].r = 0.f, a[i__3].i = 0.f;
/* L60: */
	}
/* L70: */
    }
    return 0;

/*     End of CLAGGE */

} /* clagge_slu */
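
Both copies of CLAGGE build their reflectors inline with the same recipe: wn = ||x||_2, wa = (wn/|x1|)*x1, wb = x1 + wa, v = x/wb with v(1) = 1, and tau = Re(wb/wa), after which H = I - tau*v*v**H maps x onto -wa*e1. The sketch below illustrates that arithmetic standalone, using C99 complex types rather than the f2c structs; the vector entries are arbitrary.

/* Standalone illustration (C99 <complex.h>, not f2c types) of the reflector
 * construction used repeatedly in CLAGGE: for x with x(1) != 0,
 *   wn = ||x||_2, wa = (wn/|x1|)*x1, wb = x1 + wa,
 *   v  = x / wb with v(1) = 1, tau = Re(wb/wa),
 * and H = I - tau*v*v**H maps x to -wa*e1. */
#include <complex.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    float complex x[3] = {1.0f + 2.0f*I, -1.0f + 0.5f*I, 0.25f - 1.0f*I};
    float complex v[3], wa, wb;
    float wn = 0.0f, tau;
    int i;

    for (i = 0; i < 3; ++i) wn += crealf(x[i] * conjf(x[i]));
    wn = sqrtf(wn);                          /* wn = ||x||_2              */

    wa = (wn / cabsf(x[0])) * x[0];          /* |wa| = wn, phase of x[0]  */
    wb = x[0] + wa;
    v[0] = 1.0f;
    for (i = 1; i < 3; ++i) v[i] = x[i] / wb;
    tau = crealf(wb / wa);                   /* wb/wa is real here        */

    /* apply H = I - tau*v*v**H to x:  y = x - tau*v*(v**H * x) */
    float complex dot = 0.0f;
    for (i = 0; i < 3; ++i) dot += conjf(v[i]) * x[i];
    for (i = 0; i < 3; ++i) {
        float complex y = x[i] - tau * v[i] * dot;
        printf("y[%d] = % .5f %+.5fi\n", i, crealf(y), cimagf(y));
    }
    /* expected: y[0] ~= -wa, y[1] ~= y[2] ~= 0 */
    return 0;
}
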
Example #15
0
/* Subroutine */ int clarfg_(integer *n, complex *alpha, complex *x, integer *
                             incx, complex *tau)
{
    /*  -- LAPACK auxiliary routine (version 2.0) --
           Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
           Courant Institute, Argonne National Lab, and Rice University
           September 30, 1994


        Purpose
        =======

        CLARFG generates a complex elementary reflector H of order n, such
        that

              H' * ( alpha ) = ( beta ),   H' * H = I.
                   (   x   )   (   0  )

        where alpha and beta are scalars, with beta real, and x is an
        (n-1)-element complex vector. H is represented in the form

              H = I - tau * ( 1 ) * ( 1 v' ) ,
                            ( v )

        where tau is a complex scalar and v is a complex (n-1)-element
        vector. Note that H is not hermitian.

        If the elements of x are all zero and alpha is real, then tau = 0
        and H is taken to be the unit matrix.

        Otherwise  1 <= real(tau) <= 2  and  abs(tau-1) <= 1 .

        Arguments
        =========

        N       (input) INTEGER
                The order of the elementary reflector.

        ALPHA   (input/output) COMPLEX
                On entry, the value alpha.
                On exit, it is overwritten with the value beta.

        X       (input/output) COMPLEX array, dimension
                               (1+(N-2)*abs(INCX))
                On entry, the vector x.
                On exit, it is overwritten with the vector v.

        INCX    (input) INTEGER
                The increment between elements of X. INCX > 0.

        TAU     (output) COMPLEX
                The value tau.

        =====================================================================




       Parameter adjustments
           Function Body */
    /* Table of constant values */
    static complex c_b5 = {1.f,0.f};

    /* System generated locals */
    integer i__1;
    real r__1;
    doublereal d__1, d__2;
    complex q__1, q__2;
    /* Builtin functions */
    double r_imag(complex *), r_sign(real *, real *);
    /* Local variables */
    static real beta;
    static integer j;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
                                       integer *);
    static real alphi, alphr, xnorm;
    extern doublereal scnrm2_(integer *, complex *, integer *), slapy3_(real *
            , real *, real *);
    extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
    extern doublereal slamch_(char *);
    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
                                        *);
    static real safmin, rsafmn;
    static integer knt;



#define X(I) x[(I)-1]


    if (*n <= 0) {
        tau->r = 0.f, tau->i = 0.f;
        return 0;
    }

    i__1 = *n - 1;
    xnorm = scnrm2_(&i__1, &X(1), incx);
    alphr = alpha->r;
    alphi = r_imag(alpha);

    if (xnorm == 0.f && alphi == 0.f) {

        /*        H  =  I */

        tau->r = 0.f, tau->i = 0.f;
    } else {

        /*        general case */

        r__1 = slapy3_(&alphr, &alphi, &xnorm);
        beta = -(doublereal)r_sign(&r__1, &alphr);
        safmin = slamch_("S") / slamch_("E");
        rsafmn = 1.f / safmin;

        if (dabs(beta) < safmin) {

            /*           XNORM, BETA may be inaccurate; scale X and recompute
            them */

            knt = 0;
L10:
            ++knt;
            i__1 = *n - 1;
            csscal_(&i__1, &rsafmn, &X(1), incx);
            beta *= rsafmn;
            alphi *= rsafmn;
            alphr *= rsafmn;
            if (dabs(beta) < safmin) {
                goto L10;
            }

            /*           New BETA is at most 1, at least SAFMIN */

            i__1 = *n - 1;
            xnorm = scnrm2_(&i__1, &X(1), incx);
            q__1.r = alphr, q__1.i = alphi;
            alpha->r = q__1.r, alpha->i = q__1.i;
            r__1 = slapy3_(&alphr, &alphi, &xnorm);
            beta = -(doublereal)r_sign(&r__1, &alphr);
            d__1 = (beta - alphr) / beta;
            d__2 = -(doublereal)alphi / beta;
            q__1.r = d__1, q__1.i = d__2;
            tau->r = q__1.r, tau->i = q__1.i;
            q__2.r = alpha->r - beta, q__2.i = alpha->i;
            cladiv_(&q__1, &c_b5, &q__2);
            alpha->r = q__1.r, alpha->i = q__1.i;
            i__1 = *n - 1;
            cscal_(&i__1, alpha, &X(1), incx);

            /*           If ALPHA is subnormal, it may lose relative accuracy
            */

            alpha->r = beta, alpha->i = 0.f;
            i__1 = knt;
            for (j = 1; j <= knt; ++j) {
                q__1.r = safmin * alpha->r, q__1.i = safmin * alpha->i;
                alpha->r = q__1.r, alpha->i = q__1.i;
                /* L20: */
            }
        } else {
            d__1 = (beta - alphr) / beta;
            d__2 = -(doublereal)alphi / beta;
            q__1.r = d__1, q__1.i = d__2;
            tau->r = q__1.r, tau->i = q__1.i;
            q__2.r = alpha->r - beta, q__2.i = alpha->i;
            cladiv_(&q__1, &c_b5, &q__2);
            alpha->r = q__1.r, alpha->i = q__1.i;
            i__1 = *n - 1;
            cscal_(&i__1, alpha, &X(1), incx);
            alpha->r = beta, alpha->i = 0.f;
        }
    }

    return 0;

    /*     End of CLARFG */

} /* clarfg_ */
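
A minimal sketch of calling clarfg_ directly, assuming the f2c types: the leading entry goes in ALPHA and the remaining n-1 entries in X, and on return ALPHA holds beta, X holds v(2:n) and TAU the scalar factor, as documented above. The vector values are arbitrary.

/* Sketch: generate one elementary reflector with CLARFG (f2c types).
 * On exit alpha holds beta (real), x holds v(2:n), tau the scalar factor. */
#include "f2c.h"

extern int clarfg_(integer *n, complex *alpha, complex *x, integer *incx,
                   complex *tau);

int main(void)
{
    integer n = 4, incx = 1;
    complex alpha = {2.f, 1.f};                            /* first entry     */
    complex x[3]  = {{1.f, 0.f}, {0.f, -1.f}, {.5f, .5f}}; /* remaining n-1   */
    complex tau;

    clarfg_(&n, &alpha, x, &incx, &tau);
    /* H = I - tau*(1;v)*(1;v)' now satisfies H'*(alpha_in; x_in) = (beta; 0) */
    return 0;
}
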
Example #16
0
File: clarfgp.c Project: flame/libflame
/* Subroutine */
int clarfgp_(integer *n, complex *alpha, complex *x, integer *incx, complex *tau)
{
    /* System generated locals */
    integer i__1, i__2;
    real r__1, r__2;
    complex q__1, q__2;
    /* Builtin functions */
    double r_imag(complex *), r_sign(real *, real *), c_abs(complex *);
    /* Local variables */
    integer j;
    complex savealpha;
    integer knt;
    real beta;
    extern /* Subroutine */
    int cscal_(integer *, complex *, complex *, integer *);
    real alphi, alphr, xnorm;
    extern real scnrm2_(integer *, complex *, integer *), slapy2_(real *, real *), slapy3_(real *, real *, real *);
    extern /* Complex */
    VOID cladiv_(complex *, complex *, complex *);
    extern real slamch_(char *);
    extern /* Subroutine */
    int csscal_(integer *, real *, complex *, integer *);
    real bignum, smlnum;
    /* -- LAPACK auxiliary routine (version 3.4.2) -- */
    /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
    /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
    /* September 2012 */
    /* .. Scalar Arguments .. */
    /* .. */
    /* .. Array Arguments .. */
    /* .. */
    /* ===================================================================== */
    /* .. Parameters .. */
    /* .. */
    /* .. Local Scalars .. */
    /* .. */
    /* .. External Functions .. */
    /* .. */
    /* .. Intrinsic Functions .. */
    /* .. */
    /* .. External Subroutines .. */
    /* .. */
    /* .. Executable Statements .. */
    /* Parameter adjustments */
    --x;
    /* Function Body */
    if (*n <= 0)
    {
        tau->r = 0.f, tau->i = 0.f;
        return 0;
    }
    i__1 = *n - 1;
    xnorm = scnrm2_(&i__1, &x[1], incx);
    alphr = alpha->r;
    alphi = r_imag(alpha);
    if (xnorm == 0.f)
    {
        /* H = [1-alpha/f2c_abs(alpha) 0;
        0 I], sign chosen so ALPHA >= 0. */
        if (alphi == 0.f)
        {
            if (alphr >= 0.f)
            {
                /* When TAU.eq.ZERO, the vector is special-cased to be */
                /* all zeros in the application routines. We do not need */
                /* to clear it. */
                tau->r = 0.f, tau->i = 0.f;
            }
            else
            {
                /* However, the application routines rely on explicit */
                /* zero checks when TAU.ne.ZERO, and we must clear X. */
                tau->r = 2.f, tau->i = 0.f;
                i__1 = *n - 1;
                for (j = 1;
                        j <= i__1;
                        ++j)
                {
                    i__2 = (j - 1) * *incx + 1;
                    x[i__2].r = 0.f;
                    x[i__2].i = 0.f; // , expr subst
                }
                q__1.r = -alpha->r;
                q__1.i = -alpha->i; // , expr subst
                alpha->r = q__1.r, alpha->i = q__1.i;
            }
        }
        else
        {
            /* Only "reflecting" the diagonal entry to be real and non-negative. */
            xnorm = slapy2_(&alphr, &alphi);
            r__1 = 1.f - alphr / xnorm;
            r__2 = -alphi / xnorm;
            q__1.r = r__1;
            q__1.i = r__2; // , expr subst
            tau->r = q__1.r, tau->i = q__1.i;
            i__1 = *n - 1;
            for (j = 1;
                    j <= i__1;
                    ++j)
            {
                i__2 = (j - 1) * *incx + 1;
                x[i__2].r = 0.f;
                x[i__2].i = 0.f; // , expr subst
            }
            alpha->r = xnorm, alpha->i = 0.f;
        }
    }
    else
    {
        /* general case */
        r__1 = slapy3_(&alphr, &alphi, &xnorm);
        beta = r_sign(&r__1, &alphr);
        smlnum = slamch_("S") / slamch_("E");
        bignum = 1.f / smlnum;
        knt = 0;
        if (f2c_abs(beta) < smlnum)
        {
            /* XNORM, BETA may be inaccurate;
            scale X and recompute them */
L10:
            ++knt;
            i__1 = *n - 1;
            csscal_(&i__1, &bignum, &x[1], incx);
            beta *= bignum;
            alphi *= bignum;
            alphr *= bignum;
            if (f2c_abs(beta) < smlnum)
            {
                goto L10;
            }
            /* New BETA is at most 1, at least SMLNUM */
            i__1 = *n - 1;
            xnorm = scnrm2_(&i__1, &x[1], incx);
            q__1.r = alphr;
            q__1.i = alphi; // , expr subst
            alpha->r = q__1.r, alpha->i = q__1.i;
            r__1 = slapy3_(&alphr, &alphi, &xnorm);
            beta = r_sign(&r__1, &alphr);
        }
        savealpha.r = alpha->r;
        savealpha.i = alpha->i; // , expr subst
        q__1.r = alpha->r + beta;
        q__1.i = alpha->i; // , expr subst
        alpha->r = q__1.r, alpha->i = q__1.i;
        if (beta < 0.f)
        {
            beta = -beta;
            q__2.r = -alpha->r;
            q__2.i = -alpha->i; // , expr subst
            q__1.r = q__2.r / beta;
            q__1.i = q__2.i / beta; // , expr subst
            tau->r = q__1.r, tau->i = q__1.i;
        }
        else
        {
            alphr = alphi * (alphi / alpha->r);
            alphr += xnorm * (xnorm / alpha->r);
            r__1 = alphr / beta;
            r__2 = -alphi / beta;
            q__1.r = r__1;
            q__1.i = r__2; // , expr subst
            tau->r = q__1.r, tau->i = q__1.i;
            r__1 = -alphr;
            q__1.r = r__1;
            q__1.i = alphi; // , expr subst
            alpha->r = q__1.r, alpha->i = q__1.i;
        }
        cladiv_(&q__1, &c_b5, alpha);
        alpha->r = q__1.r, alpha->i = q__1.i;
        if (c_abs(tau) <= smlnum)
        {
            /* In the case where the computed TAU ends up being a denormalized number, */
            /* it loses relative accuracy. This is a BIG problem. Solution: flush TAU */
            /* to ZERO (or TWO or whatever makes a nonnegative real number for BETA). */
            /* (Bug report provided by Pat Quillen from MathWorks on Jul 29, 2009.) */
            /* (Thanks Pat. Thanks MathWorks.) */
            alphr = savealpha.r;
            alphi = r_imag(&savealpha);
            if (alphi == 0.f)
            {
                if (alphr >= 0.f)
                {
                    tau->r = 0.f, tau->i = 0.f;
                }
                else
                {
                    tau->r = 2.f, tau->i = 0.f;
                    i__1 = *n - 1;
                    for (j = 1;
                            j <= i__1;
                            ++j)
                    {
                        i__2 = (j - 1) * *incx + 1;
                        x[i__2].r = 0.f;
                        x[i__2].i = 0.f; // , expr subst
                    }
                    q__1.r = -savealpha.r;
                    q__1.i = -savealpha.i; // , expr subst
                    beta = q__1.r;
                }
            }
            else
            {
                xnorm = slapy2_(&alphr, &alphi);
                r__1 = 1.f - alphr / xnorm;
                r__2 = -alphi / xnorm;
                q__1.r = r__1;
                q__1.i = r__2; // , expr subst
                tau->r = q__1.r, tau->i = q__1.i;
                i__1 = *n - 1;
                for (j = 1;
                        j <= i__1;
                        ++j)
                {
                    i__2 = (j - 1) * *incx + 1;
                    x[i__2].r = 0.f;
                    x[i__2].i = 0.f; // , expr subst
                }
                beta = xnorm;
            }
        }
        else
        {
            /* This is the general case. */
            i__1 = *n - 1;
            cscal_(&i__1, alpha, &x[1], incx);
        }
        /* If BETA is subnormal, it may lose relative accuracy */
        i__1 = knt;
        for (j = 1;
                j <= i__1;
                ++j)
        {
            beta *= smlnum;
            /* L20: */
        }
        alpha->r = beta, alpha->i = 0.f;
    }
    return 0;
    /* End of CLARFGP */
}
Example #17
0
/* Subroutine */ int clagsy_(integer *n, integer *k, real *d__, complex *a, 
	integer *lda, integer *iseed, complex *work, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8, 
	    i__9;
    real r__1;
    complex q__1, q__2, q__3, q__4;

    /* Local variables */
    integer i__, j, ii, jj;
    complex wa, wb;
    real wn;
    complex tau;
    complex alpha;

/*  -- LAPACK auxiliary test routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CLAGSY generates a complex symmetric matrix A, by pre- and post- */
/*  multiplying a real diagonal matrix D with a random unitary matrix: */
/*  A = U*D*U**T. The semi-bandwidth may then be reduced to k by */
/*  additional unitary transformations. */

/*  Arguments */
/*  ========= */

/*  N       (input) INTEGER */
/*          The order of the matrix A.  N >= 0. */

/*  K       (input) INTEGER */
/*          The number of nonzero subdiagonals within the band of A. */
/*          0 <= K <= N-1. */

/*  D       (input) REAL array, dimension (N) */
/*          The diagonal elements of the diagonal matrix D. */

/*  A       (output) COMPLEX array, dimension (LDA,N) */
/*          The generated n by n symmetric matrix A (the full matrix is */
/*          stored). */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= N. */

/*  ISEED   (input/output) INTEGER array, dimension (4) */
/*          On entry, the seed of the random number generator; the array */
/*          elements must be between 0 and 4095, and ISEED(4) must be */
/*          odd. */
/*          On exit, the seed is updated. */

/*  WORK    (workspace) COMPLEX array, dimension (2*N) */

/*  INFO    (output) INTEGER */
/*          = 0: successful exit */
/*          < 0: if INFO = -i, the i-th argument had an illegal value */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input arguments */

    /* Parameter adjustments */
    --d__;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --iseed;
    --work;

    /* Function Body */
    *info = 0;
    if (*n < 0) {
	*info = -1;
    } else if (*k < 0 || *k > *n - 1) {
	*info = -2;
    } else if (*lda < max(1,*n)) {
	*info = -5;
    }
    if (*info < 0) {
	i__1 = -(*info);
	xerbla_("CLAGSY", &i__1);
	return 0;
    }

/*     initialize lower triangle of A to diagonal matrix */

    i__1 = *n;
    for (j = 1; j <= i__1; ++j) {
	i__2 = *n;
	for (i__ = j + 1; i__ <= i__2; ++i__) {
	    i__3 = i__ + j * a_dim1;
	    a[i__3].r = 0.f, a[i__3].i = 0.f;
/* L10: */
	}
/* L20: */
    }
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	i__2 = i__ + i__ * a_dim1;
	i__3 = i__;
	a[i__2].r = d__[i__3], a[i__2].i = 0.f;
/* L30: */
    }

/*     Generate lower triangle of symmetric matrix */

    for (i__ = *n - 1; i__ >= 1; --i__) {

/*        generate random reflection */

	i__1 = *n - i__ + 1;
	clarnv_(&c__3, &iseed[1], &i__1, &work[1]);
	i__1 = *n - i__ + 1;
	wn = scnrm2_(&i__1, &work[1], &c__1);
	r__1 = wn / c_abs(&work[1]);
	q__1.r = r__1 * work[1].r, q__1.i = r__1 * work[1].i;
	wa.r = q__1.r, wa.i = q__1.i;
	if (wn == 0.f) {
	    tau.r = 0.f, tau.i = 0.f;
	} else {
	    q__1.r = work[1].r + wa.r, q__1.i = work[1].i + wa.i;
	    wb.r = q__1.r, wb.i = q__1.i;
	    i__1 = *n - i__;
	    c_div(&q__1, &c_b2, &wb);
	    cscal_(&i__1, &q__1, &work[2], &c__1);
	    work[1].r = 1.f, work[1].i = 0.f;
	    c_div(&q__1, &wb, &wa);
	    r__1 = q__1.r;
	    tau.r = r__1, tau.i = 0.f;
	}

/*        apply random reflection to A(i:n,i:n) from the left */
/*        and the right */

/*        compute  y := tau * A * conjg(u) */

	i__1 = *n - i__ + 1;
	clacgv_(&i__1, &work[1], &c__1);
	i__1 = *n - i__ + 1;
	csymv_("Lower", &i__1, &tau, &a[i__ + i__ * a_dim1], lda, &work[1], &
		c__1, &c_b1, &work[*n + 1], &c__1);
	i__1 = *n - i__ + 1;
	clacgv_(&i__1, &work[1], &c__1);

/*        compute  v := y - 1/2 * tau * ( u, y ) * u */

	q__3.r = -.5f, q__3.i = -0.f;
	q__2.r = q__3.r * tau.r - q__3.i * tau.i, q__2.i = q__3.r * tau.i + 
		q__3.i * tau.r;
	i__1 = *n - i__ + 1;
	cdotc_(&q__4, &i__1, &work[1], &c__1, &work[*n + 1], &c__1);
	q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r * q__4.i 
		+ q__2.i * q__4.r;
	alpha.r = q__1.r, alpha.i = q__1.i;
	i__1 = *n - i__ + 1;
	caxpy_(&i__1, &alpha, &work[1], &c__1, &work[*n + 1], &c__1);

/*        apply the transformation as a rank-2 update to A(i:n,i:n) */

/*        CALL CSYR2( 'Lower', N-I+1, -ONE, WORK, 1, WORK( N+1 ), 1, */
/*        $               A( I, I ), LDA ) */

	i__1 = *n;
	for (jj = i__; jj <= i__1; ++jj) {
	    i__2 = *n;
	    for (ii = jj; ii <= i__2; ++ii) {
		i__3 = ii + jj * a_dim1;
		i__4 = ii + jj * a_dim1;
		i__5 = ii - i__ + 1;
		i__6 = *n + jj - i__ + 1;
		q__3.r = work[i__5].r * work[i__6].r - work[i__5].i * work[
			i__6].i, q__3.i = work[i__5].r * work[i__6].i + work[
			i__5].i * work[i__6].r;
		q__2.r = a[i__4].r - q__3.r, q__2.i = a[i__4].i - q__3.i;
		i__7 = *n + ii - i__ + 1;
		i__8 = jj - i__ + 1;
		q__4.r = work[i__7].r * work[i__8].r - work[i__7].i * work[
			i__8].i, q__4.i = work[i__7].r * work[i__8].i + work[
			i__7].i * work[i__8].r;
		q__1.r = q__2.r - q__4.r, q__1.i = q__2.i - q__4.i;
		a[i__3].r = q__1.r, a[i__3].i = q__1.i;
/* L40: */
	    }
/* L50: */
	}
/* L60: */
    }

/*     Reduce number of subdiagonals to K */

    i__1 = *n - 1 - *k;
    for (i__ = 1; i__ <= i__1; ++i__) {

/*        generate reflection to annihilate A(k+i+1:n,i) */

	i__2 = *n - *k - i__ + 1;
	wn = scnrm2_(&i__2, &a[*k + i__ + i__ * a_dim1], &c__1);
	r__1 = wn / c_abs(&a[*k + i__ + i__ * a_dim1]);
	i__2 = *k + i__ + i__ * a_dim1;
	q__1.r = r__1 * a[i__2].r, q__1.i = r__1 * a[i__2].i;
	wa.r = q__1.r, wa.i = q__1.i;
	if (wn == 0.f) {
	    tau.r = 0.f, tau.i = 0.f;
	} else {
	    i__2 = *k + i__ + i__ * a_dim1;
	    q__1.r = a[i__2].r + wa.r, q__1.i = a[i__2].i + wa.i;
	    wb.r = q__1.r, wb.i = q__1.i;
	    i__2 = *n - *k - i__;
	    c_div(&q__1, &c_b2, &wb);
	    cscal_(&i__2, &q__1, &a[*k + i__ + 1 + i__ * a_dim1], &c__1);
	    i__2 = *k + i__ + i__ * a_dim1;
	    a[i__2].r = 1.f, a[i__2].i = 0.f;
	    c_div(&q__1, &wb, &wa);
	    r__1 = q__1.r;
	    tau.r = r__1, tau.i = 0.f;
	}

/*        apply reflection to A(k+i:n,i+1:k+i-1) from the left */

	i__2 = *n - *k - i__ + 1;
	i__3 = *k - 1;
	cgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ + (i__ 
		+ 1) * a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &
		c_b1, &work[1], &c__1);
	i__2 = *n - *k - i__ + 1;
	i__3 = *k - 1;
	q__1.r = -tau.r, q__1.i = -tau.i;
	cgerc_(&i__2, &i__3, &q__1, &a[*k + i__ + i__ * a_dim1], &c__1, &work[
		1], &c__1, &a[*k + i__ + (i__ + 1) * a_dim1], lda);

/*        apply reflection to A(k+i:n,k+i:n) from the left and the right */

/*        compute  y := tau * A * conjg(u) */

	i__2 = *n - *k - i__ + 1;
	clacgv_(&i__2, &a[*k + i__ + i__ * a_dim1], &c__1);
	i__2 = *n - *k - i__ + 1;
	csymv_("Lower", &i__2, &tau, &a[*k + i__ + (*k + i__) * a_dim1], lda, 
		&a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &work[1], &c__1);
	i__2 = *n - *k - i__ + 1;
	clacgv_(&i__2, &a[*k + i__ + i__ * a_dim1], &c__1);

/*        compute  v := y - 1/2 * tau * ( u, y ) * u */

	q__3.r = -.5f, q__3.i = -0.f;
	q__2.r = q__3.r * tau.r - q__3.i * tau.i, q__2.i = q__3.r * tau.i + 
		q__3.i * tau.r;
	i__2 = *n - *k - i__ + 1;
	cdotc_(&q__4, &i__2, &a[*k + i__ + i__ * a_dim1], &c__1, &work[1], &
		c__1);
	q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r * q__4.i 
		+ q__2.i * q__4.r;
	alpha.r = q__1.r, alpha.i = q__1.i;
	i__2 = *n - *k - i__ + 1;
	caxpy_(&i__2, &alpha, &a[*k + i__ + i__ * a_dim1], &c__1, &work[1], &
		c__1);

/*        apply symmetric rank-2 update to A(k+i:n,k+i:n) */

/*        CALL CSYR2( 'Lower', N-K-I+1, -ONE, A( K+I, I ), 1, WORK, 1, */
/*        $               A( K+I, K+I ), LDA ) */

	i__2 = *n;
	for (jj = *k + i__; jj <= i__2; ++jj) {
	    i__3 = *n;
	    for (ii = jj; ii <= i__3; ++ii) {
		i__4 = ii + jj * a_dim1;
		i__5 = ii + jj * a_dim1;
		i__6 = ii + i__ * a_dim1;
		i__7 = jj - *k - i__ + 1;
		q__3.r = a[i__6].r * work[i__7].r - a[i__6].i * work[i__7].i, 
			q__3.i = a[i__6].r * work[i__7].i + a[i__6].i * work[
			i__7].r;
		q__2.r = a[i__5].r - q__3.r, q__2.i = a[i__5].i - q__3.i;
		i__8 = ii - *k - i__ + 1;
		i__9 = jj + i__ * a_dim1;
		q__4.r = work[i__8].r * a[i__9].r - work[i__8].i * a[i__9].i, 
			q__4.i = work[i__8].r * a[i__9].i + work[i__8].i * a[
			i__9].r;
		q__1.r = q__2.r - q__4.r, q__1.i = q__2.i - q__4.i;
		a[i__4].r = q__1.r, a[i__4].i = q__1.i;
/* L70: */
	    }
/* L80: */
	}

	i__2 = *k + i__ + i__ * a_dim1;
	q__1.r = -wa.r, q__1.i = -wa.i;
	a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	i__2 = *n;
	for (j = *k + i__ + 1; j <= i__2; ++j) {
	    i__3 = j + i__ * a_dim1;
	    a[i__3].r = 0.f, a[i__3].i = 0.f;
/* L90: */
	}
/* L100: */
    }

/*     Store full symmetric matrix */

    i__1 = *n;
    for (j = 1; j <= i__1; ++j) {
	i__2 = *n;
	for (i__ = j + 1; i__ <= i__2; ++i__) {
	    i__3 = j + i__ * a_dim1;
	    i__4 = i__ + j * a_dim1;
	    a[i__3].r = a[i__4].r, a[i__3].i = a[i__4].i;
/* L110: */
	}
/* L120: */
    }
    return 0;

/*     End of CLAGSY */

} /* clagsy_ */
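
A minimal sketch of calling clagsy_ to generate a random complex symmetric matrix of semi-bandwidth k, assuming the f2c types and the prototype shown above; the order, bandwidth and diagonal entries are arbitrary test values.

/* Sketch: random 5x5 complex symmetric matrix with semi-bandwidth k = 2
 * via CLAGSY (f2c types).  d[] entries are arbitrary. */
#include <stdlib.h>
#include "f2c.h"

extern int clagsy_(integer *n, integer *k, real *d__, complex *a,
                   integer *lda, integer *iseed, complex *work,
                   integer *info);

int main(void)
{
    integer n = 5, k = 2, lda = n, info;
    integer iseed[4] = {11, 22, 33, 45};          /* ISEED(4) must be odd */
    real d[5] = {5.f, 4.f, 3.f, 2.f, 1.f};        /* diagonal of D        */
    complex *a    = malloc(lda * n * sizeof *a);
    complex *work = malloc(2 * n * sizeof *work); /* WORK needs 2*N       */

    clagsy_(&n, &k, d, a, &lda, iseed, work, &info);
    /* info == 0 on success; A = U*D*U**T with semi-bandwidth reduced to k */

    free(a);
    free(work);
    return (int) info;
}
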
Example #18
0
/* Subroutine */ int cgeqpf_(integer *m, integer *n, complex *a, integer *lda,
	 integer *jpvt, complex *tau, complex *work, real *rwork, integer *
	info)
{
/*  -- LAPACK auxiliary routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   


    Purpose   
    =======   

    This routine is deprecated and has been replaced by routine CGEQP3.   

    CGEQPF computes a QR factorization with column pivoting of a   
    complex M-by-N matrix A: A*P = Q*R.   

    Arguments   
    =========   

    M       (input) INTEGER   
            The number of rows of the matrix A. M >= 0.   

    N       (input) INTEGER   
            The number of columns of the matrix A. N >= 0   

    A       (input/output) COMPLEX array, dimension (LDA,N)   
            On entry, the M-by-N matrix A.   
            On exit, the upper triangle of the array contains the   
            min(M,N)-by-N upper triangular matrix R; the elements   
            below the diagonal, together with the array TAU,   
            represent the unitary matrix Q as a product of   
            min(m,n) elementary reflectors.   

    LDA     (input) INTEGER   
            The leading dimension of the array A. LDA >= max(1,M).   

    JPVT    (input/output) INTEGER array, dimension (N)   
            On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted   
            to the front of A*P (a leading column); if JPVT(i) = 0,   
            the i-th column of A is a free column.   
            On exit, if JPVT(i) = k, then the i-th column of A*P   
            was the k-th column of A.   

    TAU     (output) COMPLEX array, dimension (min(M,N))   
            The scalar factors of the elementary reflectors.   

    WORK    (workspace) COMPLEX array, dimension (N)   

    RWORK   (workspace) REAL array, dimension (2*N)   

    INFO    (output) INTEGER   
            = 0:  successful exit   
            < 0:  if INFO = -i, the i-th argument had an illegal value   

    Further Details   
    ===============   

    The matrix Q is represented as a product of elementary reflectors   

       Q = H(1) H(2) . . . H(n)   

    Each H(i) has the form   

       H = I - tau * v * v'   

    where tau is a complex scalar, and v is a complex vector with   
    v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i).   

    The matrix P is represented in jpvt as follows: If   
       jpvt(j) = i   
    then the jth column of P is the ith canonical unit vector.   

    =====================================================================   


       Test the input arguments   

       Parameter adjustments */
    /* Table of constant values */
    static integer c__1 = 1;
    
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    real r__1;
    complex q__1;
    /* Builtin functions */
    void r_cnjg(complex *, complex *);
    double c_abs(complex *), sqrt(doublereal);
    /* Local variables */
    static real temp, temp2;
    static integer i__, j;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
	    , integer *, complex *, complex *, integer *, complex *), 
	    cswap_(integer *, complex *, integer *, complex *, integer *);
    static integer itemp;
    extern /* Subroutine */ int cgeqr2_(integer *, integer *, complex *, 
	    integer *, complex *, complex *, integer *);
    extern doublereal scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */ int cunm2r_(char *, char *, integer *, integer *, 
	    integer *, complex *, integer *, complex *, complex *, integer *, 
	    complex *, integer *);
    static integer ma, mn;
    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *, 
	    integer *, complex *), xerbla_(char *, integer *);
    extern integer isamax_(integer *, real *, integer *);
    static complex aii;
    static integer pvt;
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]


    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --jpvt;
    --tau;
    --work;
    --rwork;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*m)) {
	*info = -4;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGEQPF", &i__1);
	return 0;
    }

    mn = min(*m,*n);

/*     Move initial columns up front */

    itemp = 1;
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (jpvt[i__] != 0) {
	    if (i__ != itemp) {
		cswap_(m, &a_ref(1, i__), &c__1, &a_ref(1, itemp), &c__1);
		jpvt[i__] = jpvt[itemp];
		jpvt[itemp] = i__;
	    } else {
		jpvt[i__] = i__;
	    }
	    ++itemp;
	} else {
	    jpvt[i__] = i__;
	}
/* L10: */
    }
    --itemp;

/*     Compute the QR factorization and update remaining columns */

    if (itemp > 0) {
	ma = min(itemp,*m);
	cgeqr2_(m, &ma, &a[a_offset], lda, &tau[1], &work[1], info);
	if (ma < *n) {
	    i__1 = *n - ma;
	    cunm2r_("Left", "Conjugate transpose", m, &i__1, &ma, &a[a_offset]
		    , lda, &tau[1], &a_ref(1, ma + 1), lda, &work[1], info);
	}
    }

    if (itemp < mn) {

/*        Initialize partial column norms. The first n elements of
          rwork store the exact column norms. */

	i__1 = *n;
	for (i__ = itemp + 1; i__ <= i__1; ++i__) {
	    i__2 = *m - itemp;
	    rwork[i__] = scnrm2_(&i__2, &a_ref(itemp + 1, i__), &c__1);
	    rwork[*n + i__] = rwork[i__];
/* L20: */
	}

/*        Compute factorization */

	i__1 = mn;
	for (i__ = itemp + 1; i__ <= i__1; ++i__) {

/*           Determine ith pivot column and swap if necessary */

	    i__2 = *n - i__ + 1;
	    pvt = i__ - 1 + isamax_(&i__2, &rwork[i__], &c__1);

	    if (pvt != i__) {
		cswap_(m, &a_ref(1, pvt), &c__1, &a_ref(1, i__), &c__1);
		itemp = jpvt[pvt];
		jpvt[pvt] = jpvt[i__];
		jpvt[i__] = itemp;
		rwork[pvt] = rwork[i__];
		rwork[*n + pvt] = rwork[*n + i__];
	    }

/*           Generate elementary reflector H(i) */

	    i__2 = a_subscr(i__, i__);
	    aii.r = a[i__2].r, aii.i = a[i__2].i;
/* Computing MIN */
	    i__2 = i__ + 1;
	    i__3 = *m - i__ + 1;
	    clarfg_(&i__3, &aii, &a_ref(min(i__2,*m), i__), &c__1, &tau[i__]);
	    i__2 = a_subscr(i__, i__);
	    a[i__2].r = aii.r, a[i__2].i = aii.i;

	    if (i__ < *n) {

/*              Apply H(i) to A(i:m,i+1:n) from the left */

		i__2 = a_subscr(i__, i__);
		aii.r = a[i__2].r, aii.i = a[i__2].i;
		i__2 = a_subscr(i__, i__);
		a[i__2].r = 1.f, a[i__2].i = 0.f;
		i__2 = *m - i__ + 1;
		i__3 = *n - i__;
		r_cnjg(&q__1, &tau[i__]);
		clarf_("Left", &i__2, &i__3, &a_ref(i__, i__), &c__1, &q__1, &
			a_ref(i__, i__ + 1), lda, &work[1]);
		i__2 = a_subscr(i__, i__);
		a[i__2].r = aii.r, a[i__2].i = aii.i;
	    }

/*           Update partial column norms */

	    i__2 = *n;
	    for (j = i__ + 1; j <= i__2; ++j) {
		if (rwork[j] != 0.f) {
/* Computing 2nd power */
		    r__1 = c_abs(&a_ref(i__, j)) / rwork[j];
		    temp = 1.f - r__1 * r__1;
		    temp = dmax(temp,0.f);
/* Computing 2nd power */
		    r__1 = rwork[j] / rwork[*n + j];
		    temp2 = temp * .05f * (r__1 * r__1) + 1.f;
		    if (temp2 == 1.f) {
			if (*m - i__ > 0) {
			    i__3 = *m - i__;
			    rwork[j] = scnrm2_(&i__3, &a_ref(i__ + 1, j), &
				    c__1);
			    rwork[*n + j] = rwork[j];
			} else {
			    rwork[j] = 0.f;
			    rwork[*n + j] = 0.f;
			}
		    } else {
			rwork[j] *= sqrt(temp);
		    }
		}
/* L30: */
	    }

/* L40: */
	}
    }
    return 0;

/*     End of CGEQPF */

} /* cgeqpf_ */
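The header above documents CGEQPF's calling sequence: JPVT selects leading columns on entry and returns the permutation on exit, TAU holds the reflector scalars, and RWORK carries the 2*N partial/exact column norms. A hedged call sketch follows, assuming f2c-style typedefs and the cgeqpf_ prototype shown in the translation above; the matrix values are arbitrary.

/* Hypothetical driver: factor a 3x3 complex matrix with column pivoting.
   The typedefs stand in for f2c.h and are assumed to match the build of
   the translation above. */
typedef long int integer;
typedef float real;
typedef struct { float r, i; } complex;

extern int cgeqpf_(integer *m, integer *n, complex *a, integer *lda,
                   integer *jpvt, complex *tau, complex *work,
                   real *rwork, integer *info);

int cgeqpf_demo(void)
{
    integer m = 3, n = 3, lda = 3, info = 0;
    complex a[9] = { {4.f,0.f},{2.f,0.f},{1.f,0.f},     /* column 1 */
                     {0.f,1.f},{3.f,0.f},{0.f,0.f},     /* column 2 */
                     {1.f,1.f},{0.f,0.f},{2.f,0.f} };   /* column 3 */
    integer jpvt[3] = {0, 0, 0};         /* 0 = free column */
    complex tau[3], work[3];
    real rwork[6];                       /* 2*N */

    cgeqpf_(&m, &n, a, &lda, jpvt, tau, work, rwork, &info);

    /* On exit: R sits in the upper triangle of a, the reflectors below it,
       and jpvt[j-1] = i means column j of A*P was column i of A. */
    return (int) info;
}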
Example #19
/* Subroutine */ int claein_(logical *rightv, logical *noinit, integer *n, 
	complex *h__, integer *ldh, complex *w, complex *v, complex *b, 
	integer *ldb, real *rwork, real *eps3, real *smlnum, integer *info)
{
/*  -- LAPACK auxiliary routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    CLAEIN uses inverse iteration to find a right or left eigenvector   
    corresponding to the eigenvalue W of a complex upper Hessenberg   
    matrix H.   

    Arguments   
    =========   

    RIGHTV   (input) LOGICAL   
            = .TRUE. : compute right eigenvector;   
            = .FALSE.: compute left eigenvector.   

    NOINIT   (input) LOGICAL   
            = .TRUE. : no initial vector supplied in V   
            = .FALSE.: initial vector supplied in V.   

    N       (input) INTEGER   
            The order of the matrix H.  N >= 0.   

    H       (input) COMPLEX array, dimension (LDH,N)   
            The upper Hessenberg matrix H.   

    LDH     (input) INTEGER   
            The leading dimension of the array H.  LDH >= max(1,N).   

    W       (input) COMPLEX   
            The eigenvalue of H whose corresponding right or left   
            eigenvector is to be computed.   

    V       (input/output) COMPLEX array, dimension (N)   
            On entry, if NOINIT = .FALSE., V must contain a starting   
            vector for inverse iteration; otherwise V need not be set.   
            On exit, V contains the computed eigenvector, normalized so   
            that the component of largest magnitude has magnitude 1; here   
            the magnitude of a complex number (x,y) is taken to be   
            |x| + |y|.   

    B       (workspace) COMPLEX array, dimension (LDB,N)   

    LDB     (input) INTEGER   
            The leading dimension of the array B.  LDB >= max(1,N).   

    RWORK   (workspace) REAL array, dimension (N)   

    EPS3    (input) REAL   
            A small machine-dependent value which is used to perturb   
            close eigenvalues, and to replace zero pivots.   

    SMLNUM  (input) REAL   
            A machine-dependent value close to the underflow threshold.   

    INFO    (output) INTEGER   
            = 0:  successful exit   
            = 1:  inverse iteration did not converge; V is set to the   
                  last iterate.   

    =====================================================================   


       Parameter adjustments */
    /* Table of constant values */
    static integer c__1 = 1;
    
    /* System generated locals */
    integer b_dim1, b_offset, h_dim1, h_offset, i__1, i__2, i__3, i__4, i__5;
    real r__1, r__2, r__3, r__4;
    complex q__1, q__2;
    /* Builtin functions */
    double sqrt(doublereal), r_imag(complex *);
    /* Local variables */
    static integer ierr;
    static complex temp;
    static integer i__, j;
    static real scale;
    static complex x;
    static char trans[1];
    static real rtemp, rootn, vnorm;
    extern doublereal scnrm2_(integer *, complex *, integer *);
    static complex ei, ej;
    extern integer icamax_(integer *, complex *, integer *);
    extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
	    *), clatrs_(char *, char *, char *, char *, integer *, complex *, 
	    integer *, complex *, real *, real *, integer *);
    extern doublereal scasum_(integer *, complex *, integer *);
    static char normin[1];
    static real nrmsml, growto;
    static integer its;
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]
#define h___subscr(a_1,a_2) (a_2)*h_dim1 + a_1
#define h___ref(a_1,a_2) h__[h___subscr(a_1,a_2)]


    h_dim1 = *ldh;
    h_offset = 1 + h_dim1 * 1;
    h__ -= h_offset;
    --v;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    --rwork;

    /* Function Body */
    *info = 0;

/*     GROWTO is the threshold used in the acceptance test for an   
       eigenvector. */

    rootn = sqrt((real) (*n));
    growto = .1f / rootn;
/* Computing MAX */
    r__1 = 1.f, r__2 = *eps3 * rootn;
    nrmsml = dmax(r__1,r__2) * *smlnum;

/*     Form B = H - W*I (except that the subdiagonal elements are not   
       stored). */

    i__1 = *n;
    for (j = 1; j <= i__1; ++j) {
	i__2 = j - 1;
	for (i__ = 1; i__ <= i__2; ++i__) {
	    i__3 = b_subscr(i__, j);
	    i__4 = h___subscr(i__, j);
	    b[i__3].r = h__[i__4].r, b[i__3].i = h__[i__4].i;
/* L10: */
	}
	i__2 = b_subscr(j, j);
	i__3 = h___subscr(j, j);
	q__1.r = h__[i__3].r - w->r, q__1.i = h__[i__3].i - w->i;
	b[i__2].r = q__1.r, b[i__2].i = q__1.i;
/* L20: */
    }

    if (*noinit) {

/*        Initialize V. */

	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    i__2 = i__;
	    v[i__2].r = *eps3, v[i__2].i = 0.f;
/* L30: */
	}
    } else {

/*        Scale supplied initial vector. */

	vnorm = scnrm2_(n, &v[1], &c__1);
	r__1 = *eps3 * rootn / dmax(vnorm,nrmsml);
	csscal_(n, &r__1, &v[1], &c__1);
    }

    if (*rightv) {

/*        LU decomposition with partial pivoting of B, replacing zero   
          pivots by EPS3. */

	i__1 = *n - 1;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    i__2 = h___subscr(i__ + 1, i__);
	    ei.r = h__[i__2].r, ei.i = h__[i__2].i;
	    i__2 = b_subscr(i__, i__);
	    if ((r__1 = b[i__2].r, dabs(r__1)) + (r__2 = r_imag(&b_ref(i__, 
		    i__)), dabs(r__2)) < (r__3 = ei.r, dabs(r__3)) + (r__4 = 
		    r_imag(&ei), dabs(r__4))) {

/*              Interchange rows and eliminate. */

		cladiv_(&q__1, &b_ref(i__, i__), &ei);
		x.r = q__1.r, x.i = q__1.i;
		i__2 = b_subscr(i__, i__);
		b[i__2].r = ei.r, b[i__2].i = ei.i;
		i__2 = *n;
		for (j = i__ + 1; j <= i__2; ++j) {
		    i__3 = b_subscr(i__ + 1, j);
		    temp.r = b[i__3].r, temp.i = b[i__3].i;
		    i__3 = b_subscr(i__ + 1, j);
		    i__4 = b_subscr(i__, j);
		    q__2.r = x.r * temp.r - x.i * temp.i, q__2.i = x.r * 
			    temp.i + x.i * temp.r;
		    q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4].i - q__2.i;
		    b[i__3].r = q__1.r, b[i__3].i = q__1.i;
		    i__3 = b_subscr(i__, j);
		    b[i__3].r = temp.r, b[i__3].i = temp.i;
/* L40: */
		}
	    } else {

/*              Eliminate without interchange. */

		i__2 = b_subscr(i__, i__);
		if (b[i__2].r == 0.f && b[i__2].i == 0.f) {
		    i__3 = b_subscr(i__, i__);
		    b[i__3].r = *eps3, b[i__3].i = 0.f;
		}
		cladiv_(&q__1, &ei, &b_ref(i__, i__));
		x.r = q__1.r, x.i = q__1.i;
		if (x.r != 0.f || x.i != 0.f) {
		    i__2 = *n;
		    for (j = i__ + 1; j <= i__2; ++j) {
			i__3 = b_subscr(i__ + 1, j);
			i__4 = b_subscr(i__ + 1, j);
			i__5 = b_subscr(i__, j);
			q__2.r = x.r * b[i__5].r - x.i * b[i__5].i, q__2.i = 
				x.r * b[i__5].i + x.i * b[i__5].r;
			q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4].i - 
				q__2.i;
			b[i__3].r = q__1.r, b[i__3].i = q__1.i;
/* L50: */
		    }
		}
	    }
/* L60: */
	}
	i__1 = b_subscr(*n, *n);
	if (b[i__1].r == 0.f && b[i__1].i == 0.f) {
	    i__2 = b_subscr(*n, *n);
	    b[i__2].r = *eps3, b[i__2].i = 0.f;
	}

	*(unsigned char *)trans = 'N';

    } else {

/*        UL decomposition with partial pivoting of B, replacing zero   
          pivots by EPS3. */

	for (j = *n; j >= 2; --j) {
	    i__1 = h___subscr(j, j - 1);
	    ej.r = h__[i__1].r, ej.i = h__[i__1].i;
	    i__1 = b_subscr(j, j);
	    if ((r__1 = b[i__1].r, dabs(r__1)) + (r__2 = r_imag(&b_ref(j, j)),
		     dabs(r__2)) < (r__3 = ej.r, dabs(r__3)) + (r__4 = r_imag(
		    &ej), dabs(r__4))) {

/*              Interchange columns and eliminate. */

		cladiv_(&q__1, &b_ref(j, j), &ej);
		x.r = q__1.r, x.i = q__1.i;
		i__1 = b_subscr(j, j);
		b[i__1].r = ej.r, b[i__1].i = ej.i;
		i__1 = j - 1;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    i__2 = b_subscr(i__, j - 1);
		    temp.r = b[i__2].r, temp.i = b[i__2].i;
		    i__2 = b_subscr(i__, j - 1);
		    i__3 = b_subscr(i__, j);
		    q__2.r = x.r * temp.r - x.i * temp.i, q__2.i = x.r * 
			    temp.i + x.i * temp.r;
		    q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i - q__2.i;
		    b[i__2].r = q__1.r, b[i__2].i = q__1.i;
		    i__2 = b_subscr(i__, j);
		    b[i__2].r = temp.r, b[i__2].i = temp.i;
/* L70: */
		}
	    } else {

/*              Eliminate without interchange. */

		i__1 = b_subscr(j, j);
		if (b[i__1].r == 0.f && b[i__1].i == 0.f) {
		    i__2 = b_subscr(j, j);
		    b[i__2].r = *eps3, b[i__2].i = 0.f;
		}
		cladiv_(&q__1, &ej, &b_ref(j, j));
		x.r = q__1.r, x.i = q__1.i;
		if (x.r != 0.f || x.i != 0.f) {
		    i__1 = j - 1;
		    for (i__ = 1; i__ <= i__1; ++i__) {
			i__2 = b_subscr(i__, j - 1);
			i__3 = b_subscr(i__, j - 1);
			i__4 = b_subscr(i__, j);
			q__2.r = x.r * b[i__4].r - x.i * b[i__4].i, q__2.i = 
				x.r * b[i__4].i + x.i * b[i__4].r;
			q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i - 
				q__2.i;
			b[i__2].r = q__1.r, b[i__2].i = q__1.i;
/* L80: */
		    }
		}
	    }
/* L90: */
	}
	i__1 = b_subscr(1, 1);
	if (b[i__1].r == 0.f && b[i__1].i == 0.f) {
	    i__2 = b_subscr(1, 1);
	    b[i__2].r = *eps3, b[i__2].i = 0.f;
	}

	*(unsigned char *)trans = 'C';

    }

    *(unsigned char *)normin = 'N';
    i__1 = *n;
    for (its = 1; its <= i__1; ++its) {

/*        Solve U*x = scale*v for a right eigenvector   
            or U'*x = scale*v for a left eigenvector,   
          overwriting x on v. */

	clatrs_("Upper", trans, "Nonunit", normin, n, &b[b_offset], ldb, &v[1]
		, &scale, &rwork[1], &ierr);
	*(unsigned char *)normin = 'Y';

/*        Test for sufficient growth in the norm of v. */

	vnorm = scasum_(n, &v[1], &c__1);
	if (vnorm >= growto * scale) {
	    goto L120;
	}

/*        Choose new orthogonal starting vector and try again. */

	rtemp = *eps3 / (rootn + 1.f);
	v[1].r = *eps3, v[1].i = 0.f;
	i__2 = *n;
	for (i__ = 2; i__ <= i__2; ++i__) {
	    i__3 = i__;
	    v[i__3].r = rtemp, v[i__3].i = 0.f;
/* L100: */
	}
	i__2 = *n - its + 1;
	i__3 = *n - its + 1;
	r__1 = *eps3 * rootn;
	q__1.r = v[i__3].r - r__1, q__1.i = v[i__3].i;
	v[i__2].r = q__1.r, v[i__2].i = q__1.i;
/* L110: */
    }

/*     Failure to find eigenvector in N iterations. */

    *info = 1;

L120:

/*     Normalize eigenvector. */

    i__ = icamax_(n, &v[1], &c__1);
    i__1 = i__;
    r__3 = 1.f / ((r__1 = v[i__1].r, dabs(r__1)) + (r__2 = r_imag(&v[i__]), 
	    dabs(r__2)));
    csscal_(n, &r__3, &v[1], &c__1);

    return 0;

/*     End of CLAEIN */

} /* claein_ */
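CLAEIN, as documented above, runs inverse iteration on B = H - w*I, scaling the start vector by EPS3*sqrt(n) and falling back to a perturbed start vector on each failed sweep. Below is a hedged call sketch; the f2c-style typedefs, the slamch_ prototype, and the choice eps3 ~ ulp (LAPACK drivers such as CHSEIN use ||H||*ulp) are assumptions made for illustration.

#include <stdlib.h>

typedef long int integer;
typedef long int logical;
typedef float real;
typedef double doublereal;
typedef struct { float r, i; } complex;

extern doublereal slamch_(char *);
extern int claein_(logical *rightv, logical *noinit, integer *n, complex *h__,
                   integer *ldh, complex *w, complex *v, complex *b,
                   integer *ldb, real *rwork, real *eps3, real *smlnum,
                   integer *info);

/* Right eigenvector of an n-by-n upper Hessenberg h for the eigenvalue w;
   v must have room for n entries.  Returns CLAEIN's INFO (1 = no convergence). */
int claein_demo(complex *h, integer n, integer ldh, complex w, complex *v)
{
    logical rightv = 1, noinit = 1;          /* let CLAEIN pick the start vector */
    integer info = -1, ldb = (n > 1) ? n : 1;
    real ulp = (real) slamch_("P"), unfl = (real) slamch_("S");
    real eps3 = ulp;                         /* crude stand-in for ||H||*ulp */
    real smlnum = unfl * ((real) n / ulp);
    complex *b  = malloc((size_t) (ldb * n) * sizeof *b);
    real *rwork = malloc((size_t) n * sizeof *rwork);

    if (b != NULL && rwork != NULL)
        claein_(&rightv, &noinit, &n, h, &ldh, &w, v, b, &ldb,
                rwork, &eps3, &smlnum, &info);
    free(b);
    free(rwork);
    return (int) info;
}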
Example #20
File: citersol.c  Project: gilso/Packages
int main(int argc, char *argv[])
{
    void cmatvec_mult(complex alpha, complex x[], complex beta, complex y[]);
    void cpsolve(int n, complex x[], complex y[]);
    extern int cfgmr( int n,
	void (*matvec_mult)(complex, complex [], complex, complex []),
	void (*psolve)(int n, complex [], complex[]),
	complex *rhs, complex *sol, double tol, int restrt, int *itmax,
	FILE *fits);
    extern int cfill_diag(int n, NCformat *Astore);

    char     equed[1] = {'B'};
    yes_no_t equil;
    trans_t  trans;
    SuperMatrix A, L, U;
    SuperMatrix B, X;
    NCformat *Astore;
    NCformat *Ustore;
    SCformat *Lstore;
    complex   *a;
    int      *asub, *xa;
    int      *etree;
    int      *perm_c; /* column permutation vector */
    int      *perm_r; /* row permutations from partial pivoting */
    int      nrhs, ldx, lwork, info, m, n, nnz;
    complex   *rhsb, *rhsx, *xact;
    complex   *work = NULL;
    float   *R, *C;
    float   u, rpg, rcond;
    complex zero = {0.0, 0.0};
    complex one = {1.0, 0.0};
    complex none = {-1.0, 0.0};
    mem_usage_t   mem_usage;
    superlu_options_t options;
    SuperLUStat_t stat;

    int restrt, iter, maxit, i;
    double resid;
    complex *x, *b;

#ifdef DEBUG
    extern int num_drop_L, num_drop_U;
#endif

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC("Enter main()");
#endif

    /* Defaults */
    lwork = 0;
    nrhs  = 1;
    equil = YES;
    u	  = 0.1; /* u=1.0 for complete factorization */
    trans = NOTRANS;

    /* Set the default input options:
	options.Fact = DOFACT;
	options.Equil = YES;
	options.ColPerm = COLAMD;
	options.DiagPivotThresh = 0.1; //different from complete LU
	options.Trans = NOTRANS;
	options.IterRefine = NOREFINE;
	options.SymmetricMode = NO;
	options.PivotGrowth = NO;
	options.ConditionNumber = NO;
	options.PrintStat = YES;
	options.RowPerm = LargeDiag;
	options.ILU_DropTol = 1e-4;
	options.ILU_FillTol = 1e-2;
	options.ILU_FillFactor = 10.0;
	options.ILU_DropRule = DROP_BASIC | DROP_AREA;
	options.ILU_Norm = INF_NORM;
	options.ILU_MILU = SMILU_2;
     */
    ilu_set_default_options(&options);

    /* Modify the defaults. */
    options.PivotGrowth = YES;	  /* Compute reciprocal pivot growth */
    options.ConditionNumber = YES;/* Compute reciprocal condition number */

    if ( lwork > 0 ) {
	work = SUPERLU_MALLOC(lwork);
	if ( !work ) ABORT("Malloc fails for work[].");
    }

    /* Read matrix A from a file in Harwell-Boeing format.*/
    if (argc < 2)
    {
	printf("Usage:\n%s [OPTION] < [INPUT] > [OUTPUT]\nOPTION:\n"
		"-h -hb:\n\t[INPUT] is a Harwell-Boeing format matrix.\n"
		"-r -rb:\n\t[INPUT] is a Rutherford-Boeing format matrix.\n"
		"-t -triplet:\n\t[INPUT] is a triplet format matrix.\n",
		argv[0]);
	return 0;
    }
    else
    {
	switch (argv[1][1])
	{
	    case 'H':
	    case 'h':
		printf("Input a Harwell-Boeing format matrix:\n");
		creadhb(&m, &n, &nnz, &a, &asub, &xa);
		break;
	    case 'R':
	    case 'r':
		printf("Input a Rutherford-Boeing format matrix:\n");
		creadrb(&m, &n, &nnz, &a, &asub, &xa);
		break;
	    case 'T':
	    case 't':
		printf("Input a triplet format matrix:\n");
		creadtriple(&m, &n, &nnz, &a, &asub, &xa);
		break;
	    default:
		printf("Unrecognized format.\n");
		return 0;
	}
    }

    cCreate_CompCol_Matrix(&A, m, n, nnz, a, asub, xa, SLU_NC, SLU_C, SLU_GE);
    Astore = A.Store;
    cfill_diag(n, Astore);
    printf("Dimension %dx%d; # nonzeros %d\n", A.nrow, A.ncol, Astore->nnz);
    fflush(stdout);

    if ( !(rhsb = complexMalloc(m * nrhs)) ) ABORT("Malloc fails for rhsb[].");
    if ( !(rhsx = complexMalloc(m * nrhs)) ) ABORT("Malloc fails for rhsx[].");
    cCreate_Dense_Matrix(&B, m, nrhs, rhsb, m, SLU_DN, SLU_C, SLU_GE);
    cCreate_Dense_Matrix(&X, m, nrhs, rhsx, m, SLU_DN, SLU_C, SLU_GE);
    xact = complexMalloc(n * nrhs);
    ldx = n;
    cGenXtrue(n, nrhs, xact, ldx);
    cFillRHS(trans, nrhs, xact, ldx, &A, &B);

    if ( !(etree = intMalloc(n)) ) ABORT("Malloc fails for etree[].");
    if ( !(perm_r = intMalloc(m)) ) ABORT("Malloc fails for perm_r[].");
    if ( !(perm_c = intMalloc(n)) ) ABORT("Malloc fails for perm_c[].");
    if ( !(R = (float *) SUPERLU_MALLOC(A.nrow * sizeof(float))) )
	ABORT("SUPERLU_MALLOC fails for R[].");
    if ( !(C = (float *) SUPERLU_MALLOC(A.ncol * sizeof(float))) )
	ABORT("SUPERLU_MALLOC fails for C[].");

    info = 0;
#ifdef DEBUG
    num_drop_L = 0;
    num_drop_U = 0;
#endif

    /* Initialize the statistics variables. */
    StatInit(&stat);

    /* Compute the incomplete factorization and compute the condition number
       and pivot growth using cgsisx. */
    cgsisx(&options, &A, perm_c, perm_r, etree, equed, R, C, &L, &U, work,
	   lwork, &B, &X, &rpg, &rcond, &mem_usage, &stat, &info);

    Lstore = (SCformat *) L.Store;
    Ustore = (NCformat *) U.Store;
    printf("cgsisx(): info %d\n", info);
    if (info > 0 || rcond < 1e-8 || rpg > 1e8)
	printf("WARNING: This preconditioner might be unstable.\n");

    if ( info == 0 || info == n+1 ) {

	if ( options.PivotGrowth == YES )
	    printf("Recip. pivot growth = %e\n", rpg);
	if ( options.ConditionNumber == YES )
	    printf("Recip. condition number = %e\n", rcond);

    } else if ( info > 0 && lwork == -1 ) {
	printf("** Estimated memory: %d bytes\n", info - n);
    }
    printf("n(A) = %d, nnz(A) = %d\n", n, Astore->nnz);
    printf("No of nonzeros in factor L = %d\n", Lstore->nnz);
    printf("No of nonzeros in factor U = %d\n", Ustore->nnz);
    printf("No of nonzeros in L+U = %d\n", Lstore->nnz + Ustore->nnz - n);
    printf("Fill ratio: nnz(F)/nnz(A) = %.3f\n",
	    ((double)(Lstore->nnz) + (double)(Ustore->nnz) - (double)n)
	    / (double)Astore->nnz);
    printf("L\\U MB %.3f\ttotal MB needed %.3f\n",
	   mem_usage.for_lu/1e6, mem_usage.total_needed/1e6);
    fflush(stdout);

    /* Set the global variables. */
    GLOBAL_A = &A;
    GLOBAL_L = &L;
    GLOBAL_U = &U;
    GLOBAL_STAT = &stat;
    GLOBAL_PERM_C = perm_c;
    GLOBAL_PERM_R = perm_r;

    /* Set the variables used by GMRES. */
    restrt = SUPERLU_MIN(n / 3 + 1, 50);
    maxit = 1000;
    iter = maxit;
    resid = 1e-8;
    if (!(b = complexMalloc(m))) ABORT("Malloc fails for b[].");
    if (!(x = complexMalloc(n))) ABORT("Malloc fails for x[].");
    sp_cgemv("N", one, &A, xact, 1, zero, b, 1);

    if (info <= n + 1)
    {
	int i_1 = 1;
	double maxferr = 0.0, nrmA, nrmB, res, t;
        complex temp;
	extern float scnrm2_(int *, complex [], int *);
	extern void caxpy_(int *, complex *, complex [], int *, complex [], int *);

	/* Call GMRES. */
	/*double *sol = (double*) ((DNformat*) X.Store)->nzval;
	for (i = 0; i < n; i++) x[i] = sol[i];*/
	for (i = 0; i < n; i++) x[i] = zero;

	t = SuperLU_timer_();

	cfgmr(n, cmatvec_mult, cpsolve, b, x, resid, restrt, &iter, stdout);

	t = SuperLU_timer_() - t;

	/* Output the result. */
	nrmA = scnrm2_(&(Astore->nnz), (complex *)((DNformat *)A.Store)->nzval,
		&i_1);
	nrmB = scnrm2_(&m, b, &i_1);
	sp_cgemv("N", none, &A, x, 1, one, b, 1);
	res = scnrm2_(&m, b, &i_1);
	resid = res / nrmB;
	printf("||A||_F = %.1e, ||B||_2 = %.1e, ||B-A*X||_2 = %.1e, "
		"relres = %.1e\n", nrmA, nrmB, res, resid);

	if (iter >= maxit)
	{
	    if (resid >= 1.0) iter = -180;
	    else if (resid > 1e-8) iter = -111;
	}
	printf("iteration: %d\nresidual: %.1e\nGMRES time: %.2f seconds.\n",
		iter, resid, t);

	for (i = 0; i < m; i++) {
	    c_sub(&temp, &x[i], &xact[i]);
	    maxferr = SUPERLU_MAX(maxferr, c_abs1(&temp));
	}
	printf("||X-X_true||_oo = %.1e\n", maxferr);
    }
#ifdef DEBUG
    printf("%d entries in L and %d entries in U dropped.\n",
	    num_drop_L, num_drop_U);
#endif
    fflush(stdout);

    if ( options.PrintStat ) StatPrint(&stat);
    StatFree(&stat);

    SUPERLU_FREE (rhsb);
    SUPERLU_FREE (rhsx);
    SUPERLU_FREE (xact);
    SUPERLU_FREE (etree);
    SUPERLU_FREE (perm_r);
    SUPERLU_FREE (perm_c);
    SUPERLU_FREE (R);
    SUPERLU_FREE (C);
    Destroy_CompCol_Matrix(&A);
    Destroy_SuperMatrix_Store(&B);
    Destroy_SuperMatrix_Store(&X);
    if ( lwork >= 0 ) {
	Destroy_SuperNode_Matrix(&L);
	Destroy_CompCol_Matrix(&U);
    }
    SUPERLU_FREE(b);
    SUPERLU_FREE(x);

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC("Exit main()");
#endif

    return 0;
}
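main() declares cmatvec_mult and cpsolve but their bodies are not shown here. In the SuperLU ILU example drivers they are thin wrappers around sp_cgemv and cgstrs that reuse the GLOBAL_* pointers set above; the following is a hedged sketch along those lines (the header name slu_cdefs.h and the ccopy_ prototype are assumptions).

#include "slu_cdefs.h"    /* SuperLU single-complex definitions (assumed header name) */

/* Set by main() above; assumed to be defined as file-scope globals near the
   top of this driver. */
extern SuperMatrix *GLOBAL_A, *GLOBAL_L, *GLOBAL_U;
extern SuperLUStat_t *GLOBAL_STAT;
extern int *GLOBAL_PERM_C, *GLOBAL_PERM_R;

/* y := alpha*A*x + beta*y, the matrix-vector kernel handed to cfgmr(). */
void cmatvec_mult(complex alpha, complex x[], complex beta, complex y[])
{
    sp_cgemv("N", alpha, GLOBAL_A, x, 1, beta, y, 1);
}

/* y := M^{-1}*x with M = L*U from cgsisx(), i.e. one ILU preconditioner solve. */
void cpsolve(int n, complex x[], complex y[])
{
    extern int ccopy_(int *, complex *, int *, complex *, int *);  /* BLAS copy (assumed prototype) */
    SuperMatrix B;
    int i_1 = 1, info;

    ccopy_(&n, x, &i_1, y, &i_1);
    cCreate_Dense_Matrix(&B, n, 1, y, n, SLU_DN, SLU_C, SLU_GE);
    cgstrs(NOTRANS, GLOBAL_L, GLOBAL_U, GLOBAL_PERM_C, GLOBAL_PERM_R,
           &B, GLOBAL_STAT, &info);
    Destroy_SuperMatrix_Store(&B);
}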
Example #21
float scnrm2( int n, complex *x, int incx)
{
    return scnrm2_(&n, x, &incx);
}
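This wrapper only converts the by-value C arguments into the by-reference form the Fortran-style scnrm2_ expects. A small usage sketch, assuming the project's complex type is the usual {float r, i} pair:

#include <stdio.h>

int scnrm2_demo(void)
{
    complex v[3] = { {3.f, 0.f}, {0.f, 4.f}, {0.f, 0.f} };
    float nrm = scnrm2(3, v, 1);          /* expected: 5.0 */
    printf("||v||_2 = %g\n", nrm);
    return 0;
}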
Example #22
/* Subroutine */ int clarfp_(integer *n, complex *alpha, complex *x, integer *
	incx, complex *tau)
{
    /* System generated locals */
    integer i__1, i__2;
    real r__1, r__2;
    complex q__1, q__2;

    /* Builtin functions */
    double r_imag(complex *), r_sign(real *, real *);

    /* Local variables */
    integer j, knt;
    real beta;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
	    integer *);
    real alphi, alphr, xnorm;
    extern doublereal scnrm2_(integer *, complex *, integer *), slapy2_(real *
, real *), slapy3_(real *, real *, real *);
    extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
    extern doublereal slamch_(char *);
    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer 
	    *);
    real safmin, rsafmn;


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CLARFP generates a complex elementary reflector H of order n, such */
/*  that */

/*        H' * ( alpha ) = ( beta ),   H' * H = I. */
/*             (   x   )   (   0  ) */

/*  where alpha and beta are scalars, beta is real and non-negative, and */
/*  x is an (n-1)-element complex vector.  H is represented in the form */

/*        H = I - tau * ( 1 ) * ( 1 v' ) , */
/*                      ( v ) */

/*  where tau is a complex scalar and v is a complex (n-1)-element */
/*  vector. Note that H is not hermitian. */

/*  If the elements of x are all zero and alpha is real, then tau = 0 */
/*  and H is taken to be the unit matrix. */

/*  Otherwise  1 <= real(tau) <= 2  and  abs(tau-1) <= 1 . */

/*  Arguments */
/*  ========= */

/*  N       (input) INTEGER */
/*          The order of the elementary reflector. */

/*  ALPHA   (input/output) COMPLEX */
/*          On entry, the value alpha. */
/*          On exit, it is overwritten with the value beta. */

/*  X       (input/output) COMPLEX array, dimension */
/*                         (1+(N-2)*abs(INCX)) */
/*          On entry, the vector x. */
/*          On exit, it is overwritten with the vector v. */

/*  INCX    (input) INTEGER */
/*          The increment between elements of X. INCX > 0. */

/*  TAU     (output) COMPLEX */
/*          The value tau. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    --x;

    /* Function Body */
    if (*n <= 0) {
	tau->r = 0.f, tau->i = 0.f;
	return 0;
    }

    i__1 = *n - 1;
    xnorm = scnrm2_(&i__1, &x[1], incx);
    alphr = alpha->r;
    alphi = r_imag(alpha);

    if (xnorm == 0.f) {

/*        H  =  [1-alpha/abs(alpha) 0; 0 I], sign chosen so ALPHA >= 0. */

	if (alphi == 0.f) {
	    if (alphr >= 0.f) {
/*              When TAU.eq.ZERO, the vector is special-cased to be */
/*              all zeros in the application routines.  We do not need */
/*              to clear it. */
		tau->r = 0.f, tau->i = 0.f;
	    } else {
/*              However, the application routines rely on explicit */
/*              zero checks when TAU.ne.ZERO, and we must clear X. */
		tau->r = 2.f, tau->i = 0.f;
		i__1 = *n - 1;
		for (j = 1; j <= i__1; ++j) {
		    i__2 = (j - 1) * *incx + 1;
		    x[i__2].r = 0.f, x[i__2].i = 0.f;
		}
		q__1.r = -alpha->r, q__1.i = -alpha->i;
		alpha->r = q__1.r, alpha->i = q__1.i;
	    }
	} else {
/*           Only "reflecting" the diagonal entry to be real and non-negative. */
	    xnorm = slapy2_(&alphr, &alphi);
	    r__1 = 1.f - alphr / xnorm;
	    r__2 = -alphi / xnorm;
	    q__1.r = r__1, q__1.i = r__2;
	    tau->r = q__1.r, tau->i = q__1.i;
	    i__1 = *n - 1;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = (j - 1) * *incx + 1;
		x[i__2].r = 0.f, x[i__2].i = 0.f;
	    }
	    alpha->r = xnorm, alpha->i = 0.f;
	}
    } else {

/*        general case */

	r__1 = slapy3_(&alphr, &alphi, &xnorm);
	beta = r_sign(&r__1, &alphr);
	safmin = slamch_("S") / slamch_("E");
	rsafmn = 1.f / safmin;

	knt = 0;
	if (dabs(beta) < safmin) {

/*           XNORM, BETA may be inaccurate; scale X and recompute them */

L10:
	    ++knt;
	    i__1 = *n - 1;
	    csscal_(&i__1, &rsafmn, &x[1], incx);
	    beta *= rsafmn;
	    alphi *= rsafmn;
	    alphr *= rsafmn;
	    if (dabs(beta) < safmin) {
		goto L10;
	    }

/*           New BETA is at most 1, at least SAFMIN */

	    i__1 = *n - 1;
	    xnorm = scnrm2_(&i__1, &x[1], incx);
	    q__1.r = alphr, q__1.i = alphi;
	    alpha->r = q__1.r, alpha->i = q__1.i;
	    r__1 = slapy3_(&alphr, &alphi, &xnorm);
	    beta = r_sign(&r__1, &alphr);
	}
	q__1.r = alpha->r + beta, q__1.i = alpha->i;
	alpha->r = q__1.r, alpha->i = q__1.i;
	if (beta < 0.f) {
	    beta = -beta;
	    q__2.r = -alpha->r, q__2.i = -alpha->i;
	    q__1.r = q__2.r / beta, q__1.i = q__2.i / beta;
	    tau->r = q__1.r, tau->i = q__1.i;
	} else {
	    alphr = alphi * (alphi / alpha->r);
	    alphr += xnorm * (xnorm / alpha->r);
	    r__1 = alphr / beta;
	    r__2 = -alphi / beta;
	    q__1.r = r__1, q__1.i = r__2;
	    tau->r = q__1.r, tau->i = q__1.i;
	    r__1 = -alphr;
	    q__1.r = r__1, q__1.i = alphi;
	    alpha->r = q__1.r, alpha->i = q__1.i;
	}
	cladiv_(&q__1, &c_b5, alpha);
	alpha->r = q__1.r, alpha->i = q__1.i;
	i__1 = *n - 1;
	cscal_(&i__1, alpha, &x[1], incx);

/*        If BETA is subnormal, it may lose relative accuracy */

	i__1 = knt;
	for (j = 1; j <= i__1; ++j) {
	    beta *= safmin;
/* L20: */
	}
	alpha->r = beta, alpha->i = 0.f;
    }

    return 0;

/*     End of CLARFP */

} /* clarfp_ */
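Per the header above, CLARFP returns beta = ||(alpha; x)||_2 >= 0 in ALPHA and the reflector vector v(2:n) in X. A hedged check on a length-3 vector (the typedefs and prototype are assumed to match the translation above):

typedef long int integer;
typedef struct { float r, i; } complex;

extern int clarfp_(integer *n, complex *alpha, complex *x, integer *incx,
                   complex *tau);

/* Build the reflector for (1+2i, 2i, 1); on exit alpha.r should be close to
   ||(1+2i, 2i, 1)||_2 = sqrt(10) ~ 3.1623 and alpha.i to 0. */
float clarfp_demo(void)
{
    integer n = 3, incx = 1;
    complex alpha = {1.f, 2.f};
    complex x[2]  = { {0.f, 2.f}, {1.f, 0.f} };   /* overwritten with v(2:3) */
    complex tau;

    clarfp_(&n, &alpha, x, &incx, &tau);
    return alpha.r;
}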
Example #23
 int ctgsna_(char *job, char *howmny, int *select, 
	int *n, complex *a, int *lda, complex *b, int *ldb, 
	complex *vl, int *ldvl, complex *vr, int *ldvr, float *s, float 
	*dif, int *mm, int *m, complex *work, int *lwork, int 
	*iwork, int *info)
{
    /* System generated locals */
    int a_dim1, a_offset, b_dim1, b_offset, vl_dim1, vl_offset, vr_dim1, 
	    vr_offset, i__1;
    float r__1, r__2;
    complex q__1;

    /* Builtin functions */
    double c_abs(complex *);

    /* Local variables */
    int i__, k, n1, n2, ks;
    float eps, cond;
    int ierr, ifst;
    float lnrm;
    complex yhax, yhbx;
    int ilst;
    float rnrm, scale;
    extern /* Complex */ VOID cdotc_(complex *, int *, complex *, int 
	    *, complex *, int *);
    extern int lsame_(char *, char *);
    extern  int cgemv_(char *, int *, int *, complex *
, complex *, int *, complex *, int *, complex *, complex *
, int *);
    int lwmin;
    int wants;
    complex dummy[1];
    extern double scnrm2_(int *, complex *, int *), slapy2_(float *
, float *);
    complex dummy1[1];
    extern  int slabad_(float *, float *);
    extern double slamch_(char *);
    extern  int clacpy_(char *, int *, int *, complex 
	    *, int *, complex *, int *), ctgexc_(int *, 
	    int *, int *, complex *, int *, complex *, int *, 
	    complex *, int *, complex *, int *, int *, int *, 
	    int *), xerbla_(char *, int *);
    float bignum;
    int wantbh, wantdf, somcon;
    extern  int ctgsyl_(char *, int *, int *, int 
	    *, complex *, int *, complex *, int *, complex *, int 
	    *, complex *, int *, complex *, int *, complex *, int 
	    *, float *, float *, complex *, int *, int *, int *);
    float smlnum;
    int lquery;


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CTGSNA estimates reciprocal condition numbers for specified */
/*  eigenvalues and/or eigenvectors of a matrix pair (A, B). */

/*  (A, B) must be in generalized Schur canonical form, that is, A and */
/*  B are both upper triangular. */

/*  Arguments */
/*  ========= */

/*  JOB     (input) CHARACTER*1 */
/*          Specifies whether condition numbers are required for */
/*          eigenvalues (S) or eigenvectors (DIF): */
/*          = 'E': for eigenvalues only (S); */
/*          = 'V': for eigenvectors only (DIF); */
/*          = 'B': for both eigenvalues and eigenvectors (S and DIF). */

/*  HOWMNY  (input) CHARACTER*1 */
/*          = 'A': compute condition numbers for all eigenpairs; */
/*          = 'S': compute condition numbers for selected eigenpairs */
/*                 specified by the array SELECT. */

/*  SELECT  (input) LOGICAL array, dimension (N) */
/*          If HOWMNY = 'S', SELECT specifies the eigenpairs for which */
/*          condition numbers are required. To select condition numbers */
/*          for the corresponding j-th eigenvalue and/or eigenvector, */
/*          SELECT(j) must be set to .TRUE.. */
/*          If HOWMNY = 'A', SELECT is not referenced. */

/*  N       (input) INTEGER */
/*          The order of the square matrix pair (A, B). N >= 0. */

/*  A       (input) COMPLEX array, dimension (LDA,N) */
/*          The upper triangular matrix A in the pair (A,B). */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A. LDA >= MAX(1,N). */

/*  B       (input) COMPLEX array, dimension (LDB,N) */
/*          The upper triangular matrix B in the pair (A, B). */

/*  LDB     (input) INTEGER */
/*          The leading dimension of the array B. LDB >= MAX(1,N). */

/*  VL      (input) COMPLEX array, dimension (LDVL,M) */
/*          IF JOB = 'E' or 'B', VL must contain left eigenvectors of */
/*          (A, B), corresponding to the eigenpairs specified by HOWMNY */
/*          and SELECT.  The eigenvectors must be stored in consecutive */
/*          columns of VL, as returned by CTGEVC. */
/*          If JOB = 'V', VL is not referenced. */

/*  LDVL    (input) INTEGER */
/*          The leading dimension of the array VL. LDVL >= 1; and */
/*          If JOB = 'E' or 'B', LDVL >= N. */

/*  VR      (input) COMPLEX array, dimension (LDVR,M) */
/*          IF JOB = 'E' or 'B', VR must contain right eigenvectors of */
/*          (A, B), corresponding to the eigenpairs specified by HOWMNY */
/*          and SELECT.  The eigenvectors must be stored in consecutive */
/*          columns of VR, as returned by CTGEVC. */
/*          If JOB = 'V', VR is not referenced. */

/*  LDVR    (input) INTEGER */
/*          The leading dimension of the array VR. LDVR >= 1; */
/*          If JOB = 'E' or 'B', LDVR >= N. */

/*  S       (output) REAL array, dimension (MM) */
/*          If JOB = 'E' or 'B', the reciprocal condition numbers of the */
/*          selected eigenvalues, stored in consecutive elements of the */
/*          array. */
/*          If JOB = 'V', S is not referenced. */

/*  DIF     (output) REAL array, dimension (MM) */
/*          If JOB = 'V' or 'B', the estimated reciprocal condition */
/*          numbers of the selected eigenvectors, stored in consecutive */
/*          elements of the array. */
/*          If the eigenvalues cannot be reordered to compute DIF(j), */
/*          DIF(j) is set to 0; this can only occur when the true value */
/*          would be very small anyway. */
/*          For each eigenvalue/vector specified by SELECT, DIF stores */
/*          a Frobenius norm-based estimate of Difl. */
/*          If JOB = 'E', DIF is not referenced. */

/*  MM      (input) INTEGER */
/*          The number of elements in the arrays S and DIF. MM >= M. */

/*  M       (output) INTEGER */
/*          The number of elements of the arrays S and DIF used to store */
/*          the specified condition numbers; for each selected eigenvalue */
/*          one element is used. If HOWMNY = 'A', M is set to N. */

/*  WORK    (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */

/*  LWORK  (input) INTEGER */
/*          The dimension of the array WORK. LWORK >= MAX(1,N). */
/*          If JOB = 'V' or 'B', LWORK >= MAX(1,2*N*N). */

/*  IWORK   (workspace) INTEGER array, dimension (N+2) */
/*          If JOB = 'E', IWORK is not referenced. */

/*  INFO    (output) INTEGER */
/*          = 0: Successful exit */
/*          < 0: If INFO = -i, the i-th argument had an illegal value */

/*  Further Details */
/*  =============== */

/*  The reciprocal of the condition number of the i-th generalized */
/*  eigenvalue w = (a, b) is defined as */

/*          S(I) = (|v'Au|**2 + |v'Bu|**2)**(1/2) / (norm(u)*norm(v)) */

/*  where u and v are the right and left eigenvectors of (A, B) */
/*  corresponding to w; |z| denotes the absolute value of the complex */
/*  number, and norm(u) denotes the 2-norm of the vector u. The pair */
/*  (a, b) corresponds to an eigenvalue w = a/b (= v'Au/v'Bu) of the */
/*  matrix pair (A, B). If both a and b equal zero, then (A,B) is */
/*  singular and S(I) = -1 is returned. */

/*  An approximate error bound on the chordal distance between the i-th */
/*  computed generalized eigenvalue w and the corresponding exact */
/*  eigenvalue lambda is */

/*          chord(w, lambda) <=   EPS * norm(A, B) / S(I), */

/*  where EPS is the machine precision. */

/*  The reciprocal of the condition number of the right eigenvector u */
/*  and left eigenvector v corresponding to the generalized eigenvalue w */
/*  is defined as follows. Suppose */

/*                   (A, B) = ( a   *  ) ( b  *  )  1 */
/*                            ( 0  A22 ),( 0 B22 )  n-1 */
/*                              1  n-1     1 n-1 */

/*  Then the reciprocal condition number DIF(I) is */

/*          Difl[(a, b), (A22, B22)]  = sigma-MIN( Zl ) */

/*  where sigma-MIN(Zl) denotes the smallest singular value of */

/*         Zl = [ kron(a, In-1) -kron(1, A22) ] */
/*              [ kron(b, In-1) -kron(1, B22) ]. */

/*  Here In-1 is the identity matrix of size n-1 and X' is the conjugate */
/*  transpose of X. kron(X, Y) is the Kronecker product between the */
/*  matrices X and Y. */

/*  We approximate the smallest singular value of Zl with an upper */
/*  bound. This is done by CLATDF. */

/*  An approximate error bound for a computed eigenvector VL(i) or */
/*  VR(i) is given by */

/*                      EPS * norm(A, B) / DIF(i). */

/*  See ref. [2-3] for more details and further references. */

/*  Based on contributions by */
/*     Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
/*     Umea University, S-901 87 Umea, Sweden. */

/*  References */
/*  ========== */

/*  [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the */
/*      Generalized Real Schur Form of a Regular Matrix Pair (A, B), in */
/*      M.S. Moonen et al (eds), Linear Algebra for Large Scale and */
/*      Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218. */

/*  [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified */
/*      Eigenvalues of a Regular Matrix Pair (A, B) and Condition */
/*      Estimation: Theory, Algorithms and Software, Report */
/*      UMINF - 94.04, Department of Computing Science, Umea University, */
/*      S-901 87 Umea, Sweden, 1994. Also as LAPACK Working Note 87. */
/*      To appear in Numerical Algorithms, 1996. */

/*  [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software */
/*      for Solving the Generalized Sylvester Equation and Estimating the */
/*      Separation between Regular Matrix Pairs, Report UMINF - 93.23, */
/*      Department of Computing Science, Umea University, S-901 87 Umea, */
/*      Sweden, December 1993, Revised April 1994, Also as LAPACK Working */
/*      Note 75. */
/*      To appear in ACM Trans. on Math. Software, Vol 22, No 1, 1996. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Decode and test the input parameters */

    /* Parameter adjustments */
    --select;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    vl_dim1 = *ldvl;
    vl_offset = 1 + vl_dim1;
    vl -= vl_offset;
    vr_dim1 = *ldvr;
    vr_offset = 1 + vr_dim1;
    vr -= vr_offset;
    --s;
    --dif;
    --work;
    --iwork;

    /* Function Body */
    wantbh = lsame_(job, "B");
    wants = lsame_(job, "E") || wantbh;
    wantdf = lsame_(job, "V") || wantbh;

    somcon = lsame_(howmny, "S");

    *info = 0;
    lquery = *lwork == -1;

    if (! wants && ! wantdf) {
	*info = -1;
    } else if (! lsame_(howmny, "A") && ! somcon) {
	*info = -2;
    } else if (*n < 0) {
	*info = -4;
    } else if (*lda < MAX(1,*n)) {
	*info = -6;
    } else if (*ldb < MAX(1,*n)) {
	*info = -8;
    } else if (wants && *ldvl < *n) {
	*info = -10;
    } else if (wants && *ldvr < *n) {
	*info = -12;
    } else {

/*        Set M to the number of eigenpairs for which condition numbers */
/*        are required, and test MM. */

	if (somcon) {
	    *m = 0;
	    i__1 = *n;
	    for (k = 1; k <= i__1; ++k) {
		if (select[k]) {
		    ++(*m);
		}
/* L10: */
	    }
	} else {
	    *m = *n;
	}

	if (*n == 0) {
	    lwmin = 1;
	} else if (lsame_(job, "V") || lsame_(job, 
		"B")) {
	    lwmin = (*n << 1) * *n;
	} else {
	    lwmin = *n;
	}
	work[1].r = (float) lwmin, work[1].i = 0.f;

	if (*mm < *m) {
	    *info = -15;
	} else if (*lwork < lwmin && ! lquery) {
	    *info = -18;
	}
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CTGSNA", &i__1);
	return 0;
    } else if (lquery) {
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }

/*     Get machine constants */

    eps = slamch_("P");
    smlnum = slamch_("S") / eps;
    bignum = 1.f / smlnum;
    slabad_(&smlnum, &bignum);
    ks = 0;
    i__1 = *n;
    for (k = 1; k <= i__1; ++k) {

/*        Determine whether condition numbers are required for the k-th */
/*        eigenpair. */

	if (somcon) {
	    if (! select[k]) {
		goto L20;
	    }
	}

	++ks;

	if (wants) {

/*           Compute the reciprocal condition number of the k-th */
/*           eigenvalue. */

	    rnrm = scnrm2_(n, &vr[ks * vr_dim1 + 1], &c__1);
	    lnrm = scnrm2_(n, &vl[ks * vl_dim1 + 1], &c__1);
	    cgemv_("N", n, n, &c_b19, &a[a_offset], lda, &vr[ks * vr_dim1 + 1]
, &c__1, &c_b20, &work[1], &c__1);
	    cdotc_(&q__1, n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &c__1);
	    yhax.r = q__1.r, yhax.i = q__1.i;
	    cgemv_("N", n, n, &c_b19, &b[b_offset], ldb, &vr[ks * vr_dim1 + 1]
, &c__1, &c_b20, &work[1], &c__1);
	    cdotc_(&q__1, n, &work[1], &c__1, &vl[ks * vl_dim1 + 1], &c__1);
	    yhbx.r = q__1.r, yhbx.i = q__1.i;
	    r__1 = c_abs(&yhax);
	    r__2 = c_abs(&yhbx);
	    cond = slapy2_(&r__1, &r__2);
	    if (cond == 0.f) {
		s[ks] = -1.f;
	    } else {
		s[ks] = cond / (rnrm * lnrm);
	    }
	}

	if (wantdf) {
	    if (*n == 1) {
		r__1 = c_abs(&a[a_dim1 + 1]);
		r__2 = c_abs(&b[b_dim1 + 1]);
		dif[ks] = slapy2_(&r__1, &r__2);
	    } else {

/*              Estimate the reciprocal condition number of the k-th */
/*              eigenvectors. */

/*              Copy the matrix (A, B) to the array WORK and move the */
/*              (k,k)th pair to the (1,1) position. */

		clacpy_("Full", n, n, &a[a_offset], lda, &work[1], n);
		clacpy_("Full", n, n, &b[b_offset], ldb, &work[*n * *n + 1], 
			n);
		ifst = k;
		ilst = 1;

		ctgexc_(&c_false, &c_false, n, &work[1], n, &work[*n * *n + 1]
, n, dummy, &c__1, dummy1, &c__1, &ifst, &ilst, &ierr)
			;

		if (ierr > 0) {

/*                 Ill-conditioned problem - swap rejected. */

		    dif[ks] = 0.f;
		} else {

/*                 Reordering successful, solve generalized Sylvester */
/*                 equation for R and L, */
/*                            A22 * R - L * A11 = A12 */
/*                            B22 * R - L * B11 = B12, */
/*                 and compute estimate of Difl[(A11,B11), (A22, B22)]. */

		    n1 = 1;
		    n2 = *n - n1;
		    i__ = *n * *n + 1;
		    ctgsyl_("N", &c__3, &n2, &n1, &work[*n * n1 + n1 + 1], n, 
			    &work[1], n, &work[n1 + 1], n, &work[*n * n1 + n1 
			    + i__], n, &work[i__], n, &work[n1 + i__], n, &
			    scale, &dif[ks], dummy, &c__1, &iwork[1], &ierr);
		}
	    }
	}

L20:
	;
    }
    work[1].r = (float) lwmin, work[1].i = 0.f;
    return 0;

/*     End of CTGSNA */

} /* ctgsna_ */
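For JOB = 'E' the routine above only needs the left/right eigenvectors from CTGEVC and returns S(k) = sqrt(|v'Au|^2 + |v'Bu|^2) / (norm(u)*norm(v)) for each selected pair. A hedged call sketch using the plain-int prototype shown above; the dummy arrays stand in for arguments that are not referenced when JOB = 'E' and HOWMNY = 'A'.

#include <stdlib.h>

typedef struct { float r, i; } complex;

extern int ctgsna_(char *job, char *howmny, int *select, int *n,
                   complex *a, int *lda, complex *b, int *ldb,
                   complex *vl, int *ldvl, complex *vr, int *ldvr,
                   float *s, float *dif, int *mm, int *m,
                   complex *work, int *lwork, int *iwork, int *info);

/* a, b: n-by-n generalized Schur pair; vl, vr: eigenvectors from CTGEVC;
   s: caller-provided array of length n for the condition numbers. */
int eig_condition_numbers(int n, complex *a, complex *b,
                          complex *vl, complex *vr, float *s)
{
    int lda = n, ldb = n, ldvl = n, ldvr = n;
    int mm = n, m = 0, lwork = (n > 1) ? n : 1, info = -1;
    int select_dummy[1] = {0}, iwork_dummy[1] = {0};
    float dif_dummy[1] = {0.f};
    complex *work = malloc((size_t) lwork * sizeof *work);

    if (work != NULL)
        ctgsna_("E", "A", select_dummy, &n, a, &lda, b, &ldb,
                vl, &ldvl, vr, &ldvr, s, dif_dummy, &mm, &m,
                work, &lwork, iwork_dummy, &info);
    free(work);
    return info;
}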
Example #24
/* Subroutine */ int claqps_(integer *m, integer *n, integer *offset, integer 
	*nb, integer *kb, complex *a, integer *lda, integer *jpvt, complex *
	tau, real *vn1, real *vn2, complex *auxv, complex *f, integer *ldf)
{
    /* System generated locals */
    integer a_dim1, a_offset, f_dim1, f_offset, i__1, i__2, i__3;
    real r__1, r__2;
    complex q__1;

    /* Local variables */
    integer j, k, rk;
    complex akk;
    integer pvt;
    real temp, temp2, tol3z;
    integer itemp;
    integer lsticc;
    integer lastrk;

/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  CLAQPS computes a step of QR factorization with column pivoting */
/*  of a complex M-by-N matrix A by using Blas-3.  It tries to factorize */
/*  NB columns from A starting from the row OFFSET+1, and updates all */
/*  of the matrix with Blas-3 xGEMM. */

/*  In some cases, due to catastrophic cancellations, it cannot */
/*  factorize NB columns.  Hence, the actual number of factorized */
/*  columns is returned in KB. */

/*  Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A. M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A. N >= 0 */

/*  OFFSET  (input) INTEGER */
/*          The number of rows of A that have been factorized in */
/*          previous steps. */

/*  NB      (input) INTEGER */
/*          The number of columns to factorize. */

/*  KB      (output) INTEGER */
/*          The number of columns actually factorized. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the M-by-N matrix A. */
/*          On exit, block A(OFFSET+1:M,1:KB) is the triangular */
/*          factor obtained and block A(1:OFFSET,1:N) has been */
/*          accordingly pivoted, but not factorized. */
/*          The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has */
/*          been updated. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A. LDA >= max(1,M). */

/*  JPVT    (input/output) INTEGER array, dimension (N) */
/*          JPVT(I) = K <==> Column K of the full matrix A has been */
/*          permuted into position I in AP. */

/*  TAU     (output) COMPLEX array, dimension (KB) */
/*          The scalar factors of the elementary reflectors. */

/*  VN1     (input/output) REAL array, dimension (N) */
/*          The vector with the partial column norms. */

/*  VN2     (input/output) REAL array, dimension (N) */
/*          The vector with the exact column norms. */

/*  AUXV    (input/output) COMPLEX array, dimension (NB) */
/*          Auxiliary vector. */

/*  F       (input/output) COMPLEX array, dimension (LDF,NB) */
/*          Matrix F' = L*Y'*A. */

/*  LDF     (input) INTEGER */
/*          The leading dimension of the array F. LDF >= max(1,N). */

/*  Further Details */
/*  =============== */

/*  Based on contributions by */
/*    G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
/*    X. Sun, Computer Science Dept., Duke University, USA */

/*  Partial column norm updating strategy modified by */
/*    Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
/*    University of Zagreb, Croatia. */
/*    June 2006. */
/*  For more details see LAPACK Working Note 176. */
/*  ===================================================================== */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --jpvt;
    --tau;
    --vn1;
    --vn2;
    --auxv;
    f_dim1 = *ldf;
    f_offset = 1 + f_dim1;
    f -= f_offset;

    /* Function Body */
/* Computing MIN */
    i__1 = *m, i__2 = *n + *offset;
    lastrk = min(i__1,i__2);
    lsticc = 0;
    k = 0;
    tol3z = sqrt(slamch_("Epsilon"));

/*     Beginning of while loop. */

L10:
    if (k < *nb && lsticc == 0) {
	++k;
	rk = *offset + k;

/*        Determine ith pivot column and swap if necessary */

	i__1 = *n - k + 1;
	pvt = k - 1 + isamax_(&i__1, &vn1[k], &c__1);
	if (pvt != k) {
	    cswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &c__1);
	    i__1 = k - 1;
	    cswap_(&i__1, &f[pvt + f_dim1], ldf, &f[k + f_dim1], ldf);
	    itemp = jpvt[pvt];
	    jpvt[pvt] = jpvt[k];
	    jpvt[k] = itemp;
	    vn1[pvt] = vn1[k];
	    vn2[pvt] = vn2[k];
	}

/*        Apply previous Householder reflectors to column K: */
/*        A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. */

	if (k > 1) {
	    i__1 = k - 1;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = k + j * f_dim1;
		r_cnjg(&q__1, &f[k + j * f_dim1]);
		f[i__2].r = q__1.r, f[i__2].i = q__1.i;
	    }
	    i__1 = *m - rk + 1;
	    i__2 = k - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("No transpose", &i__1, &i__2, &q__1, &a[rk + a_dim1], lda, 
		    &f[k + f_dim1], ldf, &c_b2, &a[rk + k * a_dim1], &c__1);
	    i__1 = k - 1;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = k + j * f_dim1;
		r_cnjg(&q__1, &f[k + j * f_dim1]);
		f[i__2].r = q__1.r, f[i__2].i = q__1.i;
	    }
	}

/*        Generate elementary reflector H(k). */

	if (rk < *m) {
	    i__1 = *m - rk + 1;
	    clarfp_(&i__1, &a[rk + k * a_dim1], &a[rk + 1 + k * a_dim1], &
		    c__1, &tau[k]);
	} else {
	    clarfp_(&c__1, &a[rk + k * a_dim1], &a[rk + k * a_dim1], &c__1, &
		    tau[k]);
	}

	i__1 = rk + k * a_dim1;
	akk.r = a[i__1].r, akk.i = a[i__1].i;
	i__1 = rk + k * a_dim1;
	a[i__1].r = 1.f, a[i__1].i = 0.f;

/*        Compute Kth column of F: */

/*        Compute  F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K). */

	if (k < *n) {
	    i__1 = *m - rk + 1;
	    i__2 = *n - k;
	    cgemv_("Conjugate transpose", &i__1, &i__2, &tau[k], &a[rk + (k + 
		    1) * a_dim1], lda, &a[rk + k * a_dim1], &c__1, &c_b1, &f[
		    k + 1 + k * f_dim1], &c__1);
	}

/*        Padding F(1:K,K) with zeros. */

	i__1 = k;
	for (j = 1; j <= i__1; ++j) {
	    i__2 = j + k * f_dim1;
	    f[i__2].r = 0.f, f[i__2].i = 0.f;
	}

/*        Incremental updating of F: */
/*        F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)' */
/*                    *A(RK:M,K). */

	if (k > 1) {
	    i__1 = *m - rk + 1;
	    i__2 = k - 1;
	    i__3 = k;
	    q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
	    cgemv_("Conjugate transpose", &i__1, &i__2, &q__1, &a[rk + a_dim1]
, lda, &a[rk + k * a_dim1], &c__1, &c_b1, &auxv[1], &c__1);

	    i__1 = k - 1;
	    cgemv_("No transpose", n, &i__1, &c_b2, &f[f_dim1 + 1], ldf, &
		    auxv[1], &c__1, &c_b2, &f[k * f_dim1 + 1], &c__1);
	}

/*        Update the current row of A: */
/*        A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */

	if (k < *n) {
	    i__1 = *n - k;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemm_("No transpose", "Conjugate transpose", &c__1, &i__1, &k, &
		    q__1, &a[rk + a_dim1], lda, &f[k + 1 + f_dim1], ldf, &
		    c_b2, &a[rk + (k + 1) * a_dim1], lda);
	}

/*        Update partial column norms. */

	if (rk < lastrk) {
	    i__1 = *n;
	    for (j = k + 1; j <= i__1; ++j) {
		if (vn1[j] != 0.f) {

/*                 NOTE: The following 4 lines follow from the analysis in */
/*                 Lapack Working Note 176. */

		    temp = c_abs(&a[rk + j * a_dim1]) / vn1[j];
/* Computing MAX */
		    r__1 = 0.f, r__2 = (temp + 1.f) * (1.f - temp);
		    temp = dmax(r__1,r__2);
/* Computing 2nd power */
		    r__1 = vn1[j] / vn2[j];
		    temp2 = temp * (r__1 * r__1);
		    if (temp2 <= tol3z) {
			vn2[j] = (real) lsticc;
			lsticc = j;
		    } else {
			vn1[j] *= sqrt(temp);
		    }
		}
	    }
	}

	i__1 = rk + k * a_dim1;
	a[i__1].r = akk.r, a[i__1].i = akk.i;

/*        End of while loop. */

	goto L10;
    }
    *kb = k;
    rk = *offset + *kb;

/*     Apply the block reflector to the rest of the matrix: */
/*     A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - */
/*                         A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)'. */

/* Computing MIN */
    i__1 = *n, i__2 = *m - *offset;
    if (*kb < min(i__1,i__2)) {
	i__1 = *m - rk;
	i__2 = *n - *kb;
	q__1.r = -1.f, q__1.i = -0.f;
	cgemm_("No transpose", "Conjugate transpose", &i__1, &i__2, kb, &q__1, 
		 &a[rk + 1 + a_dim1], lda, &f[*kb + 1 + f_dim1], ldf, &c_b2, &
		a[rk + 1 + (*kb + 1) * a_dim1], lda);
    }

/*     Recomputation of difficult columns. */

L60:
    if (lsticc > 0) {
	itemp = i_nint(&vn2[lsticc]);
	i__1 = *m - rk;
	vn1[lsticc] = scnrm2_(&i__1, &a[rk + 1 + lsticc * a_dim1], &c__1);

/*        NOTE: The computation of VN1( LSTICC ) relies on the fact that */
/*        SCNRM2 does not fail on vectors with norm below the value of */
/*        SQRT(SLAMCH('S')) */

	vn2[lsticc] = vn1[lsticc];
	lsticc = itemp;
	goto L60;
    }

    return 0;

/*     End of CLAQPS */

} /* claqps_ */
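The norm-updating block above implements the LAWN 176 safeguard: the downdated partial norm is kept only while temp*(vn1/vn2)^2 stays above tol3z = sqrt(eps); otherwise the column is queued (via LSTICC) for exact recomputation with scnrm2_. A standalone sketch of that test (the helper name and signature are illustrative only):

#include <math.h>

/* Given the current partial norm vn1, the reference norm vn2 from the last
   exact computation, and |a_kj| of the pivot-row entry just eliminated,
   either downdate vn1 in place or return 1 to request an exact recomputation. */
static int downdate_partial_norm(float *vn1, float vn2, float abs_akj,
                                 float tol3z /* = sqrtf(eps) */)
{
    float temp = abs_akj / *vn1;
    temp = fmaxf(0.f, (1.f + temp) * (1.f - temp));   /* 1 - temp^2, clamped */
    float ratio = *vn1 / vn2;
    if (temp * ratio * ratio <= tol3z)
        return 1;                   /* cancellation: recompute with scnrm2_ */
    *vn1 *= sqrtf(temp);
    return 0;
}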
Example #25
File: csvdc.c  Project: 151706061/ITK
/*<       subroutine csvdc(x,ldx,n,p,s,e,u,ldu,v,ldv,work,job,info) >*/
/* Subroutine */ int csvdc_(complex *x, integer *ldx, integer *n, integer *p, 
        complex *s, complex *e, complex *u, integer *ldu, complex *v, integer 
        *ldv, complex *work, integer *job, integer *info)
{
    /* System generated locals */
    integer x_dim1, x_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2, 
            i__3, i__4;
    real r__1, r__2, r__3, r__4;
    complex q__1, q__2, q__3;

    /* Builtin functions */
    double r_imag(complex *), c_abs(complex *);
    void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *);
    double sqrt(doublereal);

    /* Local variables */
    real b, c__, f, g;
    integer i__, j, k, l=0, m;
    complex r__, t;
    real t1, el;
    integer kk;
    real cs;
    integer ll, mm, ls=0;
    real sl;
    integer lu;
    real sm, sn;
    integer lm1, mm1, lp1, mp1, nct, ncu, lls, nrt;
    real emm1, smm1;
    integer kase, jobu, iter;
    real test;
    integer nctp1, nrtp1;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
            integer *);
    real scale;
    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer 
            *, complex *, integer *);
    real shift;
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *, 
            complex *, integer *);
    integer maxit;
    extern /* Subroutine */ int caxpy_(integer *, complex *, complex *, 
            integer *, complex *, integer *), csrot_(integer *, complex *, 
            integer *, complex *, integer *, real *, real *);
    logical wantu, wantv;
    extern /* Subroutine */ int srotg_(real *, real *, real *, real *);
    real ztest;
    extern doublereal scnrm2_(integer *, complex *, integer *);

/*<       integer ldx,n,p,ldu,ldv,job,info >*/
/*<       complex x(ldx,1),s(1),e(1),u(ldu,1),v(ldv,1),work(1) >*/


/*     csvdc is a subroutine to reduce a complex nxp matrix x by */
/*     unitary transformations u and v to diagonal form.  the */
/*     diagonal elements s(i) are the singular values of x.  the */
/*     columns of u are the corresponding left singular vectors, */
/*     and the columns of v the right singular vectors. */

/*     on entry */

/*         x         complex(ldx,p), where ldx.ge.n. */
/*                   x contains the matrix whose singular value */
/*                   decomposition is to be computed.  x is */
/*                   destroyed by csvdc. */

/*         ldx       integer. */
/*                   ldx is the leading dimension of the array x. */

/*         n         integer. */
/*                   n is the number of rows of the matrix x. */

/*         p         integer. */
/*                   p is the number of columns of the matrix x. */

/*         ldu       integer. */
/*                   ldu is the leading dimension of the array u */
/*                   (see below). */

/*         ldv       integer. */
/*                   ldv is the leading dimension of the array v */
/*                   (see below). */

/*         work      complex(n). */
/*                   work is a scratch array. */

/*         job       integer. */
/*                   job controls the computation of the singular */
/*                   vectors.  it has the decimal expansion ab */
/*                   with the following meaning */

/*                        a.eq.0    do not compute the left singular */
/*                                  vectors. */
/*                        a.eq.1    return the n left singular vectors */
/*                                  in u. */
/*                        a.ge.2    returns the first min(n,p) */
/*                                  left singular vectors in u. */
/*                        b.eq.0    do not compute the right singular */
/*                                  vectors. */
/*                        b.eq.1    return the right singular vectors */
/*                                  in v. */

/*     on return */

/*         s         complex(mm), where mm=min(n+1,p). */
/*                   the first min(n,p) entries of s contain the */
/*                   singular values of x arranged in descending */
/*                   order of magnitude. */

/*         e         complex(p). */
/*                   e ordinarily contains zeros.  however see the */
/*                   discussion of info for exceptions. */

/*         u         complex(ldu,k), where ldu.ge.n.  if joba.eq.1 then */
/*                                   k.eq.n, if joba.ge.2 then */
/*                                   k.eq.min(n,p). */
/*                   u contains the matrix of left singular vectors. */
/*                   u is not referenced if joba.eq.0.  if n.le.p */
/*                   or if joba.gt.2, then u may be identified with x */
/*                   in the subroutine call. */

/*         v         complex(ldv,p), where ldv.ge.p. */
/*                   v contains the matrix of right singular vectors. */
/*                   v is not referenced if jobb.eq.0.  if p.le.n, */
/*                   then v may be identified with x in the */
/*                   subroutine call. */

/*         info      integer. */
/*                   the singular values (and their corresponding */
/*                   singular vectors) s(info+1),s(info+2),...,s(m) */
/*                   are correct (here m=min(n,p)).  thus if */
/*                   info.eq.0, all the singular values and their */
/*                   vectors are correct.  in any event, the matrix */
/*                   b = ctrans(u)*x*v is the bidiagonal matrix */
/*                   with the elements of s on its diagonal and the */
/*                   elements of e on its super-diagonal (ctrans(u) */
/*                   is the conjugate-transpose of u).  thus the */
/*                   singular values of x and b are the same. */

/*     linpack. this version dated 03/19/79 . */
/*              correction to shift calculation made 2/85. */
/*     g.w. stewart, university of maryland, argonne national lab. */

/*     csvdc uses the following functions and subprograms. */

/*     external csrot */
/*     blas caxpy,cdotc,cscal,cswap,scnrm2,srotg */
/*     fortran abs,aimag,amax1,cabs,cmplx */
/*     fortran conjg,max0,min0,mod,real,sqrt */

/*     internal variables */

/*<    >*/
/*<       complex cdotc,t,r >*/
/*<    >*/
/*<       logical wantu,wantv >*/

/*<       complex csign,zdum,zdum1,zdum2 >*/
/*<       real cabs1 >*/
/*<       cabs1(zdum) = abs(real(zdum)) + abs(aimag(zdum)) >*/
/*<       csign(zdum1,zdum2) = cabs(zdum1)*(zdum2/cabs(zdum2)) >*/

/*     set the maximum number of iterations. */

/*<       maxit = 1000 >*/
    /* Parameter adjustments */
    x_dim1 = *ldx;
    x_offset = 1 + x_dim1;
    x -= x_offset;
    --s;
    --e;
    u_dim1 = *ldu;
    u_offset = 1 + u_dim1;
    u -= u_offset;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;
    --work;

    /* Function Body */
    maxit = 1000;

/*     determine what is to be computed. */

/*<       wantu = .false. >*/
    wantu = FALSE_;
/*<       wantv = .false. >*/
    wantv = FALSE_;
/*<       jobu = mod(job,100)/10 >*/
    jobu = *job % 100 / 10;
/*<       ncu = n >*/
    ncu = *n;
/*<       if (jobu .gt. 1) ncu = min0(n,p) >*/
    if (jobu > 1) {
        ncu = min(*n,*p);
    }
/*<       if (jobu .ne. 0) wantu = .true. >*/
    if (jobu != 0) {
        wantu = TRUE_;
    }
/*<       if (mod(job,10) .ne. 0) wantv = .true. >*/
    if (*job % 10 != 0) {
        wantv = TRUE_;
    }

/*     reduce x to bidiagonal form, storing the diagonal elements */
/*     in s and the super-diagonal elements in e. */

/*<       info = 0 >*/
    *info = 0;
/*<       nct = min0(n-1,p) >*/
/* Computing MIN */
    i__1 = *n - 1;
    nct = min(i__1,*p);
/*<       nrt = max0(0,min0(p-2,n)) >*/
/* Computing MAX */
/* Computing MIN */
    i__3 = *p - 2;
    i__1 = 0, i__2 = min(i__3,*n);
    nrt = max(i__1,i__2);
/*<       lu = max0(nct,nrt) >*/
    lu = max(nct,nrt);
/*<       if (lu .lt. 1) go to 170 >*/
    if (lu < 1) {
        goto L170;
    }
/*<       do 160 l = 1, lu >*/
    i__1 = lu;
    for (l = 1; l <= i__1; ++l) {
/*<          lp1 = l + 1 >*/
        lp1 = l + 1;
/*<          if (l .gt. nct) go to 20 >*/
        if (l > nct) {
            goto L20;
        }

/*           compute the transformation for the l-th column and */
/*           place the l-th diagonal in s(l). */

/*<             s(l) = cmplx(scnrm2(n-l+1,x(l,l),1),0.0e0) >*/
        i__2 = l;
        i__3 = *n - l + 1;
        r__1 = scnrm2_(&i__3, &x[l + l * x_dim1], &c__1);
        q__1.r = r__1, q__1.i = (float)0.;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<             if (cabs1(s(l)) .eq. 0.0e0) go to 10 >*/
        i__2 = l;
        if ((r__1 = s[i__2].r, dabs(r__1)) + (r__2 = r_imag(&s[l]), dabs(r__2)
                ) == (float)0.) {
            goto L10;
        }
/*<                if (cabs1(x(l,l)) .ne. 0.0e0) s(l) = csign(s(l),x(l,l)) >*/
        i__2 = l + l * x_dim1;
        if ((r__1 = x[i__2].r, dabs(r__1)) + (r__2 = r_imag(&x[l + l * x_dim1]
                ), dabs(r__2)) != (float)0.) {
            i__3 = l;
            r__3 = c_abs(&s[l]);
            i__4 = l + l * x_dim1;
            r__4 = c_abs(&x[l + l * x_dim1]);
            q__2.r = x[i__4].r / r__4, q__2.i = x[i__4].i / r__4;
            q__1.r = r__3 * q__2.r, q__1.i = r__3 * q__2.i;
            s[i__3].r = q__1.r, s[i__3].i = q__1.i;
        }
/*<                call cscal(n-l+1,1.0e0/s(l),x(l,l),1) >*/
        i__2 = *n - l + 1;
        c_div(&q__1, &c_b8, &s[l]);
        cscal_(&i__2, &q__1, &x[l + l * x_dim1], &c__1);
/*<                x(l,l) = (1.0e0,0.0e0) + x(l,l) >*/
        i__2 = l + l * x_dim1;
        i__3 = l + l * x_dim1;
        q__1.r = x[i__3].r + (float)1., q__1.i = x[i__3].i + (float)0.;
        x[i__2].r = q__1.r, x[i__2].i = q__1.i;
/*<    10       continue >*/
L10:
/*<             s(l) = -s(l) >*/
        i__2 = l;
        i__3 = l;
        q__1.r = -s[i__3].r, q__1.i = -s[i__3].i;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<    20    continue >*/
L20:
/*<          if (p .lt. lp1) go to 50 >*/
        if (*p < lp1) {
            goto L50;
        }
/*<          do 40 j = lp1, p >*/
        i__2 = *p;
        for (j = lp1; j <= i__2; ++j) {
/*<             if (l .gt. nct) go to 30 >*/
            if (l > nct) {
                goto L30;
            }
/*<             if (cabs1(s(l)) .eq. 0.0e0) go to 30 >*/
            i__3 = l;
            if ((r__1 = s[i__3].r, dabs(r__1)) + (r__2 = r_imag(&s[l]), dabs(
                    r__2)) == (float)0.) {
                goto L30;
            }

/*              apply the transformation. */

/*<                t = -cdotc(n-l+1,x(l,l),1,x(l,j),1)/x(l,l) >*/
            i__3 = *n - l + 1;
            cdotc_(&q__3, &i__3, &x[l + l * x_dim1], &c__1, &x[l + j * x_dim1]
                    , &c__1);
            q__2.r = -q__3.r, q__2.i = -q__3.i;
            c_div(&q__1, &q__2, &x[l + l * x_dim1]);
            t.r = q__1.r, t.i = q__1.i;
/*<                call caxpy(n-l+1,t,x(l,l),1,x(l,j),1) >*/
            i__3 = *n - l + 1;
            caxpy_(&i__3, &t, &x[l + l * x_dim1], &c__1, &x[l + j * x_dim1], &
                    c__1);
/*<    30       continue >*/
L30:

/*           place the l-th row of x into  e for the */
/*           subsequent calculation of the row transformation. */

/*<             e(j) = conjg(x(l,j)) >*/
            i__3 = j;
            r_cnjg(&q__1, &x[l + j * x_dim1]);
            e[i__3].r = q__1.r, e[i__3].i = q__1.i;
/*<    40    continue >*/
/* L40: */
        }
/*<    50    continue >*/
L50:
/*<          if (.not.wantu .or. l .gt. nct) go to 70 >*/
        if (! wantu || l > nct) {
            goto L70;
        }

/*           place the transformation in u for subsequent back */
/*           multiplication. */

/*<             do 60 i = l, n >*/
        i__2 = *n;
        for (i__ = l; i__ <= i__2; ++i__) {
/*<                u(i,l) = x(i,l) >*/
            i__3 = i__ + l * u_dim1;
            i__4 = i__ + l * x_dim1;
            u[i__3].r = x[i__4].r, u[i__3].i = x[i__4].i;
/*<    60       continue >*/
/* L60: */
        }
/*<    70    continue >*/
L70:
/*<          if (l .gt. nrt) go to 150 >*/
        if (l > nrt) {
            goto L150;
        }

/*           compute the l-th row transformation and place the */
/*           l-th super-diagonal in e(l). */

/*<             e(l) = cmplx(scnrm2(p-l,e(lp1),1),0.0e0) >*/
        i__2 = l;
        i__3 = *p - l;
        r__1 = scnrm2_(&i__3, &e[lp1], &c__1);
        q__1.r = r__1, q__1.i = (float)0.;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<             if (cabs1(e(l)) .eq. 0.0e0) go to 80 >*/
        i__2 = l;
        if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[l]), dabs(r__2)
                ) == (float)0.) {
            goto L80;
        }
/*<                if (cabs1(e(lp1)) .ne. 0.0e0) e(l) = csign(e(l),e(lp1)) >*/
        i__2 = lp1;
        if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[lp1]), dabs(
                r__2)) != (float)0.) {
            i__3 = l;
            r__3 = c_abs(&e[l]);
            i__4 = lp1;
            r__4 = c_abs(&e[lp1]);
            q__2.r = e[i__4].r / r__4, q__2.i = e[i__4].i / r__4;
            q__1.r = r__3 * q__2.r, q__1.i = r__3 * q__2.i;
            e[i__3].r = q__1.r, e[i__3].i = q__1.i;
        }
/*<                call cscal(p-l,1.0e0/e(l),e(lp1),1) >*/
        i__2 = *p - l;
        c_div(&q__1, &c_b8, &e[l]);
        cscal_(&i__2, &q__1, &e[lp1], &c__1);
/*<                e(lp1) = (1.0e0,0.0e0) + e(lp1) >*/
        i__2 = lp1;
        i__3 = lp1;
        q__1.r = e[i__3].r + (float)1., q__1.i = e[i__3].i + (float)0.;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<    80       continue >*/
L80:
/*<             e(l) = -conjg(e(l)) >*/
        i__2 = l;
        r_cnjg(&q__2, &e[l]);
        q__1.r = -q__2.r, q__1.i = -q__2.i;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<             if (lp1 .gt. n .or. cabs1(e(l)) .eq. 0.0e0) go to 120 >*/
        i__2 = l;
        if (lp1 > *n || (r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[l])
                , dabs(r__2)) == (float)0.) {
            goto L120;
        }

/*              apply the transformation. */

/*<                do 90 i = lp1, n >*/
        i__2 = *n;
        for (i__ = lp1; i__ <= i__2; ++i__) {
/*<                   work(i) = (0.0e0,0.0e0) >*/
            i__3 = i__;
            work[i__3].r = (float)0., work[i__3].i = (float)0.;
/*<    90          continue >*/
/* L90: */
        }
/*<                do 100 j = lp1, p >*/
        i__2 = *p;
        for (j = lp1; j <= i__2; ++j) {
/*<                   call caxpy(n-l,e(j),x(lp1,j),1,work(lp1),1) >*/
            i__3 = *n - l;
            caxpy_(&i__3, &e[j], &x[lp1 + j * x_dim1], &c__1, &work[lp1], &
                    c__1);
/*<   100          continue >*/
/* L100: */
        }
/*<                do 110 j = lp1, p >*/
        i__2 = *p;
        for (j = lp1; j <= i__2; ++j) {
/*<    >*/
            i__3 = *n - l;
            i__4 = j;
            q__3.r = -e[i__4].r, q__3.i = -e[i__4].i;
            c_div(&q__2, &q__3, &e[lp1]);
            r_cnjg(&q__1, &q__2);
            caxpy_(&i__3, &q__1, &work[lp1], &c__1, &x[lp1 + j * x_dim1], &
                    c__1);
/*<   110          continue >*/
/* L110: */
        }
/*<   120       continue >*/
L120:
/*<             if (.not.wantv) go to 140 >*/
        if (! wantv) {
            goto L140;
        }

/*              place the transformation in v for subsequent */
/*              back multiplication. */

/*<                do 130 i = lp1, p >*/
        i__2 = *p;
        for (i__ = lp1; i__ <= i__2; ++i__) {
/*<                   v(i,l) = e(i) >*/
            i__3 = i__ + l * v_dim1;
            i__4 = i__;
            v[i__3].r = e[i__4].r, v[i__3].i = e[i__4].i;
/*<   130          continue >*/
/* L130: */
        }
/*<   140       continue >*/
L140:
/*<   150    continue >*/
L150:
/*<   160 continue >*/
/* L160: */
        ;
    }
/*<   170 continue >*/
L170:

/*     set up the final bidiagonal matrix of order m. */

/*<       m = min0(p,n+1) >*/
/* Computing MIN */
    i__1 = *p, i__2 = *n + 1;
    m = min(i__1,i__2);
/*<       nctp1 = nct + 1 >*/
    nctp1 = nct + 1;
/*<       nrtp1 = nrt + 1 >*/
    nrtp1 = nrt + 1;
/*<       if (nct .lt. p) s(nctp1) = x(nctp1,nctp1) >*/
    if (nct < *p) {
        i__1 = nctp1;
        i__2 = nctp1 + nctp1 * x_dim1;
        s[i__1].r = x[i__2].r, s[i__1].i = x[i__2].i;
    }
/*<       if (n .lt. m) s(m) = (0.0e0,0.0e0) >*/
    if (*n < m) {
        i__1 = m;
        s[i__1].r = (float)0., s[i__1].i = (float)0.;
    }
/*<       if (nrtp1 .lt. m) e(nrtp1) = x(nrtp1,m) >*/
    if (nrtp1 < m) {
        i__1 = nrtp1;
        i__2 = nrtp1 + m * x_dim1;
        e[i__1].r = x[i__2].r, e[i__1].i = x[i__2].i;
    }
/*<       e(m) = (0.0e0,0.0e0) >*/
    i__1 = m;
    e[i__1].r = (float)0., e[i__1].i = (float)0.;

/*     if required, generate u. */

/*<       if (.not.wantu) go to 300 >*/
    if (! wantu) {
        goto L300;
    }
/*<          if (ncu .lt. nctp1) go to 200 >*/
    if (ncu < nctp1) {
        goto L200;
    }
/*<          do 190 j = nctp1, ncu >*/
    i__1 = ncu;
    for (j = nctp1; j <= i__1; ++j) {
/*<             do 180 i = 1, n >*/
        i__2 = *n;
        for (i__ = 1; i__ <= i__2; ++i__) {
/*<                u(i,j) = (0.0e0,0.0e0) >*/
            i__3 = i__ + j * u_dim1;
            u[i__3].r = (float)0., u[i__3].i = (float)0.;
/*<   180       continue >*/
/* L180: */
        }
/*<             u(j,j) = (1.0e0,0.0e0) >*/
        i__2 = j + j * u_dim1;
        u[i__2].r = (float)1., u[i__2].i = (float)0.;
/*<   190    continue >*/
/* L190: */
    }
/*<   200    continue >*/
L200:
/*<          if (nct .lt. 1) go to 290 >*/
    if (nct < 1) {
        goto L290;
    }
/*<          do 280 ll = 1, nct >*/
    i__1 = nct;
    for (ll = 1; ll <= i__1; ++ll) {
/*<             l = nct - ll + 1 >*/
        l = nct - ll + 1;
/*<             if (cabs1(s(l)) .eq. 0.0e0) go to 250 >*/
        i__2 = l;
        if ((r__1 = s[i__2].r, dabs(r__1)) + (r__2 = r_imag(&s[l]), dabs(r__2)
                ) == (float)0.) {
            goto L250;
        }
/*<                lp1 = l + 1 >*/
        lp1 = l + 1;
/*<                if (ncu .lt. lp1) go to 220 >*/
        if (ncu < lp1) {
            goto L220;
        }
/*<                do 210 j = lp1, ncu >*/
        i__2 = ncu;
        for (j = lp1; j <= i__2; ++j) {
/*<                   t = -cdotc(n-l+1,u(l,l),1,u(l,j),1)/u(l,l) >*/
            i__3 = *n - l + 1;
            cdotc_(&q__3, &i__3, &u[l + l * u_dim1], &c__1, &u[l + j * u_dim1]
                    , &c__1);
            q__2.r = -q__3.r, q__2.i = -q__3.i;
            c_div(&q__1, &q__2, &u[l + l * u_dim1]);
            t.r = q__1.r, t.i = q__1.i;
/*<                   call caxpy(n-l+1,t,u(l,l),1,u(l,j),1) >*/
            i__3 = *n - l + 1;
            caxpy_(&i__3, &t, &u[l + l * u_dim1], &c__1, &u[l + j * u_dim1], &
                    c__1);
/*<   210          continue >*/
/* L210: */
        }
/*<   220          continue >*/
L220:
/*<                call cscal(n-l+1,(-1.0e0,0.0e0),u(l,l),1) >*/
        i__2 = *n - l + 1;
        cscal_(&i__2, &c_b53, &u[l + l * u_dim1], &c__1);
/*<                u(l,l) = (1.0e0,0.0e0) + u(l,l) >*/
        i__2 = l + l * u_dim1;
        i__3 = l + l * u_dim1;
        q__1.r = u[i__3].r + (float)1., q__1.i = u[i__3].i + (float)0.;
        u[i__2].r = q__1.r, u[i__2].i = q__1.i;
/*<                lm1 = l - 1 >*/
        lm1 = l - 1;
/*<                if (lm1 .lt. 1) go to 240 >*/
        if (lm1 < 1) {
            goto L240;
        }
/*<                do 230 i = 1, lm1 >*/
        i__2 = lm1;
        for (i__ = 1; i__ <= i__2; ++i__) {
/*<                   u(i,l) = (0.0e0,0.0e0) >*/
            i__3 = i__ + l * u_dim1;
            u[i__3].r = (float)0., u[i__3].i = (float)0.;
/*<   230          continue >*/
/* L230: */
        }
/*<   240          continue >*/
L240:
/*<             go to 270 >*/
        goto L270;
/*<   250       continue >*/
L250:
/*<                do 260 i = 1, n >*/
        i__2 = *n;
        for (i__ = 1; i__ <= i__2; ++i__) {
/*<                   u(i,l) = (0.0e0,0.0e0) >*/
            i__3 = i__ + l * u_dim1;
            u[i__3].r = (float)0., u[i__3].i = (float)0.;
/*<   260          continue >*/
/* L260: */
        }
/*<                u(l,l) = (1.0e0,0.0e0) >*/
        i__2 = l + l * u_dim1;
        u[i__2].r = (float)1., u[i__2].i = (float)0.;
/*<   270       continue >*/
L270:
/*<   280    continue >*/
/* L280: */
        ;
    }
/*<   290    continue >*/
L290:
/*<   300 continue >*/
L300:

/*     if it is required, generate v. */

/*<       if (.not.wantv) go to 350 >*/
    if (! wantv) {
        goto L350;
    }
/*<          do 340 ll = 1, p >*/
    i__1 = *p;
    for (ll = 1; ll <= i__1; ++ll) {
/*<             l = p - ll + 1 >*/
        l = *p - ll + 1;
/*<             lp1 = l + 1 >*/
        lp1 = l + 1;
/*<             if (l .gt. nrt) go to 320 >*/
        if (l > nrt) {
            goto L320;
        }
/*<             if (cabs1(e(l)) .eq. 0.0e0) go to 320 >*/
        i__2 = l;
        if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[l]), dabs(r__2)
                ) == (float)0.) {
            goto L320;
        }
/*<                do 310 j = lp1, p >*/
        i__2 = *p;
        for (j = lp1; j <= i__2; ++j) {
/*<                   t = -cdotc(p-l,v(lp1,l),1,v(lp1,j),1)/v(lp1,l) >*/
            i__3 = *p - l;
            cdotc_(&q__3, &i__3, &v[lp1 + l * v_dim1], &c__1, &v[lp1 + j * 
                    v_dim1], &c__1);
            q__2.r = -q__3.r, q__2.i = -q__3.i;
            c_div(&q__1, &q__2, &v[lp1 + l * v_dim1]);
            t.r = q__1.r, t.i = q__1.i;
/*<                   call caxpy(p-l,t,v(lp1,l),1,v(lp1,j),1) >*/
            i__3 = *p - l;
            caxpy_(&i__3, &t, &v[lp1 + l * v_dim1], &c__1, &v[lp1 + j * 
                    v_dim1], &c__1);
/*<   310          continue >*/
/* L310: */
        }
/*<   320       continue >*/
L320:
/*<             do 330 i = 1, p >*/
        i__2 = *p;
        for (i__ = 1; i__ <= i__2; ++i__) {
/*<                v(i,l) = (0.0e0,0.0e0) >*/
            i__3 = i__ + l * v_dim1;
            v[i__3].r = (float)0., v[i__3].i = (float)0.;
/*<   330       continue >*/
/* L330: */
        }
/*<             v(l,l) = (1.0e0,0.0e0) >*/
        i__2 = l + l * v_dim1;
        v[i__2].r = (float)1., v[i__2].i = (float)0.;
/*<   340    continue >*/
/* L340: */
    }
/*<   350 continue >*/
L350:

/*     transform s and e so that they are real. */

/*<       do 380 i = 1, m >*/
    i__1 = m;
    for (i__ = 1; i__ <= i__1; ++i__) {
/*<          if (cabs1(s(i)) .eq. 0.0e0) go to 360 >*/
        i__2 = i__;
        if ((r__1 = s[i__2].r, dabs(r__1)) + (r__2 = r_imag(&s[i__]), dabs(
                r__2)) == (float)0.) {
            goto L360;
        }
/*<             t = cmplx(cabs(s(i)),0.0e0) >*/
        r__1 = c_abs(&s[i__]);
        q__1.r = r__1, q__1.i = (float)0.;
        t.r = q__1.r, t.i = q__1.i;
/*<             r = s(i)/t >*/
        c_div(&q__1, &s[i__], &t);
        r__.r = q__1.r, r__.i = q__1.i;
/*<             s(i) = t >*/
        i__2 = i__;
        s[i__2].r = t.r, s[i__2].i = t.i;
/*<             if (i .lt. m) e(i) = e(i)/r >*/
        if (i__ < m) {
            i__2 = i__;
            c_div(&q__1, &e[i__], &r__);
            e[i__2].r = q__1.r, e[i__2].i = q__1.i;
        }
/*<             if (wantu) call cscal(n,r,u(1,i),1) >*/
        if (wantu) {
            cscal_(n, &r__, &u[i__ * u_dim1 + 1], &c__1);
        }
/*<   360    continue >*/
L360:
/*     ...exit */
/*<          if (i .eq. m) go to 390 >*/
        if (i__ == m) {
            goto L390;
        }
/*<          if (cabs1(e(i)) .eq. 0.0e0) go to 370 >*/
        i__2 = i__;
        if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[i__]), dabs(
                r__2)) == (float)0.) {
            goto L370;
        }
/*<             t = cmplx(cabs(e(i)),0.0e0) >*/
        r__1 = c_abs(&e[i__]);
        q__1.r = r__1, q__1.i = (float)0.;
        t.r = q__1.r, t.i = q__1.i;
/*<             r = t/e(i) >*/
        c_div(&q__1, &t, &e[i__]);
        r__.r = q__1.r, r__.i = q__1.i;
/*<             e(i) = t >*/
        i__2 = i__;
        e[i__2].r = t.r, e[i__2].i = t.i;
/*<             s(i+1) = s(i+1)*r >*/
        i__2 = i__ + 1;
        i__3 = i__ + 1;
        q__1.r = s[i__3].r * r__.r - s[i__3].i * r__.i, q__1.i = s[i__3].r * 
                r__.i + s[i__3].i * r__.r;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<             if (wantv) call cscal(p,r,v(1,i+1),1) >*/
        if (wantv) {
            cscal_(p, &r__, &v[(i__ + 1) * v_dim1 + 1], &c__1);
        }
/*<   370    continue >*/
L370:
/*<   380 continue >*/
/* L380: */
        ;
    }
/*<   390 continue >*/
L390:

/*     main iteration loop for the singular values. */

/*<       mm = m >*/
    mm = m;
/*<       iter = 0 >*/
    iter = 0;
/*<   400 continue >*/
L400:

/*        quit if all the singular values have been found. */

/*     ...exit */
/*<          if (m .eq. 0) go to 660 >*/
    if (m == 0) {
        goto L660;
    }

/*        if too many iterations have been performed, set */
/*        flag and return. */

/*<          if (iter .lt. maxit) go to 410 >*/
    if (iter < maxit) {
        goto L410;
    }
/*<             info = m >*/
    *info = m;
/*     ......exit */
/*<             go to 660 >*/
    goto L660;
/*<   410    continue >*/
L410:

/*        this section of the program inspects for */
/*        negligible elements in the s and e arrays.  on */
/*        completion the variables kase and l are set as follows. */

/*           kase = 1     if s(m) and e(l-1) are negligible and l.lt.m */
/*           kase = 2     if s(l) is negligible and l.lt.m */
/*           kase = 3     if e(l-1) is negligible, l.lt.m, and */
/*                        s(l), ..., s(m) are not negligible (qr step). */
/*           kase = 4     if e(m-1) is negligible (convergence). */

/*<          do 430 ll = 1, m >*/
    i__1 = m;
    for (ll = 1; ll <= i__1; ++ll) {
/*<             l = m - ll >*/
        l = m - ll;
/*        ...exit */
/*<             if (l .eq. 0) go to 440 >*/
        if (l == 0) {
            goto L440;
        }
/*<             test = cabs(s(l)) + cabs(s(l+1)) >*/
        test = c_abs(&s[l]) + c_abs(&s[l + 1]);
/*<             ztest = test + cabs(e(l)) >*/
        ztest = test + c_abs(&e[l]);
/*<             if (ztest .ne. test) go to 420 >*/
        if (ztest != test) {
            goto L420;
        }
/*<                e(l) = (0.0e0,0.0e0) >*/
        i__2 = l;
        e[i__2].r = (float)0., e[i__2].i = (float)0.;
/*        ......exit */
/*<                go to 440 >*/
        goto L440;
/*<   420       continue >*/
L420:
/*<   430    continue >*/
/* L430: */
        ;
    }
/*<   440    continue >*/
L440:
/*<          if (l .ne. m - 1) go to 450 >*/
    if (l != m - 1) {
        goto L450;
    }
/*<             kase = 4 >*/
    kase = 4;
/*<          go to 520 >*/
    goto L520;
/*<   450    continue >*/
L450:
/*<             lp1 = l + 1 >*/
    lp1 = l + 1;
/*<             mp1 = m + 1 >*/
    mp1 = m + 1;
/*<             do 470 lls = lp1, mp1 >*/
    i__1 = mp1;
    for (lls = lp1; lls <= i__1; ++lls) {
/*<                ls = m - lls + lp1 >*/
        ls = m - lls + lp1;
/*           ...exit */
/*<                if (ls .eq. l) go to 480 >*/
        if (ls == l) {
            goto L480;
        }
/*<                test = 0.0e0 >*/
        test = (float)0.;
/*<                if (ls .ne. m) test = test + cabs(e(ls)) >*/
        if (ls != m) {
            test += c_abs(&e[ls]);
        }
/*<                if (ls .ne. l + 1) test = test + cabs(e(ls-1)) >*/
        if (ls != l + 1) {
            test += c_abs(&e[ls - 1]);
        }
/*<                ztest = test + cabs(s(ls)) >*/
        ztest = test + c_abs(&s[ls]);
/*<                if (ztest .ne. test) go to 460 >*/
        if (ztest != test) {
            goto L460;
        }
/*<                   s(ls) = (0.0e0,0.0e0) >*/
        i__2 = ls;
        s[i__2].r = (float)0., s[i__2].i = (float)0.;
/*           ......exit */
/*<                   go to 480 >*/
        goto L480;
/*<   460          continue >*/
L460:
/*<   470       continue >*/
/* L470: */
        ;
    }
/*<   480       continue >*/
L480:
/*<             if (ls .ne. l) go to 490 >*/
    if (ls != l) {
        goto L490;
    }
/*<                kase = 3 >*/
    kase = 3;
/*<             go to 510 >*/
    goto L510;
/*<   490       continue >*/
L490:
/*<             if (ls .ne. m) go to 500 >*/
    if (ls != m) {
        goto L500;
    }
/*<                kase = 1 >*/
    kase = 1;
/*<             go to 510 >*/
    goto L510;
/*<   500       continue >*/
L500:
/*<                kase = 2 >*/
    kase = 2;
/*<                l = ls >*/
    l = ls;
/*<   510       continue >*/
L510:
/*<   520    continue >*/
L520:
/*<          l = l + 1 >*/
    ++l;

/*        perform the task indicated by kase. */

/*<          go to (530, 560, 580, 610), kase >*/
    switch (kase) {
        case 1:  goto L530;
        case 2:  goto L560;
        case 3:  goto L580;
        case 4:  goto L610;
    }

/*        deflate negligible s(m). */

/*<   530    continue >*/
L530:
/*<             mm1 = m - 1 >*/
    mm1 = m - 1;
/*<             f = real(e(m-1)) >*/
    i__1 = m - 1;
    f = e[i__1].r;
/*<             e(m-1) = (0.0e0,0.0e0) >*/
    i__1 = m - 1;
    e[i__1].r = (float)0., e[i__1].i = (float)0.;
/*<             do 550 kk = l, mm1 >*/
    i__1 = mm1;
    for (kk = l; kk <= i__1; ++kk) {
/*<                k = mm1 - kk + l >*/
        k = mm1 - kk + l;
/*<                t1 = real(s(k)) >*/
        i__2 = k;
        t1 = s[i__2].r;
/*<                call srotg(t1,f,cs,sn) >*/
        srotg_(&t1, &f, &cs, &sn);
/*<                s(k) = cmplx(t1,0.0e0) >*/
        i__2 = k;
        q__1.r = t1, q__1.i = (float)0.;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<                if (k .eq. l) go to 540 >*/
        if (k == l) {
            goto L540;
        }
/*<                   f = -sn*real(e(k-1)) >*/
        i__2 = k - 1;
        f = -sn * e[i__2].r;
/*<                   e(k-1) = cs*e(k-1) >*/
        i__2 = k - 1;
        i__3 = k - 1;
        q__1.r = cs * e[i__3].r, q__1.i = cs * e[i__3].i;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<   540          continue >*/
L540:
/*<                if (wantv) call csrot(p,v(1,k),1,v(1,m),1,cs,sn) >*/
        if (wantv) {
            csrot_(p, &v[k * v_dim1 + 1], &c__1, &v[m * v_dim1 + 1], &c__1, &
                    cs, &sn);
        }
/*<   550       continue >*/
/* L550: */
    }
/*<          go to 650 >*/
    goto L650;

/*        split at negligible s(l). */

/*<   560    continue >*/
L560:
/*<             f = real(e(l-1)) >*/
    i__1 = l - 1;
    f = e[i__1].r;
/*<             e(l-1) = (0.0e0,0.0e0) >*/
    i__1 = l - 1;
    e[i__1].r = (float)0., e[i__1].i = (float)0.;
/*<             do 570 k = l, m >*/
    i__1 = m;
    for (k = l; k <= i__1; ++k) {
/*<                t1 = real(s(k)) >*/
        i__2 = k;
        t1 = s[i__2].r;
/*<                call srotg(t1,f,cs,sn) >*/
        srotg_(&t1, &f, &cs, &sn);
/*<                s(k) = cmplx(t1,0.0e0) >*/
        i__2 = k;
        q__1.r = t1, q__1.i = (float)0.;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<                f = -sn*real(e(k)) >*/
        i__2 = k;
        f = -sn * e[i__2].r;
/*<                e(k) = cs*e(k) >*/
        i__2 = k;
        i__3 = k;
        q__1.r = cs * e[i__3].r, q__1.i = cs * e[i__3].i;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<                if (wantu) call csrot(n,u(1,k),1,u(1,l-1),1,cs,sn) >*/
        if (wantu) {
            csrot_(n, &u[k * u_dim1 + 1], &c__1, &u[(l - 1) * u_dim1 + 1], &
                    c__1, &cs, &sn);
        }
/*<   570       continue >*/
/* L570: */
    }
/*<          go to 650 >*/
    goto L650;

/*        perform one qr step. */

/*<   580    continue >*/
L580:

/*           calculate the shift. */

/*<    >*/
/* Computing MAX */
    r__1 = c_abs(&s[m]), r__2 = c_abs(&s[m - 1]), r__1 = max(r__1,r__2), r__2 
            = c_abs(&e[m - 1]), r__1 = max(r__1,r__2), r__2 = c_abs(&s[l]), 
            r__1 = max(r__1,r__2), r__2 = c_abs(&e[l]);
    scale = dmax(r__1,r__2);
/*<             sm = real(s(m))/scale >*/
    i__1 = m;
    sm = s[i__1].r / scale;
/*<             smm1 = real(s(m-1))/scale >*/
    i__1 = m - 1;
    smm1 = s[i__1].r / scale;
/*<             emm1 = real(e(m-1))/scale >*/
    i__1 = m - 1;
    emm1 = e[i__1].r / scale;
/*<             sl = real(s(l))/scale >*/
    i__1 = l;
    sl = s[i__1].r / scale;
/*<             el = real(e(l))/scale >*/
    i__1 = l;
    el = e[i__1].r / scale;
/*<             b = ((smm1 + sm)*(smm1 - sm) + emm1**2)/2.0e0 >*/
/* Computing 2nd power */
    r__1 = emm1;
    b = ((smm1 + sm) * (smm1 - sm) + r__1 * r__1) / (float)2.;
/*<             c = (sm*emm1)**2 >*/
/* Computing 2nd power */
    r__1 = sm * emm1;
    c__ = r__1 * r__1;
/*<             shift = 0.0e0 >*/
    shift = (float)0.;
/*<             if (b .eq. 0.0e0 .and. c .eq. 0.0e0) go to 590 >*/
    if (b == (float)0. && c__ == (float)0.) {
        goto L590;
    }
/*<                shift = sqrt(b**2+c) >*/
/* Computing 2nd power */
    r__1 = b;
    shift = sqrt(r__1 * r__1 + c__);
/*<                if (b .lt. 0.0e0) shift = -shift >*/
    if (b < (float)0.) {
        shift = -shift;
    }
/*<                shift = c/(b + shift) >*/
    shift = c__ / (b + shift);
/*<   590       continue >*/
L590:
/*<             f = (sl + sm)*(sl - sm) + shift >*/
    f = (sl + sm) * (sl - sm) + shift;
/*<             g = sl*el >*/
    g = sl * el;

/*           chase zeros. */

/*<             mm1 = m - 1 >*/
    mm1 = m - 1;
/*<             do 600 k = l, mm1 >*/
    i__1 = mm1;
    for (k = l; k <= i__1; ++k) {
/*<                call srotg(f,g,cs,sn) >*/
        srotg_(&f, &g, &cs, &sn);
/*<                if (k .ne. l) e(k-1) = cmplx(f,0.0e0) >*/
        if (k != l) {
            i__2 = k - 1;
            q__1.r = f, q__1.i = (float)0.;
            e[i__2].r = q__1.r, e[i__2].i = q__1.i;
        }
/*<                f = cs*real(s(k)) + sn*real(e(k)) >*/
        i__2 = k;
        i__3 = k;
        f = cs * s[i__2].r + sn * e[i__3].r;
/*<                e(k) = cs*e(k) - sn*s(k) >*/
        i__2 = k;
        i__3 = k;
        q__2.r = cs * e[i__3].r, q__2.i = cs * e[i__3].i;
        i__4 = k;
        q__3.r = sn * s[i__4].r, q__3.i = sn * s[i__4].i;
        q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<                g = sn*real(s(k+1)) >*/
        i__2 = k + 1;
        g = sn * s[i__2].r;
/*<                s(k+1) = cs*s(k+1) >*/
        i__2 = k + 1;
        i__3 = k + 1;
        q__1.r = cs * s[i__3].r, q__1.i = cs * s[i__3].i;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<                if (wantv) call csrot(p,v(1,k),1,v(1,k+1),1,cs,sn) >*/
        if (wantv) {
            csrot_(p, &v[k * v_dim1 + 1], &c__1, &v[(k + 1) * v_dim1 + 1], &
                    c__1, &cs, &sn);
        }
/*<                call srotg(f,g,cs,sn) >*/
        srotg_(&f, &g, &cs, &sn);
/*<                s(k) = cmplx(f,0.0e0) >*/
        i__2 = k;
        q__1.r = f, q__1.i = (float)0.;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<                f = cs*real(e(k)) + sn*real(s(k+1)) >*/
        i__2 = k;
        i__3 = k + 1;
        f = cs * e[i__2].r + sn * s[i__3].r;
/*<                s(k+1) = -sn*e(k) + cs*s(k+1) >*/
        i__2 = k + 1;
        r__1 = -sn;
        i__3 = k;
        q__2.r = r__1 * e[i__3].r, q__2.i = r__1 * e[i__3].i;
        i__4 = k + 1;
        q__3.r = cs * s[i__4].r, q__3.i = cs * s[i__4].i;
        q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<                g = sn*real(e(k+1)) >*/
        i__2 = k + 1;
        g = sn * e[i__2].r;
/*<                e(k+1) = cs*e(k+1) >*/
        i__2 = k + 1;
        i__3 = k + 1;
        q__1.r = cs * e[i__3].r, q__1.i = cs * e[i__3].i;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<    >*/
        if (wantu && k < *n) {
            csrot_(n, &u[k * u_dim1 + 1], &c__1, &u[(k + 1) * u_dim1 + 1], &
                    c__1, &cs, &sn);
        }
/*<   600       continue >*/
/* L600: */
    }
/*<             e(m-1) = cmplx(f,0.0e0) >*/
    i__1 = m - 1;
    q__1.r = f, q__1.i = (float)0.;
    e[i__1].r = q__1.r, e[i__1].i = q__1.i;
/*<             iter = iter + 1 >*/
    ++iter;
/*<          go to 650 >*/
    goto L650;

/*        convergence. */

/*<   610    continue >*/
L610:

/*           make the singular value  positive */

/*<             if (real(s(l)) .ge. 0.0e0) go to 620 >*/
    i__1 = l;
    if (s[i__1].r >= (float)0.) {
        goto L620;
    }
/*<                s(l) = -s(l) >*/
    i__1 = l;
    i__2 = l;
    q__1.r = -s[i__2].r, q__1.i = -s[i__2].i;
    s[i__1].r = q__1.r, s[i__1].i = q__1.i;
/*<                if (wantv) call cscal(p,(-1.0e0,0.0e0),v(1,l),1) >*/
    if (wantv) {
        cscal_(p, &c_b53, &v[l * v_dim1 + 1], &c__1);
    }
/*<   620       continue >*/
L620:

/*           order the singular value. */

/*<   630       if (l .eq. mm) go to 640 >*/
L630:
    if (l == mm) {
        goto L640;
    }
/*           ...exit */
/*<                if (real(s(l)) .ge. real(s(l+1))) go to 640 >*/
    i__1 = l;
    i__2 = l + 1;
    if (s[i__1].r >= s[i__2].r) {
        goto L640;
    }
/*<                t = s(l) >*/
    i__1 = l;
    t.r = s[i__1].r, t.i = s[i__1].i;
/*<                s(l) = s(l+1) >*/
    i__1 = l;
    i__2 = l + 1;
    s[i__1].r = s[i__2].r, s[i__1].i = s[i__2].i;
/*<                s(l+1) = t >*/
    i__1 = l + 1;
    s[i__1].r = t.r, s[i__1].i = t.i;
/*<    >*/
    if (wantv && l < *p) {
        cswap_(p, &v[l * v_dim1 + 1], &c__1, &v[(l + 1) * v_dim1 + 1], &c__1);
    }
/*<    >*/
    if (wantu && l < *n) {
        cswap_(n, &u[l * u_dim1 + 1], &c__1, &u[(l + 1) * u_dim1 + 1], &c__1);
    }
/*<                l = l + 1 >*/
    ++l;
/*<             go to 630 >*/
    goto L630;
/*<   640       continue >*/
L640:
/*<             iter = 0 >*/
    iter = 0;
/*<             m = m - 1 >*/
    --m;
/*<   650    continue >*/
L650:
/*<       go to 400 >*/
    goto L400;
/*<   660 continue >*/
L660:
/*<       return >*/
    return 0;
/*<       end >*/
} /* csvdc_ */
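/* Editor's sketch (not part of CSVDC): a minimal driver showing how the
   routine above is typically called from C once it has been run through f2c.
   "f2c.h" (for the integer/real/complex typedefs), the 3x2 test matrix and
   the job code 11 (left vectors in U, right vectors in V) are illustrative
   assumptions. */
#include <stdio.h>
#include "f2c.h"

extern int csvdc_(complex *x, integer *ldx, integer *n, integer *p,
                  complex *s, complex *e, complex *u, integer *ldu,
                  complex *v, integer *ldv, complex *work,
                  integer *job, integer *info);

int main(void)
{
    integer n = 3, p = 2, ldx = 3, ldu = 3, ldv = 2, job = 11, info = 0;
    /* 3x2 matrix stored column-major, as the Fortran-derived code expects */
    complex x[6] = { {1.f, 0.f}, {0.f, 1.f}, {2.f, 0.f},
                     {0.f, 0.f}, {3.f, 0.f}, {0.f, -1.f} };
    complex s[2], e[2], u[9], v[4], work[3]; /* s: min(n+1,p), e: p, work: n */

    csvdc_(x, &ldx, &n, &p, s, e, u, &ldu, v, &ldv, work, &job, &info);

    if (info == 0) {
        int i;
        for (i = 0; i < 2; ++i)              /* min(n,p) singular values */
            printf("s[%d] = %g\n", i, (double) s[i].r);
    }
    return 0;
}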
Example #26
/* Subroutine */ int claror_(char *side, char *init, integer *m, integer *n, 
	complex *a, integer *lda, integer *iseed, complex *x, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    complex q__1, q__2;

    /* Builtin functions */
    double c_abs(complex *);
    void r_cnjg(complex *, complex *);

    /* Local variables */
    static integer kbeg, jcol;
    static real xabs;
    static integer irow, j;
    extern /* Subroutine */ int cgerc_(integer *, integer *, complex *, 
	    complex *, integer *, complex *, integer *, complex *, integer *),
	     cscal_(integer *, complex *, complex *, integer *);
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
	    , complex *, integer *, complex *, integer *, complex *, complex *
	    , integer *);
    static complex csign;
    static integer ixfrm, itype, nxfrm;
    static real xnorm;
    extern real scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */ int clacgv_(integer *, complex *, integer *);
    extern /* Complex */ VOID clarnd_(complex *, integer *, integer *);
    extern /* Subroutine */ int claset_(char *, integer *, integer *, complex 
	    *, complex *, complex *, integer *), xerbla_(char *, 
	    integer *);
    static real factor;
    static complex xnorms;


/*  -- LAPACK auxiliary test routine (version 2.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

       CLAROR pre- or post-multiplies an M by N matrix A by a random   
       unitary matrix U, overwriting A. A may optionally be   
       initialized to the identity matrix before multiplying by U.   
       U is generated using the method of G.W. Stewart   
       ( SIAM J. Numer. Anal. 17, 1980, pp. 403-409 ).   
       (BLAS-2 version)   

    Arguments   
    =========   

    SIDE   - CHARACTER*1   
             SIDE specifies whether A is multiplied on the left or right   
             by U.   
         SIDE = 'L'   Multiply A on the left (premultiply) by U   
         SIDE = 'R'   Multiply A on the right (postmultiply) by U*   
         SIDE = 'C'   Multiply A on the left by U and the right by U*   
         SIDE = 'T'   Multiply A on the left by U and the right by U'   
             Not modified.   

    INIT   - CHARACTER*1   
             INIT specifies whether or not A should be initialized to   
             the identity matrix.   
                INIT = 'I'   Initialize A to (a section of) the   
                             identity matrix before applying U.   
                INIT = 'N'   No initialization.  Apply U to the   
                             input matrix A.   

             INIT = 'I' may be used to generate square (i.e., unitary)   
             or rectangular orthogonal matrices (orthogonality being   
             in the sense of CDOTC):   

             For square matrices, M=N, and SIDE may be either 'L' or   
             'R'; the rows will be orthogonal to each other, as will the   
             columns.   
             For rectangular matrices where M < N, SIDE = 'R' will   
             produce a dense matrix whose rows will be orthogonal and   
             whose columns will not, while SIDE = 'L' will produce a   
             matrix whose rows will be orthogonal, and whose first M   
             columns will be orthogonal, the remaining columns being   
             zero.   
             For matrices where M > N, just use the previous   
             explanation, interchanging 'L' and 'R' and "rows" and   
             "columns".   

             Not modified.   

    M      - INTEGER   
             Number of rows of A. Not modified.   

    N      - INTEGER   
             Number of columns of A. Not modified.   

    A      - COMPLEX array, dimension ( LDA, N )   
             Input and output array. Overwritten by U A ( if SIDE = 'L' )   
             or by A U ( if SIDE = 'R' )   
             or by U A U* ( if SIDE = 'C')   
             or by U A U' ( if SIDE = 'T') on exit.   

    LDA    - INTEGER   
             Leading dimension of A. Must be at least MAX ( 1, M ).   
             Not modified.   

    ISEED  - INTEGER array, dimension ( 4 )   
             On entry ISEED specifies the seed of the random number   
             generator. The array elements should be between 0 and 4095;   
             if not they will be reduced mod 4096.  Also, ISEED(4) must   
             be odd.  The random number generator uses a linear   
             congruential sequence limited to small integers, and so   
             should produce machine independent random numbers. The   
             values of ISEED are changed on exit, and can be used in the   
             next call to CLAROR to continue the same random number   
             sequence.   
             Modified.   

    X      - COMPLEX array, dimension ( 3*MAX( M, N ) )   
             Workspace. Of length:   
                 2*M + N if SIDE = 'L',   
                 2*N + M if SIDE = 'R',   
                 3*N     if SIDE = 'C' or 'T'.   
             Modified.   

    INFO   - INTEGER   
             An error flag.  It is set to:   
              0  if no error.   
              1  if CLARND returned a bad random number (installation   
                 problem)   
             -1  if SIDE is not L, R, C, or T.   
             -3  if M is negative.   
             -4  if N is negative or if SIDE is C or T and N is not equal   
                 to M.   
             -6  if LDA is less than M.   

    ===================================================================== 
  


       Parameter adjustments */
    a_dim1 = *lda;
    a_offset = a_dim1 + 1;
    a -= a_offset;
    --iseed;
    --x;

    /* Function Body */
    if (*n == 0 || *m == 0) {
	return 0;
    }

    itype = 0;
    if (lsame_(side, "L")) {
	itype = 1;
    } else if (lsame_(side, "R")) {
	itype = 2;
    } else if (lsame_(side, "C")) {
	itype = 3;
    } else if (lsame_(side, "T")) {
	itype = 4;
    }

/*     Check for argument errors. */

    *info = 0;
    if (itype == 0) {
	*info = -1;
    } else if (*m < 0) {
	*info = -3;
    } else if (*n < 0 || itype == 3 && *n != *m) {
	*info = -4;
    } else if (*lda < *m) {
	*info = -6;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CLAROR", &i__1);
	return 0;
    }

    if (itype == 1) {
	nxfrm = *m;
    } else {
	nxfrm = *n;
    }

/*     Initialize A to the identity matrix if desired */

    if (lsame_(init, "I")) {
	claset_("Full", m, n, &c_b1, &c_b2, &a[a_offset], lda);
    }

/*     If no rotation possible, still multiply by   
       a random complex number from the circle |x| = 1   

        2)      Compute Rotation by computing Householder   
                Transformations H(2), H(3), ..., H(n).  Note that the   
                order in which they are computed is irrelevant. */

    i__1 = nxfrm;
    for (j = 1; j <= i__1; ++j) {
	i__2 = j;
	x[i__2].r = 0.f, x[i__2].i = 0.f;
/* L40: */
    }

    i__1 = nxfrm;
    for (ixfrm = 2; ixfrm <= i__1; ++ixfrm) {
	kbeg = nxfrm - ixfrm + 1;

/*        Generate independent normal( 0, 1 ) random numbers */

	i__2 = nxfrm;
	for (j = kbeg; j <= i__2; ++j) {
	    i__3 = j;
	    clarnd_(&q__1, &c__3, &iseed[1]);
	    x[i__3].r = q__1.r, x[i__3].i = q__1.i;
/* L50: */
	}

/*        Generate a Householder transformation from the random vector X */

	xnorm = scnrm2_(&ixfrm, &x[kbeg], &c__1);
	xabs = c_abs(&x[kbeg]);
	if (xabs != 0.f) {
	    i__2 = kbeg;
	    q__1.r = x[i__2].r / xabs, q__1.i = x[i__2].i / xabs;
	    csign.r = q__1.r, csign.i = q__1.i;
	} else {
	    csign.r = 1.f, csign.i = 0.f;
	}
	q__1.r = xnorm * csign.r, q__1.i = xnorm * csign.i;
	xnorms.r = q__1.r, xnorms.i = q__1.i;
	i__2 = nxfrm + kbeg;
	q__1.r = -(doublereal)csign.r, q__1.i = -(doublereal)csign.i;
	x[i__2].r = q__1.r, x[i__2].i = q__1.i;
	factor = xnorm * (xnorm + xabs);
	if (dabs(factor) < 1e-20f) {
	    *info = 1;
	    i__2 = -(*info);
	    xerbla_("CLAROR", &i__2);
	    return 0;
	} else {
	    factor = 1.f / factor;
	}
	i__2 = kbeg;
	i__3 = kbeg;
	q__1.r = x[i__3].r + xnorms.r, q__1.i = x[i__3].i + xnorms.i;
	x[i__2].r = q__1.r, x[i__2].i = q__1.i;

/*        Apply Householder transformation to A */

	if (itype == 1 || itype == 3 || itype == 4) {

/*           Apply H(k) on the left of A */

	    cgemv_("C", &ixfrm, n, &c_b2, &a[kbeg + a_dim1], lda, &x[kbeg], &
		    c__1, &c_b1, &x[(nxfrm << 1) + 1], &c__1);
	    q__2.r = factor, q__2.i = 0.f;
	    q__1.r = -(doublereal)q__2.r, q__1.i = -(doublereal)q__2.i;
	    cgerc_(&ixfrm, n, &q__1, &x[kbeg], &c__1, &x[(nxfrm << 1) + 1], &
		    c__1, &a[kbeg + a_dim1], lda);

	}

	if (itype >= 2 && itype <= 4) {

/*           Apply H(k)* (or H(k)') on the right of A */

	    if (itype == 4) {
		clacgv_(&ixfrm, &x[kbeg], &c__1);
	    }

	    cgemv_("N", m, &ixfrm, &c_b2, &a[kbeg * a_dim1 + 1], lda, &x[kbeg]
		    , &c__1, &c_b1, &x[(nxfrm << 1) + 1], &c__1);
	    q__2.r = factor, q__2.i = 0.f;
	    q__1.r = -(doublereal)q__2.r, q__1.i = -(doublereal)q__2.i;
	    cgerc_(m, &ixfrm, &q__1, &x[(nxfrm << 1) + 1], &c__1, &x[kbeg], &
		    c__1, &a[kbeg * a_dim1 + 1], lda);

	}
/* L60: */
    }

    clarnd_(&q__1, &c__3, &iseed[1]);
    x[1].r = q__1.r, x[1].i = q__1.i;
    xabs = c_abs(&x[1]);
    if (xabs != 0.f) {
	q__1.r = x[1].r / xabs, q__1.i = x[1].i / xabs;
	csign.r = q__1.r, csign.i = q__1.i;
    } else {
	csign.r = 1.f, csign.i = 0.f;
    }
    i__1 = nxfrm << 1;
    x[i__1].r = csign.r, x[i__1].i = csign.i;

/*     Scale the matrix A by D. */

    if (itype == 1 || itype == 3 || itype == 4) {
	i__1 = *m;
	for (irow = 1; irow <= i__1; ++irow) {
	    r_cnjg(&q__1, &x[nxfrm + irow]);
	    cscal_(n, &q__1, &a[irow + a_dim1], lda);
/* L70: */
	}
    }

    if (itype == 2 || itype == 3) {
	i__1 = *n;
	for (jcol = 1; jcol <= i__1; ++jcol) {
	    cscal_(m, &x[nxfrm + jcol], &a[jcol * a_dim1 + 1], &c__1);
/* L80: */
	}
    }

    if (itype == 4) {
	i__1 = *n;
	for (jcol = 1; jcol <= i__1; ++jcol) {
	    r_cnjg(&q__1, &x[nxfrm + jcol]);
	    cscal_(m, &q__1, &a[jcol * a_dim1 + 1], &c__1);
/* L90: */
	}
    }
    return 0;

/*     End of CLAROR */

} /* claror_ */
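/* Editor's sketch (not part of CLAROR): the core of Stewart's construction
   used above, one Householder step written with C99 complex arithmetic
   instead of the f2c complex struct.  The function name householder_apply
   and its interface are illustrative assumptions; CLAROR itself performs the
   same work with scnrm2_/cgemv_/cgerc_ and also accumulates the diagonal
   scaling D, which is omitted here. */
#include <complex.h>
#include <math.h>

/* Given x (n independent random samples), overwrite x with the Householder
   vector v = x + sign(x[0])*||x||*e_1 and apply H = I - v v^H / factor,
   factor = ||x||*(||x|| + |x[0]|), to one column a of length n.  Since
   v^H v = 2*factor, H is an exact unitary reflector. */
static void householder_apply(int n, float complex *x, float complex *a)
{
    float xnorm = 0.0f;
    for (int i = 0; i < n; ++i)
        xnorm += crealf(x[i]) * crealf(x[i]) + cimagf(x[i]) * cimagf(x[i]);
    xnorm = sqrtf(xnorm);

    float xabs = cabsf(x[0]);
    float complex csign = (xabs != 0.0f) ? x[0] / xabs : 1.0f;
    float factor = xnorm * (xnorm + xabs);
    if (factor < 1e-20f)
        return;                             /* degenerate; CLAROR sets INFO=1 */
    x[0] += xnorm * csign;                  /* x now holds v */

    float complex dot = 0.0f;               /* v^H a */
    for (int i = 0; i < n; ++i)
        dot += conjf(x[i]) * a[i];
    for (int i = 0; i < n; ++i)
        a[i] -= (dot / factor) * x[i];      /* a := H a */
}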
Example #27
File: cunbdb1.c  Project: csapng/libflame
/* Subroutine */
int cunbdb1_(integer *m, integer *p, integer *q, complex * x11, integer *ldx11, complex *x21, integer *ldx21, real *theta, real * phi, complex *taup1, complex *taup2, complex *tauq1, complex *work, integer *lwork, integer *info)
{
    /* System generated locals */
    integer x11_dim1, x11_offset, x21_dim1, x21_offset, i__1, i__2, i__3, i__4;
    real r__1, r__2;
    complex q__1;
    /* Builtin functions */
    double atan2(doublereal, doublereal), cos(doublereal), sin(doublereal);
    void r_cnjg(complex *, complex *);
    double sqrt(doublereal);
    /* Local variables */
    integer lworkmin, lworkopt;
    real c__;
    integer i__;
    real s;
    integer childinfo;
    extern /* Subroutine */
    int clarf_(char *, integer *, integer *, complex * , integer *, complex *, complex *, integer *, complex *);
    integer ilarf, llarf;
    extern /* Subroutine */
    int csrot_(integer *, complex *, integer *, complex *, integer *, real *, real *);
    extern real scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */
    int clacgv_(integer *, complex *, integer *), xerbla_(char *, integer *);
    logical lquery;
    extern /* Subroutine */
    int cunbdb5_(integer *, integer *, integer *, complex *, integer *, complex *, integer *, complex *, integer *, complex *, integer *, complex *, integer *, integer *);
    integer iorbdb5, lorbdb5;
    extern /* Subroutine */
    int clarfgp_(integer *, complex *, complex *, integer *, complex *);
    /* -- LAPACK computational routine (version 3.5.0) -- */
    /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
    /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
    /* July 2012 */
    /* .. Scalar Arguments .. */
    /* .. */
    /* .. Array Arguments .. */
    /* .. */
    /* ==================================================================== */
    /* .. Parameters .. */
    /* .. */
    /* .. Local Scalars .. */
    /* .. */
    /* .. External Subroutines .. */
    /* .. */
    /* .. External Functions .. */
    /* .. */
    /* .. Intrinsic Function .. */
    /* .. */
    /* .. Executable Statements .. */
    /* Test input arguments */
    /* Parameter adjustments */
    x11_dim1 = *ldx11;
    x11_offset = 1 + x11_dim1;
    x11 -= x11_offset;
    x21_dim1 = *ldx21;
    x21_offset = 1 + x21_dim1;
    x21 -= x21_offset;
    --theta;
    --phi;
    --taup1;
    --taup2;
    --tauq1;
    --work;
    /* Function Body */
    *info = 0;
    lquery = *lwork == -1;
    if (*m < 0)
    {
        *info = -1;
    }
    else if (*p < *q || *m - *p < *q)
    {
        *info = -2;
    }
    else if (*q < 0 || *m - *q < *q)
    {
        *info = -3;
    }
    else if (*ldx11 < max(1,*p))
    {
        *info = -5;
    }
    else /* if(complicated condition) */
    {
        /* Computing MAX */
        i__1 = 1;
        i__2 = *m - *p; // , expr subst
        if (*ldx21 < max(i__1,i__2))
        {
            *info = -7;
        }
    }
    /* Compute workspace */
    if (*info == 0)
    {
        ilarf = 2;
        /* Computing MAX */
        i__1 = *p - 1, i__2 = *m - *p - 1;
        i__1 = max(i__1,i__2);
        i__2 = *q - 1; // ; expr subst
        llarf = max(i__1,i__2);
        iorbdb5 = 2;
        lorbdb5 = *q - 2;
        /* Computing MAX */
        i__1 = ilarf + llarf - 1;
        i__2 = iorbdb5 + lorbdb5 - 1; // , expr subst
        lworkopt = max(i__1,i__2);
        lworkmin = lworkopt;
        work[1].r = (real) lworkopt;
        work[1].i = 0.f; // , expr subst
        if (*lwork < lworkmin && ! lquery)
        {
            *info = -14;
        }
    }
    if (*info != 0)
    {
        i__1 = -(*info);
        xerbla_("CUNBDB1", &i__1);
        return 0;
    }
    else if (lquery)
    {
        return 0;
    }
    /* Reduce columns 1, ..., Q of X11 and X21 */
    i__1 = *q;
    for (i__ = 1;
            i__ <= i__1;
            ++i__)
    {
        i__2 = *p - i__ + 1;
        clarfgp_(&i__2, &x11[i__ + i__ * x11_dim1], &x11[i__ + 1 + i__ * x11_dim1], &c__1, &taup1[i__]);
        i__2 = *m - *p - i__ + 1;
        clarfgp_(&i__2, &x21[i__ + i__ * x21_dim1], &x21[i__ + 1 + i__ * x21_dim1], &c__1, &taup2[i__]);
        theta[i__] = atan2((real) x21[i__ + i__ * x21_dim1].r, (real) x11[i__ + i__ * x11_dim1].r);
        c__ = cos(theta[i__]);
        s = sin(theta[i__]);
        i__2 = i__ + i__ * x11_dim1;
        x11[i__2].r = 1.f;
        x11[i__2].i = 0.f; // , expr subst
        i__2 = i__ + i__ * x21_dim1;
        x21[i__2].r = 1.f;
        x21[i__2].i = 0.f; // , expr subst
        i__2 = *p - i__ + 1;
        i__3 = *q - i__;
        r_cnjg(&q__1, &taup1[i__]);
        clarf_("L", &i__2, &i__3, &x11[i__ + i__ * x11_dim1], &c__1, &q__1, & x11[i__ + (i__ + 1) * x11_dim1], ldx11, &work[ilarf]);
        i__2 = *m - *p - i__ + 1;
        i__3 = *q - i__;
        r_cnjg(&q__1, &taup2[i__]);
        clarf_("L", &i__2, &i__3, &x21[i__ + i__ * x21_dim1], &c__1, &q__1, & x21[i__ + (i__ + 1) * x21_dim1], ldx21, &work[ilarf]);
        if (i__ < *q)
        {
            i__2 = *q - i__;
            csrot_(&i__2, &x11[i__ + (i__ + 1) * x11_dim1], ldx11, &x21[i__ + (i__ + 1) * x21_dim1], ldx21, &c__, &s);
            i__2 = *q - i__;
            clacgv_(&i__2, &x21[i__ + (i__ + 1) * x21_dim1], ldx21);
            i__2 = *q - i__;
            clarfgp_(&i__2, &x21[i__ + (i__ + 1) * x21_dim1], &x21[i__ + (i__ + 2) * x21_dim1], ldx21, &tauq1[i__]);
            i__2 = i__ + (i__ + 1) * x21_dim1;
            s = x21[i__2].r;
            i__2 = i__ + (i__ + 1) * x21_dim1;
            x21[i__2].r = 1.f;
            x21[i__2].i = 0.f; // , expr subst
            i__2 = *p - i__;
            i__3 = *q - i__;
            clarf_("R", &i__2, &i__3, &x21[i__ + (i__ + 1) * x21_dim1], ldx21, &tauq1[i__], &x11[i__ + 1 + (i__ + 1) * x11_dim1], ldx11, &work[ilarf]);
            i__2 = *m - *p - i__;
            i__3 = *q - i__;
            clarf_("R", &i__2, &i__3, &x21[i__ + (i__ + 1) * x21_dim1], ldx21, &tauq1[i__], &x21[i__ + 1 + (i__ + 1) * x21_dim1], ldx21, &work[ilarf]);
            i__2 = *q - i__;
            clacgv_(&i__2, &x21[i__ + (i__ + 1) * x21_dim1], ldx21);
            i__2 = *p - i__;
            /* Computing 2nd power */
            r__1 = scnrm2_(&i__2, &x11[i__ + 1 + (i__ + 1) * x11_dim1], &c__1);
            i__3 = *m - *p - i__;
            /* Computing 2nd power */
            r__2 = scnrm2_(&i__3, &x21[i__ + 1 + (i__ + 1) * x21_dim1], &c__1);
            c__ = sqrt(r__1 * r__1 + r__2 * r__2);
            phi[i__] = atan2(s, c__);
            i__2 = *p - i__;
            i__3 = *m - *p - i__;
            i__4 = *q - i__ - 1;
            cunbdb5_(&i__2, &i__3, &i__4, &x11[i__ + 1 + (i__ + 1) * x11_dim1] , &c__1, &x21[i__ + 1 + (i__ + 1) * x21_dim1], &c__1, & x11[i__ + 1 + (i__ + 2) * x11_dim1], ldx11, &x21[i__ + 1 + (i__ + 2) * x21_dim1], ldx21, &work[iorbdb5], &lorbdb5, &childinfo);
        }
    }
    return 0;
    /* End of CUNBDB1 */
}
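/* Editor's sketch (not part of CUNBDB1): the angle recovery performed in the
   loop above, isolated in plain C.  After CLARFGP reduces column i of X11 and
   X21, the two (real) pivots give THETA(i); after the row reduction, PHI(i)
   comes from the remaining row norms.  The helper names are illustrative. */
#include <math.h>

static float theta_from_pivots(float alpha11, float alpha21)
{
    /* THETA(I) = ATAN2( X21(I,I), X11(I,I) ) */
    return atan2f(alpha21, alpha11);
}

static float phi_from_tails(float s, float nrm_x11_row, float nrm_x21_row)
{
    /* C = SQRT( SCNRM2(X11 row tail)^2 + SCNRM2(X21 row tail)^2 ),
       PHI(I) = ATAN2( S, C ) */
    float c = sqrtf(nrm_x11_row * nrm_x11_row + nrm_x21_row * nrm_x21_row);
    return atan2f(s, c);
}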
Example #28
 int claein_(int *rightv, int *noinit, int *n, 
	complex *h__, int *ldh, complex *w, complex *v, complex *b, 
	int *ldb, float *rwork, float *eps3, float *smlnum, int *info)
{
    /* System generated locals */
    int b_dim1, b_offset, h_dim1, h_offset, i__1, i__2, i__3, i__4, i__5;
    float r__1, r__2, r__3, r__4;
    complex q__1, q__2;

    /* Builtin functions */
    double sqrt(double), r_imag(complex *);

    /* Local variables */
    int i__, j;
    complex x, ei, ej;
    int its, ierr;
    complex temp;
    float scale;
    char trans[1];
    float rtemp, rootn, vnorm;
    extern double scnrm2_(int *, complex *, int *);
    extern int icamax_(int *, complex *, int *);
    extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
    extern  int csscal_(int *, float *, complex *, int 
	    *), clatrs_(char *, char *, char *, char *, int *, complex *, 
	    int *, complex *, float *, float *, int *);
    extern double scasum_(int *, complex *, int *);
    char normin[1];
    float nrmsml, growto;


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CLAEIN uses inverse iteration to find a right or left eigenvector */
/*  corresponding to the eigenvalue W of a complex upper Hessenberg */
/*  matrix H. */

/*  Arguments */
/*  ========= */

/*  RIGHTV   (input) LOGICAL */
/*          = .TRUE. : compute right eigenvector; */
/*          = .FALSE.: compute left eigenvector. */

/*  NOINIT   (input) LOGICAL */
/*          = .TRUE. : no initial vector supplied in V */
/*          = .FALSE.: initial vector supplied in V. */

/*  N       (input) INTEGER */
/*          The order of the matrix H.  N >= 0. */

/*  H       (input) COMPLEX array, dimension (LDH,N) */
/*          The upper Hessenberg matrix H. */

/*  LDH     (input) INTEGER */
/*          The leading dimension of the array H.  LDH >= MAX(1,N). */

/*  W       (input) COMPLEX */
/*          The eigenvalue of H whose corresponding right or left */
/*          eigenvector is to be computed. */

/*  V       (input/output) COMPLEX array, dimension (N) */
/*          On entry, if NOINIT = .FALSE., V must contain a starting */
/*          vector for inverse iteration; otherwise V need not be set. */
/*          On exit, V contains the computed eigenvector, normalized so */
/*          that the component of largest magnitude has magnitude 1; here */
/*          the magnitude of a complex number (x,y) is taken to be */
/*          |x| + |y|. */

/*  B       (workspace) COMPLEX array, dimension (LDB,N) */

/*  LDB     (input) INTEGER */
/*          The leading dimension of the array B.  LDB >= MAX(1,N). */

/*  RWORK   (workspace) REAL array, dimension (N) */

/*  EPS3    (input) REAL */
/*          A small machine-dependent value which is used to perturb */
/*          close eigenvalues, and to replace zero pivots. */

/*  SMLNUM  (input) REAL */
/*          A machine-dependent value close to the underflow threshold. */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          = 1:  inverse iteration did not converge; V is set to the */
/*                last iterate. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Statement Functions .. */
/*     .. */
/*     .. Statement Function definitions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    h_dim1 = *ldh;
    h_offset = 1 + h_dim1;
    h__ -= h_offset;
    --v;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    --rwork;

    /* Function Body */
    *info = 0;

/*     GROWTO is the threshold used in the acceptance test for an */
/*     eigenvector. */

    rootn = sqrt((float) (*n));
    growto = .1f / rootn;
/* Computing MAX */
    r__1 = 1.f, r__2 = *eps3 * rootn;
    nrmsml = MAX(r__1,r__2) * *smlnum;

/*     Form B = H - W*I (except that the subdiagonal elements are not */
/*     stored). */

    i__1 = *n;
    for (j = 1; j <= i__1; ++j) {
	i__2 = j - 1;
	for (i__ = 1; i__ <= i__2; ++i__) {
	    i__3 = i__ + j * b_dim1;
	    i__4 = i__ + j * h_dim1;
	    b[i__3].r = h__[i__4].r, b[i__3].i = h__[i__4].i;
/* L10: */
	}
	i__2 = j + j * b_dim1;
	i__3 = j + j * h_dim1;
	q__1.r = h__[i__3].r - w->r, q__1.i = h__[i__3].i - w->i;
	b[i__2].r = q__1.r, b[i__2].i = q__1.i;
/* L20: */
    }

    if (*noinit) {

/*        Initialize V. */

	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    i__2 = i__;
	    v[i__2].r = *eps3, v[i__2].i = 0.f;
/* L30: */
	}
    } else {

/*        Scale supplied initial vector. */

	vnorm = scnrm2_(n, &v[1], &c__1);
	r__1 = *eps3 * rootn / MAX(vnorm,nrmsml);
	csscal_(n, &r__1, &v[1], &c__1);
    }

    if (*rightv) {

/*        LU decomposition with partial pivoting of B, replacing zero */
/*        pivots by EPS3. */

	i__1 = *n - 1;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    i__2 = i__ + 1 + i__ * h_dim1;
	    ei.r = h__[i__2].r, ei.i = h__[i__2].i;
	    i__2 = i__ + i__ * b_dim1;
	    if ((r__1 = b[i__2].r, ABS(r__1)) + (r__2 = r_imag(&b[i__ + i__ *
		     b_dim1]), ABS(r__2)) < (r__3 = ei.r, ABS(r__3)) + (
		    r__4 = r_imag(&ei), ABS(r__4))) {

/*              Interchange rows and eliminate. */

		cladiv_(&q__1, &b[i__ + i__ * b_dim1], &ei);
		x.r = q__1.r, x.i = q__1.i;
		i__2 = i__ + i__ * b_dim1;
		b[i__2].r = ei.r, b[i__2].i = ei.i;
		i__2 = *n;
		for (j = i__ + 1; j <= i__2; ++j) {
		    i__3 = i__ + 1 + j * b_dim1;
		    temp.r = b[i__3].r, temp.i = b[i__3].i;
		    i__3 = i__ + 1 + j * b_dim1;
		    i__4 = i__ + j * b_dim1;
		    q__2.r = x.r * temp.r - x.i * temp.i, q__2.i = x.r * 
			    temp.i + x.i * temp.r;
		    q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4].i - q__2.i;
		    b[i__3].r = q__1.r, b[i__3].i = q__1.i;
		    i__3 = i__ + j * b_dim1;
		    b[i__3].r = temp.r, b[i__3].i = temp.i;
/* L40: */
		}
	    } else {

/*              Eliminate without interchange. */

		i__2 = i__ + i__ * b_dim1;
		if (b[i__2].r == 0.f && b[i__2].i == 0.f) {
		    i__3 = i__ + i__ * b_dim1;
		    b[i__3].r = *eps3, b[i__3].i = 0.f;
		}
		cladiv_(&q__1, &ei, &b[i__ + i__ * b_dim1]);
		x.r = q__1.r, x.i = q__1.i;
		if (x.r != 0.f || x.i != 0.f) {
		    i__2 = *n;
		    for (j = i__ + 1; j <= i__2; ++j) {
			i__3 = i__ + 1 + j * b_dim1;
			i__4 = i__ + 1 + j * b_dim1;
			i__5 = i__ + j * b_dim1;
			q__2.r = x.r * b[i__5].r - x.i * b[i__5].i, q__2.i = 
				x.r * b[i__5].i + x.i * b[i__5].r;
			q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4].i - 
				q__2.i;
			b[i__3].r = q__1.r, b[i__3].i = q__1.i;
/* L50: */
		    }
		}
	    }
/* L60: */
	}
	i__1 = *n + *n * b_dim1;
	if (b[i__1].r == 0.f && b[i__1].i == 0.f) {
	    i__2 = *n + *n * b_dim1;
	    b[i__2].r = *eps3, b[i__2].i = 0.f;
	}

	*(unsigned char *)trans = 'N';

    } else {

/*        UL decomposition with partial pivoting of B, replacing zero */
/*        pivots by EPS3. */

	for (j = *n; j >= 2; --j) {
	    i__1 = j + (j - 1) * h_dim1;
	    ej.r = h__[i__1].r, ej.i = h__[i__1].i;
	    i__1 = j + j * b_dim1;
	    if ((r__1 = b[i__1].r, ABS(r__1)) + (r__2 = r_imag(&b[j + j * 
		    b_dim1]), ABS(r__2)) < (r__3 = ej.r, ABS(r__3)) + (r__4 
		    = r_imag(&ej), ABS(r__4))) {

/*              Interchange columns and eliminate. */

		cladiv_(&q__1, &b[j + j * b_dim1], &ej);
		x.r = q__1.r, x.i = q__1.i;
		i__1 = j + j * b_dim1;
		b[i__1].r = ej.r, b[i__1].i = ej.i;
		i__1 = j - 1;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    i__2 = i__ + (j - 1) * b_dim1;
		    temp.r = b[i__2].r, temp.i = b[i__2].i;
		    i__2 = i__ + (j - 1) * b_dim1;
		    i__3 = i__ + j * b_dim1;
		    q__2.r = x.r * temp.r - x.i * temp.i, q__2.i = x.r * 
			    temp.i + x.i * temp.r;
		    q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i - q__2.i;
		    b[i__2].r = q__1.r, b[i__2].i = q__1.i;
		    i__2 = i__ + j * b_dim1;
		    b[i__2].r = temp.r, b[i__2].i = temp.i;
/* L70: */
		}
	    } else {

/*              Eliminate without interchange. */

		i__1 = j + j * b_dim1;
		if (b[i__1].r == 0.f && b[i__1].i == 0.f) {
		    i__2 = j + j * b_dim1;
		    b[i__2].r = *eps3, b[i__2].i = 0.f;
		}
		cladiv_(&q__1, &ej, &b[j + j * b_dim1]);
		x.r = q__1.r, x.i = q__1.i;
		if (x.r != 0.f || x.i != 0.f) {
		    i__1 = j - 1;
		    for (i__ = 1; i__ <= i__1; ++i__) {
			i__2 = i__ + (j - 1) * b_dim1;
			i__3 = i__ + (j - 1) * b_dim1;
			i__4 = i__ + j * b_dim1;
			q__2.r = x.r * b[i__4].r - x.i * b[i__4].i, q__2.i = 
				x.r * b[i__4].i + x.i * b[i__4].r;
			q__1.r = b[i__3].r - q__2.r, q__1.i = b[i__3].i - 
				q__2.i;
			b[i__2].r = q__1.r, b[i__2].i = q__1.i;
/* L80: */
		    }
		}
	    }
/* L90: */
	}
	i__1 = b_dim1 + 1;
	if (b[i__1].r == 0.f && b[i__1].i == 0.f) {
	    i__2 = b_dim1 + 1;
	    b[i__2].r = *eps3, b[i__2].i = 0.f;
	}

	*(unsigned char *)trans = 'C';

    }

    *(unsigned char *)normin = 'N';
    i__1 = *n;
    for (its = 1; its <= i__1; ++its) {

/*        Solve U*x = scale*v for a right eigenvector */
/*          or U'*x = scale*v for a left eigenvector, */
/*        overwriting x on v. */

	clatrs_("Upper", trans, "Nonunit", normin, n, &b[b_offset], ldb, &v[1]
, &scale, &rwork[1], &ierr);
	*(unsigned char *)normin = 'Y';

/*        Test for sufficient growth in the norm of v. */

	vnorm = scasum_(n, &v[1], &c__1);
	if (vnorm >= growto * scale) {
	    goto L120;
	}

/*        Choose new orthogonal starting vector and try again. */

	rtemp = *eps3 / (rootn + 1.f);
	v[1].r = *eps3, v[1].i = 0.f;
	i__2 = *n;
	for (i__ = 2; i__ <= i__2; ++i__) {
	    i__3 = i__;
	    v[i__3].r = rtemp, v[i__3].i = 0.f;
/* L100: */
	}
	i__2 = *n - its + 1;
	i__3 = *n - its + 1;
	r__1 = *eps3 * rootn;
	q__1.r = v[i__3].r - r__1, q__1.i = v[i__3].i;
	v[i__2].r = q__1.r, v[i__2].i = q__1.i;
/* L110: */
    }

/*     Failure to find eigenvector in N iterations. */

    *info = 1;

L120:

/*     Normalize eigenvector. */

    i__ = icamax_(n, &v[1], &c__1);
    i__1 = i__;
    r__3 = 1.f / ((r__1 = v[i__1].r, ABS(r__1)) + (r__2 = r_imag(&v[i__]), 
	    ABS(r__2)));
    csscal_(n, &r__3, &v[1], &c__1);

    return 0;

/*     End of CLAEIN */

} /* claein_ */
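
CLAEIN is normally reached through CHSEIN, so a small hypothetical driver may help show how the documented arguments fit together. Everything below is illustrative: the 2-by-2 upper triangular matrix, the crude EPS3/SMLNUM choices (CHSEIN derives EPS3 from the norm of the Hessenberg part and SMLNUM from the underflow threshold), and the driver itself.

#include <stdio.h>

typedef struct { float r, i; } complex;          /* f2c complex layout */

extern int claein_(int *rightv, int *noinit, int *n, complex *h__, int *ldh,
                   complex *w, complex *v, complex *b, int *ldb, float *rwork,
                   float *eps3, float *smlnum, int *info);
extern double slamch_(char *);

int main(void)
{
    /* H = [1 2; 0 3] in column-major order; eigenvalues 1 and 3. */
    int n = 2, ldh = 2, ldb = 2, info;
    complex h[4] = {{1.f,0.f},{0.f,0.f},{2.f,0.f},{3.f,0.f}};
    complex w = {3.f, 0.f};                      /* seek the eigenvector for lambda = 3 */
    complex v[2], b[4];
    float rwork[2];

    int rightv = 1, noinit = 1;                  /* right eigenvector, no starting vector */
    float ulp = (float) slamch_("P"), unfl = (float) slamch_("S");
    float eps3 = 3.f * ulp;                      /* roughly ||H|| * ulp, with ||H|| taken as 3 */
    float smlnum = unfl * ((float) n / ulp);

    claein_(&rightv, &noinit, &n, h, &ldh, &w, v, b, &ldb, rwork,
            &eps3, &smlnum, &info);
    printf("info=%d  v=(%g,%g) (%g,%g)\n", info, v[0].r, v[0].i, v[1].r, v[1].i);
    return 0;                                    /* expected direction: v proportional to (1, 1) */
}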
Example #29
0
 int cgeev_(char *jobvl, char *jobvr, int *n, complex *a, 
	int *lda, complex *w, complex *vl, int *ldvl, complex *vr, 
	int *ldvr, complex *work, int *lwork, float *rwork, int *
	info)
{
    /* System generated locals */
    int a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1, 
	    i__2, i__3;
    float r__1, r__2;
    complex q__1, q__2;

    /* Builtin functions */
    double sqrt(double), r_imag(complex *);
    void r_cnjg(complex *, complex *);

    /* Local variables */
    int i__, k, ihi;
    float scl;
    int ilo;
    float dum[1], eps;
    complex tmp;
    int ibal;
    char side[1];
    float anrm;
    int ierr, itau, iwrk, nout;
    extern  int cscal_(int *, complex *, complex *, 
	    int *);
    extern int lsame_(char *, char *);
    extern double scnrm2_(int *, complex *, int *);
    extern  int cgebak_(char *, char *, int *, int *, 
	    int *, float *, int *, complex *, int *, int *), cgebal_(char *, int *, complex *, int *, 
	    int *, int *, float *, int *), slabad_(float *, 
	    float *);
    int scalea;
    extern double clange_(char *, int *, int *, complex *, 
	    int *, float *);
    float cscale;
    extern  int cgehrd_(int *, int *, int *, 
	    complex *, int *, complex *, complex *, int *, int *),
	     clascl_(char *, int *, int *, float *, float *, int *, 
	    int *, complex *, int *, int *);
    extern double slamch_(char *);
    extern  int csscal_(int *, float *, complex *, int 
	    *), clacpy_(char *, int *, int *, complex *, int *, 
	    complex *, int *), xerbla_(char *, int *);
    extern int ilaenv_(int *, char *, char *, int *, int *, 
	    int *, int *);
    int select[1];
    float bignum;
    extern int isamax_(int *, float *, int *);
    extern  int chseqr_(char *, char *, int *, int *, 
	    int *, complex *, int *, complex *, complex *, int *, 
	    complex *, int *, int *), ctrevc_(char *, 
	    char *, int *, int *, complex *, int *, complex *, 
	    int *, complex *, int *, int *, int *, complex *, 
	    float *, int *), cunghr_(int *, int *, 
	    int *, complex *, int *, complex *, complex *, int *, 
	    int *);
    int minwrk, maxwrk;
    int wantvl;
    float smlnum;
    int hswork, irwork;
    int lquery, wantvr;


/*  -- LAPACK driver routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CGEEV computes for an N-by-N complex nonsymmetric matrix A, the */
/*  eigenvalues and, optionally, the left and/or right eigenvectors. */

/*  The right eigenvector v(j) of A satisfies */
/*                   A * v(j) = lambda(j) * v(j) */
/*  where lambda(j) is its eigenvalue. */
/*  The left eigenvector u(j) of A satisfies */
/*                u(j)**H * A = lambda(j) * u(j)**H */
/*  where u(j)**H denotes the conjugate transpose of u(j). */

/*  The computed eigenvectors are normalized to have Euclidean norm */
/*  equal to 1 and largest component real. */

/*  Arguments */
/*  ========= */

/*  JOBVL   (input) CHARACTER*1 */
/*          = 'N': left eigenvectors of A are not computed; */
/*          = 'V': left eigenvectors of A are computed. */

/*  JOBVR   (input) CHARACTER*1 */
/*          = 'N': right eigenvectors of A are not computed; */
/*          = 'V': right eigenvectors of A are computed. */

/*  N       (input) INTEGER */
/*          The order of the matrix A. N >= 0. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the N-by-N matrix A. */
/*          On exit, A has been overwritten. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= MAX(1,N). */

/*  W       (output) COMPLEX array, dimension (N) */
/*          W contains the computed eigenvalues. */

/*  VL      (output) COMPLEX array, dimension (LDVL,N) */
/*          If JOBVL = 'V', the left eigenvectors u(j) are stored one */
/*          after another in the columns of VL, in the same order */
/*          as their eigenvalues. */
/*          If JOBVL = 'N', VL is not referenced. */
/*          u(j) = VL(:,j), the j-th column of VL. */

/*  LDVL    (input) INTEGER */
/*          The leading dimension of the array VL.  LDVL >= 1; if */
/*          JOBVL = 'V', LDVL >= N. */

/*  VR      (output) COMPLEX array, dimension (LDVR,N) */
/*          If JOBVR = 'V', the right eigenvectors v(j) are stored one */
/*          after another in the columns of VR, in the same order */
/*          as their eigenvalues. */
/*          If JOBVR = 'N', VR is not referenced. */
/*          v(j) = VR(:,j), the j-th column of VR. */

/*  LDVR    (input) INTEGER */
/*          The leading dimension of the array VR.  LDVR >= 1; if */
/*          JOBVR = 'V', LDVR >= N. */

/*  WORK    (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
/*          On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */

/*  LWORK   (input) INTEGER */
/*          The dimension of the array WORK.  LWORK >= MAX(1,2*N). */
/*          For good performance, LWORK must generally be larger. */

/*          If LWORK = -1, then a workspace query is assumed; the routine */
/*          only calculates the optimal size of the WORK array, returns */
/*          this value as the first entry of the WORK array, and no error */
/*          message related to LWORK is issued by XERBLA. */

/*  RWORK   (workspace) REAL array, dimension (2*N) */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */
/*          > 0:  if INFO = i, the QR algorithm failed to compute all the */
/*                eigenvalues, and no eigenvectors have been computed; */
/*                elements i+1:N of W contain eigenvalues which have */
/*                converged. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input arguments */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --w;
    vl_dim1 = *ldvl;
    vl_offset = 1 + vl_dim1;
    vl -= vl_offset;
    vr_dim1 = *ldvr;
    vr_offset = 1 + vr_dim1;
    vr -= vr_offset;
    --work;
    --rwork;

    /* Function Body */
    *info = 0;
    lquery = *lwork == -1;
    wantvl = lsame_(jobvl, "V");
    wantvr = lsame_(jobvr, "V");
    if (! wantvl && ! lsame_(jobvl, "N")) {
	*info = -1;
    } else if (! wantvr && ! lsame_(jobvr, "N")) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*lda < MAX(1,*n)) {
	*info = -5;
    } else if (*ldvl < 1 || wantvl && *ldvl < *n) {
	*info = -8;
    } else if (*ldvr < 1 || wantvr && *ldvr < *n) {
	*info = -10;
    }

/*     Compute workspace */
/*      (Note: Comments in the code beginning "Workspace:" describe the */
/*       minimal amount of workspace needed at that point in the code, */
/*       as well as the preferred amount for good performance. */
/*       CWorkspace refers to complex workspace, and RWorkspace to float */
/*       workspace. NB refers to the optimal block size for the */
/*       immediately following subroutine, as returned by ILAENV. */
/*       HSWORK refers to the workspace preferred by CHSEQR, as */
/*       calculated below. HSWORK is computed assuming ILO=1 and IHI=N, */
/*       the worst case.) */

    if (*info == 0) {
	if (*n == 0) {
	    minwrk = 1;
	    maxwrk = 1;
	} else {
	    maxwrk = *n + *n * ilaenv_(&c__1, "CGEHRD", " ", n, &c__1, n, &
		    c__0);
	    minwrk = *n << 1;
	    if (wantvl) {
/* Computing MAX */
		i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "CUNGHR", 
			 " ", n, &c__1, n, &c_n1);
		maxwrk = MAX(i__1,i__2);
		chseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &w[1], &vl[
			vl_offset], ldvl, &work[1], &c_n1, info);
	    } else if (wantvr) {
/* Computing MAX */
		i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "CUNGHR", 
			 " ", n, &c__1, n, &c_n1);
		maxwrk = MAX(i__1,i__2);
		chseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &w[1], &vr[
			vr_offset], ldvr, &work[1], &c_n1, info);
	    } else {
		chseqr_("E", "N", n, &c__1, n, &a[a_offset], lda, &w[1], &vr[
			vr_offset], ldvr, &work[1], &c_n1, info);
	    }
	    hswork = work[1].r;
/* Computing MAX */
	    i__1 = MAX(maxwrk,hswork);
	    maxwrk = MAX(i__1,minwrk);
	}
	work[1].r = (float) maxwrk, work[1].i = 0.f;

	if (*lwork < minwrk && ! lquery) {
	    *info = -12;
	}
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGEEV ", &i__1);
	return 0;
    } else if (lquery) {
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }

/*     Get machine constants */

    eps = slamch_("P");
    smlnum = slamch_("S");
    bignum = 1.f / smlnum;
    slabad_(&smlnum, &bignum);
    smlnum = sqrt(smlnum) / eps;
    bignum = 1.f / smlnum;

/*     Scale A if max element outside range [SMLNUM,BIGNUM] */

    anrm = clange_("M", n, n, &a[a_offset], lda, dum);
    scalea = FALSE;
    if (anrm > 0.f && anrm < smlnum) {
	scalea = TRUE;
	cscale = smlnum;
    } else if (anrm > bignum) {
	scalea = TRUE;
	cscale = bignum;
    }
    if (scalea) {
	clascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
		ierr);
    }

/*     Balance the matrix */
/*     (CWorkspace: none) */
/*     (RWorkspace: need N) */

    ibal = 1;
    cgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &rwork[ibal], &ierr);

/*     Reduce to upper Hessenberg form */
/*     (CWorkspace: need 2*N, prefer N+N*NB) */
/*     (RWorkspace: none) */

    itau = 1;
    iwrk = itau + *n;
    i__1 = *lwork - iwrk + 1;
    cgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, 
	     &ierr);

    if (wantvl) {

/*        Want left eigenvectors */
/*        Copy Householder vectors to VL */

	*(unsigned char *)side = 'L';
	clacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
		;

/*        Generate unitary matrix in VL */
/*        (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
/*        (RWorkspace: none) */

	i__1 = *lwork - iwrk + 1;
	cunghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk], 
		 &i__1, &ierr);

/*        Perform QR iteration, accumulating Schur vectors in VL */
/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
/*        (RWorkspace: none) */

	iwrk = itau;
	i__1 = *lwork - iwrk + 1;
	chseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vl[
		vl_offset], ldvl, &work[iwrk], &i__1, info);

	if (wantvr) {

/*           Want left and right eigenvectors */
/*           Copy Schur vectors to VR */

	    *(unsigned char *)side = 'B';
	    clacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
	}

    } else if (wantvr) {

/*        Want right eigenvectors */
/*        Copy Householder vectors to VR */

	*(unsigned char *)side = 'R';
	clacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
		;

/*        Generate unitary matrix in VR */
/*        (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) */
/*        (RWorkspace: none) */

	i__1 = *lwork - iwrk + 1;
	cunghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk], 
		 &i__1, &ierr);

/*        Perform QR iteration, accumulating Schur vectors in VR */
/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
/*        (RWorkspace: none) */

	iwrk = itau;
	i__1 = *lwork - iwrk + 1;
	chseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vr[
		vr_offset], ldvr, &work[iwrk], &i__1, info);

    } else {

/*        Compute eigenvalues only */
/*        (CWorkspace: need 1, prefer HSWORK (see comments) ) */
/*        (RWorkspace: none) */

	iwrk = itau;
	i__1 = *lwork - iwrk + 1;
	chseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vr[
		vr_offset], ldvr, &work[iwrk], &i__1, info);
    }

/*     If INFO > 0 from CHSEQR, then quit */

    if (*info > 0) {
	goto L50;
    }

    if (wantvl || wantvr) {

/*        Compute left and/or right eigenvectors */
/*        (CWorkspace: need 2*N) */
/*        (RWorkspace: need 2*N) */

	irwork = ibal + *n;
	ctrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl, 
		 &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &rwork[irwork], 
		&ierr);
    }

    if (wantvl) {

/*        Undo balancing of left eigenvectors */
/*        (CWorkspace: none) */
/*        (RWorkspace: need N) */

	cgebak_("B", "L", n, &ilo, &ihi, &rwork[ibal], n, &vl[vl_offset], 
		ldvl, &ierr);

/*        Normalize left eigenvectors and make largest component real */

	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    scl = 1.f / scnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
	    csscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
	    i__2 = *n;
	    for (k = 1; k <= i__2; ++k) {
		i__3 = k + i__ * vl_dim1;
/* Computing 2nd power */
		r__1 = vl[i__3].r;
/* Computing 2nd power */
		r__2 = r_imag(&vl[k + i__ * vl_dim1]);
		rwork[irwork + k - 1] = r__1 * r__1 + r__2 * r__2;
/* L10: */
	    }
	    k = isamax_(n, &rwork[irwork], &c__1);
	    r_cnjg(&q__2, &vl[k + i__ * vl_dim1]);
	    r__1 = sqrt(rwork[irwork + k - 1]);
	    q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
	    tmp.r = q__1.r, tmp.i = q__1.i;
	    cscal_(n, &tmp, &vl[i__ * vl_dim1 + 1], &c__1);
	    i__2 = k + i__ * vl_dim1;
	    i__3 = k + i__ * vl_dim1;
	    r__1 = vl[i__3].r;
	    q__1.r = r__1, q__1.i = 0.f;
	    vl[i__2].r = q__1.r, vl[i__2].i = q__1.i;
/* L20: */
	}
    }

    if (wantvr) {

/*        Undo balancing of right eigenvectors */
/*        (CWorkspace: none) */
/*        (RWorkspace: need N) */

	cgebak_("B", "R", n, &ilo, &ihi, &rwork[ibal], n, &vr[vr_offset], 
		ldvr, &ierr);

/*        Normalize right eigenvectors and make largest component real */

	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    scl = 1.f / scnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
	    csscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
	    i__2 = *n;
	    for (k = 1; k <= i__2; ++k) {
		i__3 = k + i__ * vr_dim1;
/* Computing 2nd power */
		r__1 = vr[i__3].r;
/* Computing 2nd power */
		r__2 = r_imag(&vr[k + i__ * vr_dim1]);
		rwork[irwork + k - 1] = r__1 * r__1 + r__2 * r__2;
/* L30: */
	    }
	    k = isamax_(n, &rwork[irwork], &c__1);
	    r_cnjg(&q__2, &vr[k + i__ * vr_dim1]);
	    r__1 = sqrt(rwork[irwork + k - 1]);
	    q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
	    tmp.r = q__1.r, tmp.i = q__1.i;
	    cscal_(n, &tmp, &vr[i__ * vr_dim1 + 1], &c__1);
	    i__2 = k + i__ * vr_dim1;
	    i__3 = k + i__ * vr_dim1;
	    r__1 = vr[i__3].r;
	    q__1.r = r__1, q__1.i = 0.f;
	    vr[i__2].r = q__1.r, vr[i__2].i = q__1.i;
/* L40: */
	}
    }

/*     Undo scaling if necessary */

L50:
    if (scalea) {
	i__1 = *n - *info;
/* Computing MAX */
	i__3 = *n - *info;
	i__2 = MAX(i__3,1);
	clascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[*info + 1]
, &i__2, &ierr);
	if (*info > 0) {
	    i__1 = ilo - 1;
	    clascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[1], n, 
		     &ierr);
	}
    }

    work[1].r = (float) maxwrk, work[1].i = 0.f;
    return 0;

/*     End of CGEEV */

} /* cgeev_ */
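
The LWORK = -1 workspace query described in the comments above is the usual way to size WORK before the real call. A hypothetical driver, assuming the same f2c-style prototype as the code above (the 2-by-2 rotation matrix and the output format are illustrative only):

#include <stdio.h>
#include <stdlib.h>

typedef struct { float r, i; } complex;          /* f2c complex layout */

extern int cgeev_(char *jobvl, char *jobvr, int *n, complex *a, int *lda,
                  complex *w, complex *vl, int *ldvl, complex *vr, int *ldvr,
                  complex *work, int *lwork, float *rwork, int *info);

int main(void)
{
    /* A = [0 -1; 1 0] in column-major order; eigenvalues are +i and -i. */
    int n = 2, lda = 2, ldvl = 1, ldvr = 2, lwork, info;
    complex a[4] = {{0.f,0.f},{1.f,0.f},{-1.f,0.f},{0.f,0.f}};
    complex w[2], vl[1], vr[4], wkopt;
    float rwork[4];                              /* 2*N */

    /* Workspace query: only WORK(1) is set, to the optimal LWORK. */
    lwork = -1;
    cgeev_("N", "V", &n, a, &lda, w, vl, &ldvl, vr, &ldvr, &wkopt, &lwork,
           rwork, &info);
    lwork = (int) wkopt.r;

    complex *work = malloc((size_t) lwork * sizeof *work);
    cgeev_("N", "V", &n, a, &lda, w, vl, &ldvl, vr, &ldvr, work, &lwork,
           rwork, &info);
    printf("info=%d  w1=(%g,%g)  w2=(%g,%g)\n",
           info, w[0].r, w[0].i, w[1].r, w[1].i);
    free(work);
    return 0;
}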
Example #30
0
doublereal 
f2c_scnrm2(integer* N, 
           complex* X, integer* incX)
{
    return scnrm2_(N, X, incX);
}
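
A call through this wrapper behaves exactly like a direct scnrm2_ call. A tiny hypothetical usage, assuming f2c.h supplies the integer/complex/doublereal typedefs:

#include <stdio.h>
#include "f2c.h"            /* assumed to define integer, complex, doublereal */

doublereal f2c_scnrm2(integer *N, complex *X, integer *incX);

int main(void)
{
    integer n = 2, inc = 1;
    complex x[2] = {{3.f, 4.f}, {0.f, 0.f}};     /* |3+4i| = 5 */
    printf("%f\n", f2c_scnrm2(&n, x, &inc));     /* prints 5.000000 */
    return 0;
}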