Example 1
 int sbdsqr_(char *uplo, int *n, int *ncvt, int *
	nru, int *ncc, float *d__, float *e, float *vt, int *ldvt, float *
	u, int *ldu, float *c__, int *ldc, float *work, int *info)
{
    /* System generated locals */
    int c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, 
	    i__2;
    float r__1, r__2, r__3, r__4;
    double d__1;

    /* Builtin functions */
    double pow_dd(double *, double *), sqrt(double), r_sign(float *
	    , float *);

    /* Local variables */
    float f, g, h__;
    int i__, j, m;
    float r__, cs;
    int ll;
    float sn, mu;
    int nm1, nm12, nm13, lll;
    float eps, sll, tol, abse;
    int idir;
    float abss;
    int oldm;
    float cosl;
    int isub, iter;
    float unfl, sinl, cosr, smin, smax, sinr;
    extern  int srot_(int *, float *, int *, float *, 
	    int *, float *, float *), slas2_(float *, float *, float *, float *, 
	     float *);
    extern int lsame_(char *, char *);
    float oldcs;
    extern  int sscal_(int *, float *, float *, int *);
    int oldll;
    float shift, sigmn, oldsn;
    int maxit;
    float sminl;
    extern  int slasr_(char *, char *, char *, int *, 
	    int *, float *, float *, float *, int *);
    float sigmx;
    int lower;
    extern  int sswap_(int *, float *, int *, float *, 
	    int *), slasq1_(int *, float *, float *, float *, int *),
	     slasv2_(float *, float *, float *, float *, float *, float *, float *, 
	    float *, float *);
    extern double slamch_(char *);
    extern  int xerbla_(char *, int *);
    float sminoa;
    extern  int slartg_(float *, float *, float *, float *, float *
);
    float thresh;
    int rotate;
    float tolmul;


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     January 2007 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  SBDSQR computes the singular values and, optionally, the right and/or */
/*  left singular vectors from the singular value decomposition (SVD) of */
/*  a real N-by-N (upper or lower) bidiagonal matrix B using the implicit */
/*  zero-shift QR algorithm.  The SVD of B has the form */

/*     B = Q * S * P**T */

/*  where S is the diagonal matrix of singular values, Q is an orthogonal */
/*  matrix of left singular vectors, and P is an orthogonal matrix of */
/*  right singular vectors.  If left singular vectors are requested, this */
/*  subroutine actually returns U*Q instead of Q, and, if right singular */
/*  vectors are requested, this subroutine returns P**T*VT instead of */
/*  P**T, for given real input matrices U and VT.  When U and VT are the */
/*  orthogonal matrices that reduce a general matrix A to bidiagonal */
/*  form:  A = U*B*VT, as computed by SGEBRD, then */

/*     A = (U*Q) * S * (P**T*VT) */

/*  is the SVD of A.  Optionally, the subroutine may also compute Q**T*C */
/*  for a given real input matrix C. */

/*  See "Computing  Small Singular Values of Bidiagonal Matrices With */
/*  Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, */
/*  LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11, */
/*  no. 5, pp. 873-912, Sept 1990) and */
/*  "Accurate singular values and differential qd algorithms," by */
/*  B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics */
/*  Department, University of California at Berkeley, July 1992 */
/*  for a detailed description of the algorithm. */

/*  Arguments */
/*  ========= */

/*  UPLO    (input) CHARACTER*1 */
/*          = 'U':  B is upper bidiagonal; */
/*          = 'L':  B is lower bidiagonal. */

/*  N       (input) INTEGER */
/*          The order of the matrix B.  N >= 0. */

/*  NCVT    (input) INTEGER */
/*          The number of columns of the matrix VT. NCVT >= 0. */

/*  NRU     (input) INTEGER */
/*          The number of rows of the matrix U. NRU >= 0. */

/*  NCC     (input) INTEGER */
/*          The number of columns of the matrix C. NCC >= 0. */

/*  D       (input/output) REAL array, dimension (N) */
/*          On entry, the n diagonal elements of the bidiagonal matrix B. */
/*          On exit, if INFO=0, the singular values of B in decreasing */
/*          order. */

/*  E       (input/output) REAL array, dimension (N-1) */
/*          On entry, the N-1 offdiagonal elements of the bidiagonal */
/*          matrix B. */
/*          On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E */
/*          will contain the diagonal and superdiagonal elements of a */
/*          bidiagonal matrix orthogonally equivalent to the one given */
/*          as input. */

/*  VT      (input/output) REAL array, dimension (LDVT, NCVT) */
/*          On entry, an N-by-NCVT matrix VT. */
/*          On exit, VT is overwritten by P**T * VT. */
/*          Not referenced if NCVT = 0. */

/*  LDVT    (input) INTEGER */
/*          The leading dimension of the array VT. */
/*          LDVT >= MAX(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0. */

/*  U       (input/output) REAL array, dimension (LDU, N) */
/*          On entry, an NRU-by-N matrix U. */
/*          On exit, U is overwritten by U * Q. */
/*          Not referenced if NRU = 0. */

/*  LDU     (input) INTEGER */
/*          The leading dimension of the array U.  LDU >= MAX(1,NRU). */

/*  C       (input/output) REAL array, dimension (LDC, NCC) */
/*          On entry, an N-by-NCC matrix C. */
/*          On exit, C is overwritten by Q**T * C. */
/*          Not referenced if NCC = 0. */

/*  LDC     (input) INTEGER */
/*          The leading dimension of the array C. */
/*          LDC >= MAX(1,N) if NCC > 0; LDC >=1 if NCC = 0. */

/*  WORK    (workspace) REAL array, dimension (4*N) */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  If INFO = -i, the i-th argument had an illegal value */
/*          > 0: */
/*             if NCVT = NRU = NCC = 0, */
/*                = 1, a split was marked by a positive value in E */
/*                = 2, current block of Z not diagonalized after 30*N */
/*                     iterations (in inner while loop) */
/*                = 3, termination criterion of outer while loop not met */
/*                     (program created more than N unreduced blocks) */
/*             else (if NCVT > 0, NRU > 0, or NCC > 0), */
/*                   the algorithm did not converge; D and E contain the */
/*                   elements of a bidiagonal matrix which is orthogonally */
/*                   similar to the input matrix B;  if INFO = i, i */
/*                   elements of E have not converged to zero. */

/*  Internal Parameters */
/*  =================== */

/*  TOLMUL  REAL, default = MAX(10,MIN(100,EPS**(-1/8))) */
/*          TOLMUL controls the convergence criterion of the QR loop. */
/*          If it is positive, TOLMUL*EPS is the desired relative */
/*             precision in the computed singular values. */
/*          If it is negative, ABS(TOLMUL*EPS*sigma_max) is the */
/*             desired absolute accuracy in the computed singular */
/*             values (corresponds to relative accuracy */
/*             ABS(TOLMUL*EPS) in the largest singular value). */
/*          ABS(TOLMUL) should be between 1 and 1/EPS, and preferably */
/*             between 10 (for fast convergence) and .1/EPS */
/*             (for there to be some accuracy in the results). */
/*          Default is to lose at either one eighth or 2 of the */
/*             available decimal digits in each computed singular value */
/*             (whichever is smaller). */

/*  MAXITR  INTEGER, default = 6 */
/*          MAXITR controls the maximum number of passes of the */
/*          algorithm through its inner loop. The algorithm stops */
/*          (and so fails to converge) if the number of passes */
/*          through the inner loop exceeds MAXITR*N**2. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input parameters. */

    /* Parameter adjustments */
    --d__;
    --e;
    vt_dim1 = *ldvt;
    vt_offset = 1 + vt_dim1;
    vt -= vt_offset;
    u_dim1 = *ldu;
    u_offset = 1 + u_dim1;
    u -= u_offset;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1;
    c__ -= c_offset;
    --work;

    /* Function Body */
    *info = 0;
    lower = lsame_(uplo, "L");
    if (! lsame_(uplo, "U") && ! lower) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*ncvt < 0) {
	*info = -3;
    } else if (*nru < 0) {
	*info = -4;
    } else if (*ncc < 0) {
	*info = -5;
    } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < MAX(1,*n)) {
	*info = -9;
    } else if (*ldu < MAX(1,*nru)) {
	*info = -11;
    } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < MAX(1,*n)) {
	*info = -13;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("SBDSQR", &i__1);
	return 0;
    }
    if (*n == 0) {
	return 0;
    }
    if (*n == 1) {
	goto L160;
    }

/*     ROTATE is true if any singular vectors desired, false otherwise */

    rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;

/*     If no singular vectors desired, use qd algorithm */

    if (! rotate) {
	slasq1_(n, &d__[1], &e[1], &work[1], info);
	return 0;
    }

    nm1 = *n - 1;
    nm12 = nm1 + nm1;
    nm13 = nm12 + nm1;
    idir = 0;

/*     Get machine constants */

    eps = slamch_("Epsilon");
    unfl = slamch_("Safe minimum");

/*     If matrix lower bidiagonal, rotate to be upper bidiagonal */
/*     by applying Givens rotations on the left */

    if (lower) {
	i__1 = *n - 1;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
	    d__[i__] = r__;
	    e[i__] = sn * d__[i__ + 1];
	    d__[i__ + 1] = cs * d__[i__ + 1];
	    work[i__] = cs;
	    work[nm1 + i__] = sn;
/* L10: */
	}

/*        Update singular vectors if desired */

	if (*nru > 0) {
	    slasr_("R", "V", "F", nru, n, &work[1], &work[*n], &u[u_offset], 
		    ldu);
	}
	if (*ncc > 0) {
	    slasr_("L", "V", "F", n, ncc, &work[1], &work[*n], &c__[c_offset], 
		     ldc);
	}
    }

/*     Compute singular values to relative accuracy TOL */
/*     (By setting TOL to be negative, algorithm will compute */
/*     singular values to absolute accuracy ABS(TOL)*norm(input matrix)) */

/* Computing MAX */
/* Computing MIN */
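/*     (c_b15 is presumably the file-scope exponent constant -1/8, so the */
/*     pow_dd call below evaluates EPS**(-1/8) for the TOLMUL default.) */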
    d__1 = (double) eps;
    r__3 = 100.f, r__4 = pow_dd(&d__1, &c_b15);
    r__1 = 10.f, r__2 = MIN(r__3,r__4);
    tolmul = MAX(r__1,r__2);
    tol = tolmul * eps;

/*     Compute approximate maximum, minimum singular values */

    smax = 0.f;
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
/* Computing MAX */
	r__2 = smax, r__3 = (r__1 = d__[i__], ABS(r__1));
	smax = MAX(r__2,r__3);
/* L20: */
    }
    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
/* Computing MAX */
	r__2 = smax, r__3 = (r__1 = e[i__], ABS(r__1));
	smax = MAX(r__2,r__3);
/* L30: */
    }
    sminl = 0.f;
    if (tol >= 0.f) {

/*        Relative accuracy desired */

	sminoa = ABS(d__[1]);
	if (sminoa == 0.f) {
	    goto L50;
	}
	mu = sminoa;
	i__1 = *n;
	for (i__ = 2; i__ <= i__1; ++i__) {
	    mu = (r__2 = d__[i__], ABS(r__2)) * (mu / (mu + (r__1 = e[i__ - 
		    1], ABS(r__1))));
	    sminoa = MIN(sminoa,mu);
	    if (sminoa == 0.f) {
		goto L50;
	    }
/* L40: */
	}
L50:
	sminoa /= sqrt((float) (*n));
/* Computing MAX */
	r__1 = tol * sminoa, r__2 = *n * 6 * *n * unfl;
	thresh = MAX(r__1,r__2);
    } else {

/*        Absolute accuracy desired */

/* Computing MAX */
	r__1 = ABS(tol) * smax, r__2 = *n * 6 * *n * unfl;
	thresh = MAX(r__1,r__2);
    }

/*     Prepare for main iteration loop for the singular values */
/*     (MAXIT is the maximum number of passes through the inner */
/*     loop permitted before nonconvergence signalled.) */

    maxit = *n * 6 * *n;
    iter = 0;
    oldll = -1;
    oldm = -1;

/*     M points to last element of unconverged part of matrix */

    m = *n;

/*     Begin main iteration loop */

L60:

/*     Check for convergence or exceeding iteration count */

    if (m <= 1) {
	goto L160;
    }
    if (iter > maxit) {
	goto L200;
    }

/*     Find diagonal block of matrix to work on */

    if (tol < 0.f && (r__1 = d__[m], ABS(r__1)) <= thresh) {
	d__[m] = 0.f;
    }
    smax = (r__1 = d__[m], ABS(r__1));
    smin = smax;
    i__1 = m - 1;
    for (lll = 1; lll <= i__1; ++lll) {
	ll = m - lll;
	abss = (r__1 = d__[ll], ABS(r__1));
	abse = (r__1 = e[ll], ABS(r__1));
	if (tol < 0.f && abss <= thresh) {
	    d__[ll] = 0.f;
	}
	if (abse <= thresh) {
	    goto L80;
	}
	smin = MIN(smin,abss);
/* Computing MAX */
	r__1 = MAX(smax,abss);
	smax = MAX(r__1,abse);
/* L70: */
    }
    ll = 0;
    goto L90;
L80:
    e[ll] = 0.f;

/*     Matrix splits since E(LL) = 0 */

    if (ll == m - 1) {

/*        Convergence of bottom singular value, return to top of loop */

	--m;
	goto L60;
    }
L90:
    ++ll;

/*     E(LL) through E(M-1) are nonzero, E(LL-1) is zero */

    if (ll == m - 1) {

/*        2 by 2 block, handle separately */

	slasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr, 
		 &sinl, &cosl);
	d__[m - 1] = sigmx;
	e[m - 1] = 0.f;
	d__[m] = sigmn;

/*        Compute singular vectors, if desired */

	if (*ncvt > 0) {
	    srot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, &
		    cosr, &sinr);
	}
	if (*nru > 0) {
	    srot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], &
		    c__1, &cosl, &sinl);
	}
	if (*ncc > 0) {
	    srot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, &
		    cosl, &sinl);
	}
	m += -2;
	goto L60;
    }

/*     If working on new submatrix, choose shift direction */
/*     (from larger end diagonal element towards smaller) */

    if (ll > oldm || m < oldll) {
	if ((r__1 = d__[ll], ABS(r__1)) >= (r__2 = d__[m], ABS(r__2))) {

/*           Chase bulge from top (big end) to bottom (small end) */

	    idir = 1;
	} else {

/*           Chase bulge from bottom (big end) to top (small end) */

	    idir = 2;
	}
    }

/*     Apply convergence tests */

    if (idir == 1) {

/*        Run convergence test in forward direction */
/*        First apply standard test to bottom of matrix */

	if ((r__2 = e[m - 1], ABS(r__2)) <= ABS(tol) * (r__1 = d__[m], ABS(
		r__1)) || tol < 0.f && (r__3 = e[m - 1], ABS(r__3)) <= 
		thresh) {
	    e[m - 1] = 0.f;
	    goto L60;
	}

	if (tol >= 0.f) {

/*           If relative accuracy desired, */
/*           apply convergence criterion forward */

	    mu = (r__1 = d__[ll], ABS(r__1));
	    sminl = mu;
	    i__1 = m - 1;
	    for (lll = ll; lll <= i__1; ++lll) {
		if ((r__1 = e[lll], ABS(r__1)) <= tol * mu) {
		    e[lll] = 0.f;
		    goto L60;
		}
		mu = (r__2 = d__[lll + 1], ABS(r__2)) * (mu / (mu + (r__1 = 
			e[lll], ABS(r__1))));
		sminl = MIN(sminl,mu);
/* L100: */
	    }
	}

    } else {

/*        Run convergence test in backward direction */
/*        First apply standard test to top of matrix */

	if ((r__2 = e[ll], ABS(r__2)) <= ABS(tol) * (r__1 = d__[ll], ABS(
		r__1)) || tol < 0.f && (r__3 = e[ll], ABS(r__3)) <= thresh) {
	    e[ll] = 0.f;
	    goto L60;
	}

	if (tol >= 0.f) {

/*           If relative accuracy desired, */
/*           apply convergence criterion backward */

	    mu = (r__1 = d__[m], ABS(r__1));
	    sminl = mu;
	    i__1 = ll;
	    for (lll = m - 1; lll >= i__1; --lll) {
		if ((r__1 = e[lll], ABS(r__1)) <= tol * mu) {
		    e[lll] = 0.f;
		    goto L60;
		}
		mu = (r__2 = d__[lll], ABS(r__2)) * (mu / (mu + (r__1 = e[
			lll], ABS(r__1))));
		sminl = MIN(sminl,mu);
/* L110: */
	    }
	}
    }
    oldll = ll;
    oldm = m;

/*     Compute shift.  First, test if shifting would ruin relative */
/*     accuracy, and if so set the shift to zero. */

/* Computing MAX */
    r__1 = eps, r__2 = tol * .01f;
    if (tol >= 0.f && *n * tol * (sminl / smax) <= MAX(r__1,r__2)) {

/*        Use a zero shift to avoid loss of relative accuracy */

	shift = 0.f;
    } else {

/*        Compute the shift from 2-by-2 block at end of matrix */

	if (idir == 1) {
	    sll = (r__1 = d__[ll], ABS(r__1));
	    slas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__);
	} else {
	    sll = (r__1 = d__[m], ABS(r__1));
	    slas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__);
	}

/*        Test if shift negligible, and if so set to zero */

	if (sll > 0.f) {
/* Computing 2nd power */
	    r__1 = shift / sll;
	    if (r__1 * r__1 < eps) {
		shift = 0.f;
	    }
	}
    }

/*     Increment iteration count */

    iter = iter + m - ll;

/*     If SHIFT = 0, do simplified QR iteration */

    if (shift == 0.f) {
	if (idir == 1) {

/*           Chase bulge from top to bottom */
/*           Save cosines and sines for later singular vector updates */

	    cs = 1.f;
	    oldcs = 1.f;
	    i__1 = m - 1;
	    for (i__ = ll; i__ <= i__1; ++i__) {
		r__1 = d__[i__] * cs;
		slartg_(&r__1, &e[i__], &cs, &sn, &r__);
		if (i__ > ll) {
		    e[i__ - 1] = oldsn * r__;
		}
		r__1 = oldcs * r__;
		r__2 = d__[i__ + 1] * sn;
		slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]);
		work[i__ - ll + 1] = cs;
		work[i__ - ll + 1 + nm1] = sn;
		work[i__ - ll + 1 + nm12] = oldcs;
		work[i__ - ll + 1 + nm13] = oldsn;
/* L120: */
	    }
	    h__ = d__[m] * cs;
	    d__[m] = h__ * oldcs;
	    e[m - 1] = h__ * oldsn;

/*           Update singular vectors */

	    if (*ncvt > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
			ll + vt_dim1], ldvt);
	    }
	    if (*nru > 0) {
		i__1 = m - ll + 1;
		slasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13 
			+ 1], &u[ll * u_dim1 + 1], ldu);
	    }
	    if (*ncc > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13 
			+ 1], &c__[ll + c_dim1], ldc);
	    }

/*           Test convergence */

	    if ((r__1 = e[m - 1], ABS(r__1)) <= thresh) {
		e[m - 1] = 0.f;
	    }

	} else {

/*           Chase bulge from bottom to top */
/*           Save cosines and sines for later singular vector updates */

	    cs = 1.f;
	    oldcs = 1.f;
	    i__1 = ll + 1;
	    for (i__ = m; i__ >= i__1; --i__) {
		r__1 = d__[i__] * cs;
		slartg_(&r__1, &e[i__ - 1], &cs, &sn, &r__);
		if (i__ < m) {
		    e[i__] = oldsn * r__;
		}
		r__1 = oldcs * r__;
		r__2 = d__[i__ - 1] * sn;
		slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]);
		work[i__ - ll] = cs;
		work[i__ - ll + nm1] = -sn;
		work[i__ - ll + nm12] = oldcs;
		work[i__ - ll + nm13] = -oldsn;
/* L130: */
	    }
	    h__ = d__[ll] * cs;
	    d__[ll] = h__ * oldcs;
	    e[ll] = h__ * oldsn;

/*           Update singular vectors */

	    if (*ncvt > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
			nm13 + 1], &vt[ll + vt_dim1], ldvt);
	    }
	    if (*nru > 0) {
		i__1 = m - ll + 1;
		slasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
			 u_dim1 + 1], ldu);
	    }
	    if (*ncc > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
			ll + c_dim1], ldc);
	    }

/*           Test convergence */

	    if ((r__1 = e[ll], ABS(r__1)) <= thresh) {
		e[ll] = 0.f;
	    }
	}
    } else {

/*        Use nonzero shift */

	if (idir == 1) {

/*           Chase bulge from top to bottom */
/*           Save cosines and sines for later singular vector updates */

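/*           (c_b49 is presumably the file-scope constant 1.f, so r_sign */
/*           returns +/-1 carrying the sign of D(LL).) */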
	    f = ((r__1 = d__[ll], ABS(r__1)) - shift) * (r_sign(&c_b49, &d__[
		    ll]) + shift / d__[ll]);
	    g = e[ll];
	    i__1 = m - 1;
	    for (i__ = ll; i__ <= i__1; ++i__) {
		slartg_(&f, &g, &cosr, &sinr, &r__);
		if (i__ > ll) {
		    e[i__ - 1] = r__;
		}
		f = cosr * d__[i__] + sinr * e[i__];
		e[i__] = cosr * e[i__] - sinr * d__[i__];
		g = sinr * d__[i__ + 1];
		d__[i__ + 1] = cosr * d__[i__ + 1];
		slartg_(&f, &g, &cosl, &sinl, &r__);
		d__[i__] = r__;
		f = cosl * e[i__] + sinl * d__[i__ + 1];
		d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__];
		if (i__ < m - 1) {
		    g = sinl * e[i__ + 1];
		    e[i__ + 1] = cosl * e[i__ + 1];
		}
		work[i__ - ll + 1] = cosr;
		work[i__ - ll + 1 + nm1] = sinr;
		work[i__ - ll + 1 + nm12] = cosl;
		work[i__ - ll + 1 + nm13] = sinl;
/* L140: */
	    }
	    e[m - 1] = f;

/*           Update singular vectors */

	    if (*ncvt > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
			ll + vt_dim1], ldvt);
	    }
	    if (*nru > 0) {
		i__1 = m - ll + 1;
		slasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13 
			+ 1], &u[ll * u_dim1 + 1], ldu);
	    }
	    if (*ncc > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13 
			+ 1], &c__[ll + c_dim1], ldc);
	    }

/*           Test convergence */

	    if ((r__1 = e[m - 1], ABS(r__1)) <= thresh) {
		e[m - 1] = 0.f;
	    }

	} else {

/*           Chase bulge from bottom to top */
/*           Save cosines and sines for later singular vector updates */

	    f = ((r__1 = d__[m], ABS(r__1)) - shift) * (r_sign(&c_b49, &d__[
		    m]) + shift / d__[m]);
	    g = e[m - 1];
	    i__1 = ll + 1;
	    for (i__ = m; i__ >= i__1; --i__) {
		slartg_(&f, &g, &cosr, &sinr, &r__);
		if (i__ < m) {
		    e[i__] = r__;
		}
		f = cosr * d__[i__] + sinr * e[i__ - 1];
		e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__];
		g = sinr * d__[i__ - 1];
		d__[i__ - 1] = cosr * d__[i__ - 1];
		slartg_(&f, &g, &cosl, &sinl, &r__);
		d__[i__] = r__;
		f = cosl * e[i__ - 1] + sinl * d__[i__ - 1];
		d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1];
		if (i__ > ll + 1) {
		    g = sinl * e[i__ - 2];
		    e[i__ - 2] = cosl * e[i__ - 2];
		}
		work[i__ - ll] = cosr;
		work[i__ - ll + nm1] = -sinr;
		work[i__ - ll + nm12] = cosl;
		work[i__ - ll + nm13] = -sinl;
/* L150: */
	    }
	    e[ll] = f;

/*           Test convergence */

	    if ((r__1 = e[ll], ABS(r__1)) <= thresh) {
		e[ll] = 0.f;
	    }

/*           Update singular vectors if desired */

	    if (*ncvt > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
			nm13 + 1], &vt[ll + vt_dim1], ldvt);
	    }
	    if (*nru > 0) {
		i__1 = m - ll + 1;
		slasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
			 u_dim1 + 1], ldu);
	    }
	    if (*ncc > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
			ll + c_dim1], ldc);
	    }
	}
    }

/*     QR iteration finished, go back and check convergence */

    goto L60;

/*     All singular values converged, so make them positive */

L160:
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (d__[i__] < 0.f) {
	    d__[i__] = -d__[i__];

/*           Change sign of singular vectors, if desired */

	    if (*ncvt > 0) {
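/*              (c_b72 is presumably the file-scope constant -1.f, so this */
/*              negates row I of VT to match the sign flip of D(I).) */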
		sscal_(ncvt, &c_b72, &vt[i__ + vt_dim1], ldvt);
	    }
	}
/* L170: */
    }

/*     Sort the singular values into decreasing order (insertion sort on */
/*     singular values, but only one transposition per singular vector) */

    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {

/*        Scan for smallest D(I) */

	isub = 1;
	smin = d__[1];
	i__2 = *n + 1 - i__;
	for (j = 2; j <= i__2; ++j) {
	    if (d__[j] <= smin) {
		isub = j;
		smin = d__[j];
	    }
/* L180: */
	}
	if (isub != *n + 1 - i__) {

/*           Swap singular values and vectors */

	    d__[isub] = d__[*n + 1 - i__];
	    d__[*n + 1 - i__] = smin;
	    if (*ncvt > 0) {
		sswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ + 
			vt_dim1], ldvt);
	    }
	    if (*nru > 0) {
		sswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) * 
			u_dim1 + 1], &c__1);
	    }
	    if (*ncc > 0) {
		sswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ + 
			c_dim1], ldc);
	    }
	}
/* L190: */
    }
    goto L220;

/*     Maximum number of iterations exceeded, failure to converge */

L200:
    *info = 0;
    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (e[i__] != 0.f) {
	    ++(*info);
	}
/* L210: */
    }
L220:
    return 0;

/*     End of SBDSQR */

} /* sbdsqr_ */
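A minimal, hypothetical driver (not part of LAPACK) sketching one way the routine above might be called: it sets up a 3-by-3 upper bidiagonal matrix, starts VT and U as identities so that on exit they hold P**T and Q directly, and assumes the f2c-style prototype shown above with the translated sbdsqr_ and its dependencies linked in.

#include <stdio.h>

int sbdsqr_(char *uplo, int *n, int *ncvt, int *nru, int *ncc,
            float *d__, float *e, float *vt, int *ldvt,
            float *u, int *ldu, float *c__, int *ldc,
            float *work, int *info);

int main(void)
{
    int n = 3, ncvt = 3, nru = 3, ncc = 0, ldvt = 3, ldu = 3, ldc = 1, info;
    float d[3]  = {3.f, 2.f, 1.f};      /* diagonal of B */
    float e[3]  = {0.5f, 0.25f, 0.f};   /* superdiagonal; extra slot for versions
                                           that use E(N) as workspace */
    float vt[9] = {1.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 1.f};  /* identity */
    float u[9]  = {1.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 1.f};  /* identity */
    float work[12];                     /* 4*N workspace */
    float cdummy = 0.f;                 /* C is not referenced when NCC = 0 */

    sbdsqr_("U", &n, &ncvt, &nru, &ncc, d, e, vt, &ldvt,
            u, &ldu, &cdummy, &ldc, work, &info);

    printf("info = %d, singular values = %g %g %g\n", info, d[0], d[1], d[2]);
    return 0;
}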
Example 2
/* Subroutine */ int clags2_(logical *upper, real *a1, complex *a2, real *a3, 
	real *b1, complex *b2, real *b3, real *csu, complex *snu, real *csv, 
	complex *snv, real *csq, complex *snq)
{
    /* System generated locals */
    real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8;
    complex q__1, q__2, q__3, q__4, q__5;

    /* Builtin functions */
    double c_abs(complex *), r_imag(complex *);
    void r_cnjg(complex *, complex *);

    /* Local variables */
    static real a;
    static complex b, c__;
    static real d__;
    static complex r__, d1;
    static real s1, s2, fb, fc;
    static complex ua11, ua12, ua21, ua22, vb11, vb12, vb21, vb22;
    static real csl, csr, snl, snr, aua11, aua12, aua21, aua22, avb11, avb12, 
	    avb21, avb22, ua11r, ua22r, vb11r, vb22r;
    extern /* Subroutine */ int slasv2_(real *, real *, real *, real *, real *
	    , real *, real *, real *, real *), clartg_(complex *, complex *, 
	    real *, complex *, complex *);


/*  -- LAPACK auxiliary routine (version 3.0) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */
/*     Courant Institute, Argonne National Lab, and Rice University */
/*     September 30, 1994 */

/*     .. Scalar Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CLAGS2 computes 2-by-2 unitary matrices U, V and Q, such */
/*  that if ( UPPER ) then */

/*            U'*A*Q = U'*( A1 A2 )*Q = ( x  0  ) */
/*                        ( 0  A3 )     ( x  x  ) */
/*  and */
/*            V'*B*Q = V'*( B1 B2 )*Q = ( x  0  ) */
/*                        ( 0  B3 )     ( x  x  ) */

/*  or if ( .NOT.UPPER ) then */

/*            U'*A*Q = U'*( A1 0  )*Q = ( x  x  ) */
/*                        ( A2 A3 )     ( 0  x  ) */
/*  and */
/*            V'*B*Q = V'*( B1 0  )*Q = ( x  x  ) */
/*                        ( B2 B3 )     ( 0  x  ) */
/*  where */

/*    U = (     CSU      SNU ), V = (     CSV     SNV ), */
/*        ( -CONJG(SNU)  CSU )      ( -CONJG(SNV) CSV ) */

/*    Q = (     CSQ      SNQ ) */
/*        ( -CONJG(SNQ)  CSQ ) */

/*  Z' denotes the conjugate transpose of Z. */

/*  The rows of the transformed A and B are parallel. Moreover, if the */
/*  input 2-by-2 matrix A is not zero, then the transformed (1,1) entry */
/*  of A is not zero. If the input matrices A and B are both not zero, */
/*  then the transformed (2,2) element of B is not zero, except when the */
/*  first rows of input A and B are parallel and the second rows are */
/*  zero. */

/*  Arguments */
/*  ========= */

/*  UPPER   (input) LOGICAL */
/*          = .TRUE.: the input matrices A and B are upper triangular. */
/*          = .FALSE.: the input matrices A and B are lower triangular. */

/*  A1      (input) REAL */
/*  A2      (input) COMPLEX */
/*  A3      (input) REAL */
/*          On entry, A1, A2 and A3 are elements of the input 2-by-2 */
/*          upper (lower) triangular matrix A. */

/*  B1      (input) REAL */
/*  B2      (input) COMPLEX */
/*  B3      (input) REAL */
/*          On entry, B1, B2 and B3 are elements of the input 2-by-2 */
/*          upper (lower) triangular matrix B. */

/*  CSU     (output) REAL */
/*  SNU     (output) COMPLEX */
/*          The desired unitary matrix U. */

/*  CSV     (output) REAL */
/*  SNV     (output) COMPLEX */
/*          The desired unitary matrix V. */

/*  CSQ     (output) REAL */
/*  SNQ     (output) COMPLEX */
/*          The desired unitary matrix Q. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Statement Functions .. */
/*     .. */
/*     .. Statement Function definitions .. */
/*     .. */
/*     .. Executable Statements .. */

    if (*upper) {

/*        Input matrices A and B are upper triangular matrices */

/*        Form matrix C = A*adj(B) = ( a b ) */
/*                                   ( 0 d ) */

	a = *a1 * *b3;
	d__ = *a3 * *b1;
	q__2.r = *b1 * a2->r, q__2.i = *b1 * a2->i;
	q__3.r = *a1 * b2->r, q__3.i = *a1 * b2->i;
	q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
	b.r = q__1.r, b.i = q__1.i;
	fb = c_abs(&b);

/*        Transform complex 2-by-2 matrix C to real matrix by unitary */
/*        diagonal matrix diag(1,D1). */

	d1.r = 1.f, d1.i = 0.f;
	if (fb != 0.f) {
	    q__1.r = b.r / fb, q__1.i = b.i / fb;
	    d1.r = q__1.r, d1.i = q__1.i;
	}

/*        The SVD of real 2 by 2 triangular C */

/*         ( CSL -SNL )*( A B )*(  CSR  SNR ) = ( R 0 ) */
/*         ( SNL  CSL ) ( 0 D ) ( -SNR  CSR )   ( 0 T ) */

	slasv2_(&a, &fb, &d__, &s1, &s2, &snr, &csr, &snl, &csl);

	if (dabs(csl) >= dabs(snl) || dabs(csr) >= dabs(snr)) {

/*           Compute the (1,1) and (1,2) elements of U'*A and V'*B, */
/*           and (1,2) element of |U|'*|A| and |V|'*|B|. */

	    ua11r = csl * *a1;
	    q__2.r = csl * a2->r, q__2.i = csl * a2->i;
	    q__4.r = snl * d1.r, q__4.i = snl * d1.i;
	    q__3.r = *a3 * q__4.r, q__3.i = *a3 * q__4.i;
	    q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
	    ua12.r = q__1.r, ua12.i = q__1.i;

	    vb11r = csr * *b1;
	    q__2.r = csr * b2->r, q__2.i = csr * b2->i;
	    q__4.r = snr * d1.r, q__4.i = snr * d1.i;
	    q__3.r = *b3 * q__4.r, q__3.i = *b3 * q__4.i;
	    q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
	    vb12.r = q__1.r, vb12.i = q__1.i;

	    aua12 = dabs(csl) * ((r__1 = a2->r, dabs(r__1)) + (r__2 = r_imag(
		    a2), dabs(r__2))) + dabs(snl) * dabs(*a3);
	    avb12 = dabs(csr) * ((r__1 = b2->r, dabs(r__1)) + (r__2 = r_imag(
		    b2), dabs(r__2))) + dabs(snr) * dabs(*b3);

/*           zero (1,2) elements of U'*A and V'*B */

	    if (dabs(ua11r) + ((r__1 = ua12.r, dabs(r__1)) + (r__2 = r_imag(&
		    ua12), dabs(r__2))) == 0.f) {
		q__2.r = vb11r, q__2.i = 0.f;
		q__1.r = -q__2.r, q__1.i = -q__2.i;
		r_cnjg(&q__3, &vb12);
		clartg_(&q__1, &q__3, csq, snq, &r__);
	    } else if (dabs(vb11r) + ((r__1 = vb12.r, dabs(r__1)) + (r__2 = 
		    r_imag(&vb12), dabs(r__2))) == 0.f) {
		q__2.r = ua11r, q__2.i = 0.f;
		q__1.r = -q__2.r, q__1.i = -q__2.i;
		r_cnjg(&q__3, &ua12);
		clartg_(&q__1, &q__3, csq, snq, &r__);
	    } else if (aua12 / (dabs(ua11r) + ((r__1 = ua12.r, dabs(r__1)) + (
		    r__2 = r_imag(&ua12), dabs(r__2)))) <= avb12 / (dabs(
		    vb11r) + ((r__3 = vb12.r, dabs(r__3)) + (r__4 = r_imag(&
		    vb12), dabs(r__4))))) {
		q__2.r = ua11r, q__2.i = 0.f;
		q__1.r = -q__2.r, q__1.i = -q__2.i;
		r_cnjg(&q__3, &ua12);
		clartg_(&q__1, &q__3, csq, snq, &r__);
	    } else {
		q__2.r = vb11r, q__2.i = 0.f;
		q__1.r = -q__2.r, q__1.i = -q__2.i;
		r_cnjg(&q__3, &vb12);
		clartg_(&q__1, &q__3, csq, snq, &r__);
	    }

	    *csu = csl;
	    q__2.r = -d1.r, q__2.i = -d1.i;
	    q__1.r = snl * q__2.r, q__1.i = snl * q__2.i;
	    snu->r = q__1.r, snu->i = q__1.i;
	    *csv = csr;
	    q__2.r = -d1.r, q__2.i = -d1.i;
	    q__1.r = snr * q__2.r, q__1.i = snr * q__2.i;
	    snv->r = q__1.r, snv->i = q__1.i;

	} else {

/*           Compute the (2,1) and (2,2) elements of U'*A and V'*B, */
/*           and (2,2) element of |U|'*|A| and |V|'*|B|. */

	    r_cnjg(&q__4, &d1);
	    q__3.r = -q__4.r, q__3.i = -q__4.i;
	    q__2.r = snl * q__3.r, q__2.i = snl * q__3.i;
	    q__1.r = *a1 * q__2.r, q__1.i = *a1 * q__2.i;
	    ua21.r = q__1.r, ua21.i = q__1.i;
	    r_cnjg(&q__5, &d1);
	    q__4.r = -q__5.r, q__4.i = -q__5.i;
	    q__3.r = snl * q__4.r, q__3.i = snl * q__4.i;
	    q__2.r = q__3.r * a2->r - q__3.i * a2->i, q__2.i = q__3.r * a2->i 
		    + q__3.i * a2->r;
	    r__1 = csl * *a3;
	    q__1.r = q__2.r + r__1, q__1.i = q__2.i;
	    ua22.r = q__1.r, ua22.i = q__1.i;

	    r_cnjg(&q__4, &d1);
	    q__3.r = -q__4.r, q__3.i = -q__4.i;
	    q__2.r = snr * q__3.r, q__2.i = snr * q__3.i;
	    q__1.r = *b1 * q__2.r, q__1.i = *b1 * q__2.i;
	    vb21.r = q__1.r, vb21.i = q__1.i;
	    r_cnjg(&q__5, &d1);
	    q__4.r = -q__5.r, q__4.i = -q__5.i;
	    q__3.r = snr * q__4.r, q__3.i = snr * q__4.i;
	    q__2.r = q__3.r * b2->r - q__3.i * b2->i, q__2.i = q__3.r * b2->i 
		    + q__3.i * b2->r;
	    r__1 = csr * *b3;
	    q__1.r = q__2.r + r__1, q__1.i = q__2.i;
	    vb22.r = q__1.r, vb22.i = q__1.i;

	    aua22 = dabs(snl) * ((r__1 = a2->r, dabs(r__1)) + (r__2 = r_imag(
		    a2), dabs(r__2))) + dabs(csl) * dabs(*a3);
	    avb22 = dabs(snr) * ((r__1 = b2->r, dabs(r__1)) + (r__2 = r_imag(
		    b2), dabs(r__2))) + dabs(csr) * dabs(*b3);

/*           zero (2,2) elements of U'*A and V'*B, and then swap. */

	    if ((r__1 = ua21.r, dabs(r__1)) + (r__2 = r_imag(&ua21), dabs(
		    r__2)) + ((r__3 = ua22.r, dabs(r__3)) + (r__4 = r_imag(&
		    ua22), dabs(r__4))) == 0.f) {
		r_cnjg(&q__2, &vb21);
		q__1.r = -q__2.r, q__1.i = -q__2.i;
		r_cnjg(&q__3, &vb22);
		clartg_(&q__1, &q__3, csq, snq, &r__);
	    } else if ((r__1 = vb21.r, dabs(r__1)) + (r__2 = r_imag(&vb21), 
		    dabs(r__2)) + c_abs(&vb22) == 0.f) {
		r_cnjg(&q__2, &ua21);
		q__1.r = -q__2.r, q__1.i = -q__2.i;
		r_cnjg(&q__3, &ua22);
		clartg_(&q__1, &q__3, csq, snq, &r__);
	    } else if (aua22 / ((r__1 = ua21.r, dabs(r__1)) + (r__2 = r_imag(&
		    ua21), dabs(r__2)) + ((r__3 = ua22.r, dabs(r__3)) + (r__4 
		    = r_imag(&ua22), dabs(r__4)))) <= avb22 / ((r__5 = vb21.r,
		     dabs(r__5)) + (r__6 = r_imag(&vb21), dabs(r__6)) + ((
		    r__7 = vb22.r, dabs(r__7)) + (r__8 = r_imag(&vb22), dabs(
		    r__8))))) {
		r_cnjg(&q__2, &ua21);
		q__1.r = -q__2.r, q__1.i = -q__2.i;
		r_cnjg(&q__3, &ua22);
		clartg_(&q__1, &q__3, csq, snq, &r__);
	    } else {
		r_cnjg(&q__2, &vb21);
		q__1.r = -q__2.r, q__1.i = -q__2.i;
		r_cnjg(&q__3, &vb22);
		clartg_(&q__1, &q__3, csq, snq, &r__);
	    }

	    *csu = snl;
	    q__1.r = csl * d1.r, q__1.i = csl * d1.i;
	    snu->r = q__1.r, snu->i = q__1.i;
	    *csv = snr;
	    q__1.r = csr * d1.r, q__1.i = csr * d1.i;
	    snv->r = q__1.r, snv->i = q__1.i;

	}

    } else {

/*        Input matrices A and B are lower triangular matrices */

/*        Form matrix C = A*adj(B) = ( a 0 ) */
/*                                   ( c d ) */

	a = *a1 * *b3;
	d__ = *a3 * *b1;
	q__2.r = *b3 * a2->r, q__2.i = *b3 * a2->i;
	q__3.r = *a3 * b2->r, q__3.i = *a3 * b2->i;
	q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
	c__.r = q__1.r, c__.i = q__1.i;
	fc = c_abs(&c__);

/*        Transform complex 2-by-2 matrix C to real matrix by unitary */
/*        diagonal matrix diag(d1,1). */

	d1.r = 1.f, d1.i = 0.f;
	if (fc != 0.f) {
	    q__1.r = c__.r / fc, q__1.i = c__.i / fc;
	    d1.r = q__1.r, d1.i = q__1.i;
	}

/*        The SVD of real 2 by 2 triangular C */

/*         ( CSL -SNL )*( A 0 )*(  CSR  SNR ) = ( R 0 ) */
/*         ( SNL  CSL ) ( C D ) ( -SNR  CSR )   ( 0 T ) */

	slasv2_(&a, &fc, &d__, &s1, &s2, &snr, &csr, &snl, &csl);

	if (dabs(csr) >= dabs(snr) || dabs(csl) >= dabs(snl)) {

/*           Compute the (2,1) and (2,2) elements of U'*A and V'*B, */
/*           and (2,1) element of |U|'*|A| and |V|'*|B|. */

	    q__4.r = -d1.r, q__4.i = -d1.i;
	    q__3.r = snr * q__4.r, q__3.i = snr * q__4.i;
	    q__2.r = *a1 * q__3.r, q__2.i = *a1 * q__3.i;
	    q__5.r = csr * a2->r, q__5.i = csr * a2->i;
	    q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
	    ua21.r = q__1.r, ua21.i = q__1.i;
	    ua22r = csr * *a3;

	    q__4.r = -d1.r, q__4.i = -d1.i;
	    q__3.r = snl * q__4.r, q__3.i = snl * q__4.i;
	    q__2.r = *b1 * q__3.r, q__2.i = *b1 * q__3.i;
	    q__5.r = csl * b2->r, q__5.i = csl * b2->i;
	    q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
	    vb21.r = q__1.r, vb21.i = q__1.i;
	    vb22r = csl * *b3;

	    aua21 = dabs(snr) * dabs(*a1) + dabs(csr) * ((r__1 = a2->r, dabs(
		    r__1)) + (r__2 = r_imag(a2), dabs(r__2)));
	    avb21 = dabs(snl) * dabs(*b1) + dabs(csl) * ((r__1 = b2->r, dabs(
		    r__1)) + (r__2 = r_imag(b2), dabs(r__2)));

/*           zero (2,1) elements of U'*A and V'*B. */

	    if ((r__1 = ua21.r, dabs(r__1)) + (r__2 = r_imag(&ua21), dabs(
		    r__2)) + dabs(ua22r) == 0.f) {
		q__1.r = vb22r, q__1.i = 0.f;
		clartg_(&q__1, &vb21, csq, snq, &r__);
	    } else if ((r__1 = vb21.r, dabs(r__1)) + (r__2 = r_imag(&vb21), 
		    dabs(r__2)) + dabs(vb22r) == 0.f) {
		q__1.r = ua22r, q__1.i = 0.f;
		clartg_(&q__1, &ua21, csq, snq, &r__);
	    } else if (aua21 / ((r__1 = ua21.r, dabs(r__1)) + (r__2 = r_imag(&
		    ua21), dabs(r__2)) + dabs(ua22r)) <= avb21 / ((r__3 = 
		    vb21.r, dabs(r__3)) + (r__4 = r_imag(&vb21), dabs(r__4)) 
		    + dabs(vb22r))) {
		q__1.r = ua22r, q__1.i = 0.f;
		clartg_(&q__1, &ua21, csq, snq, &r__);
	    } else {
		q__1.r = vb22r, q__1.i = 0.f;
		clartg_(&q__1, &vb21, csq, snq, &r__);
	    }

	    *csu = csr;
	    r_cnjg(&q__3, &d1);
	    q__2.r = -q__3.r, q__2.i = -q__3.i;
	    q__1.r = snr * q__2.r, q__1.i = snr * q__2.i;
	    snu->r = q__1.r, snu->i = q__1.i;
	    *csv = csl;
	    r_cnjg(&q__3, &d1);
	    q__2.r = -q__3.r, q__2.i = -q__3.i;
	    q__1.r = snl * q__2.r, q__1.i = snl * q__2.i;
	    snv->r = q__1.r, snv->i = q__1.i;

	} else {

/*           Compute the (1,1) and (1,2) elements of U'*A and V'*B, */
/*           and (1,1) element of |U|'*|A| and |V|'*|B|. */

	    r__1 = csr * *a1;
	    r_cnjg(&q__4, &d1);
	    q__3.r = snr * q__4.r, q__3.i = snr * q__4.i;
	    q__2.r = q__3.r * a2->r - q__3.i * a2->i, q__2.i = q__3.r * a2->i 
		    + q__3.i * a2->r;
	    q__1.r = r__1 + q__2.r, q__1.i = q__2.i;
	    ua11.r = q__1.r, ua11.i = q__1.i;
	    r_cnjg(&q__3, &d1);
	    q__2.r = snr * q__3.r, q__2.i = snr * q__3.i;
	    q__1.r = *a3 * q__2.r, q__1.i = *a3 * q__2.i;
	    ua12.r = q__1.r, ua12.i = q__1.i;

	    r__1 = csl * *b1;
	    r_cnjg(&q__4, &d1);
	    q__3.r = snl * q__4.r, q__3.i = snl * q__4.i;
	    q__2.r = q__3.r * b2->r - q__3.i * b2->i, q__2.i = q__3.r * b2->i 
		    + q__3.i * b2->r;
	    q__1.r = r__1 + q__2.r, q__1.i = q__2.i;
	    vb11.r = q__1.r, vb11.i = q__1.i;
	    r_cnjg(&q__3, &d1);
	    q__2.r = snl * q__3.r, q__2.i = snl * q__3.i;
	    q__1.r = *b3 * q__2.r, q__1.i = *b3 * q__2.i;
	    vb12.r = q__1.r, vb12.i = q__1.i;

	    aua11 = dabs(csr) * dabs(*a1) + dabs(snr) * ((r__1 = a2->r, dabs(
		    r__1)) + (r__2 = r_imag(a2), dabs(r__2)));
	    avb11 = dabs(csl) * dabs(*b1) + dabs(snl) * ((r__1 = b2->r, dabs(
		    r__1)) + (r__2 = r_imag(b2), dabs(r__2)));

/*           zero (1,1) elements of U'*A and V'*B, and then swap. */

	    if ((r__1 = ua11.r, dabs(r__1)) + (r__2 = r_imag(&ua11), dabs(
		    r__2)) + ((r__3 = ua12.r, dabs(r__3)) + (r__4 = r_imag(&
		    ua12), dabs(r__4))) == 0.f) {
		clartg_(&vb12, &vb11, csq, snq, &r__);
	    } else if ((r__1 = vb11.r, dabs(r__1)) + (r__2 = r_imag(&vb11), 
		    dabs(r__2)) + ((r__3 = vb12.r, dabs(r__3)) + (r__4 = 
		    r_imag(&vb12), dabs(r__4))) == 0.f) {
		clartg_(&ua12, &ua11, csq, snq, &r__);
	    } else if (aua11 / ((r__1 = ua11.r, dabs(r__1)) + (r__2 = r_imag(&
		    ua11), dabs(r__2)) + ((r__3 = ua12.r, dabs(r__3)) + (r__4 
		    = r_imag(&ua12), dabs(r__4)))) <= avb11 / ((r__5 = vb11.r,
		     dabs(r__5)) + (r__6 = r_imag(&vb11), dabs(r__6)) + ((
		    r__7 = vb12.r, dabs(r__7)) + (r__8 = r_imag(&vb12), dabs(
		    r__8))))) {
		clartg_(&ua12, &ua11, csq, snq, &r__);
	    } else {
		clartg_(&vb12, &vb11, csq, snq, &r__);
	    }

	    *csu = snr;
	    r_cnjg(&q__2, &d1);
	    q__1.r = csr * q__2.r, q__1.i = csr * q__2.i;
	    snu->r = q__1.r, snu->i = q__1.i;
	    *csv = snl;
	    r_cnjg(&q__2, &d1);
	    q__1.r = csl * q__2.r, q__1.i = csl * q__2.i;
	    snv->r = q__1.r, snv->i = q__1.i;

	}

    }

    return 0;

/*     End of CLAGS2 */

} /* clags2_ */
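A similar hypothetical sketch for the routine above (again not part of LAPACK): it feeds CLAGS2 one upper triangular pair and prints the returned rotations. It assumes the f2c.h typedefs (logical, real, complex, TRUE_) used by this translation, with the translated clags2_, slasv2_ and clartg_ linked in.

#include <stdio.h>
#include "f2c.h"

int clags2_(logical *upper, real *a1, complex *a2, real *a3,
            real *b1, complex *b2, real *b3, real *csu, complex *snu,
            real *csv, complex *snv, real *csq, complex *snq);

int main(void)
{
    logical upper = TRUE_;
    real a1 = 2.f, a3 = 1.f, b1 = 1.f, b3 = 3.f;   /* diagonals of A and B */
    complex a2 = {0.5f, 0.5f}, b2 = {1.f, -0.5f};  /* off-diagonal entries */
    real csu, csv, csq;
    complex snu, snv, snq;

    clags2_(&upper, &a1, &a2, &a3, &b1, &b2, &b3,
            &csu, &snu, &csv, &snv, &csq, &snq);

    printf("U: CSU = %g, SNU = (%g, %g)\n", csu, snu.r, snu.i);
    printf("V: CSV = %g, SNV = (%g, %g)\n", csv, snv.r, snv.i);
    printf("Q: CSQ = %g, SNQ = (%g, %g)\n", csq, snq.r, snq.i);
    return 0;
}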
Example 3
/* Subroutine */ int slags2_(logical *upper, real *a1, real *a2, real *a3, 
	real *b1, real *b2, real *b3, real *csu, real *snu, real *csv, real *
	snv, real *csq, real *snq)
{
/*  -- LAPACK auxiliary routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    SLAGS2 computes 2-by-2 orthogonal matrices U, V and Q, such   
    that if ( UPPER ) then   

              U'*A*Q = U'*( A1 A2 )*Q = ( x  0  )   
                          ( 0  A3 )     ( x  x  )   
    and   
              V'*B*Q = V'*( B1 B2 )*Q = ( x  0  )   
                          ( 0  B3 )     ( x  x  )   

    or if ( .NOT.UPPER ) then   

              U'*A*Q = U'*( A1 0  )*Q = ( x  x  )   
                          ( A2 A3 )     ( 0  x  )   
    and   
              V'*B*Q = V'*( B1 0  )*Q = ( x  x  )   
                          ( B2 B3 )     ( 0  x  )   

    The rows of the transformed A and B are parallel, where   

      U = (  CSU  SNU ), V = (  CSV SNV ), Q = (  CSQ   SNQ )   
          ( -SNU  CSU )      ( -SNV CSV )      ( -SNQ   CSQ )   

    Z' denotes the transpose of Z.   


    Arguments   
    =========   

    UPPER   (input) LOGICAL   
            = .TRUE.: the input matrices A and B are upper triangular.   
            = .FALSE.: the input matrices A and B are lower triangular.   

    A1      (input) REAL   
    A2      (input) REAL   
    A3      (input) REAL   
            On entry, A1, A2 and A3 are elements of the input 2-by-2   
            upper (lower) triangular matrix A.   

    B1      (input) REAL   
    B2      (input) REAL   
    B3      (input) REAL   
            On entry, B1, B2 and B3 are elements of the input 2-by-2   
            upper (lower) triangular matrix B.   

    CSU     (output) REAL   
    SNU     (output) REAL   
            The desired orthogonal matrix U.   

    CSV     (output) REAL   
    SNV     (output) REAL   
            The desired orthogonal matrix V.   

    CSQ     (output) REAL   
    SNQ     (output) REAL   
            The desired orthogonal matrix Q.   

    ===================================================================== */
    /* System generated locals */
    real r__1;
    /* Local variables */
    static real aua11, aua12, aua21, aua22, avb11, avb12, avb21, avb22, ua11r,
	     ua22r, vb11r, vb22r, a, b, c__, d__, r__, s1, s2;
    extern /* Subroutine */ int slasv2_(real *, real *, real *, real *, real *
	    , real *, real *, real *, real *), slartg_(real *, real *, real *,
	     real *, real *);
    static real ua11, ua12, ua21, ua22, vb11, vb12, vb21, vb22, csl, csr, snl,
	     snr;



    if (*upper) {

/*        Input matrices A and B are upper triangular matrices   

          Form matrix C = A*adj(B) = ( a b )   
                                     ( 0 d ) */

	a = *a1 * *b3;
	d__ = *a3 * *b1;
	b = *a2 * *b1 - *a1 * *b2;

/*        The SVD of real 2-by-2 triangular C   

           ( CSL -SNL )*( A B )*(  CSR  SNR ) = ( R 0 )   
           ( SNL  CSL ) ( 0 D ) ( -SNR  CSR )   ( 0 T ) */

	slasv2_(&a, &b, &d__, &s1, &s2, &snr, &csr, &snl, &csl);

	if (dabs(csl) >= dabs(snl) || dabs(csr) >= dabs(snr)) {

/*           Compute the (1,1) and (1,2) elements of U'*A and V'*B,   
             and (1,2) element of |U|'*|A| and |V|'*|B|. */

	    ua11r = csl * *a1;
	    ua12 = csl * *a2 + snl * *a3;

	    vb11r = csr * *b1;
	    vb12 = csr * *b2 + snr * *b3;

	    aua12 = dabs(csl) * dabs(*a2) + dabs(snl) * dabs(*a3);
	    avb12 = dabs(csr) * dabs(*b2) + dabs(snr) * dabs(*b3);

/*           zero (1,2) elements of U'*A and V'*B */

	    if (dabs(ua11r) + dabs(ua12) != 0.f) {
		if (aua12 / (dabs(ua11r) + dabs(ua12)) <= avb12 / (dabs(vb11r)
			 + dabs(vb12))) {
		    r__1 = -ua11r;
		    slartg_(&r__1, &ua12, csq, snq, &r__);
		} else {
		    r__1 = -vb11r;
		    slartg_(&r__1, &vb12, csq, snq, &r__);
		}
	    } else {
		r__1 = -vb11r;
		slartg_(&r__1, &vb12, csq, snq, &r__);
	    }

	    *csu = csl;
	    *snu = -snl;
	    *csv = csr;
	    *snv = -snr;

	} else {

/*           Compute the (2,1) and (2,2) elements of U'*A and V'*B,   
             and (2,2) element of |U|'*|A| and |V|'*|B|. */

	    ua21 = -snl * *a1;
	    ua22 = -snl * *a2 + csl * *a3;

	    vb21 = -snr * *b1;
	    vb22 = -snr * *b2 + csr * *b3;

	    aua22 = dabs(snl) * dabs(*a2) + dabs(csl) * dabs(*a3);
	    avb22 = dabs(snr) * dabs(*b2) + dabs(csr) * dabs(*b3);

/*           zero (2,2) elements of U'*A and V'*B, and then swap. */

	    if (dabs(ua21) + dabs(ua22) != 0.f) {
		if (aua22 / (dabs(ua21) + dabs(ua22)) <= avb22 / (dabs(vb21) 
			+ dabs(vb22))) {
		    r__1 = -ua21;
		    slartg_(&r__1, &ua22, csq, snq, &r__);
		} else {
		    r__1 = -vb21;
		    slartg_(&r__1, &vb22, csq, snq, &r__);
		}
	    } else {
		r__1 = -vb21;
		slartg_(&r__1, &vb22, csq, snq, &r__);
	    }

	    *csu = snl;
	    *snu = csl;
	    *csv = snr;
	    *snv = csr;

	}

    } else {

/*        Input matrices A and B are lower triangular matrices   

          Form matrix C = A*adj(B) = ( a 0 )   
                                     ( c d ) */

	a = *a1 * *b3;
	d__ = *a3 * *b1;
	c__ = *a2 * *b3 - *a3 * *b2;

/*        The SVD of real 2-by-2 triangular C   

           ( CSL -SNL )*( A 0 )*(  CSR  SNR ) = ( R 0 )   
           ( SNL  CSL ) ( C D ) ( -SNR  CSR )   ( 0 T ) */

	slasv2_(&a, &c__, &d__, &s1, &s2, &snr, &csr, &snl, &csl);

	if (dabs(csr) >= dabs(snr) || dabs(csl) >= dabs(snl)) {

/*           Compute the (2,1) and (2,2) elements of U'*A and V'*B,   
             and (2,1) element of |U|'*|A| and |V|'*|B|. */

	    ua21 = -snr * *a1 + csr * *a2;
	    ua22r = csr * *a3;

	    vb21 = -snl * *b1 + csl * *b2;
	    vb22r = csl * *b3;

	    aua21 = dabs(snr) * dabs(*a1) + dabs(csr) * dabs(*a2);
	    avb21 = dabs(snl) * dabs(*b1) + dabs(csl) * dabs(*b2);

/*           zero (2,1) elements of U'*A and V'*B. */

	    if (dabs(ua21) + dabs(ua22r) != 0.f) {
		if (aua21 / (dabs(ua21) + dabs(ua22r)) <= avb21 / (dabs(vb21) 
			+ dabs(vb22r))) {
		    slartg_(&ua22r, &ua21, csq, snq, &r__);
		} else {
		    slartg_(&vb22r, &vb21, csq, snq, &r__);
		}
	    } else {
		slartg_(&vb22r, &vb21, csq, snq, &r__);
	    }

	    *csu = csr;
	    *snu = -snr;
	    *csv = csl;
	    *snv = -snl;

	} else {

/*           Compute the (1,1) and (1,2) elements of U'*A and V'*B,   
             and (1,1) element of |U|'*|A| and |V|'*|B|. */

	    ua11 = csr * *a1 + snr * *a2;
	    ua12 = snr * *a3;

	    vb11 = csl * *b1 + snl * *b2;
	    vb12 = snl * *b3;

	    aua11 = dabs(csr) * dabs(*a1) + dabs(snr) * dabs(*a2);
	    avb11 = dabs(csl) * dabs(*b1) + dabs(snl) * dabs(*b2);

/*           zero (1,1) elements of U'*A and V'*B, and then swap. */

	    if (dabs(ua11) + dabs(ua12) != 0.f) {
		if (aua11 / (dabs(ua11) + dabs(ua12)) <= avb11 / (dabs(vb11) 
			+ dabs(vb12))) {
		    slartg_(&ua12, &ua11, csq, snq, &r__);
		} else {
		    slartg_(&vb12, &vb11, csq, snq, &r__);
		}
	    } else {
		slartg_(&vb12, &vb11, csq, snq, &r__);
	    }

	    *csu = snr;
	    *snu = csr;
	    *csv = snl;
	    *snv = csl;

	}

    }

    return 0;

/*     End of SLAGS2 */

} /* slags2_ */
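For the real variant above, a hypothetical check (assuming the same f2c.h typedefs and a linked slags2_, slasv2_ and slartg_): it calls SLAGS2 on an upper triangular pair and then forms the (1,2) entries of U'*A*Q and V'*B*Q from the returned cosines and sines; both should come out (near) zero, which is the structure promised in the header comment.

#include <stdio.h>
#include "f2c.h"

int slags2_(logical *upper, real *a1, real *a2, real *a3,
            real *b1, real *b2, real *b3, real *csu, real *snu,
            real *csv, real *snv, real *csq, real *snq);

int main(void)
{
    logical upper = TRUE_;
    real a1 = 2.f, a2 = 0.5f, a3 = 1.f;   /* A = ( a1 a2 ; 0 a3 ) */
    real b1 = 1.f, b2 = -1.f, b3 = 3.f;   /* B = ( b1 b2 ; 0 b3 ) */
    real csu, snu, csv, snv, csq, snq;
    real uaq12, vbq12;

    slags2_(&upper, &a1, &a2, &a3, &b1, &b2, &b3,
            &csu, &snu, &csv, &snv, &csq, &snq);

    /* Row 1 of U'*A is (csu*a1, csu*a2 - snu*a3) with U = (csu snu; -snu csu);
       dotting it with column 2 of Q = (snq, csq) gives the (1,2) entry. */
    uaq12 = csu * a1 * snq + (csu * a2 - snu * a3) * csq;
    vbq12 = csv * b1 * snq + (csv * b2 - snv * b3) * csq;

    printf("(U'*A*Q)(1,2) = %g, (V'*B*Q)(1,2) = %g  (both should be ~0)\n",
           uaq12, vbq12);
    return 0;
}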
Example 4
/* Subroutine */ int sbdsqr_(char *uplo, integer *n, integer *ncvt, integer *
	nru, integer *ncc, real *d__, real *e, real *vt, integer *ldvt, real *
	u, integer *ldu, real *c__, integer *ldc, real *work, integer *info)
{
    /* System generated locals */
    integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, 
	    i__2;
    real r__1, r__2, r__3, r__4;
    doublereal d__1;

    /* Builtin functions */
    double pow_dd(doublereal *, doublereal *), sqrt(doublereal), r_sign(real *
	    , real *);

    /* Local variables */
    static real abse;
    static integer idir;
    static real abss;
    static integer oldm;
    static real cosl;
    static integer isub, iter;
    static real unfl, sinl, cosr, smin, smax, sinr;
    extern /* Subroutine */ int srot_(integer *, real *, integer *, real *, 
	    integer *, real *, real *), slas2_(real *, real *, real *, real *,
	     real *);
    static real f, g, h__;
    static integer i__, j, m;
    static real r__;
    extern logical lsame_(char *, char *);
    static real oldcs;
    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
    static integer oldll;
    static real shift, sigmn, oldsn;
    static integer maxit;
    static real sminl;
    extern /* Subroutine */ int slasr_(char *, char *, char *, integer *, 
	    integer *, real *, real *, real *, integer *);
    static real sigmx;
    static logical lower;
    extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *, 
	    integer *), slasq1_(integer *, real *, real *, real *, integer *),
	     slasv2_(real *, real *, real *, real *, real *, real *, real *, 
	    real *, real *);
    static real cs;
    static integer ll;
    static real sn, mu;
    extern doublereal slamch_(char *);
    extern /* Subroutine */ int xerbla_(char *, integer *);
    static real sminoa;
    extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
	    );
    static real thresh;
    static logical rotate;
    static real sminlo;
    static integer nm1;
    static real tolmul;
    static integer nm12, nm13, lll;
    static real eps, sll, tol;


#define c___ref(a_1,a_2) c__[(a_2)*c_dim1 + a_1]
#define u_ref(a_1,a_2) u[(a_2)*u_dim1 + a_1]
#define vt_ref(a_1,a_2) vt[(a_2)*vt_dim1 + a_1]


/*  -- LAPACK routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       October 31, 1999   


    Purpose   
    =======   

    SBDSQR computes the singular value decomposition (SVD) of a real   
    N-by-N (upper or lower) bidiagonal matrix B:  B = Q * S * P' (P'   
    denotes the transpose of P), where S is a diagonal matrix with   
    non-negative diagonal elements (the singular values of B), and Q   
    and P are orthogonal matrices.   

    The routine computes S, and optionally computes U * Q, P' * VT,   
    or Q' * C, for given real input matrices U, VT, and C.   

    See "Computing  Small Singular Values of Bidiagonal Matrices With   
    Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan,   
    LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11,   
    no. 5, pp. 873-912, Sept 1990) and   
    "Accurate singular values and differential qd algorithms," by   
    B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics   
    Department, University of California at Berkeley, July 1992   
    for a detailed description of the algorithm.   

    Arguments   
    =========   

    UPLO    (input) CHARACTER*1   
            = 'U':  B is upper bidiagonal;   
            = 'L':  B is lower bidiagonal.   

    N       (input) INTEGER   
            The order of the matrix B.  N >= 0.   

    NCVT    (input) INTEGER   
            The number of columns of the matrix VT. NCVT >= 0.   

    NRU     (input) INTEGER   
            The number of rows of the matrix U. NRU >= 0.   

    NCC     (input) INTEGER   
            The number of columns of the matrix C. NCC >= 0.   

    D       (input/output) REAL array, dimension (N)   
            On entry, the n diagonal elements of the bidiagonal matrix B.   
            On exit, if INFO=0, the singular values of B in decreasing   
            order.   

    E       (input/output) REAL array, dimension (N)   
            On entry, the elements of E contain the   
            offdiagonal elements of the bidiagonal matrix whose SVD   
            is desired. On normal exit (INFO = 0), E is destroyed.   
            If the algorithm does not converge (INFO > 0), D and E   
            will contain the diagonal and superdiagonal elements of a   
            bidiagonal matrix orthogonally equivalent to the one given   
            as input. E(N) is used for workspace.   

    VT      (input/output) REAL array, dimension (LDVT, NCVT)   
            On entry, an N-by-NCVT matrix VT.   
            On exit, VT is overwritten by P' * VT.   
            VT is not referenced if NCVT = 0.   

    LDVT    (input) INTEGER   
            The leading dimension of the array VT.   
            LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0.   

    U       (input/output) REAL array, dimension (LDU, N)   
            On entry, an NRU-by-N matrix U.   
            On exit, U is overwritten by U * Q.   
            U is not referenced if NRU = 0.   

    LDU     (input) INTEGER   
            The leading dimension of the array U.  LDU >= max(1,NRU).   

    C       (input/output) REAL array, dimension (LDC, NCC)   
            On entry, an N-by-NCC matrix C.   
            On exit, C is overwritten by Q' * C.   
            C is not referenced if NCC = 0.   

    LDC     (input) INTEGER   
            The leading dimension of the array C.   
            LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0.   

    WORK    (workspace) REAL array, dimension (4*N)   

    INFO    (output) INTEGER   
            = 0:  successful exit   
            < 0:  If INFO = -i, the i-th argument had an illegal value   
            > 0:  the algorithm did not converge; D and E contain the   
                  elements of a bidiagonal matrix which is orthogonally   
                  similar to the input matrix B;  if INFO = i, i   
                  elements of E have not converged to zero.   

    Internal Parameters   
    ===================   

    TOLMUL  REAL, default = max(10,min(100,EPS**(-1/8)))   
            TOLMUL controls the convergence criterion of the QR loop.   
            If it is positive, TOLMUL*EPS is the desired relative   
               precision in the computed singular values.   
            If it is negative, abs(TOLMUL*EPS*sigma_max) is the   
               desired absolute accuracy in the computed singular   
               values (corresponds to relative accuracy   
               abs(TOLMUL*EPS) in the largest singular value).   
            abs(TOLMUL) should be between 1 and 1/EPS, and preferably   
               between 10 (for fast convergence) and .1/EPS   
               (for there to be some accuracy in the results).   
            Default is to lose at either one eighth or 2 of the   
               available decimal digits in each computed singular value   
               (whichever is smaller).   

    MAXITR  INTEGER, default = 6   
            MAXITR controls the maximum number of passes of the   
            algorithm through its inner loop. The algorithm stops   
            (and so fails to converge) if the number of passes   
            through the inner loop exceeds MAXITR*N**2.   

    =====================================================================   


       Test the input parameters.   

       Parameter adjustments */
    --d__;
    --e;
    vt_dim1 = *ldvt;
    vt_offset = 1 + vt_dim1 * 1;
    vt -= vt_offset;
    u_dim1 = *ldu;
    u_offset = 1 + u_dim1 * 1;
    u -= u_offset;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1 * 1;
    c__ -= c_offset;
    --work;

    /* Function Body */
    *info = 0;
    lower = lsame_(uplo, "L");
    if (! lsame_(uplo, "U") && ! lower) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*ncvt < 0) {
	*info = -3;
    } else if (*nru < 0) {
	*info = -4;
    } else if (*ncc < 0) {
	*info = -5;
    } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) {
	*info = -9;
    } else if (*ldu < max(1,*nru)) {
	*info = -11;
    } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) {
	*info = -13;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("SBDSQR", &i__1);
	return 0;
    }
    if (*n == 0) {
	return 0;
    }
    if (*n == 1) {
	goto L160;
    }

/*     ROTATE is true if any singular vectors desired, false otherwise */

    rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;

/*     If no singular vectors desired, use qd algorithm */

    if (! rotate) {
	slasq1_(n, &d__[1], &e[1], &work[1], info);
	return 0;
    }

    nm1 = *n - 1;
    nm12 = nm1 + nm1;
    nm13 = nm12 + nm1;
    idir = 0;

/*     Get machine constants */

    eps = slamch_("Epsilon");
    unfl = slamch_("Safe minimum");

/*     If matrix lower bidiagonal, rotate to be upper bidiagonal   
       by applying Givens rotations on the left */

    if (lower) {
	i__1 = *n - 1;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
	    d__[i__] = r__;
	    e[i__] = sn * d__[i__ + 1];
	    d__[i__ + 1] = cs * d__[i__ + 1];
	    work[i__] = cs;
	    work[nm1 + i__] = sn;
/* L10: */
	}

/*        Update singular vectors if desired */

	if (*nru > 0) {
	    slasr_("R", "V", "F", nru, n, &work[1], &work[*n], &u[u_offset], 
		    ldu);
	}
	if (*ncc > 0) {
	    slasr_("L", "V", "F", n, ncc, &work[1], &work[*n], &c__[c_offset],
		     ldc);
	}
    }

/*     Compute singular values to relative accuracy TOL   
       (By setting TOL to be negative, algorithm will compute   
       singular values to absolute accuracy ABS(TOL)*norm(input matrix))   

   Computing MAX   
   Computing MIN */
    d__1 = (double) eps;
    r__3 = 100.f, r__4 = pow_dd(&d__1, &c_b15);
    r__1 = 10.f, r__2 = dmin(r__3,r__4);
    tolmul = dmax(r__1,r__2);
    tol = tolmul * eps;

/*     Compute approximate maximum, minimum singular values */

    smax = 0.f;
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
/* Computing MAX */
	r__2 = smax, r__3 = (r__1 = d__[i__], dabs(r__1));
	smax = dmax(r__2,r__3);
/* L20: */
    }
    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
/* Computing MAX */
	r__2 = smax, r__3 = (r__1 = e[i__], dabs(r__1));
	smax = dmax(r__2,r__3);
/* L30: */
    }
    sminl = 0.f;
    if (tol >= 0.f) {

/*        Relative accuracy desired */

	sminoa = dabs(d__[1]);
	if (sminoa == 0.f) {
	    goto L50;
	}
	mu = sminoa;
	i__1 = *n;
	for (i__ = 2; i__ <= i__1; ++i__) {
	    mu = (r__2 = d__[i__], dabs(r__2)) * (mu / (mu + (r__1 = e[i__ - 
		    1], dabs(r__1))));
	    sminoa = dmin(sminoa,mu);
	    if (sminoa == 0.f) {
		goto L50;
	    }
/* L40: */
	}
L50:
	sminoa /= sqrt((float) (*n));
/* Computing MAX */
	r__1 = tol * sminoa, r__2 = *n * 6 * *n * unfl;
	thresh = dmax(r__1,r__2);
    } else {

/*        Absolute accuracy desired   

   Computing MAX */
	r__1 = dabs(tol) * smax, r__2 = *n * 6 * *n * unfl;
	thresh = dmax(r__1,r__2);
    }
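
/*     THRESH is the magnitude below which an off-diagonal entry (and,
       when TOL < 0, a diagonal entry) is treated as negligible and set
       to zero; the MAXITR*N*N*UNFL term guards against underflow */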

/*     Prepare for main iteration loop for the singular values   
       (MAXIT is the maximum number of passes through the inner   
       loop permitted before nonconvergence signalled.) */

    maxit = *n * 6 * *n;
    iter = 0;
    oldll = -1;
    oldm = -1;

/*     M points to last element of unconverged part of matrix */

    m = *n;

/*     Begin main iteration loop */

L60:

/*     Check for convergence or exceeding iteration count */

    if (m <= 1) {
	goto L160;
    }
    if (iter > maxit) {
	goto L200;
    }

/*     Find diagonal block of matrix to work on */

    if (tol < 0.f && (r__1 = d__[m], dabs(r__1)) <= thresh) {
	d__[m] = 0.f;
    }
    smax = (r__1 = d__[m], dabs(r__1));
    smin = smax;
    i__1 = m - 1;
    for (lll = 1; lll <= i__1; ++lll) {
	ll = m - lll;
	abss = (r__1 = d__[ll], dabs(r__1));
	abse = (r__1 = e[ll], dabs(r__1));
	if (tol < 0.f && abss <= thresh) {
	    d__[ll] = 0.f;
	}
	if (abse <= thresh) {
	    goto L80;
	}
	smin = dmin(smin,abss);
/* Computing MAX */
	r__1 = max(smax,abss);
	smax = dmax(r__1,abse);
/* L70: */
    }
    ll = 0;
    goto L90;
L80:
    e[ll] = 0.f;

/*     Matrix splits since E(LL) = 0 */

    if (ll == m - 1) {

/*        Convergence of bottom singular value, return to top of loop */

	--m;
	goto L60;
    }
L90:
    ++ll;

/*     E(LL) through E(M-1) are nonzero, E(LL-1) is zero */

    if (ll == m - 1) {

/*        2 by 2 block, handle separately */

	slasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr,
		 &sinl, &cosl);
	d__[m - 1] = sigmx;
	e[m - 1] = 0.f;
	d__[m] = sigmn;
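/*        (SLASV2 returns the exact SVD of the 2-by-2 block with
          |SIGMX| >= |SIGMN|, so both singular values have converged) */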

/*        Compute singular vectors, if desired */

	if (*ncvt > 0) {
	    srot_(ncvt, &vt_ref(m - 1, 1), ldvt, &vt_ref(m, 1), ldvt, &cosr, &
		    sinr);
	}
	if (*nru > 0) {
	    srot_(nru, &u_ref(1, m - 1), &c__1, &u_ref(1, m), &c__1, &cosl, &
		    sinl);
	}
	if (*ncc > 0) {
	    srot_(ncc, &c___ref(m - 1, 1), ldc, &c___ref(m, 1), ldc, &cosl, &
		    sinl);
	}
	m += -2;
	goto L60;
    }

/*     If working on new submatrix, choose shift direction   
       (from larger end diagonal element towards smaller) */

    if (ll > oldm || m < oldll) {
	if ((r__1 = d__[ll], dabs(r__1)) >= (r__2 = d__[m], dabs(r__2))) {

/*           Chase bulge from top (big end) to bottom (small end) */

	    idir = 1;
	} else {

/*           Chase bulge from bottom (big end) to top (small end) */

	    idir = 2;
	}
    }

/*     Apply convergence tests */

    if (idir == 1) {

/*        Run convergence test in forward direction   
          First apply standard test to bottom of matrix */

	if ((r__2 = e[m - 1], dabs(r__2)) <= dabs(tol) * (r__1 = d__[m], dabs(
		r__1)) || tol < 0.f && (r__3 = e[m - 1], dabs(r__3)) <= 
		thresh) {
	    e[m - 1] = 0.f;
	    goto L60;
	}

	if (tol >= 0.f) {

/*           If relative accuracy desired,   
             apply convergence criterion forward */

	    mu = (r__1 = d__[ll], dabs(r__1));
	    sminl = mu;
	    i__1 = m - 1;
	    for (lll = ll; lll <= i__1; ++lll) {
		if ((r__1 = e[lll], dabs(r__1)) <= tol * mu) {
		    e[lll] = 0.f;
		    goto L60;
		}
		mu = (r__2 = d__[lll + 1], dabs(r__2)) * (mu / (mu + (r__1 = 
			e[lll], dabs(r__1))));
		sminl = dmin(sminl,mu);
/* L100: */
	    }
	}

    } else {

/*        Run convergence test in backward direction   
          First apply standard test to top of matrix */

	if ((r__2 = e[ll], dabs(r__2)) <= dabs(tol) * (r__1 = d__[ll], dabs(
		r__1)) || tol < 0.f && (r__3 = e[ll], dabs(r__3)) <= thresh) {
	    e[ll] = 0.f;
	    goto L60;
	}

	if (tol >= 0.f) {

/*           If relative accuracy desired,   
             apply convergence criterion backward */

	    mu = (r__1 = d__[m], dabs(r__1));
	    sminl = mu;
	    i__1 = ll;
	    for (lll = m - 1; lll >= i__1; --lll) {
		if ((r__1 = e[lll], dabs(r__1)) <= tol * mu) {
		    e[lll] = 0.f;
		    goto L60;
		}
		mu = (r__2 = d__[lll], dabs(r__2)) * (mu / (mu + (r__1 = e[
			lll], dabs(r__1))));
		sminl = dmin(sminl,mu);
/* L110: */
	    }
	}
    }
    oldll = ll;
    oldm = m;

/*     Compute shift.  First, test if shifting would ruin relative   
       accuracy, and if so set the shift to zero.   

   Computing MAX */
    r__1 = eps, r__2 = tol * .01f;
    if (tol >= 0.f && *n * tol * (sminl / smax) <= dmax(r__1,r__2)) {

/*        Use a zero shift to avoid loss of relative accuracy */

	shift = 0.f;
    } else {

/*        Compute the shift from 2-by-2 block at end of matrix */

	if (idir == 1) {
	    sll = (r__1 = d__[ll], dabs(r__1));
	    slas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__);
	} else {
	    sll = (r__1 = d__[m], dabs(r__1));
	    slas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__);
	}

/*        Test if shift negligible, and if so set to zero */

	if (sll > 0.f) {
/* Computing 2nd power */
	    r__1 = shift / sll;
	    if (r__1 * r__1 < eps) {
		shift = 0.f;
	    }
	}
    }

/*     Increment iteration count */

    iter = iter + m - ll;
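/*     (each pass over the active LL..M submatrix counts as M-LL
       inner-loop iterations towards the MAXIT limit) */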

/*     If SHIFT = 0, do simplified QR iteration */

    if (shift == 0.f) {
	if (idir == 1) {

/*           Chase bulge from top to bottom   
             Save cosines and sines for later singular vector updates */

	    cs = 1.f;
	    oldcs = 1.f;
	    i__1 = m - 1;
	    for (i__ = ll; i__ <= i__1; ++i__) {
		r__1 = d__[i__] * cs;
		slartg_(&r__1, &e[i__], &cs, &sn, &r__);
		if (i__ > ll) {
		    e[i__ - 1] = oldsn * r__;
		}
		r__1 = oldcs * r__;
		r__2 = d__[i__ + 1] * sn;
		slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]);
		work[i__ - ll + 1] = cs;
		work[i__ - ll + 1 + nm1] = sn;
		work[i__ - ll + 1 + nm12] = oldcs;
		work[i__ - ll + 1 + nm13] = oldsn;
/* L120: */
	    }
	    h__ = d__[m] * cs;
	    d__[m] = h__ * oldcs;
	    e[m - 1] = h__ * oldsn;

/*           Update singular vectors */

	    if (*ncvt > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &
			vt_ref(ll, 1), ldvt);
	    }
	    if (*nru > 0) {
		i__1 = m - ll + 1;
		slasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13 
			+ 1], &u_ref(1, ll), ldu);
	    }
	    if (*ncc > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13 
			+ 1], &c___ref(ll, 1), ldc);
	    }

/*           Test convergence */

	    if ((r__1 = e[m - 1], dabs(r__1)) <= thresh) {
		e[m - 1] = 0.f;
	    }

	} else {

/*           Chase bulge from bottom to top   
             Save cosines and sines for later singular vector updates */

	    cs = 1.f;
	    oldcs = 1.f;
	    i__1 = ll + 1;
	    for (i__ = m; i__ >= i__1; --i__) {
		r__1 = d__[i__] * cs;
		slartg_(&r__1, &e[i__ - 1], &cs, &sn, &r__);
		if (i__ < m) {
		    e[i__] = oldsn * r__;
		}
		r__1 = oldcs * r__;
		r__2 = d__[i__ - 1] * sn;
		slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]);
		work[i__ - ll] = cs;
		work[i__ - ll + nm1] = -sn;
		work[i__ - ll + nm12] = oldcs;
		work[i__ - ll + nm13] = -oldsn;
/* L130: */
	    }
	    h__ = d__[ll] * cs;
	    d__[ll] = h__ * oldcs;
	    e[ll] = h__ * oldsn;

/*           Update singular vectors */

	    if (*ncvt > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
			nm13 + 1], &vt_ref(ll, 1), ldvt);
	    }
	    if (*nru > 0) {
		i__1 = m - ll + 1;
		slasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u_ref(
			1, ll), ldu);
	    }
	    if (*ncc > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &
			c___ref(ll, 1), ldc);
	    }

/*           Test convergence */

	    if ((r__1 = e[ll], dabs(r__1)) <= thresh) {
		e[ll] = 0.f;
	    }
	}
    } else {

/*        Use nonzero shift */

	if (idir == 1) {

/*           Chase bulge from top to bottom   
             Save cosines and sines for later singular vector updates */

	    f = ((r__1 = d__[ll], dabs(r__1)) - shift) * (r_sign(&c_b49, &d__[
		    ll]) + shift / d__[ll]);
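
/*           (F = (D(LL)**2 - SHIFT**2) / D(LL), written so that D(LL)
             is never squared explicitly) */
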
	    g = e[ll];
	    i__1 = m - 1;
	    for (i__ = ll; i__ <= i__1; ++i__) {
		slartg_(&f, &g, &cosr, &sinr, &r__);
		if (i__ > ll) {
		    e[i__ - 1] = r__;
		}
		f = cosr * d__[i__] + sinr * e[i__];
		e[i__] = cosr * e[i__] - sinr * d__[i__];
		g = sinr * d__[i__ + 1];
		d__[i__ + 1] = cosr * d__[i__ + 1];
		slartg_(&f, &g, &cosl, &sinl, &r__);
		d__[i__] = r__;
		f = cosl * e[i__] + sinl * d__[i__ + 1];
		d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__];
		if (i__ < m - 1) {
		    g = sinl * e[i__ + 1];
		    e[i__ + 1] = cosl * e[i__ + 1];
		}
		work[i__ - ll + 1] = cosr;
		work[i__ - ll + 1 + nm1] = sinr;
		work[i__ - ll + 1 + nm12] = cosl;
		work[i__ - ll + 1 + nm13] = sinl;
/* L140: */
	    }
	    e[m - 1] = f;

/*           Update singular vectors */

	    if (*ncvt > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &
			vt_ref(ll, 1), ldvt);
	    }
	    if (*nru > 0) {
		i__1 = m - ll + 1;
		slasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13 
			+ 1], &u_ref(1, ll), ldu);
	    }
	    if (*ncc > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13 
			+ 1], &c___ref(ll, 1), ldc);
	    }

/*           Test convergence */

	    if ((r__1 = e[m - 1], dabs(r__1)) <= thresh) {
		e[m - 1] = 0.f;
	    }

	} else {

/*           Chase bulge from bottom to top   
             Save cosines and sines for later singular vector updates */

	    f = ((r__1 = d__[m], dabs(r__1)) - shift) * (r_sign(&c_b49, &d__[
		    m]) + shift / d__[m]);
	    g = e[m - 1];
	    i__1 = ll + 1;
	    for (i__ = m; i__ >= i__1; --i__) {
		slartg_(&f, &g, &cosr, &sinr, &r__);
		if (i__ < m) {
		    e[i__] = r__;
		}
		f = cosr * d__[i__] + sinr * e[i__ - 1];
		e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__];
		g = sinr * d__[i__ - 1];
		d__[i__ - 1] = cosr * d__[i__ - 1];
		slartg_(&f, &g, &cosl, &sinl, &r__);
		d__[i__] = r__;
		f = cosl * e[i__ - 1] + sinl * d__[i__ - 1];
		d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1];
		if (i__ > ll + 1) {
		    g = sinl * e[i__ - 2];
		    e[i__ - 2] = cosl * e[i__ - 2];
		}
		work[i__ - ll] = cosr;
		work[i__ - ll + nm1] = -sinr;
		work[i__ - ll + nm12] = cosl;
		work[i__ - ll + nm13] = -sinl;
/* L150: */
	    }
	    e[ll] = f;

/*           Test convergence */

	    if ((r__1 = e[ll], dabs(r__1)) <= thresh) {
		e[ll] = 0.f;
	    }

/*           Update singular vectors if desired */

	    if (*ncvt > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
			nm13 + 1], &vt_ref(ll, 1), ldvt);
	    }
	    if (*nru > 0) {
		i__1 = m - ll + 1;
		slasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u_ref(
			1, ll), ldu);
	    }
	    if (*ncc > 0) {
		i__1 = m - ll + 1;
		slasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &
			c___ref(ll, 1), ldc);
	    }
	}
    }

/*     QR iteration finished, go back and check convergence */

    goto L60;

/*     All singular values converged, so make them positive */

L160:
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (d__[i__] < 0.f) {
	    d__[i__] = -d__[i__];

/*           Change sign of singular vectors, if desired */

	    if (*ncvt > 0) {
		sscal_(ncvt, &c_b72, &vt_ref(i__, 1), ldvt);
	    }
	}
/* L170: */
    }

/*     Sort the singular values into decreasing order (insertion sort on   
       singular values, but only one transposition per singular vector) */

    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {

/*        Scan for smallest D(I) */

	isub = 1;
	smin = d__[1];
	i__2 = *n + 1 - i__;
	for (j = 2; j <= i__2; ++j) {
	    if (d__[j] <= smin) {
		isub = j;
		smin = d__[j];
	    }
/* L180: */
	}
	if (isub != *n + 1 - i__) {

/*           Swap singular values and vectors */

	    d__[isub] = d__[*n + 1 - i__];
	    d__[*n + 1 - i__] = smin;
	    if (*ncvt > 0) {
		sswap_(ncvt, &vt_ref(isub, 1), ldvt, &vt_ref(*n + 1 - i__, 1),
			 ldvt);
	    }
	    if (*nru > 0) {
		sswap_(nru, &u_ref(1, isub), &c__1, &u_ref(1, *n + 1 - i__), &
			c__1);
	    }
	    if (*ncc > 0) {
		sswap_(ncc, &c___ref(isub, 1), ldc, &c___ref(*n + 1 - i__, 1),
			 ldc);
	    }
	}
/* L190: */
    }
    goto L220;

/*     Maximum number of iterations exceeded, failure to converge */

L200:
    *info = 0;
    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (e[i__] != 0.f) {
	    ++(*info);
	}
/* L210: */
    }
L220:
    return 0;

/*     End of SBDSQR */

} /* sbdsqr_ */
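
For reference, the following is a minimal calling sketch, not part of the
original listing: it assumes the f2c-style prototype shown above
(column-major storage, every scalar passed by address), a hand-built
3-by-3 upper bidiagonal test matrix, and U and VT initialised to the
identity so that the routine returns Q and P**T directly. Since NCC is
zero, the C argument is never referenced and a dummy scalar with LDC = 1
satisfies the argument checks.

#include <stdio.h>

int sbdsqr_(char *uplo, int *n, int *ncvt, int *nru, int *ncc,
            float *d__, float *e, float *vt, int *ldvt,
            float *u, int *ldu, float *c__, int *ldc,
            float *work, int *info);

int main(void)
{
    int n = 3, ncvt = 3, nru = 3, ncc = 0;
    int ldvt = 3, ldu = 3, ldc = 1, info;
    /* upper bidiagonal B: diagonal D, superdiagonal E */
    float d[3] = {2.f, 3.f, 4.f};
    float e[2] = {1.f, 1.f};
    /* U and VT start as 3-by-3 identities (column-major) */
    float u[9]  = {1.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 1.f};
    float vt[9] = {1.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 1.f};
    float c = 0.f;          /* not referenced since NCC = 0 */
    float work[4 * 3];      /* workspace of length 4*N */

    sbdsqr_("U", &n, &ncvt, &nru, &ncc, d, e, vt, &ldvt,
            u, &ldu, &c, &ldc, work, &info);

    if (info == 0)
        printf("singular values: %g %g %g\n", d[0], d[1], d[2]);
    else
        printf("sbdsqr_ failed, info = %d\n", info);
    return 0;
}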
Example No. 5
0
/* Subroutine */ int slagv2_(real *a, integer *lda, real *b, integer *ldb, 
	real *alphar, real *alphai, real *beta, real *csl, real *snl, real *
	csr, real *snr)
{
/*  -- LAPACK auxiliary routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   


    Purpose   
    =======   

    SLAGV2 computes the Generalized Schur factorization of a real 2-by-2   
    matrix pencil (A,B) where B is upper triangular. This routine   
    computes orthogonal (rotation) matrices given by CSL, SNL and CSR,   
    SNR such that   

    1) if the pencil (A,B) has two real eigenvalues (including 0/0 or 1/0
       types), then   

       [ a11 a12 ] := [  CSL  SNL ] [ a11 a12 ] [  CSR -SNR ]   
       [  0  a22 ]    [ -SNL  CSL ] [ a21 a22 ] [  SNR  CSR ]   

       [ b11 b12 ] := [  CSL  SNL ] [ b11 b12 ] [  CSR -SNR ]   
       [  0  b22 ]    [ -SNL  CSL ] [  0  b22 ] [  SNR  CSR ],   

    2) if the pencil (A,B) has a pair of complex conjugate eigenvalues,   
       then   

       [ a11 a12 ] := [  CSL  SNL ] [ a11 a12 ] [  CSR -SNR ]   
       [ a21 a22 ]    [ -SNL  CSL ] [ a21 a22 ] [  SNR  CSR ]   

       [ b11  0  ] := [  CSL  SNL ] [ b11 b12 ] [  CSR -SNR ]   
       [  0  b22 ]    [ -SNL  CSL ] [  0  b22 ] [  SNR  CSR ]   

       where b11 >= b22 > 0.   


    Arguments   
    =========   

    A       (input/output) REAL array, dimension (LDA, 2)   
            On entry, the 2 x 2 matrix A.   
            On exit, A is overwritten by the ``A-part'' of the   
            generalized Schur form.   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= 2.

    B       (input/output) REAL array, dimension (LDB, 2)   
            On entry, the upper triangular 2 x 2 matrix B.   
            On exit, B is overwritten by the ``B-part'' of the   
            generalized Schur form.   

    LDB     (input) INTEGER   
            The leading dimension of the array B.  LDB >= 2.

    ALPHAR  (output) REAL array, dimension (2)   
    ALPHAI  (output) REAL array, dimension (2)   
    BETA    (output) REAL array, dimension (2)   
            (ALPHAR(k)+i*ALPHAI(k))/BETA(k) are the eigenvalues of the   
            pencil (A,B), k=1,2, i = sqrt(-1).  Note that BETA(k) may   
            be zero.   

    CSL     (output) REAL   
            The cosine of the left rotation matrix.   

    SNL     (output) REAL   
            The sine of the left rotation matrix.   

    CSR     (output) REAL   
            The cosine of the right rotation matrix.   

    SNR     (output) REAL   
            The sine of the right rotation matrix.   

    Further Details   
    ===============   

    Based on contributions by   
       Mark Fahey, Department of Mathematics, Univ. of Kentucky, USA   

    =====================================================================   


       Parameter adjustments */
    /* Table of constant values */
    static integer c__2 = 2;
    static integer c__1 = 1;
    
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset;
    real r__1, r__2, r__3, r__4, r__5, r__6;
    /* Local variables */
    extern /* Subroutine */ int srot_(integer *, real *, integer *, real *, 
	    integer *, real *, real *), slag2_(real *, integer *, real *, 
	    integer *, real *, real *, real *, real *, real *, real *);
    static real r__, t, anorm, bnorm, h1, h2, h3, scale1, scale2;
    extern /* Subroutine */ int slasv2_(real *, real *, real *, real *, real *
	    , real *, real *, real *, real *);
    extern doublereal slapy2_(real *, real *);
    static real ascale, bscale, wi, qq, rr;
    extern doublereal slamch_(char *);
    static real safmin;
    extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
	    );
    static real wr1, wr2, ulp;
#define a_ref(a_1,a_2) a[(a_2)*a_dim1 + a_1]
#define b_ref(a_1,a_2) b[(a_2)*b_dim1 + a_1]


    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    --alphar;
    --alphai;
    --beta;

    /* Function Body */
    safmin = slamch_("S");
    ulp = slamch_("P");
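
/*     (SAFMIN is the safe minimum and ULP the machine precision; they
       are used below for scaling and for the deflation tests) */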

/*     Scale A   

   Computing MAX */
    r__5 = (r__1 = a_ref(1, 1), dabs(r__1)) + (r__2 = a_ref(2, 1), dabs(r__2))
	    , r__6 = (r__3 = a_ref(1, 2), dabs(r__3)) + (r__4 = a_ref(2, 2), 
	    dabs(r__4)), r__5 = max(r__5,r__6);
    anorm = dmax(r__5,safmin);
    ascale = 1.f / anorm;
    a_ref(1, 1) = ascale * a_ref(1, 1);
    a_ref(1, 2) = ascale * a_ref(1, 2);
    a_ref(2, 1) = ascale * a_ref(2, 1);
    a_ref(2, 2) = ascale * a_ref(2, 2);

/*     Scale B   

   Computing MAX */
    r__4 = (r__3 = b_ref(1, 1), dabs(r__3)), r__5 = (r__1 = b_ref(1, 2), dabs(
	    r__1)) + (r__2 = b_ref(2, 2), dabs(r__2)), r__4 = max(r__4,r__5);
    bnorm = dmax(r__4,safmin);
    bscale = 1.f / bnorm;
    b_ref(1, 1) = bscale * b_ref(1, 1);
    b_ref(1, 2) = bscale * b_ref(1, 2);
    b_ref(2, 2) = bscale * b_ref(2, 2);

/*     Check if A can be deflated */

    if ((r__1 = a_ref(2, 1), dabs(r__1)) <= ulp) {
	*csl = 1.f;
	*snl = 0.f;
	*csr = 1.f;
	*snr = 0.f;
	a_ref(2, 1) = 0.f;
	b_ref(2, 1) = 0.f;

/*     Check if B is singular */

    } else if ((r__1 = b_ref(1, 1), dabs(r__1)) <= ulp) {
	slartg_(&a_ref(1, 1), &a_ref(2, 1), csl, snl, &r__);
	*csr = 1.f;
	*snr = 0.f;
	srot_(&c__2, &a_ref(1, 1), lda, &a_ref(2, 1), lda, csl, snl);
	srot_(&c__2, &b_ref(1, 1), ldb, &b_ref(2, 1), ldb, csl, snl);
	a_ref(2, 1) = 0.f;
	b_ref(1, 1) = 0.f;
	b_ref(2, 1) = 0.f;
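
/*     Check if B(2,2) is negligible (B singular) */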

    } else if ((r__1 = b_ref(2, 2), dabs(r__1)) <= ulp) {
	slartg_(&a_ref(2, 2), &a_ref(2, 1), csr, snr, &t);
	*snr = -(*snr);
	srot_(&c__2, &a_ref(1, 1), &c__1, &a_ref(1, 2), &c__1, csr, snr);
	srot_(&c__2, &b_ref(1, 1), &c__1, &b_ref(1, 2), &c__1, csr, snr);
	*csl = 1.f;
	*snl = 0.f;
	a_ref(2, 1) = 0.f;
	b_ref(2, 1) = 0.f;
	b_ref(2, 2) = 0.f;

    } else {

/*        B is nonsingular, first compute the eigenvalues of (A,B) */

	slag2_(&a[a_offset], lda, &b[b_offset], ldb, &safmin, &scale1, &
		scale2, &wr1, &wr2, &wi);

	if (wi == 0.f) {

/*           two real eigenvalues, compute s*A-w*B */

	    h1 = scale1 * a_ref(1, 1) - wr1 * b_ref(1, 1);
	    h2 = scale1 * a_ref(1, 2) - wr1 * b_ref(1, 2);
	    h3 = scale1 * a_ref(2, 2) - wr1 * b_ref(2, 2);

	    rr = slapy2_(&h1, &h2);
	    r__1 = scale1 * a_ref(2, 1);
	    qq = slapy2_(&r__1, &h3);

	    if (rr > qq) {

/*              find right rotation matrix to zero 1,1 element of   
                (sA - wB) */

		slartg_(&h2, &h1, csr, snr, &t);

	    } else {

/*              find right rotation matrix to zero 2,1 element of   
                (sA - wB) */

		r__1 = scale1 * a_ref(2, 1);
		slartg_(&h3, &r__1, csr, snr, &t);

	    }

	    *snr = -(*snr);
	    srot_(&c__2, &a_ref(1, 1), &c__1, &a_ref(1, 2), &c__1, csr, snr);
	    srot_(&c__2, &b_ref(1, 1), &c__1, &b_ref(1, 2), &c__1, csr, snr);

/*           compute inf norms of A and B   

   Computing MAX */
	    r__5 = (r__1 = a_ref(1, 1), dabs(r__1)) + (r__2 = a_ref(1, 2), 
		    dabs(r__2)), r__6 = (r__3 = a_ref(2, 1), dabs(r__3)) + (
		    r__4 = a_ref(2, 2), dabs(r__4));
	    h1 = dmax(r__5,r__6);
/* Computing MAX */
	    r__5 = (r__1 = b_ref(1, 1), dabs(r__1)) + (r__2 = b_ref(1, 2), 
		    dabs(r__2)), r__6 = (r__3 = b_ref(2, 1), dabs(r__3)) + (
		    r__4 = b_ref(2, 2), dabs(r__4));
	    h2 = dmax(r__5,r__6);

	    if (scale1 * h1 >= dabs(wr1) * h2) {

/*              find left rotation matrix Q to zero out B(2,1) */

		slartg_(&b_ref(1, 1), &b_ref(2, 1), csl, snl, &r__);

	    } else {

/*              find left rotation matrix Q to zero out A(2,1) */

		slartg_(&a_ref(1, 1), &a_ref(2, 1), csl, snl, &r__);

	    }

	    srot_(&c__2, &a_ref(1, 1), lda, &a_ref(2, 1), lda, csl, snl);
	    srot_(&c__2, &b_ref(1, 1), ldb, &b_ref(2, 1), ldb, csl, snl);

	    a_ref(2, 1) = 0.f;
	    b_ref(2, 1) = 0.f;

	} else {

/*           a pair of complex conjugate eigenvalues   
             first compute the SVD of the matrix B */

	    slasv2_(&b_ref(1, 1), &b_ref(1, 2), &b_ref(2, 2), &r__, &t, snr, 
		    csr, snl, csl);

/*           Form (A,B) := Q(A,B)Z' where Q is left rotation matrix and   
             Z is right rotation matrix computed from SLASV2 */

	    srot_(&c__2, &a_ref(1, 1), lda, &a_ref(2, 1), lda, csl, snl);
	    srot_(&c__2, &b_ref(1, 1), ldb, &b_ref(2, 1), ldb, csl, snl);
	    srot_(&c__2, &a_ref(1, 1), &c__1, &a_ref(1, 2), &c__1, csr, snr);
	    srot_(&c__2, &b_ref(1, 1), &c__1, &b_ref(1, 2), &c__1, csr, snr);

	    b_ref(2, 1) = 0.f;
	    b_ref(1, 2) = 0.f;

	}

    }

/*     Unscaling */

    a_ref(1, 1) = anorm * a_ref(1, 1);
    a_ref(2, 1) = anorm * a_ref(2, 1);
    a_ref(1, 2) = anorm * a_ref(1, 2);
    a_ref(2, 2) = anorm * a_ref(2, 2);
    b_ref(1, 1) = bnorm * b_ref(1, 1);
    b_ref(2, 1) = bnorm * b_ref(2, 1);
    b_ref(1, 2) = bnorm * b_ref(1, 2);
    b_ref(2, 2) = bnorm * b_ref(2, 2);

    if (wi == 0.f) {
	alphar[1] = a_ref(1, 1);
	alphar[2] = a_ref(2, 2);
	alphai[1] = 0.f;
	alphai[2] = 0.f;
	beta[1] = b_ref(1, 1);
	beta[2] = b_ref(2, 2);
    } else {
	alphar[1] = anorm * wr1 / scale1 / bnorm;
	alphai[1] = anorm * wi / scale1 / bnorm;
	alphar[2] = alphar[1];
	alphai[2] = -alphai[1];
	beta[1] = 1.f;
	beta[2] = 1.f;
    }

/* L10: */

    return 0;

/*     End of SLAGV2 */

} /* slagv2_ */
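
As with the previous routine, here is a minimal calling sketch rather
than part of the original listing. The local typedefs are an assumption
and must agree with the f2c.h actually used to build slagv2_ (on many
CLAPACK builds integer is long int rather than int); the 2-by-2 pencil
(A,B), with B upper triangular, is an arbitrary illustrative example.

#include <stdio.h>

typedef int integer;   /* must match f2c.h */
typedef float real;    /* must match f2c.h */

int slagv2_(real *a, integer *lda, real *b, integer *ldb,
            real *alphar, real *alphai, real *beta,
            real *csl, real *snl, real *csr, real *snr);

int main(void)
{
    integer lda = 2, ldb = 2;
    /* column-major 2-by-2 pencil (A,B); B is upper triangular */
    real a[4] = {4.f, 1.f, 2.f, 3.f};   /* A = [ 4  2 ; 1  3 ] */
    real b[4] = {2.f, 0.f, 1.f, 1.f};   /* B = [ 2  1 ; 0  1 ] */
    real alphar[2], alphai[2], beta[2], csl, snl, csr, snr;

    slagv2_(a, &lda, b, &ldb, alphar, alphai, beta,
            &csl, &snl, &csr, &snr);

    /* eigenvalues of the pencil are (ALPHAR(k) + i*ALPHAI(k)) / BETA(k) */
    printf("lambda1 = (%g, %g) / %g\n", alphar[0], alphai[0], beta[0]);
    printf("lambda2 = (%g, %g) / %g\n", alphar[1], alphai[1], beta[1]);
    return 0;
}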