/* Subroutine */ int cunbdb1_(integer *m, integer *p, integer *q, complex *
	x11, integer *ldx11, complex *x21, integer *ldx21, real *theta, real *
	phi, complex *taup1, complex *taup2, complex *tauq1, complex *work,
	integer *lwork, integer *info)
{
    /* System generated locals */
    integer x11_dim1, x11_offset, x21_dim1, x21_offset, i__1, i__2, i__3,
	    i__4;
    real r__1, r__2;
    complex q__1;

    /* Builtin functions */
    double atan2(doublereal, doublereal), cos(doublereal), sin(doublereal);
    void r_cnjg(complex *, complex *);
    double sqrt(doublereal);

    /* Local variables */
    integer lworkmin, lworkopt;
    real c__;
    integer i__;
    real s;
    integer childinfo;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex
	    *, integer *, complex *, complex *, integer *, complex *);
    integer ilarf, llarf;
    extern /* Subroutine */ int csrot_(integer *, complex *, integer *,
	    complex *, integer *, real *, real *);
    /* Note: scnrm2 takes (N, X, INCX); the duplicated trailing arguments in
       the machine-generated prototype and calls have been removed. */
    extern real scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
	    xerbla_(char *, integer *);
    logical lquery;
    extern /* Subroutine */ int cunbdb5_(integer *, integer *, integer *,
	    complex *, integer *, complex *, integer *, complex *, integer *,
	    complex *, integer *, complex *, integer *, integer *);
    integer iorbdb5, lorbdb5;
    extern /* Subroutine */ int clarfgp_(integer *, complex *, complex *,
	    integer *, complex *);

/*  -- LAPACK computational routine (version 3.5.0) -- */
/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/*     July 2012 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  ==================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Intrinsic Function .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test input arguments */

    /* Parameter adjustments */
    x11_dim1 = *ldx11;
    x11_offset = 1 + x11_dim1;
    x11 -= x11_offset;
    x21_dim1 = *ldx21;
    x21_offset = 1 + x21_dim1;
    x21 -= x21_offset;
    --theta;
    --phi;
    --taup1;
    --taup2;
    --tauq1;
    --work;

    /* Function Body */
    *info = 0;
    lquery = *lwork == -1;

    if (*m < 0) {
	*info = -1;
    } else if (*p < *q || *m - *p < *q) {
	*info = -2;
    } else if (*q < 0 || *m - *q < *q) {
	*info = -3;
    } else if (*ldx11 < max(1,*p)) {
	*info = -5;
    } else /* if(complicated condition) */ {
	/* Computing MAX */
	i__1 = 1, i__2 = *m - *p;
	if (*ldx21 < max(i__1,i__2)) {
	    *info = -7;
	}
    }

/*     Compute workspace */

    if (*info == 0) {
	ilarf = 2;
	/* Computing MAX */
	i__1 = *p - 1, i__2 = *m - *p - 1;
	i__1 = max(i__1,i__2);
	i__2 = *q - 1;
	llarf = max(i__1,i__2);
	iorbdb5 = 2;
	lorbdb5 = *q - 2;
	/* Computing MAX */
	i__1 = ilarf + llarf - 1, i__2 = iorbdb5 + lorbdb5 - 1;
	lworkopt = max(i__1,i__2);
	lworkmin = lworkopt;
	work[1].r = (real) lworkopt, work[1].i = 0.f;
	if (*lwork < lworkmin && ! lquery) {
	    *info = -14;
	}
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CUNBDB1", &i__1);
	return 0;
    } else if (lquery) {
	return 0;
    }

/*     Reduce columns 1, ..., Q of X11 and X21 */

    i__1 = *q;
    for (i__ = 1; i__ <= i__1; ++i__) {

	i__2 = *p - i__ + 1;
	clarfgp_(&i__2, &x11[i__ + i__ * x11_dim1], &x11[i__ + 1 + i__ *
		x11_dim1], &c__1, &taup1[i__]);
	i__2 = *m - *p - i__ + 1;
	clarfgp_(&i__2, &x21[i__ + i__ * x21_dim1], &x21[i__ + 1 + i__ *
		x21_dim1], &c__1, &taup2[i__]);
	theta[i__] = atan2((real) x21[i__ + i__ * x21_dim1].r, (real) x11[
		i__ + i__ * x11_dim1].r);
	c__ = cos(theta[i__]);
	s = sin(theta[i__]);
	i__2 = i__ + i__ * x11_dim1;
	x11[i__2].r = 1.f, x11[i__2].i = 0.f;
	i__2 = i__ + i__ * x21_dim1;
	x21[i__2].r = 1.f, x21[i__2].i = 0.f;

	i__2 = *p - i__ + 1;
	i__3 = *q - i__;
	r_cnjg(&q__1, &taup1[i__]);
	clarf_("L", &i__2, &i__3, &x11[i__ + i__ * x11_dim1], &c__1, &q__1,
		&x11[i__ + (i__ + 1) * x11_dim1], ldx11, &work[ilarf]);
	i__2 = *m - *p - i__ + 1;
	i__3 = *q - i__;
	r_cnjg(&q__1, &taup2[i__]);
	clarf_("L", &i__2, &i__3, &x21[i__ + i__ * x21_dim1], &c__1, &q__1,
		&x21[i__ + (i__ + 1) * x21_dim1], ldx21, &work[ilarf]);

	if (i__ < *q) {
	    i__2 = *q - i__;
	    csrot_(&i__2, &x11[i__ + (i__ + 1) * x11_dim1], ldx11, &x21[i__
		    + (i__ + 1) * x21_dim1], ldx21, &c__, &s);
	    i__2 = *q - i__;
	    clacgv_(&i__2, &x21[i__ + (i__ + 1) * x21_dim1], ldx21);
	    i__2 = *q - i__;
	    clarfgp_(&i__2, &x21[i__ + (i__ + 1) * x21_dim1], &x21[i__ + (
		    i__ + 2) * x21_dim1], ldx21, &tauq1[i__]);
	    i__2 = i__ + (i__ + 1) * x21_dim1;
	    s = x21[i__2].r;
	    i__2 = i__ + (i__ + 1) * x21_dim1;
	    x21[i__2].r = 1.f, x21[i__2].i = 0.f;
	    i__2 = *p - i__;
	    i__3 = *q - i__;
	    clarf_("R", &i__2, &i__3, &x21[i__ + (i__ + 1) * x21_dim1],
		    ldx21, &tauq1[i__], &x11[i__ + 1 + (i__ + 1) * x11_dim1],
		    ldx11, &work[ilarf]);
	    i__2 = *m - *p - i__;
	    i__3 = *q - i__;
	    clarf_("R", &i__2, &i__3, &x21[i__ + (i__ + 1) * x21_dim1],
		    ldx21, &tauq1[i__], &x21[i__ + 1 + (i__ + 1) * x21_dim1],
		    ldx21, &work[ilarf]);
	    i__2 = *q - i__;
	    clacgv_(&i__2, &x21[i__ + (i__ + 1) * x21_dim1], ldx21);
	    i__2 = *p - i__;
	    /* Computing 2nd power */
	    r__1 = scnrm2_(&i__2, &x11[i__ + 1 + (i__ + 1) * x11_dim1],
		    &c__1);
	    i__3 = *m - *p - i__;
	    /* Computing 2nd power */
	    r__2 = scnrm2_(&i__3, &x21[i__ + 1 + (i__ + 1) * x21_dim1],
		    &c__1);
	    c__ = sqrt(r__1 * r__1 + r__2 * r__2);
	    phi[i__] = atan2(s, c__);
	    i__2 = *p - i__;
	    i__3 = *m - *p - i__;
	    i__4 = *q - i__ - 1;
	    cunbdb5_(&i__2, &i__3, &i__4, &x11[i__ + 1 + (i__ + 1) *
		    x11_dim1], &c__1, &x21[i__ + 1 + (i__ + 1) * x21_dim1],
		    &c__1, &x11[i__ + 1 + (i__ + 2) * x11_dim1], ldx11, &x21[
		    i__ + 1 + (i__ + 2) * x21_dim1], ldx21, &work[iorbdb5],
		    &lorbdb5, &childinfo);
	}
    }

    return 0;

/*     End of CUNBDB1 */

}
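/* Illustrative driver (a sketch, not part of LAPACK): the standard
   two-phase calling pattern for CUNBDB1. A first call with LWORK = -1
   performs a workspace query and returns the optimal size in WORK(1);
   the second call does the factorization. The helper name is hypothetical,
   and the code assumes <stdlib.h> is available and that X11/X21 hold the
   top and bottom blocks of a matrix with orthonormal columns, as the
   routine requires. */
static int cunbdb1_example(integer m, integer p, integer q, complex *x11,
	integer ldx11, complex *x21, integer ldx21, real *theta, real *phi,
	complex *taup1, complex *taup2, complex *tauq1)
{
    complex wkopt;
    complex *work;
    integer lwork = -1, info = 0;

    /* Workspace query: only WORK(1) is set, no computation is done. */
    cunbdb1_(&m, &p, &q, x11, &ldx11, x21, &ldx21, theta, phi, taup1,
	    taup2, tauq1, &wkopt, &lwork, &info);
    if (info != 0) {
	return (int) info;
    }

    /* Allocate the optimal workspace and run the factorization. */
    lwork = (integer) wkopt.r;
    work = (complex *) malloc((size_t) lwork * sizeof(complex));
    if (work == NULL) {
	return -1;
    }
    cunbdb1_(&m, &p, &q, x11, &ldx11, x21, &ldx21, theta, phi, taup1,
	    taup2, tauq1, work, &lwork, &info);
    free(work);
    return (int) info;
}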
/* By-value C wrapper over the f2c by-reference BLAS routine. */
void csrot(int N, complex *x, int incx, complex *y, int incy, float c,
	float s)
{
    csrot_(&N, x, &incx, y, &incy, &c, &s);
}
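/* Usage sketch for the by-value csrot wrapper above (hypothetical example,
   not library code): apply the plane rotation (c, s), with c*c + s*s = 1,
   to two length-3 complex vectors, overwriting them with
   x <- c*x + s*y and y <- c*y - s*x. */
static void csrot_example(void)
{
    complex x[3] = { {1.f, 0.f}, {0.f, 1.f}, {2.f, -1.f} };
    complex y[3] = { {3.f, 0.f}, {1.f, 1.f}, {-1.f, 3.f} };
    float c = .8f, s = .6f;

    csrot(3, x, 1, y, 1, c, s); /* unit stride through both vectors */
}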
/*< subroutine csvdc(x,ldx,n,p,s,e,u,ldu,v,ldv,work,job,info) >*/ /* Subroutine */ int csvdc_(complex *x, integer *ldx, integer *n, integer *p, complex *s, complex *e, complex *u, integer *ldu, complex *v, integer *ldv, complex *work, integer *job, integer *info) { /* System generated locals */ integer x_dim1, x_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4; real r__1, r__2, r__3, r__4; complex q__1, q__2, q__3; /* Builtin functions */ double r_imag(complex *), c_abs(complex *); void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *); double sqrt(doublereal); /* Local variables */ real b, c__, f, g; integer i__, j, k, l=0, m; complex r__, t; real t1, el; integer kk; real cs; integer ll, mm, ls=0; real sl; integer lu; real sm, sn; integer lm1, mm1, lp1, mp1, nct, ncu, lls, nrt; real emm1, smm1; integer kase, jobu, iter; real test; integer nctp1, nrtp1; extern /* Subroutine */ int cscal_(integer *, complex *, complex *, integer *); real scale; extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer *, complex *, integer *); real shift; extern /* Subroutine */ int cswap_(integer *, complex *, integer *, complex *, integer *); integer maxit; extern /* Subroutine */ int caxpy_(integer *, complex *, complex *, integer *, complex *, integer *), csrot_(integer *, complex *, integer *, complex *, integer *, real *, real *); logical wantu, wantv; extern /* Subroutine */ int srotg_(real *, real *, real *, real *); real ztest; extern doublereal scnrm2_(integer *, complex *, integer *); /*< integer ldx,n,p,ldu,ldv,job,info >*/ /*< complex x(ldx,1),s(1),e(1),u(ldu,1),v(ldv,1),work(1) >*/ /* csvdc is a subroutine to reduce a complex nxp matrix x by */ /* unitary transformations u and v to diagonal form. the */ /* diagonal elements s(i) are the singular values of x. the */ /* columns of u are the corresponding left singular vectors, */ /* and the columns of v the right singular vectors. */ /* on entry */ /* x complex(ldx,p), where ldx.ge.n. */ /* x contains the matrix whose singular value */ /* decomposition is to be computed. x is */ /* destroyed by csvdc. */ /* ldx integer. */ /* ldx is the leading dimension of the array x. */ /* n integer. */ /* n is the number of rows of the matrix x. */ /* p integer. */ /* p is the number of columns of the matrix x. */ /* ldu integer. */ /* ldu is the leading dimension of the array u */ /* (see below). */ /* ldv integer. */ /* ldv is the leading dimension of the array v */ /* (see below). */ /* work complex(n). */ /* work is a scratch array. */ /* job integer. */ /* job controls the computation of the singular */ /* vectors. it has the decimal expansion ab */ /* with the following meaning */ /* a.eq.0 do not compute the left singular */ /* vectors. */ /* a.eq.1 return the n left singular vectors */ /* in u. */ /* a.ge.2 returns the first min(n,p) */ /* left singular vectors in u. */ /* b.eq.0 do not compute the right singular */ /* vectors. */ /* b.eq.1 return the right singular vectors */ /* in v. */ /* on return */ /* s complex(mm), where mm=min(n+1,p). */ /* the first min(n,p) entries of s contain the */ /* singular values of x arranged in descending */ /* order of magnitude. */ /* e complex(p). */ /* e ordinarily contains zeros. however see the */ /* discussion of info for exceptions. */ /* u complex(ldu,k), where ldu.ge.n. if joba.eq.1 then */ /* k.eq.n, if joba.ge.2 then */ /* k.eq.min(n,p). */ /* u contains the matrix of left singular vectors. */ /* u is not referenced if joba.eq.0. 
if n.le.p */
/*                   or if joba.gt.2, then u may be identified with x */
/*                   in the subroutine call. */

/*         v         complex(ldv,p), where ldv.ge.p. */
/*                   v contains the matrix of right singular vectors. */
/*                   v is not referenced if jobb.eq.0. if p.le.n, */
/*                   then v may be identified with x in the */
/*                   subroutine call. */

/*         info      integer. */
/*                   the singular values (and their corresponding */
/*                   singular vectors) s(info+1),s(info+2),...,s(m) */
/*                   are correct (here m=min(n,p)). thus if */
/*                   info.eq.0, all the singular values and their */
/*                   vectors are correct. in any event, the matrix */
/*                   b = ctrans(u)*x*v is the bidiagonal matrix */
/*                   with the elements of s on its diagonal and the */
/*                   elements of e on its super-diagonal (ctrans(u) */
/*                   is the conjugate-transpose of u). thus the */
/*                   singular values of x and b are the same. */

/*     linpack. this version dated 03/19/79. */
/*              correction to shift calculation made 2/85. */
/*     g.w. stewart, university of maryland, argonne national lab. */

/*     csvdc uses the following functions and subprograms. */

/*     external csrot */
/*     blas caxpy,cdotc,cscal,cswap,scnrm2,srotg */
/*     fortran abs,aimag,amax1,cabs,cmplx */
/*     fortran conjg,max0,min0,mod,real,sqrt */

/*     internal variables */

/*< >*/
/*< complex cdotc,t,r >*/
/*< >*/
/*< logical wantu,wantv >*/
/*< complex csign,zdum,zdum1,zdum2 >*/
/*< real cabs1 >*/
/*< cabs1(zdum) = abs(real(zdum)) + abs(aimag(zdum)) >*/
/*< csign(zdum1,zdum2) = cabs(zdum1)*(zdum2/cabs(zdum2)) >*/

/*     set the maximum number of iterations. */

/*< maxit = 1000 >*/
    /* Parameter adjustments */
    x_dim1 = *ldx;
    x_offset = 1 + x_dim1;
    x -= x_offset;
    --s;
    --e;
    u_dim1 = *ldu;
    u_offset = 1 + u_dim1;
    u -= u_offset;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;
    --work;

    /* Function Body */
    maxit = 1000;

/*     determine what is to be computed. */

/*< wantu = .false. >*/
    wantu = FALSE_;
/*< wantv = .false. >*/
    wantv = FALSE_;
/*< jobu = mod(job,100)/10 >*/
    jobu = *job % 100 / 10;
/*< ncu = n >*/
    ncu = *n;
/*< if (jobu .gt. 1) ncu = min0(n,p) >*/
    if (jobu > 1) {
	ncu = min(*n,*p);
    }
/*< if (jobu .ne. 0) wantu = .true. >*/
    if (jobu != 0) {
	wantu = TRUE_;
    }
/*< if (mod(job,10) .ne. 0) wantv = .true. >*/
    if (*job % 10 != 0) {
	wantv = TRUE_;
    }

/*     reduce x to bidiagonal form, storing the diagonal elements */
/*     in s and the super-diagonal elements in e. */

/*< info = 0 >*/
    *info = 0;
/*< nct = min0(n-1,p) >*/
    /* Computing MIN */
    i__1 = *n - 1;
    nct = min(i__1,*p);
/*< nrt = max0(0,min0(p-2,n)) >*/
    /* Computing MAX */
    /* Computing MIN */
    i__3 = *p - 2;
    i__1 = 0, i__2 = min(i__3,*n);
    nrt = max(i__1,i__2);
/*< lu = max0(nct,nrt) >*/
    lu = max(nct,nrt);
/*< if (lu .lt. 1) go to 170 >*/
    if (lu < 1) {
	goto L170;
    }
/*< do 160 l = 1, lu >*/
    i__1 = lu;
    for (l = 1; l <= i__1; ++l) {
/*< lp1 = l + 1 >*/
	lp1 = l + 1;
/*< if (l .gt. nct) go to 20 >*/
	if (l > nct) {
	    goto L20;
	}

/*        compute the transformation for the l-th column and */
/*        place the l-th diagonal in s(l). */

/*< s(l) = cmplx(scnrm2(n-l+1,x(l,l),1),0.0e0) >*/
	i__2 = l;
	i__3 = *n - l + 1;
	r__1 = scnrm2_(&i__3, &x[l + l * x_dim1], &c__1);
	q__1.r = r__1, q__1.i = (float)0.;
	s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*< if (cabs1(s(l)) .eq. 0.0e0) go to 10 >*/
	i__2 = l;
	if ((r__1 = s[i__2].r, dabs(r__1)) + (r__2 = r_imag(&s[l]),
		dabs(r__2)) == (float)0.) {
	    goto L10;
	}
/*< if (cabs1(x(l,l)) .ne. 0.0e0) s(l) = csign(s(l),x(l,l)) >*/
	i__2 = l + l * x_dim1;
	if ((r__1 = x[i__2].r, dabs(r__1)) + (r__2 = r_imag(&x[l + l *
		x_dim1]), dabs(r__2)) != (float)0.)
{ i__3 = l; r__3 = c_abs(&s[l]); i__4 = l + l * x_dim1; r__4 = c_abs(&x[l + l * x_dim1]); q__2.r = x[i__4].r / r__4, q__2.i = x[i__4].i / r__4; q__1.r = r__3 * q__2.r, q__1.i = r__3 * q__2.i; s[i__3].r = q__1.r, s[i__3].i = q__1.i; } /*< call cscal(n-l+1,1.0e0/s(l),x(l,l),1) >*/ i__2 = *n - l + 1; c_div(&q__1, &c_b8, &s[l]); cscal_(&i__2, &q__1, &x[l + l * x_dim1], &c__1); /*< x(l,l) = (1.0e0,0.0e0) + x(l,l) >*/ i__2 = l + l * x_dim1; i__3 = l + l * x_dim1; q__1.r = x[i__3].r + (float)1., q__1.i = x[i__3].i + (float)0.; x[i__2].r = q__1.r, x[i__2].i = q__1.i; /*< 10 continue >*/ L10: /*< s(l) = -s(l) >*/ i__2 = l; i__3 = l; q__1.r = -s[i__3].r, q__1.i = -s[i__3].i; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< 20 continue >*/ L20: /*< if (p .lt. lp1) go to 50 >*/ if (*p < lp1) { goto L50; } /*< do 40 j = lp1, p >*/ i__2 = *p; for (j = lp1; j <= i__2; ++j) { /*< if (l .gt. nct) go to 30 >*/ if (l > nct) { goto L30; } /*< if (cabs1(s(l)) .eq. 0.0e0) go to 30 >*/ i__3 = l; if ((r__1 = s[i__3].r, dabs(r__1)) + (r__2 = r_imag(&s[l]), dabs( r__2)) == (float)0.) { goto L30; } /* apply the transformation. */ /*< t = -cdotc(n-l+1,x(l,l),1,x(l,j),1)/x(l,l) >*/ i__3 = *n - l + 1; cdotc_(&q__3, &i__3, &x[l + l * x_dim1], &c__1, &x[l + j * x_dim1] , &c__1); q__2.r = -q__3.r, q__2.i = -q__3.i; c_div(&q__1, &q__2, &x[l + l * x_dim1]); t.r = q__1.r, t.i = q__1.i; /*< call caxpy(n-l+1,t,x(l,l),1,x(l,j),1) >*/ i__3 = *n - l + 1; caxpy_(&i__3, &t, &x[l + l * x_dim1], &c__1, &x[l + j * x_dim1], & c__1); /*< 30 continue >*/ L30: /* place the l-th row of x into e for the */ /* subsequent calculation of the row transformation. */ /*< e(j) = conjg(x(l,j)) >*/ i__3 = j; r_cnjg(&q__1, &x[l + j * x_dim1]); e[i__3].r = q__1.r, e[i__3].i = q__1.i; /*< 40 continue >*/ /* L40: */ } /*< 50 continue >*/ L50: /*< if (.not.wantu .or. l .gt. nct) go to 70 >*/ if (! wantu || l > nct) { goto L70; } /* place the transformation in u for subsequent back */ /* multiplication. */ /*< do 60 i = l, n >*/ i__2 = *n; for (i__ = l; i__ <= i__2; ++i__) { /*< u(i,l) = x(i,l) >*/ i__3 = i__ + l * u_dim1; i__4 = i__ + l * x_dim1; u[i__3].r = x[i__4].r, u[i__3].i = x[i__4].i; /*< 60 continue >*/ /* L60: */ } /*< 70 continue >*/ L70: /*< if (l .gt. nrt) go to 150 >*/ if (l > nrt) { goto L150; } /* compute the l-th row transformation and place the */ /* l-th super-diagonal in e(l). */ /*< e(l) = cmplx(scnrm2(p-l,e(lp1),1),0.0e0) >*/ i__2 = l; i__3 = *p - l; r__1 = scnrm2_(&i__3, &e[lp1], &c__1); q__1.r = r__1, q__1.i = (float)0.; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< if (cabs1(e(l)) .eq. 0.0e0) go to 80 >*/ i__2 = l; if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[l]), dabs(r__2) ) == (float)0.) { goto L80; } /*< if (cabs1(e(lp1)) .ne. 0.0e0) e(l) = csign(e(l),e(lp1)) >*/ i__2 = lp1; if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[lp1]), dabs( r__2)) != (float)0.) 
{ i__3 = l; r__3 = c_abs(&e[l]); i__4 = lp1; r__4 = c_abs(&e[lp1]); q__2.r = e[i__4].r / r__4, q__2.i = e[i__4].i / r__4; q__1.r = r__3 * q__2.r, q__1.i = r__3 * q__2.i; e[i__3].r = q__1.r, e[i__3].i = q__1.i; } /*< call cscal(p-l,1.0e0/e(l),e(lp1),1) >*/ i__2 = *p - l; c_div(&q__1, &c_b8, &e[l]); cscal_(&i__2, &q__1, &e[lp1], &c__1); /*< e(lp1) = (1.0e0,0.0e0) + e(lp1) >*/ i__2 = lp1; i__3 = lp1; q__1.r = e[i__3].r + (float)1., q__1.i = e[i__3].i + (float)0.; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< 80 continue >*/ L80: /*< e(l) = -conjg(e(l)) >*/ i__2 = l; r_cnjg(&q__2, &e[l]); q__1.r = -q__2.r, q__1.i = -q__2.i; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< if (lp1 .gt. n .or. cabs1(e(l)) .eq. 0.0e0) go to 120 >*/ i__2 = l; if (lp1 > *n || (r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[l]) , dabs(r__2)) == (float)0.) { goto L120; } /* apply the transformation. */ /*< do 90 i = lp1, n >*/ i__2 = *n; for (i__ = lp1; i__ <= i__2; ++i__) { /*< work(i) = (0.0e0,0.0e0) >*/ i__3 = i__; work[i__3].r = (float)0., work[i__3].i = (float)0.; /*< 90 continue >*/ /* L90: */ } /*< do 100 j = lp1, p >*/ i__2 = *p; for (j = lp1; j <= i__2; ++j) { /*< call caxpy(n-l,e(j),x(lp1,j),1,work(lp1),1) >*/ i__3 = *n - l; caxpy_(&i__3, &e[j], &x[lp1 + j * x_dim1], &c__1, &work[lp1], & c__1); /*< 100 continue >*/ /* L100: */ } /*< do 110 j = lp1, p >*/ i__2 = *p; for (j = lp1; j <= i__2; ++j) { /*< >*/ i__3 = *n - l; i__4 = j; q__3.r = -e[i__4].r, q__3.i = -e[i__4].i; c_div(&q__2, &q__3, &e[lp1]); r_cnjg(&q__1, &q__2); caxpy_(&i__3, &q__1, &work[lp1], &c__1, &x[lp1 + j * x_dim1], & c__1); /*< 110 continue >*/ /* L110: */ } /*< 120 continue >*/ L120: /*< if (.not.wantv) go to 140 >*/ if (! wantv) { goto L140; } /* place the transformation in v for subsequent */ /* back multiplication. */ /*< do 130 i = lp1, p >*/ i__2 = *p; for (i__ = lp1; i__ <= i__2; ++i__) { /*< v(i,l) = e(i) >*/ i__3 = i__ + l * v_dim1; i__4 = i__; v[i__3].r = e[i__4].r, v[i__3].i = e[i__4].i; /*< 130 continue >*/ /* L130: */ } /*< 140 continue >*/ L140: /*< 150 continue >*/ L150: /*< 160 continue >*/ /* L160: */ ; } /*< 170 continue >*/ L170: /* set up the final bidiagonal matrix or order m. */ /*< m = min0(p,n+1) >*/ /* Computing MIN */ i__1 = *p, i__2 = *n + 1; m = min(i__1,i__2); /*< nctp1 = nct + 1 >*/ nctp1 = nct + 1; /*< nrtp1 = nrt + 1 >*/ nrtp1 = nrt + 1; /*< if (nct .lt. p) s(nctp1) = x(nctp1,nctp1) >*/ if (nct < *p) { i__1 = nctp1; i__2 = nctp1 + nctp1 * x_dim1; s[i__1].r = x[i__2].r, s[i__1].i = x[i__2].i; } /*< if (n .lt. m) s(m) = (0.0e0,0.0e0) >*/ if (*n < m) { i__1 = m; s[i__1].r = (float)0., s[i__1].i = (float)0.; } /*< if (nrtp1 .lt. m) e(nrtp1) = x(nrtp1,m) >*/ if (nrtp1 < m) { i__1 = nrtp1; i__2 = nrtp1 + m * x_dim1; e[i__1].r = x[i__2].r, e[i__1].i = x[i__2].i; } /*< e(m) = (0.0e0,0.0e0) >*/ i__1 = m; e[i__1].r = (float)0., e[i__1].i = (float)0.; /* if required, generate u. */ /*< if (.not.wantu) go to 300 >*/ if (! wantu) { goto L300; } /*< if (ncu .lt. nctp1) go to 200 >*/ if (ncu < nctp1) { goto L200; } /*< do 190 j = nctp1, ncu >*/ i__1 = ncu; for (j = nctp1; j <= i__1; ++j) { /*< do 180 i = 1, n >*/ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { /*< u(i,j) = (0.0e0,0.0e0) >*/ i__3 = i__ + j * u_dim1; u[i__3].r = (float)0., u[i__3].i = (float)0.; /*< 180 continue >*/ /* L180: */ } /*< u(j,j) = (1.0e0,0.0e0) >*/ i__2 = j + j * u_dim1; u[i__2].r = (float)1., u[i__2].i = (float)0.; /*< 190 continue >*/ /* L190: */ } /*< 200 continue >*/ L200: /*< if (nct .lt. 
1) go to 290 >*/ if (nct < 1) { goto L290; } /*< do 280 ll = 1, nct >*/ i__1 = nct; for (ll = 1; ll <= i__1; ++ll) { /*< l = nct - ll + 1 >*/ l = nct - ll + 1; /*< if (cabs1(s(l)) .eq. 0.0e0) go to 250 >*/ i__2 = l; if ((r__1 = s[i__2].r, dabs(r__1)) + (r__2 = r_imag(&s[l]), dabs(r__2) ) == (float)0.) { goto L250; } /*< lp1 = l + 1 >*/ lp1 = l + 1; /*< if (ncu .lt. lp1) go to 220 >*/ if (ncu < lp1) { goto L220; } /*< do 210 j = lp1, ncu >*/ i__2 = ncu; for (j = lp1; j <= i__2; ++j) { /*< t = -cdotc(n-l+1,u(l,l),1,u(l,j),1)/u(l,l) >*/ i__3 = *n - l + 1; cdotc_(&q__3, &i__3, &u[l + l * u_dim1], &c__1, &u[l + j * u_dim1] , &c__1); q__2.r = -q__3.r, q__2.i = -q__3.i; c_div(&q__1, &q__2, &u[l + l * u_dim1]); t.r = q__1.r, t.i = q__1.i; /*< call caxpy(n-l+1,t,u(l,l),1,u(l,j),1) >*/ i__3 = *n - l + 1; caxpy_(&i__3, &t, &u[l + l * u_dim1], &c__1, &u[l + j * u_dim1], & c__1); /*< 210 continue >*/ /* L210: */ } /*< 220 continue >*/ L220: /*< call cscal(n-l+1,(-1.0e0,0.0e0),u(l,l),1) >*/ i__2 = *n - l + 1; cscal_(&i__2, &c_b53, &u[l + l * u_dim1], &c__1); /*< u(l,l) = (1.0e0,0.0e0) + u(l,l) >*/ i__2 = l + l * u_dim1; i__3 = l + l * u_dim1; q__1.r = u[i__3].r + (float)1., q__1.i = u[i__3].i + (float)0.; u[i__2].r = q__1.r, u[i__2].i = q__1.i; /*< lm1 = l - 1 >*/ lm1 = l - 1; /*< if (lm1 .lt. 1) go to 240 >*/ if (lm1 < 1) { goto L240; } /*< do 230 i = 1, lm1 >*/ i__2 = lm1; for (i__ = 1; i__ <= i__2; ++i__) { /*< u(i,l) = (0.0e0,0.0e0) >*/ i__3 = i__ + l * u_dim1; u[i__3].r = (float)0., u[i__3].i = (float)0.; /*< 230 continue >*/ /* L230: */ } /*< 240 continue >*/ L240: /*< go to 270 >*/ goto L270; /*< 250 continue >*/ L250: /*< do 260 i = 1, n >*/ i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { /*< u(i,l) = (0.0e0,0.0e0) >*/ i__3 = i__ + l * u_dim1; u[i__3].r = (float)0., u[i__3].i = (float)0.; /*< 260 continue >*/ /* L260: */ } /*< u(l,l) = (1.0e0,0.0e0) >*/ i__2 = l + l * u_dim1; u[i__2].r = (float)1., u[i__2].i = (float)0.; /*< 270 continue >*/ L270: /*< 280 continue >*/ /* L280: */ ; } /*< 290 continue >*/ L290: /*< 300 continue >*/ L300: /* if it is required, generate v. */ /*< if (.not.wantv) go to 350 >*/ if (! wantv) { goto L350; } /*< do 340 ll = 1, p >*/ i__1 = *p; for (ll = 1; ll <= i__1; ++ll) { /*< l = p - ll + 1 >*/ l = *p - ll + 1; /*< lp1 = l + 1 >*/ lp1 = l + 1; /*< if (l .gt. nrt) go to 320 >*/ if (l > nrt) { goto L320; } /*< if (cabs1(e(l)) .eq. 0.0e0) go to 320 >*/ i__2 = l; if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[l]), dabs(r__2) ) == (float)0.) { goto L320; } /*< do 310 j = lp1, p >*/ i__2 = *p; for (j = lp1; j <= i__2; ++j) { /*< t = -cdotc(p-l,v(lp1,l),1,v(lp1,j),1)/v(lp1,l) >*/ i__3 = *p - l; cdotc_(&q__3, &i__3, &v[lp1 + l * v_dim1], &c__1, &v[lp1 + j * v_dim1], &c__1); q__2.r = -q__3.r, q__2.i = -q__3.i; c_div(&q__1, &q__2, &v[lp1 + l * v_dim1]); t.r = q__1.r, t.i = q__1.i; /*< call caxpy(p-l,t,v(lp1,l),1,v(lp1,j),1) >*/ i__3 = *p - l; caxpy_(&i__3, &t, &v[lp1 + l * v_dim1], &c__1, &v[lp1 + j * v_dim1], &c__1); /*< 310 continue >*/ /* L310: */ } /*< 320 continue >*/ L320: /*< do 330 i = 1, p >*/ i__2 = *p; for (i__ = 1; i__ <= i__2; ++i__) { /*< v(i,l) = (0.0e0,0.0e0) >*/ i__3 = i__ + l * v_dim1; v[i__3].r = (float)0., v[i__3].i = (float)0.; /*< 330 continue >*/ /* L330: */ } /*< v(l,l) = (1.0e0,0.0e0) >*/ i__2 = l + l * v_dim1; v[i__2].r = (float)1., v[i__2].i = (float)0.; /*< 340 continue >*/ /* L340: */ } /*< 350 continue >*/ L350: /* transform s and e so that they are real. 
*/ /*< do 380 i = 1, m >*/ i__1 = m; for (i__ = 1; i__ <= i__1; ++i__) { /*< if (cabs1(s(i)) .eq. 0.0e0) go to 360 >*/ i__2 = i__; if ((r__1 = s[i__2].r, dabs(r__1)) + (r__2 = r_imag(&s[i__]), dabs( r__2)) == (float)0.) { goto L360; } /*< t = cmplx(cabs(s(i)),0.0e0) >*/ r__1 = c_abs(&s[i__]); q__1.r = r__1, q__1.i = (float)0.; t.r = q__1.r, t.i = q__1.i; /*< r = s(i)/t >*/ c_div(&q__1, &s[i__], &t); r__.r = q__1.r, r__.i = q__1.i; /*< s(i) = t >*/ i__2 = i__; s[i__2].r = t.r, s[i__2].i = t.i; /*< if (i .lt. m) e(i) = e(i)/r >*/ if (i__ < m) { i__2 = i__; c_div(&q__1, &e[i__], &r__); e[i__2].r = q__1.r, e[i__2].i = q__1.i; } /*< if (wantu) call cscal(n,r,u(1,i),1) >*/ if (wantu) { cscal_(n, &r__, &u[i__ * u_dim1 + 1], &c__1); } /*< 360 continue >*/ L360: /* ...exit */ /*< if (i .eq. m) go to 390 >*/ if (i__ == m) { goto L390; } /*< if (cabs1(e(i)) .eq. 0.0e0) go to 370 >*/ i__2 = i__; if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[i__]), dabs( r__2)) == (float)0.) { goto L370; } /*< t = cmplx(cabs(e(i)),0.0e0) >*/ r__1 = c_abs(&e[i__]); q__1.r = r__1, q__1.i = (float)0.; t.r = q__1.r, t.i = q__1.i; /*< r = t/e(i) >*/ c_div(&q__1, &t, &e[i__]); r__.r = q__1.r, r__.i = q__1.i; /*< e(i) = t >*/ i__2 = i__; e[i__2].r = t.r, e[i__2].i = t.i; /*< s(i+1) = s(i+1)*r >*/ i__2 = i__ + 1; i__3 = i__ + 1; q__1.r = s[i__3].r * r__.r - s[i__3].i * r__.i, q__1.i = s[i__3].r * r__.i + s[i__3].i * r__.r; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< if (wantv) call cscal(p,r,v(1,i+1),1) >*/ if (wantv) { cscal_(p, &r__, &v[(i__ + 1) * v_dim1 + 1], &c__1); } /*< 370 continue >*/ L370: /*< 380 continue >*/ /* L380: */ ; } /*< 390 continue >*/ L390: /* main iteration loop for the singular values. */ /*< mm = m >*/ mm = m; /*< iter = 0 >*/ iter = 0; /*< 400 continue >*/ L400: /* quit if all the singular values have been found. */ /* ...exit */ /*< if (m .eq. 0) go to 660 >*/ if (m == 0) { goto L660; } /* if too many iterations have been performed, set */ /* flag and return. */ /*< if (iter .lt. maxit) go to 410 >*/ if (iter < maxit) { goto L410; } /*< info = m >*/ *info = m; /* ......exit */ /*< go to 660 >*/ goto L660; /*< 410 continue >*/ L410: /* this section of the program inspects for */ /* negligible elements in the s and e arrays. on */ /* completion the variables kase and l are set as follows. */ /* kase = 1 if s(m) and e(l-1) are negligible and l.lt.m */ /* kase = 2 if s(l) is negligible and l.lt.m */ /* kase = 3 if e(l-1) is negligible, l.lt.m, and */ /* s(l), ..., s(m) are not negligible (qr step). */ /* kase = 4 if e(m-1) is negligible (convergence). */ /*< do 430 ll = 1, m >*/ i__1 = m; for (ll = 1; ll <= i__1; ++ll) { /*< l = m - ll >*/ l = m - ll; /* ...exit */ /*< if (l .eq. 0) go to 440 >*/ if (l == 0) { goto L440; } /*< test = cabs(s(l)) + cabs(s(l+1)) >*/ test = c_abs(&s[l]) + c_abs(&s[l + 1]); /*< ztest = test + cabs(e(l)) >*/ ztest = test + c_abs(&e[l]); /*< if (ztest .ne. test) go to 420 >*/ if (ztest != test) { goto L420; } /*< e(l) = (0.0e0,0.0e0) >*/ i__2 = l; e[i__2].r = (float)0., e[i__2].i = (float)0.; /* ......exit */ /*< go to 440 >*/ goto L440; /*< 420 continue >*/ L420: /*< 430 continue >*/ /* L430: */ ; } /*< 440 continue >*/ L440: /*< if (l .ne. 
m - 1) go to 450 >*/ if (l != m - 1) { goto L450; } /*< kase = 4 >*/ kase = 4; /*< go to 520 >*/ goto L520; /*< 450 continue >*/ L450: /*< lp1 = l + 1 >*/ lp1 = l + 1; /*< mp1 = m + 1 >*/ mp1 = m + 1; /*< do 470 lls = lp1, mp1 >*/ i__1 = mp1; for (lls = lp1; lls <= i__1; ++lls) { /*< ls = m - lls + lp1 >*/ ls = m - lls + lp1; /* ...exit */ /*< if (ls .eq. l) go to 480 >*/ if (ls == l) { goto L480; } /*< test = 0.0e0 >*/ test = (float)0.; /*< if (ls .ne. m) test = test + cabs(e(ls)) >*/ if (ls != m) { test += c_abs(&e[ls]); } /*< if (ls .ne. l + 1) test = test + cabs(e(ls-1)) >*/ if (ls != l + 1) { test += c_abs(&e[ls - 1]); } /*< ztest = test + cabs(s(ls)) >*/ ztest = test + c_abs(&s[ls]); /*< if (ztest .ne. test) go to 460 >*/ if (ztest != test) { goto L460; } /*< s(ls) = (0.0e0,0.0e0) >*/ i__2 = ls; s[i__2].r = (float)0., s[i__2].i = (float)0.; /* ......exit */ /*< go to 480 >*/ goto L480; /*< 460 continue >*/ L460: /*< 470 continue >*/ /* L470: */ ; } /*< 480 continue >*/ L480: /*< if (ls .ne. l) go to 490 >*/ if (ls != l) { goto L490; } /*< kase = 3 >*/ kase = 3; /*< go to 510 >*/ goto L510; /*< 490 continue >*/ L490: /*< if (ls .ne. m) go to 500 >*/ if (ls != m) { goto L500; } /*< kase = 1 >*/ kase = 1; /*< go to 510 >*/ goto L510; /*< 500 continue >*/ L500: /*< kase = 2 >*/ kase = 2; /*< l = ls >*/ l = ls; /*< 510 continue >*/ L510: /*< 520 continue >*/ L520: /*< l = l + 1 >*/ ++l; /* perform the task indicated by kase. */ /*< go to (530, 560, 580, 610), kase >*/ switch (kase) { case 1: goto L530; case 2: goto L560; case 3: goto L580; case 4: goto L610; } /* deflate negligible s(m). */ /*< 530 continue >*/ L530: /*< mm1 = m - 1 >*/ mm1 = m - 1; /*< f = real(e(m-1)) >*/ i__1 = m - 1; f = e[i__1].r; /*< e(m-1) = (0.0e0,0.0e0) >*/ i__1 = m - 1; e[i__1].r = (float)0., e[i__1].i = (float)0.; /*< do 550 kk = l, mm1 >*/ i__1 = mm1; for (kk = l; kk <= i__1; ++kk) { /*< k = mm1 - kk + l >*/ k = mm1 - kk + l; /*< t1 = real(s(k)) >*/ i__2 = k; t1 = s[i__2].r; /*< call srotg(t1,f,cs,sn) >*/ srotg_(&t1, &f, &cs, &sn); /*< s(k) = cmplx(t1,0.0e0) >*/ i__2 = k; q__1.r = t1, q__1.i = (float)0.; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< if (k .eq. l) go to 540 >*/ if (k == l) { goto L540; } /*< f = -sn*real(e(k-1)) >*/ i__2 = k - 1; f = -sn * e[i__2].r; /*< e(k-1) = cs*e(k-1) >*/ i__2 = k - 1; i__3 = k - 1; q__1.r = cs * e[i__3].r, q__1.i = cs * e[i__3].i; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< 540 continue >*/ L540: /*< if (wantv) call csrot(p,v(1,k),1,v(1,m),1,cs,sn) >*/ if (wantv) { csrot_(p, &v[k * v_dim1 + 1], &c__1, &v[m * v_dim1 + 1], &c__1, & cs, &sn); } /*< 550 continue >*/ /* L550: */ } /*< go to 650 >*/ goto L650; /* split at negligible s(l). 
*/ /*< 560 continue >*/ L560: /*< f = real(e(l-1)) >*/ i__1 = l - 1; f = e[i__1].r; /*< e(l-1) = (0.0e0,0.0e0) >*/ i__1 = l - 1; e[i__1].r = (float)0., e[i__1].i = (float)0.; /*< do 570 k = l, m >*/ i__1 = m; for (k = l; k <= i__1; ++k) { /*< t1 = real(s(k)) >*/ i__2 = k; t1 = s[i__2].r; /*< call srotg(t1,f,cs,sn) >*/ srotg_(&t1, &f, &cs, &sn); /*< s(k) = cmplx(t1,0.0e0) >*/ i__2 = k; q__1.r = t1, q__1.i = (float)0.; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< f = -sn*real(e(k)) >*/ i__2 = k; f = -sn * e[i__2].r; /*< e(k) = cs*e(k) >*/ i__2 = k; i__3 = k; q__1.r = cs * e[i__3].r, q__1.i = cs * e[i__3].i; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< if (wantu) call csrot(n,u(1,k),1,u(1,l-1),1,cs,sn) >*/ if (wantu) { csrot_(n, &u[k * u_dim1 + 1], &c__1, &u[(l - 1) * u_dim1 + 1], & c__1, &cs, &sn); } /*< 570 continue >*/ /* L570: */ } /*< go to 650 >*/ goto L650; /* perform one qr step. */ /*< 580 continue >*/ L580: /* calculate the shift. */ /*< >*/ /* Computing MAX */ r__1 = c_abs(&s[m]), r__2 = c_abs(&s[m - 1]), r__1 = max(r__1,r__2), r__2 = c_abs(&e[m - 1]), r__1 = max(r__1,r__2), r__2 = c_abs(&s[l]), r__1 = max(r__1,r__2), r__2 = c_abs(&e[l]); scale = dmax(r__1,r__2); /*< sm = real(s(m))/scale >*/ i__1 = m; sm = s[i__1].r / scale; /*< smm1 = real(s(m-1))/scale >*/ i__1 = m - 1; smm1 = s[i__1].r / scale; /*< emm1 = real(e(m-1))/scale >*/ i__1 = m - 1; emm1 = e[i__1].r / scale; /*< sl = real(s(l))/scale >*/ i__1 = l; sl = s[i__1].r / scale; /*< el = real(e(l))/scale >*/ i__1 = l; el = e[i__1].r / scale; /*< b = ((smm1 + sm)*(smm1 - sm) + emm1**2)/2.0e0 >*/ /* Computing 2nd power */ r__1 = emm1; b = ((smm1 + sm) * (smm1 - sm) + r__1 * r__1) / (float)2.; /*< c = (sm*emm1)**2 >*/ /* Computing 2nd power */ r__1 = sm * emm1; c__ = r__1 * r__1; /*< shift = 0.0e0 >*/ shift = (float)0.; /*< if (b .eq. 0.0e0 .and. c .eq. 0.0e0) go to 590 >*/ if (b == (float)0. && c__ == (float)0.) { goto L590; } /*< shift = sqrt(b**2+c) >*/ /* Computing 2nd power */ r__1 = b; shift = sqrt(r__1 * r__1 + c__); /*< if (b .lt. 0.0e0) shift = -shift >*/ if (b < (float)0.) { shift = -shift; } /*< shift = c/(b + shift) >*/ shift = c__ / (b + shift); /*< 590 continue >*/ L590: /*< f = (sl + sm)*(sl - sm) + shift >*/ f = (sl + sm) * (sl - sm) + shift; /*< g = sl*el >*/ g = sl * el; /* chase zeros. */ /*< mm1 = m - 1 >*/ mm1 = m - 1; /*< do 600 k = l, mm1 >*/ i__1 = mm1; for (k = l; k <= i__1; ++k) { /*< call srotg(f,g,cs,sn) >*/ srotg_(&f, &g, &cs, &sn); /*< if (k .ne. 
l) e(k-1) = cmplx(f,0.0e0) >*/ if (k != l) { i__2 = k - 1; q__1.r = f, q__1.i = (float)0.; e[i__2].r = q__1.r, e[i__2].i = q__1.i; } /*< f = cs*real(s(k)) + sn*real(e(k)) >*/ i__2 = k; i__3 = k; f = cs * s[i__2].r + sn * e[i__3].r; /*< e(k) = cs*e(k) - sn*s(k) >*/ i__2 = k; i__3 = k; q__2.r = cs * e[i__3].r, q__2.i = cs * e[i__3].i; i__4 = k; q__3.r = sn * s[i__4].r, q__3.i = sn * s[i__4].i; q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< g = sn*real(s(k+1)) >*/ i__2 = k + 1; g = sn * s[i__2].r; /*< s(k+1) = cs*s(k+1) >*/ i__2 = k + 1; i__3 = k + 1; q__1.r = cs * s[i__3].r, q__1.i = cs * s[i__3].i; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< if (wantv) call csrot(p,v(1,k),1,v(1,k+1),1,cs,sn) >*/ if (wantv) { csrot_(p, &v[k * v_dim1 + 1], &c__1, &v[(k + 1) * v_dim1 + 1], & c__1, &cs, &sn); } /*< call srotg(f,g,cs,sn) >*/ srotg_(&f, &g, &cs, &sn); /*< s(k) = cmplx(f,0.0e0) >*/ i__2 = k; q__1.r = f, q__1.i = (float)0.; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< f = cs*real(e(k)) + sn*real(s(k+1)) >*/ i__2 = k; i__3 = k + 1; f = cs * e[i__2].r + sn * s[i__3].r; /*< s(k+1) = -sn*e(k) + cs*s(k+1) >*/ i__2 = k + 1; r__1 = -sn; i__3 = k; q__2.r = r__1 * e[i__3].r, q__2.i = r__1 * e[i__3].i; i__4 = k + 1; q__3.r = cs * s[i__4].r, q__3.i = cs * s[i__4].i; q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i; s[i__2].r = q__1.r, s[i__2].i = q__1.i; /*< g = sn*real(e(k+1)) >*/ i__2 = k + 1; g = sn * e[i__2].r; /*< e(k+1) = cs*e(k+1) >*/ i__2 = k + 1; i__3 = k + 1; q__1.r = cs * e[i__3].r, q__1.i = cs * e[i__3].i; e[i__2].r = q__1.r, e[i__2].i = q__1.i; /*< >*/ if (wantu && k < *n) { csrot_(n, &u[k * u_dim1 + 1], &c__1, &u[(k + 1) * u_dim1 + 1], & c__1, &cs, &sn); } /*< 600 continue >*/ /* L600: */ } /*< e(m-1) = cmplx(f,0.0e0) >*/ i__1 = m - 1; q__1.r = f, q__1.i = (float)0.; e[i__1].r = q__1.r, e[i__1].i = q__1.i; /*< iter = iter + 1 >*/ ++iter; /*< go to 650 >*/ goto L650; /* convergence. */ /*< 610 continue >*/ L610: /* make the singular value positive */ /*< if (real(s(l)) .ge. 0.0e0) go to 620 >*/ i__1 = l; if (s[i__1].r >= (float)0.) { goto L620; } /*< s(l) = -s(l) >*/ i__1 = l; i__2 = l; q__1.r = -s[i__2].r, q__1.i = -s[i__2].i; s[i__1].r = q__1.r, s[i__1].i = q__1.i; /*< if (wantv) call cscal(p,(-1.0e0,0.0e0),v(1,l),1) >*/ if (wantv) { cscal_(p, &c_b53, &v[l * v_dim1 + 1], &c__1); } /*< 620 continue >*/ L620: /* order the singular value. */ /*< 630 if (l .eq. mm) go to 640 >*/ L630: if (l == mm) { goto L640; } /* ...exit */ /*< if (real(s(l)) .ge. real(s(l+1))) go to 640 >*/ i__1 = l; i__2 = l + 1; if (s[i__1].r >= s[i__2].r) { goto L640; } /*< t = s(l) >*/ i__1 = l; t.r = s[i__1].r, t.i = s[i__1].i; /*< s(l) = s(l+1) >*/ i__1 = l; i__2 = l + 1; s[i__1].r = s[i__2].r, s[i__1].i = s[i__2].i; /*< s(l+1) = t >*/ i__1 = l + 1; s[i__1].r = t.r, s[i__1].i = t.i; /*< >*/ if (wantv && l < *p) { cswap_(p, &v[l * v_dim1 + 1], &c__1, &v[(l + 1) * v_dim1 + 1], &c__1); } /*< >*/ if (wantu && l < *n) { cswap_(n, &u[l * u_dim1 + 1], &c__1, &u[(l + 1) * u_dim1 + 1], &c__1); } /*< l = l + 1 >*/ ++l; /*< go to 630 >*/ goto L630; /*< 640 continue >*/ L640: /*< iter = 0 >*/ iter = 0; /*< m = m - 1 >*/ --m; /*< 650 continue >*/ L650: /*< go to 400 >*/ goto L400; /*< 660 continue >*/ L660: /*< return >*/ return 0; /*< end >*/ } /* csvdc_ */
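/* Illustrative driver for csvdc_ (a sketch, not part of LINPACK): compute
   the full SVD of an n-by-p complex matrix with job = 11, i.e. all n left
   singular vectors returned in u and the right singular vectors in v. Per
   the documentation above, s needs min(n+1,p) entries, e needs p, and work
   needs n; the caller owns all arrays. The helper name is hypothetical. */
static int csvdc_example(complex *x, integer ldx, integer n, integer p,
	complex *s, complex *e, complex *u, integer ldu, complex *v,
	integer ldv, complex *work)
{
    integer job = 11, info = 0;

    csvdc_(x, &ldx, &n, &p, s, e, u, &ldu, v, &ldv, work, &job, &info);

    /* info = 0 means every singular value converged; otherwise the values
       s(info+1), ..., s(min(n,p)) are still correct. */
    return (int) info;
}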
/* Subroutine */ int claed8_(integer *k, integer *n, integer *qsiz, complex * q, integer *ldq, real *d__, real *rho, integer *cutpnt, real *z__, real *dlamda, complex *q2, integer *ldq2, real *w, integer *indxp, integer *indx, integer *indxq, integer *perm, integer *givptr, integer *givcol, real *givnum, integer *info) { /* System generated locals */ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1; real r__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ real c__; integer i__, j; real s, t; integer k2, n1, n2, jp, n1p1; real eps, tau, tol; integer jlam, imax, jmax; extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *), ccopy_(integer *, complex *, integer *, complex *, integer *), csrot_(integer *, complex *, integer *, complex *, integer *, real *, real *), scopy_(integer *, real *, integer *, real *, integer *); extern doublereal slapy2_(real *, real *), slamch_(char *); extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex *, integer *, complex *, integer *), xerbla_(char *, integer *); extern integer isamax_(integer *, real *, integer *); extern /* Subroutine */ int slamrg_(integer *, integer *, real *, integer *, integer *, integer *); /* -- LAPACK routine (version 3.1) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* CLAED8 merges the two sets of eigenvalues together into a single */ /* sorted set. Then it tries to deflate the size of the problem. */ /* There are two ways in which deflation can occur: when two or more */ /* eigenvalues are close together or if there is a tiny element in the */ /* Z vector. For each such occurrence the order of the related secular */ /* equation problem is reduced by one. */ /* Arguments */ /* ========= */ /* K (output) INTEGER */ /* Contains the number of non-deflated eigenvalues. */ /* This is the order of the related secular equation. */ /* N (input) INTEGER */ /* The dimension of the symmetric tridiagonal matrix. N >= 0. */ /* QSIZ (input) INTEGER */ /* The dimension of the unitary matrix used to reduce */ /* the dense or band matrix to tridiagonal form. */ /* QSIZ >= N if ICOMPQ = 1. */ /* Q (input/output) COMPLEX array, dimension (LDQ,N) */ /* On entry, Q contains the eigenvectors of the partially solved */ /* system which has been previously updated in matrix */ /* multiplies with other partially solved eigensystems. */ /* On exit, Q contains the trailing (N-K) updated eigenvectors */ /* (those which were deflated) in its last N-K columns. */ /* LDQ (input) INTEGER */ /* The leading dimension of the array Q. LDQ >= max( 1, N ). */ /* D (input/output) REAL array, dimension (N) */ /* On entry, D contains the eigenvalues of the two submatrices to */ /* be combined. On exit, D contains the trailing (N-K) updated */ /* eigenvalues (those which were deflated) sorted into increasing */ /* order. */ /* RHO (input/output) REAL */ /* Contains the off diagonal element associated with the rank-1 */ /* cut which originally split the two submatrices which are now */ /* being recombined. RHO is modified during the computation to */ /* the value required by SLAED3. */ /* CUTPNT (input) INTEGER */ /* Contains the location of the last eigenvalue in the leading */ /* sub-matrix. MIN(1,N) <= CUTPNT <= N. 
*/ /* Z (input) REAL array, dimension (N) */ /* On input this vector contains the updating vector (the last */ /* row of the first sub-eigenvector matrix and the first row of */ /* the second sub-eigenvector matrix). The contents of Z are */ /* destroyed during the updating process. */ /* DLAMDA (output) REAL array, dimension (N) */ /* Contains a copy of the first K eigenvalues which will be used */ /* by SLAED3 to form the secular equation. */ /* Q2 (output) COMPLEX array, dimension (LDQ2,N) */ /* If ICOMPQ = 0, Q2 is not referenced. Otherwise, */ /* Contains a copy of the first K eigenvectors which will be used */ /* by SLAED7 in a matrix multiply (SGEMM) to update the new */ /* eigenvectors. */ /* LDQ2 (input) INTEGER */ /* The leading dimension of the array Q2. LDQ2 >= max( 1, N ). */ /* W (output) REAL array, dimension (N) */ /* This will hold the first k values of the final */ /* deflation-altered z-vector and will be passed to SLAED3. */ /* INDXP (workspace) INTEGER array, dimension (N) */ /* This will contain the permutation used to place deflated */ /* values of D at the end of the array. On output INDXP(1:K) */ /* points to the nondeflated D-values and INDXP(K+1:N) */ /* points to the deflated eigenvalues. */ /* INDX (workspace) INTEGER array, dimension (N) */ /* This will contain the permutation used to sort the contents of */ /* D into ascending order. */ /* INDXQ (input) INTEGER array, dimension (N) */ /* This contains the permutation which separately sorts the two */ /* sub-problems in D into ascending order. Note that elements in */ /* the second half of this permutation must first have CUTPNT */ /* added to their values in order to be accurate. */ /* PERM (output) INTEGER array, dimension (N) */ /* Contains the permutations (from deflation and sorting) to be */ /* applied to each eigenblock. */ /* GIVPTR (output) INTEGER */ /* Contains the number of Givens rotations which took place in */ /* this subproblem. */ /* GIVCOL (output) INTEGER array, dimension (2, N) */ /* Each pair of numbers indicates a pair of columns to take place */ /* in a Givens rotation. */ /* GIVNUM (output) REAL array, dimension (2, N) */ /* Each number indicates the S value to be used in the */ /* corresponding Givens rotation. */ /* INFO (output) INTEGER */ /* = 0: successful exit. */ /* < 0: if INFO = -i, the i-th argument had an illegal value. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Test the input parameters. 
*/ /* Parameter adjustments */ q_dim1 = *ldq; q_offset = 1 + q_dim1; q -= q_offset; --d__; --z__; --dlamda; q2_dim1 = *ldq2; q2_offset = 1 + q2_dim1; q2 -= q2_offset; --w; --indxp; --indx; --indxq; --perm; givcol -= 3; givnum -= 3; /* Function Body */ *info = 0; if (*n < 0) { *info = -2; } else if (*qsiz < *n) { *info = -3; } else if (*ldq < max(1,*n)) { *info = -5; } else if (*cutpnt < min(1,*n) || *cutpnt > *n) { *info = -8; } else if (*ldq2 < max(1,*n)) { *info = -12; } if (*info != 0) { i__1 = -(*info); xerbla_("CLAED8", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } n1 = *cutpnt; n2 = *n - n1; n1p1 = n1 + 1; if (*rho < 0.f) { sscal_(&n2, &c_b3, &z__[n1p1], &c__1); } /* Normalize z so that norm(z) = 1 */ t = 1.f / sqrt(2.f); i__1 = *n; for (j = 1; j <= i__1; ++j) { indx[j] = j; /* L10: */ } sscal_(n, &t, &z__[1], &c__1); *rho = (r__1 = *rho * 2.f, dabs(r__1)); /* Sort the eigenvalues into increasing order */ i__1 = *n; for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) { indxq[i__] += *cutpnt; /* L20: */ } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dlamda[i__] = d__[indxq[i__]]; w[i__] = z__[indxq[i__]]; /* L30: */ } i__ = 1; j = *cutpnt + 1; slamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]); i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { d__[i__] = dlamda[indx[i__]]; z__[i__] = w[indx[i__]]; /* L40: */ } /* Calculate the allowable deflation tolerance */ imax = isamax_(n, &z__[1], &c__1); jmax = isamax_(n, &d__[1], &c__1); eps = slamch_("Epsilon"); tol = eps * 8.f * (r__1 = d__[jmax], dabs(r__1)); /* If the rank-1 modifier is small enough, no more needs to be done */ /* -- except to reorganize Q so that its columns correspond with the */ /* elements in D. */ if (*rho * (r__1 = z__[imax], dabs(r__1)) <= tol) { *k = 0; i__1 = *n; for (j = 1; j <= i__1; ++j) { perm[j] = indxq[indx[j]]; ccopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1] , &c__1); /* L50: */ } clacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq); return 0; } /* If there are multiple eigenvalues then the problem deflates. Here */ /* the number of equal eigenvalues are found. As each equal */ /* eigenvalue is found, an elementary reflector is computed to rotate */ /* the corresponding eigensubspace so that the corresponding */ /* components of Z are zero in this new basis. */ *k = 0; *givptr = 0; k2 = *n + 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*rho * (r__1 = z__[j], dabs(r__1)) <= tol) { /* Deflate due to small z component. */ --k2; indxp[k2] = j; if (j == *n) { goto L100; } } else { jlam = j; goto L70; } /* L60: */ } L70: ++j; if (j > *n) { goto L90; } if (*rho * (r__1 = z__[j], dabs(r__1)) <= tol) { /* Deflate due to small z component. */ --k2; indxp[k2] = j; } else { /* Check if eigenvalues are close enough to allow deflation. */ s = z__[jlam]; c__ = z__[j]; /* Find sqrt(a**2+b**2) without overflow or */ /* destructive underflow. */ tau = slapy2_(&c__, &s); t = d__[j] - d__[jlam]; c__ /= tau; s = -s / tau; if ((r__1 = t * c__ * s, dabs(r__1)) <= tol) { /* Deflation is possible. 
*/ z__[j] = tau; z__[jlam] = 0.f; /* Record the appropriate Givens rotation */ ++(*givptr); givcol[(*givptr << 1) + 1] = indxq[indx[jlam]]; givcol[(*givptr << 1) + 2] = indxq[indx[j]]; givnum[(*givptr << 1) + 1] = c__; givnum[(*givptr << 1) + 2] = s; csrot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[indxq[ indx[j]] * q_dim1 + 1], &c__1, &c__, &s); t = d__[jlam] * c__ * c__ + d__[j] * s * s; d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__; d__[jlam] = t; --k2; i__ = 1; L80: if (k2 + i__ <= *n) { if (d__[jlam] < d__[indxp[k2 + i__]]) { indxp[k2 + i__ - 1] = indxp[k2 + i__]; indxp[k2 + i__] = jlam; ++i__; goto L80; } else { indxp[k2 + i__ - 1] = jlam; } } else { indxp[k2 + i__ - 1] = jlam; } jlam = j; } else { ++(*k); w[*k] = z__[jlam]; dlamda[*k] = d__[jlam]; indxp[*k] = jlam; jlam = j; } } goto L70; L90: /* Record the last eigenvalue. */ ++(*k); w[*k] = z__[jlam]; dlamda[*k] = d__[jlam]; indxp[*k] = jlam; L100: /* Sort the eigenvalues and corresponding eigenvectors into DLAMDA */ /* and Q2 respectively. The eigenvalues/vectors which were not */ /* deflated go into the first K slots of DLAMDA and Q2 respectively, */ /* while those which were deflated go into the last N - K slots. */ i__1 = *n; for (j = 1; j <= i__1; ++j) { jp = indxp[j]; dlamda[j] = d__[jp]; perm[j] = indxq[indx[jp]]; ccopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1], & c__1); /* L110: */ } /* The deflated eigenvalues and their corresponding vectors go back */ /* into the last N - K slots of D and Q respectively. */ if (*k < *n) { i__1 = *n - *k; scopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1); i__1 = *n - *k; clacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*k + 1) * q_dim1 + 1], ldq); } return 0; /* End of CLAED8 */ } /* claed8_ */
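/* Sketch of the small-z deflation test CLAED8 applies above (illustration
   only; the helper name is hypothetical and not part of LAPACK): an
   eigenvalue can be deflated when its rank-one weight rho*|z(j)| falls
   below tol = 8*eps*max_i |d(i)|, since dropping it then perturbs the
   spectrum by roughly roundoff in the largest eigenvalue. rho is
   nonnegative here, as the routine replaces it by |2*rho| before the scan.
   Assumes <math.h> for fabsf. */
static int claed8_z_deflatable(real rho, real zj, real dmax_abs, real eps)
{
    real tol = 8.f * eps * dmax_abs;
    return fabsf(rho * zj) <= tol;
}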
/* Subroutine */ int cbdsqr_(char *uplo, integer *n, integer *ncvt, integer * nru, integer *ncc, real *d__, real *e, complex *vt, integer *ldvt, complex *u, integer *ldu, complex *c__, integer *ldc, real *rwork, integer *info) { /* System generated locals */ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; real r__1, r__2, r__3, r__4; doublereal d__1; /* Local variables */ real f, g, h__; integer i__, j, m; real r__, cs; integer ll; real sn, mu; integer nm1, nm12, nm13, lll; real eps, sll, tol, abse; integer idir; real abss; integer oldm; real cosl; integer isub, iter; real unfl, sinl, cosr, smin, smax, sinr; real oldcs; integer oldll; real shift, sigmn, oldsn; integer maxit; real sminl, sigmx; logical lower; real sminoa; real thresh; logical rotate; real tolmul; /* -- LAPACK routine (version 3.2) -- */ /* November 2006 */ /* Purpose */ /* ======= */ /* CBDSQR computes the singular values and, optionally, the right and/or */ /* left singular vectors from the singular value decomposition (SVD) of */ /* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit */ /* zero-shift QR algorithm. The SVD of B has the form */ /* B = Q * S * P**H */ /* where S is the diagonal matrix of singular values, Q is an orthogonal */ /* matrix of left singular vectors, and P is an orthogonal matrix of */ /* right singular vectors. If left singular vectors are requested, this */ /* subroutine actually returns U*Q instead of Q, and, if right singular */ /* vectors are requested, this subroutine returns P**H*VT instead of */ /* P**H, for given complex input matrices U and VT. When U and VT are */ /* the unitary matrices that reduce a general matrix A to bidiagonal */ /* form: A = U*B*VT, as computed by CGEBRD, then */ /* A = (U*Q) * S * (P**H*VT) */ /* is the SVD of A. Optionally, the subroutine may also compute Q**H*C */ /* for a given complex input matrix C. */ /* See "Computing Small Singular Values of Bidiagonal Matrices With */ /* Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, */ /* LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11, */ /* no. 5, pp. 873-912, Sept 1990) and */ /* "Accurate singular values and differential qd algorithms," by */ /* B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics */ /* Department, University of California at Berkeley, July 1992 */ /* for a detailed description of the algorithm. */ /* Arguments */ /* ========= */ /* UPLO (input) CHARACTER*1 */ /* = 'U': B is upper bidiagonal; */ /* = 'L': B is lower bidiagonal. */ /* N (input) INTEGER */ /* The order of the matrix B. N >= 0. */ /* NCVT (input) INTEGER */ /* The number of columns of the matrix VT. NCVT >= 0. */ /* NRU (input) INTEGER */ /* The number of rows of the matrix U. NRU >= 0. */ /* NCC (input) INTEGER */ /* The number of columns of the matrix C. NCC >= 0. */ /* D (input/output) REAL array, dimension (N) */ /* On entry, the n diagonal elements of the bidiagonal matrix B. */ /* On exit, if INFO=0, the singular values of B in decreasing */ /* order. */ /* E (input/output) REAL array, dimension (N-1) */ /* On entry, the N-1 offdiagonal elements of the bidiagonal */ /* matrix B. */ /* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E */ /* will contain the diagonal and superdiagonal elements of a */ /* bidiagonal matrix orthogonally equivalent to the one given */ /* as input. */ /* VT (input/output) COMPLEX array, dimension (LDVT, NCVT) */ /* On entry, an N-by-NCVT matrix VT. */ /* On exit, VT is overwritten by P**H * VT. 
*/ /* Not referenced if NCVT = 0. */ /* LDVT (input) INTEGER */ /* The leading dimension of the array VT. */ /* LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0. */ /* U (input/output) COMPLEX array, dimension (LDU, N) */ /* On entry, an NRU-by-N matrix U. */ /* On exit, U is overwritten by U * Q. */ /* Not referenced if NRU = 0. */ /* LDU (input) INTEGER */ /* The leading dimension of the array U. LDU >= max(1,NRU). */ /* C (input/output) COMPLEX array, dimension (LDC, NCC) */ /* On entry, an N-by-NCC matrix C. */ /* On exit, C is overwritten by Q**H * C. */ /* Not referenced if NCC = 0. */ /* LDC (input) INTEGER */ /* The leading dimension of the array C. */ /* LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. */ /* RWORK (workspace) REAL array, dimension (2*N) */ /* if NCVT = NRU = NCC = 0, (max(1, 4*N-4)) otherwise */ /* INFO (output) INTEGER */ /* = 0: successful exit */ /* < 0: If INFO = -i, the i-th argument had an illegal value */ /* > 0: the algorithm did not converge; D and E contain the */ /* elements of a bidiagonal matrix which is orthogonally */ /* similar to the input matrix B; if INFO = i, i */ /* elements of E have not converged to zero. */ /* Internal Parameters */ /* =================== */ /* TOLMUL REAL, default = max(10,min(100,EPS**(-1/8))) */ /* TOLMUL controls the convergence criterion of the QR loop. */ /* If it is positive, TOLMUL*EPS is the desired relative */ /* precision in the computed singular values. */ /* If it is negative, abs(TOLMUL*EPS*sigma_max) is the */ /* desired absolute accuracy in the computed singular */ /* values (corresponds to relative accuracy */ /* abs(TOLMUL*EPS) in the largest singular value. */ /* abs(TOLMUL) should be between 1 and 1/EPS, and preferably */ /* between 10 (for fast convergence) and .1/EPS */ /* (for there to be some accuracy in the results). */ /* Default is to lose at either one eighth or 2 of the */ /* available decimal digits in each computed singular value */ /* (whichever is smaller). */ /* MAXITR INTEGER, default = 6 */ /* MAXITR controls the maximum number of passes of the */ /* algorithm through its inner loop. The algorithms stops */ /* (and so fails to converge) if the number of passes */ /* through the inner loop exceeds MAXITR*N**2. */ /* ===================================================================== */ /* Test the input parameters. */ /* Parameter adjustments */ --d__; --e; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1; vt -= vt_offset; u_dim1 = *ldu; u_offset = 1 + u_dim1; u -= u_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1; c__ -= c_offset; --rwork; /* Function Body */ *info = 0; lower = lsame_(uplo, "L"); if (! lsame_(uplo, "U") && ! lower) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*ncvt < 0) { *info = -3; } else if (*nru < 0) { *info = -4; } else if (*ncc < 0) { *info = -5; } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) { *info = -9; } else if (*ldu < max(1,*nru)) { *info = -11; } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) { *info = -13; } if (*info != 0) { i__1 = -(*info); xerbla_("CBDSQR", &i__1); return 0; } if (*n == 0) { return 0; } if (*n == 1) { goto L160; } /* ROTATE is true if any singular vectors desired, false otherwise */ rotate = *ncvt > 0 || *nru > 0 || *ncc > 0; /* If no singular vectors desired, use qd algorithm */ if (! 
rotate) { slasq1_(n, &d__[1], &e[1], &rwork[1], info); return 0; } nm1 = *n - 1; nm12 = nm1 + nm1; nm13 = nm12 + nm1; idir = 0; /* Get machine constants */ eps = slamch_("Epsilon"); unfl = slamch_("Safe minimum"); /* If matrix lower bidiagonal, rotate to be upper bidiagonal */ /* by applying Givens rotations on the left */ if (lower) { i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { slartg_(&d__[i__], &e[i__], &cs, &sn, &r__); d__[i__] = r__; e[i__] = sn * d__[i__ + 1]; d__[i__ + 1] = cs * d__[i__ + 1]; rwork[i__] = cs; rwork[nm1 + i__] = sn; } /* Update singular vectors if desired */ if (*nru > 0) { clasr_("R", "V", "F", nru, n, &rwork[1], &rwork[*n], &u[u_offset], ldu); } if (*ncc > 0) { clasr_("L", "V", "F", n, ncc, &rwork[1], &rwork[*n], &c__[ c_offset], ldc); } } /* Compute singular values to relative accuracy TOL */ /* (By setting TOL to be negative, algorithm will compute */ /* singular values to absolute accuracy ABS(TOL)*norm(input matrix)) */ /* Computing MAX */ /* Computing MIN */ d__1 = (doublereal) eps; r__3 = 100.f, r__4 = pow_dd(&d__1, &c_b15); r__1 = 10.f, r__2 = dmin(r__3,r__4); tolmul = dmax(r__1,r__2); tol = tolmul * eps; /* Compute approximate maximum, minimum singular values */ smax = 0.f; i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ r__2 = smax, r__3 = (r__1 = d__[i__], dabs(r__1)); smax = dmax(r__2,r__3); } i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ r__2 = smax, r__3 = (r__1 = e[i__], dabs(r__1)); smax = dmax(r__2,r__3); } sminl = 0.f; if (tol >= 0.f) { /* Relative accuracy desired */ sminoa = dabs(d__[1]); if (sminoa == 0.f) { goto L50; } mu = sminoa; i__1 = *n; for (i__ = 2; i__ <= i__1; ++i__) { mu = (r__2 = d__[i__], dabs(r__2)) * (mu / (mu + (r__1 = e[i__ - 1], dabs(r__1)))); sminoa = dmin(sminoa,mu); if (sminoa == 0.f) { goto L50; } } L50: sminoa /= sqrt((real) (*n)); /* Computing MAX */ r__1 = tol * sminoa, r__2 = *n * 6 * *n * unfl; thresh = dmax(r__1,r__2); } else { /* Absolute accuracy desired */ /* Computing MAX */ r__1 = dabs(tol) * smax, r__2 = *n * 6 * *n * unfl; thresh = dmax(r__1,r__2); } /* Prepare for main iteration loop for the singular values */ /* (MAXIT is the maximum number of passes through the inner */ /* loop permitted before nonconvergence signalled.) 
*/ maxit = *n * 6 * *n; iter = 0; oldll = -1; oldm = -1; /* M points to last element of unconverged part of matrix */ m = *n; /* Begin main iteration loop */ L60: /* Check for convergence or exceeding iteration count */ if (m <= 1) { goto L160; } if (iter > maxit) { goto L200; } /* Find diagonal block of matrix to work on */ if (tol < 0.f && (r__1 = d__[m], dabs(r__1)) <= thresh) { d__[m] = 0.f; } smax = (r__1 = d__[m], dabs(r__1)); smin = smax; i__1 = m - 1; for (lll = 1; lll <= i__1; ++lll) { ll = m - lll; abss = (r__1 = d__[ll], dabs(r__1)); abse = (r__1 = e[ll], dabs(r__1)); if (tol < 0.f && abss <= thresh) { d__[ll] = 0.f; } if (abse <= thresh) { goto L80; } smin = dmin(smin,abss); /* Computing MAX */ r__1 = max(smax,abss); smax = dmax(r__1,abse); } ll = 0; goto L90; L80: e[ll] = 0.f; /* Matrix splits since E(LL) = 0 */ if (ll == m - 1) { /* Convergence of bottom singular value, return to top of loop */ --m; goto L60; } L90: ++ll; /* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */ if (ll == m - 1) { /* 2 by 2 block, handle separately */ slasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr, &sinl, &cosl); d__[m - 1] = sigmx; e[m - 1] = 0.f; d__[m] = sigmn; /* Compute singular vectors, if desired */ if (*ncvt > 0) { csrot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, & cosr, &sinr); } if (*nru > 0) { csrot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], & c__1, &cosl, &sinl); } if (*ncc > 0) { csrot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, & cosl, &sinl); } m += -2; goto L60; } /* If working on new submatrix, choose shift direction */ /* (from larger end diagonal element towards smaller) */ if (ll > oldm || m < oldll) { if ((r__1 = d__[ll], dabs(r__1)) >= (r__2 = d__[m], dabs(r__2))) { /* Chase bulge from top (big end) to bottom (small end) */ idir = 1; } else { /* Chase bulge from bottom (big end) to top (small end) */ idir = 2; } } /* Apply convergence tests */ if (idir == 1) { /* Run convergence test in forward direction */ /* First apply standard test to bottom of matrix */ if ((r__2 = e[m - 1], dabs(r__2)) <= dabs(tol) * (r__1 = d__[m], dabs( r__1)) || tol < 0.f && (r__3 = e[m - 1], dabs(r__3)) <= thresh) { e[m - 1] = 0.f; goto L60; } if (tol >= 0.f) { /* If relative accuracy desired, */ /* apply convergence criterion forward */ mu = (r__1 = d__[ll], dabs(r__1)); sminl = mu; i__1 = m - 1; for (lll = ll; lll <= i__1; ++lll) { if ((r__1 = e[lll], dabs(r__1)) <= tol * mu) { e[lll] = 0.f; goto L60; } mu = (r__2 = d__[lll + 1], dabs(r__2)) * (mu / (mu + (r__1 = e[lll], dabs(r__1)))); sminl = dmin(sminl,mu); } } } else { /* Run convergence test in backward direction */ /* First apply standard test to top of matrix */ if ((r__2 = e[ll], dabs(r__2)) <= dabs(tol) * (r__1 = d__[ll], dabs( r__1)) || tol < 0.f && (r__3 = e[ll], dabs(r__3)) <= thresh) { e[ll] = 0.f; goto L60; } if (tol >= 0.f) { /* If relative accuracy desired, */ /* apply convergence criterion backward */ mu = (r__1 = d__[m], dabs(r__1)); sminl = mu; i__1 = ll; for (lll = m - 1; lll >= i__1; --lll) { if ((r__1 = e[lll], dabs(r__1)) <= tol * mu) { e[lll] = 0.f; goto L60; } mu = (r__2 = d__[lll], dabs(r__2)) * (mu / (mu + (r__1 = e[ lll], dabs(r__1)))); sminl = dmin(sminl,mu); } } } oldll = ll; oldm = m; /* Compute shift. First, test if shifting would ruin relative */ /* accuracy, and if so set the shift to zero. 
*/ /* Computing MAX */ r__1 = eps, r__2 = tol * .01f; if (tol >= 0.f && *n * tol * (sminl / smax) <= dmax(r__1,r__2)) { /* Use a zero shift to avoid loss of relative accuracy */ shift = 0.f; } else { /* Compute the shift from 2-by-2 block at end of matrix */ if (idir == 1) { sll = (r__1 = d__[ll], dabs(r__1)); slas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__); } else { sll = (r__1 = d__[m], dabs(r__1)); slas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__); } /* Test if shift negligible, and if so set to zero */ if (sll > 0.f) { /* Computing 2nd power */ r__1 = shift / sll; if (r__1 * r__1 < eps) { shift = 0.f; } } } /* Increment iteration count */ iter = iter + m - ll; /* If SHIFT = 0, do simplified QR iteration */ if (shift == 0.f) { if (idir == 1) { /* Chase bulge from top to bottom */ /* Save cosines and sines for later singular vector updates */ cs = 1.f; oldcs = 1.f; i__1 = m - 1; for (i__ = ll; i__ <= i__1; ++i__) { r__1 = d__[i__] * cs; slartg_(&r__1, &e[i__], &cs, &sn, &r__); if (i__ > ll) { e[i__ - 1] = oldsn * r__; } r__1 = oldcs * r__; r__2 = d__[i__ + 1] * sn; slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]); rwork[i__ - ll + 1] = cs; rwork[i__ - ll + 1 + nm1] = sn; rwork[i__ - ll + 1 + nm12] = oldcs; rwork[i__ - ll + 1 + nm13] = oldsn; } h__ = d__[m] * cs; d__[m] = h__ * oldcs; e[m - 1] = h__ * oldsn; /* Update singular vectors */ if (*ncvt > 0) { i__1 = m - ll + 1; clasr_("L", "V", "F", &i__1, ncvt, &rwork[1], &rwork[*n], &vt[ ll + vt_dim1], ldvt); } if (*nru > 0) { i__1 = m - ll + 1; clasr_("R", "V", "F", nru, &i__1, &rwork[nm12 + 1], &rwork[ nm13 + 1], &u[ll * u_dim1 + 1], ldu); } if (*ncc > 0) { i__1 = m - ll + 1; clasr_("L", "V", "F", &i__1, ncc, &rwork[nm12 + 1], &rwork[ nm13 + 1], &c__[ll + c_dim1], ldc); } /* Test convergence */ if ((r__1 = e[m - 1], dabs(r__1)) <= thresh) { e[m - 1] = 0.f; } } else { /* Chase bulge from bottom to top */ /* Save cosines and sines for later singular vector updates */ cs = 1.f; oldcs = 1.f; i__1 = ll + 1; for (i__ = m; i__ >= i__1; --i__) { r__1 = d__[i__] * cs; slartg_(&r__1, &e[i__ - 1], &cs, &sn, &r__); if (i__ < m) { e[i__] = oldsn * r__; } r__1 = oldcs * r__; r__2 = d__[i__ - 1] * sn; slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]); rwork[i__ - ll] = cs; rwork[i__ - ll + nm1] = -sn; rwork[i__ - ll + nm12] = oldcs; rwork[i__ - ll + nm13] = -oldsn; } h__ = d__[ll] * cs; d__[ll] = h__ * oldcs; e[ll] = h__ * oldsn; /* Update singular vectors */ if (*ncvt > 0) { i__1 = m - ll + 1; clasr_("L", "V", "B", &i__1, ncvt, &rwork[nm12 + 1], &rwork[ nm13 + 1], &vt[ll + vt_dim1], ldvt); } if (*nru > 0) { i__1 = m - ll + 1; clasr_("R", "V", "B", nru, &i__1, &rwork[1], &rwork[*n], &u[ ll * u_dim1 + 1], ldu); } if (*ncc > 0) { i__1 = m - ll + 1; clasr_("L", "V", "B", &i__1, ncc, &rwork[1], &rwork[*n], &c__[ ll + c_dim1], ldc); } /* Test convergence */ if ((r__1 = e[ll], dabs(r__1)) <= thresh) { e[ll] = 0.f; } } } else { /* Use nonzero shift */ if (idir == 1) { /* Chase bulge from top to bottom */ /* Save cosines and sines for later singular vector updates */ f = ((r__1 = d__[ll], dabs(r__1)) - shift) * (r_sign(&c_b49, &d__[ ll]) + shift / d__[ll]); g = e[ll]; i__1 = m - 1; for (i__ = ll; i__ <= i__1; ++i__) { slartg_(&f, &g, &cosr, &sinr, &r__); if (i__ > ll) { e[i__ - 1] = r__; } f = cosr * d__[i__] + sinr * e[i__]; e[i__] = cosr * e[i__] - sinr * d__[i__]; g = sinr * d__[i__ + 1]; d__[i__ + 1] = cosr * d__[i__ + 1]; slartg_(&f, &g, &cosl, &sinl, &r__); d__[i__] = r__; f = cosl * e[i__] + sinl * d__[i__ + 1]; d__[i__ + 1] = cosl * d__[i__ + 1] 
- sinl * e[i__]; if (i__ < m - 1) { g = sinl * e[i__ + 1]; e[i__ + 1] = cosl * e[i__ + 1]; } rwork[i__ - ll + 1] = cosr; rwork[i__ - ll + 1 + nm1] = sinr; rwork[i__ - ll + 1 + nm12] = cosl; rwork[i__ - ll + 1 + nm13] = sinl; } e[m - 1] = f; /* Update singular vectors */ if (*ncvt > 0) { i__1 = m - ll + 1; clasr_("L", "V", "F", &i__1, ncvt, &rwork[1], &rwork[*n], &vt[ ll + vt_dim1], ldvt); } if (*nru > 0) { i__1 = m - ll + 1; clasr_("R", "V", "F", nru, &i__1, &rwork[nm12 + 1], &rwork[ nm13 + 1], &u[ll * u_dim1 + 1], ldu); } if (*ncc > 0) { i__1 = m - ll + 1; clasr_("L", "V", "F", &i__1, ncc, &rwork[nm12 + 1], &rwork[ nm13 + 1], &c__[ll + c_dim1], ldc); } /* Test convergence */ if ((r__1 = e[m - 1], dabs(r__1)) <= thresh) { e[m - 1] = 0.f; } } else { /* Chase bulge from bottom to top */ /* Save cosines and sines for later singular vector updates */ f = ((r__1 = d__[m], dabs(r__1)) - shift) * (r_sign(&c_b49, &d__[ m]) + shift / d__[m]); g = e[m - 1]; i__1 = ll + 1; for (i__ = m; i__ >= i__1; --i__) { slartg_(&f, &g, &cosr, &sinr, &r__); if (i__ < m) { e[i__] = r__; } f = cosr * d__[i__] + sinr * e[i__ - 1]; e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__]; g = sinr * d__[i__ - 1]; d__[i__ - 1] = cosr * d__[i__ - 1]; slartg_(&f, &g, &cosl, &sinl, &r__); d__[i__] = r__; f = cosl * e[i__ - 1] + sinl * d__[i__ - 1]; d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1]; if (i__ > ll + 1) { g = sinl * e[i__ - 2]; e[i__ - 2] = cosl * e[i__ - 2]; } rwork[i__ - ll] = cosr; rwork[i__ - ll + nm1] = -sinr; rwork[i__ - ll + nm12] = cosl; rwork[i__ - ll + nm13] = -sinl; } e[ll] = f; /* Test convergence */ if ((r__1 = e[ll], dabs(r__1)) <= thresh) { e[ll] = 0.f; } /* Update singular vectors if desired */ if (*ncvt > 0) { i__1 = m - ll + 1; clasr_("L", "V", "B", &i__1, ncvt, &rwork[nm12 + 1], &rwork[ nm13 + 1], &vt[ll + vt_dim1], ldvt); } if (*nru > 0) { i__1 = m - ll + 1; clasr_("R", "V", "B", nru, &i__1, &rwork[1], &rwork[*n], &u[ ll * u_dim1 + 1], ldu); } if (*ncc > 0) { i__1 = m - ll + 1; clasr_("L", "V", "B", &i__1, ncc, &rwork[1], &rwork[*n], &c__[ ll + c_dim1], ldc); } } } /* QR iteration finished, go back and check convergence */ goto L60; /* All singular values converged, so make them positive */ L160: i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if (d__[i__] < 0.f) { d__[i__] = -d__[i__]; /* Change sign of singular vectors, if desired */ if (*ncvt > 0) { csscal_(ncvt, &c_b72, &vt[i__ + vt_dim1], ldvt); } } } /* Sort the singular values into decreasing order (insertion sort on */ /* singular values, but only one transposition per singular vector) */ i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Scan for smallest D(I) */ isub = 1; smin = d__[1]; i__2 = *n + 1 - i__; for (j = 2; j <= i__2; ++j) { if (d__[j] <= smin) { isub = j; smin = d__[j]; } } if (isub != *n + 1 - i__) { /* Swap singular values and vectors */ d__[isub] = d__[*n + 1 - i__]; d__[*n + 1 - i__] = smin; if (*ncvt > 0) { cswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ + vt_dim1], ldvt); } if (*nru > 0) { cswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) * u_dim1 + 1], &c__1); } if (*ncc > 0) { cswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ + c_dim1], ldc); } } } goto L220; /* Maximum number of iterations exceeded, failure to converge */ L200: *info = 0; i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { if (e[i__] != 0.f) { ++(*info); } } L220: return 0; /* End of CBDSQR */ } /* cbdsqr_ */
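/* Illustrative usage sketch (not part of LAPACK): a minimal call of CBDSQR */
/* above on a 3-by-3 upper bidiagonal matrix, computing its singular values */
/* and the updated right singular vectors P' * VT. Every name in this */
/* helper is hypothetical; it assumes the f2c "integer"/"real"/"complex" */
/* typedefs used throughout this file and is kept static and unreferenced. */
static int cbdsqr_usage_sketch_(void)
{
    integer n = 3, ncvt = 3, nru = 0, ncc = 0;
    integer ldvt = 3, ldu = 1, ldc = 1, info, i__;
    real d[3] = { 2.f, 1.f, .5f };   /* diagonal of B */
    real e[3] = { 1.f, .25f, 0.f };  /* superdiagonal; E(N) is workspace */
    complex vt[9], udum[1], cdum[1]; /* U and C are not referenced here */
    real rwork[12];                  /* 4*N real workspace */

    /* Start VT as the identity so CBDSQR returns P' itself in VT. */
    for (i__ = 0; i__ < 9; ++i__) {
        vt[i__].r = 0.f;
        vt[i__].i = 0.f;
    }
    vt[0].r = 1.f;
    vt[4].r = 1.f;
    vt[8].r = 1.f;
    cbdsqr_("U", &n, &ncvt, &nru, &ncc, d, e, vt, &ldvt, udum, &ldu, cdum,
            &ldc, rwork, &info);
    return (int) info; /* 0 on convergence, > 0 otherwise */
}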
/* Subroutine */ int clals0_(integer *icompq, integer *nl, integer *nr, integer *sqre, integer *nrhs, complex *b, integer *ldb, complex *bx, integer *ldbx, integer *perm, integer *givptr, integer *givcol, integer *ldgcol, real *givnum, integer *ldgnum, real *poles, real * difl, real *difr, real *z__, integer *k, real *c__, real *s, real * rwork, integer *info) { /* System generated locals */ integer givcol_dim1, givcol_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset, b_dim1, b_offset, bx_dim1, bx_offset, i__1, i__2, i__3, i__4, i__5; real r__1; complex q__1; /* Builtin functions */ double r_imag(complex *); /* Local variables */ integer i__, j, m, n; real dj; integer nlp1, jcol; real temp; integer jrow; extern doublereal snrm2_(integer *, real *, integer *); real diflj, difrj, dsigj; extern /* Subroutine */ int ccopy_(integer *, complex *, integer *, complex *, integer *), sgemv_(char *, integer *, integer *, real * , real *, integer *, real *, integer *, real *, real *, integer *), csrot_(integer *, complex *, integer *, complex *, integer *, real *, real *); extern doublereal slamc3_(real *, real *); extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *, real *, integer *, integer *, complex *, integer *, integer *), csscal_(integer *, real *, complex *, integer *), clacpy_(char *, integer *, integer *, complex *, integer *, complex *, integer *), xerbla_(char *, integer *); real dsigjp; /* -- LAPACK routine (version 3.1) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* CLALS0 applies back the multiplying factors of either the left or the */ /* right singular vector matrix of a diagonal matrix appended by a row */ /* to the right hand side matrix B in solving the least squares problem */ /* using the divide-and-conquer SVD approach. */ /* For the left singular vector matrix, three types of orthogonal */ /* matrices are involved: */ /* (1L) Givens rotations: the number of such rotations is GIVPTR; the */ /* pairs of columns/rows they were applied to are stored in GIVCOL; */ /* and the C- and S-values of these rotations are stored in GIVNUM. */ /* (2L) Permutation. The (NL+1)-st row of B is to be moved to the first */ /* row, and for J=2:N, PERM(J)-th row of B is to be moved to the */ /* J-th row. */ /* (3L) The left singular vector matrix of the remaining matrix. */ /* For the right singular vector matrix, four types of orthogonal */ /* matrices are involved: */ /* (1R) The right singular vector matrix of the remaining matrix. */ /* (2R) If SQRE = 1, one extra Givens rotation to generate the right */ /* null space. */ /* (3R) The inverse transformation of (2L). */ /* (4R) The inverse transformation of (1L). */ /* Arguments */ /* ========= */ /* ICOMPQ (input) INTEGER */ /* Specifies whether singular vectors are to be computed in */ /* factored form: */ /* = 0: Left singular vector matrix. */ /* = 1: Right singular vector matrix. */ /* NL (input) INTEGER */ /* The row dimension of the upper block. NL >= 1. */ /* NR (input) INTEGER */ /* The row dimension of the lower block. NR >= 1. */ /* SQRE (input) INTEGER */ /* = 0: the lower block is an NR-by-NR square matrix. */ /* = 1: the lower block is an NR-by-(NR+1) rectangular matrix. */ /* The bidiagonal matrix has row dimension N = NL + NR + 1, */ /* and column dimension M = N + SQRE. 
*/
/* NRHS (input) INTEGER */
/* The number of columns of B and BX. NRHS must be at least 1. */
/* B (input/output) COMPLEX array, dimension ( LDB, NRHS ) */
/* On input, B contains the right hand sides of the least */
/* squares problem in rows 1 through M. On output, B contains */
/* the solution X in rows 1 through N. */
/* LDB (input) INTEGER */
/* The leading dimension of B. LDB must be at least */
/* max(1,MAX( M, N ) ). */
/* BX (workspace) COMPLEX array, dimension ( LDBX, NRHS ) */
/* LDBX (input) INTEGER */
/* The leading dimension of BX. */
/* PERM (input) INTEGER array, dimension ( N ) */
/* The permutations (from deflation and sorting) applied */
/* to the two blocks. */
/* GIVPTR (input) INTEGER */
/* The number of Givens rotations which took place in this */
/* subproblem. */
/* GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 ) */
/* Each pair of numbers indicates a pair of rows/columns */
/* involved in a Givens rotation. */
/* LDGCOL (input) INTEGER */
/* The leading dimension of GIVCOL, must be at least N. */
/* GIVNUM (input) REAL array, dimension ( LDGNUM, 2 ) */
/* Each number indicates the C or S value used in the */
/* corresponding Givens rotation. */
/* LDGNUM (input) INTEGER */
/* The leading dimension of arrays DIFR, POLES and */
/* GIVNUM, must be at least K. */
/* POLES (input) REAL array, dimension ( LDGNUM, 2 ) */
/* On entry, POLES(1:K, 1) contains the new singular */
/* values obtained from solving the secular equation, and */
/* POLES(1:K, 2) is an array containing the poles in the secular */
/* equation. */
/* DIFL (input) REAL array, dimension ( K ). */
/* On entry, DIFL(I) is the distance between the I-th updated */
/* (undeflated) singular value and the I-th (undeflated) old */
/* singular value. */
/* DIFR (input) REAL array, dimension ( LDGNUM, 2 ). */
/* On entry, DIFR(I, 1) contains the distance between the I-th */
/* updated (undeflated) singular value and the (I+1)-th */
/* (undeflated) old singular value, and DIFR(I, 2) is the */
/* normalizing factor for the I-th right singular vector. */
/* Z (input) REAL array, dimension ( K ) */
/* Contains the components of the deflation-adjusted updating row */
/* vector. */
/* K (input) INTEGER */
/* Contains the dimension of the non-deflated matrix. */
/* This is the order of the related secular equation. 1 <= K <= N. */
/* C (input) REAL */
/* C contains garbage if SQRE = 0 and the C-value of a Givens */
/* rotation related to the right null space if SQRE = 1. */
/* S (input) REAL */
/* S contains garbage if SQRE = 0 and the S-value of a Givens */
/* rotation related to the right null space if SQRE = 1. */
/* RWORK (workspace) REAL array, dimension */
/* ( K*(1+NRHS) + 2*NRHS ) */
/* INFO (output) INTEGER */
/* = 0: successful exit. */
/* < 0: if INFO = -i, the i-th argument had an illegal value. */
/* Further Details */
/* =============== */
/* Based on contributions by */
/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
/* California at Berkeley, USA */
/* Osni Marques, LBNL/NERSC, USA */
/* ===================================================================== */
/* .. Parameters .. */
/* .. */
/* .. Local Scalars .. */
/* .. */
/* .. External Subroutines .. */
/* .. */
/* .. External Functions .. */
/* .. */
/* .. Intrinsic Functions .. */
/* .. */
/* .. Executable Statements .. */
/* Test the input parameters.
*/ /* Parameter adjustments */ b_dim1 = *ldb; b_offset = 1 + b_dim1; b -= b_offset; bx_dim1 = *ldbx; bx_offset = 1 + bx_dim1; bx -= bx_offset; --perm; givcol_dim1 = *ldgcol; givcol_offset = 1 + givcol_dim1; givcol -= givcol_offset; difr_dim1 = *ldgnum; difr_offset = 1 + difr_dim1; difr -= difr_offset; poles_dim1 = *ldgnum; poles_offset = 1 + poles_dim1; poles -= poles_offset; givnum_dim1 = *ldgnum; givnum_offset = 1 + givnum_dim1; givnum -= givnum_offset; --difl; --z__; --rwork; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*nl < 1) { *info = -2; } else if (*nr < 1) { *info = -3; } else if (*sqre < 0 || *sqre > 1) { *info = -4; } n = *nl + *nr + 1; if (*nrhs < 1) { *info = -5; } else if (*ldb < n) { *info = -7; } else if (*ldbx < n) { *info = -9; } else if (*givptr < 0) { *info = -11; } else if (*ldgcol < n) { *info = -13; } else if (*ldgnum < n) { *info = -15; } else if (*k < 1) { *info = -20; } if (*info != 0) { i__1 = -(*info); xerbla_("CLALS0", &i__1); return 0; } m = n + *sqre; nlp1 = *nl + 1; if (*icompq == 0) { /* Apply back orthogonal transformations from the left. */ /* Step (1L): apply back the Givens rotations performed. */ i__1 = *givptr; for (i__ = 1; i__ <= i__1; ++i__) { csrot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, & b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ + (givnum_dim1 << 1)], &givnum[i__ + givnum_dim1]); /* L10: */ } /* Step (2L): permute rows of B. */ ccopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx); i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { ccopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1], ldbx); /* L20: */ } /* Step (3L): apply the inverse of the left singular vector */ /* matrix to BX. */ if (*k == 1) { ccopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb); if (z__[1] < 0.f) { csscal_(nrhs, &c_b5, &b[b_offset], ldb); } } else { i__1 = *k; for (j = 1; j <= i__1; ++j) { diflj = difl[j]; dj = poles[j + poles_dim1]; dsigj = -poles[j + (poles_dim1 << 1)]; if (j < *k) { difrj = -difr[j + difr_dim1]; dsigjp = -poles[j + 1 + (poles_dim1 << 1)]; } if (z__[j] == 0.f || poles[j + (poles_dim1 << 1)] == 0.f) { rwork[j] = 0.f; } else { rwork[j] = -poles[j + (poles_dim1 << 1)] * z__[j] / diflj / (poles[j + (poles_dim1 << 1)] + dj); } i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { if (z__[i__] == 0.f || poles[i__ + (poles_dim1 << 1)] == 0.f) { rwork[i__] = 0.f; } else { rwork[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__] / (slamc3_(&poles[i__ + (poles_dim1 << 1)], & dsigj) - diflj) / (poles[i__ + (poles_dim1 << 1)] + dj); } /* L30: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { if (z__[i__] == 0.f || poles[i__ + (poles_dim1 << 1)] == 0.f) { rwork[i__] = 0.f; } else { rwork[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__] / (slamc3_(&poles[i__ + (poles_dim1 << 1)], & dsigjp) + difrj) / (poles[i__ + (poles_dim1 << 1)] + dj); } /* L40: */ } rwork[1] = -1.f; temp = snrm2_(k, &rwork[1], &c__1); /* Since B and BX are complex, the following call to SGEMV */ /* is performed in two steps (real and imaginary parts). 
*/ /* CALL SGEMV( 'T', K, NRHS, ONE, BX, LDBX, WORK, 1, ZERO, */ /* $ B( J, 1 ), LDB ) */ i__ = *k + (*nrhs << 1); i__2 = *nrhs; for (jcol = 1; jcol <= i__2; ++jcol) { i__3 = *k; for (jrow = 1; jrow <= i__3; ++jrow) { ++i__; i__4 = jrow + jcol * bx_dim1; rwork[i__] = bx[i__4].r; /* L50: */ } /* L60: */ } sgemv_("T", k, nrhs, &c_b13, &rwork[*k + 1 + (*nrhs << 1)], k, &rwork[1], &c__1, &c_b15, &rwork[*k + 1], &c__1); i__ = *k + (*nrhs << 1); i__2 = *nrhs; for (jcol = 1; jcol <= i__2; ++jcol) { i__3 = *k; for (jrow = 1; jrow <= i__3; ++jrow) { ++i__; rwork[i__] = r_imag(&bx[jrow + jcol * bx_dim1]); /* L70: */ } /* L80: */ } sgemv_("T", k, nrhs, &c_b13, &rwork[*k + 1 + (*nrhs << 1)], k, &rwork[1], &c__1, &c_b15, &rwork[*k + 1 + *nrhs], & c__1); i__2 = *nrhs; for (jcol = 1; jcol <= i__2; ++jcol) { i__3 = j + jcol * b_dim1; i__4 = jcol + *k; i__5 = jcol + *k + *nrhs; q__1.r = rwork[i__4], q__1.i = rwork[i__5]; b[i__3].r = q__1.r, b[i__3].i = q__1.i; /* L90: */ } clascl_("G", &c__0, &c__0, &temp, &c_b13, &c__1, nrhs, &b[j + b_dim1], ldb, info); /* L100: */ } } /* Move the deflated rows of BX to B also. */ if (*k < max(m,n)) { i__1 = n - *k; clacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1 + b_dim1], ldb); } } else { /* Apply back the right orthogonal transformations. */ /* Step (1R): apply back the new right singular vector matrix */ /* to B. */ if (*k == 1) { ccopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx); } else { i__1 = *k; for (j = 1; j <= i__1; ++j) { dsigj = poles[j + (poles_dim1 << 1)]; if (z__[j] == 0.f) { rwork[j] = 0.f; } else { rwork[j] = -z__[j] / difl[j] / (dsigj + poles[j + poles_dim1]) / difr[j + (difr_dim1 << 1)]; } i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { if (z__[j] == 0.f) { rwork[i__] = 0.f; } else { r__1 = -poles[i__ + 1 + (poles_dim1 << 1)]; rwork[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difr[ i__ + difr_dim1]) / (dsigj + poles[i__ + poles_dim1]) / difr[i__ + (difr_dim1 << 1)]; } /* L110: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { if (z__[j] == 0.f) { rwork[i__] = 0.f; } else { r__1 = -poles[i__ + (poles_dim1 << 1)]; rwork[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difl[ i__]) / (dsigj + poles[i__ + poles_dim1]) / difr[i__ + (difr_dim1 << 1)]; } /* L120: */ } /* Since B and BX are complex, the following call to SGEMV */ /* is performed in two steps (real and imaginary parts). */ /* CALL SGEMV( 'T', K, NRHS, ONE, B, LDB, WORK, 1, ZERO, */ /* $ BX( J, 1 ), LDBX ) */ i__ = *k + (*nrhs << 1); i__2 = *nrhs; for (jcol = 1; jcol <= i__2; ++jcol) { i__3 = *k; for (jrow = 1; jrow <= i__3; ++jrow) { ++i__; i__4 = jrow + jcol * b_dim1; rwork[i__] = b[i__4].r; /* L130: */ } /* L140: */ } sgemv_("T", k, nrhs, &c_b13, &rwork[*k + 1 + (*nrhs << 1)], k, &rwork[1], &c__1, &c_b15, &rwork[*k + 1], &c__1); i__ = *k + (*nrhs << 1); i__2 = *nrhs; for (jcol = 1; jcol <= i__2; ++jcol) { i__3 = *k; for (jrow = 1; jrow <= i__3; ++jrow) { ++i__; rwork[i__] = r_imag(&b[jrow + jcol * b_dim1]); /* L150: */ } /* L160: */ } sgemv_("T", k, nrhs, &c_b13, &rwork[*k + 1 + (*nrhs << 1)], k, &rwork[1], &c__1, &c_b15, &rwork[*k + 1 + *nrhs], & c__1); i__2 = *nrhs; for (jcol = 1; jcol <= i__2; ++jcol) { i__3 = j + jcol * bx_dim1; i__4 = jcol + *k; i__5 = jcol + *k + *nrhs; q__1.r = rwork[i__4], q__1.i = rwork[i__5]; bx[i__3].r = q__1.r, bx[i__3].i = q__1.i; /* L170: */ } /* L180: */ } } /* Step (2R): if SQRE = 1, apply back the rotation that is */ /* related to the right null space of the subproblem. 
*/ if (*sqre == 1) { ccopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx); csrot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__, s); } if (*k < max(m,n)) { i__1 = n - *k; clacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 + bx_dim1], ldbx); } /* Step (3R): permute rows of B. */ ccopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb); if (*sqre == 1) { ccopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb); } i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { ccopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1], ldb); /* L190: */ } /* Step (4R): apply back the Givens rotations performed. */ for (i__ = *givptr; i__ >= 1; --i__) { r__1 = -givnum[i__ + givnum_dim1]; csrot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, & b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ + (givnum_dim1 << 1)], &r__1); /* L200: */ } } return 0; /* End of CLALS0 */ } /* clals0_ */
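/* Illustrative sketch (not LAPACK code): the pattern CLALS0 uses for the */
/* commented-out SGEMV calls above. B and BX are complex, but the weight */
/* vector held in RWORK(1:K) is real, so each product row is formed in two */
/* real passes, one over the real parts and one over the imaginary parts */
/* of BX. The plain loops below stand in for the two SGEMV('T',...) */
/* calls; the helper name and its arguments are hypothetical. */
static void clals0_split_gemv_sketch_(integer k, integer nrhs, real *w,
                                      complex *bx, integer ldbx,
                                      complex *brow)
{
    integer jcol, jrow;
    real re, im;

    for (jcol = 0; jcol < nrhs; ++jcol) {
        re = 0.f;
        im = 0.f;
        for (jrow = 0; jrow < k; ++jrow) {
            /* Two real dot products: one against the real part and one
               against the imaginary part of column JCOL of BX. */
            re += w[jrow] * bx[jrow + jcol * ldbx].r;
            im += w[jrow] * bx[jrow + jcol * ldbx].i;
        }
        brow[jcol].r = re;
        brow[jcol].i = im;
    }
}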
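/* Illustrative sketch (not LAPACK code): both CBDSQR above and CLALSD */
/* below begin by reducing a lower bidiagonal matrix (diagonal D, */
/* subdiagonal E) to upper bidiagonal form with Givens rotations applied */
/* on the left. The helpers below show that reduction in isolation; */
/* givens_sketch_ is a simplified stand-in for SLARTG and omits its */
/* over/underflow safeguards. All names here are hypothetical. */
static void givens_sketch_(real f, real g, real *cs, real *sn, real *r)
{
    double sqrt(doublereal);
    real t = (real) sqrt((doublereal) (f * f + g * g));
    if (t == 0.f) {
        *cs = 1.f;
        *sn = 0.f;
        *r = 0.f;
    } else {
        *cs = f / t;
        *sn = g / t;
        *r = t;
    }
}

static void lower_to_upper_sketch_(integer n, real *d, real *e)
{
    integer i;
    real cs, sn, r;

    for (i = 0; i < n - 1; ++i) {
        /* Zero the subdiagonal entry e[i]; the rotation turns it into a
           superdiagonal entry coupling d[i] and d[i + 1]. */
        givens_sketch_(d[i], e[i], &cs, &sn, &r);
        d[i] = r;
        e[i] = sn * d[i + 1];
        d[i + 1] = cs * d[i + 1];
    }
}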
/* Subroutine */ int clalsd_(char *uplo, integer *smlsiz, integer *n, integer *nrhs, real *d__, real *e, complex *b, integer *ldb, real *rcond, integer *rank, complex *work, real *rwork, integer *iwork, integer * info) { /* System generated locals */ integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6; real r__1; complex q__1; /* Builtin functions */ double r_imag(complex *), log(doublereal), r_sign(real *, real *); /* Local variables */ integer c__, i__, j, k; real r__; integer s, u, z__; real cs; integer bx; real sn; integer st, vt, nm1, st1; real eps; integer iwk; real tol; integer difl, difr; real rcnd; integer jcol, irwb, perm, nsub, nlvl, sqre, bxst, jrow, irwu, jimag, jreal; extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *, integer *, real *, real *, integer *, real *, integer *, real *, real *, integer *); integer irwib; extern /* Subroutine */ int ccopy_(integer *, complex *, integer *, complex *, integer *); integer poles, sizei, irwrb, nsize; extern /* Subroutine */ int csrot_(integer *, complex *, integer *, complex *, integer *, real *, real *); integer irwvt, icmpq1, icmpq2; extern /* Subroutine */ int clalsa_(integer *, integer *, integer *, integer *, complex *, integer *, complex *, integer *, real *, integer *, real *, integer *, real *, real *, real *, real *, integer *, integer *, integer *, integer *, real *, real *, real * , real *, integer *, integer *), clascl_(char *, integer *, integer *, real *, real *, integer *, integer *, complex *, integer *, integer *); extern doublereal slamch_(char *); extern /* Subroutine */ int slasda_(integer *, integer *, integer *, integer *, real *, real *, real *, integer *, real *, integer *, real *, real *, real *, real *, integer *, integer *, integer *, integer *, real *, real *, real *, real *, integer *, integer *), clacpy_(char *, integer *, integer *, complex *, integer *, complex *, integer *), claset_(char *, integer *, integer *, complex *, complex *, complex *, integer *), xerbla_( char *, integer *), slascl_(char *, integer *, integer *, real *, real *, integer *, integer *, real *, integer *, integer * ); extern integer isamax_(integer *, real *, integer *); integer givcol; extern /* Subroutine */ int slasdq_(char *, integer *, integer *, integer *, integer *, integer *, real *, real *, real *, integer *, real * , integer *, real *, integer *, real *, integer *), slaset_(char *, integer *, integer *, real *, real *, real *, integer *), slartg_(real *, real *, real *, real *, real * ); real orgnrm; integer givnum; extern doublereal slanst_(char *, integer *, real *, real *); extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *); integer givptr, nrwork, irwwrk, smlszp; /* -- LAPACK routine (version 3.1) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* CLALSD uses the singular value decomposition of A to solve the least */ /* squares problem of finding X to minimize the Euclidean norm of each */ /* column of A*X-B, where A is N-by-N upper bidiagonal, and X and B */ /* are N-by-NRHS. The solution X overwrites B. */ /* The singular values of A smaller than RCOND times the largest */ /* singular value are treated as zero in solving the least squares */ /* problem; in this case a minimum norm solution is returned. */ /* The actual singular values are returned in D in ascending order. 
*/
/* This code makes very mild assumptions about floating point */
/* arithmetic. It will work on machines with a guard digit in */
/* add/subtract, or on those binary machines without guard digits */
/* which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. */
/* It could conceivably fail on hexadecimal or decimal machines */
/* without guard digits, but we know of none. */
/* Arguments */
/* ========= */
/* UPLO (input) CHARACTER*1 */
/* = 'U': D and E define an upper bidiagonal matrix. */
/* = 'L': D and E define a lower bidiagonal matrix. */
/* SMLSIZ (input) INTEGER */
/* The maximum size of the subproblems at the bottom of the */
/* computation tree. */
/* N (input) INTEGER */
/* The dimension of the bidiagonal matrix. N >= 0. */
/* NRHS (input) INTEGER */
/* The number of columns of B. NRHS must be at least 1. */
/* D (input/output) REAL array, dimension (N) */
/* On entry, D contains the main diagonal of the bidiagonal */
/* matrix. On exit, if INFO = 0, D contains its singular values. */
/* E (input/output) REAL array, dimension (N-1) */
/* Contains the super-diagonal entries of the bidiagonal matrix. */
/* On exit, E has been destroyed. */
/* B (input/output) COMPLEX array, dimension (LDB,NRHS) */
/* On input, B contains the right hand sides of the least */
/* squares problem. On output, B contains the solution X. */
/* LDB (input) INTEGER */
/* The leading dimension of B in the calling subprogram. */
/* LDB must be at least max(1,N). */
/* RCOND (input) REAL */
/* The singular values of A less than or equal to RCOND times */
/* the largest singular value are treated as zero in solving */
/* the least squares problem. If RCOND is negative, */
/* machine precision is used instead. */
/* For example, if diag(S)*X=B were the least squares problem, */
/* where diag(S) is a diagonal matrix of singular values, the */
/* solution would be X(i) = B(i) / S(i) if S(i) is greater than */
/* RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to */
/* RCOND*max(S). */
/* RANK (output) INTEGER */
/* The number of singular values of A greater than RCOND times */
/* the largest singular value. */
/* WORK (workspace) COMPLEX array, dimension (N * NRHS). */
/* RWORK (workspace) REAL array, dimension at least */
/* (9*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS + (SMLSIZ+1)**2), */
/* where */
/* NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) */
/* IWORK (workspace) INTEGER array, dimension (3*N*NLVL + 11*N). */
/* INFO (output) INTEGER */
/* = 0: successful exit. */
/* < 0: if INFO = -i, the i-th argument had an illegal value. */
/* > 0: The algorithm failed to compute a singular value while */
/* working on the submatrix lying in rows and columns */
/* INFO/(N+1) through MOD(INFO,N+1). */
/* Further Details */
/* =============== */
/* Based on contributions by */
/* Ming Gu and Ren-Cang Li, Computer Science Division, University of */
/* California at Berkeley, USA */
/* Osni Marques, LBNL/NERSC, USA */
/* ===================================================================== */
/* .. Parameters .. */
/* .. */
/* .. Local Scalars .. */
/* .. */
/* .. External Functions .. */
/* .. */
/* .. External Subroutines .. */
/* .. */
/* .. Intrinsic Functions .. */
/* .. */
/* .. Executable Statements .. */
/* Test the input parameters.
*/ /* Parameter adjustments */ --d__; --e; b_dim1 = *ldb; b_offset = 1 + b_dim1; b -= b_offset; --work; --rwork; --iwork; /* Function Body */ *info = 0; if (*n < 0) { *info = -3; } else if (*nrhs < 1) { *info = -4; } else if (*ldb < 1 || *ldb < *n) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("CLALSD", &i__1); return 0; } eps = slamch_("Epsilon"); /* Set up the tolerance. */ if (*rcond <= 0.f || *rcond >= 1.f) { rcnd = eps; } else { rcnd = *rcond; } *rank = 0; /* Quick return if possible. */ if (*n == 0) { return 0; } else if (*n == 1) { if (d__[1] == 0.f) { claset_("A", &c__1, nrhs, &c_b1, &c_b1, &b[b_offset], ldb); } else { *rank = 1; clascl_("G", &c__0, &c__0, &d__[1], &c_b10, &c__1, nrhs, &b[ b_offset], ldb, info); d__[1] = dabs(d__[1]); } return 0; } /* Rotate the matrix if it is lower bidiagonal. */ if (*(unsigned char *)uplo == 'L') { i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { slartg_(&d__[i__], &e[i__], &cs, &sn, &r__); d__[i__] = r__; e[i__] = sn * d__[i__ + 1]; d__[i__ + 1] = cs * d__[i__ + 1]; if (*nrhs == 1) { csrot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], & c__1, &cs, &sn); } else { rwork[(i__ << 1) - 1] = cs; rwork[i__ * 2] = sn; } /* L10: */ } if (*nrhs > 1) { i__1 = *nrhs; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = *n - 1; for (j = 1; j <= i__2; ++j) { cs = rwork[(j << 1) - 1]; sn = rwork[j * 2]; csrot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__ * b_dim1], &c__1, &cs, &sn); /* L20: */ } /* L30: */ } } } /* Scale. */ nm1 = *n - 1; orgnrm = slanst_("M", n, &d__[1], &e[1]); if (orgnrm == 0.f) { claset_("A", n, nrhs, &c_b1, &c_b1, &b[b_offset], ldb); return 0; } slascl_("G", &c__0, &c__0, &orgnrm, &c_b10, n, &c__1, &d__[1], n, info); slascl_("G", &c__0, &c__0, &orgnrm, &c_b10, &nm1, &c__1, &e[1], &nm1, info); /* If N is smaller than the minimum divide size SMLSIZ, then solve */ /* the problem with another solver. */ if (*n <= *smlsiz) { irwu = 1; irwvt = irwu + *n * *n; irwwrk = irwvt + *n * *n; irwrb = irwwrk; irwib = irwrb + *n * *nrhs; irwb = irwib + *n * *nrhs; slaset_("A", n, n, &c_b35, &c_b10, &rwork[irwu], n); slaset_("A", n, n, &c_b35, &c_b10, &rwork[irwvt], n); slasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &rwork[irwvt], n, &rwork[irwu], n, &rwork[irwwrk], &c__1, &rwork[irwwrk], info); if (*info != 0) { return 0; } /* In the real version, B is passed to SLASDQ and multiplied */ /* internally by Q'. Here B is complex and that product is */ /* computed below in two steps (real and imaginary parts). 
*/ j = irwb - 1; i__1 = *nrhs; for (jcol = 1; jcol <= i__1; ++jcol) { i__2 = *n; for (jrow = 1; jrow <= i__2; ++jrow) { ++j; i__3 = jrow + jcol * b_dim1; rwork[j] = b[i__3].r; /* L40: */ } /* L50: */ } sgemm_("T", "N", n, nrhs, n, &c_b10, &rwork[irwu], n, &rwork[irwb], n, &c_b35, &rwork[irwrb], n); j = irwb - 1; i__1 = *nrhs; for (jcol = 1; jcol <= i__1; ++jcol) { i__2 = *n; for (jrow = 1; jrow <= i__2; ++jrow) { ++j; rwork[j] = r_imag(&b[jrow + jcol * b_dim1]); /* L60: */ } /* L70: */ } sgemm_("T", "N", n, nrhs, n, &c_b10, &rwork[irwu], n, &rwork[irwb], n, &c_b35, &rwork[irwib], n); jreal = irwrb - 1; jimag = irwib - 1; i__1 = *nrhs; for (jcol = 1; jcol <= i__1; ++jcol) { i__2 = *n; for (jrow = 1; jrow <= i__2; ++jrow) { ++jreal; ++jimag; i__3 = jrow + jcol * b_dim1; i__4 = jreal; i__5 = jimag; q__1.r = rwork[i__4], q__1.i = rwork[i__5]; b[i__3].r = q__1.r, b[i__3].i = q__1.i; /* L80: */ } /* L90: */ } tol = rcnd * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1)); i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if (d__[i__] <= tol) { claset_("A", &c__1, nrhs, &c_b1, &c_b1, &b[i__ + b_dim1], ldb); } else { clascl_("G", &c__0, &c__0, &d__[i__], &c_b10, &c__1, nrhs, &b[ i__ + b_dim1], ldb, info); ++(*rank); } /* L100: */ } /* Since B is complex, the following call to SGEMM is performed */ /* in two steps (real and imaginary parts). That is for V * B */ /* (in the real version of the code V' is stored in WORK). */ /* CALL SGEMM( 'T', 'N', N, NRHS, N, ONE, WORK, N, B, LDB, ZERO, */ /* $ WORK( NWORK ), N ) */ j = irwb - 1; i__1 = *nrhs; for (jcol = 1; jcol <= i__1; ++jcol) { i__2 = *n; for (jrow = 1; jrow <= i__2; ++jrow) { ++j; i__3 = jrow + jcol * b_dim1; rwork[j] = b[i__3].r; /* L110: */ } /* L120: */ } sgemm_("T", "N", n, nrhs, n, &c_b10, &rwork[irwvt], n, &rwork[irwb], n, &c_b35, &rwork[irwrb], n); j = irwb - 1; i__1 = *nrhs; for (jcol = 1; jcol <= i__1; ++jcol) { i__2 = *n; for (jrow = 1; jrow <= i__2; ++jrow) { ++j; rwork[j] = r_imag(&b[jrow + jcol * b_dim1]); /* L130: */ } /* L140: */ } sgemm_("T", "N", n, nrhs, n, &c_b10, &rwork[irwvt], n, &rwork[irwb], n, &c_b35, &rwork[irwib], n); jreal = irwrb - 1; jimag = irwib - 1; i__1 = *nrhs; for (jcol = 1; jcol <= i__1; ++jcol) { i__2 = *n; for (jrow = 1; jrow <= i__2; ++jrow) { ++jreal; ++jimag; i__3 = jrow + jcol * b_dim1; i__4 = jreal; i__5 = jimag; q__1.r = rwork[i__4], q__1.i = rwork[i__5]; b[i__3].r = q__1.r, b[i__3].i = q__1.i; /* L150: */ } /* L160: */ } /* Unscale. */ slascl_("G", &c__0, &c__0, &c_b10, &orgnrm, n, &c__1, &d__[1], n, info); slasrt_("D", n, &d__[1], info); clascl_("G", &c__0, &c__0, &orgnrm, &c_b10, n, nrhs, &b[b_offset], ldb, info); return 0; } /* Book-keeping and setting up some constants. 
*/ nlvl = (integer) (log((real) (*n) / (real) (*smlsiz + 1)) / log(2.f)) + 1; smlszp = *smlsiz + 1; u = 1; vt = *smlsiz * *n + 1; difl = vt + smlszp * *n; difr = difl + nlvl * *n; z__ = difr + (nlvl * *n << 1); c__ = z__ + nlvl * *n; s = c__ + *n; poles = s + *n; givnum = poles + (nlvl << 1) * *n; nrwork = givnum + (nlvl << 1) * *n; bx = 1; irwrb = nrwork; irwib = irwrb + *smlsiz * *nrhs; irwb = irwib + *smlsiz * *nrhs; sizei = *n + 1; k = sizei + *n; givptr = k + *n; perm = givptr + *n; givcol = perm + nlvl * *n; iwk = givcol + (nlvl * *n << 1); st = 1; sqre = 0; icmpq1 = 1; icmpq2 = 0; nsub = 0; i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if ((r__1 = d__[i__], dabs(r__1)) < eps) { d__[i__] = r_sign(&eps, &d__[i__]); } /* L170: */ } i__1 = nm1; for (i__ = 1; i__ <= i__1; ++i__) { if ((r__1 = e[i__], dabs(r__1)) < eps || i__ == nm1) { ++nsub; iwork[nsub] = st; /* Subproblem found. First determine its size and then */ /* apply divide and conquer on it. */ if (i__ < nm1) { /* A subproblem with E(I) small for I < NM1. */ nsize = i__ - st + 1; iwork[sizei + nsub - 1] = nsize; } else if ((r__1 = e[i__], dabs(r__1)) >= eps) { /* A subproblem with E(NM1) not too small but I = NM1. */ nsize = *n - st + 1; iwork[sizei + nsub - 1] = nsize; } else { /* A subproblem with E(NM1) small. This implies an */ /* 1-by-1 subproblem at D(N), which is not solved */ /* explicitly. */ nsize = i__ - st + 1; iwork[sizei + nsub - 1] = nsize; ++nsub; iwork[nsub] = *n; iwork[sizei + nsub - 1] = 1; ccopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n); } st1 = st - 1; if (nsize == 1) { /* This is a 1-by-1 subproblem and is not solved */ /* explicitly. */ ccopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n); } else if (nsize <= *smlsiz) { /* This is a small subproblem and is solved by SLASDQ. */ slaset_("A", &nsize, &nsize, &c_b35, &c_b10, &rwork[vt + st1], n); slaset_("A", &nsize, &nsize, &c_b35, &c_b10, &rwork[u + st1], n); slasdq_("U", &c__0, &nsize, &nsize, &nsize, &c__0, &d__[st], & e[st], &rwork[vt + st1], n, &rwork[u + st1], n, & rwork[nrwork], &c__1, &rwork[nrwork], info) ; if (*info != 0) { return 0; } /* In the real version, B is passed to SLASDQ and multiplied */ /* internally by Q'. Here B is complex and that product is */ /* computed below in two steps (real and imaginary parts). */ j = irwb - 1; i__2 = *nrhs; for (jcol = 1; jcol <= i__2; ++jcol) { i__3 = st + nsize - 1; for (jrow = st; jrow <= i__3; ++jrow) { ++j; i__4 = jrow + jcol * b_dim1; rwork[j] = b[i__4].r; /* L180: */ } /* L190: */ } sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b10, &rwork[u + st1] , n, &rwork[irwb], &nsize, &c_b35, &rwork[irwrb], & nsize); j = irwb - 1; i__2 = *nrhs; for (jcol = 1; jcol <= i__2; ++jcol) { i__3 = st + nsize - 1; for (jrow = st; jrow <= i__3; ++jrow) { ++j; rwork[j] = r_imag(&b[jrow + jcol * b_dim1]); /* L200: */ } /* L210: */ } sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b10, &rwork[u + st1] , n, &rwork[irwb], &nsize, &c_b35, &rwork[irwib], & nsize); jreal = irwrb - 1; jimag = irwib - 1; i__2 = *nrhs; for (jcol = 1; jcol <= i__2; ++jcol) { i__3 = st + nsize - 1; for (jrow = st; jrow <= i__3; ++jrow) { ++jreal; ++jimag; i__4 = jrow + jcol * b_dim1; i__5 = jreal; i__6 = jimag; q__1.r = rwork[i__5], q__1.i = rwork[i__6]; b[i__4].r = q__1.r, b[i__4].i = q__1.i; /* L220: */ } /* L230: */ } clacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n); } else { /* A large problem. Solve it using divide and conquer. 
		slasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st],
			&rwork[u + st1], n, &rwork[vt + st1],
			&iwork[k + st1], &rwork[difl + st1],
			&rwork[difr + st1], &rwork[z__ + st1],
			&rwork[poles + st1], &iwork[givptr + st1],
			&iwork[givcol + st1], n, &iwork[perm + st1],
			&rwork[givnum + st1], &rwork[c__ + st1],
			&rwork[s + st1], &rwork[nrwork], &iwork[iwk], info);
		if (*info != 0) {
		    return 0;
		}
		bxst = bx + st1;
		clalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb,
			&work[bxst], n, &rwork[u + st1], n,
			&rwork[vt + st1], &iwork[k + st1],
			&rwork[difl + st1], &rwork[difr + st1],
			&rwork[z__ + st1], &rwork[poles + st1],
			&iwork[givptr + st1], &iwork[givcol + st1], n,
			&iwork[perm + st1], &rwork[givnum + st1],
			&rwork[c__ + st1], &rwork[s + st1], &rwork[nrwork],
			&iwork[iwk], info);
		if (*info != 0) {
		    return 0;
		}
	    }
	    st = i__ + 1;
	}
/* L240: */
    }

/* Apply the singular values and treat the tiny ones as zero. */

    tol = rcnd * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1));

    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {

/* Some of the elements in D can be negative because 1-by-1 */
/* subproblems were not solved explicitly. */

	if ((r__1 = d__[i__], dabs(r__1)) <= tol) {
	    claset_("A", &c__1, nrhs, &c_b1, &c_b1, &work[bx + i__ - 1], n);
	} else {
	    ++(*rank);
	    clascl_("G", &c__0, &c__0, &d__[i__], &c_b10, &c__1, nrhs,
		    &work[bx + i__ - 1], n, info);
	}
	d__[i__] = (r__1 = d__[i__], dabs(r__1));
/* L250: */
    }

/* Now apply back the right singular vectors. */

    icmpq2 = 1;
    i__1 = nsub;
    for (i__ = 1; i__ <= i__1; ++i__) {
	st = iwork[i__];
	st1 = st - 1;
	nsize = iwork[sizei + i__ - 1];
	bxst = bx + st1;
	if (nsize == 1) {
	    ccopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
	} else if (nsize <= *smlsiz) {

/* Since B and BX are complex, the following call to SGEMM */
/* is performed in two steps (real and imaginary parts). */

/*        CALL SGEMM( 'T', 'N', NSIZE, NRHS, NSIZE, ONE, */
/*    $               RWORK( VT+ST1 ), N, RWORK( BXST ), N, ZERO, */
/*    $               B( ST, 1 ), LDB ) */

	    j = bxst - *n - 1;
	    jreal = irwb - 1;
	    i__2 = *nrhs;
	    for (jcol = 1; jcol <= i__2; ++jcol) {
		j += *n;
		i__3 = nsize;
		for (jrow = 1; jrow <= i__3; ++jrow) {
		    ++jreal;
		    i__4 = j + jrow;
		    rwork[jreal] = work[i__4].r;
/* L260: */
		}
/* L270: */
	    }
	    sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b10, &rwork[vt + st1],
		    n, &rwork[irwb], &nsize, &c_b35, &rwork[irwrb], &nsize);
	    j = bxst - *n - 1;
	    jimag = irwb - 1;
	    i__2 = *nrhs;
	    for (jcol = 1; jcol <= i__2; ++jcol) {
		j += *n;
		i__3 = nsize;
		for (jrow = 1; jrow <= i__3; ++jrow) {
		    ++jimag;
		    rwork[jimag] = r_imag(&work[j + jrow]);
/* L280: */
		}
/* L290: */
	    }
	    sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b10, &rwork[vt + st1],
		    n, &rwork[irwb], &nsize, &c_b35, &rwork[irwib], &nsize);
	    jreal = irwrb - 1;
	    jimag = irwib - 1;
	    i__2 = *nrhs;
	    for (jcol = 1; jcol <= i__2; ++jcol) {
		i__3 = st + nsize - 1;
		for (jrow = st; jrow <= i__3; ++jrow) {
		    ++jreal;
		    ++jimag;
		    i__4 = jrow + jcol * b_dim1;
		    i__5 = jreal;
		    i__6 = jimag;
		    q__1.r = rwork[i__5], q__1.i = rwork[i__6];
		    b[i__4].r = q__1.r, b[i__4].i = q__1.i;
/* L300: */
		}
/* L310: */
	    }
	} else {
	    clalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n,
		    &b[st + b_dim1], ldb, &rwork[u + st1], n,
		    &rwork[vt + st1], &iwork[k + st1], &rwork[difl + st1],
		    &rwork[difr + st1], &rwork[z__ + st1],
		    &rwork[poles + st1], &iwork[givptr + st1],
		    &iwork[givcol + st1], n, &iwork[perm + st1],
		    &rwork[givnum + st1], &rwork[c__ + st1],
		    &rwork[s + st1], &rwork[nrwork], &iwork[iwk], info);
	    if (*info != 0) {
		return 0;
	    }
	}
/* L320: */
    }
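/* The truncation rule used above (and in the small-N branch) is the */
/* standard minimum-norm least squares recipe: singular values at or */
/* below TOL = RCOND * max|D(i)| are treated as exact zeros, and the */
/* surviving ones divide the transformed right-hand sides. A minimal */
/* disabled sketch, assuming the transformed RHS is stored as separate */
/* real and imaginary column-major arrays; truncate_and_scale is an */
/* illustrative name only. */
#if 0
#include <math.h>

static int truncate_and_scale(int n, int nrhs, float rcond, float *d,
                              float *xr, float *xi, int ldx)
{
    int i, jj, rank = 0;
    float dmax = 0.f, tol;

    for (i = 0; i < n; ++i)
        if (fabsf(d[i]) > dmax)
            dmax = fabsf(d[i]);
    tol = rcond * dmax;

    for (i = 0; i < n; ++i) {
        if (fabsf(d[i]) <= tol) {
            /* Negligible singular value: zero its row of X. */
            for (jj = 0; jj < nrhs; ++jj) {
                xr[i + jj * ldx] = 0.f;
                xi[i + jj * ldx] = 0.f;
            }
        } else {
            /* Significant singular value: divide, and count it. */
            for (jj = 0; jj < nrhs; ++jj) {
                xr[i + jj * ldx] /= d[i];
                xi[i + jj * ldx] /= d[i];
            }
            ++rank;
        }
        /* D may carry signs from 1-by-1 blocks solved implicitly. */
        d[i] = fabsf(d[i]);
    }
    return rank;
}
#endif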
/* Unscale and sort the singular values. */

    slascl_("G", &c__0, &c__0, &c_b10, &orgnrm, n, &c__1, &d__[1], n, info);
    slasrt_("D", n, &d__[1], info);
    clascl_("G", &c__0, &c__0, &orgnrm, &c_b10, n, nrhs, &b[b_offset], ldb,
	    info);

    return 0;

/* End of CLALSD */

} /* clalsd_ */
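/* A minimal disabled driver sketch for the routine above: solve the */
/* bidiagonal least squares problem min || A*X - B || for a tiny upper */
/* bidiagonal A given by D and E. The argument order matches the f2c */
/* definition above; the workspace sizes here are deliberately */
/* overallocated stand-ins -- the exact formulas are given in the */
/* routine's documentation. Illustration only, not part of LAPACK. */
#if 0
#include <stdio.h>

int main(void)
{
    integer n = 3, nrhs = 1, ldb = 3, smlsiz = 25, rank, info;
    integer iwork[256];                       /* overallocated scratch */
    real d[3] = {4.f, 2.f, 1.f};              /* diagonal of A */
    real e[2] = {.5f, .25f};                  /* superdiagonal of A */
    real rcond = 1e-5f;                       /* relative rank threshold */
    real rwork[4096];                         /* overallocated scratch */
    complex b[3] = {{1.f, 1.f}, {2.f, 0.f}, {0.f, -1.f}};
    complex work[16];                         /* needs at least N*NRHS */

    clalsd_("U", &smlsiz, &n, &nrhs, d, e, b, &ldb, &rcond, &rank,
	    work, rwork, iwork, &info);
    if (info == 0) {
	/* On success B is overwritten by the solution X. */
	printf("rank = %d, x1 = (%g, %g)\n",
	       (int) rank, (double) b[0].r, (double) b[0].i);
    }
    return 0;
}
#endif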