/* Subroutine */ int cgeqp3_(integer *m, integer *n, complex *a, integer *lda,
        integer *jpvt, complex *tau, complex *work, integer *lwork, real *
        rwork, integer *info)
{
/*  CGEQP3 (f2c translation of the LAPACK Fortran routine)

    Computes a QR factorization with column pivoting of an M-by-N complex
    matrix A:  A*P = Q*R, using Level-3 BLAS where possible.

    Parameters (Fortran 1-based indexing after the pointer adjustments
    below):
      M     (in)     number of rows of A, M >= 0
      N     (in)     number of columns of A, N >= 0
      A     (in/out) on exit, the upper triangle holds R and the entries
                     below the diagonal, together with TAU, represent Q
                     as a product of elementary reflectors
      LDA   (in)     leading dimension of A, LDA >= max(1,M)
      JPVT  (in/out) on entry, JPVT(j) != 0 marks column j as a leading
                     ("fixed") column; on exit, JPVT(j) = i means the
                     j-th column of A*P was column i of A
      TAU   (out)    scalar factors of the elementary reflectors
      WORK  (out)    complex workspace; WORK(1) returns the optimal LWORK
      LWORK (in)     workspace size; LWORK = -1 requests a size query only
      RWORK (work)   real workspace of length 2*N (partial/exact column norms)
      INFO  (out)    0 on success; -k if the k-th argument was illegal
*/
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;

    /* Local variables */
    integer j, jb, na, nb, sm, sn, nx, fjb, iws, nfxd, nbmin;
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
            complex *, integer *);
    integer minmn, minws;
    extern /* Subroutine */ int claqp2_(integer *, integer *, integer *,
            complex *, integer *, integer *, complex *, real *, real *,
            complex *);
    extern real scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */ int cgeqrf_(integer *, integer *, complex *,
            integer *, complex *, complex *, integer *, integer *), xerbla_(
            char *, integer *);
    extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
            integer *, integer *);
    extern /* Subroutine */ int claqps_(integer *, integer *, integer *,
            integer *, integer *, complex *, integer *, integer *, complex *,
            real *, real *, complex *, complex *, integer *);
    integer topbmn, sminmn;
    extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
            integer *, complex *, integer *, complex *, complex *, integer *,
            complex *, integer *, integer *);
    integer lwkopt;
    logical lquery;

/*  -- LAPACK computational routine (version 3.4.2) -- */
/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/*     September 2012 */

/*     Test input arguments */
/*     ==================== */

    /* Parameter adjustments: shift the pointers so arrays can be indexed
       with 1-based Fortran subscripts, e.g. a[i + j*a_dim1], jpvt[1..n]. */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --jpvt;
    --tau;
    --work;
    --rwork;

    /* Function Body */
    *info = 0;
    lquery = *lwork == -1;
    if (*m < 0) {
        *info = -1;
    } else if (*n < 0) {
        *info = -2;
    } else if (*lda < max(1,*m)) {
        *info = -4;
    }
    if (*info == 0) {
        minmn = min(*m,*n);
        if (minmn == 0) {
            iws = 1;
            lwkopt = 1;
        } else {
            /* Minimal workspace is N+1; optimal uses the CGEQRF block size. */
            iws = *n + 1;
            nb = ilaenv_(&c__1, "CGEQRF", " ", m, n, &c_n1, &c_n1);
            lwkopt = (*n + 1) * nb;
        }
        /* Report the optimal workspace size in WORK(1). */
        work[1].r = (real) lwkopt;
        work[1].i = 0.f; // , expr subst
        if (*lwork < iws && ! lquery) {
            *info = -8;
        }
    }
    if (*info != 0) {
        i__1 = -(*info);
        xerbla_("CGEQP3", &i__1);
        return 0;
    } else if (lquery) {
        return 0;
    }

/*     Quick return if possible. */
    if (minmn == 0) {
        return 0;
    }

/*     Move initial columns up front: columns flagged in JPVT are swapped
       to the leading positions and will not be pivoted later. */
    nfxd = 1;
    i__1 = *n;
    for (j = 1; j <= i__1; ++j) {
        if (jpvt[j] != 0) {
            if (j != nfxd) {
                cswap_(m, &a[j * a_dim1 + 1], &c__1, &a[nfxd * a_dim1 + 1], &
                        c__1);
                jpvt[j] = jpvt[nfxd];
                jpvt[nfxd] = j;
            } else {
                jpvt[j] = j;
            }
            ++nfxd;
        } else {
            jpvt[j] = j;
        }
/* L10: */
    }
    /* NFXD counted one past the last fixed column; make it the count. */
    --nfxd;

/*     Factorize fixed columns */
/*     ======================= */
/*     Compute the QR factorization of fixed columns and update */
/*     remaining columns. */
    if (nfxd > 0) {
        na = min(*m,nfxd);
/* CC      CALL CGEQR2( M, NA, A, LDA, TAU, WORK, INFO ) */
        cgeqrf_(m, &na, &a[a_offset], lda, &tau[1], &work[1], lwork, info);
        /* Track the largest workspace actually used (CGEQRF returns it
           in WORK(1)).  Computing MAX */
        i__1 = iws;
        i__2 = (integer) work[1].r; // , expr subst
        iws = max(i__1,i__2);
        if (na < *n) {
/* CC         CALL CUNM2R( 'Left', 'Conjugate Transpose', M, N-NA, */
/* CC  $                   NA, A, LDA, TAU, A( 1, NA+1 ), LDA, WORK, */
/* CC  $                   INFO ) */
            /* Apply Q^H from the fixed-column factorization to the
               remaining (free) columns. */
            i__1 = *n - na;
            cunmqr_("Left", "Conjugate Transpose", m, &i__1, &na, &a[a_offset]
                    , lda, &tau[1], &a[(na + 1) * a_dim1 + 1], lda, &work[1],
                    lwork, info);
            /* Computing MAX */
            i__1 = iws;
            i__2 = (integer) work[1].r; // , expr subst
            iws = max(i__1,i__2);
        }
    }

/*     Factorize free columns */
/*     ====================== */
    if (nfxd < minmn) {

        sm = *m - nfxd;
        sn = *n - nfxd;
        sminmn = minmn - nfxd;

/*        Determine the block size. */
        nb = ilaenv_(&c__1, "CGEQRF", " ", &sm, &sn, &c_n1, &c_n1);
        nbmin = 2;
        nx = 0;

        if (nb > 1 && nb < sminmn) {

/*           Determine when to cross over from blocked to unblocked code. */
            /* Computing MAX */
            i__1 = 0;
            i__2 = ilaenv_(&c__3, "CGEQRF", " ", &sm, &sn, &c_n1, &
                    c_n1); // , expr subst
            nx = max(i__1,i__2);

            if (nx < sminmn) {

/*              Determine if workspace is large enough for blocked code. */
                minws = (sn + 1) * nb;
                iws = max(iws,minws);
                if (*lwork < minws) {

/*                 Not enough workspace to use optimal NB: Reduce NB and */
/*                 determine the minimum value of NB. */
                    nb = *lwork / (sn + 1);
                    /* Computing MAX */
                    i__1 = 2;
                    i__2 = ilaenv_(&c__2, "CGEQRF", " ", &sm, &sn, &
                            c_n1, &c_n1); // , expr subst
                    nbmin = max(i__1,i__2);
                }
            }
        }

/*        Initialize partial column norms. The first N elements of work */
/*        store the exact column norms. */
        i__1 = *n;
        for (j = nfxd + 1; j <= i__1; ++j) {
            rwork[j] = scnrm2_(&sm, &a[nfxd + 1 + j * a_dim1], &c__1);
            rwork[*n + j] = rwork[j];
/* L20: */
        }

        if (nb >= nbmin && nb < sminmn && nx < sminmn) {

/*           Use blocked code initially. */
            j = nfxd + 1;

/*           Compute factorization: while loop. */
            topbmn = minmn - nx;
L30:
            if (j <= topbmn) {
                /* Computing MIN */
                i__1 = nb;
                i__2 = topbmn - j + 1; // , expr subst
                jb = min(i__1,i__2);

/*              Factorize JB columns among columns J:N. */
                /* CLAQPS may factor fewer than JB columns (FJB) when a
                   partial-norm update would be inaccurate. */
                i__1 = *n - j + 1;
                i__2 = j - 1;
                i__3 = *n - j + 1;
                claqps_(m, &i__1, &i__2, &jb, &fjb, &a[j * a_dim1 + 1], lda, &
                        jpvt[j], &tau[j], &rwork[j], &rwork[*n + j], &work[1],
                        &work[jb + 1], &i__3);

                j += fjb;
                goto L30;
            }
        } else {
            j = nfxd + 1;
        }

/*        Use unblocked code to factor the last or only block. */
        if (j <= minmn) {
            i__1 = *n - j + 1;
            i__2 = j - 1;
            claqp2_(m, &i__1, &i__2, &a[j * a_dim1 + 1], lda, &jpvt[j], &tau[
                    j], &rwork[j], &rwork[*n + j], &work[1]);
        }

    }

    /* Return the workspace size actually needed in WORK(1). */
    work[1].r = (real) iws;
    work[1].i = 0.f; // , expr subst
    return 0;

/*     End of CGEQP3 */

}
/* Subroutine */ int cdrvpb_(logical *dotype, integer *nn, integer *nval,
        integer *nrhs, real *thresh, logical *tsterr, integer *nmax, complex *
        a, complex *afac, complex *asav, complex *b, complex *bsav, complex *
        x, complex *xact, real *s, complex *work, real *rwork, integer *nout)
{
    /* Initialized data */
    static integer iseedy[4] = { 1988,1989,1990,1991 };
    /* Adjacent string literals concatenate: facts = "FNE", equeds = "NY"
       (one-character option codes, no NUL terminator needed). */
    static char facts[1*3] = "F" "N" "E";
    static char equeds[1*2] = "N" "Y";

    /* Format strings (Fortran FORMAT statements kept verbatim for the
       libf2c formatted-I/O machinery; \002 is an octal quote character). */
    static char fmt_9999[] = "(1x,a6,\002, UPLO='\002,a1,\002', N =\002,i5"
            ",\002, KD =\002,i5,\002, type \002,i1,\002, test(\002,i1,\002)"
            "=\002,g12.5)";
    static char fmt_9997[] = "(1x,a6,\002( '\002,a1,\002', '\002,a1,\002',"
            " \002,i5,\002, \002,i5,\002, ... ), EQUED='\002,a1,\002', type"
            " \002,i1,\002, test(\002,i1,\002)=\002,g12.5)";
    static char fmt_9998[] = "(1x,a6,\002( '\002,a1,\002', '\002,a1,\002',"
            " \002,i5,\002, \002,i5,\002, ... ), type \002,i1,\002, test(\002"
            ",i1,\002)=\002,g12.5)";

    /* System generated locals */
    address a__1[2];
    integer i__1, i__2, i__3, i__4, i__5, i__6, i__7[2];
    char ch__1[2];

    /* Builtin functions */
    /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen);
    integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void);
    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);

    /* Local variables */
    integer i__, k, n, i1, i2, k1, kd, nb, in, kl, iw, ku, nt, lda, ikd, nkd,
            ldab;
    char fact[1];
    integer ioff, mode, koff;
    real amax;
    char path[3];
    integer imat, info;
    char dist[1], uplo[1], type__[1];
    integer nrun, ifact;
    extern /* Subroutine */ int cget04_(integer *, integer *, complex *,
            integer *, complex *, integer *, real *, real *);
    integer nfail, iseed[4], nfact;
    extern /* Subroutine */ int cpbt01_(char *, integer *, integer *, complex
            *, integer *, complex *, integer *, real *, real *),
            cpbt02_(char *, integer *, integer *, integer *, complex *,
            integer *, complex *, integer *, complex *, integer *, real *,
            real *), cpbt05_(char *, integer *, integer *,
            integer *, complex *, integer *, complex *, integer *, complex *,
            integer *, complex *, integer *, real *, real *, real *);
    integer kdval[4];
    extern logical lsame_(char *, char *);
    char equed[1];
    integer nbmin;
    real rcond, roldc, scond;
    integer nimat;
    extern doublereal sget06_(real *, real *);
    real anorm;
    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
            complex *, integer *), cpbsv_(char *, integer *, integer *,
            integer *, complex *, integer *, complex *, integer *, integer *);
    logical equil;
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
            complex *, integer *);
    integer iuplo, izero, nerrs;
    logical zerot;
    char xtype[1];
    extern /* Subroutine */ int clatb4_(char *, integer *, integer *, integer
            *, char *, integer *, integer *, real *, integer *, real *, char *
            ), aladhd_(integer *, char *);
    extern doublereal clanhb_(char *, char *, integer *, integer *, complex *,
             integer *, real *), clange_(char *, integer *, integer *,
            complex *, integer *, real *);
    extern /* Subroutine */ int claqhb_(char *, integer *, integer *, complex
            *, integer *, real *, real *, real *, char *), alaerh_(char *,
            char *, integer *, integer *, char *, integer *, integer *,
            integer *, integer *, integer *, integer *, integer *, integer *,
            integer *), claipd_(integer *, complex *, integer *, integer *);
    logical prefac;
    real rcondc;
    logical nofact;
    char packit[1];
    integer iequed;
    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
            *, integer *, complex *, integer *), clarhs_(char *, char *, char
            *, char *, integer *, integer *, integer *, integer *, integer *,
            complex *, integer *, complex *, integer *, complex *, integer *,
            integer *, integer *), claset_(char *, integer *, integer *,
            complex *, complex *, complex *, integer *), cpbequ_(char *,
            integer *, integer *, complex *, integer *, real *, real *, real *
            , integer *), alasvm_(char *, integer *, integer *, integer *,
            integer *);
    real cndnum;
    extern /* Subroutine */ int clatms_(integer *, integer *, char *, integer
            *, char *, real *, integer *, real *, real *, integer *, integer *
            , char *, complex *, integer *, complex *, integer *),
            cpbtrf_(char *, integer *, integer *, complex *, integer *,
            integer *);
    real ainvnm;
    extern /* Subroutine */ int cpbtrs_(char *, integer *, integer *, integer
            *, complex *, integer *, complex *, integer *, integer *),
            xlaenv_(integer *, integer *), cpbsvx_(char *, char *, integer *,
            integer *, integer *, complex *, integer *, complex *, integer *,
            char *, real *, complex *, integer *, complex *, integer *, real *
            , real *, real *, complex *, real *, integer *), cerrvx_(char *,
            integer *);
    real result[6];

    /* Fortran I/O blocks */
    static cilist io___57 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___60 = { 0, 0, 0, fmt_9997, 0 };
    static cilist io___61 = { 0, 0, 0, fmt_9998, 0 };

/*  -- LAPACK test routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  CDRVPB tests the driver routines CPBSV and -SVX (Hermitian positive */
/*  definite band systems). */

/*  Arguments */
/*  ========= */

/*  DOTYPE  (input) LOGICAL array, dimension (NTYPES) */
/*          The matrix types to be used for testing.  Matrices of type j */
/*          (for 1 <= j <= NTYPES) are used for testing if DOTYPE(j) = */
/*          .TRUE.; if DOTYPE(j) = .FALSE., then type j is not used. */

/*  NN      (input) INTEGER */
/*          The number of values of N contained in the vector NVAL. */

/*  NVAL    (input) INTEGER array, dimension (NN) */
/*          The values of the matrix dimension N. */

/*  NRHS    (input) INTEGER */
/*          The number of right hand side vectors to be generated for */
/*          each linear system. */

/*  THRESH  (input) REAL */
/*          The threshold value for the test ratios.  A result is */
/*          included in the output file if RESULT >= THRESH.  To have */
/*          every test ratio printed, use THRESH = 0. */

/*  TSTERR  (input) LOGICAL */
/*          Flag that indicates whether error exits are to be tested. */

/*  NMAX    (input) INTEGER */
/*          The maximum value permitted for N, used in dimensioning the */
/*          work arrays. */

/*  A       (workspace) COMPLEX array, dimension (NMAX*NMAX) */
/*  AFAC    (workspace) COMPLEX array, dimension (NMAX*NMAX) */
/*  ASAV    (workspace) COMPLEX array, dimension (NMAX*NMAX) */
/*  B       (workspace) COMPLEX array, dimension (NMAX*NRHS) */
/*  BSAV    (workspace) COMPLEX array, dimension (NMAX*NRHS) */
/*  X       (workspace) COMPLEX array, dimension (NMAX*NRHS) */
/*  XACT    (workspace) COMPLEX array, dimension (NMAX*NRHS) */
/*  S       (workspace) REAL array, dimension (NMAX) */
/*  WORK    (workspace) COMPLEX array, dimension (NMAX*max(3,NRHS)) */
/*  RWORK   (workspace) REAL array, dimension (NMAX+2*NRHS) */

/*  NOUT    (input) INTEGER */
/*          The unit number for output. */

/*  ===================================================================== */

    /* Parameter adjustments: shift pointers so arrays use 1-based
       Fortran-style subscripts. */
    --rwork;
    --work;
    --s;
    --xact;
    --x;
    --bsav;
    --b;
    --asav;
    --afac;
    --a;
    --nval;
    --dotype;

    /* Function Body */

/*     Initialize constants and the random number seed. */

    /* path = "CPB": first char 'C' (complex), then "PB" (positive band). */
    s_copy(path, "Complex precision", (ftnlen)1, (ftnlen)17);
    s_copy(path + 1, "PB", (ftnlen)2, (ftnlen)2);
    nrun = 0;
    nfail = 0;
    nerrs = 0;
    for (i__ = 1; i__ <= 4; ++i__) {
        iseed[i__ - 1] = iseedy[i__ - 1];
/* L10: */
    }

/*     Test the error exits */
    if (*tsterr) {
        cerrvx_(path, nout);
    }
    infoc_1.infot = 0;
    kdval[0] = 0;

/*     Set the block size and minimum block size for testing. */
    nb = 1;
    nbmin = 2;
    xlaenv_(&c__1, &nb);
    xlaenv_(&c__2, &nbmin);

/*     Do for each value of N in NVAL */
    i__1 = *nn;
    for (in = 1; in <= i__1; ++in) {
        n = nval[in];
        lda = max(n,1);
        *(unsigned char *)xtype = 'N';

/*        Set limits on the number of loop iterations. */
        /* Computing MAX */
        i__2 = 1, i__3 = min(n,4);
        nkd = max(i__2,i__3);
        nimat = 8;
        if (n == 0) {
            nimat = 1;
        }

        kdval[1] = n + (n + 1) / 4;
        kdval[2] = (n * 3 - 1) / 4;
        kdval[3] = (n + 1) / 4;

        i__2 = nkd;
        for (ikd = 1; ikd <= i__2; ++ikd) {

/*           Do for KD = 0, (5*N+1)/4, (3N-1)/4, and (N+1)/4. This order */
/*           makes it easier to skip redundant values for small values */
/*           of N. */
            kd = kdval[ikd - 1];
            ldab = kd + 1;

/*           Do first for UPLO = 'U', then for UPLO = 'L' */
            for (iuplo = 1; iuplo <= 2; ++iuplo) {
                koff = 1;
                if (iuplo == 1) {
                    /* Upper storage: CLATMS packing 'Q', and the band may
                       start at an offset when KD >= N. */
                    *(unsigned char *)uplo = 'U';
                    *(unsigned char *)packit = 'Q';
                    /* Computing MAX */
                    i__3 = 1, i__4 = kd + 2 - n;
                    koff = max(i__3,i__4);
                } else {
                    *(unsigned char *)uplo = 'L';
                    *(unsigned char *)packit = 'B';
                }

                i__3 = nimat;
                for (imat = 1; imat <= i__3; ++imat) {

/*                 Do the tests only if DOTYPE( IMAT ) is true. */
                    if (! dotype[imat]) {
                        goto L80;
                    }

/*                 Skip types 2, 3, or 4 if the matrix size is too small. */
                    zerot = imat >= 2 && imat <= 4;
                    if (zerot && n < imat - 1) {
                        goto L80;
                    }

                    if (! zerot || ! dotype[1]) {

/*                    Set up parameters with CLATB4 and generate a test */
/*                    matrix with CLATMS. */
                        clatb4_(path, &imat, &n, &n, type__, &kl, &ku, &anorm,
                                 &mode, &cndnum, dist);

                        s_copy(srnamc_1.srnamt, "CLATMS", (ftnlen)6, (ftnlen)
                                6);
                        clatms_(&n, &n, dist, iseed, type__, &rwork[1], &mode,
                                 &cndnum, &anorm, &kd, &kd, packit, &a[koff],
                                &ldab, &work[1], &info);

/*                    Check error code from CLATMS. */
                        if (info != 0) {
                            alaerh_(path, "CLATMS", &info, &c__0, uplo, &n, &
                                    n, &c_n1, &c_n1, &c_n1, &imat, &nfail, &
                                    nerrs, nout);
                            goto L80;
                        }
                    } else if (izero > 0) {

/*                    Use the same matrix for types 3 and 4 as for type */
/*                    2 by copying back the zeroed out column. */
                        /* NOTE(review): IZERO, I1, I2 and WORK here carry
                           state from the previous IMAT iteration (type 2);
                           if type 2 was skipped they are unset — this
                           mirrors the Fortran original. */
                        iw = (lda << 1) + 1;
                        if (iuplo == 1) {
                            ioff = (izero - 1) * ldab + kd + 1;
                            i__4 = izero - i1;
                            ccopy_(&i__4, &work[iw], &c__1, &a[ioff - izero +
                                    i1], &c__1);
                            iw = iw + izero - i1;
                            i__4 = i2 - izero + 1;
                            /* Computing MAX */
                            i__6 = ldab - 1;
                            i__5 = max(i__6,1);
                            ccopy_(&i__4, &work[iw], &c__1, &a[ioff], &i__5);
                        } else {
                            ioff = (i1 - 1) * ldab + 1;
                            i__4 = izero - i1;
                            /* Computing MAX */
                            i__6 = ldab - 1;
                            i__5 = max(i__6,1);
                            ccopy_(&i__4, &work[iw], &c__1, &a[ioff + izero -
                                    i1], &i__5);
                            ioff = (izero - 1) * ldab + 1;
                            iw = iw + izero - i1;
                            i__4 = i2 - izero + 1;
                            ccopy_(&i__4, &work[iw], &c__1, &a[ioff], &c__1);
                        }
                    }

/*                 For types 2-4, zero one row and column of the matrix */
/*                 to test that INFO is returned correctly. */
                    izero = 0;
                    if (zerot) {
                        if (imat == 2) {
                            izero = 1;
                        } else if (imat == 3) {
                            izero = n;
                        } else {
                            izero = n / 2 + 1;
                        }

/*                    Save the zeroed out row and column in WORK(*,3) */
                        iw = lda << 1;
                        /* Computing MIN */
                        i__5 = (kd << 1) + 1;
                        i__4 = min(i__5,n);
                        for (i__ = 1; i__ <= i__4; ++i__) {
                            i__5 = iw + i__;
                            work[i__5].r = 0.f, work[i__5].i = 0.f;
/* L20: */
                        }
                        ++iw;
                        /* I1..I2 is the in-band range of row/column IZERO. */
                        /* Computing MAX */
                        i__4 = izero - kd;
                        i1 = max(i__4,1);
                        /* Computing MIN */
                        i__4 = izero + kd;
                        i2 = min(i__4,n);

                        if (iuplo == 1) {
                            ioff = (izero - 1) * ldab + kd + 1;
                            i__4 = izero - i1;
                            cswap_(&i__4, &a[ioff - izero + i1], &c__1, &work[
                                    iw], &c__1);
                            iw = iw + izero - i1;
                            i__4 = i2 - izero + 1;
                            /* Computing MAX */
                            i__6 = ldab - 1;
                            i__5 = max(i__6,1);
                            cswap_(&i__4, &a[ioff], &i__5, &work[iw], &c__1);
                        } else {
                            ioff = (i1 - 1) * ldab + 1;
                            i__4 = izero - i1;
                            /* Computing MAX */
                            i__6 = ldab - 1;
                            i__5 = max(i__6,1);
                            cswap_(&i__4, &a[ioff + izero - i1], &i__5, &work[
                                    iw], &c__1);
                            ioff = (izero - 1) * ldab + 1;
                            iw = iw + izero - i1;
                            i__4 = i2 - izero + 1;
                            cswap_(&i__4, &a[ioff], &c__1, &work[iw], &c__1);
                        }
                    }

/*                 Set the imaginary part of the diagonals. */
                    if (iuplo == 1) {
                        claipd_(&n, &a[kd + 1], &ldab, &c__0);
                    } else {
                        claipd_(&n, &a[1], &ldab, &c__0);
                    }

/*                 Save a copy of the matrix A in ASAV. */
                    i__4 = kd + 1;
                    clacpy_("Full", &i__4, &n, &a[1], &ldab, &asav[1], &ldab);

                    for (iequed = 1; iequed <= 2; ++iequed) {
                        *(unsigned char *)equed = *(unsigned char *)&equeds[
                                iequed - 1];
                        if (iequed == 1) {
                            nfact = 3;
                        } else {
                            nfact = 1;
                        }

                        i__4 = nfact;
                        for (ifact = 1; ifact <= i__4; ++ifact) {
                            *(unsigned char *)fact = *(unsigned char *)&facts[
                                    ifact - 1];
                            prefac = lsame_(fact, "F");
                            nofact = lsame_(fact, "N");
                            equil = lsame_(fact, "E");

                            if (zerot) {
                                if (prefac) {
                                    goto L60;
                                }
                                rcondc = 0.f;

                            } else if (! lsame_(fact, "N")) {

/*                          Compute the condition number for comparison */
/*                          with the value returned by CPBSVX (FACT = */
/*                          'N' reuses the condition number from the */
/*                          previous iteration with FACT = 'F'). */
                                i__5 = kd + 1;
                                clacpy_("Full", &i__5, &n, &asav[1], &ldab, &
                                        afac[1], &ldab);
                                if (equil || iequed > 1) {

/*                             Compute row and column scale factors to */
/*                             equilibrate the matrix A. */
                                    cpbequ_(uplo, &n, &kd, &afac[1], &ldab, &
                                            s[1], &scond, &amax, &info);
                                    if (info == 0 && n > 0) {
                                        if (iequed > 1) {
                                            /* Force equilibration. */
                                            scond = 0.f;
                                        }

/*                                Equilibrate the matrix. */
                                        claqhb_(uplo, &n, &kd, &afac[1], &
                                                ldab, &s[1], &scond, &amax,
                                                equed);
                                    }
                                }

/*                          Save the condition number of the */
/*                          non-equilibrated system for use in CGET04. */
                                if (equil) {
                                    roldc = rcondc;
                                }

/*                          Compute the 1-norm of A. */
                                anorm = clanhb_("1", uplo, &n, &kd, &afac[1],
                                        &ldab, &rwork[1]);

/*                          Factor the matrix A. */
                                cpbtrf_(uplo, &n, &kd, &afac[1], &ldab, &info)
                                        ;

/*                          Form the inverse of A. */
                                claset_("Full", &n, &n, &c_b47, &c_b48, &a[1],
                                         &lda);
                                s_copy(srnamc_1.srnamt, "CPBTRS", (ftnlen)6, (
                                        ftnlen)6);
                                cpbtrs_(uplo, &n, &kd, &n, &afac[1], &ldab, &
                                        a[1], &lda, &info);

/*                          Compute the 1-norm condition number of A. */
                                ainvnm = clange_("1", &n, &n, &a[1], &lda, &
                                        rwork[1]);
                                if (anorm <= 0.f || ainvnm <= 0.f) {
                                    rcondc = 1.f;
                                } else {
                                    rcondc = 1.f / anorm / ainvnm;
                                }
                            }

/*                       Restore the matrix A. */
                            i__5 = kd + 1;
                            clacpy_("Full", &i__5, &n, &asav[1], &ldab, &a[1],
                                     &ldab);

/*                       Form an exact solution and set the right hand */
/*                       side. */
                            s_copy(srnamc_1.srnamt, "CLARHS", (ftnlen)6, (
                                    ftnlen)6);
                            clarhs_(path, xtype, uplo, " ", &n, &n, &kd, &kd,
                                    nrhs, &a[1], &ldab, &xact[1], &lda, &b[1],
                                     &lda, iseed, &info);
                            *(unsigned char *)xtype = 'C';
                            clacpy_("Full", &n, nrhs, &b[1], &lda, &bsav[1], &
                                    lda);

                            if (nofact) {

/*                          --- Test CPBSV  --- */

/*                          Compute the L*L' or U'*U factorization of the */
/*                          matrix and solve the system. */
                                i__5 = kd + 1;
                                clacpy_("Full", &i__5, &n, &a[1], &ldab, &
                                        afac[1], &ldab);
                                clacpy_("Full", &n, nrhs, &b[1], &lda, &x[1],
                                        &lda);

                                s_copy(srnamc_1.srnamt, "CPBSV ", (ftnlen)6, (
                                        ftnlen)6);
                                cpbsv_(uplo, &n, &kd, nrhs, &afac[1], &ldab, &
                                        x[1], &lda, &info);

/*                          Check error code from CPBSV . */
                                if (info != izero) {
                                    alaerh_(path, "CPBSV ", &info, &izero,
                                            uplo, &n, &n, &kd, &kd, nrhs, &
                                            imat, &nfail, &nerrs, nout);
                                    goto L40;
                                } else if (info != 0) {
                                    /* Expected singularity detected:
                                       skip residual tests. */
                                    goto L40;
                                }

/*                          Reconstruct matrix from factors and compute */
/*                          residual. */
                                cpbt01_(uplo, &n, &kd, &a[1], &ldab, &afac[1],
                                         &ldab, &rwork[1], result);

/*                          Compute residual of the computed solution. */
                                clacpy_("Full", &n, nrhs, &b[1], &lda, &work[
                                        1], &lda);
                                cpbt02_(uplo, &n, &kd, nrhs, &a[1], &ldab, &x[
                                        1], &lda, &work[1], &lda, &rwork[1], &
                                        result[1]);

/*                          Check solution from generated exact solution. */
                                cget04_(&n, nrhs, &x[1], &lda, &xact[1], &lda,
                                         &rcondc, &result[2]);
                                nt = 3;

/*                          Print information about the tests that did */
/*                          not pass the threshold. */
                                i__5 = nt;
                                for (k = 1; k <= i__5; ++k) {
                                    if (result[k - 1] >= *thresh) {
                                        if (nfail == 0 && nerrs == 0) {
                                            aladhd_(nout, path);
                                        }
                                        io___57.ciunit = *nout;
                                        s_wsfe(&io___57);
                                        do_fio(&c__1, "CPBSV ", (ftnlen)6);
                                        do_fio(&c__1, uplo, (ftnlen)1);
                                        do_fio(&c__1, (char *)&n, (ftnlen)
                                                sizeof(integer));
                                        do_fio(&c__1, (char *)&kd, (ftnlen)
                                                sizeof(integer));
                                        do_fio(&c__1, (char *)&imat, (ftnlen)
                                                sizeof(integer));
                                        do_fio(&c__1, (char *)&k, (ftnlen)
                                                sizeof(integer));
                                        do_fio(&c__1, (char *)&result[k - 1],
                                                (ftnlen)sizeof(real));
                                        e_wsfe();
                                        ++nfail;
                                    }
/* L30: */
                                }
                                nrun += nt;
L40:
                                ;
                            }

/*                       --- Test CPBSVX --- */

                            if (! prefac) {
                                i__5 = kd + 1;
                                claset_("Full", &i__5, &n, &c_b47, &c_b47, &
                                        afac[1], &ldab);
                            }
                            claset_("Full", &n, nrhs, &c_b47, &c_b47, &x[1], &
                                    lda);
                            if (iequed > 1 && n > 0) {

/*                          Equilibrate the matrix if FACT='F' and */
/*                          EQUED='Y' */
                                claqhb_(uplo, &n, &kd, &a[1], &ldab, &s[1], &
                                        scond, &amax, equed);
                            }

/*                       Solve the system and compute the condition */
/*                       number and error bounds using CPBSVX. */
                            s_copy(srnamc_1.srnamt, "CPBSVX", (ftnlen)6, (
                                    ftnlen)6);
                            cpbsvx_(fact, uplo, &n, &kd, nrhs, &a[1], &ldab, &
                                    afac[1], &ldab, equed, &s[1], &b[1], &lda,
                                     &x[1], &lda, &rcond, &rwork[1], &rwork[*
                                    nrhs + 1], &work[1], &rwork[(*nrhs << 1)
                                    + 1], &info);

/*                       Check the error code from CPBSVX. */
                            if (info != izero) {
/*                          Writing concatenation: build the 2-char
                            FACT//UPLO string for the error report. */
                                i__7[0] = 1, a__1[0] = fact;
                                i__7[1] = 1, a__1[1] = uplo;
                                s_cat(ch__1, a__1, i__7, &c__2, (ftnlen)2);
                                alaerh_(path, "CPBSVX", &info, &izero, ch__1,
                                        &n, &n, &kd, &kd, nrhs, &imat, &nfail,
                                         &nerrs, nout);
                                goto L60;
                            }

                            if (info == 0) {
                                if (! prefac) {

/*                             Reconstruct matrix from factors and */
/*                             compute residual. */
                                    cpbt01_(uplo, &n, &kd, &a[1], &ldab, &
                                            afac[1], &ldab, &rwork[(*nrhs <<
                                            1) + 1], result);
                                    k1 = 1;
                                } else {
                                    k1 = 2;
                                }

/*                          Compute residual of the computed solution. */
                                clacpy_("Full", &n, nrhs, &bsav[1], &lda, &
                                        work[1], &lda);
                                cpbt02_(uplo, &n, &kd, nrhs, &asav[1], &ldab,
                                        &x[1], &lda, &work[1], &lda, &rwork[(*
                                        nrhs << 1) + 1], &result[1]);

/*                          Check solution from generated exact solution. */
                                if (nofact || prefac && lsame_(equed, "N")) {
                                    cget04_(&n, nrhs, &x[1], &lda, &xact[1], &
                                            lda, &rcondc, &result[2]);
                                } else {
                                    cget04_(&n, nrhs, &x[1], &lda, &xact[1], &
                                            lda, &roldc, &result[2]);
                                }

/*                          Check the error bounds from iterative */
/*                          refinement. */
                                cpbt05_(uplo, &n, &kd, nrhs, &asav[1], &ldab,
                                        &b[1], &lda, &x[1], &lda, &xact[1], &
                                        lda, &rwork[1], &rwork[*nrhs + 1], &
                                        result[3]);
                            } else {
                                /* Singular matrix reported: only the RCOND
                                   comparison (test 6) is meaningful. */
                                k1 = 6;
                            }

/*                       Compare RCOND from CPBSVX with the computed */
/*                       value in RCONDC. */
                            result[5] = sget06_(&rcond, &rcondc);

/*                       Print information about the tests that did not */
/*                       pass the threshold. */
                            for (k = k1; k <= 6; ++k) {
                                if (result[k - 1] >= *thresh) {
                                    if (nfail == 0 && nerrs == 0) {
                                        aladhd_(nout, path);
                                    }
                                    if (prefac) {
                                        io___60.ciunit = *nout;
                                        s_wsfe(&io___60);
                                        do_fio(&c__1, "CPBSVX", (ftnlen)6);
                                        do_fio(&c__1, fact, (ftnlen)1);
                                        do_fio(&c__1, uplo, (ftnlen)1);
                                        do_fio(&c__1, (char *)&n, (ftnlen)
                                                sizeof(integer));
                                        do_fio(&c__1, (char *)&kd, (ftnlen)
                                                sizeof(integer));
                                        do_fio(&c__1, equed, (ftnlen)1);
                                        do_fio(&c__1, (char *)&imat, (ftnlen)
                                                sizeof(integer));
                                        do_fio(&c__1, (char *)&k, (ftnlen)
                                                sizeof(integer));
                                        do_fio(&c__1, (char *)&result[k - 1],
                                                (ftnlen)sizeof(real));
                                        e_wsfe();
                                    } else {
                                        io___61.ciunit = *nout;
                                        s_wsfe(&io___61);
                                        do_fio(&c__1, "CPBSVX", (ftnlen)6);
                                        do_fio(&c__1, fact, (ftnlen)1);
                                        do_fio(&c__1, uplo, (ftnlen)1);
                                        do_fio(&c__1, (char *)&n, (ftnlen)
                                                sizeof(integer));
                                        do_fio(&c__1, (char *)&kd, (ftnlen)
                                                sizeof(integer));
                                        do_fio(&c__1, (char *)&imat, (ftnlen)
                                                sizeof(integer));
                                        do_fio(&c__1, (char *)&k, (ftnlen)
                                                sizeof(integer));
                                        do_fio(&c__1, (char *)&result[k - 1],
                                                (ftnlen)sizeof(real));
                                        e_wsfe();
                                    }
                                    ++nfail;
                                }
/* L50: */
                            }
                            nrun = nrun + 7 - k1;
L60:
                            ;
                        }
/* L70: */
                    }
L80:
                    ;
                }
/* L90: */
            }
/* L100: */
        }
/* L110: */
    }

/*     Print a summary of the results. */
    alasvm_(path, nout, &nfail, &nrun, &nerrs);

    return 0;

/*     End of CDRVPB */

} /* cdrvpb_ */
/* Subroutine */ int cgebak_(char *job, char *side, integer *n, integer *ilo,
        integer *ihi, real *scale, integer *m, complex *v, integer *ldv,
        integer *info)
{
/*  CGEBAK (f2c translation of the LAPACK Fortran routine)

    Forms the right or left eigenvectors of a complex general matrix by
    backward transformation on the vectors computed by CGEHRD/CHSEQR for
    the balanced matrix output by CGEBAL.

    Parameters:
      JOB   (in)     'N' (nothing), 'P' (permute only), 'S' (scale only)
                     or 'B' (both), matching the JOB used in CGEBAL
      SIDE  (in)     'R' for right eigenvectors, 'L' for left
      N     (in)     order of the matrix, N >= 0
      ILO,IHI (in)   the balancing range produced by CGEBAL;
                     1 <= ILO <= IHI <= N (ILO=1, IHI=0 when N=0)
      SCALE (in)     permutation indices and scale factors from CGEBAL
      M     (in)     number of eigenvector columns in V
      V     (in/out) N-by-M matrix of eigenvectors, transformed in place
      LDV   (in)     leading dimension of V, LDV >= max(1,N)
      INFO  (out)    0 on success, -k if argument k was illegal
*/
    /* System generated locals */
    integer v_dim1, v_offset, i__1;

    /* Local variables */
    integer i__, k;
    real s;
    integer ii;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
            complex *, integer *);
    logical leftv;
    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
            *), xerbla_(char *, integer *);
    logical rightv;

/*  -- LAPACK computational routine (version 3.4.0) -- */
/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/*     November 2011 */

/*     Decode and Test the input parameters */

    /* Parameter adjustments for 1-based Fortran indexing. */
    --scale;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;

    /* Function Body */
    rightv = lsame_(side, "R");
    leftv = lsame_(side, "L");

    *info = 0;
    if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S") && !
            lsame_(job, "B")) {
        *info = -1;
    } else if (! rightv && ! leftv) {
        *info = -2;
    } else if (*n < 0) {
        *info = -3;
    } else if (*ilo < 1 || *ilo > max(1,*n)) {
        *info = -4;
    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
        *info = -5;
    } else if (*m < 0) {
        *info = -7;
    } else if (*ldv < max(1,*n)) {
        *info = -9;
    }
    if (*info != 0) {
        i__1 = -(*info);
        xerbla_("CGEBAK", &i__1);
        return 0;
    }

/*     Quick return if possible */
    if (*n == 0) {
        return 0;
    }
    if (*m == 0) {
        return 0;
    }
    if (lsame_(job, "N")) {
        return 0;
    }

    /* When the balanced range is a single row/column there is no scaling
       to undo, but the permutation step below still applies. */
    if (*ilo == *ihi) {
        goto L30;
    }

/*     Backward balance */
    if (lsame_(job, "S") || lsame_(job, "B")) {

        if (rightv) {
            /* Right eigenvectors: multiply row i by D(i) = SCALE(i). */
            i__1 = *ihi;
            for (i__ = *ilo; i__ <= i__1; ++i__) {
                s = scale[i__];
                csscal_(m, &s, &v[i__ + v_dim1], ldv);
/* L10: */
            }
        }

        if (leftv) {
            /* Left eigenvectors: multiply row i by 1/D(i). */
            i__1 = *ihi;
            for (i__ = *ilo; i__ <= i__1; ++i__) {
                s = 1.f / scale[i__];
                csscal_(m, &s, &v[i__ + v_dim1], ldv);
/* L20: */
            }
        }

    }

/*     Backward permutation */

/*     For  I = ILO-1 step -1 until 1, */
/*              IHI+1 step 1 until N do -- */
L30:
    if (lsame_(job, "P") || lsame_(job, "B")) {
        if (rightv) {
            i__1 = *n;
            for (ii = 1; ii <= i__1; ++ii) {
                i__ = ii;
                /* Rows inside [ILO,IHI] were not permuted. */
                if (i__ >= *ilo && i__ <= *ihi) {
                    goto L40;
                }
                /* Walk ILO-1 down to 1 for the leading segment. */
                if (i__ < *ilo) {
                    i__ = *ilo - ii;
                }
                /* SCALE(i) holds a permutation index stored as a real
                   (Fortran convention); truncation to integer is intended. */
                k = scale[i__];
                if (k == i__) {
                    goto L40;
                }
                cswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
L40:
                ;
            }
        }

        if (leftv) {
            i__1 = *n;
            for (ii = 1; ii <= i__1; ++ii) {
                i__ = ii;
                if (i__ >= *ilo && i__ <= *ihi) {
                    goto L50;
                }
                if (i__ < *ilo) {
                    i__ = *ilo - ii;
                }
                /* Real-to-integer truncation intended (see note above). */
                k = scale[i__];
                if (k == i__) {
                    goto L50;
                }
                cswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
L50:
                ;
            }
        }
    }

    return 0;

/*     End of CGEBAK */

}
/* Subroutine */ int cgetf2_(integer *m, integer *n, complex *a, integer *lda,
        integer *ipiv, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    complex q__1;

    /* Builtin functions */
    double c_abs(complex *);
    void c_div(complex *, complex *, complex *);

    /* Local variables */
    integer i__, j, jp;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
            integer *), cgeru_(integer *, integer *, complex *, complex *,
            integer *, complex *, integer *, complex *, integer *);
    real sfmin;
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
            complex *, integer *);
    extern integer icamax_(integer *, complex *, integer *);
    extern doublereal slamch_(char *);
    extern /* Subroutine */ int xerbla_(char *, integer *);

/*  -- LAPACK routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  CGETF2 computes an LU factorization of a general m-by-n matrix A */
/*  using partial pivoting with row interchanges. */

/*  The factorization has the form */
/*     A = P * L * U */
/*  where P is a permutation matrix, L is lower triangular with unit */
/*  diagonal elements (lower trapezoidal if m > n), and U is upper */
/*  triangular (upper trapezoidal if m < n). */

/*  This is the right-looking Level 2 BLAS version of the algorithm. */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A.  M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A.  N >= 0. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the m by n matrix to be factored. */
/*          On exit, the factors L and U from the factorization */
/*          A = P*L*U; the unit diagonal elements of L are not stored. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,M). */

/*  IPIV    (output) INTEGER array, dimension (min(M,N)) */
/*          The pivot indices; for 1 <= i <= min(M,N), row i of the */
/*          matrix was interchanged with row IPIV(i). */

/*  INFO    (output) INTEGER */
/*          = 0: successful exit */
/*          < 0: if INFO = -k, the k-th argument had an illegal value */
/*          > 0: if INFO = k, U(k,k) is exactly zero. The factorization */
/*               has been completed, but the factor U is exactly */
/*               singular, and division by zero will occur if it is used */
/*               to solve a system of equations. */

/*  ===================================================================== */

/*     Test the input parameters. */

    /* Parameter adjustments for 1-based Fortran indexing. */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --ipiv;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
        *info = -1;
    } else if (*n < 0) {
        *info = -2;
    } else if (*lda < max(1,*m)) {
        *info = -4;
    }
    if (*info != 0) {
        i__1 = -(*info);
        xerbla_("CGETF2", &i__1);
        return 0;
    }

/*     Quick return if possible */
    if (*m == 0 || *n == 0) {
        return 0;
    }

/*     Compute machine safe minimum */
    sfmin = slamch_("S");

    i__1 = min(*m,*n);
    for (j = 1; j <= i__1; ++j) {

/*        Find pivot and test for singularity. */
        i__2 = *m - j + 1;
        jp = j - 1 + icamax_(&i__2, &a[j + j * a_dim1], &c__1);
        ipiv[j] = jp;
        i__2 = jp + j * a_dim1;
        if (a[i__2].r != 0.f || a[i__2].i != 0.f) {

/*           Apply the interchange to columns 1:N. */
            if (jp != j) {
                cswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda);
            }

/*           Compute elements J+1:M of J-th column. */
            if (j < *m) {
                if (c_abs(&a[j + j * a_dim1]) >= sfmin) {
                    /* Pivot is safely away from underflow: scale the
                       column by the reciprocal (one division, N-J
                       multiplies). */
                    i__2 = *m - j;
                    c_div(&q__1, &c_b1, &a[j + j * a_dim1]);
                    cscal_(&i__2, &q__1, &a[j + 1 + j * a_dim1], &c__1);
                } else {
                    /* Tiny pivot: dividing each element individually
                       avoids overflow in the precomputed reciprocal. */
                    i__2 = *m - j;
                    for (i__ = 1; i__ <= i__2; ++i__) {
                        i__3 = j + i__ + j * a_dim1;
                        c_div(&q__1, &a[j + i__ + j * a_dim1], &a[j + j *
                                a_dim1]);
                        a[i__3].r = q__1.r, a[i__3].i = q__1.i;
/* L20: */
                    }
                }
            }

        } else if (*info == 0) {
            /* Record the first exactly-zero pivot; factorization continues. */
            *info = j;
        }

        if (j < min(*m,*n)) {

/*           Update trailing submatrix: rank-1 update A22 -= l21 * u12'. */
            i__2 = *m - j;
            i__3 = *n - j;
            q__1.r = -1.f, q__1.i = -0.f;
            cgeru_(&i__2, &i__3, &q__1, &a[j + 1 + j * a_dim1], &c__1, &a[j +
                    (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], lda)
                    ;
        }
/* L10: */
    }
    return 0;

/*     End of CGETF2 */

} /* cgetf2_ */
/*
 * CLASYF: partial (panel) factorization of a complex *symmetric* matrix A
 * using the Bunch-Kaufman diagonal pivoting method.  At most NB columns are
 * factorized per call; the number actually factorized is returned in *kb,
 * and the workspace panel W holds the block product (W = U12*D for UPLO='U',
 * W = L21*D otherwise) used below to update the remaining triangle of A.
 *
 * Arguments (all by reference, f2c calling convention):
 *   uplo : "U" -> factorize trailing columns via the upper triangle;
 *          anything else -> leading columns via the lower triangle.
 *   n    : order of A.                nb : maximum panel width.
 *   kb   : OUT, number of columns factorized (set to n-k or k-1 below).
 *   a,lda: matrix A and its leading dimension (arrays are shifted below so
 *          that 1-based Fortran-style indexing works).
 *   ipiv : OUT, pivot indices; negative entries flag 2-by-2 pivot blocks.
 *   w,ldw: workspace panel and its leading dimension.
 *   info : OUT, 0 on success; set to k when column k is exactly zero
 *          (D is then exactly singular).
 *
 * NOTE(review): machine-translated (f2c) Fortran.  The goto-based control
 * flow and exact statement order mirror reference LAPACK CLASYF (v3.5.0)
 * and are load-bearing for numerical behavior -- do not restructure
 * without diffing against the netlib reference.
 */
/* Subroutine */ int clasyf_(char *uplo, integer *n, integer *nb, integer *kb, complex *a, integer *lda, integer *ipiv, complex *w, integer *ldw, integer *info) { /* System generated locals */ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3, i__4, i__5; real r__1, r__2, r__3, r__4; complex q__1, q__2, q__3; /* Builtin functions */ double sqrt(doublereal), r_imag(complex *); void c_div(complex *, complex *, complex *); /* Local variables */ integer j, k; complex t, r1, d11, d21, d22; integer jb, jj, kk, jp, kp, kw, kkw, imax, jmax; real alpha; extern /* Subroutine */ int cscal_(integer *, complex *, complex *, integer *), cgemm_(char *, char *, integer *, integer *, integer * , complex *, complex *, integer *, complex *, integer *, complex * , complex *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex * , complex *, integer *, complex *, integer *, complex *, complex * , integer *), ccopy_(integer *, complex *, integer *, complex *, integer *), cswap_(integer *, complex *, integer *, complex *, integer *); integer kstep; real absakk; extern integer icamax_(integer *, complex *, integer *); real colmax, rowmax; /* -- LAPACK computational routine (version 3.5.0) -- */ /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */ /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */ /* November 2013 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Statement Functions .. */ /* .. */ /* .. Statement Function definitions .. */ /* .. */ /* .. Executable Statements .. 
*/ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; --ipiv; w_dim1 = *ldw; w_offset = 1 + w_dim1; w -= w_offset; /* Function Body */ *info = 0; /* Initialize ALPHA for use in choosing pivot block size. */ /* NOTE(review): (1+sqrt(17))/8 is the standard Bunch-Kaufman threshold. */ alpha = (sqrt(17.f) + 1.f) / 8.f; if (lsame_(uplo, "U")) { /* Factorize the trailing columns of A using the upper triangle */ /* of A and working backwards, and compute the matrix W = U12*D */ /* for use in updating A11 */ /* K is the main loop index, decreasing from N in steps of 1 or 2 */ /* KW is the column of W which corresponds to column K of A */ k = *n; L10: kw = *nb + k - *n; /* Exit from loop */ if (k <= *n - *nb + 1 && *nb < *n || k < 1) { goto L30; } /* Copy column K of A to column KW of W and update it */ ccopy_(&k, &a[k * a_dim1 + 1], &c__1, &w[kw * w_dim1 + 1], &c__1); if (k < *n) { i__1 = *n - k; q__1.r = -1.f; q__1.i = -0.f; // , expr subst cgemv_("No transpose", &k, &i__1, &q__1, &a[(k + 1) * a_dim1 + 1], lda, &w[k + (kw + 1) * w_dim1], ldw, &c_b1, &w[kw * w_dim1 + 1], &c__1); } kstep = 1; /* Determine rows and columns to be interchanged and whether */ /* a 1-by-1 or 2-by-2 pivot block will be used */ i__1 = k + kw * w_dim1; absakk = (r__1 = w[i__1].r, f2c_abs(r__1)) + (r__2 = r_imag(&w[k + kw * w_dim1]), f2c_abs(r__2)); /* IMAX is the row-index of the largest off-diagonal element in */ /* column K, and COLMAX is its absolute value. */ /* Determine both COLMAX and IMAX. 
*/ if (k > 1) { i__1 = k - 1; imax = icamax_(&i__1, &w[kw * w_dim1 + 1], &c__1); i__1 = imax + kw * w_dim1; colmax = (r__1 = w[i__1].r, f2c_abs(r__1)) + (r__2 = r_imag(&w[imax + kw * w_dim1]), f2c_abs(r__2)); } else { colmax = 0.f; } if (max(absakk,colmax) == 0.f) { /* Column K is zero or underflow: set INFO and continue */ if (*info == 0) { *info = k; } kp = k; } else { if (absakk >= alpha * colmax) { /* no interchange, use 1-by-1 pivot block */ kp = k; } else { /* Copy column IMAX to column KW-1 of W and update it */ ccopy_(&imax, &a[imax * a_dim1 + 1], &c__1, &w[(kw - 1) * w_dim1 + 1], &c__1); i__1 = k - imax; ccopy_(&i__1, &a[imax + (imax + 1) * a_dim1], lda, &w[imax + 1 + (kw - 1) * w_dim1], &c__1); if (k < *n) { i__1 = *n - k; q__1.r = -1.f; q__1.i = -0.f; // , expr subst cgemv_("No transpose", &k, &i__1, &q__1, &a[(k + 1) * a_dim1 + 1], lda, &w[imax + (kw + 1) * w_dim1], ldw, &c_b1, &w[(kw - 1) * w_dim1 + 1], &c__1); } /* JMAX is the column-index of the largest off-diagonal */ /* element in row IMAX, and ROWMAX is its absolute value */ i__1 = k - imax; jmax = imax + icamax_(&i__1, &w[imax + 1 + (kw - 1) * w_dim1], &c__1); i__1 = jmax + (kw - 1) * w_dim1; rowmax = (r__1 = w[i__1].r, f2c_abs(r__1)) + (r__2 = r_imag(&w[ jmax + (kw - 1) * w_dim1]), f2c_abs(r__2)); if (imax > 1) { i__1 = imax - 1; jmax = icamax_(&i__1, &w[(kw - 1) * w_dim1 + 1], &c__1); /* Computing MAX */ i__1 = jmax + (kw - 1) * w_dim1; r__3 = rowmax; r__4 = (r__1 = w[i__1].r, f2c_abs(r__1)) + ( r__2 = r_imag(&w[jmax + (kw - 1) * w_dim1]), f2c_abs( r__2)); // , expr subst rowmax = max(r__3,r__4); } if (absakk >= alpha * colmax * (colmax / rowmax)) { /* no interchange, use 1-by-1 pivot block */ kp = k; } else /* if(complicated condition) */ { i__1 = imax + (kw - 1) * w_dim1; if ((r__1 = w[i__1].r, f2c_abs(r__1)) + (r__2 = r_imag(&w[ imax + (kw - 1) * w_dim1]), f2c_abs(r__2)) >= alpha * rowmax) { /* interchange rows and columns K and IMAX, use 1-by-1 */ /* pivot block */ kp = imax; /* copy column 
KW-1 of W to column KW of W */ ccopy_(&k, &w[(kw - 1) * w_dim1 + 1], &c__1, &w[kw * w_dim1 + 1], &c__1); } else { /* interchange rows and columns K-1 and IMAX, use 2-by-2 */ /* pivot block */ kp = imax; kstep = 2; } } } /* ============================================================ */ /* KK is the column of A where pivoting step stopped */ kk = k - kstep + 1; /* KKW is the column of W which corresponds to column KK of A */ kkw = *nb + kk - *n; /* Interchange rows and columns KP and KK. */ /* Updated column KP is already stored in column KKW of W. */ if (kp != kk) { /* Copy non-updated column KK to column KP of submatrix A */ /* at step K. No need to copy element into column K */ /* (or K and K-1 for 2-by-2 pivot) of A, since these columns */ /* will be later overwritten. */ i__1 = kp + kp * a_dim1; i__2 = kk + kk * a_dim1; a[i__1].r = a[i__2].r; a[i__1].i = a[i__2].i; // , expr subst i__1 = kk - 1 - kp; ccopy_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + (kp + 1) * a_dim1], lda); if (kp > 1) { i__1 = kp - 1; ccopy_(&i__1, &a[kk * a_dim1 + 1], &c__1, &a[kp * a_dim1 + 1], &c__1); } /* Interchange rows KK and KP in last K+1 to N columns of A */ /* (columns K (or K and K-1 for 2-by-2 pivot) of A will be */ /* later overwritten). Interchange rows KK and KP */ /* in last KKW to NB columns of W. */ if (k < *n) { i__1 = *n - k; cswap_(&i__1, &a[kk + (k + 1) * a_dim1], lda, &a[kp + (k + 1) * a_dim1], lda); } i__1 = *n - kk + 1; cswap_(&i__1, &w[kk + kkw * w_dim1], ldw, &w[kp + kkw * w_dim1], ldw); } if (kstep == 1) { /* 1-by-1 pivot block D(k): column kw of W now holds */ /* W(kw) = U(k)*D(k), */ /* where U(k) is the k-th column of U */ /* Store subdiag. elements of column U(k) */ /* and 1-by-1 block D(k) in column k of A. */ /* NOTE: Diagonal element U(k,k) is a UNIT element */ /* and not stored. 
*/ /* A(k,k) := D(k,k) = W(k,kw) */ /* A(1:k-1,k) := U(1:k-1,k) = W(1:k-1,kw)/D(k,k) */ ccopy_(&k, &w[kw * w_dim1 + 1], &c__1, &a[k * a_dim1 + 1], & c__1); c_div(&q__1, &c_b1, &a[k + k * a_dim1]); r1.r = q__1.r; r1.i = q__1.i; // , expr subst i__1 = k - 1; cscal_(&i__1, &r1, &a[k * a_dim1 + 1], &c__1); } else { /* 2-by-2 pivot block D(k): columns kw and kw-1 of W now hold */ /* ( W(kw-1) W(kw) ) = ( U(k-1) U(k) )*D(k) */ /* where U(k) and U(k-1) are the k-th and (k-1)-th columns */ /* of U */ /* Store U(1:k-2,k-1) and U(1:k-2,k) and 2-by-2 */ /* block D(k-1:k,k-1:k) in columns k-1 and k of A. */ /* NOTE: 2-by-2 diagonal block U(k-1:k,k-1:k) is a UNIT */ /* block and not stored. */ /* A(k-1:k,k-1:k) := D(k-1:k,k-1:k) = W(k-1:k,kw-1:kw) */ /* A(1:k-2,k-1:k) := U(1:k-2,k:k-1:k) = */ /* = W(1:k-2,kw-1:kw) * ( D(k-1:k,k-1:k)**(-1) ) */ if (k > 2) { /* Compose the columns of the inverse of 2-by-2 pivot */ /* block D in the following way to reduce the number */ /* of FLOPS when we myltiply panel ( W(kw-1) W(kw) ) by */ /* this inverse */ /* D**(-1) = ( d11 d21 )**(-1) = */ /* ( d21 d22 ) */ /* = 1/(d11*d22-d21**2) * ( ( d22 ) (-d21 ) ) = */ /* ( (-d21 ) ( d11 ) ) */ /* = 1/d21 * 1/((d11/d21)*(d22/d21)-1) * */ /* * ( ( d22/d21 ) ( -1 ) ) = */ /* ( ( -1 ) ( d11/d21 ) ) */ /* = 1/d21 * 1/(D22*D11-1) * ( ( D11 ) ( -1 ) ) = */ /* ( ( -1 ) ( D22 ) ) */ /* = 1/d21 * T * ( ( D11 ) ( -1 ) ) */ /* ( ( -1 ) ( D22 ) ) */ /* = D21 * ( ( D11 ) ( -1 ) ) */ /* ( ( -1 ) ( D22 ) ) */ i__1 = k - 1 + kw * w_dim1; d21.r = w[i__1].r; d21.i = w[i__1].i; // , expr subst c_div(&q__1, &w[k + kw * w_dim1], &d21); d11.r = q__1.r; d11.i = q__1.i; // , expr subst c_div(&q__1, &w[k - 1 + (kw - 1) * w_dim1], &d21); d22.r = q__1.r; d22.i = q__1.i; // , expr subst q__3.r = d11.r * d22.r - d11.i * d22.i; q__3.i = d11.r * d22.i + d11.i * d22.r; // , expr subst q__2.r = q__3.r - 1.f; q__2.i = q__3.i - 0.f; // , expr subst c_div(&q__1, &c_b1, &q__2); t.r = q__1.r; t.i = q__1.i; // , expr subst /* Update 
elements in columns A(k-1) and A(k) as */ /* dot products of rows of ( W(kw-1) W(kw) ) and columns */ /* of D**(-1) */ c_div(&q__1, &t, &d21); d21.r = q__1.r; d21.i = q__1.i; // , expr subst i__1 = k - 2; for (j = 1; j <= i__1; ++j) { i__2 = j + (k - 1) * a_dim1; i__3 = j + (kw - 1) * w_dim1; q__3.r = d11.r * w[i__3].r - d11.i * w[i__3].i; q__3.i = d11.r * w[i__3].i + d11.i * w[i__3] .r; // , expr subst i__4 = j + kw * w_dim1; q__2.r = q__3.r - w[i__4].r; q__2.i = q__3.i - w[i__4] .i; // , expr subst q__1.r = d21.r * q__2.r - d21.i * q__2.i; q__1.i = d21.r * q__2.i + d21.i * q__2.r; // , expr subst a[i__2].r = q__1.r; a[i__2].i = q__1.i; // , expr subst i__2 = j + k * a_dim1; i__3 = j + kw * w_dim1; q__3.r = d22.r * w[i__3].r - d22.i * w[i__3].i; q__3.i = d22.r * w[i__3].i + d22.i * w[i__3] .r; // , expr subst i__4 = j + (kw - 1) * w_dim1; q__2.r = q__3.r - w[i__4].r; q__2.i = q__3.i - w[i__4] .i; // , expr subst q__1.r = d21.r * q__2.r - d21.i * q__2.i; q__1.i = d21.r * q__2.i + d21.i * q__2.r; // , expr subst a[i__2].r = q__1.r; a[i__2].i = q__1.i; // , expr subst /* L20: */ } } /* Copy D(k) to A */ i__1 = k - 1 + (k - 1) * a_dim1; i__2 = k - 1 + (kw - 1) * w_dim1; a[i__1].r = w[i__2].r; a[i__1].i = w[i__2].i; // , expr subst i__1 = k - 1 + k * a_dim1; i__2 = k - 1 + kw * w_dim1; a[i__1].r = w[i__2].r; a[i__1].i = w[i__2].i; // , expr subst i__1 = k + k * a_dim1; i__2 = k + kw * w_dim1; a[i__1].r = w[i__2].r; a[i__1].i = w[i__2].i; // , expr subst } } /* Store details of the interchanges in IPIV */ if (kstep == 1) { ipiv[k] = kp; } else { ipiv[k] = -kp; ipiv[k - 1] = -kp; } /* Decrease K and return to the start of the main loop */ k -= kstep; goto L10; L30: /* Update the upper triangle of A11 (= A(1:k,1:k)) as */ /* A11 := A11 - U12*D*U12**T = A11 - U12*W**T */ /* computing blocks of NB columns at a time */ i__1 = -(*nb); for (j = (k - 1) / *nb * *nb + 1; i__1 < 0 ? 
j >= 1 : j <= 1; j += i__1) { /* Computing MIN */ i__2 = *nb; i__3 = k - j + 1; // , expr subst jb = min(i__2,i__3); /* Update the upper triangle of the diagonal block */ i__2 = j + jb - 1; for (jj = j; jj <= i__2; ++jj) { i__3 = jj - j + 1; i__4 = *n - k; q__1.r = -1.f; q__1.i = -0.f; // , expr subst cgemv_("No transpose", &i__3, &i__4, &q__1, &a[j + (k + 1) * a_dim1], lda, &w[jj + (kw + 1) * w_dim1], ldw, &c_b1, &a[j + jj * a_dim1], &c__1); /* L40: */ } /* Update the rectangular superdiagonal block */ i__2 = j - 1; i__3 = *n - k; q__1.r = -1.f; q__1.i = -0.f; // , expr subst cgemm_("No transpose", "Transpose", &i__2, &jb, &i__3, &q__1, &a[( k + 1) * a_dim1 + 1], lda, &w[j + (kw + 1) * w_dim1], ldw, &c_b1, &a[j * a_dim1 + 1], lda); /* L50: */ } /* Put U12 in standard form by partially undoing the interchanges */ /* in columns k+1:n looping backwards from k+1 to n */ j = k + 1; L60: /* Undo the interchanges (if any) of rows JJ and JP at each */ /* step J */ /* (Here, J is a diagonal index) */ jj = j; jp = ipiv[j]; if (jp < 0) { jp = -jp; /* (Here, J is a diagonal index) */ ++j; } /* (NOTE: Here, J is used to determine row length. 
Length N-J+1 */ /* of the rows to swap back doesn't include diagonal element) */ ++j; if (jp != jj && j <= *n) { i__1 = *n - j + 1; cswap_(&i__1, &a[jp + j * a_dim1], lda, &a[jj + j * a_dim1], lda); } if (j < *n) { goto L60; } /* Set KB to the number of columns factorized */ *kb = *n - k; } else { /* Factorize the leading columns of A using the lower triangle */ /* of A and working forwards, and compute the matrix W = L21*D */ /* for use in updating A22 */ /* K is the main loop index, increasing from 1 in steps of 1 or 2 */ k = 1; L70: /* Exit from loop */ if (k >= *nb && *nb < *n || k > *n) { goto L90; } /* Copy column K of A to column K of W and update it */ i__1 = *n - k + 1; ccopy_(&i__1, &a[k + k * a_dim1], &c__1, &w[k + k * w_dim1], &c__1); i__1 = *n - k + 1; i__2 = k - 1; q__1.r = -1.f; q__1.i = -0.f; // , expr subst cgemv_("No transpose", &i__1, &i__2, &q__1, &a[k + a_dim1], lda, &w[k + w_dim1], ldw, &c_b1, &w[k + k * w_dim1], &c__1); kstep = 1; /* Determine rows and columns to be interchanged and whether */ /* a 1-by-1 or 2-by-2 pivot block will be used */ i__1 = k + k * w_dim1; absakk = (r__1 = w[i__1].r, f2c_abs(r__1)) + (r__2 = r_imag(&w[k + k * w_dim1]), f2c_abs(r__2)); /* IMAX is the row-index of the largest off-diagonal element in */ /* column K, and COLMAX is its absolute value. */ /* Determine both COLMAX and IMAX. 
*/ if (k < *n) { i__1 = *n - k; imax = k + icamax_(&i__1, &w[k + 1 + k * w_dim1], &c__1); i__1 = imax + k * w_dim1; colmax = (r__1 = w[i__1].r, f2c_abs(r__1)) + (r__2 = r_imag(&w[imax + k * w_dim1]), f2c_abs(r__2)); } else { colmax = 0.f; } if (max(absakk,colmax) == 0.f) { /* Column K is zero or underflow: set INFO and continue */ if (*info == 0) { *info = k; } kp = k; } else { if (absakk >= alpha * colmax) { /* no interchange, use 1-by-1 pivot block */ kp = k; } else { /* Copy column IMAX to column K+1 of W and update it */ i__1 = imax - k; ccopy_(&i__1, &a[imax + k * a_dim1], lda, &w[k + (k + 1) * w_dim1], &c__1); i__1 = *n - imax + 1; ccopy_(&i__1, &a[imax + imax * a_dim1], &c__1, &w[imax + (k + 1) * w_dim1], &c__1); i__1 = *n - k + 1; i__2 = k - 1; q__1.r = -1.f; q__1.i = -0.f; // , expr subst cgemv_("No transpose", &i__1, &i__2, &q__1, &a[k + a_dim1], lda, &w[imax + w_dim1], ldw, &c_b1, &w[k + (k + 1) * w_dim1], &c__1); /* JMAX is the column-index of the largest off-diagonal */ /* element in row IMAX, and ROWMAX is its absolute value */ i__1 = imax - k; jmax = k - 1 + icamax_(&i__1, &w[k + (k + 1) * w_dim1], &c__1) ; i__1 = jmax + (k + 1) * w_dim1; rowmax = (r__1 = w[i__1].r, f2c_abs(r__1)) + (r__2 = r_imag(&w[ jmax + (k + 1) * w_dim1]), f2c_abs(r__2)); if (imax < *n) { i__1 = *n - imax; jmax = imax + icamax_(&i__1, &w[imax + 1 + (k + 1) * w_dim1], &c__1); /* Computing MAX */ i__1 = jmax + (k + 1) * w_dim1; r__3 = rowmax; r__4 = (r__1 = w[i__1].r, f2c_abs(r__1)) + ( r__2 = r_imag(&w[jmax + (k + 1) * w_dim1]), f2c_abs( r__2)); // , expr subst rowmax = max(r__3,r__4); } if (absakk >= alpha * colmax * (colmax / rowmax)) { /* no interchange, use 1-by-1 pivot block */ kp = k; } else /* if(complicated condition) */ { i__1 = imax + (k + 1) * w_dim1; if ((r__1 = w[i__1].r, f2c_abs(r__1)) + (r__2 = r_imag(&w[ imax + (k + 1) * w_dim1]), f2c_abs(r__2)) >= alpha * rowmax) { /* interchange rows and columns K and IMAX, use 1-by-1 */ /* pivot block */ kp = imax; /* copy 
column K+1 of W to column K of W */ i__1 = *n - k + 1; ccopy_(&i__1, &w[k + (k + 1) * w_dim1], &c__1, &w[k + k * w_dim1], &c__1); } else { /* interchange rows and columns K+1 and IMAX, use 2-by-2 */ /* pivot block */ kp = imax; kstep = 2; } } } /* ============================================================ */ /* KK is the column of A where pivoting step stopped */ kk = k + kstep - 1; /* Interchange rows and columns KP and KK. */ /* Updated column KP is already stored in column KK of W. */ if (kp != kk) { /* Copy non-updated column KK to column KP of submatrix A */ /* at step K. No need to copy element into column K */ /* (or K and K+1 for 2-by-2 pivot) of A, since these columns */ /* will be later overwritten. */ i__1 = kp + kp * a_dim1; i__2 = kk + kk * a_dim1; a[i__1].r = a[i__2].r; a[i__1].i = a[i__2].i; // , expr subst i__1 = kp - kk - 1; ccopy_(&i__1, &a[kk + 1 + kk * a_dim1], &c__1, &a[kp + (kk + 1) * a_dim1], lda); if (kp < *n) { i__1 = *n - kp; ccopy_(&i__1, &a[kp + 1 + kk * a_dim1], &c__1, &a[kp + 1 + kp * a_dim1], &c__1); } /* Interchange rows KK and KP in first K-1 columns of A */ /* (columns K (or K and K+1 for 2-by-2 pivot) of A will be */ /* later overwritten). Interchange rows KK and KP */ /* in first KK columns of W. */ if (k > 1) { i__1 = k - 1; cswap_(&i__1, &a[kk + a_dim1], lda, &a[kp + a_dim1], lda); } cswap_(&kk, &w[kk + w_dim1], ldw, &w[kp + w_dim1], ldw); } if (kstep == 1) { /* 1-by-1 pivot block D(k): column k of W now holds */ /* W(k) = L(k)*D(k), */ /* where L(k) is the k-th column of L */ /* Store subdiag. elements of column L(k) */ /* and 1-by-1 block D(k) in column k of A. 
*/ /* (NOTE: Diagonal element L(k,k) is a UNIT element */ /* and not stored) */ /* A(k,k) := D(k,k) = W(k,k) */ /* A(k+1:N,k) := L(k+1:N,k) = W(k+1:N,k)/D(k,k) */ i__1 = *n - k + 1; ccopy_(&i__1, &w[k + k * w_dim1], &c__1, &a[k + k * a_dim1], & c__1); if (k < *n) { c_div(&q__1, &c_b1, &a[k + k * a_dim1]); r1.r = q__1.r; r1.i = q__1.i; // , expr subst i__1 = *n - k; cscal_(&i__1, &r1, &a[k + 1 + k * a_dim1], &c__1); } } else { /* 2-by-2 pivot block D(k): columns k and k+1 of W now hold */ /* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */ /* where L(k) and L(k+1) are the k-th and (k+1)-th columns */ /* of L */ /* Store L(k+2:N,k) and L(k+2:N,k+1) and 2-by-2 */ /* block D(k:k+1,k:k+1) in columns k and k+1 of A. */ /* (NOTE: 2-by-2 diagonal block L(k:k+1,k:k+1) is a UNIT */ /* block and not stored) */ /* A(k:k+1,k:k+1) := D(k:k+1,k:k+1) = W(k:k+1,k:k+1) */ /* A(k+2:N,k:k+1) := L(k+2:N,k:k+1) = */ /* = W(k+2:N,k:k+1) * ( D(k:k+1,k:k+1)**(-1) ) */ if (k < *n - 1) { /* Compose the columns of the inverse of 2-by-2 pivot */ /* block D in the following way to reduce the number */ /* of FLOPS when we myltiply panel ( W(k) W(k+1) ) by */ /* this inverse */ /* D**(-1) = ( d11 d21 )**(-1) = */ /* ( d21 d22 ) */ /* = 1/(d11*d22-d21**2) * ( ( d22 ) (-d21 ) ) = */ /* ( (-d21 ) ( d11 ) ) */ /* = 1/d21 * 1/((d11/d21)*(d22/d21)-1) * */ /* * ( ( d22/d21 ) ( -1 ) ) = */ /* ( ( -1 ) ( d11/d21 ) ) */ /* = 1/d21 * 1/(D22*D11-1) * ( ( D11 ) ( -1 ) ) = */ /* ( ( -1 ) ( D22 ) ) */ /* = 1/d21 * T * ( ( D11 ) ( -1 ) ) */ /* ( ( -1 ) ( D22 ) ) */ /* = D21 * ( ( D11 ) ( -1 ) ) */ /* ( ( -1 ) ( D22 ) ) */ i__1 = k + 1 + k * w_dim1; d21.r = w[i__1].r; d21.i = w[i__1].i; // , expr subst c_div(&q__1, &w[k + 1 + (k + 1) * w_dim1], &d21); d11.r = q__1.r; d11.i = q__1.i; // , expr subst c_div(&q__1, &w[k + k * w_dim1], &d21); d22.r = q__1.r; d22.i = q__1.i; // , expr subst q__3.r = d11.r * d22.r - d11.i * d22.i; q__3.i = d11.r * d22.i + d11.i * d22.r; // , expr subst q__2.r = q__3.r - 1.f; q__2.i = q__3.i - 
0.f; // , expr subst c_div(&q__1, &c_b1, &q__2); t.r = q__1.r; t.i = q__1.i; // , expr subst c_div(&q__1, &t, &d21); d21.r = q__1.r; d21.i = q__1.i; // , expr subst /* Update elements in columns A(k) and A(k+1) as */ /* dot products of rows of ( W(k) W(k+1) ) and columns */ /* of D**(-1) */ i__1 = *n; for (j = k + 2; j <= i__1; ++j) { i__2 = j + k * a_dim1; i__3 = j + k * w_dim1; q__3.r = d11.r * w[i__3].r - d11.i * w[i__3].i; q__3.i = d11.r * w[i__3].i + d11.i * w[i__3] .r; // , expr subst i__4 = j + (k + 1) * w_dim1; q__2.r = q__3.r - w[i__4].r; q__2.i = q__3.i - w[i__4] .i; // , expr subst q__1.r = d21.r * q__2.r - d21.i * q__2.i; q__1.i = d21.r * q__2.i + d21.i * q__2.r; // , expr subst a[i__2].r = q__1.r; a[i__2].i = q__1.i; // , expr subst i__2 = j + (k + 1) * a_dim1; i__3 = j + (k + 1) * w_dim1; q__3.r = d22.r * w[i__3].r - d22.i * w[i__3].i; q__3.i = d22.r * w[i__3].i + d22.i * w[i__3] .r; // , expr subst i__4 = j + k * w_dim1; q__2.r = q__3.r - w[i__4].r; q__2.i = q__3.i - w[i__4] .i; // , expr subst q__1.r = d21.r * q__2.r - d21.i * q__2.i; q__1.i = d21.r * q__2.i + d21.i * q__2.r; // , expr subst a[i__2].r = q__1.r; a[i__2].i = q__1.i; // , expr subst /* L80: */ } } /* Copy D(k) to A */ i__1 = k + k * a_dim1; i__2 = k + k * w_dim1; a[i__1].r = w[i__2].r; a[i__1].i = w[i__2].i; // , expr subst i__1 = k + 1 + k * a_dim1; i__2 = k + 1 + k * w_dim1; a[i__1].r = w[i__2].r; a[i__1].i = w[i__2].i; // , expr subst i__1 = k + 1 + (k + 1) * a_dim1; i__2 = k + 1 + (k + 1) * w_dim1; a[i__1].r = w[i__2].r; a[i__1].i = w[i__2].i; // , expr subst } } /* Store details of the interchanges in IPIV */ if (kstep == 1) { ipiv[k] = kp; } else { ipiv[k] = -kp; ipiv[k + 1] = -kp; } /* Increase K and return to the start of the main loop */ k += kstep; goto L70; L90: /* Update the lower triangle of A22 (= A(k:n,k:n)) as */ /* A22 := A22 - L21*D*L21**T = A22 - L21*W**T */ /* computing blocks of NB columns at a time */ i__1 = *n; i__2 = *nb; for (j = k; i__2 < 0 ? 
j >= i__1 : j <= i__1; j += i__2) { /* Computing MIN */ i__3 = *nb; i__4 = *n - j + 1; // , expr subst jb = min(i__3,i__4); /* Update the lower triangle of the diagonal block */ i__3 = j + jb - 1; for (jj = j; jj <= i__3; ++jj) { i__4 = j + jb - jj; i__5 = k - 1; q__1.r = -1.f; q__1.i = -0.f; // , expr subst cgemv_("No transpose", &i__4, &i__5, &q__1, &a[jj + a_dim1], lda, &w[jj + w_dim1], ldw, &c_b1, &a[jj + jj * a_dim1] , &c__1); /* L100: */ } /* Update the rectangular subdiagonal block */ if (j + jb <= *n) { i__3 = *n - j - jb + 1; i__4 = k - 1; q__1.r = -1.f; q__1.i = -0.f; // , expr subst cgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &q__1, &a[j + jb + a_dim1], lda, &w[j + w_dim1], ldw, &c_b1, &a[j + jb + j * a_dim1], lda); } /* L110: */ } /* Put L21 in standard form by partially undoing the interchanges */ /* of rows in columns 1:k-1 looping backwards from k-1 to 1 */ j = k - 1; L120: /* Undo the interchanges (if any) of rows JJ and JP at each */ /* step J */ /* (Here, J is a diagonal index) */ jj = j; jp = ipiv[j]; if (jp < 0) { jp = -jp; /* (Here, J is a diagonal index) */ --j; } /* (NOTE: Here, J is used to determine row length. Length J */ /* of the rows to swap back doesn't include diagonal element) */ --j; if (jp != jj && j >= 1) { cswap_(&j, &a[jp + a_dim1], lda, &a[jj + a_dim1], lda); } if (j > 1) { goto L120; } /* Set KB to the number of columns factorized */ *kb = k - 1; } return 0; /* End of CLASYF */ }
/*
 * CGBTRS: solves A*X = B, A**T*X = B, or A**H*X = B for a general band
 * matrix A, using the LU factorization previously computed by CGBTRF.
 * Full argument documentation is in the translated Fortran comments below.
 *
 * NOTE(review): the helper routines called here (lsame_, xerbla_, cswap_,
 * cgeru_, ctbsv_, cgemv_, clacgv_) and the constants c__1 / c_b1 are not
 * declared in this excerpt; presumably they come from a shared f2c/LAPACK
 * header in this translation unit -- verify before refactoring.
 */
/* Subroutine */ int cgbtrs_(char *trans, integer *n, integer *kl, integer * ku, integer *nrhs, complex *ab, integer *ldab, integer *ipiv, complex *b, integer *ldb, integer *info) { /* System generated locals */ integer ab_dim1, ab_offset, b_dim1, b_offset, i__1, i__2, i__3; complex q__1; /* Local variables */ integer i__, j, l, kd, lm; logical lnoti; logical notran; /* -- LAPACK routine (version 3.2) -- */ /* November 2006 */ /* Purpose */ /* ======= */ /* CGBTRS solves a system of linear equations */ /* A * X = B, A**T * X = B, or A**H * X = B */ /* with a general band matrix A using the LU factorization computed */ /* by CGBTRF. */ /* Arguments */ /* ========= */ /* TRANS (input) CHARACTER*1 */ /* Specifies the form of the system of equations. */ /* = 'N': A * X = B (No transpose) */ /* = 'T': A**T * X = B (Transpose) */ /* = 'C': A**H * X = B (Conjugate transpose) */ /* N (input) INTEGER */ /* The order of the matrix A. N >= 0. */ /* KL (input) INTEGER */ /* The number of subdiagonals within the band of A. KL >= 0. */ /* KU (input) INTEGER */ /* The number of superdiagonals within the band of A. KU >= 0. */ /* NRHS (input) INTEGER */ /* The number of right hand sides, i.e., the number of columns */ /* of the matrix B. NRHS >= 0. */ /* AB (input) COMPLEX array, dimension (LDAB,N) */ /* Details of the LU factorization of the band matrix A, as */ /* computed by CGBTRF. U is stored as an upper triangular band */ /* matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and */ /* the multipliers used during the factorization are stored in */ /* rows KL+KU+2 to 2*KL+KU+1. */ /* LDAB (input) INTEGER */ /* The leading dimension of the array AB. LDAB >= 2*KL+KU+1. */ /* IPIV (input) INTEGER array, dimension (N) */ /* The pivot indices; for 1 <= i <= N, row i of the matrix was */ /* interchanged with row IPIV(i). */ /* B (input/output) COMPLEX array, dimension (LDB,NRHS) */ /* On entry, the right hand side matrix B. */ /* On exit, the solution matrix X. 
*/ /* LDB (input) INTEGER */ /* The leading dimension of the array B. LDB >= max(1,N). */ /* INFO (output) INTEGER */ /* = 0: successful exit */ /* < 0: if INFO = -i, the i-th argument had an illegal value */ /* ===================================================================== */ /* Test the input parameters. */ /* Parameter adjustments */ ab_dim1 = *ldab; ab_offset = 1 + ab_dim1; ab -= ab_offset; --ipiv; b_dim1 = *ldb; b_offset = 1 + b_dim1; b -= b_offset; /* Function Body */ *info = 0; notran = lsame_(trans, "N"); if (! notran && ! lsame_(trans, "T") && ! lsame_( trans, "C")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*kl < 0) { *info = -3; } else if (*ku < 0) { *info = -4; } else if (*nrhs < 0) { *info = -5; } else if (*ldab < (*kl << 1) + *ku + 1) { *info = -7; } else if (*ldb < max(1,*n)) { *info = -10; } if (*info != 0) { i__1 = -(*info); xerbla_("CGBTRS", &i__1); return 0; } /* Quick return if possible */ if (*n == 0 || *nrhs == 0) { return 0; } /* KD is the row of AB holding the main diagonal of U. */ kd = *ku + *kl + 1; lnoti = *kl > 0; if (notran) { /* Solve A*X = B. */ /* Solve L*X = B, overwriting B with X. */ /* L is represented as a product of permutations and unit lower */ /* where each transformation L(i) is a rank-one modification of */ /* the identity matrix. */ if (lnoti) { i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__2 = *kl, i__3 = *n - j; lm = min(i__2,i__3); l = ipiv[j]; if (l != j) { cswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb); } q__1.r = -1.f, q__1.i = -0.f; cgeru_(&lm, nrhs, &q__1, &ab[kd + 1 + j * ab_dim1], &c__1, &b[ j + b_dim1], ldb, &b[j + 1 + b_dim1], ldb); } } i__1 = *nrhs; for (i__ = 1; i__ <= i__1; ++i__) { /* Solve U*X = B, overwriting B with X. */ i__2 = *kl + *ku; ctbsv_("Upper", "No transpose", "Non-unit", n, &i__2, &ab[ ab_offset], ldab, &b[i__ * b_dim1 + 1], &c__1); } } else if (lsame_(trans, "T")) { /* Solve A**T * X = B. */ i__1 = *nrhs; for (i__ = 1; i__ <= i__1; ++i__) { /* Solve U**T * X = B, overwriting B with X. 
*/ i__2 = *kl + *ku; ctbsv_("Upper", "Transpose", "Non-unit", n, &i__2, &ab[ab_offset], ldab, &b[i__ * b_dim1 + 1], &c__1); } /* Solve L**T * X = B, overwriting B with X. */ if (lnoti) { for (j = *n - 1; j >= 1; --j) { /* Computing MIN */ i__1 = *kl, i__2 = *n - j; lm = min(i__1,i__2); q__1.r = -1.f, q__1.i = -0.f; cgemv_("Transpose", &lm, nrhs, &q__1, &b[j + 1 + b_dim1], ldb, &ab[kd + 1 + j * ab_dim1], &c__1, &c_b1, &b[j + b_dim1], ldb); l = ipiv[j]; if (l != j) { cswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb); } } } } else { /* Solve A**H * X = B. */ i__1 = *nrhs; for (i__ = 1; i__ <= i__1; ++i__) { /* Solve U**H * X = B, overwriting B with X. */ i__2 = *kl + *ku; ctbsv_("Upper", "Conjugate transpose", "Non-unit", n, &i__2, &ab[ ab_offset], ldab, &b[i__ * b_dim1 + 1], &c__1); } /* Solve L**H * X = B, overwriting B with X. */ /* The row of B is conjugated before and after the gemv so that */ /* the non-conjugating CGEMV effectively applies L**H. */ if (lnoti) { for (j = *n - 1; j >= 1; --j) { /* Computing MIN */ i__1 = *kl, i__2 = *n - j; lm = min(i__1,i__2); clacgv_(nrhs, &b[j + b_dim1], ldb); q__1.r = -1.f, q__1.i = -0.f; cgemv_("Conjugate transpose", &lm, nrhs, &q__1, &b[j + 1 + b_dim1], ldb, &ab[kd + 1 + j * ab_dim1], &c__1, &c_b1, &b[j + b_dim1], ldb); clacgv_(nrhs, &b[j + b_dim1], ldb); l = ipiv[j]; if (l != j) { cswap_(nrhs, &b[l + b_dim1], ldb, &b[j + b_dim1], ldb); } } } } return 0; /* End of CGBTRS */ } /* cgbtrs_ */
/*
 * CGBTF2: computes the LU factorization of a complex M-by-N band matrix A
 * using partial pivoting with row interchanges (unblocked level-2 version).
 *
 * Arguments (all by reference, f2c calling convention):
 *   m, n   : matrix dimensions, both >= 0 (enforced below via xerbla).
 *   kl, ku : number of sub-/super-diagonals in the band, both >= 0.
 *   ab,ldab: band storage of A; LDAB >= 2*KL+KU+1 is required because row
 *            interchanges create up to KV = KU+KL superdiagonals of fill-in
 *            in the factor U (see the KV computation below).
 *   ipiv   : OUT, pivot indices; row j was interchanged with row IPIV(j).
 *   info   : OUT, 0 on success; -i for an illegal argument i; j > 0 if
 *            U(j,j) is exactly zero (factorization completed anyway).
 *
 * NOTE(review): f2c-translated Fortran; the band-index arithmetic
 * (kv + jp + j * ab_dim1 etc.) follows the LAPACK band storage scheme --
 * keep statement order intact.
 */
/* Subroutine */ int cgbtf2_(integer *m, integer *n, integer *kl, integer *ku, complex *ab, integer *ldab, integer *ipiv, integer *info) { /* System generated locals */ integer ab_dim1, ab_offset, i__1, i__2, i__3, i__4; complex q__1; /* Builtin functions */ void c_div(complex *, complex *, complex *); /* Local variables */ integer i__, j, km, jp, ju, kv; extern /* Subroutine */ int cscal_(integer *, complex *, complex *, integer *), cgeru_(integer *, integer *, complex *, complex *, integer *, complex *, integer *, complex *, integer *), cswap_( integer *, complex *, integer *, complex *, integer *); extern integer icamax_(integer *, complex *, integer *); extern /* Subroutine */ int xerbla_(char *, integer *); /* -- LAPACK computational routine (version 3.4.2) -- */ /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */ /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */ /* September 2012 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* KV is the number of superdiagonals in the factor U, allowing for */ /* fill-in. */ /* Parameter adjustments */ ab_dim1 = *ldab; ab_offset = 1 + ab_dim1; ab -= ab_offset; --ipiv; /* Function Body */ kv = *ku + *kl; /* Test the input parameters. 
*/ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*kl < 0) { *info = -3; } else if (*ku < 0) { *info = -4; } else if (*ldab < *kl + kv + 1) { *info = -6; } if (*info != 0) { i__1 = -(*info); xerbla_("CGBTF2", &i__1); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { return 0; } /* Gaussian elimination with partial pivoting */ /* Set fill-in elements in columns KU+2 to KV to zero. */ i__1 = min(kv,*n); for (j = *ku + 2; j <= i__1; ++j) { i__2 = *kl; for (i__ = kv - j + 2; i__ <= i__2; ++i__) { i__3 = i__ + j * ab_dim1; ab[i__3].r = 0.f; ab[i__3].i = 0.f; // , expr subst /* L10: */ } /* L20: */ } /* JU is the index of the last column affected by the current stage */ /* of the factorization. */ ju = 1; i__1 = min(*m,*n); for (j = 1; j <= i__1; ++j) { /* Set fill-in elements in column J+KV to zero. */ if (j + kv <= *n) { i__2 = *kl; for (i__ = 1; i__ <= i__2; ++i__) { i__3 = i__ + (j + kv) * ab_dim1; ab[i__3].r = 0.f; ab[i__3].i = 0.f; // , expr subst /* L30: */ } } /* Find pivot and test for singularity. KM is the number of */ /* subdiagonal elements in the current column. */ /* Computing MIN */ i__2 = *kl; i__3 = *m - j; // , expr subst km = min(i__2,i__3); i__2 = km + 1; jp = icamax_(&i__2, &ab[kv + 1 + j * ab_dim1], &c__1); ipiv[j] = jp + j - 1; i__2 = kv + jp + j * ab_dim1; if (ab[i__2].r != 0.f || ab[i__2].i != 0.f) { /* Computing MAX */ /* Computing MIN */ i__4 = j + *ku + jp - 1; i__2 = ju; i__3 = min(i__4,*n); // , expr subst ju = max(i__2,i__3); /* Apply interchange to columns J to JU. */ /* (increment LDAB-1 walks along a row of the band storage) */ if (jp != 1) { i__2 = ju - j + 1; i__3 = *ldab - 1; i__4 = *ldab - 1; cswap_(&i__2, &ab[kv + jp + j * ab_dim1], &i__3, &ab[kv + 1 + j * ab_dim1], &i__4); } if (km > 0) { /* Compute multipliers. */ c_div(&q__1, &c_b1, &ab[kv + 1 + j * ab_dim1]); cscal_(&km, &q__1, &ab[kv + 2 + j * ab_dim1], &c__1); /* Update trailing submatrix within the band. 
*/ if (ju > j) { i__2 = ju - j; q__1.r = -1.f; q__1.i = -0.f; // , expr subst i__3 = *ldab - 1; i__4 = *ldab - 1; cgeru_(&km, &i__2, &q__1, &ab[kv + 2 + j * ab_dim1], & c__1, &ab[kv + (j + 1) * ab_dim1], &i__3, &ab[kv + 1 + (j + 1) * ab_dim1], &i__4); } } } else { /* If pivot is zero, set INFO to the index of the pivot */ /* unless a zero pivot has already been found. */ if (*info == 0) { *info = j; } } /* L40: */ } return 0; /* End of CGBTF2 */ }
/* CSTEQR: eigenvalues/eigenvectors of a real symmetric tridiagonal matrix
   by the implicit QL or QR method (f2c translation; COMPZ_LEN is the
   Fortran hidden string-length argument added by f2c). */
/* Subroutine */ int csteqr_(char *compz, integer *n, real *d__, real *e,
	complex *z__, integer *ldz, real *work, integer *info, ftnlen
	compz_len)
{
    /* System generated locals */
    integer z_dim1, z_offset, i__1, i__2;
    real r__1, r__2;

    /* Builtin functions */
    double sqrt(doublereal), r_sign(real *, real *);

    /* Local variables */
    static real b, c__, f, g;
    static integer i__, j, k, l, m;
    static real p, r__, s;
    static integer l1, ii, mm, lm1, mm1, nm1;
    static real rt1, rt2, eps;
    static integer lsv;
    static real tst, eps2;
    static integer lend, jtot;
    extern /* Subroutine */ int slae2_(real *, real *, real *, real *, real *)
	    ;
    extern logical lsame_(char *, char *, ftnlen, ftnlen);
    extern /* Subroutine */ int clasr_(char *, char *, char *, integer *,
	    integer *, real *, real *, complex *, integer *, ftnlen, ftnlen,
	    ftnlen);
    static real anorm;
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
	    complex *, integer *);
    static integer lendm1, lendp1;
    extern /* Subroutine */ int slaev2_(real *, real *, real *, real *, real *
	    , real *, real *);
    extern doublereal slapy2_(real *, real *);
    static integer iscale;
    extern doublereal slamch_(char *, ftnlen);
    extern /* Subroutine */ int claset_(char *, integer *, integer *, complex
	    *, complex *, complex *, integer *, ftnlen);
    static real safmin;
    extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);
    static real safmax;
    extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
	    real *, integer *, integer *, real *, integer *, integer *,
	    ftnlen);
    static integer lendsv;
    extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
	    );
    static real ssfmin;
    static integer nmaxit, icompz;
    static real ssfmax;
    extern doublereal slanst_(char *, integer *, real *, real *, ftnlen);
    extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *,
	    ftnlen);

/*  -- LAPACK routine (version 3.0) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */
/*     Courant Institute, Argonne National Lab, and Rice University */
/*     September 30, 1994 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CSTEQR computes all eigenvalues and, optionally, eigenvectors of a */
/*  symmetric tridiagonal matrix using the implicit QL or QR method. */
/*  The eigenvectors of a full or band complex Hermitian matrix can also */
/*  be found if CHETRD or CHPTRD or CHBTRD has been used to reduce this */
/*  matrix to tridiagonal form. */

/*  Arguments */
/*  ========= */

/*  COMPZ   (input) CHARACTER*1 */
/*          = 'N':  Compute eigenvalues only. */
/*          = 'V':  Compute eigenvalues and eigenvectors of the original */
/*                  Hermitian matrix.  On entry, Z must contain the */
/*                  unitary matrix used to reduce the original matrix */
/*                  to tridiagonal form. */
/*          = 'I':  Compute eigenvalues and eigenvectors of the */
/*                  tridiagonal matrix.  Z is initialized to the identity */
/*                  matrix. */

/*  N       (input) INTEGER */
/*          The order of the matrix.  N >= 0. */

/*  D       (input/output) REAL array, dimension (N) */
/*          On entry, the diagonal elements of the tridiagonal matrix. */
/*          On exit, if INFO = 0, the eigenvalues in ascending order. */

/*  E       (input/output) REAL array, dimension (N-1) */
/*          On entry, the (n-1) subdiagonal elements of the tridiagonal */
/*          matrix. */
/*          On exit, E has been destroyed. */

/*  Z       (input/output) COMPLEX array, dimension (LDZ, N) */
/*          On entry, if COMPZ = 'V', then Z contains the unitary */
/*          matrix used in the reduction to tridiagonal form. */
/*          On exit, if INFO = 0, then if COMPZ = 'V', Z contains the */
/*          orthonormal eigenvectors of the original Hermitian matrix, */
/*          and if COMPZ = 'I', Z contains the orthonormal eigenvectors */
/*          of the symmetric tridiagonal matrix. */
/*          If COMPZ = 'N', then Z is not referenced. */

/*  LDZ     (input) INTEGER */
/*          The leading dimension of the array Z.  LDZ >= 1, and if */
/*          eigenvectors are desired, then  LDZ >= max(1,N). */

/*  WORK    (workspace) REAL array, dimension (max(1,2*N-2)) */
/*          If COMPZ = 'N', then WORK is not referenced. */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
/*          > 0:  the algorithm has failed to find all the eigenvalues in */
/*                a total of 30*N iterations; if INFO = i, then i */
/*                elements of E have not converged to zero; on exit, D */
/*                and E contain the elements of a symmetric tridiagonal */
/*                matrix which is unitarily similar to the original */
/*                matrix. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input parameters. */

    /* Parameter adjustments (f2c shifts pointers so 1-based Fortran */
    /* indexing works directly on the C arrays) */
    --d__;
    --e;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;
    --work;

    /* Function Body */
    *info = 0;

    /* Decode COMPZ into an integer flag: 0='N', 1='V', 2='I', -1=invalid. */
    if (lsame_(compz, "N", (ftnlen)1, (ftnlen)1)) {
	icompz = 0;
    } else if (lsame_(compz, "V", (ftnlen)1, (ftnlen)1)) {
	icompz = 1;
    } else if (lsame_(compz, "I", (ftnlen)1, (ftnlen)1)) {
	icompz = 2;
    } else {
	icompz = -1;
    }
    if (icompz < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
	*info = -6;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CSTEQR", &i__1, (ftnlen)6);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }

    if (*n == 1) {
	if (icompz == 2) {
	    /* 1x1 case: the single "eigenvector" is just 1. */
	    i__1 = z_dim1 + 1;
	    z__[i__1].r = 1.f, z__[i__1].i = 0.f;
	}
	return 0;
    }

/*     Determine the unit roundoff and over/underflow thresholds. */

    eps = slamch_("E", (ftnlen)1);
/* Computing 2nd power */
    r__1 = eps;
    eps2 = r__1 * r__1;
    safmin = slamch_("S", (ftnlen)1);
    safmax = 1.f / safmin;
    ssfmax = sqrt(safmax) / 3.f;
    ssfmin = sqrt(safmin) / eps2;

/*     Compute the eigenvalues and eigenvectors of the tridiagonal */
/*     matrix. */

    if (icompz == 2) {
	/* COMPZ='I': start Z off as the identity (c_b1/c_b2 are the */
	/* file-level complex constants passed as off-diagonal/diagonal */
	/* fill; presumably 0 and 1 -- confirm against the file header). */
	claset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz, (ftnlen)4);
    }

    nmaxit = *n * 30;
    jtot = 0;

/*     Determine where the matrix splits and choose QL or QR iteration */
/*     for each block, according to whether top or bottom diagonal */
/*     element is smaller. */

    l1 = 1;
    nm1 = *n - 1;

L10:
    if (l1 > *n) {
	goto L160;
    }
    if (l1 > 1) {
	e[l1 - 1] = 0.f;
    }
    /* Scan for a negligible off-diagonal; that index bounds the current */
    /* unreduced diagonal block [L1..M]. */
    if (l1 <= nm1) {
	i__1 = nm1;
	for (m = l1; m <= i__1; ++m) {
	    tst = (r__1 = e[m], dabs(r__1));
	    if (tst == 0.f) {
		goto L30;
	    }
	    if (tst <= sqrt((r__1 = d__[m], dabs(r__1))) * sqrt((r__2 =
		    d__[m + 1], dabs(r__2))) * eps) {
		e[m] = 0.f;
		goto L30;
	    }
/* L20: */
	}
    }
    m = *n;

L30:
    l = l1;
    lsv = l;
    lend = m;
    lendsv = lend;
    l1 = m + 1;
    if (lend == l) {
	goto L10;
    }

/*     Scale submatrix in rows and columns L to LEND */

    i__1 = lend - l + 1;
    anorm = slanst_("I", &i__1, &d__[l], &e[l], (ftnlen)1);
    iscale = 0;
    if (anorm == 0.f) {
	goto L10;
    }
    if (anorm > ssfmax) {
	iscale = 1;
	i__1 = lend - l + 1;
	slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
		info, (ftnlen)1);
	i__1 = lend - l;
	slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
		info, (ftnlen)1);
    } else if (anorm < ssfmin) {
	iscale = 2;
	i__1 = lend - l + 1;
	slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
		info, (ftnlen)1);
	i__1 = lend - l;
	slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
		info, (ftnlen)1);
    }

/*     Choose between QL and QR iteration */

    /* Iterate toward the smaller end of the diagonal; if the bottom */
    /* entry is smaller, swap the roles of L and LEND (QR instead of QL). */
    if ((r__1 = d__[lend], dabs(r__1)) < (r__2 = d__[l], dabs(r__2))) {
	lend = lsv;
	l = lendsv;
    }

    if (lend > l) {

/*        QL Iteration */

/*        Look for small subdiagonal element. */

L40:
	if (l != lend) {
	    lendm1 = lend - 1;
	    i__1 = lendm1;
	    for (m = l; m <= i__1; ++m) {
/* Computing 2nd power */
		r__2 = (r__1 = e[m], dabs(r__1));
		tst = r__2 * r__2;
		if (tst <= eps2 * (r__1 = d__[m], dabs(r__1)) * (r__2 =
			d__[m + 1], dabs(r__2)) + safmin) {
		    goto L60;
		}
/* L50: */
	    }
	}

	m = lend;

L60:
	if (m < lend) {
	    e[m] = 0.f;
	}
	p = d__[l];
	if (m == l) {
	    goto L80;
	}

/*        If remaining matrix is 2-by-2, use SLAE2 or SLAEV2 */
/*        to compute its eigensystem. */

	if (m == l + 1) {
	    if (icompz > 0) {
		slaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
		work[l] = c__;
		work[*n - 1 + l] = s;
		/* WORK(1:n-1) holds cosines, WORK(n:2n-2) sines. */
		clasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &
			z__[l * z_dim1 + 1], ldz, (ftnlen)1, (ftnlen)1, (
			ftnlen)1);
	    } else {
		slae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
	    }
	    d__[l] = rt1;
	    d__[l + 1] = rt2;
	    e[l] = 0.f;
	    l += 2;
	    if (l <= lend) {
		goto L40;
	    }
	    goto L140;
	}

	if (jtot == nmaxit) {
	    goto L140;
	}
	++jtot;

/*        Form shift. */

	/* Wilkinson shift; c_b41 is the file-level constant passed to */
	/* slapy2 (presumably 1.f, giving sqrt(g^2+1) -- confirm). */
	g = (d__[l + 1] - p) / (e[l] * 2.f);
	r__ = slapy2_(&g, &c_b41);
	g = d__[m] - p + e[l] / (g + r_sign(&r__, &g));

	s = 1.f;
	c__ = 1.f;
	p = 0.f;

/*        Inner loop */

	mm1 = m - 1;
	i__1 = l;
	for (i__ = mm1; i__ >= i__1; --i__) {
	    f = s * e[i__];
	    b = c__ * e[i__];
	    slartg_(&g, &f, &c__, &s, &r__);
	    if (i__ != m - 1) {
		e[i__ + 1] = r__;
	    }
	    g = d__[i__ + 1] - p;
	    r__ = (d__[i__] - g) * s + c__ * 2.f * b;
	    p = s * r__;
	    d__[i__ + 1] = g + p;
	    g = c__ * r__ - b;

/*           If eigenvectors are desired, then save rotations. */

	    if (icompz > 0) {
		work[i__] = c__;
		work[*n - 1 + i__] = -s;
	    }
/* L70: */
	}

/*        If eigenvectors are desired, then apply saved rotations. */

	if (icompz > 0) {
	    mm = m - l + 1;
	    clasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l
		    * z_dim1 + 1], ldz, (ftnlen)1, (ftnlen)1, (ftnlen)1);
	}

	d__[l] -= p;
	e[l] = g;
	goto L40;

/*        Eigenvalue found. */

L80:
	d__[l] = p;

	++l;
	if (l <= lend) {
	    goto L40;
	}
	goto L140;

    } else {

/*        QR Iteration */

/*        Look for small superdiagonal element. */

L90:
	if (l != lend) {
	    lendp1 = lend + 1;
	    i__1 = lendp1;
	    for (m = l; m >= i__1; --m) {
/* Computing 2nd power */
		r__2 = (r__1 = e[m - 1], dabs(r__1));
		tst = r__2 * r__2;
		if (tst <= eps2 * (r__1 = d__[m], dabs(r__1)) * (r__2 =
			d__[m - 1], dabs(r__2)) + safmin) {
		    goto L110;
		}
/* L100: */
	    }
	}

	m = lend;

L110:
	if (m > lend) {
	    e[m - 1] = 0.f;
	}
	p = d__[l];
	if (m == l) {
	    goto L130;
	}

/*        If remaining matrix is 2-by-2, use SLAE2 or SLAEV2 */
/*        to compute its eigensystem. */

	if (m == l - 1) {
	    if (icompz > 0) {
		slaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s)
			;
		work[m] = c__;
		work[*n - 1 + m] = s;
		clasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &
			z__[(l - 1) * z_dim1 + 1], ldz, (ftnlen)1, (ftnlen)1,
			(ftnlen)1);
	    } else {
		slae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
	    }
	    d__[l - 1] = rt1;
	    d__[l] = rt2;
	    e[l - 1] = 0.f;
	    l += -2;
	    if (l >= lend) {
		goto L90;
	    }
	    goto L140;
	}

	if (jtot == nmaxit) {
	    goto L140;
	}
	++jtot;

/*        Form shift. */

	g = (d__[l - 1] - p) / (e[l - 1] * 2.f);
	r__ = slapy2_(&g, &c_b41);
	g = d__[m] - p + e[l - 1] / (g + r_sign(&r__, &g));

	s = 1.f;
	c__ = 1.f;
	p = 0.f;

/*        Inner loop */

	lm1 = l - 1;
	i__1 = lm1;
	for (i__ = m; i__ <= i__1; ++i__) {
	    f = s * e[i__];
	    b = c__ * e[i__];
	    slartg_(&g, &f, &c__, &s, &r__);
	    if (i__ != m) {
		e[i__ - 1] = r__;
	    }
	    g = d__[i__] - p;
	    r__ = (d__[i__ + 1] - g) * s + c__ * 2.f * b;
	    p = s * r__;
	    d__[i__] = g + p;
	    g = c__ * r__ - b;

/*           If eigenvectors are desired, then save rotations. */

	    if (icompz > 0) {
		work[i__] = c__;
		work[*n - 1 + i__] = s;
	    }
/* L120: */
	}

/*        If eigenvectors are desired, then apply saved rotations. */

	if (icompz > 0) {
	    mm = l - m + 1;
	    clasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m
		    * z_dim1 + 1], ldz, (ftnlen)1, (ftnlen)1, (ftnlen)1);
	}

	d__[l] -= p;
	e[lm1] = g;
	goto L90;

/*        Eigenvalue found. */

L130:
	d__[l] = p;

	--l;
	if (l >= lend) {
	    goto L90;
	}
	goto L140;

    }

/*     Undo scaling if necessary */

L140:
    if (iscale == 1) {
	i__1 = lendsv - lsv + 1;
	slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
		n, info, (ftnlen)1);
	i__1 = lendsv - lsv;
	slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n,
		info, (ftnlen)1);
    } else if (iscale == 2) {
	i__1 = lendsv - lsv + 1;
	slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
		n, info, (ftnlen)1);
	i__1 = lendsv - lsv;
	slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n,
		info, (ftnlen)1);
    }

/*     Check for no convergence to an eigenvalue after a total */
/*     of N*MAXIT iterations. */

    if (jtot == nmaxit) {
	/* INFO counts the off-diagonals that never became negligible. */
	i__1 = *n - 1;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    if (e[i__] != 0.f) {
		++(*info);
	    }
/* L150: */
	}
	return 0;
    }
    goto L10;

/*     Order eigenvalues and eigenvectors. */

L160:
    if (icompz == 0) {

/*        Use Quick Sort */

	slasrt_("I", n, &d__[1], info, (ftnlen)1);

    } else {

/*        Use Selection Sort to minimize swaps of eigenvectors */

	i__1 = *n;
	for (ii = 2; ii <= i__1; ++ii) {
	    i__ = ii - 1;
	    k = i__;
	    p = d__[i__];
	    i__2 = *n;
	    for (j = ii; j <= i__2; ++j) {
		if (d__[j] < p) {
		    k = j;
		    p = d__[j];
		}
/* L170: */
	    }
	    if (k != i__) {
		d__[k] = d__[i__];
		d__[i__] = p;
		/* Keep the eigenvector columns paired with their values. */
		cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
			 &c__1);
	    }
/* L180: */
	}
    }
    return 0;

/*     End of CSTEQR */

} /* csteqr_ */
/* DECK CHIDI */
/* CHIDI: determinant, inertia and/or inverse of a complex Hermitian
   matrix from the CHIFA factorization; which outputs are produced is
   selected by the decimal digits of JOB (see prologue below). */
/* Subroutine */ int chidi_(complex *a, integer *lda, integer *n, integer *
	kpvt, real *det, integer *inert, complex *work, integer *job)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2;
    real r__1;
    complex q__1, q__2, q__3;

    /* Local variables */
    static real d__;
    static integer j, k;
    static real t, ak;
    static integer jb, ks, km1;
    static real ten, akp1;
    static complex temp, akkp1;
    extern /* Complex */ void cdotc_(complex *, integer *, complex *, integer
	    *, complex *, integer *);
    static logical nodet;
    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
	    complex *, integer *), cswap_(integer *, complex *, integer *,
	    complex *, integer *), caxpy_(integer *, complex *, complex *,
	    integer *, complex *, integer *);
    static integer kstep;
    static logical noert, noinv;

/* ***BEGIN PROLOGUE  CHIDI */
/* ***PURPOSE  Compute the determinant, inertia and inverse of a complex */
/*            Hermitian matrix using the factors obtained from CHIFA. */
/* ***LIBRARY   SLATEC (LINPACK) */
/* ***CATEGORY  D2D1A, D3D1A */
/* ***TYPE      COMPLEX (SSIDI-S, DSISI-D, CHIDI-C, CSIDI-C) */
/* ***KEYWORDS  DETERMINANT, HERMITIAN, INVERSE, LINEAR ALGEBRA, LINPACK, */
/*             MATRIX */
/* ***AUTHOR  Bunch, J., (UCSD) */
/* ***DESCRIPTION */

/*     CHIDI computes the determinant, inertia and inverse */
/*     of a complex Hermitian matrix using the factors from CHIFA. */

/*     On Entry */

/*        A       COMPLEX(LDA,N) */
/*                the output from CHIFA. */

/*        LDA     INTEGER */
/*                the leading dimension of the array A. */

/*        N       INTEGER */
/*                the order of the matrix A. */

/*        KVPT    INTEGER(N) */
/*                the pivot vector from CHIFA. */

/*        WORK    COMPLEX(N) */
/*                work vector.  Contents destroyed. */

/*        JOB     INTEGER */
/*                JOB has the decimal expansion  ABC  where */
/*                   if  C .NE. 0, the inverse is computed, */
/*                   if  B .NE. 0, the determinant is computed, */
/*                   if  A .NE. 0, the inertia is computed. */

/*                For example, JOB = 111  gives all three. */

/*     On Return */

/*        Variables not requested by JOB are not used. */

/*        A      contains the upper triangle of the inverse of */
/*               the original matrix.  The strict lower triangle */
/*               is never referenced. */

/*        DET    REAL(2) */
/*               determinant of original matrix. */
/*               Determinant = DET(1) * 10.0**DET(2) */
/*               with 1.0 .LE. ABS(DET(1)) .LT. 10.0 */
/*               or DET(1) = 0.0. */

/*        INERT  INTEGER(3) */
/*               the inertia of the original matrix. */
/*               INERT(1)  =  number of positive eigenvalues. */
/*               INERT(2)  =  number of negative eigenvalues. */
/*               INERT(3)  =  number of zero eigenvalues. */

/*     Error Condition */

/*        A division by zero may occur if the inverse is requested */
/*        and  CHICO  has set RCOND .EQ. 0.0 */
/*        or  CHIFA  has set  INFO .NE. 0 . */

/* ***REFERENCES  J. J. Dongarra, J. R. Bunch, C. B. Moler, and G. W. */
/*                 Stewart, LINPACK Users' Guide, SIAM, 1979. */
/* ***ROUTINES CALLED  CAXPY, CCOPY, CDOTC, CSWAP */
/* ***REVISION HISTORY  (YYMMDD) */
/*   780814  DATE WRITTEN */
/*   890531  Changed all specific intrinsics to generic.  (WRB) */
/*   890831  Modified array declarations.  (WRB) */
/*   891107  Modified routine equivalence list.  (WRB) */
/*   891107  REVISION DATE from Version 3.2 */
/*   891214  Prologue converted to Version 4.0 format.  (BAB) */
/*   900326  Removed duplicate information from DESCRIPTION section. */
/*           (WRB) */
/*   920501  Reformatted the REFERENCES section.  (WRB) */
/* ***END PROLOGUE  CHIDI */
/* ***FIRST EXECUTABLE STATEMENT  CHIDI */
    /* Parameter adjustments (shift pointers for 1-based indexing) */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --kpvt;
    --det;
    --inert;
    --work;

    /* Function Body */
    /* Decode the decimal digits of JOB: C (units) inverse, B (tens) */
    /* determinant, A (hundreds) inertia; a zero digit skips that part. */
    noinv = *job % 10 == 0;
    nodet = *job % 100 / 10 == 0;
    noert = *job % 1000 / 100 == 0;

    if (nodet && noert) {
	goto L140;
    }
    if (noert) {
	goto L10;
    }
    inert[1] = 0;
    inert[2] = 0;
    inert[3] = 0;
L10:
    if (nodet) {
	goto L20;
    }
    det[1] = 1.f;
    det[2] = 0.f;
    ten = 10.f;
L20:
    t = 0.f;
    i__1 = *n;
    for (k = 1; k <= i__1; ++k) {
	/* Diagonal of D is real for a Hermitian factorization; only the */
	/* real part is used. */
	i__2 = k + k * a_dim1;
	d__ = a[i__2].r;

/*           CHECK IF 1 BY 1 */

	if (kpvt[k] > 0) {
	    goto L50;
	}

/*              2 BY 2 BLOCK */
/*              USE DET (D  S)  =  (D/T * C - T) * T  ,  T = ABS(S) */
/*                      (S  C) */
/*              TO AVOID UNDERFLOW/OVERFLOW TROUBLES. */
/*              TAKE TWO PASSES THROUGH SCALING.  USE  T  FOR FLAG. */

	if (t != 0.f) {
	    goto L30;
	}
	/* First pass over the 2x2 block: fold both pivots into D here, */
	/* and set T nonzero so the second pass contributes only T. */
	t = c_abs(&a[k + (k + 1) * a_dim1]);
	i__2 = k + 1 + (k + 1) * a_dim1;
	d__ = d__ / t * a[i__2].r - t;
	goto L40;
L30:
	d__ = t;
	t = 0.f;
L40:
L50:
	if (noert) {
	    goto L60;
	}
	if (d__ > 0.f) {
	    ++inert[1];
	}
	if (d__ < 0.f) {
	    ++inert[2];
	}
	if (d__ == 0.f) {
	    ++inert[3];
	}
L60:
	if (nodet) {
	    goto L120;
	}
	det[1] = d__ * det[1];
	if (det[1] == 0.f) {
	    goto L110;
	}
	/* Renormalize so 1 <= |DET(1)| < 10, tracking the exponent in */
	/* DET(2). */
L70:
	if (dabs(det[1]) >= 1.f) {
	    goto L80;
	}
	det[1] = ten * det[1];
	det[2] += -1.f;
	goto L70;
L80:
L90:
	if (dabs(det[1]) < ten) {
	    goto L100;
	}
	det[1] /= ten;
	det[2] += 1.f;
	goto L90;
L100:
L110:
L120:
/* L130: */
	;
    }

L140:

/*     COMPUTE INVERSE(A) */

    if (noinv) {
	goto L270;
    }
    k = 1;
L150:
    if (k > *n) {
	goto L260;
    }
    km1 = k - 1;
    if (kpvt[k] < 0) {
	goto L180;
    }

/*              1 BY 1 */

    /* Invert the real 1x1 pivot in place. */
    i__1 = k + k * a_dim1;
    i__2 = k + k * a_dim1;
    r__1 = 1.f / a[i__2].r;
    q__1.r = r__1, q__1.i = 0.f;
    a[i__1].r = q__1.r, a[i__1].i = q__1.i;
    if (km1 < 1) {
	goto L170;
    }
    /* Update column k of the inverse: work holds the old column. */
    ccopy_(&km1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
    i__1 = km1;
    for (j = 1; j <= i__1; ++j) {
	i__2 = j + k * a_dim1;
	cdotc_(&q__1, &j, &a[j * a_dim1 + 1], &c__1, &work[1], &c__1);
	a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	i__2 = j - 1;
	caxpy_(&i__2, &work[j], &a[j * a_dim1 + 1], &c__1, &a[k * a_dim1 + 1],
		 &c__1);
/* L160: */
    }
    i__1 = k + k * a_dim1;
    i__2 = k + k * a_dim1;
    cdotc_(&q__3, &km1, &work[1], &c__1, &a[k * a_dim1 + 1], &c__1);
    /* Only the real part of the dot product is added: the diagonal of */
    /* the Hermitian inverse is real. */
    r__1 = q__3.r;
    q__2.r = r__1, q__2.i = 0.f;
    q__1.r = a[i__2].r + q__2.r, q__1.i = a[i__2].i + q__2.i;
    a[i__1].r = q__1.r, a[i__1].i = q__1.i;
L170:
    kstep = 1;
    goto L220;
L180:

/*              2 BY 2 */

    /* Invert the 2x2 pivot block, scaling by T = |off-diagonal| to */
    /* avoid overflow/underflow. */
    t = c_abs(&a[k + (k + 1) * a_dim1]);
    i__1 = k + k * a_dim1;
    ak = a[i__1].r / t;
    i__1 = k + 1 + (k + 1) * a_dim1;
    akp1 = a[i__1].r / t;
    i__1 = k + (k + 1) * a_dim1;
    q__1.r = a[i__1].r / t, q__1.i = a[i__1].i / t;
    akkp1.r = q__1.r, akkp1.i = q__1.i;
    d__ = t * (ak * akp1 - 1.f);
    i__1 = k + k * a_dim1;
    r__1 = akp1 / d__;
    q__1.r = r__1, q__1.i = 0.f;
    a[i__1].r = q__1.r, a[i__1].i = q__1.i;
    i__1 = k + 1 + (k + 1) * a_dim1;
    r__1 = ak / d__;
    q__1.r = r__1, q__1.i = 0.f;
    a[i__1].r = q__1.r, a[i__1].i = q__1.i;
    i__1 = k + (k + 1) * a_dim1;
    q__2.r = -akkp1.r, q__2.i = -akkp1.i;
    q__1.r = q__2.r / d__, q__1.i = q__2.i / d__;
    a[i__1].r = q__1.r, a[i__1].i = q__1.i;
    if (km1 < 1) {
	goto L210;
    }
    /* Update column k+1 of the inverse. */
    ccopy_(&km1, &a[(k + 1) * a_dim1 + 1], &c__1, &work[1], &c__1);
    i__1 = km1;
    for (j = 1; j <= i__1; ++j) {
	i__2 = j + (k + 1) * a_dim1;
	cdotc_(&q__1, &j, &a[j * a_dim1 + 1], &c__1, &work[1], &c__1);
	a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	i__2 = j - 1;
	caxpy_(&i__2, &work[j], &a[j * a_dim1 + 1], &c__1, &a[(k + 1) *
		a_dim1 + 1], &c__1);
/* L190: */
    }
    i__1 = k + 1 + (k + 1) * a_dim1;
    i__2 = k + 1 + (k + 1) * a_dim1;
    cdotc_(&q__3, &km1, &work[1], &c__1, &a[(k + 1) * a_dim1 + 1], &c__1);
    r__1 = q__3.r;
    q__2.r = r__1, q__2.i = 0.f;
    q__1.r = a[i__2].r + q__2.r, q__1.i = a[i__2].i + q__2.i;
    a[i__1].r = q__1.r, a[i__1].i = q__1.i;
    i__1 = k + (k + 1) * a_dim1;
    i__2 = k + (k + 1) * a_dim1;
    cdotc_(&q__2, &km1, &a[k * a_dim1 + 1], &c__1, &a[(k + 1) * a_dim1 + 1],
	    &c__1);
    q__1.r = a[i__2].r + q__2.r, q__1.i = a[i__2].i + q__2.i;
    a[i__1].r = q__1.r, a[i__1].i = q__1.i;
    /* Update column k of the inverse. */
    ccopy_(&km1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
    i__1 = km1;
    for (j = 1; j <= i__1; ++j) {
	i__2 = j + k * a_dim1;
	cdotc_(&q__1, &j, &a[j * a_dim1 + 1], &c__1, &work[1], &c__1);
	a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	i__2 = j - 1;
	caxpy_(&i__2, &work[j], &a[j * a_dim1 + 1], &c__1, &a[k * a_dim1 + 1],
		 &c__1);
/* L200: */
    }
    i__1 = k + k * a_dim1;
    i__2 = k + k * a_dim1;
    cdotc_(&q__3, &km1, &work[1], &c__1, &a[k * a_dim1 + 1], &c__1);
    r__1 = q__3.r;
    q__2.r = r__1, q__2.i = 0.f;
    q__1.r = a[i__2].r + q__2.r, q__1.i = a[i__2].i + q__2.i;
    a[i__1].r = q__1.r, a[i__1].i = q__1.i;
L210:
    kstep = 2;
L220:

/*           SWAP */

    /* Undo the CHIFA interchange: swap rows/columns k and |KPVT(k)| of */
    /* the upper triangle, conjugating across the diagonal. */
    ks = (i__1 = kpvt[k], abs(i__1));
    if (ks == k) {
	goto L250;
    }
    cswap_(&ks, &a[ks * a_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &c__1);
    i__1 = k;
    for (jb = ks; jb <= i__1; ++jb) {
	j = k + ks - jb;
	r_cnjg(&q__1, &a[j + k * a_dim1]);
	temp.r = q__1.r, temp.i = q__1.i;
	i__2 = j + k * a_dim1;
	r_cnjg(&q__1, &a[ks + j * a_dim1]);
	a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	i__2 = ks + j * a_dim1;
	a[i__2].r = temp.r, a[i__2].i = temp.i;
/* L230: */
    }
    if (kstep == 1) {
	goto L240;
    }
    i__1 = ks + (k + 1) * a_dim1;
    temp.r = a[i__1].r, temp.i = a[i__1].i;
    i__1 = ks + (k + 1) * a_dim1;
    i__2 = k + (k + 1) * a_dim1;
    a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
    i__1 = k + (k + 1) * a_dim1;
    a[i__1].r = temp.r, a[i__1].i = temp.i;
L240:
L250:
    k += kstep;
    goto L150;
L260:
L270:
    return 0;
} /* chidi_ */
/* CGETC2: LU factorization of an n-by-n complex matrix with complete
   (row and column) pivoting, A = P*L*U*Q; tiny pivots are bumped up to
   SMIN so the factorization stays usable for solves. */
/* Subroutine */ int cgetc2_(integer *n, complex *a, integer *lda, integer *
	ipiv, integer *jpiv, integer *info)
{
/*  -- LAPACK auxiliary routine (version 3.0) --
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       June 30, 1999


    Purpose
    =======

    CGETC2 computes an LU factorization, using complete pivoting, of the
    n-by-n matrix A. The factorization has the form A = P * L * U * Q,
    where P and Q are permutation matrices, L is lower triangular with
    unit diagonal elements and U is upper triangular.

    This is a level 1 BLAS version of the algorithm.

    Arguments
    =========

    N       (input) INTEGER
            The order of the matrix A. N >= 0.

    A       (input/output) COMPLEX array, dimension (LDA, N)
            On entry, the n-by-n matrix to be factored.
            On exit, the factors L and U from the factorization
            A = P*L*U*Q; the unit diagonal elements of L are not stored.
            If U(k, k) appears to be less than SMIN, U(k, k) is given
            the value of SMIN, giving a nonsingular perturbed system.

    LDA     (input) INTEGER
            The leading dimension of the array A.  LDA >= max(1, N).

    IPIV    (output) INTEGER array, dimension (N).
            The pivot indices; for 1 <= i <= N, row i of the matrix has
            been interchanged with row IPIV(i).

    JPIV    (output) INTEGER array, dimension (N).
            The pivot indices; for 1 <= j <= N, column j of the matrix
            has been interchanged with column JPIV(j).

    INFO    (output) INTEGER
             = 0: successful exit
             > 0: if INFO = k, U(k, k) is likely to produce overflow if
                  one tries to solve for x in Ax = b. So U is perturbed
                  to avoid the overflow.

    Further Details
    ===============

    Based on contributions by
       Bo Kagstrom and Peter Poromaa, Department of Computing Science,
       Umea University, S-901 87 Umea, Sweden.

    =====================================================================


       Set constants to control overflow

       Parameter adjustments */
    /* Table of constant values */
    static integer c__1 = 1;
    static complex c_b10 = {-1.f,0.f};

    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    real r__1;
    complex q__1;
    /* Builtin functions */
    double c_abs(complex *);
    void c_div(complex *, complex *, complex *);
    /* Local variables */
    static real smin, xmax;
    static integer i__, j;
    extern /* Subroutine */ int cgeru_(integer *, integer *, complex *,
	    complex *, integer *, complex *, integer *, complex *, integer *),
	     cswap_(integer *, complex *, integer *, complex *, integer *),
	    slabad_(real *, real *);
    static integer ip, jp;
    extern doublereal slamch_(char *);
    static real bignum, smlnum, eps;
    static integer ipv, jpv;
/* 1-based (row, column) indexing helpers for the adjusted pointer. */
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]


    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --ipiv;
    --jpiv;

    /* Function Body */
    *info = 0;
    eps = slamch_("P");
    smlnum = slamch_("S") / eps;
    bignum = 1.f / smlnum;
    slabad_(&smlnum, &bignum);

/*     Factorize A using complete pivoting.
       Set pivots less than SMIN to SMIN */

    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {

/*        Find max element in matrix A */

	/* O(n^2) scan of the trailing submatrix for the largest entry; */
	/* ">=" guarantees ipv/jpv are set even if the submatrix is all */
	/* zeros. */
	xmax = 0.f;
	i__2 = *n;
	for (ip = i__; ip <= i__2; ++ip) {
	    i__3 = *n;
	    for (jp = i__; jp <= i__3; ++jp) {
		if (c_abs(&a_ref(ip, jp)) >= xmax) {
		    xmax = c_abs(&a_ref(ip, jp));
		    ipv = ip;
		    jpv = jp;
		}
/* L10: */
	    }
/* L20: */
	}
	if (i__ == 1) {
/* Computing MAX */
	    /* SMIN is fixed on the first step from the overall max. */
	    r__1 = eps * xmax;
	    smin = dmax(r__1,smlnum);
	}

/*        Swap rows */

	if (ipv != i__) {
	    cswap_(n, &a_ref(ipv, 1), lda, &a_ref(i__, 1), lda);
	}
	ipiv[i__] = ipv;

/*        Swap columns */

	if (jpv != i__) {
	    cswap_(n, &a_ref(1, jpv), &c__1, &a_ref(1, i__), &c__1);
	}
	jpiv[i__] = jpv;

/*        Check for singularity */

	if (c_abs(&a_ref(i__, i__)) < smin) {
	    *info = i__;
	    i__2 = a_subscr(i__, i__);
	    q__1.r = smin, q__1.i = 0.f;
	    a[i__2].r = q__1.r, a[i__2].i = q__1.i;
	}
	/* Compute the multipliers (column i of L)... */
	i__2 = *n;
	for (j = i__ + 1; j <= i__2; ++j) {
	    i__3 = a_subscr(j, i__);
	    c_div(&q__1, &a_ref(j, i__), &a_ref(i__, i__));
	    a[i__3].r = q__1.r, a[i__3].i = q__1.i;
/* L30: */
	}
	/* ...and apply the rank-1 Schur-complement update (c_b10 = -1). */
	i__2 = *n - i__;
	i__3 = *n - i__;
	cgeru_(&i__2, &i__3, &c_b10, &a_ref(i__ + 1, i__), &c__1, &a_ref(i__,
		i__ + 1), lda, &a_ref(i__ + 1, i__ + 1), lda);
/* L40: */
    }

    /* The last diagonal entry gets the same tiny-pivot protection; note */
    /* IPIV(n)/JPIV(n) are not written here -- presumably callers treat */
    /* the last step as having no interchange (confirm against CGESC2). */
    if (c_abs(&a_ref(*n, *n)) < smin) {
	*info = *n;
	i__1 = a_subscr(*n, *n);
	q__1.r = smin, q__1.i = 0.f;
	a[i__1].r = q__1.r, a[i__1].i = q__1.i;
    }

    return 0;

/*     End of CGETC2 */

} /* cgetc2_ */
/* CSYTRS_ROOK solves a system of linear equations A*X = B for a complex
   symmetric matrix A, using the block factorization A = U*D*U**T (UPLO =
   'U') or A = L*D*L**T (UPLO = 'L') with 1x1 and 2x2 diagonal blocks of D
   encoded in A and IPIV -- presumably produced by CSYTRF_ROOK (confirm
   against that routine's output format).

   Arguments (from the checks and usage below):
     UPLO  (input)  'U' or 'L': which triangle of A holds the factors.
     N     (input)  order of A, N >= 0.
     NRHS  (input)  number of right-hand sides (columns of B), NRHS >= 0.
     A     (input)  the factored matrix, dimension (LDA,N).
     LDA   (input)  leading dimension of A, LDA >= max(1,N).
     IPIV  (input)  pivot indices: IPIV(k) > 0 marks a 1x1 block with row
                    interchange k <-> IPIV(k); IPIV(k) < 0 marks half of a
                    2x2 block with interchange k <-> -IPIV(k).
     B     (in/out) on entry the right-hand sides; on exit the solution X.
     LDB   (input)  leading dimension of B, LDB >= max(1,N).
     INFO  (output) 0 on success, -i if argument i was illegal. */
/* Subroutine */ int csytrs_rook_(char *uplo, integer *n, integer *nrhs,
	complex *a, integer *lda, integer *ipiv, complex *b, integer *ldb,
	integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
    complex q__1, q__2, q__3;

    /* Builtin functions */
    void c_div(complex *, complex *, complex *);

    /* Local variables */
    integer j, k;
    complex ak, bk;
    integer kp;
    complex akm1, bkm1, akm1k;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
	    integer *);
    extern logical lsame_(char *, char *);
    complex denom;
    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
	    , complex *, integer *, complex *, integer *, complex *, complex *
	    , integer *), cgeru_(integer *, integer *, complex *, complex *,
	    integer *, complex *, integer *, complex *, integer *), cswap_(
	    integer *, complex *, integer *, complex *, integer *);
    logical upper;
    extern /* Subroutine */ int xerbla_(char *, integer *);

/*  -- LAPACK computational routine (version 3.4.0) -- */
/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/*     November 2011 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments (1-based indexing) */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --ipiv;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;

    /* Function Body */
    *info = 0;
    upper = lsame_(uplo, "U");
    if (! upper && ! lsame_(uplo, "L")) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*nrhs < 0) {
	*info = -3;
    } else if (*lda < max(1,*n)) {
	*info = -5;
    } else if (*ldb < max(1,*n)) {
	*info = -8;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CSYTRS_ROOK", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0 || *nrhs == 0) {
	return 0;
    }

    if (upper) {

/*        Solve A*X = B, where A = U*D*U**T. */

/*        First solve U*D*X = B, overwriting B with X. */

/*        K is the main loop index, decreasing from N to 1 in steps of */
/*        1 or 2, depending on the size of the diagonal blocks. */

	k = *n;
L10:

/*        If K < 1, exit from loop. */

	if (k < 1) {
	    goto L30;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block */

/*           Interchange rows K and IPIV(K). */

	    kp = ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }

/*           Multiply by inv(U(K)), where U(K) is the transformation */
/*           stored in column K of A. */

	    i__1 = k - 1;
	    q__1.r = -1.f;
	    q__1.i = -0.f; // , expr subst
	    cgeru_(&i__1, nrhs, &q__1, &a[k * a_dim1 + 1], &c__1, &b[k +
		    b_dim1], ldb, &b[b_dim1 + 1], ldb);

/*           Multiply by the inverse of the diagonal block. */

	    /* c_b1 is the file-level complex constant used as the */
	    /* numerator; presumably (1,0) -- confirm at file head. */
	    c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
	    cscal_(nrhs, &q__1, &b[k + b_dim1], ldb);
	    --k;
	} else {

/*           2 x 2 diagonal block */

/*           Interchange rows K and -IPIV(K) THEN K-1 and -IPIV(K-1) */

	    kp = -ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }
	    kp = -ipiv[k - 1];
	    if (kp != k - 1) {
		cswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }

/*           Multiply by inv(U(K)), where U(K) is the transformation */
/*           stored in columns K-1 and K of A. */

	    if (k > 2) {
		i__1 = k - 2;
		q__1.r = -1.f;
		q__1.i = -0.f; // , expr subst
		cgeru_(&i__1, nrhs, &q__1, &a[k * a_dim1 + 1], &c__1, &b[k +
			b_dim1], ldb, &b[b_dim1 + 1], ldb);
		i__1 = k - 2;
		q__1.r = -1.f;
		q__1.i = -0.f; // , expr subst
		cgeru_(&i__1, nrhs, &q__1, &a[(k - 1) * a_dim1 + 1], &c__1, &
			b[k - 1 + b_dim1], ldb, &b[b_dim1 + 1], ldb);
	    }

/*           Multiply by the inverse of the diagonal block. */

	    /* Explicit 2x2 solve, everything scaled by the off-diagonal */
	    /* AKM1K to keep intermediates bounded. */
	    i__1 = k - 1 + k * a_dim1;
	    akm1k.r = a[i__1].r;
	    akm1k.i = a[i__1].i; // , expr subst
	    c_div(&q__1, &a[k - 1 + (k - 1) * a_dim1], &akm1k);
	    akm1.r = q__1.r;
	    akm1.i = q__1.i; // , expr subst
	    c_div(&q__1, &a[k + k * a_dim1], &akm1k);
	    ak.r = q__1.r;
	    ak.i = q__1.i; // , expr subst
	    q__2.r = akm1.r * ak.r - akm1.i * ak.i;
	    q__2.i = akm1.r * ak.i + akm1.i * ak.r; // , expr subst
	    q__1.r = q__2.r - 1.f;
	    q__1.i = q__2.i - 0.f; // , expr subst
	    denom.r = q__1.r;
	    denom.i = q__1.i; // , expr subst
	    i__1 = *nrhs;
	    for (j = 1; j <= i__1; ++j) {
		c_div(&q__1, &b[k - 1 + j * b_dim1], &akm1k);
		bkm1.r = q__1.r;
		bkm1.i = q__1.i; // , expr subst
		c_div(&q__1, &b[k + j * b_dim1], &akm1k);
		bk.r = q__1.r;
		bk.i = q__1.i; // , expr subst
		i__2 = k - 1 + j * b_dim1;
		q__3.r = ak.r * bkm1.r - ak.i * bkm1.i;
		q__3.i = ak.r * bkm1.i + ak.i * bkm1.r; // , expr subst
		q__2.r = q__3.r - bk.r;
		q__2.i = q__3.i - bk.i; // , expr subst
		c_div(&q__1, &q__2, &denom);
		b[i__2].r = q__1.r;
		b[i__2].i = q__1.i; // , expr subst
		i__2 = k + j * b_dim1;
		q__3.r = akm1.r * bk.r - akm1.i * bk.i;
		q__3.i = akm1.r * bk.i + akm1.i * bk.r; // , expr subst
		q__2.r = q__3.r - bkm1.r;
		q__2.i = q__3.i - bkm1.i; // , expr subst
		c_div(&q__1, &q__2, &denom);
		b[i__2].r = q__1.r;
		b[i__2].i = q__1.i; // , expr subst
/* L20: */
	    }
	    k += -2;
	}
	goto L10;
L30:

/*        Next solve U**T *X = B, overwriting B with X. */

/*        K is the main loop index, increasing from 1 to N in steps of */
/*        1 or 2, depending on the size of the diagonal blocks. */

	k = 1;
L40:

/*        If K > N, exit from loop. */

	if (k > *n) {
	    goto L50;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block */

/*           Multiply by inv(U**T(K)), where U(K) is the transformation */
/*           stored in column K of A. */

	    if (k > 1) {
		i__1 = k - 1;
		q__1.r = -1.f;
		q__1.i = -0.f; // , expr subst
		cgemv_("Transpose", &i__1, nrhs, &q__1, &b[b_offset], ldb, &a[
			k * a_dim1 + 1], &c__1, &c_b1, &b[k + b_dim1], ldb);
	    }

/*           Interchange rows K and IPIV(K). */

	    kp = ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }
	    ++k;
	} else {

/*           2 x 2 diagonal block */

/*           Multiply by inv(U**T(K+1)), where U(K+1) is the transformation */
/*           stored in columns K and K+1 of A. */

	    if (k > 1) {
		i__1 = k - 1;
		q__1.r = -1.f;
		q__1.i = -0.f; // , expr subst
		cgemv_("Transpose", &i__1, nrhs, &q__1, &b[b_offset], ldb, &a[
			k * a_dim1 + 1], &c__1, &c_b1, &b[k + b_dim1], ldb);
		i__1 = k - 1;
		q__1.r = -1.f;
		q__1.i = -0.f; // , expr subst
		cgemv_("Transpose", &i__1, nrhs, &q__1, &b[b_offset], ldb, &a[
			(k + 1) * a_dim1 + 1], &c__1, &c_b1, &b[k + 1 +
			b_dim1], ldb);
	    }

/*           Interchange rows K and -IPIV(K) THEN K+1 and -IPIV(K+1). */

	    kp = -ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }
	    kp = -ipiv[k + 1];
	    if (kp != k + 1) {
		cswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }
	    k += 2;
	}
	goto L40;
L50:
	;
    } else {

/*        Solve A*X = B, where A = L*D*L**T. */

/*        First solve L*D*X = B, overwriting B with X. */

/*        K is the main loop index, increasing from 1 to N in steps of */
/*        1 or 2, depending on the size of the diagonal blocks. */

	k = 1;
L60:

/*        If K > N, exit from loop. */

	if (k > *n) {
	    goto L80;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block */

/*           Interchange rows K and IPIV(K). */

	    kp = ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }

/*           Multiply by inv(L(K)), where L(K) is the transformation */
/*           stored in column K of A. */

	    if (k < *n) {
		i__1 = *n - k;
		q__1.r = -1.f;
		q__1.i = -0.f; // , expr subst
		cgeru_(&i__1, nrhs, &q__1, &a[k + 1 + k * a_dim1], &c__1, &b[
			k + b_dim1], ldb, &b[k + 1 + b_dim1], ldb);
	    }

/*           Multiply by the inverse of the diagonal block. */

	    c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
	    cscal_(nrhs, &q__1, &b[k + b_dim1], ldb);
	    ++k;
	} else {

/*           2 x 2 diagonal block */

/*           Interchange rows K and -IPIV(K) THEN K+1 and -IPIV(K+1) */

	    kp = -ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }
	    kp = -ipiv[k + 1];
	    if (kp != k + 1) {
		cswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }

/*           Multiply by inv(L(K)), where L(K) is the transformation */
/*           stored in columns K and K+1 of A. */

	    if (k < *n - 1) {
		i__1 = *n - k - 1;
		q__1.r = -1.f;
		q__1.i = -0.f; // , expr subst
		cgeru_(&i__1, nrhs, &q__1, &a[k + 2 + k * a_dim1], &c__1, &b[
			k + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
		i__1 = *n - k - 1;
		q__1.r = -1.f;
		q__1.i = -0.f; // , expr subst
		cgeru_(&i__1, nrhs, &q__1, &a[k + 2 + (k + 1) * a_dim1], &
			c__1, &b[k + 1 + b_dim1], ldb, &b[k + 2 + b_dim1],
			ldb);
	    }

/*           Multiply by the inverse of the diagonal block. */

	    i__1 = k + 1 + k * a_dim1;
	    akm1k.r = a[i__1].r;
	    akm1k.i = a[i__1].i; // , expr subst
	    c_div(&q__1, &a[k + k * a_dim1], &akm1k);
	    akm1.r = q__1.r;
	    akm1.i = q__1.i; // , expr subst
	    c_div(&q__1, &a[k + 1 + (k + 1) * a_dim1], &akm1k);
	    ak.r = q__1.r;
	    ak.i = q__1.i; // , expr subst
	    q__2.r = akm1.r * ak.r - akm1.i * ak.i;
	    q__2.i = akm1.r * ak.i + akm1.i * ak.r; // , expr subst
	    q__1.r = q__2.r - 1.f;
	    q__1.i = q__2.i - 0.f; // , expr subst
	    denom.r = q__1.r;
	    denom.i = q__1.i; // , expr subst
	    i__1 = *nrhs;
	    for (j = 1; j <= i__1; ++j) {
		c_div(&q__1, &b[k + j * b_dim1], &akm1k);
		bkm1.r = q__1.r;
		bkm1.i = q__1.i; // , expr subst
		c_div(&q__1, &b[k + 1 + j * b_dim1], &akm1k);
		bk.r = q__1.r;
		bk.i = q__1.i; // , expr subst
		i__2 = k + j * b_dim1;
		q__3.r = ak.r * bkm1.r - ak.i * bkm1.i;
		q__3.i = ak.r * bkm1.i + ak.i * bkm1.r; // , expr subst
		q__2.r = q__3.r - bk.r;
		q__2.i = q__3.i - bk.i; // , expr subst
		c_div(&q__1, &q__2, &denom);
		b[i__2].r = q__1.r;
		b[i__2].i = q__1.i; // , expr subst
		i__2 = k + 1 + j * b_dim1;
		q__3.r = akm1.r * bk.r - akm1.i * bk.i;
		q__3.i = akm1.r * bk.i + akm1.i * bk.r; // , expr subst
		q__2.r = q__3.r - bkm1.r;
		q__2.i = q__3.i - bkm1.i; // , expr subst
		c_div(&q__1, &q__2, &denom);
		b[i__2].r = q__1.r;
		b[i__2].i = q__1.i; // , expr subst
/* L70: */
	    }
	    k += 2;
	}
	goto L60;
L80:

/*        Next solve L**T *X = B, overwriting B with X. */

/*        K is the main loop index, decreasing from N to 1 in steps of */
/*        1 or 2, depending on the size of the diagonal blocks. */

	k = *n;
L90:

/*        If K < 1, exit from loop. */

	if (k < 1) {
	    goto L100;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block */

/*           Multiply by inv(L**T(K)), where L(K) is the transformation */
/*           stored in column K of A. */

	    if (k < *n) {
		i__1 = *n - k;
		q__1.r = -1.f;
		q__1.i = -0.f; // , expr subst
		cgemv_("Transpose", &i__1, nrhs, &q__1, &b[k + 1 + b_dim1],
			ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b1, &b[k +
			b_dim1], ldb);
	    }

/*           Interchange rows K and IPIV(K). */

	    kp = ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }
	    --k;
	} else {

/*           2 x 2 diagonal block */

/*           Multiply by inv(L**T(K-1)), where L(K-1) is the transformation */
/*           stored in columns K-1 and K of A. */

	    if (k < *n) {
		i__1 = *n - k;
		q__1.r = -1.f;
		q__1.i = -0.f; // , expr subst
		cgemv_("Transpose", &i__1, nrhs, &q__1, &b[k + 1 + b_dim1],
			ldb, &a[k + 1 + k * a_dim1], &c__1, &c_b1, &b[k +
			b_dim1], ldb);
		i__1 = *n - k;
		q__1.r = -1.f;
		q__1.i = -0.f; // , expr subst
		cgemv_("Transpose", &i__1, nrhs, &q__1, &b[k + 1 + b_dim1],
			ldb, &a[k + 1 + (k - 1) * a_dim1], &c__1, &c_b1, &b[k
			- 1 + b_dim1], ldb);
	    }

/*           Interchange rows K and -IPIV(K) THEN K-1 and -IPIV(K-1) */

	    kp = -ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }
	    kp = -ipiv[k - 1];
	    if (kp != k - 1) {
		cswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }
	    k += -2;
	}
	goto L90;
L100:
	;
    }

    return 0;

/*     End of CSYTRS_ROOK */

}
/*! \brief
 * <pre>
 * Purpose
 * =======
 *    ilu_cdrop_row() - Drop some small rows from the previous
 *    supernode (L-part only).
 * </pre>
 *
 * Rows of the supernode's L-part whose norm falls below drop_tol (and,
 * optionally, extra rows chosen to meet a nonzero quota) are removed in
 * place: each dropped row is either swapped with, or (for MILU variants)
 * accumulated into, the last undropped row, and the storage is then
 * compacted.  Returns the number of rows dropped.
 */
int ilu_cdrop_row(
	superlu_options_t *options, /* options */
	int    first,	    /* index of the first column in the supernode */
	int    last,	    /* index of the last column in the supernode */
	double drop_tol,    /* dropping parameter */
	int    quota,	    /* maximum nonzero entries allowed */
	int    *nnzLj,	    /* in/out number of nonzeros in L(:, 1:last) */
	double *fill_tol,   /* in/out - on exit, fill_tol=-num_zero_pivots,
			     * does not change if options->ILU_MILU != SMILU1 */
	GlobalLU_t *Glu,    /* modified */
	float swork[],	    /* working space with minimum size last-first+1 */
	int iwork[],	    /* working space with minimum size m - n,
			     * used by the second dropping rule */
	int lastc	    /* if lastc == 0, there is nothing after the
			     * working supernode [first:last];
			     * if lastc == 1, there is one more column after
			     * the working supernode. */
    )
{
    register int i, j, k, m1;
    register int nzlc;		/* number of nonzeros in column last+1 */
    register int xlusup_first, xlsub_first;
    int m, n;			/* m x n is the size of the supernode */
    int r = 0;			/* number of dropped rows */
    register float *temp;	/* per-row norms; indexed [n..m-1] via offset */
    register complex *lusup = Glu->lusup;
    register int *lsub = Glu->lsub;
    register int *xlsub = Glu->xlsub;
    register int *xlusup = Glu->xlusup;
    register float d_max = 0.0, d_min = 1.0;  /* extrema of kept row norms */
    int    drop_rule = options->ILU_DropRule;
    milu_t milu = options->ILU_MILU;
    norm_t nrm = options->ILU_Norm;
    complex zero = {0.0, 0.0};	/* not referenced in this routine */
    complex one = {1.0, 0.0};
    complex none = {-1.0, 0.0};
    int i_1 = 1;
    int inc_diag;		/* inc_diag = m + 1 */
    int nzp = 0;		/* number of zero pivots */

    xlusup_first = xlusup[first];
    xlsub_first = xlsub[first];
    m = xlusup[first + 1] - xlusup_first;	/* rows in the supernode */
    n = last - first + 1;			/* columns in the supernode */
    m1 = m - 1;					/* index of last (accumulator) row */
    inc_diag = m + 1;				/* stride along the diagonal */
    nzlc = lastc ? (xlusup[last + 2] - xlusup[last + 1]) : 0;
    /* temp[i] is valid for i in [n, m-1]; swork is 0-based scratch */
    temp = swork - n;

    /* Quick return if nothing to do. */
    if (m == 0 || m == n || drop_rule == NODROP)
    {
	*nnzLj += m * n;
	return 0;
    }

    /* basic dropping: ILU(tau) */
    for (i = n; i <= m1; )
    {
	/* the average abs value of ith row */
	switch (nrm)
	{
	    case ONE_NORM:
		temp[i] = scasum_(&n, &lusup[xlusup_first + i], &m) / (double)n;
		break;
	    case TWO_NORM:
		temp[i] = scnrm2_(&n, &lusup[xlusup_first + i], &m)
		    / sqrt((double)n);
		break;
	    case INF_NORM:
	    default:
		k = icamax_(&n, &lusup[xlusup_first + i], &m) - 1;
		temp[i] = c_abs1(&lusup[xlusup_first + i + m * k]);
		break;
	}

	/* drop small entries due to drop_tol */
	if (drop_rule & DROP_BASIC && temp[i] < drop_tol)
	{
	    r++;
	    /* drop the current row and move the last undropped row here */
	    if (r > 1) /* add to last row */
	    {
		/* accumulate the sum (for MILU) */
		switch (milu)
		{
		    case SMILU_1:
		    case SMILU_2:
			caxpy_(&n, &one, &lusup[xlusup_first + i], &m,
				&lusup[xlusup_first + m - 1], &m);
			break;
		    case SMILU_3:
			/* SMILU-3 accumulates absolute values on the real part */
			for (j = 0; j < n; j++)
			    lusup[xlusup_first + (m - 1) + j * m].r +=
				    c_abs1(&lusup[xlusup_first + i + j * m]);
			break;
		    case SILU:
		    default:
			break;
		}
		ccopy_(&n, &lusup[xlusup_first + m1], &m,
		       &lusup[xlusup_first + i], &m);
	    } /* if (r > 1) */
	    else /* move to last row */
	    {
		cswap_(&n, &lusup[xlusup_first + m1], &m,
			&lusup[xlusup_first + i], &m);
		if (milu == SMILU_3)
		    for (j = 0; j < n; j++) {
			lusup[xlusup_first + m1 + j * m].r =
				c_abs1(&lusup[xlusup_first + m1 + j * m]);
			lusup[xlusup_first + m1 + j * m].i = 0.0;
		    }
	    }
	    lsub[xlsub_first + i] = lsub[xlsub_first + m1];
	    m1--;
	    continue;	/* re-examine the row just moved into slot i */
	} /* if dropping */
	else
	{
	    if (temp[i] > d_max) d_max = temp[i];
	    if (temp[i] < d_min) d_min = temp[i];
	}
	i++;
    } /* for */

    /* Secondary dropping: drop more rows according to the quota. */
    quota = ceil((double)quota / (double)n);
    if (drop_rule & DROP_SECONDARY && m - r > quota)
    {
	register double tol = d_max;

	/* Calculate the second dropping tolerance */
	if (quota > n)
	{
	    if (drop_rule & DROP_INTERP) /* by interpolation */
	    {
		d_max = 1.0 / d_max; d_min = 1.0 / d_min;
		tol = 1.0 / (d_max + (d_min - d_max) * quota / (m - n - r));
	    }
	    else /* by quick sort */
	    {
		register int *itemp = iwork - n;
		/* A is the file-scope norm array read by the _compare_
		 * qsort callback -- NOTE(review): assumes `static float *A`
		 * and _compare_ are declared elsewhere in this file. */
		A = temp;
		for (i = n; i <= m1; i++) itemp[i] = i;
		qsort(iwork, m1 - n + 1, sizeof(int), _compare_);
		tol = temp[iwork[quota]];
	    }
	}

	for (i = n; i <= m1; )
	{
	    if (temp[i] <= tol)
	    {
		register int j;
		r++;
		/* drop the current row and move the last undropped row here */
		if (r > 1) /* add to last row */
		{
		    /* accumulate the sum (for MILU) */
		    switch (milu)
		    {
			case SMILU_1:
			case SMILU_2:
			    caxpy_(&n, &one, &lusup[xlusup_first + i], &m,
				    &lusup[xlusup_first + m - 1], &m);
			    break;
			case SMILU_3:
			    for (j = 0; j < n; j++)
				lusup[xlusup_first + (m - 1) + j * m].r +=
					c_abs1(&lusup[xlusup_first + i + j * m]);
			    break;
			case SILU:
			default:
			    break;
		    }
		    ccopy_(&n, &lusup[xlusup_first + m1], &m,
			    &lusup[xlusup_first + i], &m);
		} /* if (r > 1) */
		else /* move to last row */
		{
		    cswap_(&n, &lusup[xlusup_first + m1], &m,
			    &lusup[xlusup_first + i], &m);
		    if (milu == SMILU_3)
			for (j = 0; j < n; j++) {
			    lusup[xlusup_first + m1 + j * m].r =
				    c_abs1(&lusup[xlusup_first + m1 + j * m]);
			    lusup[xlusup_first + m1 + j * m].i = 0.0;
			}
		}
		lsub[xlsub_first + i] = lsub[xlsub_first + m1];
		m1--;
		/* the norm of the row moved into slot i */
		temp[i] = temp[m1];

		continue;
	    }
	    i++;

	} /* for */

    } /* if secondary dropping */

    for (i = n; i < m; i++) temp[i] = 0.0;

    if (r == 0)
    {
	*nnzLj += m * n;
	return 0;
    }

    /* add dropped entries to the diagnal */
    if (milu != SILU)
    {
	register int j;
	complex t;
	for (j = 0; j < n; j++)
	{
	    /* t = MILU_ALPHA * (accumulated dropped mass for column j) */
	    cs_mult(&t, &lusup[xlusup_first + (m - 1) + j * m],
		     MILU_ALPHA);
	    switch (milu)
	    {
		case SMILU_1:
		    if ( !(c_eq(&t, &none)) )
		    {
			c_add(&t, &t, &one);
			cc_mult(&lusup[xlusup_first + j * inc_diag],
					  &lusup[xlusup_first + j * inc_diag],
					  &t);
		    }
		    else
		    {
			/* 1 + t == 0 would zero the pivot: scale by fill_tol
			 * instead and count it as a zero pivot */
			cs_mult(
				&lusup[xlusup_first + j * inc_diag],
				&lusup[xlusup_first + j * inc_diag],
				*fill_tol);
#ifdef DEBUG
			printf("[1] ZERO PIVOT: FILL col %d.\n", first + j);
			fflush(stdout);
#endif
			nzp++;
		    }
		    break;
		case SMILU_2:
		    cs_mult(&lusup[xlusup_first + j * inc_diag],
					  &lusup[xlusup_first + j * inc_diag],
					  1.0 + c_abs1(&t));
		    break;
		case SMILU_3:
		    c_add(&t, &t, &one);
		    cc_mult(&lusup[xlusup_first + j * inc_diag],
	                              &lusup[xlusup_first + j * inc_diag],
	                              &t);
		    break;
		case SILU:
		default:
		    break;
	    }
	}
	if (nzp > 0) *fill_tol = -nzp;
    }

    /* Remove dropped entries from the memory and fix the pointers. */
    m1 = m - r;		/* new row count of the supernode */
    for (j = 1; j < n; j++)
    {
	register int tmp1, tmp2;
	tmp1 = xlusup_first + j * m1;	/* compacted column start */
	tmp2 = xlusup_first + j * m;	/* original column start */
	for (i = 0; i < m1; i++)
	    lusup[i + tmp1] = lusup[i + tmp2];
    }
    /* shift the column after the supernode, if any */
    for (i = 0; i < nzlc; i++)
	lusup[xlusup_first + i + n * m1] = lusup[xlusup_first + i + n * m];
    for (i = 0; i < nzlc; i++)
	lsub[xlsub[last + 1] - r + i] = lsub[xlsub[last + 1] + i];
    /* each column j loses r entries; row-index arrays shrink by r */
    for (i = first + 1; i <= last + 1; i++)
    {
	xlusup[i] -= r * (i - first);
	xlsub[i] -= r;
    }
    if (lastc)
    {
	xlusup[last + 2] -= r * n;
	xlsub[last + 2] -= r;
    }

    *nnzLj += (m - r) * n;
    return r;
}
/* Subroutine */ int cgebak_(char *job, char *side, integer *n, integer *ilo,
	integer *ihi, real *scale, integer *m, complex *v, integer *ldv,
	integer *info)
{
    /* System generated locals */
    integer v_dim1, v_offset, i__1;

    /* Local variables */
    integer i__, k;
    real s;
    integer ii;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
	    complex *, integer *);
    logical leftv;
    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
	    *), xerbla_(char *, integer *);
    logical rightv;


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CGEBAK forms the right or left eigenvectors of a complex general */
/*  matrix by backward transformation on the computed eigenvectors of the */
/*  balanced matrix output by CGEBAL. */

/*  Arguments */
/*  ========= */

/*  JOB     (input) CHARACTER*1 */
/*          Specifies the type of backward transformation required: */
/*          = 'N', do nothing, return immediately; */
/*          = 'P', do backward transformation for permutation only; */
/*          = 'S', do backward transformation for scaling only; */
/*          = 'B', do backward transformations for both permutation and */
/*                 scaling. */
/*          JOB must be the same as the argument JOB supplied to CGEBAL. */

/*  SIDE    (input) CHARACTER*1 */
/*          = 'R':  V contains right eigenvectors; */
/*          = 'L':  V contains left eigenvectors. */

/*  N       (input) INTEGER */
/*          The number of rows of the matrix V.  N >= 0. */

/*  ILO     (input) INTEGER */
/*  IHI     (input) INTEGER */
/*          The integers ILO and IHI determined by CGEBAL. */
/*          1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */

/*  SCALE   (input) REAL array, dimension (N) */
/*          Details of the permutation and scaling factors, as returned */
/*          by CGEBAL. */

/*  M       (input) INTEGER */
/*          The number of columns of the matrix V.  M >= 0. */

/*  V       (input/output) COMPLEX array, dimension (LDV,M) */
/*          On entry, the matrix of right or left eigenvectors to be */
/*          transformed, as returned by CHSEIN or CTREVC. */
/*          On exit, V is overwritten by the transformed eigenvectors. */

/*  LDV     (input) INTEGER */
/*          The leading dimension of the array V. LDV >= max(1,N). */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */

/*  ===================================================================== */

/*     .. Executable Statements .. */

/*     Decode and Test the input parameters */

    /* Parameter adjustments: shift to 1-based Fortran-style indexing */
    --scale;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;

    /* Function Body */
    rightv = lsame_(side, "R");
    leftv = lsame_(side, "L");

    *info = 0;
    if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
	    && ! lsame_(job, "B")) {
	*info = -1;
    } else if (! rightv && ! leftv) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*ilo < 1 || *ilo > max(1,*n)) {
	*info = -4;
    } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
	*info = -5;
    } else if (*m < 0) {
	*info = -7;
    } else if (*ldv < max(1,*n)) {
	*info = -9;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGEBAK", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }
    if (*m == 0) {
	return 0;
    }
    if (lsame_(job, "N")) {
	return 0;
    }

    if (*ilo == *ihi) {
	goto L30;	/* no balanced interior: skip the scaling pass */
    }

/*     Backward balance */

    if (lsame_(job, "S") || lsame_(job, "B")) {

	if (rightv) {
	    /* right eigenvectors: multiply row i by SCALE(i) */
	    i__1 = *ihi;
	    for (i__ = *ilo; i__ <= i__1; ++i__) {
		s = scale[i__];
		csscal_(m, &s, &v[i__ + v_dim1], ldv);
/* L10: */
	    }
	}

	if (leftv) {
	    /* left eigenvectors: multiply row i by 1/SCALE(i) */
	    i__1 = *ihi;
	    for (i__ = *ilo; i__ <= i__1; ++i__) {
		s = 1.f / scale[i__];
		csscal_(m, &s, &v[i__ + v_dim1], ldv);
/* L20: */
	    }
	}

    }

/*     Backward permutation */

/*     For  I = ILO-1 step -1 until 1, */
/*              IHI+1 step 1 until N do -- */

L30:
    if (lsame_(job, "P") || lsame_(job, "B")) {
	if (rightv) {
	    i__1 = *n;
	    for (ii = 1; ii <= i__1; ++ii) {
		i__ = ii;
		if (i__ >= *ilo && i__ <= *ihi) {
		    goto L40;	/* interior rows were not permuted */
		}
		if (i__ < *ilo) {
		    i__ = *ilo - ii;	/* walk ILO-1 down to 1 */
		}
		/* SCALE holds the permutation index as a real; the
		   real->integer conversion recovers it */
		k = scale[i__];
		if (k == i__) {
		    goto L40;
		}
		cswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
L40:
		;
	    }
	}

	if (leftv) {
	    i__1 = *n;
	    for (ii = 1; ii <= i__1; ++ii) {
		i__ = ii;
		if (i__ >= *ilo && i__ <= *ihi) {
		    goto L50;
		}
		if (i__ < *ilo) {
		    i__ = *ilo - ii;
		}
		k = scale[i__];
		if (k == i__) {
		    goto L50;
		}
		cswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
L50:
		;
	    }
	}
    }

    return 0;

/*     End of CGEBAK */

} /* cgebak_ */
/* Subroutine */ int cgbtrs_(char *trans, integer *n, integer *kl, integer *
	ku, integer *nrhs, complex *ab, integer *ldab, integer *ipiv, complex
	*b, integer *ldb, integer *info)
{
/*  -- LAPACK routine (version 3.0) --
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       September 30, 1994

    Purpose
    =======

    CGBTRS solves a system of linear equations
       A * X = B,  A**T * X = B,  or  A**H * X = B
    with a general band matrix A using the LU factorization computed
    by CGBTRF.

    Arguments
    =========

    TRANS   (input) CHARACTER*1
            Specifies the form of the system of equations.
            = 'N':  A * X = B     (No transpose)
            = 'T':  A**T * X = B  (Transpose)
            = 'C':  A**H * X = B  (Conjugate transpose)

    N       (input) INTEGER
            The order of the matrix A.  N >= 0.

    KL      (input) INTEGER
            The number of subdiagonals within the band of A.  KL >= 0.

    KU      (input) INTEGER
            The number of superdiagonals within the band of A.  KU >= 0.

    NRHS    (input) INTEGER
            The number of right hand sides, i.e., the number of columns
            of the matrix B.  NRHS >= 0.

    AB      (input) COMPLEX array, dimension (LDAB,N)
            Details of the LU factorization of the band matrix A, as
            computed by CGBTRF.  U is stored as an upper triangular band
            matrix with KL+KU superdiagonals in rows 1 to KL+KU+1, and
            the multipliers used during the factorization are stored in
            rows KL+KU+2 to 2*KL+KU+1.

    LDAB    (input) INTEGER
            The leading dimension of the array AB.  LDAB >= 2*KL+KU+1.

    IPIV    (input) INTEGER array, dimension (N)
            The pivot indices; for 1 <= i <= N, row i of the matrix was
            interchanged with row IPIV(i).

    B       (input/output) COMPLEX array, dimension (LDB,NRHS)
            On entry, the right hand side matrix B.
            On exit, the solution matrix X.

    LDB     (input) INTEGER
            The leading dimension of the array B.  LDB >= max(1,N).

    INFO    (output) INTEGER
            = 0:  successful exit
            < 0: if INFO = -i, the i-th argument had an illegal value

    =====================================================================

       Test the input parameters.

       Parameter adjustments */
    /* Table of constant values */
    static complex c_b1 = {1.f,0.f};
    static integer c__1 = 1;

    /* System generated locals */
    integer ab_dim1, ab_offset, b_dim1, b_offset, i__1, i__2, i__3;
    complex q__1;
    /* Local variables */
    static integer i__, j, l;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
	    , complex *, integer *, complex *, integer *, complex *, complex *
	    , integer *), cgeru_(integer *, integer *, complex *,
	    complex *, integer *, complex *, integer *, complex *, integer *),
	     cswap_(integer *, complex *, integer *, complex *, integer *),
	    ctbsv_(char *, char *, char *, integer *, integer *, complex *,
	    integer *, complex *, integer *);
    static logical lnoti;
    static integer kd, lm;
    extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
	    xerbla_(char *, integer *);
    static logical notran;
/* 1-based Fortran-style indexing helpers for B and AB */
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]
#define ab_subscr(a_1,a_2) (a_2)*ab_dim1 + a_1
#define ab_ref(a_1,a_2) ab[ab_subscr(a_1,a_2)]

    ab_dim1 = *ldab;
    ab_offset = 1 + ab_dim1 * 1;
    ab -= ab_offset;
    --ipiv;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;

    /* Function Body */
    *info = 0;
    notran = lsame_(trans, "N");
    if (! notran && ! lsame_(trans, "T") && ! lsame_(
	    trans, "C")) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*kl < 0) {
	*info = -3;
    } else if (*ku < 0) {
	*info = -4;
    } else if (*nrhs < 0) {
	*info = -5;
    } else if (*ldab < (*kl << 1) + *ku + 1) {
	*info = -7;
    } else if (*ldb < max(1,*n)) {
	*info = -10;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGBTRS", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0 || *nrhs == 0) {
	return 0;
    }

    kd = *ku + *kl + 1;		/* row of AB holding the main diagonal of U */
    lnoti = *kl > 0;		/* L is nontrivial only if there are subdiagonals */

    if (notran) {

/*        Solve  A*X = B.

          Solve L*X = B, overwriting B with X.

          L is represented as a product of permutations and unit lower
          triangular matrices L = P(1) * L(1) * ... * P(n-1) * L(n-1),
          where each transformation L(i) is a rank-one modification of
          the identity matrix. */

	if (lnoti) {
	    i__1 = *n - 1;
	    for (j = 1; j <= i__1; ++j) {
/* Computing MIN */
		i__2 = *kl, i__3 = *n - j;
		lm = min(i__2,i__3);	/* active subdiagonal length */
		l = ipiv[j];
		if (l != j) {
		    cswap_(nrhs, &b_ref(l, 1), ldb, &b_ref(j, 1), ldb);
		}
		q__1.r = -1.f, q__1.i = 0.f;
		cgeru_(&lm, nrhs, &q__1, &ab_ref(kd + 1, j), &c__1, &b_ref(j,
			1), ldb, &b_ref(j + 1, 1), ldb);
/* L10: */
	    }
	}

	i__1 = *nrhs;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Solve U*X = B, overwriting B with X. */

	    i__2 = *kl + *ku;
	    ctbsv_("Upper", "No transpose", "Non-unit", n, &i__2, &ab[
		    ab_offset], ldab, &b_ref(1, i__), &c__1);
/* L20: */
	}

    } else if (lsame_(trans, "T")) {

/*        Solve A**T * X = B. */

	i__1 = *nrhs;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Solve U**T * X = B, overwriting B with X. */

	    i__2 = *kl + *ku;
	    ctbsv_("Upper", "Transpose", "Non-unit", n, &i__2, &ab[ab_offset],
		     ldab, &b_ref(1, i__), &c__1);
/* L30: */
	}

/*        Solve L**T * X = B, overwriting B with X. */

	if (lnoti) {
	    for (j = *n - 1; j >= 1; --j) {
/* Computing MIN */
		i__1 = *kl, i__2 = *n - j;
		lm = min(i__1,i__2);
		q__1.r = -1.f, q__1.i = 0.f;
		cgemv_("Transpose", &lm, nrhs, &q__1, &b_ref(j + 1, 1), ldb, &
			ab_ref(kd + 1, j), &c__1, &c_b1, &b_ref(j, 1), ldb);
		l = ipiv[j];
		if (l != j) {
		    cswap_(nrhs, &b_ref(l, 1), ldb, &b_ref(j, 1), ldb);
		}
/* L40: */
	    }
	}
    } else {

/*        Solve A**H * X = B. */

	i__1 = *nrhs;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Solve U**H * X = B, overwriting B with X. */

	    i__2 = *kl + *ku;
	    ctbsv_("Upper", "Conjugate transpose", "Non-unit", n, &i__2, &ab[
		    ab_offset], ldab, &b_ref(1, i__), &c__1);
/* L50: */
	}

/*        Solve L**H * X = B, overwriting B with X. */

	if (lnoti) {
	    for (j = *n - 1; j >= 1; --j) {
/* Computing MIN */
		i__1 = *kl, i__2 = *n - j;
		lm = min(i__1,i__2);
		/* conjugate B row j so a plain conjugate-transpose GEMV
		   realizes the L**H update, then conjugate back */
		clacgv_(nrhs, &b_ref(j, 1), ldb);
		q__1.r = -1.f, q__1.i = 0.f;
		cgemv_("Conjugate transpose", &lm, nrhs, &q__1, &b_ref(j + 1,
			1), ldb, &ab_ref(kd + 1, j), &c__1, &c_b1, &b_ref(j,
			1), ldb);
		clacgv_(nrhs, &b_ref(j, 1), ldb);
		l = ipiv[j];
		if (l != j) {
		    cswap_(nrhs, &b_ref(l, 1), ldb, &b_ref(j, 1), ldb);
		}
/* L60: */
	    }
	}
    }
    return 0;

/*     End of CGBTRS */

} /* cgbtrs_ */
/* Subroutine */ int csytri_rook_(char *uplo, integer *n, complex *a, integer
	*lda, integer *ipiv, complex *work, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    complex q__1, q__2, q__3;

    /* Builtin functions */
    void c_div(complex *, complex *, complex *);

    /* Local variables */
    complex d__;
    integer k;
    complex t, ak;
    integer kp;
    complex akp1, temp, akkp1;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
	    complex *, integer *);
    extern /* Complex */ VOID cdotu_f2c_(complex *, integer *, complex *,
	    integer *, complex *, integer *);
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
	    complex *, integer *);
    integer kstep;
    logical upper;
    extern /* Subroutine */ int csymv_(char *, integer *, complex *, complex *
	    , integer *, complex *, integer *, complex *, complex *, integer *
	    ), xerbla_(char *, integer *);


/*  -- LAPACK computational routine (version 3.4.0) -- */
/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/*     November 2011 */

/*  Purpose: computes the inverse of a complex symmetric matrix in place */
/*  from the block L*D*L**T / U*D*U**T factorization with rook pivoting */
/*  (IPIV > 0 marks a 1x1 pivot block; a negative entry marks half of a */
/*  2x2 pivot block).  On singular D the routine returns with INFO set */
/*  to the index of the zero pivot. */

/*     .. Executable Statements .. */

/*     Test the input parameters. */

    /* Parameter adjustments: shift to 1-based Fortran-style indexing */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --ipiv;
    --work;

    /* Function Body */
    *info = 0;
    upper = lsame_(uplo, "U");
    if (! upper && ! lsame_(uplo, "L")) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*n)) {
	*info = -4;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CSYTRI_ROOK", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }

/*     Check that the diagonal matrix D is nonsingular. */

    if (upper) {

/*        Upper triangular storage: examine D from bottom to top */

	for (*info = *n; *info >= 1; --(*info)) {
	    i__1 = *info + *info * a_dim1;
	    /* a zero diagonal in a 1x1 pivot block makes D singular */
	    if (ipiv[*info] > 0 && (a[i__1].r == 0.f && a[i__1].i == 0.f)) {
		return 0;
	    }
/* L10: */
	}
    } else {

/*        Lower triangular storage: examine D from top to bottom. */

	i__1 = *n;
	for (*info = 1; *info <= i__1; ++(*info)) {
	    i__2 = *info + *info * a_dim1;
	    if (ipiv[*info] > 0 && (a[i__2].r == 0.f && a[i__2].i == 0.f)) {
		return 0;
	    }
/* L20: */
	}
    }
    *info = 0;

    if (upper) {

/*        Compute inv(A) from the factorization A = U*D*U**T. */

/*        K is the main loop index, increasing from 1 to N in steps of */
/*        1 or 2, depending on the size of the diagonal blocks. */

	k = 1;
L30:

/*        If K > N, exit from loop. */

	if (k > *n) {
	    goto L40;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block */

/*           Invert the diagonal block. */

	    i__1 = k + k * a_dim1;
	    c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
	    a[i__1].r = q__1.r;
	    a[i__1].i = q__1.i;

/*           Compute column K of the inverse. */

	    if (k > 1) {
		i__1 = k - 1;
		ccopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
		i__1 = k - 1;
		q__1.r = -1.f;
		q__1.i = -0.f;
		csymv_(uplo, &i__1, &q__1, &a[a_offset], lda, &work[1], &c__1,
			 &c_b2, &a[k * a_dim1 + 1], &c__1);
		/* diagonal correction: A(k,k) -= work**T * A(1:k-1,k) */
		i__1 = k + k * a_dim1;
		i__2 = k + k * a_dim1;
		i__3 = k - 1;
		cdotu_f2c_(&q__2, &i__3, &work[1], &c__1, &a[k * a_dim1 + 1],
			&c__1);
		q__1.r = a[i__2].r - q__2.r;
		q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r;
		a[i__1].i = q__1.i;
	    }
	    kstep = 1;
	} else {

/*           2 x 2 diagonal block */

/*           Invert the diagonal block using the explicit 2x2 formula, */
/*           with entries pre-divided by the off-diagonal t = A(k,k+1) */
/*           for scaling. */

	    i__1 = k + (k + 1) * a_dim1;
	    t.r = a[i__1].r;
	    t.i = a[i__1].i;
	    c_div(&q__1, &a[k + k * a_dim1], &t);
	    ak.r = q__1.r;
	    ak.i = q__1.i;
	    c_div(&q__1, &a[k + 1 + (k + 1) * a_dim1], &t);
	    akp1.r = q__1.r;
	    akp1.i = q__1.i;
	    c_div(&q__1, &a[k + (k + 1) * a_dim1], &t);
	    akkp1.r = q__1.r;
	    akkp1.i = q__1.i;
	    /* d = t * (ak*akp1 - 1) : scaled determinant of the block */
	    q__3.r = ak.r * akp1.r - ak.i * akp1.i;
	    q__3.i = ak.r * akp1.i + ak.i * akp1.r;
	    q__2.r = q__3.r - 1.f;
	    q__2.i = q__3.i - 0.f;
	    q__1.r = t.r * q__2.r - t.i * q__2.i;
	    q__1.i = t.r * q__2.i + t.i * q__2.r;
	    d__.r = q__1.r;
	    d__.i = q__1.i;
	    i__1 = k + k * a_dim1;
	    c_div(&q__1, &akp1, &d__);
	    a[i__1].r = q__1.r;
	    a[i__1].i = q__1.i;
	    i__1 = k + 1 + (k + 1) * a_dim1;
	    c_div(&q__1, &ak, &d__);
	    a[i__1].r = q__1.r;
	    a[i__1].i = q__1.i;
	    i__1 = k + (k + 1) * a_dim1;
	    q__2.r = -akkp1.r;
	    q__2.i = -akkp1.i;
	    c_div(&q__1, &q__2, &d__);
	    a[i__1].r = q__1.r;
	    a[i__1].i = q__1.i;

/*           Compute columns K and K+1 of the inverse. */

	    if (k > 1) {
		i__1 = k - 1;
		ccopy_(&i__1, &a[k * a_dim1 + 1], &c__1, &work[1], &c__1);
		i__1 = k - 1;
		q__1.r = -1.f;
		q__1.i = -0.f;
		csymv_(uplo, &i__1, &q__1, &a[a_offset], lda, &work[1], &c__1,
			 &c_b2, &a[k * a_dim1 + 1], &c__1);
		i__1 = k + k * a_dim1;
		i__2 = k + k * a_dim1;
		i__3 = k - 1;
		cdotu_f2c_(&q__2, &i__3, &work[1], &c__1, &a[k * a_dim1 + 1],
			&c__1);
		q__1.r = a[i__2].r - q__2.r;
		q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r;
		a[i__1].i = q__1.i;
		/* off-diagonal correction between columns k and k+1 */
		i__1 = k + (k + 1) * a_dim1;
		i__2 = k + (k + 1) * a_dim1;
		i__3 = k - 1;
		cdotu_f2c_(&q__2, &i__3, &a[k * a_dim1 + 1], &c__1, &a[(k + 1)
			 * a_dim1 + 1], &c__1);
		q__1.r = a[i__2].r - q__2.r;
		q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r;
		a[i__1].i = q__1.i;
		i__1 = k - 1;
		ccopy_(&i__1, &a[(k + 1) * a_dim1 + 1], &c__1, &work[1], &
			c__1);
		i__1 = k - 1;
		q__1.r = -1.f;
		q__1.i = -0.f;
		csymv_(uplo, &i__1, &q__1, &a[a_offset], lda, &work[1], &c__1,
			 &c_b2, &a[(k + 1) * a_dim1 + 1], &c__1);
		i__1 = k + 1 + (k + 1) * a_dim1;
		i__2 = k + 1 + (k + 1) * a_dim1;
		i__3 = k - 1;
		cdotu_f2c_(&q__2, &i__3, &work[1], &c__1, &a[(k + 1) * a_dim1
			+ 1], &c__1);
		q__1.r = a[i__2].r - q__2.r;
		q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r;
		a[i__1].i = q__1.i;
	    }
	    kstep = 2;
	}

	if (kstep == 1) {

/*           Interchange rows and columns K and IPIV(K) in the leading */
/*           submatrix A(1:k+1,1:k+1) */

	    kp = ipiv[k];
	    if (kp != k) {
		if (kp > 1) {
		    i__1 = kp - 1;
		    cswap_(&i__1, &a[k * a_dim1 + 1], &c__1, &a[kp * a_dim1 +
			    1], &c__1);
		}
		i__1 = k - kp - 1;
		cswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + (kp + 1)
			 * a_dim1], lda);
		/* swap the two diagonal entries */
		i__1 = k + k * a_dim1;
		temp.r = a[i__1].r;
		temp.i = a[i__1].i;
		i__1 = k + k * a_dim1;
		i__2 = kp + kp * a_dim1;
		a[i__1].r = a[i__2].r;
		a[i__1].i = a[i__2].i;
		i__1 = kp + kp * a_dim1;
		a[i__1].r = temp.r;
		a[i__1].i = temp.i;
	    }
	} else {

/*           Interchange rows and columns K and K+1 with -IPIV(K) and */
/*           -IPIV(K+1) in the leading submatrix A(1:k+1,1:k+1) */

	    kp = -ipiv[k];
	    if (kp != k) {
		if (kp > 1) {
		    i__1 = kp - 1;
		    cswap_(&i__1, &a[k * a_dim1 + 1], &c__1, &a[kp * a_dim1 +
			    1], &c__1);
		}
		i__1 = k - kp - 1;
		cswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + (kp + 1)
			 * a_dim1], lda);
		i__1 = k + k * a_dim1;
		temp.r = a[i__1].r;
		temp.i = a[i__1].i;
		i__1 = k + k * a_dim1;
		i__2 = kp + kp * a_dim1;
		a[i__1].r = a[i__2].r;
		a[i__1].i = a[i__2].i;
		i__1 = kp + kp * a_dim1;
		a[i__1].r = temp.r;
		a[i__1].i = temp.i;
		/* also swap the (k, k+1) coupling entry */
		i__1 = k + (k + 1) * a_dim1;
		temp.r = a[i__1].r;
		temp.i = a[i__1].i;
		i__1 = k + (k + 1) * a_dim1;
		i__2 = kp + (k + 1) * a_dim1;
		a[i__1].r = a[i__2].r;
		a[i__1].i = a[i__2].i;
		i__1 = kp + (k + 1) * a_dim1;
		a[i__1].r = temp.r;
		a[i__1].i = temp.i;
	    }

	    ++k;
	    kp = -ipiv[k];
	    if (kp != k) {
		if (kp > 1) {
		    i__1 = kp - 1;
		    cswap_(&i__1, &a[k * a_dim1 + 1], &c__1, &a[kp * a_dim1 +
			    1], &c__1);
		}
		i__1 = k - kp - 1;
		cswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + (kp + 1)
			 * a_dim1], lda);
		i__1 = k + k * a_dim1;
		temp.r = a[i__1].r;
		temp.i = a[i__1].i;
		i__1 = k + k * a_dim1;
		i__2 = kp + kp * a_dim1;
		a[i__1].r = a[i__2].r;
		a[i__1].i = a[i__2].i;
		i__1 = kp + kp * a_dim1;
		a[i__1].r = temp.r;
		a[i__1].i = temp.i;
	    }
	}

	++k;
	goto L30;
L40:
	;
    } else {

/*        Compute inv(A) from the factorization A = L*D*L**T. */

/*        K is the main loop index, increasing from 1 to N in steps of */
/*        1 or 2, depending on the size of the diagonal blocks. */

	k = *n;
L50:

/*        If K < 1, exit from loop. */

	if (k < 1) {
	    goto L60;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block */

/*           Invert the diagonal block. */

	    i__1 = k + k * a_dim1;
	    c_div(&q__1, &c_b1, &a[k + k * a_dim1]);
	    a[i__1].r = q__1.r;
	    a[i__1].i = q__1.i;

/*           Compute column K of the inverse. */

	    if (k < *n) {
		i__1 = *n - k;
		ccopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
		i__1 = *n - k;
		q__1.r = -1.f;
		q__1.i = -0.f;
		csymv_(uplo, &i__1, &q__1, &a[k + 1 + (k + 1) * a_dim1], lda,
			&work[1], &c__1, &c_b2, &a[k + 1 + k * a_dim1], &c__1);
		i__1 = k + k * a_dim1;
		i__2 = k + k * a_dim1;
		i__3 = *n - k;
		cdotu_f2c_(&q__2, &i__3, &work[1], &c__1, &a[k + 1 + k *
			a_dim1], &c__1);
		q__1.r = a[i__2].r - q__2.r;
		q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r;
		a[i__1].i = q__1.i;
	    }
	    kstep = 1;
	} else {

/*           2 x 2 diagonal block */

/*           Invert the diagonal block (pivot pair is rows K-1,K here). */

	    i__1 = k + (k - 1) * a_dim1;
	    t.r = a[i__1].r;
	    t.i = a[i__1].i;
	    c_div(&q__1, &a[k - 1 + (k - 1) * a_dim1], &t);
	    ak.r = q__1.r;
	    ak.i = q__1.i;
	    c_div(&q__1, &a[k + k * a_dim1], &t);
	    akp1.r = q__1.r;
	    akp1.i = q__1.i;
	    c_div(&q__1, &a[k + (k - 1) * a_dim1], &t);
	    akkp1.r = q__1.r;
	    akkp1.i = q__1.i;
	    /* d = t * (ak*akp1 - 1) : scaled determinant of the block */
	    q__3.r = ak.r * akp1.r - ak.i * akp1.i;
	    q__3.i = ak.r * akp1.i + ak.i * akp1.r;
	    q__2.r = q__3.r - 1.f;
	    q__2.i = q__3.i - 0.f;
	    q__1.r = t.r * q__2.r - t.i * q__2.i;
	    q__1.i = t.r * q__2.i + t.i * q__2.r;
	    d__.r = q__1.r;
	    d__.i = q__1.i;
	    i__1 = k - 1 + (k - 1) * a_dim1;
	    c_div(&q__1, &akp1, &d__);
	    a[i__1].r = q__1.r;
	    a[i__1].i = q__1.i;
	    i__1 = k + k * a_dim1;
	    c_div(&q__1, &ak, &d__);
	    a[i__1].r = q__1.r;
	    a[i__1].i = q__1.i;
	    i__1 = k + (k - 1) * a_dim1;
	    q__2.r = -akkp1.r;
	    q__2.i = -akkp1.i;
	    c_div(&q__1, &q__2, &d__);
	    a[i__1].r = q__1.r;
	    a[i__1].i = q__1.i;

/*           Compute columns K-1 and K of the inverse. */

	    if (k < *n) {
		i__1 = *n - k;
		ccopy_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &work[1], &c__1);
		i__1 = *n - k;
		q__1.r = -1.f;
		q__1.i = -0.f;
		csymv_(uplo, &i__1, &q__1, &a[k + 1 + (k + 1) * a_dim1], lda,
			&work[1], &c__1, &c_b2, &a[k + 1 + k * a_dim1], &c__1);
		i__1 = k + k * a_dim1;
		i__2 = k + k * a_dim1;
		i__3 = *n - k;
		cdotu_f2c_(&q__2, &i__3, &work[1], &c__1, &a[k + 1 + k *
			a_dim1], &c__1);
		q__1.r = a[i__2].r - q__2.r;
		q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r;
		a[i__1].i = q__1.i;
		/* off-diagonal correction between columns k-1 and k */
		i__1 = k + (k - 1) * a_dim1;
		i__2 = k + (k - 1) * a_dim1;
		i__3 = *n - k;
		cdotu_f2c_(&q__2, &i__3, &a[k + 1 + k * a_dim1], &c__1, &a[k
			+ 1 + (k - 1) * a_dim1], &c__1);
		q__1.r = a[i__2].r - q__2.r;
		q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r;
		a[i__1].i = q__1.i;
		i__1 = *n - k;
		ccopy_(&i__1, &a[k + 1 + (k - 1) * a_dim1], &c__1, &work[1], &
			c__1);
		i__1 = *n - k;
		q__1.r = -1.f;
		q__1.i = -0.f;
		csymv_(uplo, &i__1, &q__1, &a[k + 1 + (k + 1) * a_dim1], lda,
			&work[1], &c__1, &c_b2, &a[k + 1 + (k - 1) * a_dim1],
			&c__1);
		i__1 = k - 1 + (k - 1) * a_dim1;
		i__2 = k - 1 + (k - 1) * a_dim1;
		i__3 = *n - k;
		cdotu_f2c_(&q__2, &i__3, &work[1], &c__1, &a[k + 1 + (k - 1) *
			 a_dim1], &c__1);
		q__1.r = a[i__2].r - q__2.r;
		q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r;
		a[i__1].i = q__1.i;
	    }
	    kstep = 2;
	}

	if (kstep == 1) {

/*           Interchange rows and columns K and IPIV(K) in the trailing */
/*           submatrix A(k-1:n,k-1:n) */

	    kp = ipiv[k];
	    if (kp != k) {
		if (kp < *n) {
		    i__1 = *n - kp;
		    cswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + 1 +
			    kp * a_dim1], &c__1);
		}
		i__1 = kp - k - 1;
		cswap_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &a[kp + (k + 1) *
			 a_dim1], lda);
		/* swap the two diagonal entries */
		i__1 = k + k * a_dim1;
		temp.r = a[i__1].r;
		temp.i = a[i__1].i;
		i__1 = k + k * a_dim1;
		i__2 = kp + kp * a_dim1;
		a[i__1].r = a[i__2].r;
		a[i__1].i = a[i__2].i;
		i__1 = kp + kp * a_dim1;
		a[i__1].r = temp.r;
		a[i__1].i = temp.i;
	    }
	} else {

/*           Interchange rows and columns K and K-1 with -IPIV(K) and */
/*           -IPIV(K-1) in the trailing submatrix A(k-1:n,k-1:n) */

	    kp = -ipiv[k];
	    if (kp != k) {
		if (kp < *n) {
		    i__1 = *n - kp;
		    cswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + 1 +
			    kp * a_dim1], &c__1);
		}
		i__1 = kp - k - 1;
		cswap_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &a[kp + (k + 1) *
			 a_dim1], lda);
		i__1 = k + k * a_dim1;
		temp.r = a[i__1].r;
		temp.i = a[i__1].i;
		i__1 = k + k * a_dim1;
		i__2 = kp + kp * a_dim1;
		a[i__1].r = a[i__2].r;
		a[i__1].i = a[i__2].i;
		i__1 = kp + kp * a_dim1;
		a[i__1].r = temp.r;
		a[i__1].i = temp.i;
		/* also swap the (k, k-1) coupling entry */
		i__1 = k + (k - 1) * a_dim1;
		temp.r = a[i__1].r;
		temp.i = a[i__1].i;
		i__1 = k + (k - 1) * a_dim1;
		i__2 = kp + (k - 1) * a_dim1;
		a[i__1].r = a[i__2].r;
		a[i__1].i = a[i__2].i;
		i__1 = kp + (k - 1) * a_dim1;
		a[i__1].r = temp.r;
		a[i__1].i = temp.i;
	    }

	    --k;
	    kp = -ipiv[k];
	    if (kp != k) {
		if (kp < *n) {
		    i__1 = *n - kp;
		    cswap_(&i__1, &a[kp + 1 + k * a_dim1], &c__1, &a[kp + 1 +
			    kp * a_dim1], &c__1);
		}
		i__1 = kp - k - 1;
		cswap_(&i__1, &a[k + 1 + k * a_dim1], &c__1, &a[kp + (k + 1) *
			 a_dim1], lda);
		i__1 = k + k * a_dim1;
		temp.r = a[i__1].r;
		temp.i = a[i__1].i;
		i__1 = k + k * a_dim1;
		i__2 = kp + kp * a_dim1;
		a[i__1].r = a[i__2].r;
		a[i__1].i = a[i__2].i;
		i__1 = kp + kp * a_dim1;
		a[i__1].r = temp.r;
		a[i__1].i = temp.i;
	    }
	}

	--k;
	goto L50;
L60:
	;
    }

    return 0;

/*     End of CSYTRI_ROOK */

}
/* Subroutine */ int csyswapr_(char *uplo, integer *n, complex *a, integer * lda, integer *i1, integer *i2) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ integer i__; complex tmp; extern logical lsame_(char *, char *); extern /* Subroutine */ int cswap_(integer *, complex *, integer *, complex *, integer *); logical upper; /* -- LAPACK auxiliary routine (version 3.4.0) -- */ /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */ /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */ /* November 2011 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* ===================================================================== */ /* .. */ /* .. Local Scalars .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Executable Statements .. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1; a -= a_offset; /* Function Body */ upper = lsame_(uplo, "U"); if (upper) { /* UPPER */ /* first swap */ /* - swap column I1 and I2 from I1 to I1-1 */ i__1 = *i1 - 1; cswap_(&i__1, &a[*i1 * a_dim1 + 1], &c__1, &a[*i2 * a_dim1 + 1], & c__1); /* second swap : */ /* - swap A(I1,I1) and A(I2,I2) */ /* - swap row I1 from I1+1 to I2-1 with col I2 from I1+1 to I2-1 */ i__1 = *i1 + *i1 * a_dim1; tmp.r = a[i__1].r; tmp.i = a[i__1].i; // , expr subst i__1 = *i1 + *i1 * a_dim1; i__2 = *i2 + *i2 * a_dim1; a[i__1].r = a[i__2].r; a[i__1].i = a[i__2].i; // , expr subst i__1 = *i2 + *i2 * a_dim1; a[i__1].r = tmp.r; a[i__1].i = tmp.i; // , expr subst i__1 = *i2 - *i1 - 1; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = *i1 + (*i1 + i__) * a_dim1; tmp.r = a[i__2].r; tmp.i = a[i__2].i; // , expr subst i__2 = *i1 + (*i1 + i__) * a_dim1; i__3 = *i1 + i__ + *i2 * a_dim1; a[i__2].r = a[i__3].r; a[i__2].i = a[i__3].i; // , expr subst i__2 = *i1 + i__ + *i2 * a_dim1; a[i__2].r = tmp.r; a[i__2].i = tmp.i; // , expr subst } /* third swap */ /* - swap row 
I1 and I2 from I2+1 to N */ i__1 = *n; for (i__ = *i2 + 1; i__ <= i__1; ++i__) { i__2 = *i1 + i__ * a_dim1; tmp.r = a[i__2].r; tmp.i = a[i__2].i; // , expr subst i__2 = *i1 + i__ * a_dim1; i__3 = *i2 + i__ * a_dim1; a[i__2].r = a[i__3].r; a[i__2].i = a[i__3].i; // , expr subst i__2 = *i2 + i__ * a_dim1; a[i__2].r = tmp.r; a[i__2].i = tmp.i; // , expr subst } } else { /* LOWER */ /* first swap */ /* - swap row I1 and I2 from I1 to I1-1 */ i__1 = *i1 - 1; cswap_(&i__1, &a[*i1 + a_dim1], lda, &a[*i2 + a_dim1], lda); /* second swap : */ /* - swap A(I1,I1) and A(I2,I2) */ /* - swap col I1 from I1+1 to I2-1 with row I2 from I1+1 to I2-1 */ i__1 = *i1 + *i1 * a_dim1; tmp.r = a[i__1].r; tmp.i = a[i__1].i; // , expr subst i__1 = *i1 + *i1 * a_dim1; i__2 = *i2 + *i2 * a_dim1; a[i__1].r = a[i__2].r; a[i__1].i = a[i__2].i; // , expr subst i__1 = *i2 + *i2 * a_dim1; a[i__1].r = tmp.r; a[i__1].i = tmp.i; // , expr subst i__1 = *i2 - *i1 - 1; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = *i1 + i__ + *i1 * a_dim1; tmp.r = a[i__2].r; tmp.i = a[i__2].i; // , expr subst i__2 = *i1 + i__ + *i1 * a_dim1; i__3 = *i2 + (*i1 + i__) * a_dim1; a[i__2].r = a[i__3].r; a[i__2].i = a[i__3].i; // , expr subst i__2 = *i2 + (*i1 + i__) * a_dim1; a[i__2].r = tmp.r; a[i__2].i = tmp.i; // , expr subst } /* third swap */ /* - swap col I1 and I2 from I2+1 to N */ i__1 = *n; for (i__ = *i2 + 1; i__ <= i__1; ++i__) { i__2 = i__ + *i1 * a_dim1; tmp.r = a[i__2].r; tmp.i = a[i__2].i; // , expr subst i__2 = i__ + *i1 * a_dim1; i__3 = i__ + *i2 * a_dim1; a[i__2].r = a[i__3].r; a[i__2].i = a[i__3].i; // , expr subst i__2 = i__ + *i2 * a_dim1; a[i__2].r = tmp.r; a[i__2].i = tmp.i; // , expr subst } } return 0; }
/* Subroutine */ int chbevx_(char *jobz, char *range, char *uplo, integer *n,
	integer *kd, complex *ab, integer *ldab, complex *q, integer *ldq,
	real *vl, real *vu, integer *il, integer *iu, real *abstol, integer *
	m, real *w, complex *z__, integer *ldz, complex *work, real *rwork,
	integer *iwork, integer *ifail, integer *info)
{
    /* System generated locals */
    integer ab_dim1, ab_offset, q_dim1, q_offset, z_dim1, z_offset, i__1,
	    i__2;
    real r__1, r__2;

    /* Local variables */
    integer i__, j, jj;
    real eps, vll, vuu, tmp1;
    integer indd, inde;          /* RWORK offsets: tridiagonal diag / offdiag */
    real anrm;
    integer imax;
    real rmin, rmax;             /* safe scaling range for the matrix norm */
    logical test;
    complex ctmp1;
    integer itmp1, indee;
    real sigma;                  /* scaling factor applied when ISCALE == 1 */
    integer iinfo;
    char order[1];
    logical lower;
    logical wantz;
    logical alleig, indeig;
    integer iscale, indibl;
    logical valeig;
    real safmin;
    real abstll, bignum;
    integer indiwk, indisp;
    integer indrwk, indwrk;
    integer nsplit;
    real smlnum;

/*  -- LAPACK driver routine (version 3.2) -- */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  CHBEVX computes selected eigenvalues and, optionally, eigenvectors */
/*  of a complex Hermitian band matrix A.  Eigenvalues and eigenvectors */
/*  can be selected by specifying either a range of values or a range of */
/*  indices for the desired eigenvalues. */

/*  Arguments */
/*  ========= */

/*  JOBZ    (input) CHARACTER*1 */
/*          = 'N':  Compute eigenvalues only; */
/*          = 'V':  Compute eigenvalues and eigenvectors. */

/*  RANGE   (input) CHARACTER*1 */
/*          = 'A': all eigenvalues will be found; */
/*          = 'V': all eigenvalues in the half-open interval (VL,VU] */
/*                 will be found; */
/*          = 'I': the IL-th through IU-th eigenvalues will be found. */

/*  UPLO    (input) CHARACTER*1 */
/*          = 'U':  Upper triangle of A is stored; */
/*          = 'L':  Lower triangle of A is stored. */

/*  N       (input) INTEGER */
/*          The order of the matrix A.  N >= 0. */

/*  KD      (input) INTEGER */
/*          The number of superdiagonals of the matrix A if UPLO = 'U', */
/*          or the number of subdiagonals if UPLO = 'L'.  KD >= 0. */

/*  AB      (input/output) COMPLEX array, dimension (LDAB, N) */
/*          On entry, the upper or lower triangle of the Hermitian band */
/*          matrix A, stored in the first KD+1 rows of the array.  The */
/*          j-th column of A is stored in the j-th column of the array AB */
/*          as follows: */
/*          if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd)<=i<=j; */
/*          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j<=i<=min(n,j+kd). */
/*          On exit, AB is overwritten by values generated during the */
/*          reduction to tridiagonal form. */

/*  LDAB    (input) INTEGER */
/*          The leading dimension of the array AB.  LDAB >= KD + 1. */

/*  Q       (output) COMPLEX array, dimension (LDQ, N) */
/*          If JOBZ = 'V', the N-by-N unitary matrix used in the */
/*                         reduction to tridiagonal form. */
/*          If JOBZ = 'N', the array Q is not referenced. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q.  If JOBZ = 'V', then */
/*          LDQ >= max(1,N). */

/*  VL      (input) REAL */
/*  VU      (input) REAL */
/*          If RANGE='V', the lower and upper bounds of the interval to */
/*          be searched for eigenvalues. VL < VU. */
/*          Not referenced if RANGE = 'A' or 'I'. */

/*  IL      (input) INTEGER */
/*  IU      (input) INTEGER */
/*          If RANGE='I', the indices (in ascending order) of the */
/*          smallest and largest eigenvalues to be returned. */
/*          1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. */
/*          Not referenced if RANGE = 'A' or 'V'. */

/*  ABSTOL  (input) REAL */
/*          The absolute error tolerance for the eigenvalues. */
/*          An approximate eigenvalue is accepted as converged */
/*          when it is determined to lie in an interval [a,b] */
/*          of width less than or equal to */

/*                  ABSTOL + EPS *   max( |a|,|b| ) , */

/*          where EPS is the machine precision.  If ABSTOL is less than */
/*          or equal to zero, then  EPS*|T|  will be used in its place, */
/*          where |T| is the 1-norm of the tridiagonal matrix obtained */
/*          by reducing AB to tridiagonal form. */

/*          Eigenvalues will be computed most accurately when ABSTOL is */
/*          set to twice the underflow threshold 2*SLAMCH('S'), not zero. */
/*          If this routine returns with INFO>0, indicating that some */
/*          eigenvectors did not converge, try setting ABSTOL to */
/*          2*SLAMCH('S'). */

/*          See "Computing Small Singular Values of Bidiagonal Matrices */
/*          with Guaranteed High Relative Accuracy," by Demmel and */
/*          Kahan, LAPACK Working Note #3. */

/*  M       (output) INTEGER */
/*          The total number of eigenvalues found.  0 <= M <= N. */
/*          If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. */

/*  W       (output) REAL array, dimension (N) */
/*          The first M elements contain the selected eigenvalues in */
/*          ascending order. */

/*  Z       (output) COMPLEX array, dimension (LDZ, max(1,M)) */
/*          If JOBZ = 'V', then if INFO = 0, the first M columns of Z */
/*          contain the orthonormal eigenvectors of the matrix A */
/*          corresponding to the selected eigenvalues, with the i-th */
/*          column of Z holding the eigenvector associated with W(i). */
/*          If an eigenvector fails to converge, then that column of Z */
/*          contains the latest approximation to the eigenvector, and the */
/*          index of the eigenvector is returned in IFAIL. */
/*          If JOBZ = 'N', then Z is not referenced. */
/*          Note: the user must ensure that at least max(1,M) columns are */
/*          supplied in the array Z; if RANGE = 'V', the exact value of M */
/*          is not known in advance and an upper bound must be used. */

/*  LDZ     (input) INTEGER */
/*          The leading dimension of the array Z.  LDZ >= 1, and if */
/*          JOBZ = 'V', LDZ >= max(1,N). */

/*  WORK    (workspace) COMPLEX array, dimension (N) */

/*  RWORK   (workspace) REAL array, dimension (7*N) */

/*  IWORK   (workspace) INTEGER array, dimension (5*N) */

/*  IFAIL   (output) INTEGER array, dimension (N) */
/*          If JOBZ = 'V', then if INFO = 0, the first M elements of */
/*          IFAIL are zero.  If INFO > 0, then IFAIL contains the */
/*          indices of the eigenvectors that failed to converge. */
/*          If JOBZ = 'N', then IFAIL is not referenced. */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
/*          > 0:  if INFO = i, then i eigenvectors failed to converge. */
/*                Their indices are stored in array IFAIL. */

/*  ===================================================================== */

/*     Test the input parameters. */

    /* Parameter adjustments: shift all arrays to Fortran-style 1-based
       indexing. */
    ab_dim1 = *ldab;
    ab_offset = 1 + ab_dim1;
    ab -= ab_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    --w;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;
    --work;
    --rwork;
    --iwork;
    --ifail;

    /* Function Body */
    wantz = lsame_(jobz, "V");
    alleig = lsame_(range, "A");
    valeig = lsame_(range, "V");
    indeig = lsame_(range, "I");
    lower = lsame_(uplo, "L");

    *info = 0;
    if (! (wantz || lsame_(jobz, "N"))) {
	*info = -1;
    } else if (! (alleig || valeig || indeig)) {
	*info = -2;
    } else if (! (lower || lsame_(uplo, "U"))) {
	*info = -3;
    } else if (*n < 0) {
	*info = -4;
    } else if (*kd < 0) {
	*info = -5;
    } else if (*ldab < *kd + 1) {
	*info = -7;
    } else if (wantz && *ldq < max(1,*n)) {
	*info = -9;
    } else {
	if (valeig) {
	    if (*n > 0 && *vu <= *vl) {
		*info = -11;
	    }
	} else if (indeig) {
	    if (*il < 1 || *il > max(1,*n)) {
		*info = -12;
	    } else if (*iu < min(*n,*il) || *iu > *n) {
		*info = -13;
	    }
	}
    }
    if (*info == 0) {
	if (*ldz < 1 || wantz && *ldz < *n) {
	    *info = -18;
	}
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CHBEVX", &i__1);
	return 0;
    }

/*     Quick return if possible */

    *m = 0;
    if (*n == 0) {
	return 0;
    }

    /* N == 1: the single eigenvalue is the (real) diagonal entry; handle
       the value-range filter and the optional trivial eigenvector here. */
    if (*n == 1) {
	*m = 1;
	if (lower) {
	    i__1 = ab_dim1 + 1;
	    ctmp1.r = ab[i__1].r, ctmp1.i = ab[i__1].i;
	} else {
	    i__1 = *kd + 1 + ab_dim1;
	    ctmp1.r = ab[i__1].r, ctmp1.i = ab[i__1].i;
	}
	tmp1 = ctmp1.r;
	if (valeig) {
	    if (! (*vl < tmp1 && *vu >= tmp1)) {
		*m = 0;
	    }
	}
	if (*m == 1) {
	    w[1] = ctmp1.r;
	    if (wantz) {
		i__1 = z_dim1 + 1;
		z__[i__1].r = 1.f, z__[i__1].i = 0.f;
	    }
	}
	return 0;
    }

/*     Get machine constants. */

    safmin = slamch_("Safe minimum");
    eps = slamch_("Precision");
    smlnum = safmin / eps;
    bignum = 1.f / smlnum;
    rmin = sqrt(smlnum);
/* Computing MIN */
    r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
    rmax = dmin(r__1,r__2);

/*     Scale matrix to allowable range, if necessary. */

    iscale = 0;
    abstll = *abstol;
    if (valeig) {
	vll = *vl;
	vuu = *vu;
    } else {
	vll = 0.f;
	vuu = 0.f;
    }
    anrm = clanhb_("M", uplo, n, kd, &ab[ab_offset], ldab, &rwork[1]);
    if (anrm > 0.f && anrm < rmin) {
	iscale = 1;
	sigma = rmin / anrm;
    } else if (anrm > rmax) {
	iscale = 1;
	sigma = rmax / anrm;
    }
    if (iscale == 1) {
	/* Scale the band matrix in place; the tolerance and the search
	   interval must be scaled by the same factor. */
	if (lower) {
	    clascl_("B", kd, kd, &c_b16, &sigma, n, n, &ab[ab_offset], ldab,
		    info);
	} else {
	    clascl_("Q", kd, kd, &c_b16, &sigma, n, n, &ab[ab_offset], ldab,
		    info);
	}
	if (*abstol > 0.f) {
	    abstll = *abstol * sigma;
	}
	if (valeig) {
	    vll = *vl * sigma;
	    vuu = *vu * sigma;
	}
    }

/*     Call CHBTRD to reduce Hermitian band matrix to tridiagonal form. */

    /* RWORK layout: [indd .. indd+n-1] diagonal, [inde .. inde+n-1]
       off-diagonal, [indrwk ..] general real workspace. */
    indd = 1;
    inde = indd + *n;
    indrwk = inde + *n;
    indwrk = 1;
    chbtrd_(jobz, uplo, n, kd, &ab[ab_offset], ldab, &rwork[indd], &rwork[
	    inde], &q[q_offset], ldq, &work[indwrk], &iinfo);

/*     If all eigenvalues are desired and ABSTOL is less than or equal */
/*     to zero, then call SSTERF or CSTEQR.  If this fails for some */
/*     eigenvalue, then try SSTEBZ. */

    test = FALSE_;
    if (indeig) {
	if (*il == 1 && *iu == *n) {
	    test = TRUE_;
	}
    }
    if ((alleig || test) && *abstol <= 0.f) {
	scopy_(n, &rwork[indd], &c__1, &w[1], &c__1);
	indee = indrwk + (*n << 1);
	if (! wantz) {
	    i__1 = *n - 1;
	    scopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
	    ssterf_(n, &w[1], &rwork[indee], info);
	} else {
	    clacpy_("A", n, n, &q[q_offset], ldq, &z__[z_offset], ldz);
	    i__1 = *n - 1;
	    scopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
	    csteqr_(jobz, n, &w[1], &rwork[indee], &z__[z_offset], ldz, &
		    rwork[indrwk], info);
	    if (*info == 0) {
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    ifail[i__] = 0;
		}
	    }
	}
	if (*info == 0) {
	    /* NOTE(review): this jump bypasses the INDIBL assignment
	       below; the sort loop after L30 reads IWORK[INDIBL...] only
	       when eigenvalues are out of order, which the QR path does
	       not produce - confirm against the reference CHBEVX. */
	    *m = *n;
	    goto L30;
	}
	*info = 0;
    }

/*     Otherwise, call SSTEBZ and, if eigenvectors are desired, CSTEIN. */

    if (wantz) {
	*(unsigned char *)order = 'B';
    } else {
	*(unsigned char *)order = 'E';
    }
    /* IWORK layout: [indibl..] block indices, [indisp..] split points,
       [indiwk..] integer workspace for SSTEBZ/CSTEIN. */
    indibl = 1;
    indisp = indibl + *n;
    indiwk = indisp + *n;
    sstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &rwork[indd], &
	    rwork[inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &
	    rwork[indrwk], &iwork[indiwk], info);

    if (wantz) {
	cstein_(n, &rwork[indd], &rwork[inde], m, &w[1], &iwork[indibl], &
		iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
		indiwk], &ifail[1], info);

/*        Apply unitary matrix used in reduction to tridiagonal */
/*        form to eigenvectors returned by CSTEIN. */

	i__1 = *m;
	for (j = 1; j <= i__1; ++j) {
	    ccopy_(n, &z__[j * z_dim1 + 1], &c__1, &work[1], &c__1);
	    cgemv_("N", n, n, &c_b2, &q[q_offset], ldq, &work[1], &c__1, &
		    c_b1, &z__[j * z_dim1 + 1], &c__1);
	}
    }

/*     If matrix was scaled, then rescale eigenvalues appropriately. */

L30:
    if (iscale == 1) {
	if (*info == 0) {
	    imax = *m;
	} else {
	    /* On partial failure only the converged eigenvalues
	       (1 .. INFO-1) are rescaled. */
	    imax = *info - 1;
	}
	r__1 = 1.f / sigma;
	sscal_(&imax, &r__1, &w[1], &c__1);
    }

/*     If eigenvalues are not in order, then sort them, along with */
/*     eigenvectors. */

    if (wantz) {
	/* Selection sort on W, carrying eigenvector columns, block
	   indices and IFAIL entries along with each swap. */
	i__1 = *m - 1;
	for (j = 1; j <= i__1; ++j) {
	    i__ = 0;
	    tmp1 = w[j];
	    i__2 = *m;
	    for (jj = j + 1; jj <= i__2; ++jj) {
		if (w[jj] < tmp1) {
		    i__ = jj;
		    tmp1 = w[jj];
		}
	    }

	    if (i__ != 0) {
		itmp1 = iwork[indibl + i__ - 1];
		w[i__] = w[j];
		iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
		w[j] = tmp1;
		iwork[indibl + j - 1] = itmp1;
		cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1]
			, &c__1);
		if (*info != 0) {
		    itmp1 = ifail[i__];
		    ifail[i__] = ifail[j];
		    ifail[j] = itmp1;
		}
	    }
	}
    }

    return 0;

/*     End of CHBEVX */

} /* chbevx_ */
/* Subroutine */ int csptrs_(char *uplo, integer *n, integer *nrhs, complex *
	ap, integer *ipiv, complex *b, integer *ldb, integer *info)
{
    /* System generated locals */
    integer b_dim1, b_offset, i__1, i__2;
    complex q__1, q__2, q__3;

    /* Builtin functions */
    void c_div(complex *, complex *, complex *);

    /* Local variables */
    integer j, k;
    complex ak, bk;
    integer kc, kp;              /* kc: start of column k in packed AP */
    complex akm1, bkm1, akm1k;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
	    integer *);
    extern logical lsame_(char *, char *);
    complex denom;
    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
	    , complex *, integer *, complex *, integer *, complex *, complex *
	    , integer *), cgeru_(integer *, integer *, complex *,
	    complex *, integer *, complex *, integer *, complex *, integer *),
	     cswap_(integer *, complex *, integer *, complex *, integer *);
    logical upper;
    extern /* Subroutine */ int xerbla_(char *, integer *);

/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CSPTRS solves a system of linear equations A*X = B with a complex */
/*  symmetric matrix A stored in packed format using the factorization */
/*  A = U*D*U**T or A = L*D*L**T computed by CSPTRF. */

/*  Arguments */
/*  ========= */

/*  UPLO    (input) CHARACTER*1 */
/*          Specifies whether the details of the factorization are stored */
/*          as an upper or lower triangular matrix. */
/*          = 'U':  Upper triangular, form is A = U*D*U**T; */
/*          = 'L':  Lower triangular, form is A = L*D*L**T. */

/*  N       (input) INTEGER */
/*          The order of the matrix A.  N >= 0. */

/*  NRHS    (input) INTEGER */
/*          The number of right hand sides, i.e., the number of columns */
/*          of the matrix B.  NRHS >= 0. */

/*  AP      (input) COMPLEX array, dimension (N*(N+1)/2) */
/*          The block diagonal matrix D and the multipliers used to */
/*          obtain the factor U or L as computed by CSPTRF, stored as a */
/*          packed triangular matrix. */

/*  IPIV    (input) INTEGER array, dimension (N) */
/*          Details of the interchanges and the block structure of D */
/*          as determined by CSPTRF. */

/*  B       (input/output) COMPLEX array, dimension (LDB,NRHS) */
/*          On entry, the right hand side matrix B. */
/*          On exit, the solution matrix X. */

/*  LDB     (input) INTEGER */
/*          The leading dimension of the array B.  LDB >= max(1,N). */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0: if INFO = -i, the i-th argument had an illegal value */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments: 1-based Fortran-style indexing. */
    --ap;
    --ipiv;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;

    /* Function Body */
    *info = 0;
    upper = lsame_(uplo, "U");
    if (! upper && ! lsame_(uplo, "L")) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*nrhs < 0) {
	*info = -3;
    } else if (*ldb < max(1,*n)) {
	*info = -7;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CSPTRS", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0 || *nrhs == 0) {
	return 0;
    }

    if (upper) {

/*        Solve A*X = B, where A = U*D*U'. */

/*        First solve U*D*X = B, overwriting B with X. */

/*        K is the main loop index, decreasing from N to 1 in steps of */
/*        1 or 2, depending on the size of the diagonal blocks. */

	k = *n;
	kc = *n * (*n + 1) / 2 + 1;
L10:

/*        If K < 1, exit from loop. */

	if (k < 1) {
	    goto L30;
	}

	kc -= k;
	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block */

/*           Interchange rows K and IPIV(K). */

	    kp = ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }

/*           Multiply by inv(U(K)), where U(K) is the transformation */
/*           stored in column K of A. */

	    /* Rank-1 update: B(1:k-1,:) -= U(1:k-1,k) * B(k,:). */
	    i__1 = k - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgeru_(&i__1, nrhs, &q__1, &ap[kc], &c__1, &b[k + b_dim1], ldb, &
		    b[b_dim1 + 1], ldb);

/*           Multiply by the inverse of the diagonal block. */

	    /* D(k,k) is the last entry of packed column k. */
	    c_div(&q__1, &c_b1, &ap[kc + k - 1]);
	    cscal_(nrhs, &q__1, &b[k + b_dim1], ldb);
	    --k;
	} else {

/*           2 x 2 diagonal block */

/*           Interchange rows K-1 and -IPIV(K). */

	    kp = -ipiv[k];
	    if (kp != k - 1) {
		cswap_(nrhs, &b[k - 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }

/*           Multiply by inv(U(K)), where U(K) is the transformation */
/*           stored in columns K-1 and K of A. */

	    i__1 = k - 2;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgeru_(&i__1, nrhs, &q__1, &ap[kc], &c__1, &b[k + b_dim1], ldb, &
		    b[b_dim1 + 1], ldb);
	    i__1 = k - 2;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgeru_(&i__1, nrhs, &q__1, &ap[kc - (k - 1)], &c__1, &b[k - 1 +
		    b_dim1], ldb, &b[b_dim1 + 1], ldb);

/*           Multiply by the inverse of the diagonal block. */

	    /* Explicit 2x2 solve, scaled by the off-diagonal entry
	       AKM1K to avoid overflow (symmetric: no conjugation). */
	    i__1 = kc + k - 2;
	    akm1k.r = ap[i__1].r, akm1k.i = ap[i__1].i;
	    c_div(&q__1, &ap[kc - 1], &akm1k);
	    akm1.r = q__1.r, akm1.i = q__1.i;
	    c_div(&q__1, &ap[kc + k - 1], &akm1k);
	    ak.r = q__1.r, ak.i = q__1.i;
	    q__2.r = akm1.r * ak.r - akm1.i * ak.i, q__2.i = akm1.r * ak.i +
		    akm1.i * ak.r;
	    q__1.r = q__2.r - 1.f, q__1.i = q__2.i - 0.f;
	    denom.r = q__1.r, denom.i = q__1.i;
	    i__1 = *nrhs;
	    for (j = 1; j <= i__1; ++j) {
		c_div(&q__1, &b[k - 1 + j * b_dim1], &akm1k);
		bkm1.r = q__1.r, bkm1.i = q__1.i;
		c_div(&q__1, &b[k + j * b_dim1], &akm1k);
		bk.r = q__1.r, bk.i = q__1.i;
		i__2 = k - 1 + j * b_dim1;
		q__3.r = ak.r * bkm1.r - ak.i * bkm1.i, q__3.i = ak.r *
			bkm1.i + ak.i * bkm1.r;
		q__2.r = q__3.r - bk.r, q__2.i = q__3.i - bk.i;
		c_div(&q__1, &q__2, &denom);
		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
		i__2 = k + j * b_dim1;
		q__3.r = akm1.r * bk.r - akm1.i * bk.i, q__3.i = akm1.r *
			bk.i + akm1.i * bk.r;
		q__2.r = q__3.r - bkm1.r, q__2.i = q__3.i - bkm1.i;
		c_div(&q__1, &q__2, &denom);
		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
/* L20: */
	    }
	    kc = kc - k + 1;
	    k += -2;
	}

	goto L10;
L30:

/*        Next solve U'*X = B, overwriting B with X. */

/*        K is the main loop index, increasing from 1 to N in steps of */
/*        1 or 2, depending on the size of the diagonal blocks. */

	k = 1;
	kc = 1;
L40:

/*        If K > N, exit from loop. */

	if (k > *n) {
	    goto L50;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block */

/*           Multiply by inv(U'(K)), where U(K) is the transformation */
/*           stored in column K of A. */

	    i__1 = k - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("Transpose", &i__1, nrhs, &q__1, &b[b_offset], ldb, &ap[kc]
		    , &c__1, &c_b1, &b[k + b_dim1], ldb);

/*           Interchange rows K and IPIV(K). */

	    kp = ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }
	    kc += k;
	    ++k;
	} else {

/*           2 x 2 diagonal block */

/*           Multiply by inv(U'(K+1)), where U(K+1) is the transformation */
/*           stored in columns K and K+1 of A. */

	    i__1 = k - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("Transpose", &i__1, nrhs, &q__1, &b[b_offset], ldb, &ap[kc]
		    , &c__1, &c_b1, &b[k + b_dim1], ldb);
	    i__1 = k - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("Transpose", &i__1, nrhs, &q__1, &b[b_offset], ldb, &ap[kc
		    + k], &c__1, &c_b1, &b[k + 1 + b_dim1], ldb);

/*           Interchange rows K and -IPIV(K). */

	    kp = -ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }
	    /* Advance past both packed columns of the 2x2 block. */
	    kc = kc + (k << 1) + 1;
	    k += 2;
	}

	goto L40;
L50:
	;
    } else {

/*        Solve A*X = B, where A = L*D*L'. */

/*        First solve L*D*X = B, overwriting B with X. */

/*        K is the main loop index, increasing from 1 to N in steps of */
/*        1 or 2, depending on the size of the diagonal blocks. */

	k = 1;
	kc = 1;
L60:

/*        If K > N, exit from loop. */

	if (k > *n) {
	    goto L80;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block */

/*           Interchange rows K and IPIV(K). */

	    kp = ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }

/*           Multiply by inv(L(K)), where L(K) is the transformation */
/*           stored in column K of A. */

	    if (k < *n) {
		i__1 = *n - k;
		q__1.r = -1.f, q__1.i = -0.f;
		cgeru_(&i__1, nrhs, &q__1, &ap[kc + 1], &c__1, &b[k + b_dim1],
			 ldb, &b[k + 1 + b_dim1], ldb);
	    }

/*           Multiply by the inverse of the diagonal block. */

	    c_div(&q__1, &c_b1, &ap[kc]);
	    cscal_(nrhs, &q__1, &b[k + b_dim1], ldb);
	    /* Lower-packed column k holds n-k+1 entries. */
	    kc = kc + *n - k + 1;
	    ++k;
	} else {

/*           2 x 2 diagonal block */

/*           Interchange rows K+1 and -IPIV(K). */

	    kp = -ipiv[k];
	    if (kp != k + 1) {
		cswap_(nrhs, &b[k + 1 + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }

/*           Multiply by inv(L(K)), where L(K) is the transformation */
/*           stored in columns K and K+1 of A. */

	    if (k < *n - 1) {
		i__1 = *n - k - 1;
		q__1.r = -1.f, q__1.i = -0.f;
		cgeru_(&i__1, nrhs, &q__1, &ap[kc + 2], &c__1, &b[k + b_dim1],
			 ldb, &b[k + 2 + b_dim1], ldb);
		i__1 = *n - k - 1;
		q__1.r = -1.f, q__1.i = -0.f;
		cgeru_(&i__1, nrhs, &q__1, &ap[kc + *n - k + 2], &c__1, &b[k
			+ 1 + b_dim1], ldb, &b[k + 2 + b_dim1], ldb);
	    }

/*           Multiply by the inverse of the diagonal block. */

	    i__1 = kc + 1;
	    akm1k.r = ap[i__1].r, akm1k.i = ap[i__1].i;
	    c_div(&q__1, &ap[kc], &akm1k);
	    akm1.r = q__1.r, akm1.i = q__1.i;
	    c_div(&q__1, &ap[kc + *n - k + 1], &akm1k);
	    ak.r = q__1.r, ak.i = q__1.i;
	    q__2.r = akm1.r * ak.r - akm1.i * ak.i, q__2.i = akm1.r * ak.i +
		    akm1.i * ak.r;
	    q__1.r = q__2.r - 1.f, q__1.i = q__2.i - 0.f;
	    denom.r = q__1.r, denom.i = q__1.i;
	    i__1 = *nrhs;
	    for (j = 1; j <= i__1; ++j) {
		c_div(&q__1, &b[k + j * b_dim1], &akm1k);
		bkm1.r = q__1.r, bkm1.i = q__1.i;
		c_div(&q__1, &b[k + 1 + j * b_dim1], &akm1k);
		bk.r = q__1.r, bk.i = q__1.i;
		i__2 = k + j * b_dim1;
		q__3.r = ak.r * bkm1.r - ak.i * bkm1.i, q__3.i = ak.r *
			bkm1.i + ak.i * bkm1.r;
		q__2.r = q__3.r - bk.r, q__2.i = q__3.i - bk.i;
		c_div(&q__1, &q__2, &denom);
		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
		i__2 = k + 1 + j * b_dim1;
		q__3.r = akm1.r * bk.r - akm1.i * bk.i, q__3.i = akm1.r *
			bk.i + akm1.i * bk.r;
		q__2.r = q__3.r - bkm1.r, q__2.i = q__3.i - bkm1.i;
		c_div(&q__1, &q__2, &denom);
		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
/* L70: */
	    }
	    kc = kc + (*n - k << 1) + 1;
	    k += 2;
	}

	goto L60;
L80:

/*        Next solve L'*X = B, overwriting B with X. */

/*        K is the main loop index, decreasing from N to 1 in steps of */
/*        1 or 2, depending on the size of the diagonal blocks. */

	k = *n;
	kc = *n * (*n + 1) / 2 + 1;
L90:

/*        If K < 1, exit from loop. */

	if (k < 1) {
	    goto L100;
	}

	kc -= *n - k + 1;
	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block */

/*           Multiply by inv(L'(K)), where L(K) is the transformation */
/*           stored in column K of A. */

	    if (k < *n) {
		i__1 = *n - k;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("Transpose", &i__1, nrhs, &q__1, &b[k + 1 + b_dim1],
			ldb, &ap[kc + 1], &c__1, &c_b1, &b[k + b_dim1], ldb);
	    }

/*           Interchange rows K and IPIV(K). */

	    kp = ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }
	    --k;
	} else {

/*           2 x 2 diagonal block */

/*           Multiply by inv(L'(K-1)), where L(K-1) is the transformation */
/*           stored in columns K-1 and K of A. */

	    if (k < *n) {
		i__1 = *n - k;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("Transpose", &i__1, nrhs, &q__1, &b[k + 1 + b_dim1],
			ldb, &ap[kc + 1], &c__1, &c_b1, &b[k + b_dim1], ldb);
		i__1 = *n - k;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("Transpose", &i__1, nrhs, &q__1, &b[k + 1 + b_dim1],
			ldb, &ap[kc - (*n - k)], &c__1, &c_b1, &b[k - 1 +
			b_dim1], ldb);
	    }

/*           Interchange rows K and -IPIV(K). */

	    kp = -ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b[k + b_dim1], ldb, &b[kp + b_dim1], ldb);
	    }
	    kc -= *n - k + 2;
	    k += -2;
	}

	goto L90;
L100:
	;
    }

    return 0;

/*     End of CSPTRS */

} /* csptrs_ */
/* Subroutine */ int claqps_(integer *m, integer *n, integer *offset, integer
	*nb, integer *kb, complex *a, integer *lda, integer *jpvt, complex *
	tau, real *vn1, real *vn2, complex *auxv, complex *f, integer *ldf)
{
    /* System generated locals */
    integer a_dim1, a_offset, f_dim1, f_offset, i__1, i__2, i__3;
    real r__1, r__2;
    complex q__1;

    /* Local variables */
    integer j, k, rk;            /* rk = offset + k: current pivot row */
    complex akk;
    integer pvt;
    real temp, temp2, tol3z;
    integer itemp;
    integer lsticc;              /* head of list of columns needing norm recompute */
    integer lastrk;

/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  CLAQPS computes a step of QR factorization with column pivoting */
/*  of a complex M-by-N matrix A by using Blas-3.  It tries to factorize */
/*  NB columns from A starting from the row OFFSET+1, and updates all */
/*  of the matrix with Blas-3 xGEMM. */

/*  In some cases, due to catastrophic cancellations, it cannot */
/*  factorize NB columns.  Hence, the actual number of factorized */
/*  columns is returned in KB. */

/*  Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A. M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A. N >= 0 */

/*  OFFSET  (input) INTEGER */
/*          The number of rows of A that have been factorized in */
/*          previous steps. */

/*  NB      (input) INTEGER */
/*          The number of columns to factorize. */

/*  KB      (output) INTEGER */
/*          The number of columns actually factorized. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the M-by-N matrix A. */
/*          On exit, block A(OFFSET+1:M,1:KB) is the triangular */
/*          factor obtained and block A(1:OFFSET,1:N) has been */
/*          accordingly pivoted, but no factorized. */
/*          The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has */
/*          been updated. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A. LDA >= max(1,M). */

/*  JPVT    (input/output) INTEGER array, dimension (N) */
/*          JPVT(I) = K <==> Column K of the full matrix A has been */
/*          permuted into position I in AP. */

/*  TAU     (output) COMPLEX array, dimension (KB) */
/*          The scalar factors of the elementary reflectors. */

/*  VN1     (input/output) REAL array, dimension (N) */
/*          The vector with the partial column norms. */

/*  VN2     (input/output) REAL array, dimension (N) */
/*          The vector with the exact column norms. */

/*  AUXV    (input/output) COMPLEX array, dimension (NB) */
/*          Auxiliar vector. */

/*  F       (input/output) COMPLEX array, dimension (LDF,NB) */
/*          Matrix F' = L*Y'*A. */

/*  LDF     (input) INTEGER */
/*          The leading dimension of the array F. LDF >= max(1,N). */

/*  Further Details */
/*  =============== */

/*  Based on contributions by */
/*    G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
/*    X. Sun, Computer Science Dept., Duke University, USA */

/*  Partial column norm updating strategy modified by */
/*    Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
/*    University of Zagreb, Croatia. */
/*    June 2006. */
/*  For more details see LAPACK Working Note 176. */

/*  ===================================================================== */

    /* Parameter adjustments: 1-based Fortran-style indexing. */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --jpvt;
    --tau;
    --vn1;
    --vn2;
    --auxv;
    f_dim1 = *ldf;
    f_offset = 1 + f_dim1;
    f -= f_offset;

    /* Function Body */
/* Computing MIN */
    i__1 = *m, i__2 = *n + *offset;
    lastrk = min(i__1,i__2);
    lsticc = 0;
    k = 0;
    /* LAWN-176 threshold below which a downdated partial norm is no
       longer trustworthy and must be recomputed. */
    tol3z = sqrt(slamch_("Epsilon"));

/*     Beginning of while loop. */

L10:
    if (k < *nb && lsticc == 0) {
	++k;
	rk = *offset + k;

/*        Determine ith pivot column and swap if necessary */

	i__1 = *n - k + 1;
	pvt = k - 1 + isamax_(&i__1, &vn1[k], &c__1);
	if (pvt != k) {
	    cswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[k * a_dim1 + 1], &c__1);
	    i__1 = k - 1;
	    cswap_(&i__1, &f[pvt + f_dim1], ldf, &f[k + f_dim1], ldf);
	    itemp = jpvt[pvt];
	    jpvt[pvt] = jpvt[k];
	    jpvt[k] = itemp;
	    vn1[pvt] = vn1[k];
	    vn2[pvt] = vn2[k];
	}

/*        Apply previous Householder reflectors to column K: */
/*        A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. */

	if (k > 1) {
	    /* Conjugate row K of F in place so a plain (no-transpose)
	       CGEMV realizes the F' product, then restore it. */
	    i__1 = k - 1;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = k + j * f_dim1;
		r_cnjg(&q__1, &f[k + j * f_dim1]);
		f[i__2].r = q__1.r, f[i__2].i = q__1.i;
	    }
	    i__1 = *m - rk + 1;
	    i__2 = k - 1;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemv_("No transpose", &i__1, &i__2, &q__1, &a[rk + a_dim1], lda,
		    &f[k + f_dim1], ldf, &c_b2, &a[rk + k * a_dim1], &c__1);
	    i__1 = k - 1;
	    for (j = 1; j <= i__1; ++j) {
		i__2 = k + j * f_dim1;
		r_cnjg(&q__1, &f[k + j * f_dim1]);
		f[i__2].r = q__1.r, f[i__2].i = q__1.i;
	    }
	}

/*        Generate elementary reflector H(k). */

	if (rk < *m) {
	    i__1 = *m - rk + 1;
	    clarfp_(&i__1, &a[rk + k * a_dim1], &a[rk + 1 + k * a_dim1], &
		    c__1, &tau[k]);
	} else {
	    clarfp_(&c__1, &a[rk + k * a_dim1], &a[rk + k * a_dim1], &c__1, &
		    tau[k]);
	}

	/* Temporarily overwrite the diagonal with 1 so the full
	   reflector vector can be used in the BLAS calls below;
	   restored from AKK at the bottom of the loop. */
	i__1 = rk + k * a_dim1;
	akk.r = a[i__1].r, akk.i = a[i__1].i;
	i__1 = rk + k * a_dim1;
	a[i__1].r = 1.f, a[i__1].i = 0.f;

/*        Compute Kth column of F: */

/*        Compute  F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K). */

	if (k < *n) {
	    i__1 = *m - rk + 1;
	    i__2 = *n - k;
	    cgemv_("Conjugate transpose", &i__1, &i__2, &tau[k], &a[rk + (k +
		    1) * a_dim1], lda, &a[rk + k * a_dim1], &c__1, &c_b1, &f[
		    k + 1 + k * f_dim1], &c__1);
	}

/*        Padding F(1:K,K) with zeros. */

	i__1 = k;
	for (j = 1; j <= i__1; ++j) {
	    i__2 = j + k * f_dim1;
	    f[i__2].r = 0.f, f[i__2].i = 0.f;
	}

/*        Incremental updating of F: */
/*        F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)' */
/*                    *A(RK:M,K). */

	if (k > 1) {
	    i__1 = *m - rk + 1;
	    i__2 = k - 1;
	    i__3 = k;
	    q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
	    cgemv_("Conjugate transpose", &i__1, &i__2, &q__1, &a[rk + a_dim1]
		    , lda, &a[rk + k * a_dim1], &c__1, &c_b1, &auxv[1], &c__1)
		    ;

	    i__1 = k - 1;
	    cgemv_("No transpose", n, &i__1, &c_b2, &f[f_dim1 + 1], ldf, &
		    auxv[1], &c__1, &c_b2, &f[k * f_dim1 + 1], &c__1);
	}

/*        Update the current row of A: */
/*        A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */

	if (k < *n) {
	    i__1 = *n - k;
	    q__1.r = -1.f, q__1.i = -0.f;
	    cgemm_("No transpose", "Conjugate transpose", &c__1, &i__1, &k, &
		    q__1, &a[rk + a_dim1], lda, &f[k + 1 + f_dim1], ldf, &
		    c_b2, &a[rk + (k + 1) * a_dim1], lda);
	}

/*        Update partial column norms. */

	if (rk < lastrk) {
	    i__1 = *n;
	    for (j = k + 1; j <= i__1; ++j) {
		if (vn1[j] != 0.f) {

/*                 NOTE: The following 4 lines follow from the analysis in */
/*                 Lapack Working Note 176. */

		    temp = c_abs(&a[rk + j * a_dim1]) / vn1[j];
/* Computing MAX */
		    r__1 = 0.f, r__2 = (temp + 1.f) * (1.f - temp);
		    temp = dmax(r__1,r__2);
/* Computing 2nd power */
		    r__1 = vn1[j] / vn2[j];
		    temp2 = temp * (r__1 * r__1);
		    if (temp2 <= tol3z) {
			/* Downdate unreliable: push column J on the
			   recompute list.  VN2(J) doubles as the "next"
			   link, stored as a real value. */
			vn2[j] = (real) lsticc;
			lsticc = j;
		    } else {
			vn1[j] *= sqrt(temp);
		    }
		}
	    }
	}

	i__1 = rk + k * a_dim1;
	a[i__1].r = akk.r, a[i__1].i = akk.i;

/*        End of while loop. */

	goto L10;
    }
    *kb = k;
    rk = *offset + *kb;

/*     Apply the block reflector to the rest of the matrix: */
/*     A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - */
/*                         A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)'. */

/* Computing MIN */
    i__1 = *n, i__2 = *m - *offset;
    if (*kb < min(i__1,i__2)) {
	i__1 = *m - rk;
	i__2 = *n - *kb;
	q__1.r = -1.f, q__1.i = -0.f;
	cgemm_("No transpose", "Conjugate transpose", &i__1, &i__2, kb, &q__1,
		 &a[rk + 1 + a_dim1], lda, &f[*kb + 1 + f_dim1], ldf, &c_b2, &
		a[rk + 1 + (*kb + 1) * a_dim1], lda);
    }

/*     Recomputation of difficult columns. */

L60:
    if (lsticc > 0) {
	/* Walk the linked list threaded through VN2 (see above),
	   recomputing each flagged column norm exactly. */
	itemp = i_nint(&vn2[lsticc]);
	i__1 = *m - rk;
	vn1[lsticc] = scnrm2_(&i__1, &a[rk + 1 + lsticc * a_dim1], &c__1);

/*        NOTE: The computation of VN1( LSTICC ) relies on the fact that */
/*        SNRM2 does not fail on vectors with norm below the value of */
/*        SQRT(DLAMCH('S')) */

	vn2[lsticc] = vn1[lsticc];
	lsticc = itemp;
	goto L60;
    }

    return 0;

/*     End of CLAQPS */

} /* claqps_ */
/* CHBGVX computes selected eigenvalues, and optionally eigenvectors, of a
   complex generalized Hermitian-definite banded eigenproblem
   A*x = lambda*B*x, where A and B are Hermitian band matrices (bandwidths
   KA and KB) and B is positive definite.  Eigenvalues can be selected by
   a value range [VL,VU] or an index range [IL,IU] (RANGE), and JOBZ
   selects whether eigenvectors are computed.

   On exit W(1:M) holds the selected eigenvalues, Z the corresponding
   eigenvectors (if JOBZ='V'), and IFAIL the indices of eigenvectors that
   failed to converge.  INFO < 0 flags an illegal argument; 0 < INFO <= N
   reports non-converged eigenvectors; INFO > N means CPBSTF found B not
   positive definite (f2c translation of the Fortran driver). */
/* Subroutine */ int chbgvx_(char *jobz, char *range, char *uplo, integer *n,
	integer *ka, integer *kb, complex *ab, integer *ldab, complex *bb,
	integer *ldbb, complex *q, integer *ldq, real *vl, real *vu, integer *
	il, integer *iu, real *abstol, integer *m, real *w, complex *z__,
	integer *ldz, complex *work, real *rwork, integer *iwork, integer *
	ifail, integer *info)
{
    /* System generated locals */
    integer ab_dim1, ab_offset, bb_dim1, bb_offset, q_dim1, q_offset, z_dim1,
	    z_offset, i__1, i__2;

    /* Local variables */
    integer i__, j, jj;
    real tmp1;
    integer indd, inde;
    char vect[1];
    logical test;
    integer itmp1, indee;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
	    , complex *, integer *, complex *, integer *, complex *, complex *
	    , integer *);
    integer iinfo;
    char order[1];
    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
	    complex *, integer *), cswap_(integer *, complex *, integer *,
	    complex *, integer *);
    logical upper;
    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
	    integer *);
    logical wantz, alleig, indeig;
    integer indibl;
    extern /* Subroutine */ int chbtrd_(char *, char *, integer *, integer *,
	    complex *, integer *, real *, real *, complex *, integer *,
	    complex *, integer *);
    logical valeig;
    extern /* Subroutine */ int chbgst_(char *, char *, integer *, integer *,
	    integer *, complex *, integer *, complex *, integer *, complex *,
	    integer *, complex *, real *, integer *), clacpy_(
	    char *, integer *, integer *, complex *, integer *, complex *,
	    integer *), xerbla_(char *, integer *), cpbstf_(
	    char *, integer *, integer *, complex *, integer *, integer *);
    integer indiwk, indisp;
    extern /* Subroutine */ int cstein_(integer *, real *, real *, integer *,
	    real *, integer *, integer *, complex *, integer *, real *,
	    integer *, integer *, integer *);
    integer indrwk, indwrk;
    extern /* Subroutine */ int csteqr_(char *, integer *, real *, real *,
	    complex *, integer *, real *, integer *), ssterf_(integer *,
	    real *, real *, integer *);
    integer nsplit;
    extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *,
	    real *, integer *, integer *, real *, real *, real *, integer *,
	    integer *, real *, integer *, integer *, real *, integer *,
	    integer *);


/*  -- LAPACK driver routine (version 3.4.0) -- */
/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/*     November 2011 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input parameters. */

    /* Parameter adjustments */
    /* f2c convention: shift every array pointer so that 1-based Fortran
       indexing (a[i + j*dim1]) works directly in C. */
    ab_dim1 = *ldab;
    ab_offset = 1 + ab_dim1;
    ab -= ab_offset;
    bb_dim1 = *ldbb;
    bb_offset = 1 + bb_dim1;
    bb -= bb_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    --w;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;
    --work;
    --rwork;
    --iwork;
    --ifail;

    /* Function Body */
    wantz = lsame_(jobz, "V");
    upper = lsame_(uplo, "U");
    alleig = lsame_(range, "A");
    valeig = lsame_(range, "V");
    indeig = lsame_(range, "I");

    *info = 0;
    if (! (wantz || lsame_(jobz, "N"))) {
	*info = -1;
    } else if (! (alleig || valeig || indeig)) {
	*info = -2;
    } else if (! (upper || lsame_(uplo, "L"))) {
	*info = -3;
    } else if (*n < 0) {
	*info = -4;
    } else if (*ka < 0) {
	*info = -5;
    } else if (*kb < 0 || *kb > *ka) {
	*info = -6;
    } else if (*ldab < *ka + 1) {
	*info = -8;
    } else if (*ldbb < *kb + 1) {
	*info = -10;
    } else if (*ldq < 1 || wantz && *ldq < *n) {
	*info = -12;
    } else {
	if (valeig) {
	    /* Value range: VU must exceed VL (only meaningful when N > 0). */
	    if (*n > 0 && *vu <= *vl) {
		*info = -14;
	    }
	} else if (indeig) {
	    /* Index range: 1 <= IL <= IU <= N. */
	    if (*il < 1 || *il > max(1,*n)) {
		*info = -15;
	    } else if (*iu < min(*n,*il) || *iu > *n) {
		*info = -16;
	    }
	}
    }
    if (*info == 0) {
	if (*ldz < 1 || wantz && *ldz < *n) {
	    *info = -21;
	}
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CHBGVX", &i__1);
	return 0;
    }

/*     Quick return if possible */

    *m = 0;
    if (*n == 0) {
	return 0;
    }

/*     Form a split Cholesky factorization of B. */

    cpbstf_(uplo, n, kb, &bb[bb_offset], ldbb, info);
    if (*info != 0) {
	/* B is not positive definite: report as INFO = N + i. */
	*info = *n + *info;
	return 0;
    }

/*     Transform problem to standard eigenvalue problem. */

    chbgst_(jobz, uplo, n, ka, kb, &ab[ab_offset], ldab, &bb[bb_offset], ldbb,
	     &q[q_offset], ldq, &work[1], &rwork[1], &iinfo);

/*     Solve the standard eigenvalue problem. */
/*     Reduce Hermitian band matrix to tridiagonal form. */

    /* Workspace layout inside RWORK: D at indd, E at inde, general
       scratch from indrwk onward. */
    indd = 1;
    inde = indd + *n;
    indrwk = inde + *n;
    indwrk = 1;
    if (wantz) {
	/* Accumulate the reduction transformations in Q (needed later to
	   back-transform eigenvectors from CSTEIN). */
	*(unsigned char *)vect = 'U';
    } else {
	*(unsigned char *)vect = 'N';
    }
    chbtrd_(vect, uplo, n, ka, &ab[ab_offset], ldab, &rwork[indd], &rwork[
	    inde], &q[q_offset], ldq, &work[indwrk], &iinfo);

/*     If all eigenvalues are desired and ABSTOL is less than or equal */
/*     to zero, then call SSTERF or CSTEQR.  If this fails for some */
/*     eigenvalue, then try SSTEBZ. */

    test = FALSE_;
    if (indeig) {
	if (*il == 1 && *iu == *n) {
	    /* Index range covering everything is equivalent to RANGE='A'. */
	    test = TRUE_;
	}
    }
    if ((alleig || test) && *abstol <= 0.f) {
	scopy_(n, &rwork[indd], &c__1, &w[1], &c__1);
	indee = indrwk + (*n << 1);
	i__1 = *n - 1;
	scopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
	if (! wantz) {
	    ssterf_(n, &w[1], &rwork[indee], info);
	} else {
	    clacpy_("A", n, n, &q[q_offset], ldq, &z__[z_offset], ldz);
	    csteqr_(jobz, n, &w[1], &rwork[indee], &z__[z_offset], ldz, &
		    rwork[indrwk], info);
	    if (*info == 0) {
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    ifail[i__] = 0;
/* L10: */
		}
	    }
	}
	if (*info == 0) {
	    *m = *n;
	    goto L30;
	}
	/* QR iteration failed: fall through to the bisection path. */
	*info = 0;
    }

/*     Otherwise, call SSTEBZ and, if eigenvectors are desired, */
/*     call CSTEIN. */

    if (wantz) {
	*(unsigned char *)order = 'B';
    } else {
	*(unsigned char *)order = 'E';
    }
    /* Integer workspace layout inside IWORK: block indices at indibl,
       split points at indisp, scratch at indiwk. */
    indibl = 1;
    indisp = indibl + *n;
    indiwk = indisp + *n;
    sstebz_(range, order, n, vl, vu, il, iu, abstol, &rwork[indd], &rwork[
	    inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &rwork[
	    indrwk], &iwork[indiwk], info);

    if (wantz) {
	cstein_(n, &rwork[indd], &rwork[inde], m, &w[1], &iwork[indibl], &
		iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
		indiwk], &ifail[1], info);

/*        Apply unitary matrix used in reduction to tridiagonal */
/*        form to eigenvectors returned by CSTEIN. */

	i__1 = *m;
	for (j = 1; j <= i__1; ++j) {
	    /* z(:,j) := Q * z(:,j), using WORK as a temporary column.
	       c_b2/c_b1 are file-scope complex constants (alpha/beta). */
	    ccopy_(n, &z__[j * z_dim1 + 1], &c__1, &work[1], &c__1);
	    cgemv_("N", n, n, &c_b2, &q[q_offset], ldq, &work[1], &c__1, &
		    c_b1, &z__[j * z_dim1 + 1], &c__1);
/* L20: */
	}
    }

L30:

/*     If eigenvalues are not in order, then sort them, along with */
/*     eigenvectors. */

    if (wantz) {
	/* Selection sort on W(1:M); swap eigenvector columns, block
	   indices, and (on failure) IFAIL entries in step. */
	i__1 = *m - 1;
	for (j = 1; j <= i__1; ++j) {
	    i__ = 0;
	    tmp1 = w[j];
	    i__2 = *m;
	    for (jj = j + 1; jj <= i__2; ++jj) {
		if (w[jj] < tmp1) {
		    i__ = jj;
		    tmp1 = w[jj];
		}
/* L40: */
	    }

	    if (i__ != 0) {
		itmp1 = iwork[indibl + i__ - 1];
		w[i__] = w[j];
		iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
		w[j] = tmp1;
		iwork[indibl + j - 1] = itmp1;
		cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[j * z_dim1 + 1],
			 &c__1);
		if (*info != 0) {
		    itmp1 = ifail[i__];
		    ifail[i__] = ifail[j];
		    ifail[j] = itmp1;
		}
	    }
/* L50: */
	}
    }

    return 0;

/*     End of CHBGVX */

}
/* Subroutine */ int csytri_(char *uplo, integer *n, complex *a, integer *lda,
	 integer *ipiv, complex *work, integer *info)
{
/*  -- LAPACK routine (version 3.0) --
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       September 30, 1994


    Purpose
    =======

    CSYTRI computes the inverse of a complex symmetric indefinite matrix
    A using the factorization A = U*D*U**T or A = L*D*L**T computed by
    CSYTRF.

    Arguments
    =========

    UPLO    (input) CHARACTER*1
            Specifies whether the details of the factorization are stored
            as an upper or lower triangular matrix.
            = 'U':  Upper triangular, form is A = U*D*U**T;
            = 'L':  Lower triangular, form is A = L*D*L**T.

    N       (input) INTEGER
            The order of the matrix A.  N >= 0.

    A       (input/output) COMPLEX array, dimension (LDA,N)
            On entry, the block diagonal matrix D and the multipliers
            used to obtain the factor U or L as computed by CSYTRF.

            On exit, if INFO = 0, the (symmetric) inverse of the original
            matrix.  If UPLO = 'U', the upper triangular part of the
            inverse is formed and the part of A below the diagonal is not
            referenced; if UPLO = 'L' the lower triangular part of the
            inverse is formed and the part of A above the diagonal is
            not referenced.

    LDA     (input) INTEGER
            The leading dimension of the array A.  LDA >= max(1,N).

    IPIV    (input) INTEGER array, dimension (N)
            Details of the interchanges and the block structure of D
            as determined by CSYTRF.

    WORK    (workspace) COMPLEX array, dimension (2*N)

    INFO    (output) INTEGER
            = 0: successful exit
            < 0: if INFO = -i, the i-th argument had an illegal value
            > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its
                 inverse could not be computed.

    =====================================================================


       Test the input parameters.

       Parameter adjustments */
    /* Table of constant values */
    static complex c_b1 = {1.f,0.f};
    static complex c_b2 = {0.f,0.f};
    static integer c__1 = 1;

    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    complex q__1, q__2, q__3;
    /* Builtin functions */
    void c_div(complex *, complex *, complex *);
    /* Local variables */
    static complex temp, akkp1, d__;
    static integer k;
    static complex t;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
	    complex *, integer *);
    extern /* Complex */ VOID cdotu_(complex *, integer *, complex *, integer
	    *, complex *, integer *);
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
	    complex *, integer *);
    static integer kstep;
    static logical upper;
    extern /* Subroutine */ int csymv_(char *, integer *, complex *, complex *
	    , integer *, complex *, integer *, complex *, complex *, integer *
	    );
    static complex ak;
    static integer kp;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    static complex akp1;
    /* 1-based Fortran-style indexing into the 2-D array A. */
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]

    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --ipiv;
    --work;

    /* Function Body */
    *info = 0;
    upper = lsame_(uplo, "U");
    if (! upper && ! lsame_(uplo, "L")) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*n)) {
	*info = -4;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CSYTRI", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }

/*     Check that the diagonal matrix D is nonsingular. */

    /* NOTE: INFO itself is used as the loop index, so on early return it
       already holds the index of the zero 1x1 pivot (f2c idiom). */
    if (upper) {

/*        Upper triangular storage: examine D from bottom to top */

	for (*info = *n; *info >= 1; --(*info)) {
	    i__1 = a_subscr(*info, *info);
	    if (ipiv[*info] > 0 && (a[i__1].r == 0.f && a[i__1].i == 0.f)) {
		return 0;
	    }
/* L10: */
	}
    } else {

/*        Lower triangular storage: examine D from top to bottom. */

	i__1 = *n;
	for (*info = 1; *info <= i__1; ++(*info)) {
	    i__2 = a_subscr(*info, *info);
	    if (ipiv[*info] > 0 && (a[i__2].r == 0.f && a[i__2].i == 0.f)) {
		return 0;
	    }
/* L20: */
	}
    }
    *info = 0;

    if (upper) {

/*        Compute inv(A) from the factorization A = U*D*U'.

          K is the main loop index, increasing from 1 to N in steps of
          1 or 2, depending on the size of the diagonal blocks. */

	k = 1;
L30:

/*        If K > N, exit from loop. */

	if (k > *n) {
	    goto L40;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block

             Invert the diagonal block. */

	    i__1 = a_subscr(k, k);
	    c_div(&q__1, &c_b1, &a_ref(k, k));
	    a[i__1].r = q__1.r, a[i__1].i = q__1.i;

/*           Compute column K of the inverse. */

	    if (k > 1) {
		/* a(1:k-1,k) := -inv(A(1:k-1,1:k-1)) * a(1:k-1,k), then
		   correct the diagonal with the symmetric dot product
		   (cdotu, not cdotc: symmetric, not Hermitian). */
		i__1 = k - 1;
		ccopy_(&i__1, &a_ref(1, k), &c__1, &work[1], &c__1);
		i__1 = k - 1;
		q__1.r = -1.f, q__1.i = 0.f;
		csymv_(uplo, &i__1, &q__1, &a[a_offset], lda, &work[1], &c__1,
			 &c_b2, &a_ref(1, k), &c__1);
		i__1 = a_subscr(k, k);
		i__2 = a_subscr(k, k);
		i__3 = k - 1;
		cdotu_(&q__2, &i__3, &work[1], &c__1, &a_ref(1, k), &c__1);
		q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r, a[i__1].i = q__1.i;
	    }
	    kstep = 1;
	} else {

/*           2 x 2 diagonal block

             Invert the diagonal block. */

	    /* Explicit inverse of the 2x2 symmetric block
	       [ a(k,k) a(k,k+1); a(k,k+1) a(k+1,k+1) ], scaled by the
	       off-diagonal t to improve conditioning. */
	    i__1 = a_subscr(k, k + 1);
	    t.r = a[i__1].r, t.i = a[i__1].i;
	    c_div(&q__1, &a_ref(k, k), &t);
	    ak.r = q__1.r, ak.i = q__1.i;
	    c_div(&q__1, &a_ref(k + 1, k + 1), &t);
	    akp1.r = q__1.r, akp1.i = q__1.i;
	    c_div(&q__1, &a_ref(k, k + 1), &t);
	    akkp1.r = q__1.r, akkp1.i = q__1.i;
	    /* d = t * (ak*akp1 - 1): the (scaled) block determinant. */
	    q__3.r = ak.r * akp1.r - ak.i * akp1.i, q__3.i = ak.r * akp1.i +
		    ak.i * akp1.r;
	    q__2.r = q__3.r - 1.f, q__2.i = q__3.i + 0.f;
	    q__1.r = t.r * q__2.r - t.i * q__2.i, q__1.i = t.r * q__2.i + t.i
		    * q__2.r;
	    d__.r = q__1.r, d__.i = q__1.i;
	    i__1 = a_subscr(k, k);
	    c_div(&q__1, &akp1, &d__);
	    a[i__1].r = q__1.r, a[i__1].i = q__1.i;
	    i__1 = a_subscr(k + 1, k + 1);
	    c_div(&q__1, &ak, &d__);
	    a[i__1].r = q__1.r, a[i__1].i = q__1.i;
	    i__1 = a_subscr(k, k + 1);
	    q__2.r = -akkp1.r, q__2.i = -akkp1.i;
	    c_div(&q__1, &q__2, &d__);
	    a[i__1].r = q__1.r, a[i__1].i = q__1.i;

/*           Compute columns K and K+1 of the inverse. */

	    if (k > 1) {
		/* Column K: same update pattern as the 1x1 case. */
		i__1 = k - 1;
		ccopy_(&i__1, &a_ref(1, k), &c__1, &work[1], &c__1);
		i__1 = k - 1;
		q__1.r = -1.f, q__1.i = 0.f;
		csymv_(uplo, &i__1, &q__1, &a[a_offset], lda, &work[1], &c__1,
			 &c_b2, &a_ref(1, k), &c__1);
		i__1 = a_subscr(k, k);
		i__2 = a_subscr(k, k);
		i__3 = k - 1;
		cdotu_(&q__2, &i__3, &work[1], &c__1, &a_ref(1, k), &c__1);
		q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r, a[i__1].i = q__1.i;
		/* Coupling term a(k,k+1) -= a(1:k-1,k)' * a(1:k-1,k+1). */
		i__1 = a_subscr(k, k + 1);
		i__2 = a_subscr(k, k + 1);
		i__3 = k - 1;
		cdotu_(&q__2, &i__3, &a_ref(1, k), &c__1, &a_ref(1, k + 1), &
			c__1);
		q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r, a[i__1].i = q__1.i;
		/* Column K+1. */
		i__1 = k - 1;
		ccopy_(&i__1, &a_ref(1, k + 1), &c__1, &work[1], &c__1);
		i__1 = k - 1;
		q__1.r = -1.f, q__1.i = 0.f;
		csymv_(uplo, &i__1, &q__1, &a[a_offset], lda, &work[1], &c__1,
			 &c_b2, &a_ref(1, k + 1), &c__1);
		i__1 = a_subscr(k + 1, k + 1);
		i__2 = a_subscr(k + 1, k + 1);
		i__3 = k - 1;
		cdotu_(&q__2, &i__3, &work[1], &c__1, &a_ref(1, k + 1), &c__1)
			;
		q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r, a[i__1].i = q__1.i;
	    }
	    kstep = 2;
	}

	/* Undo the interchange recorded by CSYTRF (|ipiv(k)| is the
	   pivot row/column). */
	kp = (i__1 = ipiv[k], abs(i__1));
	if (kp != k) {

/*           Interchange rows and columns K and KP in the leading
             submatrix A(1:k+1,1:k+1) */

	    i__1 = kp - 1;
	    cswap_(&i__1, &a_ref(1, k), &c__1, &a_ref(1, kp), &c__1);
	    i__1 = k - kp - 1;
	    cswap_(&i__1, &a_ref(kp + 1, k), &c__1, &a_ref(kp, kp + 1), lda);
	    i__1 = a_subscr(k, k);
	    temp.r = a[i__1].r, temp.i = a[i__1].i;
	    i__1 = a_subscr(k, k);
	    i__2 = a_subscr(kp, kp);
	    a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
	    i__1 = a_subscr(kp, kp);
	    a[i__1].r = temp.r, a[i__1].i = temp.i;
	    if (kstep == 2) {
		i__1 = a_subscr(k, k + 1);
		temp.r = a[i__1].r, temp.i = a[i__1].i;
		i__1 = a_subscr(k, k + 1);
		i__2 = a_subscr(kp, k + 1);
		a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
		i__1 = a_subscr(kp, k + 1);
		a[i__1].r = temp.r, a[i__1].i = temp.i;
	    }
	}

	k += kstep;
	goto L30;
L40:
	;
    } else {

/*        Compute inv(A) from the factorization A = L*D*L'.

          K is the main loop index, increasing from 1 to N in steps of
          1 or 2, depending on the size of the diagonal blocks. */

	k = *n;
L50:

/*        If K < 1, exit from loop. */

	if (k < 1) {
	    goto L60;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block

             Invert the diagonal block. */

	    i__1 = a_subscr(k, k);
	    c_div(&q__1, &c_b1, &a_ref(k, k));
	    a[i__1].r = q__1.r, a[i__1].i = q__1.i;

/*           Compute column K of the inverse. */

	    if (k < *n) {
		/* Mirror of the upper case, acting on the trailing
		   submatrix A(k+1:n,k+1:n). */
		i__1 = *n - k;
		ccopy_(&i__1, &a_ref(k + 1, k), &c__1, &work[1], &c__1);
		i__1 = *n - k;
		q__1.r = -1.f, q__1.i = 0.f;
		csymv_(uplo, &i__1, &q__1, &a_ref(k + 1, k + 1), lda, &work[1]
			, &c__1, &c_b2, &a_ref(k + 1, k), &c__1);
		i__1 = a_subscr(k, k);
		i__2 = a_subscr(k, k);
		i__3 = *n - k;
		cdotu_(&q__2, &i__3, &work[1], &c__1, &a_ref(k + 1, k), &c__1)
			;
		q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r, a[i__1].i = q__1.i;
	    }
	    kstep = 1;
	} else {

/*           2 x 2 diagonal block

             Invert the diagonal block. */

	    /* The 2x2 block is at rows/columns K-1:K in the lower case. */
	    i__1 = a_subscr(k, k - 1);
	    t.r = a[i__1].r, t.i = a[i__1].i;
	    c_div(&q__1, &a_ref(k - 1, k - 1), &t);
	    ak.r = q__1.r, ak.i = q__1.i;
	    c_div(&q__1, &a_ref(k, k), &t);
	    akp1.r = q__1.r, akp1.i = q__1.i;
	    c_div(&q__1, &a_ref(k, k - 1), &t);
	    akkp1.r = q__1.r, akkp1.i = q__1.i;
	    q__3.r = ak.r * akp1.r - ak.i * akp1.i, q__3.i = ak.r * akp1.i +
		    ak.i * akp1.r;
	    q__2.r = q__3.r - 1.f, q__2.i = q__3.i + 0.f;
	    q__1.r = t.r * q__2.r - t.i * q__2.i, q__1.i = t.r * q__2.i + t.i
		    * q__2.r;
	    d__.r = q__1.r, d__.i = q__1.i;
	    i__1 = a_subscr(k - 1, k - 1);
	    c_div(&q__1, &akp1, &d__);
	    a[i__1].r = q__1.r, a[i__1].i = q__1.i;
	    i__1 = a_subscr(k, k);
	    c_div(&q__1, &ak, &d__);
	    a[i__1].r = q__1.r, a[i__1].i = q__1.i;
	    i__1 = a_subscr(k, k - 1);
	    q__2.r = -akkp1.r, q__2.i = -akkp1.i;
	    c_div(&q__1, &q__2, &d__);
	    a[i__1].r = q__1.r, a[i__1].i = q__1.i;

/*           Compute columns K-1 and K of the inverse. */

	    if (k < *n) {
		/* Column K. */
		i__1 = *n - k;
		ccopy_(&i__1, &a_ref(k + 1, k), &c__1, &work[1], &c__1);
		i__1 = *n - k;
		q__1.r = -1.f, q__1.i = 0.f;
		csymv_(uplo, &i__1, &q__1, &a_ref(k + 1, k + 1), lda, &work[1]
			, &c__1, &c_b2, &a_ref(k + 1, k), &c__1);
		i__1 = a_subscr(k, k);
		i__2 = a_subscr(k, k);
		i__3 = *n - k;
		cdotu_(&q__2, &i__3, &work[1], &c__1, &a_ref(k + 1, k), &c__1)
			;
		q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r, a[i__1].i = q__1.i;
		/* Coupling term a(k,k-1). */
		i__1 = a_subscr(k, k - 1);
		i__2 = a_subscr(k, k - 1);
		i__3 = *n - k;
		cdotu_(&q__2, &i__3, &a_ref(k + 1, k), &c__1, &a_ref(k + 1, k
			- 1), &c__1);
		q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r, a[i__1].i = q__1.i;
		/* Column K-1. */
		i__1 = *n - k;
		ccopy_(&i__1, &a_ref(k + 1, k - 1), &c__1, &work[1], &c__1);
		i__1 = *n - k;
		q__1.r = -1.f, q__1.i = 0.f;
		csymv_(uplo, &i__1, &q__1, &a_ref(k + 1, k + 1), lda, &work[1]
			, &c__1, &c_b2, &a_ref(k + 1, k - 1), &c__1);
		i__1 = a_subscr(k - 1, k - 1);
		i__2 = a_subscr(k - 1, k - 1);
		i__3 = *n - k;
		cdotu_(&q__2, &i__3, &work[1], &c__1, &a_ref(k + 1, k - 1), &
			c__1);
		q__1.r = a[i__2].r - q__2.r, q__1.i = a[i__2].i - q__2.i;
		a[i__1].r = q__1.r, a[i__1].i = q__1.i;
	    }
	    kstep = 2;
	}

	kp = (i__1 = ipiv[k], abs(i__1));
	if (kp != k) {

/*           Interchange rows and columns K and KP in the trailing
             submatrix A(k-1:n,k-1:n) */

	    if (kp < *n) {
		i__1 = *n - kp;
		cswap_(&i__1, &a_ref(kp + 1, k), &c__1, &a_ref(kp + 1, kp), &
			c__1);
	    }
	    i__1 = kp - k - 1;
	    cswap_(&i__1, &a_ref(k + 1, k), &c__1, &a_ref(kp, k + 1), lda);
	    i__1 = a_subscr(k, k);
	    temp.r = a[i__1].r, temp.i = a[i__1].i;
	    i__1 = a_subscr(k, k);
	    i__2 = a_subscr(kp, kp);
	    a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
	    i__1 = a_subscr(kp, kp);
	    a[i__1].r = temp.r, a[i__1].i = temp.i;
	    if (kstep == 2) {
		i__1 = a_subscr(k, k - 1);
		temp.r = a[i__1].r, temp.i = a[i__1].i;
		i__1 = a_subscr(k, k - 1);
		i__2 = a_subscr(kp, k - 1);
		a[i__1].r = a[i__2].r, a[i__1].i = a[i__2].i;
		i__1 = a_subscr(kp, k - 1);
		a[i__1].r = temp.r, a[i__1].i = temp.i;
	    }
	}

	k -= kstep;
	goto L50;
L60:
	;
    }

    return 0;

/*     End of CSYTRI */

} /* csytri_ */
/* Subroutine */ int cchkpb_(logical *dotype, integer *nn, integer *nval,
	integer *nnb, integer *nbval, integer *nns, integer *nsval, real *
	thresh, logical *tsterr, integer *nmax, complex *a, complex *afac,
	complex *ainv, complex *b, complex *x, complex *xact, complex *work,
	real *rwork, integer *nout)
{
    /* Initialized data */

    static integer iseedy[4] = { 1988,1989,1990,1991 };

    /* Format strings */

    static char fmt_9999[] = "(\002 UPLO='\002,a1,\002', N=\002,i5,\002, KD"
	    "=\002,i5,\002, NB=\002,i4,\002, type \002,i2,\002, test \002,i2"
	    ",\002, ratio= \002,g12.5)";
    static char fmt_9998[] = "(\002 UPLO='\002,a1,\002', N=\002,i5,\002, KD"
	    "=\002,i5,\002, NRHS=\002,i3,\002, type \002,i2,\002, test(\002,i"
	    "2,\002) = \002,g12.5)";
    static char fmt_9997[] = "(\002 UPLO='\002,a1,\002', N=\002,i5,\002, KD"
	    "=\002,i5,\002,\002,10x,\002 type \002,i2,\002, test(\002,i2,\002"
	    ") = \002,g12.5)";

    /* System generated locals */
    integer i__1, i__2, i__3, i__4, i__5, i__6;

    /* Local variables */
    integer i__, k, n, i1, i2, kd, nb, in, kl, iw, ku, lda, ikd, inb, nkd,
	    ldab, ioff, mode, koff, imat, info;
    char path[3], dist[1];
    integer irhs, nrhs;
    char uplo[1], type__[1];
    integer nrun;
    integer nfail, iseed[4];
    integer kdval[4];
    real rcond;
    integer nimat;
    real anorm;
    integer iuplo, izero, nerrs;
    logical zerot;
    char xtype[1];
    real rcondc;
    char packit[1];
    real cndnum;
    real ainvnm;
    real result[7];

    /* Fortran I/O blocks */
    static cilist io___40 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___46 = { 0, 0, 0, fmt_9998, 0 };
    static cilist io___48 = { 0, 0, 0, fmt_9997, 0 };


/*  -- LAPACK test routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CCHKPB tests CPBTRF, -TRS, -RFS, and -CON. */

/*  Arguments */
/*  ========= */

/*  DOTYPE (input) LOGICAL array, dimension (NTYPES) */
/*         The matrix types to be used for testing.  Matrices of type j */
/*         (for 1 <= j <= NTYPES) are used for testing if DOTYPE(j) = */
/*         .TRUE.; if DOTYPE(j) = .FALSE., then type j is not used. */

/*  NN     (input) INTEGER */
/*         The number of values of N contained in the vector NVAL. */

/*  NVAL   (input) INTEGER array, dimension (NN) */
/*         The values of the matrix dimension N. */

/*  NNB    (input) INTEGER */
/*         The number of values of NB contained in the vector NBVAL. */

/*  NBVAL  (input) INTEGER array, dimension (NBVAL) */
/*         The values of the blocksize NB. */

/*  NNS    (input) INTEGER */
/*         The number of values of NRHS contained in the vector NSVAL. */

/*  NSVAL  (input) INTEGER array, dimension (NNS) */
/*         The values of the number of right hand sides NRHS. */

/*  THRESH (input) REAL */
/*         The threshold value for the test ratios.  A result is */
/*         included in the output file if RESULT >= THRESH.  To have */
/*         every test ratio printed, use THRESH = 0. */

/*  TSTERR (input) LOGICAL */
/*         Flag that indicates whether error exits are to be tested. */

/*  NMAX   (input) INTEGER */
/*         The maximum value permitted for N, used in dimensioning the */
/*         work arrays. */

/*  A      (workspace) COMPLEX array, dimension (NMAX*NMAX) */

/*  AFAC   (workspace) COMPLEX array, dimension (NMAX*NMAX) */

/*  AINV   (workspace) COMPLEX array, dimension (NMAX*NMAX) */

/*  B      (workspace) COMPLEX array, dimension (NMAX*NSMAX) */
/*         where NSMAX is the largest entry in NSVAL. */

/*  X      (workspace) COMPLEX array, dimension (NMAX*NSMAX) */

/*  XACT   (workspace) COMPLEX array, dimension (NMAX*NSMAX) */

/*  WORK   (workspace) COMPLEX array, dimension */
/*                     (NMAX*max(3,NSMAX)) */

/*  RWORK  (workspace) REAL array, dimension */
/*                     (max(NMAX,2*NSMAX)) */

/*  NOUT   (input) INTEGER */
/*         The unit number for output. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Scalars in Common .. */
/*     .. */
/*     .. Common blocks .. */
/*     .. */
/*     .. Data statements .. */

    /* Parameter adjustments */
    --rwork;
    --work;
    --xact;
    --x;
    --b;
    --ainv;
    --afac;
    --a;
    --nsval;
    --nbval;
    --nval;
    --dotype;

    /* Function Body */
/*     .. */
/*     .. Executable Statements .. */

/*     Initialize constants and the random number seed. */

    /* Only the first character ('C') of the source string lands in
       path[0]: the destination length is (ftnlen)1. */
    s_copy(path, "Complex precision", (ftnlen)1, (ftnlen)17);
    s_copy(path + 1, "PB", (ftnlen)2, (ftnlen)2);
    nrun = 0;
    nfail = 0;
    nerrs = 0;
    for (i__ = 1; i__ <= 4; ++i__) {
	iseed[i__ - 1] = iseedy[i__ - 1];
/* L10: */
    }

/*     Test the error exits */

    if (*tsterr) {
	cerrpo_(path, nout);
    }
    infoc_1.infot = 0;
    kdval[0] = 0;

/*     Do for each value of N in NVAL */

    i__1 = *nn;
    for (in = 1; in <= i__1; ++in) {
	n = nval[in];
	lda = max(n,1);
	*(unsigned char *)xtype = 'N';

/*        Set limits on the number of loop iterations. */

/* Computing MAX */
	i__2 = 1, i__3 = min(n,4);
	nkd = max(i__2,i__3);
	nimat = 8;
	if (n == 0) {
	    nimat = 1;
	}

	kdval[1] = n + (n + 1) / 4;
	kdval[2] = (n * 3 - 1) / 4;
	kdval[3] = (n + 1) / 4;

	i__2 = nkd;
	for (ikd = 1; ikd <= i__2; ++ikd) {

/*           Do for KD = 0, (5*N+1)/4, (3N-1)/4, and (N+1)/4. This order */
/*           makes it easier to skip redundant values for small values */
/*           of N. */

	    kd = kdval[ikd - 1];
	    ldab = kd + 1;

/*           Do first for UPLO = 'U', then for UPLO = 'L' */

	    for (iuplo = 1; iuplo <= 2; ++iuplo) {
		koff = 1;
		if (iuplo == 1) {
		    *(unsigned char *)uplo = 'U';
/* Computing MAX */
		    /* For upper band storage the first KD rows may be
		       unused when KD >= N; start writing at KOFF. */
		    i__3 = 1, i__4 = kd + 2 - n;
		    koff = max(i__3,i__4);
		    *(unsigned char *)packit = 'Q';
		} else {
		    *(unsigned char *)uplo = 'L';
		    *(unsigned char *)packit = 'B';
		}

		i__3 = nimat;
		for (imat = 1; imat <= i__3; ++imat) {

/*                 Do the tests only if DOTYPE( IMAT ) is true. */

		    if (! dotype[imat]) {
			goto L60;
		    }

/*                 Skip types 2, 3, or 4 if the matrix size is too small. */

		    zerot = imat >= 2 && imat <= 4;
		    if (zerot && n < imat - 1) {
			goto L60;
		    }

		    if (! zerot || ! dotype[1]) {

/*                    Set up parameters with CLATB4 and generate a test */
/*                    matrix with CLATMS. */

			clatb4_(path, &imat, &n, &n, type__, &kl, &ku, &anorm,
				 &mode, &cndnum, dist);

			s_copy(srnamc_1.srnamt, "CLATMS", (ftnlen)32, (ftnlen)
				6);
			clatms_(&n, &n, dist, iseed, type__, &rwork[1], &mode,
				 &cndnum, &anorm, &kd, &kd, packit, &a[koff],
				&ldab, &work[1], &info);

/*                    Check error code from CLATMS. */

			if (info != 0) {
			    alaerh_(path, "CLATMS", &info, &c__0, uplo, &n, &
				    n, &kd, &kd, &c_n1, &imat, &nfail, &nerrs,
				     nout);
			    goto L60;
			}
		    } else if (izero > 0) {

/*                    Use the same matrix for types 3 and 4 as for type */
/*                    2 by copying back the zeroed out column, */

			/* izero/i1/i2/the saved column in WORK survive
			   from the previous IMAT iteration (see the
			   zeroing block below). */
			iw = (lda << 1) + 1;
			if (iuplo == 1) {
			    ioff = (izero - 1) * ldab + kd + 1;
			    i__4 = izero - i1;
			    ccopy_(&i__4, &work[iw], &c__1, &a[ioff - izero +
				    i1], &c__1);
			    iw = iw + izero - i1;
			    i__4 = i2 - izero + 1;
/* Computing MAX */
			    i__6 = ldab - 1;
			    i__5 = max(i__6,1);
			    ccopy_(&i__4, &work[iw], &c__1, &a[ioff], &i__5);
			} else {
			    ioff = (i1 - 1) * ldab + 1;
			    i__4 = izero - i1;
/* Computing MAX */
			    i__6 = ldab - 1;
			    i__5 = max(i__6,1);
			    ccopy_(&i__4, &work[iw], &c__1, &a[ioff + izero -
				    i1], &i__5);
			    ioff = (izero - 1) * ldab + 1;
			    iw = iw + izero - i1;
			    i__4 = i2 - izero + 1;
			    ccopy_(&i__4, &work[iw], &c__1, &a[ioff], &c__1);
			}
		    }

/*                 For types 2-4, zero one row and column of the matrix */
/*                 to test that INFO is returned correctly. */

		    izero = 0;
		    if (zerot) {
			if (imat == 2) {
			    izero = 1;
			} else if (imat == 3) {
			    izero = n;
			} else {
			    izero = n / 2 + 1;
			}

/*                    Save the zeroed out row and column in WORK(*,3) */

			iw = lda << 1;
/* Computing MIN */
			i__5 = (kd << 1) + 1;
			i__4 = min(i__5,n);
			for (i__ = 1; i__ <= i__4; ++i__) {
			    i__5 = iw + i__;
			    work[i__5].r = 0.f, work[i__5].i = 0.f;
/* L20: */
			}
			++iw;
/* Computing MAX */
			i__4 = izero - kd;
			i1 = max(i__4,1);
/* Computing MIN */
			i__4 = izero + kd;
			i2 = min(i__4,n);

			/* Swap row/column IZERO (within the band) with
			   the zeros prepared in WORK, saving the
			   original entries for later restoration. */
			if (iuplo == 1) {
			    ioff = (izero - 1) * ldab + kd + 1;
			    i__4 = izero - i1;
			    cswap_(&i__4, &a[ioff - izero + i1], &c__1, &work[
				    iw], &c__1);
			    iw = iw + izero - i1;
			    i__4 = i2 - izero + 1;
/* Computing MAX */
			    i__6 = ldab - 1;
			    i__5 = max(i__6,1);
			    cswap_(&i__4, &a[ioff], &i__5, &work[iw], &c__1);
			} else {
			    ioff = (i1 - 1) * ldab + 1;
			    i__4 = izero - i1;
/* Computing MAX */
			    i__6 = ldab - 1;
			    i__5 = max(i__6,1);
			    cswap_(&i__4, &a[ioff + izero - i1], &i__5, &work[
				    iw], &c__1);
			    ioff = (izero - 1) * ldab + 1;
			    iw = iw + izero - i1;
			    i__4 = i2 - izero + 1;
			    cswap_(&i__4, &a[ioff], &c__1, &work[iw], &c__1);
			}
		    }

/*                 Set the imaginary part of the diagonals. */

		    if (iuplo == 1) {
			claipd_(&n, &a[kd + 1], &ldab, &c__0);
		    } else {
			claipd_(&n, &a[1], &ldab, &c__0);
		    }

/*                 Do for each value of NB in NBVAL */

		    i__4 = *nnb;
		    for (inb = 1; inb <= i__4; ++inb) {
			nb = nbval[inb];
			xlaenv_(&c__1, &nb);

/*                    Compute the L*L' or U'*U factorization of the band */
/*                    matrix. */

			i__5 = kd + 1;
			clacpy_("Full", &i__5, &n, &a[1], &ldab, &afac[1], &
				ldab);
			s_copy(srnamc_1.srnamt, "CPBTRF", (ftnlen)32, (ftnlen)
				6);
			cpbtrf_(uplo, &n, &kd, &afac[1], &ldab, &info);

/*                    Check error code from CPBTRF. */

			if (info != izero) {
			    alaerh_(path, "CPBTRF", &info, &izero, uplo, &n, &
				    n, &kd, &kd, &nb, &imat, &nfail, &nerrs,
				    nout);
			    goto L50;
			}

/*                    Skip the tests if INFO is not 0. */

			if (info != 0) {
			    goto L50;
			}

/*   + TEST 1 */
/*                    Reconstruct matrix from factors and compute */
/*                    residual. */

			i__5 = kd + 1;
			clacpy_("Full", &i__5, &n, &afac[1], &ldab, &ainv[1],
				&ldab);
			cpbt01_(uplo, &n, &kd, &a[1], &ldab, &ainv[1], &ldab,
				&rwork[1], result);

/*                    Print the test ratio if it is .GE. THRESH. */

			if (result[0] >= *thresh) {
			    if (nfail == 0 && nerrs == 0) {
				alahd_(nout, path);
			    }
			    io___40.ciunit = *nout;
			    s_wsfe(&io___40);
			    do_fio(&c__1, uplo, (ftnlen)1);
			    do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer))
				    ;
			    do_fio(&c__1, (char *)&kd, (ftnlen)sizeof(integer)
				    );
			    do_fio(&c__1, (char *)&nb, (ftnlen)sizeof(integer)
				    );
			    do_fio(&c__1, (char *)&imat, (ftnlen)sizeof(
				    integer));
			    do_fio(&c__1, (char *)&c__1, (ftnlen)sizeof(
				    integer));
			    do_fio(&c__1, (char *)&result[0], (ftnlen)sizeof(
				    real));
			    e_wsfe();
			    ++nfail;
			}
			++nrun;

/*                    Only do other tests if this is the first blocksize. */

			if (inb > 1) {
			    goto L50;
			}

/*                    Form the inverse of A so we can get a good estimate */
/*                    of RCONDC = 1/(norm(A) * norm(inv(A))). */

			claset_("Full", &n, &n, &c_b50, &c_b51, &ainv[1], &
				lda);
			s_copy(srnamc_1.srnamt, "CPBTRS", (ftnlen)32, (ftnlen)
				6);
			cpbtrs_(uplo, &n, &kd, &n, &afac[1], &ldab, &ainv[1],
				&lda, &info);

/*                    Compute RCONDC = 1/(norm(A) * norm(inv(A))). */

			anorm = clanhb_("1", uplo, &n, &kd, &a[1], &ldab, &
				rwork[1]);
			ainvnm = clange_("1", &n, &n, &ainv[1], &lda, &rwork[
				1]);
			if (anorm <= 0.f || ainvnm <= 0.f) {
			    rcondc = 1.f;
			} else {
			    rcondc = 1.f / anorm / ainvnm;
			}

			i__5 = *nns;
			for (irhs = 1; irhs <= i__5; ++irhs) {
			    nrhs = nsval[irhs];

/*   + TEST 2 */
/*                    Solve and compute residual for A * X = B. */

			    s_copy(srnamc_1.srnamt, "CLARHS", (ftnlen)32, (
				    ftnlen)6);
			    clarhs_(path, xtype, uplo, " ", &n, &n, &kd, &kd,
				    &nrhs, &a[1], &ldab, &xact[1], &lda, &b[1]
				    , &lda, iseed, &info);
			    clacpy_("Full", &n, &nrhs, &b[1], &lda, &x[1], &
				    lda);

			    s_copy(srnamc_1.srnamt, "CPBTRS", (ftnlen)32, (
				    ftnlen)6);
			    cpbtrs_(uplo, &n, &kd, &nrhs, &afac[1], &ldab, &x[
				    1], &lda, &info);

/*                    Check error code from CPBTRS. */

			    if (info != 0) {
				alaerh_(path, "CPBTRS", &info, &c__0, uplo, &
					n, &n, &kd, &kd, &nrhs, &imat, &nfail,
					 &nerrs, nout);
			    }

			    clacpy_("Full", &n, &nrhs, &b[1], &lda, &work[1],
				    &lda);
			    cpbt02_(uplo, &n, &kd, &nrhs, &a[1], &ldab, &x[1],
				     &lda, &work[1], &lda, &rwork[1], &result[
				    1]);

/*   + TEST 3 */
/*                    Check solution from generated exact solution. */

			    cget04_(&n, &nrhs, &x[1], &lda, &xact[1], &lda, &
				    rcondc, &result[2]);

/*   + TESTS 4, 5, and 6 */
/*                    Use iterative refinement to improve the solution. */

			    s_copy(srnamc_1.srnamt, "CPBRFS", (ftnlen)32, (
				    ftnlen)6);
			    cpbrfs_(uplo, &n, &kd, &nrhs, &a[1], &ldab, &afac[
				    1], &ldab, &b[1], &lda, &x[1], &lda, &
				    rwork[1], &rwork[nrhs + 1], &work[1], &
				    rwork[(nrhs << 1) + 1], &info);

/*                    Check error code from CPBRFS. */

			    if (info != 0) {
				alaerh_(path, "CPBRFS", &info, &c__0, uplo, &
					n, &n, &kd, &kd, &nrhs, &imat, &nfail,
					 &nerrs, nout);
			    }

			    cget04_(&n, &nrhs, &x[1], &lda, &xact[1], &lda, &
				    rcondc, &result[3]);
			    cpbt05_(uplo, &n, &kd, &nrhs, &a[1], &ldab, &b[1],
				     &lda, &x[1], &lda, &xact[1], &lda, &
				    rwork[1], &rwork[nrhs + 1], &result[4]);

/*                       Print information about the tests that did not */
/*                       pass the threshold. */

			    for (k = 2; k <= 6; ++k) {
				if (result[k - 1] >= *thresh) {
				    if (nfail == 0 && nerrs == 0) {
					alahd_(nout, path);
				    }
				    io___46.ciunit = *nout;
				    s_wsfe(&io___46);
				    do_fio(&c__1, uplo, (ftnlen)1);
				    do_fio(&c__1, (char *)&n, (ftnlen)sizeof(
					    integer));
				    do_fio(&c__1, (char *)&kd, (ftnlen)sizeof(
					    integer));
				    do_fio(&c__1, (char *)&nrhs, (ftnlen)
					    sizeof(integer));
				    do_fio(&c__1, (char *)&imat, (ftnlen)
					    sizeof(integer));
				    do_fio(&c__1, (char *)&k, (ftnlen)sizeof(
					    integer));
				    do_fio(&c__1, (char *)&result[k - 1], (
					    ftnlen)sizeof(real));
				    e_wsfe();
				    ++nfail;
				}
/* L30: */
			    }
			    nrun += 5;
/* L40: */
			}

/*   + TEST 7 */
/*                    Get an estimate of RCOND = 1/CNDNUM. */

			s_copy(srnamc_1.srnamt, "CPBCON", (ftnlen)32, (ftnlen)
				6);
			cpbcon_(uplo, &n, &kd, &afac[1], &ldab, &anorm, &
				rcond, &work[1], &rwork[1], &info);

/*                    Check error code from CPBCON. */

			if (info != 0) {
			    alaerh_(path, "CPBCON", &info, &c__0, uplo, &n, &
				    n, &kd, &kd, &c_n1, &imat, &nfail, &nerrs,
				     nout);
			}

			result[6] = sget06_(&rcond, &rcondc);

/*                    Print the test ratio if it is .GE. THRESH. */

			if (result[6] >= *thresh) {
			    if (nfail == 0 && nerrs == 0) {
				alahd_(nout, path);
			    }
			    io___48.ciunit = *nout;
			    s_wsfe(&io___48);
			    do_fio(&c__1, uplo, (ftnlen)1);
			    do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer))
				    ;
			    do_fio(&c__1, (char *)&kd, (ftnlen)sizeof(integer)
				    );
			    do_fio(&c__1, (char *)&imat, (ftnlen)sizeof(
				    integer));
			    do_fio(&c__1, (char *)&c__7, (ftnlen)sizeof(
				    integer));
			    do_fio(&c__1, (char *)&result[6], (ftnlen)sizeof(
				    real));
			    e_wsfe();
			    ++nfail;
			}
			++nrun;
L50:
			;
		    }
L60:
		    ;
		}
/* L70: */
	    }
/* L80: */
	}
/* L90: */
    }

/*     Print a summary of the results. */

    alasum_(path, nout, &nfail, &nrun, &nerrs);

    return 0;

/*     End of CCHKPB */

} /* cchkpb_ */
/* Subroutine */ int cbdsqr_(char *uplo, integer *n, integer *ncvt, integer *
	nru, integer *ncc, real *d__, real *e, complex *vt, integer *ldvt,
	complex *u, integer *ldu, complex *c__, integer *ldc, real *rwork,
	integer *info)
{
    /* System generated locals */
    integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
	    i__2;
    real r__1, r__2, r__3, r__4;
    doublereal d__1;

    /* Local variables */
    real f, g, h__;
    integer i__, j, m;
    real r__, cs;
    integer ll;
    real sn, mu;
    integer nm1, nm12, nm13, lll;
    real eps, sll, tol, abse;
    integer idir;
    real abss;
    integer oldm;
    real cosl;
    integer isub, iter;
    real unfl, sinl, cosr, smin, smax, sinr;
    real oldcs;
    integer oldll;
    real shift, sigmn, oldsn;
    integer maxit;
    real sminl, sigmx;
    logical lower;
    real sminoa;
    real thresh;
    logical rotate;
    real tolmul;

/*  -- LAPACK routine (version 3.2) -- */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  CBDSQR computes the singular values and, optionally, the right and/or */
/*  left singular vectors from the singular value decomposition (SVD) of */
/*  a real N-by-N (upper or lower) bidiagonal matrix B using the implicit */
/*  zero-shift QR algorithm.  The SVD of B has the form */

/*     B = Q * S * P**H */

/*  where S is the diagonal matrix of singular values, Q is an orthogonal */
/*  matrix of left singular vectors, and P is an orthogonal matrix of */
/*  right singular vectors.  If left singular vectors are requested, this */
/*  subroutine actually returns U*Q instead of Q, and, if right singular */
/*  vectors are requested, this subroutine returns P**H*VT instead of */
/*  P**H, for given complex input matrices U and VT.  When U and VT are */
/*  the unitary matrices that reduce a general matrix A to bidiagonal */
/*  form:  A = U*B*VT, as computed by CGEBRD, then */

/*     A = (U*Q) * S * (P**H*VT) */

/*  is the SVD of A.  Optionally, the subroutine may also compute Q**H*C */
/*  for a given complex input matrix C. */

/*  See "Computing Small Singular Values of Bidiagonal Matrices With */
/*  Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, */
/*  LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11, */
/*  no. 5, pp. 873-912, Sept 1990) and */
/*  "Accurate singular values and differential qd algorithms," by */
/*  B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics */
/*  Department, University of California at Berkeley, July 1992 */
/*  for a detailed description of the algorithm. */

/*  Arguments */
/*  ========= */

/*  UPLO    (input) CHARACTER*1 */
/*          = 'U':  B is upper bidiagonal; */
/*          = 'L':  B is lower bidiagonal. */

/*  N       (input) INTEGER */
/*          The order of the matrix B.  N >= 0. */

/*  NCVT    (input) INTEGER */
/*          The number of columns of the matrix VT. NCVT >= 0. */

/*  NRU     (input) INTEGER */
/*          The number of rows of the matrix U. NRU >= 0. */

/*  NCC     (input) INTEGER */
/*          The number of columns of the matrix C. NCC >= 0. */

/*  D       (input/output) REAL array, dimension (N) */
/*          On entry, the n diagonal elements of the bidiagonal matrix B. */
/*          On exit, if INFO=0, the singular values of B in decreasing */
/*          order. */

/*  E       (input/output) REAL array, dimension (N-1) */
/*          On entry, the N-1 offdiagonal elements of the bidiagonal */
/*          matrix B. */
/*          On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E */
/*          will contain the diagonal and superdiagonal elements of a */
/*          bidiagonal matrix orthogonally equivalent to the one given */
/*          as input. */

/*  VT      (input/output) COMPLEX array, dimension (LDVT, NCVT) */
/*          On entry, an N-by-NCVT matrix VT. */
/*          On exit, VT is overwritten by P**H * VT. */
/*          Not referenced if NCVT = 0. */

/*  LDVT    (input) INTEGER */
/*          The leading dimension of the array VT. */
/*          LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0. */

/*  U       (input/output) COMPLEX array, dimension (LDU, N) */
/*          On entry, an NRU-by-N matrix U. */
/*          On exit, U is overwritten by U * Q. */
/*          Not referenced if NRU = 0. */

/*  LDU     (input) INTEGER */
/*          The leading dimension of the array U.  LDU >= max(1,NRU). */

/*  C       (input/output) COMPLEX array, dimension (LDC, NCC) */
/*          On entry, an N-by-NCC matrix C. */
/*          On exit, C is overwritten by Q**H * C. */
/*          Not referenced if NCC = 0. */

/*  LDC     (input) INTEGER */
/*          The leading dimension of the array C. */
/*          LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. */

/*  RWORK   (workspace) REAL array, dimension (2*N) */
/*          if NCVT = NRU = NCC = 0, (max(1, 4*N-4)) otherwise */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  If INFO = -i, the i-th argument had an illegal value */
/*          > 0:  the algorithm did not converge; D and E contain the */
/*                elements of a bidiagonal matrix which is orthogonally */
/*                similar to the input matrix B;  if INFO = i, i */
/*                elements of E have not converged to zero. */

/*  Internal Parameters */
/*  =================== */

/*  TOLMUL  REAL, default = max(10,min(100,EPS**(-1/8))) */
/*          TOLMUL controls the convergence criterion of the QR loop. */
/*          If it is positive, TOLMUL*EPS is the desired relative */
/*             precision in the computed singular values. */
/*          If it is negative, abs(TOLMUL*EPS*sigma_max) is the */
/*             desired absolute accuracy in the computed singular */
/*             values (corresponds to relative accuracy */
/*             abs(TOLMUL*EPS) in the largest singular value. */
/*          abs(TOLMUL) should be between 1 and 1/EPS, and preferably */
/*             between 10 (for fast convergence) and .1/EPS */
/*             (for there to be some accuracy in the results). */
/*          Default is to lose at either one eighth or 2 of the */
/*             available decimal digits in each computed singular value */
/*             (whichever is smaller). */

/*  MAXITR  INTEGER, default = 6 */
/*          MAXITR controls the maximum number of passes of the */
/*          algorithm through its inner loop. The algorithms stops */
/*          (and so fails to converge) if the number of passes */
/*          through the inner loop exceeds MAXITR*N**2. */

/*  ===================================================================== */

/*     Test the input parameters. */

    /* Parameter adjustments */
    /* f2c idiom: shift each array pointer so that 1-based Fortran
       indexing (d__[1..n], e[1..n-1], vt[i + j*vt_dim1], ...) works. */
    --d__;
    --e;
    vt_dim1 = *ldvt;
    vt_offset = 1 + vt_dim1;
    vt -= vt_offset;
    u_dim1 = *ldu;
    u_offset = 1 + u_dim1;
    u -= u_offset;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1;
    c__ -= c_offset;
    --rwork;

    /* Function Body */
    *info = 0;
    lower = lsame_(uplo, "L");
    if (! lsame_(uplo, "U") && ! lower) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*ncvt < 0) {
	*info = -3;
    } else if (*nru < 0) {
	*info = -4;
    } else if (*ncc < 0) {
	*info = -5;
    } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) {
	*info = -9;
    } else if (*ldu < max(1,*nru)) {
	*info = -11;
    } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) {
	*info = -13;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CBDSQR", &i__1);
	return 0;
    }
    if (*n == 0) {
	return 0;
    }
    if (*n == 1) {
	/* 1x1 matrix: only need the sign fix / sort epilogue at L160. */
	goto L160;
    }

/*     ROTATE is true if any singular vectors desired, false otherwise */

    rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;

/*     If no singular vectors desired, use qd algorithm */

    if (! rotate) {
	slasq1_(n, &d__[1], &e[1], &rwork[1], info);
	return 0;
    }

    /* rwork slot layout used by the rotation-saving loops below:
       [1 .. nm1]          cosines (cs / cosr)
       [nm1+1 .. nm12]     sines   (sn / sinr)
       [nm12+1 .. nm13]    second cosines (oldcs / cosl)
       [nm13+1 .. nm13+nm1] second sines  (oldsn / sinl)           */
    nm1 = *n - 1;
    nm12 = nm1 + nm1;
    nm13 = nm12 + nm1;
    idir = 0;

/*     Get machine constants */

    eps = slamch_("Epsilon");
    unfl = slamch_("Safe minimum");

/*     If matrix lower bidiagonal, rotate to be upper bidiagonal */
/*     by applying Givens rotations on the left */

    if (lower) {
	i__1 = *n - 1;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
	    d__[i__] = r__;
	    e[i__] = sn * d__[i__ + 1];
	    d__[i__ + 1] = cs * d__[i__ + 1];
	    rwork[i__] = cs;
	    rwork[nm1 + i__] = sn;
	}

/*        Update singular vectors if desired */

	if (*nru > 0) {
	    clasr_("R", "V", "F", nru, n, &rwork[1], &rwork[*n], &u[u_offset],
		     ldu);
	}
	if (*ncc > 0) {
	    clasr_("L", "V", "F", n, ncc, &rwork[1], &rwork[*n], &c__[
		    c_offset], ldc);
	}
    }

/*     Compute singular values to relative accuracy TOL */
/*     (By setting TOL to be negative, algorithm will compute */
/*     singular values to absolute accuracy ABS(TOL)*norm(input matrix)) */

/* Computing MAX */
/* Computing MIN */
    d__1 = (doublereal) eps;
    r__3 = 100.f, r__4 = pow_dd(&d__1, &c_b15);
    r__1 = 10.f, r__2 = dmin(r__3,r__4);
    tolmul = dmax(r__1,r__2);
    tol = tolmul * eps;

/*     Compute approximate maximum, minimum singular values */

    smax = 0.f;
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
/* Computing MAX */
	r__2 = smax, r__3 = (r__1 = d__[i__], dabs(r__1));
	smax = dmax(r__2,r__3);
    }
    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
/* Computing MAX */
	r__2 = smax, r__3 = (r__1 = e[i__], dabs(r__1));
	smax = dmax(r__2,r__3);
    }
    sminl = 0.f;
    if (tol >= 0.f) {

/*        Relative accuracy desired */

	sminoa = dabs(d__[1]);
	if (sminoa == 0.f) {
	    goto L50;
	}
	mu = sminoa;
	i__1 = *n;
	for (i__ = 2; i__ <= i__1; ++i__) {
	    /* Recurrence estimating the smallest singular value
	       (Demmel-Kahan, LAWN #3). */
	    mu = (r__2 = d__[i__], dabs(r__2)) * (mu / (mu + (r__1 = e[i__ -
		    1], dabs(r__1))));
	    sminoa = dmin(sminoa,mu);
	    if (sminoa == 0.f) {
		goto L50;
	    }
	}
L50:
	sminoa /= sqrt((real) (*n));
/* Computing MAX */
	r__1 = tol * sminoa, r__2 = *n * 6 * *n * unfl;
	thresh = dmax(r__1,r__2);
    } else {

/*        Absolute accuracy desired */

/* Computing MAX */
	r__1 = dabs(tol) * smax, r__2 = *n * 6 * *n * unfl;
	thresh = dmax(r__1,r__2);
    }

/*     Prepare for main iteration loop for the singular values */
/*     (MAXIT is the maximum number of passes through the inner */
/*     loop permitted before nonconvergence signalled.) */

    maxit = *n * 6 * *n;
    iter = 0;
    oldll = -1;
    oldm = -1;

/*     M points to last element of unconverged part of matrix */

    m = *n;

/*     Begin main iteration loop */

L60:

/*     Check for convergence or exceeding iteration count */

    if (m <= 1) {
	goto L160;
    }
    if (iter > maxit) {
	goto L200;
    }

/*     Find diagonal block of matrix to work on */

    if (tol < 0.f && (r__1 = d__[m], dabs(r__1)) <= thresh) {
	d__[m] = 0.f;
    }
    smax = (r__1 = d__[m], dabs(r__1));
    smin = smax;
    i__1 = m - 1;
    for (lll = 1; lll <= i__1; ++lll) {
	ll = m - lll;
	abss = (r__1 = d__[ll], dabs(r__1));
	abse = (r__1 = e[ll], dabs(r__1));
	if (tol < 0.f && abss <= thresh) {
	    d__[ll] = 0.f;
	}
	if (abse <= thresh) {
	    goto L80;
	}
	smin = dmin(smin,abss);
/* Computing MAX */
	r__1 = max(smax,abss);
	smax = dmax(r__1,abse);
    }
    ll = 0;
    goto L90;
L80:
    e[ll] = 0.f;

/*     Matrix splits since E(LL) = 0 */

    if (ll == m - 1) {

/*        Convergence of bottom singular value, return to top of loop */

	--m;
	goto L60;
    }
L90:
    ++ll;

/*     E(LL) through E(M-1) are nonzero, E(LL-1) is zero */

    if (ll == m - 1) {

/*        2 by 2 block, handle separately */

	slasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr,
		 &sinl, &cosl);
	d__[m - 1] = sigmx;
	e[m - 1] = 0.f;
	d__[m] = sigmn;

/*        Compute singular vectors, if desired */

	if (*ncvt > 0) {
	    csrot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, &
		    cosr, &sinr);
	}
	if (*nru > 0) {
	    csrot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], &
		    c__1, &cosl, &sinl);
	}
	if (*ncc > 0) {
	    csrot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, &
		    cosl, &sinl);
	}
	m += -2;
	goto L60;
    }

/*     If working on new submatrix, choose shift direction */
/*     (from larger end diagonal element towards smaller) */

    if (ll > oldm || m < oldll) {
	if ((r__1 = d__[ll], dabs(r__1)) >= (r__2 = d__[m], dabs(r__2))) {

/*           Chase bulge from top (big end) to bottom (small end) */

	    idir = 1;
	} else {

/*           Chase bulge from bottom (big end) to top (small end) */

	    idir = 2;
	}
    }

/*     Apply convergence tests */

    if (idir == 1) {

/*        Run convergence test in forward direction */
/*        First apply standard test to bottom of matrix */

	if ((r__2 = e[m - 1], dabs(r__2)) <= dabs(tol) * (r__1 = d__[m], dabs(
		r__1)) || tol < 0.f && (r__3 = e[m - 1], dabs(r__3)) <=
		thresh) {
	    e[m - 1] = 0.f;
	    goto L60;
	}

	if (tol >= 0.f) {

/*           If relative accuracy desired, */
/*           apply convergence criterion forward */

	    mu = (r__1 = d__[ll], dabs(r__1));
	    sminl = mu;
	    i__1 = m - 1;
	    for (lll = ll; lll <= i__1; ++lll) {
		if ((r__1 = e[lll], dabs(r__1)) <= tol * mu) {
		    e[lll] = 0.f;
		    goto L60;
		}
		mu = (r__2 = d__[lll + 1], dabs(r__2)) * (mu / (mu + (r__1 =
			e[lll], dabs(r__1))));
		sminl = dmin(sminl,mu);
	    }
	}

    } else {

/*        Run convergence test in backward direction */
/*        First apply standard test to top of matrix */

	if ((r__2 = e[ll], dabs(r__2)) <= dabs(tol) * (r__1 = d__[ll], dabs(
		r__1)) || tol < 0.f && (r__3 = e[ll], dabs(r__3)) <= thresh) {
	    e[ll] = 0.f;
	    goto L60;
	}

	if (tol >= 0.f) {

/*           If relative accuracy desired, */
/*           apply convergence criterion backward */

	    mu = (r__1 = d__[m], dabs(r__1));
	    sminl = mu;
	    i__1 = ll;
	    for (lll = m - 1; lll >= i__1; --lll) {
		if ((r__1 = e[lll], dabs(r__1)) <= tol * mu) {
		    e[lll] = 0.f;
		    goto L60;
		}
		mu = (r__2 = d__[lll], dabs(r__2)) * (mu / (mu + (r__1 = e[
			lll], dabs(r__1))));
		sminl = dmin(sminl,mu);
	    }
	}
    }
    oldll = ll;
    oldm = m;

/*     Compute shift.  First, test if shifting would ruin relative */
/*     accuracy, and if so set the shift to zero. */

/* Computing MAX */
    r__1 = eps, r__2 = tol * .01f;
    if (tol >= 0.f && *n * tol * (sminl / smax) <= dmax(r__1,r__2)) {

/*        Use a zero shift to avoid loss of relative accuracy */

	shift = 0.f;
    } else {

/*        Compute the shift from 2-by-2 block at end of matrix */

	if (idir == 1) {
	    sll = (r__1 = d__[ll], dabs(r__1));
	    slas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__);
	} else {
	    sll = (r__1 = d__[m], dabs(r__1));
	    slas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__);
	}

/*        Test if shift negligible, and if so set to zero */

	if (sll > 0.f) {
/* Computing 2nd power */
	    r__1 = shift / sll;
	    if (r__1 * r__1 < eps) {
		shift = 0.f;
	    }
	}
    }

/*     Increment iteration count */

    iter = iter + m - ll;

/*     If SHIFT = 0, do simplified QR iteration */

    if (shift == 0.f) {
	if (idir == 1) {

/*           Chase bulge from top to bottom */
/*           Save cosines and sines for later singular vector updates */

	    cs = 1.f;
	    oldcs = 1.f;
	    i__1 = m - 1;
	    for (i__ = ll; i__ <= i__1; ++i__) {
		r__1 = d__[i__] * cs;
		slartg_(&r__1, &e[i__], &cs, &sn, &r__);
		if (i__ > ll) {
		    e[i__ - 1] = oldsn * r__;
		}
		r__1 = oldcs * r__;
		r__2 = d__[i__ + 1] * sn;
		slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]);
		rwork[i__ - ll + 1] = cs;
		rwork[i__ - ll + 1 + nm1] = sn;
		rwork[i__ - ll + 1 + nm12] = oldcs;
		rwork[i__ - ll + 1 + nm13] = oldsn;
	    }
	    h__ = d__[m] * cs;
	    d__[m] = h__ * oldcs;
	    e[m - 1] = h__ * oldsn;

/*           Update singular vectors */

	    if (*ncvt > 0) {
		i__1 = m - ll + 1;
		clasr_("L", "V", "F", &i__1, ncvt, &rwork[1], &rwork[*n], &vt[
			ll + vt_dim1], ldvt);
	    }
	    if (*nru > 0) {
		i__1 = m - ll + 1;
		clasr_("R", "V", "F", nru, &i__1, &rwork[nm12 + 1], &rwork[
			nm13 + 1], &u[ll * u_dim1 + 1], ldu);
	    }
	    if (*ncc > 0) {
		i__1 = m - ll + 1;
		clasr_("L", "V", "F", &i__1, ncc, &rwork[nm12 + 1], &rwork[
			nm13 + 1], &c__[ll + c_dim1], ldc);
	    }

/*           Test convergence */

	    if ((r__1 = e[m - 1], dabs(r__1)) <= thresh) {
		e[m - 1] = 0.f;
	    }

	} else {

/*           Chase bulge from bottom to top */
/*           Save cosines and sines for later singular vector updates */

	    cs = 1.f;
	    oldcs = 1.f;
	    i__1 = ll + 1;
	    for (i__ = m; i__ >= i__1; --i__) {
		r__1 = d__[i__] * cs;
		slartg_(&r__1, &e[i__ - 1], &cs, &sn, &r__);
		if (i__ < m) {
		    e[i__] = oldsn * r__;
		}
		r__1 = oldcs * r__;
		r__2 = d__[i__ - 1] * sn;
		slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]);
		rwork[i__ - ll] = cs;
		/* Sines are negated: the transpose of the saved rotations
		   is what clasr_ must apply in the backward chase. */
		rwork[i__ - ll + nm1] = -sn;
		rwork[i__ - ll + nm12] = oldcs;
		rwork[i__ - ll + nm13] = -oldsn;
	    }
	    h__ = d__[ll] * cs;
	    d__[ll] = h__ * oldcs;
	    e[ll] = h__ * oldsn;

/*           Update singular vectors */

	    if (*ncvt > 0) {
		i__1 = m - ll + 1;
		clasr_("L", "V", "B", &i__1, ncvt, &rwork[nm12 + 1], &rwork[
			nm13 + 1], &vt[ll + vt_dim1], ldvt);
	    }
	    if (*nru > 0) {
		i__1 = m - ll + 1;
		clasr_("R", "V", "B", nru, &i__1, &rwork[1], &rwork[*n], &u[
			ll * u_dim1 + 1], ldu);
	    }
	    if (*ncc > 0) {
		i__1 = m - ll + 1;
		clasr_("L", "V", "B", &i__1, ncc, &rwork[1], &rwork[*n], &c__[
			ll + c_dim1], ldc);
	    }

/*           Test convergence */

	    if ((r__1 = e[ll], dabs(r__1)) <= thresh) {
		e[ll] = 0.f;
	    }
	}
    } else {

/*        Use nonzero shift */

	if (idir == 1) {

/*           Chase bulge from top to bottom */
/*           Save cosines and sines for later singular vector updates */

	    f = ((r__1 = d__[ll], dabs(r__1)) - shift) * (r_sign(&c_b49, &d__[
		    ll]) + shift / d__[ll]);
	    g = e[ll];
	    i__1 = m - 1;
	    for (i__ = ll; i__ <= i__1; ++i__) {
		slartg_(&f, &g, &cosr, &sinr, &r__);
		if (i__ > ll) {
		    e[i__ - 1] = r__;
		}
		f = cosr * d__[i__] + sinr * e[i__];
		e[i__] = cosr * e[i__] - sinr * d__[i__];
		g = sinr * d__[i__ + 1];
		d__[i__ + 1] = cosr * d__[i__ + 1];
		slartg_(&f, &g, &cosl, &sinl, &r__);
		d__[i__] = r__;
		f = cosl * e[i__] + sinl * d__[i__ + 1];
		d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__];
		if (i__ < m - 1) {
		    g = sinl * e[i__ + 1];
		    e[i__ + 1] = cosl * e[i__ + 1];
		}
		rwork[i__ - ll + 1] = cosr;
		rwork[i__ - ll + 1 + nm1] = sinr;
		rwork[i__ - ll + 1 + nm12] = cosl;
		rwork[i__ - ll + 1 + nm13] = sinl;
	    }
	    e[m - 1] = f;

/*           Update singular vectors */

	    if (*ncvt > 0) {
		i__1 = m - ll + 1;
		clasr_("L", "V", "F", &i__1, ncvt, &rwork[1], &rwork[*n], &vt[
			ll + vt_dim1], ldvt);
	    }
	    if (*nru > 0) {
		i__1 = m - ll + 1;
		clasr_("R", "V", "F", nru, &i__1, &rwork[nm12 + 1], &rwork[
			nm13 + 1], &u[ll * u_dim1 + 1], ldu);
	    }
	    if (*ncc > 0) {
		i__1 = m - ll + 1;
		clasr_("L", "V", "F", &i__1, ncc, &rwork[nm12 + 1], &rwork[
			nm13 + 1], &c__[ll + c_dim1], ldc);
	    }

/*           Test convergence */

	    if ((r__1 = e[m - 1], dabs(r__1)) <= thresh) {
		e[m - 1] = 0.f;
	    }

	} else {

/*           Chase bulge from bottom to top */
/*           Save cosines and sines for later singular vector updates */

	    f = ((r__1 = d__[m], dabs(r__1)) - shift) * (r_sign(&c_b49, &d__[
		    m]) + shift / d__[m]);
	    g = e[m - 1];
	    i__1 = ll + 1;
	    for (i__ = m; i__ >= i__1; --i__) {
		slartg_(&f, &g, &cosr, &sinr, &r__);
		if (i__ < m) {
		    e[i__] = r__;
		}
		f = cosr * d__[i__] + sinr * e[i__ - 1];
		e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__];
		g = sinr * d__[i__ - 1];
		d__[i__ - 1] = cosr * d__[i__ - 1];
		slartg_(&f, &g, &cosl, &sinl, &r__);
		d__[i__] = r__;
		f = cosl * e[i__ - 1] + sinl * d__[i__ - 1];
		d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1];
		if (i__ > ll + 1) {
		    g = sinl * e[i__ - 2];
		    e[i__ - 2] = cosl * e[i__ - 2];
		}
		rwork[i__ - ll] = cosr;
		rwork[i__ - ll + nm1] = -sinr;
		rwork[i__ - ll + nm12] = cosl;
		rwork[i__ - ll + nm13] = -sinl;
	    }
	    e[ll] = f;

/*           Test convergence */

	    if ((r__1 = e[ll], dabs(r__1)) <= thresh) {
		e[ll] = 0.f;
	    }

/*           Update singular vectors if desired */

	    if (*ncvt > 0) {
		i__1 = m - ll + 1;
		clasr_("L", "V", "B", &i__1, ncvt, &rwork[nm12 + 1], &rwork[
			nm13 + 1], &vt[ll + vt_dim1], ldvt);
	    }
	    if (*nru > 0) {
		i__1 = m - ll + 1;
		clasr_("R", "V", "B", nru, &i__1, &rwork[1], &rwork[*n], &u[
			ll * u_dim1 + 1], ldu);
	    }
	    if (*ncc > 0) {
		i__1 = m - ll + 1;
		clasr_("L", "V", "B", &i__1, ncc, &rwork[1], &rwork[*n], &c__[
			ll + c_dim1], ldc);
	    }
	}
    }

/*     QR iteration finished, go back and check convergence */

    goto L60;

/*     All singular values converged, so make them positive */

L160:
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (d__[i__] < 0.f) {
	    d__[i__] = -d__[i__];

/*           Change sign of singular vectors, if desired */

	    if (*ncvt > 0) {
		csscal_(ncvt, &c_b72, &vt[i__ + vt_dim1], ldvt);
	    }
	}
    }

/*     Sort the singular values into decreasing order (insertion sort on */
/*     singular values, but only one transposition per singular vector) */

    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {

/*        Scan for smallest D(I) */

	isub = 1;
	smin = d__[1];
	i__2 = *n + 1 - i__;
	for (j = 2; j <= i__2; ++j) {
	    if (d__[j] <= smin) {
		isub = j;
		smin = d__[j];
	    }
	}
	if (isub != *n + 1 - i__) {

/*           Swap singular values and vectors */

	    d__[isub] = d__[*n + 1 - i__];
	    d__[*n + 1 - i__] = smin;
	    if (*ncvt > 0) {
		cswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ +
			vt_dim1], ldvt);
	    }
	    if (*nru > 0) {
		cswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) *
			u_dim1 + 1], &c__1);
	    }
	    if (*ncc > 0) {
		cswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ +
			c_dim1], ldc);
	    }
	}
    }
    goto L220;

/*     Maximum number of iterations exceeded, failure to converge */

L200:
    /* INFO reports how many off-diagonal elements failed to become zero. */
    *info = 0;
    i__1 = *n - 1;
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (e[i__] != 0.f) {
	    ++(*info);
	}
    }
L220:
    return 0;

/*     End of CBDSQR */

} /* cbdsqr_ */
/* Subroutine */ int chpevx_(char *jobz, char *range, char *uplo, integer *n,
	complex *ap, real *vl, real *vu, integer *il, integer *iu, real *
	abstol, integer *m, real *w, complex *z__, integer *ldz, complex *
	work, real *rwork, integer *iwork, integer *ifail, integer *info)
{
/*  -- LAPACK driver routine (version 3.0) --
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       June 30, 1999

    Purpose
    =======

    CHPEVX computes selected eigenvalues and, optionally, eigenvectors
    of a complex Hermitian matrix A in packed storage.
    Eigenvalues/vectors can be selected by specifying either a range of
    values or a range of indices for the desired eigenvalues.

    Arguments
    =========

    JOBZ    (input) CHARACTER*1
            = 'N':  Compute eigenvalues only;
            = 'V':  Compute eigenvalues and eigenvectors.

    RANGE   (input) CHARACTER*1
            = 'A': all eigenvalues will be found;
            = 'V': all eigenvalues in the half-open interval (VL,VU]
                   will be found;
            = 'I': the IL-th through IU-th eigenvalues will be found.

    UPLO    (input) CHARACTER*1
            = 'U':  Upper triangle of A is stored;
            = 'L':  Lower triangle of A is stored.

    N       (input) INTEGER
            The order of the matrix A.  N >= 0.

    AP      (input/output) COMPLEX array, dimension (N*(N+1)/2)
            On entry, the upper or lower triangle of the Hermitian matrix
            A, packed columnwise in a linear array.  The j-th column of A
            is stored in the array AP as follows:
            if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j;
            if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n.

            On exit, AP is overwritten by values generated during the
            reduction to tridiagonal form.  If UPLO = 'U', the diagonal
            and first superdiagonal of the tridiagonal matrix T overwrite
            the corresponding elements of A, and if UPLO = 'L', the
            diagonal and first subdiagonal of T overwrite the
            corresponding elements of A.

    VL      (input) REAL
    VU      (input) REAL
            If RANGE='V', the lower and upper bounds of the interval to
            be searched for eigenvalues. VL < VU.
            Not referenced if RANGE = 'A' or 'I'.

    IL      (input) INTEGER
    IU      (input) INTEGER
            If RANGE='I', the indices (in ascending order) of the
            smallest and largest eigenvalues to be returned.
            1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0.
            Not referenced if RANGE = 'A' or 'V'.

    ABSTOL  (input) REAL
            The absolute error tolerance for the eigenvalues.
            An approximate eigenvalue is accepted as converged
            when it is determined to lie in an interval [a,b]
            of width less than or equal to

                    ABSTOL + EPS *   max( |a|,|b| ) ,

            where EPS is the machine precision.  If ABSTOL is less than
            or equal to zero, then  EPS*|T|  will be used in its place,
            where |T| is the 1-norm of the tridiagonal matrix obtained
            by reducing AP to tridiagonal form.

            Eigenvalues will be computed most accurately when ABSTOL is
            set to twice the underflow threshold 2*SLAMCH('S'), not zero.
            If this routine returns with INFO>0, indicating that some
            eigenvectors did not converge, try setting ABSTOL to
            2*SLAMCH('S').

            See "Computing Small Singular Values of Bidiagonal Matrices
            with Guaranteed High Relative Accuracy," by Demmel and
            Kahan, LAPACK Working Note #3.

    M       (output) INTEGER
            The total number of eigenvalues found.  0 <= M <= N.
            If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1.

    W       (output) REAL array, dimension (N)
            If INFO = 0, the selected eigenvalues in ascending order.

    Z       (output) COMPLEX array, dimension (LDZ, max(1,M))
            If JOBZ = 'V', then if INFO = 0, the first M columns of Z
            contain the orthonormal eigenvectors of the matrix A
            corresponding to the selected eigenvalues, with the i-th
            column of Z holding the eigenvector associated with W(i).
            If an eigenvector fails to converge, then that column of Z
            contains the latest approximation to the eigenvector, and
            the index of the eigenvector is returned in IFAIL.
            If JOBZ = 'N', then Z is not referenced.
            Note: the user must ensure that at least max(1,M) columns are
            supplied in the array Z; if RANGE = 'V', the exact value of M
            is not known in advance and an upper bound must be used.

    LDZ     (input) INTEGER
            The leading dimension of the array Z.  LDZ >= 1, and if
            JOBZ = 'V', LDZ >= max(1,N).

    WORK    (workspace) COMPLEX array, dimension (2*N)

    RWORK   (workspace) REAL array, dimension (7*N)

    IWORK   (workspace) INTEGER array, dimension (5*N)

    IFAIL   (output) INTEGER array, dimension (N)
            If JOBZ = 'V', then if INFO = 0, the first M elements of
            IFAIL are zero.  If INFO > 0, then IFAIL contains the
            indices of the eigenvectors that failed to converge.
            If JOBZ = 'N', then IFAIL is not referenced.

    INFO    (output) INTEGER
            = 0:  successful exit
            < 0:  if INFO = -i, the i-th argument had an illegal value
            > 0:  if INFO = i, then i eigenvectors failed to converge.
                  Their indices are stored in array IFAIL.

    =====================================================================

       Test the input parameters.

       Parameter adjustments */
    /* Table of constant values */
    static integer c__1 = 1;

    /* System generated locals */
    integer z_dim1, z_offset, i__1, i__2;
    real r__1, r__2;
    /* Builtin functions */
    double sqrt(doublereal);
    /* Local variables */
    static integer indd, inde;
    static real anrm;
    static integer imax;
    static real rmin, rmax;
    static integer itmp1, i__, j, indee;
    static real sigma;
    extern logical lsame_(char *, char *);
    static integer iinfo;
    extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
    static char order[1];
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
	    complex *, integer *), scopy_(integer *, real *, integer *, real *
	    , integer *);
    static logical wantz;
    static integer jj;
    static logical alleig, indeig;
    static integer iscale, indibl;
    extern doublereal clanhp_(char *, char *, integer *, complex *, real *);
    static logical valeig;
    extern doublereal slamch_(char *);
    extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
	    *);
    static real safmin;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    static real abstll, bignum;
    static integer indiwk, indisp, indtau;
    extern /* Subroutine */ int chptrd_(char *, integer *, complex *, real *,
	    real *, complex *, integer *), cstein_(integer *, real *, real *,
	    integer *, real *, integer *, integer *, complex *, integer *,
	    real *, integer *, integer *, integer *);
    static integer indrwk, indwrk;
    extern /* Subroutine */ int csteqr_(char *, integer *, real *, real *,
	    complex *, integer *, real *, integer *), cupgtr_(char *, integer
	    *, complex *, complex *, complex *, integer *, complex *, integer
	    *), ssterf_(integer *, real *, real *, integer *);
    static integer nsplit;
    extern /* Subroutine */ int cupmtr_(char *, char *, char *, integer *,
	    integer *, complex *, complex *, complex *, integer *, complex *,
	    integer *);
    static real smlnum;
    extern /* Subroutine */ int sstebz_(char *, char *, integer *, real *,
	    real *, integer *, integer *, real *, real *, real *, integer *,
	    integer *, real *, integer *, integer *, real *, integer *,
	    integer *);
    static real eps, vll, vuu, tmp1;
    /* 1-based 2-D indexing into the (pointer-adjusted) Z array. */
#define z___subscr(a_1,a_2) (a_2)*z_dim1 + a_1
#define z___ref(a_1,a_2) z__[z___subscr(a_1,a_2)]

    /* f2c idiom: shift pointers so 1-based Fortran indexing works. */
    --ap;
    --w;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1 * 1;
    z__ -= z_offset;
    --work;
    --rwork;
    --iwork;
    --ifail;

    /* Function Body */
    wantz = lsame_(jobz, "V");
    alleig = lsame_(range, "A");
    valeig = lsame_(range, "V");
    indeig = lsame_(range, "I");

    *info = 0;
    if (! (wantz || lsame_(jobz, "N"))) {
	*info = -1;
    } else if (! (alleig || valeig || indeig)) {
	*info = -2;
    } else if (! (lsame_(uplo, "L") || lsame_(uplo, "U"))) {
	*info = -3;
    } else if (*n < 0) {
	*info = -4;
    } else {
	if (valeig) {
	    if (*n > 0 && *vu <= *vl) {
		*info = -7;
	    }
	} else if (indeig) {
	    if (*il < 1 || *il > max(1,*n)) {
		*info = -8;
	    } else if (*iu < min(*n,*il) || *iu > *n) {
		*info = -9;
	    }
	}
    }
    if (*info == 0) {
	if (*ldz < 1 || wantz && *ldz < *n) {
	    *info = -14;
	}
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CHPEVX", &i__1);
	return 0;
    }

/*     Quick return if possible */

    *m = 0;
    if (*n == 0) {
	return 0;
    }

    if (*n == 1) {
	/* 1x1 matrix: the single (real) diagonal entry is the eigenvalue. */
	if (alleig || indeig) {
	    *m = 1;
	    w[1] = ap[1].r;
	} else {
	    if (*vl < ap[1].r && *vu >= ap[1].r) {
		*m = 1;
		w[1] = ap[1].r;
	    }
	}
	if (wantz) {
	    i__1 = z___subscr(1, 1);
	    z__[i__1].r = 1.f, z__[i__1].i = 0.f;
	}
	return 0;
    }

/*     Get machine constants. */

    safmin = slamch_("Safe minimum");
    eps = slamch_("Precision");
    smlnum = safmin / eps;
    bignum = 1.f / smlnum;
    rmin = sqrt(smlnum);
/* Computing MIN */
    r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin));
    rmax = dmin(r__1,r__2);

/*     Scale matrix to allowable range, if necessary. */

    iscale = 0;
    abstll = *abstol;
    if (valeig) {
	vll = *vl;
	vuu = *vu;
    } else {
	vll = 0.f;
	vuu = 0.f;
    }
    anrm = clanhp_("M", uplo, n, &ap[1], &rwork[1]);
    if (anrm > 0.f && anrm < rmin) {
	iscale = 1;
	sigma = rmin / anrm;
    } else if (anrm > rmax) {
	iscale = 1;
	sigma = rmax / anrm;
    }
    if (iscale == 1) {
	/* Scale the whole packed triangle (n*(n+1)/2 entries) by sigma;
	   tolerance and interval bounds are scaled to match. */
	i__1 = *n * (*n + 1) / 2;
	csscal_(&i__1, &sigma, &ap[1], &c__1);
	if (*abstol > 0.f) {
	    abstll = *abstol * sigma;
	}
	if (valeig) {
	    vll = *vl * sigma;
	    vuu = *vu * sigma;
	}
    }

/*     Call CHPTRD to reduce Hermitian packed matrix to tridiagonal form. */

    /* RWORK partition: [indd..] diagonal D, [inde..] off-diagonal E,
       [indrwk..] scratch for CSTEQR/SSTEBZ/CSTEIN.
       WORK partition: [indtau..] Householder taus, [indwrk..] scratch. */
    indd = 1;
    inde = indd + *n;
    indrwk = inde + *n;
    indtau = 1;
    indwrk = indtau + *n;
    chptrd_(uplo, n, &ap[1], &rwork[indd], &rwork[inde], &work[indtau], &
	    iinfo);

/*     If all eigenvalues are desired and ABSTOL is less than or equal
       to zero, then call SSTERF or CUPGTR and CSTEQR.  If this fails
       for some eigenvalue, then try SSTEBZ. */

    if ((alleig || indeig && *il == 1 && *iu == *n) && *abstol <= 0.f) {
	scopy_(n, &rwork[indd], &c__1, &w[1], &c__1);
	indee = indrwk + (*n << 1);
	if (! wantz) {
	    i__1 = *n - 1;
	    scopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
	    ssterf_(n, &w[1], &rwork[indee], info);
	} else {
	    /* Form the unitary matrix from CHPTRD, then get eigenvalues
	       and eigenvectors of the tridiagonal matrix. */
	    cupgtr_(uplo, n, &ap[1], &work[indtau], &z__[z_offset], ldz, &
		    work[indwrk], &iinfo);
	    i__1 = *n - 1;
	    scopy_(&i__1, &rwork[inde], &c__1, &rwork[indee], &c__1);
	    csteqr_(jobz, n, &w[1], &rwork[indee], &z__[z_offset], ldz, &
		    rwork[indrwk], info);
	    if (*info == 0) {
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    ifail[i__] = 0;
/* L10: */
		}
	    }
	}
	if (*info == 0) {
	    *m = *n;
	    goto L20;
	}
	/* Fast path failed: reset INFO and fall through to SSTEBZ/CSTEIN. */
	*info = 0;
    }

/*     Otherwise, call SSTEBZ and, if eigenvectors are desired, CSTEIN. */

    if (wantz) {
	*(unsigned char *)order = 'B';
    } else {
	*(unsigned char *)order = 'E';
    }
    /* IWORK partition: [indibl..] block indices, [indisp..] split points,
       [indiwk..] scratch for SSTEBZ/CSTEIN. */
    indibl = 1;
    indisp = indibl + *n;
    indiwk = indisp + *n;
    sstebz_(range, order, n, &vll, &vuu, il, iu, &abstll, &rwork[indd], &
	    rwork[inde], m, &nsplit, &w[1], &iwork[indibl], &iwork[indisp], &
	    rwork[indrwk], &iwork[indiwk], info);

    if (wantz) {
	cstein_(n, &rwork[indd], &rwork[inde], m, &w[1], &iwork[indibl], &
		iwork[indisp], &z__[z_offset], ldz, &rwork[indrwk], &iwork[
		indiwk], &ifail[1], info);

/*        Apply unitary matrix used in reduction to tridiagonal
          form to eigenvectors returned by CSTEIN. */

	indwrk = indtau + *n;
	cupmtr_("L", uplo, "N", n, m, &ap[1], &work[indtau], &z__[z_offset],
		ldz, &work[indwrk], info);
    }

/*     If matrix was scaled, then rescale eigenvalues appropriately. */

L20:
    if (iscale == 1) {
	if (*info == 0) {
	    imax = *m;
	} else {
	    /* Only the eigenvalues before the first failure are valid. */
	    imax = *info - 1;
	}
	r__1 = 1.f / sigma;
	sscal_(&imax, &r__1, &w[1], &c__1);
    }

/*     If eigenvalues are not in order, then sort them, along with
       eigenvectors. */

    if (wantz) {
	i__1 = *m - 1;
	for (j = 1; j <= i__1; ++j) {

	    /* Selection sort: find the smallest remaining eigenvalue. */
	    i__ = 0;
	    tmp1 = w[j];
	    i__2 = *m;
	    for (jj = j + 1; jj <= i__2; ++jj) {
		if (w[jj] < tmp1) {
		    i__ = jj;
		    tmp1 = w[jj];
		}
/* L30: */
	    }

	    if (i__ != 0) {
		/* Swap eigenvalues, block indices, eigenvectors, and
		   (on failure) the IFAIL entries, keeping them aligned. */
		itmp1 = iwork[indibl + i__ - 1];
		w[i__] = w[j];
		iwork[indibl + i__ - 1] = iwork[indibl + j - 1];
		w[j] = tmp1;
		iwork[indibl + j - 1] = itmp1;
		cswap_(n, &z___ref(1, i__), &c__1, &z___ref(1, j), &c__1);
		if (*info != 0) {
		    itmp1 = ifail[i__];
		    ifail[i__] = ifail[j];
		    ifail[j] = itmp1;
		}
	    }
/* L40: */
	}
    }

    return 0;

/*     End of CHPEVX */

} /* chpevx_ */
/* Subroutine */ int chetrs_(char *uplo, integer *n, integer *nrhs, complex *
	a, integer *lda, integer *ipiv, complex *b, integer *ldb, integer *
	info)
{
/*  -- LAPACK routine (version 3.0) --
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       September 30, 1994

    Purpose
    =======

    CHETRS solves a system of linear equations A*X = B with a complex
    Hermitian matrix A using the factorization A = U*D*U**H or
    A = L*D*L**H computed by CHETRF.

    Arguments
    =========

    UPLO    (input) CHARACTER*1
            Specifies whether the details of the factorization are stored
            as an upper or lower triangular matrix.
            = 'U':  Upper triangular, form is A = U*D*U**H;
            = 'L':  Lower triangular, form is A = L*D*L**H.

    N       (input) INTEGER
            The order of the matrix A.  N >= 0.

    NRHS    (input) INTEGER
            The number of right hand sides, i.e., the number of columns
            of the matrix B.  NRHS >= 0.

    A       (input) COMPLEX array, dimension (LDA,N)
            The block diagonal matrix D and the multipliers used to
            obtain the factor U or L as computed by CHETRF.

    LDA     (input) INTEGER
            The leading dimension of the array A.  LDA >= max(1,N).

    IPIV    (input) INTEGER array, dimension (N)
            Details of the interchanges and the block structure of D
            as determined by CHETRF.

    B       (input/output) COMPLEX array, dimension (LDB,NRHS)
            On entry, the right hand side matrix B.
            On exit, the solution matrix X.

    LDB     (input) INTEGER
            The leading dimension of the array B.  LDB >= max(1,N).

    INFO    (output) INTEGER
            = 0:  successful exit
            < 0:  if INFO = -i, the i-th argument had an illegal value

    =====================================================================

       Parameter adjustments */
    /* Table of constant values */
    static complex c_b1 = {1.f,0.f};
    static integer c__1 = 1;
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
    complex q__1, q__2, q__3;
    /* Builtin functions (f2c runtime: complex division and conjugate) */
    void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *);
    /* Local variables */
    static complex akm1k;
    static integer j, k;
    static real s;
    extern logical lsame_(char *, char *);
    static complex denom;
    extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
	    , complex *, integer *, complex *, integer *, complex *, complex *
	    , integer *), cgeru_(integer *, integer *, complex *, complex *,
	    integer *, complex *, integer *, complex *, integer *), cswap_(
	    integer *, complex *, integer *, complex *, integer *);
    static logical upper;
    static complex ak, bk;
    static integer kp;
    extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
	    csscal_(integer *, real *, complex *, integer *), xerbla_(char *,
	    integer *);
    static complex akm1, bkm1;
/* 1-based Fortran-style element access: column-major with leading
   dimensions LDA / LDB. */
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]

    /* Shift the array pointers so indexing is 1-based, Fortran style. */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --ipiv;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;

    /* Function Body: validate arguments. */
    *info = 0;
    upper = lsame_(uplo, "U");
    if (! upper && ! lsame_(uplo, "L")) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*nrhs < 0) {
	*info = -3;
    } else if (*lda < max(1,*n)) {
	*info = -5;
    } else if (*ldb < max(1,*n)) {
	*info = -8;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CHETRS", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0 || *nrhs == 0) {
	return 0;
    }

    if (upper) {

/*        Solve A*X = B, where A = U*D*U'.

          First solve U*D*X = B, overwriting B with X.

          K is the main loop index, decreasing from N to 1 in steps of
          1 or 2, depending on the size of the diagonal blocks. */

	k = *n;
L10:

/*        If K < 1, exit from loop. */

	if (k < 1) {
	    goto L30;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block

             Interchange rows K and IPIV(K). */

	    kp = ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b_ref(k, 1), ldb, &b_ref(kp, 1), ldb);
	    }

/*           Multiply by inv(U(K)), where U(K) is the transformation
             stored in column K of A. */

	    i__1 = k - 1;
	    q__1.r = -1.f, q__1.i = 0.f;
	    cgeru_(&i__1, nrhs, &q__1, &a_ref(1, k), &c__1, &b_ref(k, 1), ldb,
		     &b_ref(1, 1), ldb);

/*           Multiply by the inverse of the diagonal block.
             D(k,k) is real for a Hermitian matrix, so a real scale
             (csscal) suffices. */

	    i__1 = a_subscr(k, k);
	    s = 1.f / a[i__1].r;
	    csscal_(nrhs, &s, &b_ref(k, 1), ldb);
	    --k;
	} else {

/*           2 x 2 diagonal block

             Interchange rows K-1 and -IPIV(K). */

	    kp = -ipiv[k];
	    if (kp != k - 1) {
		cswap_(nrhs, &b_ref(k - 1, 1), ldb, &b_ref(kp, 1), ldb);
	    }

/*           Multiply by inv(U(K)), where U(K) is the transformation
             stored in columns K-1 and K of A. */

	    i__1 = k - 2;
	    q__1.r = -1.f, q__1.i = 0.f;
	    cgeru_(&i__1, nrhs, &q__1, &a_ref(1, k), &c__1, &b_ref(k, 1), ldb,
		     &b_ref(1, 1), ldb);
	    i__1 = k - 2;
	    q__1.r = -1.f, q__1.i = 0.f;
	    cgeru_(&i__1, nrhs, &q__1, &a_ref(1, k - 1), &c__1, &b_ref(k - 1,
		    1), ldb, &b_ref(1, 1), ldb);

/*           Multiply by the inverse of the diagonal block.
             The 2-by-2 Hermitian block [ A(k-1,k-1)  A(k-1,k) ;
             conj(A(k-1,k))  A(k,k) ] is inverted in closed form with
             everything scaled by the off-diagonal AKM1K to avoid
             overflow (standard LAPACK xHETRS formulation). */

	    i__1 = a_subscr(k - 1, k);
	    akm1k.r = a[i__1].r, akm1k.i = a[i__1].i;
	    c_div(&q__1, &a_ref(k - 1, k - 1), &akm1k);
	    akm1.r = q__1.r, akm1.i = q__1.i;
	    r_cnjg(&q__2, &akm1k);
	    c_div(&q__1, &a_ref(k, k), &q__2);
	    ak.r = q__1.r, ak.i = q__1.i;
	    q__2.r = akm1.r * ak.r - akm1.i * ak.i, q__2.i = akm1.r * ak.i +
		    akm1.i * ak.r;
	    q__1.r = q__2.r - 1.f, q__1.i = q__2.i + 0.f;
	    denom.r = q__1.r, denom.i = q__1.i;
	    i__1 = *nrhs;
	    for (j = 1; j <= i__1; ++j) {
		c_div(&q__1, &b_ref(k - 1, j), &akm1k);
		bkm1.r = q__1.r, bkm1.i = q__1.i;
		r_cnjg(&q__2, &akm1k);
		c_div(&q__1, &b_ref(k, j), &q__2);
		bk.r = q__1.r, bk.i = q__1.i;
		i__2 = b_subscr(k - 1, j);
		q__3.r = ak.r * bkm1.r - ak.i * bkm1.i, q__3.i = ak.r *
			bkm1.i + ak.i * bkm1.r;
		q__2.r = q__3.r - bk.r, q__2.i = q__3.i - bk.i;
		c_div(&q__1, &q__2, &denom);
		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
		i__2 = b_subscr(k, j);
		q__3.r = akm1.r * bk.r - akm1.i * bk.i, q__3.i = akm1.r *
			bk.i + akm1.i * bk.r;
		q__2.r = q__3.r - bkm1.r, q__2.i = q__3.i - bkm1.i;
		c_div(&q__1, &q__2, &denom);
		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
/* L20: */
	    }
	    k += -2;
	}

	goto L10;
L30:

/*        Next solve U'*X = B, overwriting B with X.

          K is the main loop index, increasing from 1 to N in steps of
          1 or 2, depending on the size of the diagonal blocks. */

	k = 1;
L40:

/*        If K > N, exit from loop. */

	if (k > *n) {
	    goto L50;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block

             Multiply by inv(U'(K)), where U(K) is the transformation
             stored in column K of A.  The clacgv/cgemv/clacgv sandwich
             applies a conjugate-transposed update to a row of B. */

	    if (k > 1) {
		clacgv_(nrhs, &b_ref(k, 1), ldb);
		i__1 = k - 1;
		q__1.r = -1.f, q__1.i = 0.f;
		cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[b_offset]
			, ldb, &a_ref(1, k), &c__1, &c_b1, &b_ref(k, 1), ldb);
		clacgv_(nrhs, &b_ref(k, 1), ldb);
	    }

/*           Interchange rows K and IPIV(K). */

	    kp = ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b_ref(k, 1), ldb, &b_ref(kp, 1), ldb);
	    }
	    ++k;
	} else {

/*           2 x 2 diagonal block

             Multiply by inv(U'(K+1)), where U(K+1) is the transformation
             stored in columns K and K+1 of A. */

	    if (k > 1) {
		clacgv_(nrhs, &b_ref(k, 1), ldb);
		i__1 = k - 1;
		q__1.r = -1.f, q__1.i = 0.f;
		cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[b_offset]
			, ldb, &a_ref(1, k), &c__1, &c_b1, &b_ref(k, 1), ldb);
		clacgv_(nrhs, &b_ref(k, 1), ldb);
		clacgv_(nrhs, &b_ref(k + 1, 1), ldb);
		i__1 = k - 1;
		q__1.r = -1.f, q__1.i = 0.f;
		cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b[b_offset]
			, ldb, &a_ref(1, k + 1), &c__1, &c_b1, &b_ref(k + 1,
			1), ldb);
		clacgv_(nrhs, &b_ref(k + 1, 1), ldb);
	    }

/*           Interchange rows K and -IPIV(K). */

	    kp = -ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b_ref(k, 1), ldb, &b_ref(kp, 1), ldb);
	    }
	    k += 2;
	}

	goto L40;
L50:
	;
    } else {

/*        Solve A*X = B, where A = L*D*L'.

          First solve L*D*X = B, overwriting B with X.

          K is the main loop index, increasing from 1 to N in steps of
          1 or 2, depending on the size of the diagonal blocks. */

	k = 1;
L60:

/*        If K > N, exit from loop. */

	if (k > *n) {
	    goto L80;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block

             Interchange rows K and IPIV(K). */

	    kp = ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b_ref(k, 1), ldb, &b_ref(kp, 1), ldb);
	    }

/*           Multiply by inv(L(K)), where L(K) is the transformation
             stored in column K of A. */

	    if (k < *n) {
		i__1 = *n - k;
		q__1.r = -1.f, q__1.i = 0.f;
		cgeru_(&i__1, nrhs, &q__1, &a_ref(k + 1, k), &c__1, &b_ref(k,
			1), ldb, &b_ref(k + 1, 1), ldb);
	    }

/*           Multiply by the inverse of the diagonal block (real). */

	    i__1 = a_subscr(k, k);
	    s = 1.f / a[i__1].r;
	    csscal_(nrhs, &s, &b_ref(k, 1), ldb);
	    ++k;
	} else {

/*           2 x 2 diagonal block

             Interchange rows K+1 and -IPIV(K). */

	    kp = -ipiv[k];
	    if (kp != k + 1) {
		cswap_(nrhs, &b_ref(k + 1, 1), ldb, &b_ref(kp, 1), ldb);
	    }

/*           Multiply by inv(L(K)), where L(K) is the transformation
             stored in columns K and K+1 of A. */

	    if (k < *n - 1) {
		i__1 = *n - k - 1;
		q__1.r = -1.f, q__1.i = 0.f;
		cgeru_(&i__1, nrhs, &q__1, &a_ref(k + 2, k), &c__1, &b_ref(k,
			1), ldb, &b_ref(k + 2, 1), ldb);
		i__1 = *n - k - 1;
		q__1.r = -1.f, q__1.i = 0.f;
		cgeru_(&i__1, nrhs, &q__1, &a_ref(k + 2, k + 1), &c__1, &
			b_ref(k + 1, 1), ldb, &b_ref(k + 2, 1), ldb);
	    }

/*           Multiply by the inverse of the diagonal block; same
             scaled closed-form 2-by-2 inverse as the upper case, but
             the stored off-diagonal is A(k+1,k). */

	    i__1 = a_subscr(k + 1, k);
	    akm1k.r = a[i__1].r, akm1k.i = a[i__1].i;
	    r_cnjg(&q__2, &akm1k);
	    c_div(&q__1, &a_ref(k, k), &q__2);
	    akm1.r = q__1.r, akm1.i = q__1.i;
	    c_div(&q__1, &a_ref(k + 1, k + 1), &akm1k);
	    ak.r = q__1.r, ak.i = q__1.i;
	    q__2.r = akm1.r * ak.r - akm1.i * ak.i, q__2.i = akm1.r * ak.i +
		    akm1.i * ak.r;
	    q__1.r = q__2.r - 1.f, q__1.i = q__2.i + 0.f;
	    denom.r = q__1.r, denom.i = q__1.i;
	    i__1 = *nrhs;
	    for (j = 1; j <= i__1; ++j) {
		r_cnjg(&q__2, &akm1k);
		c_div(&q__1, &b_ref(k, j), &q__2);
		bkm1.r = q__1.r, bkm1.i = q__1.i;
		c_div(&q__1, &b_ref(k + 1, j), &akm1k);
		bk.r = q__1.r, bk.i = q__1.i;
		i__2 = b_subscr(k, j);
		q__3.r = ak.r * bkm1.r - ak.i * bkm1.i, q__3.i = ak.r *
			bkm1.i + ak.i * bkm1.r;
		q__2.r = q__3.r - bk.r, q__2.i = q__3.i - bk.i;
		c_div(&q__1, &q__2, &denom);
		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
		i__2 = b_subscr(k + 1, j);
		q__3.r = akm1.r * bk.r - akm1.i * bk.i, q__3.i = akm1.r *
			bk.i + akm1.i * bk.r;
		q__2.r = q__3.r - bkm1.r, q__2.i = q__3.i - bkm1.i;
		c_div(&q__1, &q__2, &denom);
		b[i__2].r = q__1.r, b[i__2].i = q__1.i;
/* L70: */
	    }
	    k += 2;
	}

	goto L60;
L80:

/*        Next solve L'*X = B, overwriting B with X.

          K is the main loop index, decreasing from N to 1 in steps of
          1 or 2, depending on the size of the diagonal blocks. */

	k = *n;
L90:

/*        If K < 1, exit from loop. */

	if (k < 1) {
	    goto L100;
	}

	if (ipiv[k] > 0) {

/*           1 x 1 diagonal block

             Multiply by inv(L'(K)), where L(K) is the transformation
             stored in column K of A. */

	    if (k < *n) {
		clacgv_(nrhs, &b_ref(k, 1), ldb);
		i__1 = *n - k;
		q__1.r = -1.f, q__1.i = 0.f;
		cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b_ref(k +
			1, 1), ldb, &a_ref(k + 1, k), &c__1, &c_b1, &b_ref(k,
			1), ldb);
		clacgv_(nrhs, &b_ref(k, 1), ldb);
	    }

/*           Interchange rows K and IPIV(K). */

	    kp = ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b_ref(k, 1), ldb, &b_ref(kp, 1), ldb);
	    }
	    --k;
	} else {

/*           2 x 2 diagonal block

             Multiply by inv(L'(K-1)), where L(K-1) is the transformation
             stored in columns K-1 and K of A. */

	    if (k < *n) {
		clacgv_(nrhs, &b_ref(k, 1), ldb);
		i__1 = *n - k;
		q__1.r = -1.f, q__1.i = 0.f;
		cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b_ref(k +
			1, 1), ldb, &a_ref(k + 1, k), &c__1, &c_b1, &b_ref(k,
			1), ldb);
		clacgv_(nrhs, &b_ref(k, 1), ldb);
		clacgv_(nrhs, &b_ref(k - 1, 1), ldb);
		i__1 = *n - k;
		q__1.r = -1.f, q__1.i = 0.f;
		cgemv_("Conjugate transpose", &i__1, nrhs, &q__1, &b_ref(k +
			1, 1), ldb, &a_ref(k + 1, k - 1), &c__1, &c_b1, &
			b_ref(k - 1, 1), ldb);
		clacgv_(nrhs, &b_ref(k - 1, 1), ldb);
	    }

/*           Interchange rows K and -IPIV(K). */

	    kp = -ipiv[k];
	    if (kp != k) {
		cswap_(nrhs, &b_ref(k, 1), ldb, &b_ref(kp, 1), ldb);
	    }
	    k += -2;
	}

	goto L90;
L100:
	;
    }

    return 0;

/*     End of CHETRS */

} /* chetrs_ */
int cgetri_(int *n, complex *a, int *lda, int *
	ipiv, complex *work, int *lwork, int *info)
{
    /* System generated locals */
    int a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
    complex q__1;

    /* Local variables */
    int i__, j, jb, nb, jj, jp, nn, iws;
    extern int cgemm_(char *, char *, int *, int *, int *, complex *,
	    complex *, int *, complex *, int *, complex *, complex *, int *),
	    cgemv_(char *, int *, int *, complex *, complex *, int *,
	    complex *, int *, complex *, complex *, int *);
    int nbmin;
    extern int cswap_(int *, complex *, int *, complex *, int *), ctrsm_(
	    char *, char *, char *, char *, int *, int *, complex *, complex
	    *, int *, complex *, int *);
    extern int ilaenv_(int *, char *, char *, int *, int *, int *, int *);
    extern int xerbla_(char *, int *);
    int ldwork;
    extern int ctrtri_(char *, char *, int *, complex *, int *, int *);
    int lwkopt;
    int lquery;

/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  CGETRI computes the inverse of a matrix using the LU factorization */
/*  computed by CGETRF. */

/*  This method inverts U and then computes inv(A) by solving the system */
/*  inv(A)*L = inv(U) for inv(A). */

/*  Arguments */
/*  ========= */

/*  N       (input) INTEGER */
/*          The order of the matrix A.  N >= 0. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the factors L and U from the factorization */
/*          A = P*L*U as computed by CGETRF. */
/*          On exit, if INFO = 0, the inverse of the original matrix A. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= MAX(1,N). */

/*  IPIV    (input) INTEGER array, dimension (N) */
/*          The pivot indices from CGETRF; for 1<=i<=N, row i of the */
/*          matrix was interchanged with row IPIV(i). */

/*  WORK    (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
/*          On exit, if INFO=0, then WORK(1) returns the optimal LWORK. */

/*  LWORK   (input) INTEGER */
/*          The dimension of the array WORK.  LWORK >= MAX(1,N). */
/*          For optimal performance LWORK >= N*NB, where NB is */
/*          the optimal blocksize returned by ILAENV. */

/*          If LWORK = -1, then a workspace query is assumed; the routine */
/*          only calculates the optimal size of the WORK array, returns */
/*          this value as the first entry of the WORK array, and no error */
/*          message related to LWORK is issued by XERBLA. */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
/*          > 0:  if INFO = i, U(i,i) is exactly zero; the matrix is */
/*                singular and its inverse could not be computed. */

/*  ===================================================================== */

/*     Test the input parameters. */

    /* Parameter adjustments: shift to 1-based, column-major indexing. */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --ipiv;
    --work;

    /* Function Body */
    *info = 0;
    /* Optimal block size from ILAENV; WORK(1) reports the optimal LWORK. */
    nb = ilaenv_(&c__1, "CGETRI", " ", n, &c_n1, &c_n1, &c_n1);
    lwkopt = *n * nb;
    work[1].r = (float) lwkopt, work[1].i = 0.f;
    lquery = *lwork == -1;
    if (*n < 0) {
	*info = -1;
    } else if (*lda < MAX(1,*n)) {
	*info = -3;
    } else if (*lwork < MAX(1,*n) && ! lquery) {
	*info = -6;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGETRI", &i__1);
	return 0;
    } else if (lquery) {
	return 0;
    }

/*     Quick return if possible */

    if (*n == 0) {
	return 0;
    }

/*     Form inv(U).  If INFO > 0 from CTRTRI, then U is singular, */
/*     and the inverse is not computed. */

    ctrtri_("Upper", "Non-unit", n, &a[a_offset], lda, info);
    if (*info > 0) {
	return 0;
    }

    /* Decide between blocked and unblocked code based on the workspace
       actually supplied: if LWORK is too small for an NB-wide panel,
       shrink NB and fetch the minimum viable block size. */
    nbmin = 2;
    ldwork = *n;
    if (nb > 1 && nb < *n) {
/*        Computing MAX */
	i__1 = ldwork * nb;
	iws = MAX(i__1,1);
	if (*lwork < iws) {
	    nb = *lwork / ldwork;
/*        Computing MAX */
	    i__1 = 2, i__2 = ilaenv_(&c__2, "CGETRI", " ", n, &c_n1, &c_n1, &
		    c_n1);
	    nbmin = MAX(i__1,i__2);
	}
    } else {
	iws = *n;
    }

/*     Solve the equation inv(A)*L = inv(U) for inv(A). */

    if (nb < nbmin || nb >= *n) {

/*        Use unblocked code. */

	for (j = *n; j >= 1; --j) {

/*           Copy current column of L to WORK and replace with zeros. */

	    i__1 = *n;
	    for (i__ = j + 1; i__ <= i__1; ++i__) {
		i__2 = i__;
		i__3 = i__ + j * a_dim1;
		work[i__2].r = a[i__3].r, work[i__2].i = a[i__3].i;
		i__2 = i__ + j * a_dim1;
		a[i__2].r = 0.f, a[i__2].i = 0.f;
/* L10: */
	    }

/*           Compute current column of inv(A). */

	    if (j < *n) {
		i__1 = *n - j;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemv_("No transpose", n, &i__1, &q__1, &a[(j + 1) * a_dim1 +
			1], lda, &work[j + 1], &c__1, &c_b2, &a[j * a_dim1 +
			1], &c__1);
	    }
/* L20: */
	}
    } else {

/*        Use blocked code. */

	/* NN is the starting column of the last (leftmost-processed)
	   full block; the loop walks right-to-left in steps of NB. */
	nn = (*n - 1) / nb * nb + 1;
	i__1 = -nb;
	for (j = nn; i__1 < 0 ? j >= 1 : j <= 1; j += i__1) {
/*           Computing MIN */
	    i__2 = nb, i__3 = *n - j + 1;
	    jb = MIN(i__2,i__3);

/*           Copy current block column of L to WORK and replace with */
/*           zeros. */

	    i__2 = j + jb - 1;
	    for (jj = j; jj <= i__2; ++jj) {
		i__3 = *n;
		for (i__ = jj + 1; i__ <= i__3; ++i__) {
		    i__4 = i__ + (jj - j) * ldwork;
		    i__5 = i__ + jj * a_dim1;
		    work[i__4].r = a[i__5].r, work[i__4].i = a[i__5].i;
		    i__4 = i__ + jj * a_dim1;
		    a[i__4].r = 0.f, a[i__4].i = 0.f;
/* L30: */
		}
/* L40: */
	    }

/*           Compute current block column of inv(A). */

	    if (j + jb <= *n) {
		i__2 = *n - j - jb + 1;
		q__1.r = -1.f, q__1.i = -0.f;
		cgemm_("No transpose", "No transpose", n, &jb, &i__2, &q__1, &
			a[(j + jb) * a_dim1 + 1], lda, &work[j + jb], &ldwork,
			 &c_b2, &a[j * a_dim1 + 1], lda);
	    }
	    ctrsm_("Right", "Lower", "No transpose", "Unit", n, &jb, &c_b2, &
		    work[j], &ldwork, &a[j * a_dim1 + 1], lda);
/* L50: */
	}
    }

/*     Apply column interchanges (undo the row pivoting of the LU). */

    for (j = *n - 1; j >= 1; --j) {
	jp = ipiv[j];
	if (jp != j) {
	    cswap_(n, &a[j * a_dim1 + 1], &c__1, &a[jp * a_dim1 + 1], &c__1);
	}
/* L60: */
    }

    /* Report the workspace size actually used in WORK(1). */
    work[1].r = (float) iws, work[1].i = 0.f;
    return 0;

/*     End of CGETRI */

} /* cgetri_ */
/* Subroutine */ int claqp2_(integer *m, integer *n, integer *offset, complex
	*a, integer *lda, integer *jpvt, complex *tau, real *vn1, real *vn2,
	complex *work)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    real r__1;
    complex q__1;

    /* Builtin functions (f2c runtime) */
    double sqrt(doublereal);
    void r_cnjg(complex *, complex *);
    double c_abs(complex *);

    /* Local variables */
    integer i__, j, mn;
    complex aii;
    integer pvt;
    real temp, temp2, tol3z;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
	    , integer *, complex *, complex *, integer *, complex *);
    integer offpi;
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
	    complex *, integer *);
    integer itemp;
    extern doublereal scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */ int clarfp_(integer *, complex *, complex *,
	    integer *, complex *);
    extern doublereal slamch_(char *);
    extern integer isamax_(integer *, real *, integer *);

/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  CLAQP2 computes a QR factorization with column pivoting of */
/*  the block A(OFFSET+1:M,1:N). */
/*  The block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A. M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A. N >= 0. */

/*  OFFSET  (input) INTEGER */
/*          The number of rows of the matrix A that must be pivoted */
/*          but no factorized. OFFSET >= 0. */

/*  A       (input/output) COMPLEX array, dimension (LDA,N) */
/*          On entry, the M-by-N matrix A. */
/*          On exit, the upper triangle of block A(OFFSET+1:M,1:N) is */
/*          the triangular factor obtained; the elements in block */
/*          A(OFFSET+1:M,1:N) below the diagonal, together with the */
/*          array TAU, represent the orthogonal matrix Q as a product of */
/*          elementary reflectors. Block A(1:OFFSET,1:N) has been */
/*          accordingly pivoted, but no factorized. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A. LDA >= max(1,M). */

/*  JPVT    (input/output) INTEGER array, dimension (N) */
/*          On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted */
/*          to the front of A*P (a leading column); if JPVT(i) = 0, */
/*          the i-th column of A is a free column. */
/*          On exit, if JPVT(i) = k, then the i-th column of A*P */
/*          was the k-th column of A. */

/*  TAU     (output) COMPLEX array, dimension (min(M,N)) */
/*          The scalar factors of the elementary reflectors. */

/*  VN1     (input/output) REAL array, dimension (N) */
/*          The vector with the partial column norms. */

/*  VN2     (input/output) REAL array, dimension (N) */
/*          The vector with the exact column norms. */

/*  WORK    (workspace) COMPLEX array, dimension (N) */

/*  Further Details */
/*  =============== */

/*  Based on contributions by */
/*    G. Quintana-Orti, Depto. de Informatica, Universidad Jaime I, Spain */
/*    X. Sun, Computer Science Dept., Duke University, USA */

/*  Partial column norm updating strategy modified by */
/*    Z. Drmac and Z. Bujanovic, Dept. of Mathematics, */
/*    University of Zagreb, Croatia. */
/*    June 2006. */
/*  For more details see LAPACK Working Note 176. */

/*  ===================================================================== */

    /* Parameter adjustments: shift to 1-based, column-major indexing. */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --jpvt;
    --tau;
    --vn1;
    --vn2;
    --work;

    /* Function Body */
/*     Computing MIN */
    i__1 = *m - *offset;
    mn = min(i__1,*n);
    /* Threshold from LAWN 176: recompute a column norm exactly when the
       downdated estimate may have lost too much accuracy. */
    tol3z = sqrt(slamch_("Epsilon"));

/*     Compute factorization. */

    i__1 = mn;
    for (i__ = 1; i__ <= i__1; ++i__) {

	offpi = *offset + i__;

/*        Determine ith pivot column and swap if necessary. */

	i__2 = *n - i__ + 1;
	pvt = i__ - 1 + isamax_(&i__2, &vn1[i__], &c__1);

	if (pvt != i__) {
	    cswap_(m, &a[pvt * a_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &
		    c__1);
	    itemp = jpvt[pvt];
	    jpvt[pvt] = jpvt[i__];
	    jpvt[i__] = itemp;
	    vn1[pvt] = vn1[i__];
	    vn2[pvt] = vn2[i__];
	}

/*        Generate elementary reflector H(i).  The degenerate branch
          handles the last row, where the reflector acts on one element. */

	if (offpi < *m) {
	    i__2 = *m - offpi + 1;
	    clarfp_(&i__2, &a[offpi + i__ * a_dim1], &a[offpi + 1 + i__ *
		    a_dim1], &c__1, &tau[i__]);
	} else {
	    clarfp_(&c__1, &a[*m + i__ * a_dim1], &a[*m + i__ * a_dim1], &
		    c__1, &tau[i__]);
	}

	if (i__ < *n) {

/*           Apply H(i)' to A(offset+i:m,i+1:n) from the left.
             The diagonal entry is saved, overwritten with 1 so the
             stored Householder vector is complete, then restored. */

	    i__2 = offpi + i__ * a_dim1;
	    aii.r = a[i__2].r, aii.i = a[i__2].i;
	    i__2 = offpi + i__ * a_dim1;
	    a[i__2].r = 1.f, a[i__2].i = 0.f;
	    i__2 = *m - offpi + 1;
	    i__3 = *n - i__;
	    r_cnjg(&q__1, &tau[i__]);
	    clarf_("Left", &i__2, &i__3, &a[offpi + i__ * a_dim1], &c__1, &
		    q__1, &a[offpi + (i__ + 1) * a_dim1], lda, &work[1]);
	    i__2 = offpi + i__ * a_dim1;
	    a[i__2].r = aii.r, a[i__2].i = aii.i;
	}

/*        Update partial column norms. */

	i__2 = *n;
	for (j = i__ + 1; j <= i__2; ++j) {
	    if (vn1[j] != 0.f) {

/*              NOTE: The following 4 lines follow from the analysis in */
/*              Lapack Working Note 176. */

/*              Computing 2nd power */
		r__1 = c_abs(&a[offpi + j * a_dim1]) / vn1[j];
		temp = 1.f - r__1 * r__1;
		temp = dmax(temp,0.f);
/*              Computing 2nd power */
		r__1 = vn1[j] / vn2[j];
		temp2 = temp * (r__1 * r__1);
		if (temp2 <= tol3z) {
		    /* Estimate unreliable: recompute the norm exactly. */
		    if (offpi < *m) {
			i__3 = *m - offpi;
			vn1[j] = scnrm2_(&i__3, &a[offpi + 1 + j * a_dim1], &
				c__1);
			vn2[j] = vn1[j];
		    } else {
			vn1[j] = 0.f;
			vn2[j] = 0.f;
		    }
		} else {
		    /* Cheap downdate of the running norm estimate. */
		    vn1[j] *= sqrt(temp);
		}
	    }
/* L10: */
	}
/* L20: */
    }

    return 0;

/*     End of CLAQP2 */

} /* claqp2_ */
/* Subroutine */ int cgeqpf_(integer *m, integer *n, complex *a, integer *lda,
	 integer *jpvt, complex *tau, complex *work, real *rwork, integer *
	info)
{
/*  -- LAPACK auxiliary routine (version 3.0) --
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
       Courant Institute, Argonne National Lab, and Rice University
       June 30, 1999

    Purpose
    =======

    This routine is deprecated and has been replaced by routine CGEQP3.

    CGEQPF computes a QR factorization with column pivoting of a
    complex M-by-N matrix A: A*P = Q*R.

    Arguments
    =========

    M       (input) INTEGER
            The number of rows of the matrix A. M >= 0.

    N       (input) INTEGER
            The number of columns of the matrix A. N >= 0

    A       (input/output) COMPLEX array, dimension (LDA,N)
            On entry, the M-by-N matrix A.
            On exit, the upper triangle of the array contains the
            min(M,N)-by-N upper triangular matrix R; the elements
            below the diagonal, together with the array TAU,
            represent the unitary matrix Q as a product of
            min(m,n) elementary reflectors.

    LDA     (input) INTEGER
            The leading dimension of the array A. LDA >= max(1,M).

    JPVT    (input/output) INTEGER array, dimension (N)
            On entry, if JPVT(i) .ne. 0, the i-th column of A is permuted
            to the front of A*P (a leading column); if JPVT(i) = 0,
            the i-th column of A is a free column.
            On exit, if JPVT(i) = k, then the i-th column of A*P
            was the k-th column of A.

    TAU     (output) COMPLEX array, dimension (min(M,N))
            The scalar factors of the elementary reflectors.

    WORK    (workspace) COMPLEX array, dimension (N)

    RWORK   (workspace) REAL array, dimension (2*N)

    INFO    (output) INTEGER
            = 0:  successful exit
            < 0:  if INFO = -i, the i-th argument had an illegal value

    Further Details
    ===============

    The matrix Q is represented as a product of elementary reflectors

       Q = H(1) H(2) . . . H(n)

    Each H(i) has the form

       H = I - tau * v * v'

    where tau is a complex scalar, and v is a complex vector with
    v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i).

    The matrix P is represented in jpvt as follows: If
       jpvt(j) = i
    then the jth column of P is the ith canonical unit vector.

    =====================================================================

       Test the input arguments

       Parameter adjustments */
    /* Table of constant values */
    static integer c__1 = 1;

    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    real r__1;
    complex q__1;
    /* Builtin functions (f2c runtime) */
    void r_cnjg(complex *, complex *);
    double c_abs(complex *), sqrt(doublereal);
    /* Local variables */
    static real temp, temp2;
    static integer i__, j;
    extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
	    , integer *, complex *, complex *, integer *, complex *), cswap_(
	    integer *, complex *, integer *, complex *, integer *);
    static integer itemp;
    extern /* Subroutine */ int cgeqr2_(integer *, integer *, complex *,
	    integer *, complex *, complex *, integer *);
    extern doublereal scnrm2_(integer *, complex *, integer *);
    extern /* Subroutine */ int cunm2r_(char *, char *, integer *, integer *,
	    integer *, complex *, integer *, complex *, complex *, integer *,
	    complex *, integer *);
    static integer ma, mn;
    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *,
	    integer *, complex *), xerbla_(char *, integer *);
    extern integer isamax_(integer *, real *, integer *);
    static complex aii;
    static integer pvt;
/* 1-based, column-major element access into A. */
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]

    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --jpvt;
    --tau;
    --work;
    --rwork;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*m)) {
	*info = -4;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGEQPF", &i__1);
	return 0;
    }

    mn = min(*m,*n);

/*     Move initial columns up front.  ITEMP counts (one past) the
       number of user-fixed columns moved to the leading positions. */

    itemp = 1;
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (jpvt[i__] != 0) {
	    if (i__ != itemp) {
		cswap_(m, &a_ref(1, i__), &c__1, &a_ref(1, itemp), &c__1);
		jpvt[i__] = jpvt[itemp];
		jpvt[itemp] = i__;
	    } else {
		jpvt[i__] = i__;
	    }
	    ++itemp;
	} else {
	    jpvt[i__] = i__;
	}
/* L10: */
    }
    --itemp;

/*     Compute the QR factorization of the fixed columns and update
       the remaining columns. */

    if (itemp > 0) {
	ma = min(itemp,*m);
	cgeqr2_(m, &ma, &a[a_offset], lda, &tau[1], &work[1], info);
	if (ma < *n) {
	    i__1 = *n - ma;
	    cunm2r_("Left", "Conjugate transpose", m, &i__1, &ma, &a[a_offset]
		    , lda, &tau[1], &a_ref(1, ma + 1), lda, &work[1], info);
	}
    }

    if (itemp < mn) {

/*        Initialize partial column norms. The first n elements of
          work store the exact column norms.
          (RWORK(1:N) = running estimates, RWORK(N+1:2N) = last exact
          norms.) */

	i__1 = *n;
	for (i__ = itemp + 1; i__ <= i__1; ++i__) {
	    i__2 = *m - itemp;
	    rwork[i__] = scnrm2_(&i__2, &a_ref(itemp + 1, i__), &c__1);
	    rwork[*n + i__] = rwork[i__];
/* L20: */
	}

/*        Compute factorization. */

	i__1 = mn;
	for (i__ = itemp + 1; i__ <= i__1; ++i__) {

/*           Determine ith pivot column and swap if necessary.
             NOTE: ITEMP is reused here as a scratch integer for the
             JPVT exchange; its fixed-column meaning is no longer
             needed inside this loop. */

	    i__2 = *n - i__ + 1;
	    pvt = i__ - 1 + isamax_(&i__2, &rwork[i__], &c__1);

	    if (pvt != i__) {
		cswap_(m, &a_ref(1, pvt), &c__1, &a_ref(1, i__), &c__1);
		itemp = jpvt[pvt];
		jpvt[pvt] = jpvt[i__];
		jpvt[i__] = itemp;
		rwork[pvt] = rwork[i__];
		rwork[*n + pvt] = rwork[*n + i__];
	    }

/*           Generate elementary reflector H(i). */

	    i__2 = a_subscr(i__, i__);
	    aii.r = a[i__2].r, aii.i = a[i__2].i;
/*           Computing MIN */
	    i__2 = i__ + 1;
	    i__3 = *m - i__ + 1;
	    clarfg_(&i__3, &aii, &a_ref(min(i__2,*m), i__), &c__1, &tau[i__]);
	    i__2 = a_subscr(i__, i__);
	    a[i__2].r = aii.r, a[i__2].i = aii.i;

	    if (i__ < *n) {

/*              Apply H(i) to A(i:m,i+1:n) from the left: the diagonal
                is saved, set to 1 (completing the stored Householder
                vector), and restored after CLARF. */

		i__2 = a_subscr(i__, i__);
		aii.r = a[i__2].r, aii.i = a[i__2].i;
		i__2 = a_subscr(i__, i__);
		a[i__2].r = 1.f, a[i__2].i = 0.f;
		i__2 = *m - i__ + 1;
		i__3 = *n - i__;
		r_cnjg(&q__1, &tau[i__]);
		clarf_("Left", &i__2, &i__3, &a_ref(i__, i__), &c__1, &q__1, &
			a_ref(i__, i__ + 1), lda, &work[1]);
		i__2 = a_subscr(i__, i__);
		a[i__2].r = aii.r, a[i__2].i = aii.i;
	    }

/*           Update partial column norms (older pre-LAWN-176 test:
             recompute exactly when the downdate cancels to roundoff). */

	    i__2 = *n;
	    for (j = i__ + 1; j <= i__2; ++j) {
		if (rwork[j] != 0.f) {
/*                 Computing 2nd power */
		    r__1 = c_abs(&a_ref(i__, j)) / rwork[j];
		    temp = 1.f - r__1 * r__1;
		    temp = dmax(temp,0.f);
/*                 Computing 2nd power */
		    r__1 = rwork[j] / rwork[*n + j];
		    temp2 = temp * .05f * (r__1 * r__1) + 1.f;
		    if (temp2 == 1.f) {
			if (*m - i__ > 0) {
			    i__3 = *m - i__;
			    rwork[j] = scnrm2_(&i__3, &a_ref(i__ + 1, j), &
				    c__1);
			    rwork[*n + j] = rwork[j];
			} else {
			    rwork[j] = 0.f;
			    rwork[*n + j] = 0.f;
			}
		    } else {
			rwork[j] *= sqrt(temp);
		    }
		}
/* L30: */
	    }
/* L40: */
	}
    }
    return 0;

/*     End of CGEQPF */

} /* cgeqpf_ */
/* Subroutine */ int chptrf_(char *uplo, integer *n, complex *ap, integer * ipiv, integer *info, ftnlen uplo_len) { /* System generated locals */ integer i__1, i__2, i__3, i__4, i__5, i__6; real r__1, r__2, r__3, r__4; complex q__1, q__2, q__3, q__4, q__5, q__6; /* Builtin functions */ double sqrt(doublereal), r_imag(complex *); void r_cnjg(complex *, complex *); /* Local variables */ static real d__; static integer i__, j, k; static complex t; static real r1, d11; static complex d12; static real d22; static complex d21; static integer kc, kk, kp; static complex wk; static integer kx; static real tt; static integer knc, kpc, npp; static complex wkm1, wkp1; extern /* Subroutine */ int chpr_(char *, integer *, real *, complex *, integer *, complex *, ftnlen); static integer imax, jmax; static real alpha; extern logical lsame_(char *, char *, ftnlen, ftnlen); extern /* Subroutine */ int cswap_(integer *, complex *, integer *, complex *, integer *); static integer kstep; static logical upper; extern doublereal slapy2_(real *, real *); static real absakk; extern integer icamax_(integer *, complex *, integer *); extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer *), xerbla_(char *, integer *, ftnlen); static real colmax, rowmax; /* -- LAPACK routine (version 3.0) -- */ /* Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., */ /* Courant Institute, Argonne National Lab, and Rice University */ /* June 30, 1999 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* CHPTRF computes the factorization of a complex Hermitian packed */ /* matrix A using the Bunch-Kaufman diagonal pivoting method: */ /* A = U*D*U**H or A = L*D*L**H */ /* where U (or L) is a product of permutation and unit upper (lower) */ /* triangular matrices, and D is Hermitian and block diagonal with */ /* 1-by-1 and 2-by-2 diagonal blocks. 
*/ /* Arguments */ /* ========= */ /* UPLO (input) CHARACTER*1 */ /* = 'U': Upper triangle of A is stored; */ /* = 'L': Lower triangle of A is stored. */ /* N (input) INTEGER */ /* The order of the matrix A. N >= 0. */ /* AP (input/output) COMPLEX array, dimension (N*(N+1)/2) */ /* On entry, the upper or lower triangle of the Hermitian matrix */ /* A, packed columnwise in a linear array. The j-th column of A */ /* is stored in the array AP as follows: */ /* if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */ /* if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */ /* On exit, the block diagonal matrix D and the multipliers used */ /* to obtain the factor U or L, stored as a packed triangular */ /* matrix overwriting A (see below for further details). */ /* IPIV (output) INTEGER array, dimension (N) */ /* Details of the interchanges and the block structure of D. */ /* If IPIV(k) > 0, then rows and columns k and IPIV(k) were */ /* interchanged and D(k,k) is a 1-by-1 diagonal block. */ /* If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */ /* columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */ /* is a 2-by-2 diagonal block. If UPLO = 'L' and IPIV(k) = */ /* IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */ /* interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */ /* INFO (output) INTEGER */ /* = 0: successful exit */ /* < 0: if INFO = -i, the i-th argument had an illegal value */ /* > 0: if INFO = i, D(i,i) is exactly zero. The factorization */ /* has been completed, but the block diagonal matrix D is */ /* exactly singular, and division by zero will occur if it */ /* is used to solve a system of equations. */ /* Further Details */ /* =============== */ /* 5-96 - Based on modifications by J. Lewis, Boeing Computer Services */ /* Company */ /* If UPLO = 'U', then A = U*D*U', where */ /* U = P(n)*U(n)* ... 
*P(k)U(k)* ..., */ /* i.e., U is a product of terms P(k)*U(k), where k decreases from n to */ /* 1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */ /* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */ /* defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */ /* that if the diagonal block D(k) is of order s (s = 1 or 2), then */ /* ( I v 0 ) k-s */ /* U(k) = ( 0 I 0 ) s */ /* ( 0 0 I ) n-k */ /* k-s s n-k */ /* If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */ /* If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */ /* and A(k,k), and v overwrites A(1:k-2,k-1:k). */ /* If UPLO = 'L', then A = L*D*L', where */ /* L = P(1)*L(1)* ... *P(k)*L(k)* ..., */ /* i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */ /* n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */ /* and 2-by-2 diagonal blocks D(k). P(k) is a permutation matrix as */ /* defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */ /* that if the diagonal block D(k) is of order s (s = 1 or 2), then */ /* ( I 0 0 ) k-1 */ /* L(k) = ( 0 I 0 ) s */ /* ( 0 v I ) n-k-s+1 */ /* k-1 s n-k-s+1 */ /* If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */ /* If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */ /* and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Statement Functions .. */ /* .. */ /* .. Statement Function definitions .. */ /* .. */ /* .. Executable Statements .. */ /* Test the input parameters. */ /* Parameter adjustments */ --ipiv; --ap; /* Function Body */ *info = 0; upper = lsame_(uplo, "U", (ftnlen)1, (ftnlen)1); if (! upper && ! 
lsame_(uplo, "L", (ftnlen)1, (ftnlen)1)) { *info = -1; } else if (*n < 0) { *info = -2; } if (*info != 0) { i__1 = -(*info); xerbla_("CHPTRF", &i__1, (ftnlen)6); return 0; } /* Initialize ALPHA for use in choosing pivot block size. */ alpha = (sqrt(17.f) + 1.f) / 8.f; if (upper) { /* Factorize A as U*D*U' using the upper triangle of A */ /* K is the main loop index, decreasing from N to 1 in steps of */ /* 1 or 2 */ k = *n; kc = (*n - 1) * *n / 2 + 1; L10: knc = kc; /* If K < 1, exit from loop */ if (k < 1) { goto L110; } kstep = 1; /* Determine rows and columns to be interchanged and whether */ /* a 1-by-1 or 2-by-2 pivot block will be used */ i__1 = kc + k - 1; absakk = (r__1 = ap[i__1].r, dabs(r__1)); /* IMAX is the row-index of the largest off-diagonal element in */ /* column K, and COLMAX is its absolute value */ if (k > 1) { i__1 = k - 1; imax = icamax_(&i__1, &ap[kc], &c__1); i__1 = kc + imax - 1; colmax = (r__1 = ap[i__1].r, dabs(r__1)) + (r__2 = r_imag(&ap[kc + imax - 1]), dabs(r__2)); } else { colmax = 0.f; } if (dmax(absakk,colmax) == 0.f) { /* Column K is zero: set INFO and continue */ if (*info == 0) { *info = k; } kp = k; i__1 = kc + k - 1; i__2 = kc + k - 1; r__1 = ap[i__2].r; ap[i__1].r = r__1, ap[i__1].i = 0.f; } else { if (absakk >= alpha * colmax) { /* no interchange, use 1-by-1 pivot block */ kp = k; } else { /* JMAX is the column-index of the largest off-diagonal */ /* element in row IMAX, and ROWMAX is its absolute value */ rowmax = 0.f; jmax = imax; kx = imax * (imax + 1) / 2 + imax; i__1 = k; for (j = imax + 1; j <= i__1; ++j) { i__2 = kx; if ((r__1 = ap[i__2].r, dabs(r__1)) + (r__2 = r_imag(&ap[ kx]), dabs(r__2)) > rowmax) { i__2 = kx; rowmax = (r__1 = ap[i__2].r, dabs(r__1)) + (r__2 = r_imag(&ap[kx]), dabs(r__2)); jmax = j; } kx += j; /* L20: */ } kpc = (imax - 1) * imax / 2 + 1; if (imax > 1) { i__1 = imax - 1; jmax = icamax_(&i__1, &ap[kpc], &c__1); /* Computing MAX */ i__1 = kpc + jmax - 1; r__3 = rowmax, r__4 = (r__1 = ap[i__1].r, 
dabs(r__1)) + ( r__2 = r_imag(&ap[kpc + jmax - 1]), dabs(r__2)); rowmax = dmax(r__3,r__4); } if (absakk >= alpha * colmax * (colmax / rowmax)) { /* no interchange, use 1-by-1 pivot block */ kp = k; } else /* if(complicated condition) */ { i__1 = kpc + imax - 1; if ((r__1 = ap[i__1].r, dabs(r__1)) >= alpha * rowmax) { /* interchange rows and columns K and IMAX, use 1-by-1 */ /* pivot block */ kp = imax; } else { /* interchange rows and columns K-1 and IMAX, use 2-by-2 */ /* pivot block */ kp = imax; kstep = 2; } } } kk = k - kstep + 1; if (kstep == 2) { knc = knc - k + 1; } if (kp != kk) { /* Interchange rows and columns KK and KP in the leading */ /* submatrix A(1:k,1:k) */ i__1 = kp - 1; cswap_(&i__1, &ap[knc], &c__1, &ap[kpc], &c__1); kx = kpc + kp - 1; i__1 = kk - 1; for (j = kp + 1; j <= i__1; ++j) { kx = kx + j - 1; r_cnjg(&q__1, &ap[knc + j - 1]); t.r = q__1.r, t.i = q__1.i; i__2 = knc + j - 1; r_cnjg(&q__1, &ap[kx]); ap[i__2].r = q__1.r, ap[i__2].i = q__1.i; i__2 = kx; ap[i__2].r = t.r, ap[i__2].i = t.i; /* L30: */ } i__1 = kx + kk - 1; r_cnjg(&q__1, &ap[kx + kk - 1]); ap[i__1].r = q__1.r, ap[i__1].i = q__1.i; i__1 = knc + kk - 1; r1 = ap[i__1].r; i__1 = knc + kk - 1; i__2 = kpc + kp - 1; r__1 = ap[i__2].r; ap[i__1].r = r__1, ap[i__1].i = 0.f; i__1 = kpc + kp - 1; ap[i__1].r = r1, ap[i__1].i = 0.f; if (kstep == 2) { i__1 = kc + k - 1; i__2 = kc + k - 1; r__1 = ap[i__2].r; ap[i__1].r = r__1, ap[i__1].i = 0.f; i__1 = kc + k - 2; t.r = ap[i__1].r, t.i = ap[i__1].i; i__1 = kc + k - 2; i__2 = kc + kp - 1; ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i; i__1 = kc + kp - 1; ap[i__1].r = t.r, ap[i__1].i = t.i; } } else { i__1 = kc + k - 1; i__2 = kc + k - 1; r__1 = ap[i__2].r; ap[i__1].r = r__1, ap[i__1].i = 0.f; if (kstep == 2) { i__1 = kc - 1; i__2 = kc - 1; r__1 = ap[i__2].r; ap[i__1].r = r__1, ap[i__1].i = 0.f; } } /* Update the leading submatrix */ if (kstep == 1) { /* 1-by-1 pivot block D(k): column k now holds */ /* W(k) = U(k)*D(k) */ /* where U(k) is the 
k-th column of U */ /* Perform a rank-1 update of A(1:k-1,1:k-1) as */ /* A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */ i__1 = kc + k - 1; r1 = 1.f / ap[i__1].r; i__1 = k - 1; r__1 = -r1; chpr_(uplo, &i__1, &r__1, &ap[kc], &c__1, &ap[1], (ftnlen)1); /* Store U(k) in column k */ i__1 = k - 1; csscal_(&i__1, &r1, &ap[kc], &c__1); } else { /* 2-by-2 pivot block D(k): columns k and k-1 now hold */ /* ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */ /* where U(k) and U(k-1) are the k-th and (k-1)-th columns */ /* of U */ /* Perform a rank-2 update of A(1:k-2,1:k-2) as */ /* A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */ /* = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */ if (k > 2) { i__1 = k - 1 + (k - 1) * k / 2; r__1 = ap[i__1].r; r__2 = r_imag(&ap[k - 1 + (k - 1) * k / 2]); d__ = slapy2_(&r__1, &r__2); i__1 = k - 1 + (k - 2) * (k - 1) / 2; d22 = ap[i__1].r / d__; i__1 = k + (k - 1) * k / 2; d11 = ap[i__1].r / d__; tt = 1.f / (d11 * d22 - 1.f); i__1 = k - 1 + (k - 1) * k / 2; q__1.r = ap[i__1].r / d__, q__1.i = ap[i__1].i / d__; d12.r = q__1.r, d12.i = q__1.i; d__ = tt / d__; for (j = k - 2; j >= 1; --j) { i__1 = j + (k - 2) * (k - 1) / 2; q__3.r = d11 * ap[i__1].r, q__3.i = d11 * ap[i__1].i; r_cnjg(&q__5, &d12); i__2 = j + (k - 1) * k / 2; q__4.r = q__5.r * ap[i__2].r - q__5.i * ap[i__2].i, q__4.i = q__5.r * ap[i__2].i + q__5.i * ap[ i__2].r; q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i; q__1.r = d__ * q__2.r, q__1.i = d__ * q__2.i; wkm1.r = q__1.r, wkm1.i = q__1.i; i__1 = j + (k - 1) * k / 2; q__3.r = d22 * ap[i__1].r, q__3.i = d22 * ap[i__1].i; i__2 = j + (k - 2) * (k - 1) / 2; q__4.r = d12.r * ap[i__2].r - d12.i * ap[i__2].i, q__4.i = d12.r * ap[i__2].i + d12.i * ap[i__2] .r; q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i; q__1.r = d__ * q__2.r, q__1.i = d__ * q__2.i; wk.r = q__1.r, wk.i = q__1.i; for (i__ = j; i__ >= 1; --i__) { i__1 = i__ + (j - 1) * j / 2; i__2 = i__ + (j - 1) * j / 2; i__3 = i__ + (k - 1) * k / 2; r_cnjg(&q__4, &wk); q__3.r = 
ap[i__3].r * q__4.r - ap[i__3].i * q__4.i, q__3.i = ap[i__3].r * q__4.i + ap[ i__3].i * q__4.r; q__2.r = ap[i__2].r - q__3.r, q__2.i = ap[i__2].i - q__3.i; i__4 = i__ + (k - 2) * (k - 1) / 2; r_cnjg(&q__6, &wkm1); q__5.r = ap[i__4].r * q__6.r - ap[i__4].i * q__6.i, q__5.i = ap[i__4].r * q__6.i + ap[ i__4].i * q__6.r; q__1.r = q__2.r - q__5.r, q__1.i = q__2.i - q__5.i; ap[i__1].r = q__1.r, ap[i__1].i = q__1.i; /* L40: */ } i__1 = j + (k - 1) * k / 2; ap[i__1].r = wk.r, ap[i__1].i = wk.i; i__1 = j + (k - 2) * (k - 1) / 2; ap[i__1].r = wkm1.r, ap[i__1].i = wkm1.i; i__1 = j + (j - 1) * j / 2; i__2 = j + (j - 1) * j / 2; r__1 = ap[i__2].r; q__1.r = r__1, q__1.i = 0.f; ap[i__1].r = q__1.r, ap[i__1].i = q__1.i; /* L50: */ } } } } /* Store details of the interchanges in IPIV */ if (kstep == 1) { ipiv[k] = kp; } else { ipiv[k] = -kp; ipiv[k - 1] = -kp; } /* Decrease K and return to the start of the main loop */ k -= kstep; kc = knc - k; goto L10; } else { /* Factorize A as L*D*L' using the lower triangle of A */ /* K is the main loop index, increasing from 1 to N in steps of */ /* 1 or 2 */ k = 1; kc = 1; npp = *n * (*n + 1) / 2; L60: knc = kc; /* If K > N, exit from loop */ if (k > *n) { goto L110; } kstep = 1; /* Determine rows and columns to be interchanged and whether */ /* a 1-by-1 or 2-by-2 pivot block will be used */ i__1 = kc; absakk = (r__1 = ap[i__1].r, dabs(r__1)); /* IMAX is the row-index of the largest off-diagonal element in */ /* column K, and COLMAX is its absolute value */ if (k < *n) { i__1 = *n - k; imax = k + icamax_(&i__1, &ap[kc + 1], &c__1); i__1 = kc + imax - k; colmax = (r__1 = ap[i__1].r, dabs(r__1)) + (r__2 = r_imag(&ap[kc + imax - k]), dabs(r__2)); } else { colmax = 0.f; } if (dmax(absakk,colmax) == 0.f) { /* Column K is zero: set INFO and continue */ if (*info == 0) { *info = k; } kp = k; i__1 = kc; i__2 = kc; r__1 = ap[i__2].r; ap[i__1].r = r__1, ap[i__1].i = 0.f; } else { if (absakk >= alpha * colmax) { /* no interchange, use 1-by-1 pivot 
block */ kp = k; } else { /* JMAX is the column-index of the largest off-diagonal */ /* element in row IMAX, and ROWMAX is its absolute value */ rowmax = 0.f; kx = kc + imax - k; i__1 = imax - 1; for (j = k; j <= i__1; ++j) { i__2 = kx; if ((r__1 = ap[i__2].r, dabs(r__1)) + (r__2 = r_imag(&ap[ kx]), dabs(r__2)) > rowmax) { i__2 = kx; rowmax = (r__1 = ap[i__2].r, dabs(r__1)) + (r__2 = r_imag(&ap[kx]), dabs(r__2)); jmax = j; } kx = kx + *n - j; /* L70: */ } kpc = npp - (*n - imax + 1) * (*n - imax + 2) / 2 + 1; if (imax < *n) { i__1 = *n - imax; jmax = imax + icamax_(&i__1, &ap[kpc + 1], &c__1); /* Computing MAX */ i__1 = kpc + jmax - imax; r__3 = rowmax, r__4 = (r__1 = ap[i__1].r, dabs(r__1)) + ( r__2 = r_imag(&ap[kpc + jmax - imax]), dabs(r__2)) ; rowmax = dmax(r__3,r__4); } if (absakk >= alpha * colmax * (colmax / rowmax)) { /* no interchange, use 1-by-1 pivot block */ kp = k; } else /* if(complicated condition) */ { i__1 = kpc; if ((r__1 = ap[i__1].r, dabs(r__1)) >= alpha * rowmax) { /* interchange rows and columns K and IMAX, use 1-by-1 */ /* pivot block */ kp = imax; } else { /* interchange rows and columns K+1 and IMAX, use 2-by-2 */ /* pivot block */ kp = imax; kstep = 2; } } } kk = k + kstep - 1; if (kstep == 2) { knc = knc + *n - k + 1; } if (kp != kk) { /* Interchange rows and columns KK and KP in the trailing */ /* submatrix A(k:n,k:n) */ if (kp < *n) { i__1 = *n - kp; cswap_(&i__1, &ap[knc + kp - kk + 1], &c__1, &ap[kpc + 1], &c__1); } kx = knc + kp - kk; i__1 = kp - 1; for (j = kk + 1; j <= i__1; ++j) { kx = kx + *n - j + 1; r_cnjg(&q__1, &ap[knc + j - kk]); t.r = q__1.r, t.i = q__1.i; i__2 = knc + j - kk; r_cnjg(&q__1, &ap[kx]); ap[i__2].r = q__1.r, ap[i__2].i = q__1.i; i__2 = kx; ap[i__2].r = t.r, ap[i__2].i = t.i; /* L80: */ } i__1 = knc + kp - kk; r_cnjg(&q__1, &ap[knc + kp - kk]); ap[i__1].r = q__1.r, ap[i__1].i = q__1.i; i__1 = knc; r1 = ap[i__1].r; i__1 = knc; i__2 = kpc; r__1 = ap[i__2].r; ap[i__1].r = r__1, ap[i__1].i = 0.f; i__1 = kpc; 
ap[i__1].r = r1, ap[i__1].i = 0.f; if (kstep == 2) { i__1 = kc; i__2 = kc; r__1 = ap[i__2].r; ap[i__1].r = r__1, ap[i__1].i = 0.f; i__1 = kc + 1; t.r = ap[i__1].r, t.i = ap[i__1].i; i__1 = kc + 1; i__2 = kc + kp - k; ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i; i__1 = kc + kp - k; ap[i__1].r = t.r, ap[i__1].i = t.i; } } else { i__1 = kc; i__2 = kc; r__1 = ap[i__2].r; ap[i__1].r = r__1, ap[i__1].i = 0.f; if (kstep == 2) { i__1 = knc; i__2 = knc; r__1 = ap[i__2].r; ap[i__1].r = r__1, ap[i__1].i = 0.f; } } /* Update the trailing submatrix */ if (kstep == 1) { /* 1-by-1 pivot block D(k): column k now holds */ /* W(k) = L(k)*D(k) */ /* where L(k) is the k-th column of L */ if (k < *n) { /* Perform a rank-1 update of A(k+1:n,k+1:n) as */ /* A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */ i__1 = kc; r1 = 1.f / ap[i__1].r; i__1 = *n - k; r__1 = -r1; chpr_(uplo, &i__1, &r__1, &ap[kc + 1], &c__1, &ap[kc + *n - k + 1], (ftnlen)1); /* Store L(k) in column K */ i__1 = *n - k; csscal_(&i__1, &r1, &ap[kc + 1], &c__1); } } else { /* 2-by-2 pivot block D(k): columns K and K+1 now hold */ /* ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */ /* where L(k) and L(k+1) are the k-th and (k+1)-th columns */ /* of L */ if (k < *n - 1) { /* Perform a rank-2 update of A(k+2:n,k+2:n) as */ /* A := A - ( L(k) L(k+1) )*D(k)*( L(k) L(k+1) )' */ /* = A - ( W(k) W(k+1) )*inv(D(k))*( W(k) W(k+1) )' */ /* where L(k) and L(k+1) are the k-th and (k+1)-th */ /* columns of L */ i__1 = k + 1 + (k - 1) * ((*n << 1) - k) / 2; r__1 = ap[i__1].r; r__2 = r_imag(&ap[k + 1 + (k - 1) * ((*n << 1) - k) / 2]); d__ = slapy2_(&r__1, &r__2); i__1 = k + 1 + k * ((*n << 1) - k - 1) / 2; d11 = ap[i__1].r / d__; i__1 = k + (k - 1) * ((*n << 1) - k) / 2; d22 = ap[i__1].r / d__; tt = 1.f / (d11 * d22 - 1.f); i__1 = k + 1 + (k - 1) * ((*n << 1) - k) / 2; q__1.r = ap[i__1].r / d__, q__1.i = ap[i__1].i / d__; d21.r = q__1.r, d21.i = q__1.i; d__ = tt / d__; i__1 = *n; for (j = k + 2; j <= i__1; ++j) { i__2 = j + (k - 1) * 
((*n << 1) - k) / 2; q__3.r = d11 * ap[i__2].r, q__3.i = d11 * ap[i__2].i; i__3 = j + k * ((*n << 1) - k - 1) / 2; q__4.r = d21.r * ap[i__3].r - d21.i * ap[i__3].i, q__4.i = d21.r * ap[i__3].i + d21.i * ap[i__3] .r; q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i; q__1.r = d__ * q__2.r, q__1.i = d__ * q__2.i; wk.r = q__1.r, wk.i = q__1.i; i__2 = j + k * ((*n << 1) - k - 1) / 2; q__3.r = d22 * ap[i__2].r, q__3.i = d22 * ap[i__2].i; r_cnjg(&q__5, &d21); i__3 = j + (k - 1) * ((*n << 1) - k) / 2; q__4.r = q__5.r * ap[i__3].r - q__5.i * ap[i__3].i, q__4.i = q__5.r * ap[i__3].i + q__5.i * ap[ i__3].r; q__2.r = q__3.r - q__4.r, q__2.i = q__3.i - q__4.i; q__1.r = d__ * q__2.r, q__1.i = d__ * q__2.i; wkp1.r = q__1.r, wkp1.i = q__1.i; i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { i__3 = i__ + (j - 1) * ((*n << 1) - j) / 2; i__4 = i__ + (j - 1) * ((*n << 1) - j) / 2; i__5 = i__ + (k - 1) * ((*n << 1) - k) / 2; r_cnjg(&q__4, &wk); q__3.r = ap[i__5].r * q__4.r - ap[i__5].i * q__4.i, q__3.i = ap[i__5].r * q__4.i + ap[ i__5].i * q__4.r; q__2.r = ap[i__4].r - q__3.r, q__2.i = ap[i__4].i - q__3.i; i__6 = i__ + k * ((*n << 1) - k - 1) / 2; r_cnjg(&q__6, &wkp1); q__5.r = ap[i__6].r * q__6.r - ap[i__6].i * q__6.i, q__5.i = ap[i__6].r * q__6.i + ap[ i__6].i * q__6.r; q__1.r = q__2.r - q__5.r, q__1.i = q__2.i - q__5.i; ap[i__3].r = q__1.r, ap[i__3].i = q__1.i; /* L90: */ } i__2 = j + (k - 1) * ((*n << 1) - k) / 2; ap[i__2].r = wk.r, ap[i__2].i = wk.i; i__2 = j + k * ((*n << 1) - k - 1) / 2; ap[i__2].r = wkp1.r, ap[i__2].i = wkp1.i; i__2 = j + (j - 1) * ((*n << 1) - j) / 2; i__3 = j + (j - 1) * ((*n << 1) - j) / 2; r__1 = ap[i__3].r; q__1.r = r__1, q__1.i = 0.f; ap[i__2].r = q__1.r, ap[i__2].i = q__1.i; /* L100: */ } } } } /* Store details of the interchanges in IPIV */ if (kstep == 1) { ipiv[k] = kp; } else { ipiv[k] = -kp; ipiv[k + 1] = -kp; } /* Increase K and return to the start of the main loop */ k += kstep; kc = knc + *n - k + 2; goto L60; } L110: return 0; /* End of CHPTRF */ 
} /* chptrf_ */
/* Subroutine */ int cstegr_(char *jobz, char *range, integer *n, real *d__, real *e, real *vl, real *vu, integer *il, integer *iu, real *abstol, integer *m, real *w, complex *z__, integer *ldz, integer *isuppz, real *work, integer *lwork, integer *iwork, integer *liwork, integer * info) { /* System generated locals */ integer z_dim1, z_offset, i__1, i__2; real r__1, r__2; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer iend; static real rmin, rmax; static integer itmp; static real tnrm; static integer i__, j; static real scale; extern logical lsame_(char *, char *); static integer iinfo; extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *), cswap_(integer *, complex *, integer *, complex *, integer *); static integer lwmin; static logical wantz; static integer jj; static logical alleig, indeig; static integer ibegin, iindbl; static logical valeig; extern doublereal slamch_(char *); extern /* Subroutine */ int claset_(char *, integer *, integer *, complex *, complex *, complex *, integer *); static real safmin; extern /* Subroutine */ int xerbla_(char *, integer *); static real bignum; static integer iindwk, indgrs, indwof; extern /* Subroutine */ int clarrv_(integer *, real *, real *, integer *, integer *, real *, integer *, real *, real *, complex *, integer * , integer *, real *, integer *, integer *), slarre_(integer *, real *, real *, real *, integer *, integer *, integer *, real *, real *, real *, real *, integer *); static real thresh; static integer iinspl, indwrk, liwmin; extern doublereal slanst_(char *, integer *, real *, real *); static integer nsplit; static real smlnum; static logical lquery; static real eps, tol, tmp; #define z___subscr(a_1,a_2) (a_2)*z_dim1 + a_1 #define z___ref(a_1,a_2) z__[z___subscr(a_1,a_2)] /* -- LAPACK computational routine (version 3.0) -- Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1999 Purpose ======= CSTEGR computes selected eigenvalues and, optionally, eigenvectors of a real symmetric tridiagonal matrix T. Eigenvalues and eigenvectors can be selected by specifying either a range of values or a range of indices for the desired eigenvalues. The eigenvalues are computed by the dqds algorithm, while orthogonal eigenvectors are computed from various ``good'' L D L^T representations (also known as Relatively Robust Representations). Gram-Schmidt orthogonalization is avoided as far as possible. More specifically, the various steps of the algorithm are as follows. For the i-th unreduced block of T, (a) Compute T - sigma_i = L_i D_i L_i^T, such that L_i D_i L_i^T is a relatively robust representation, (b) Compute the eigenvalues, lambda_j, of L_i D_i L_i^T to high relative accuracy by the dqds algorithm, (c) If there is a cluster of close eigenvalues, "choose" sigma_i close to the cluster, and go to step (a), (d) Given the approximate eigenvalue lambda_j of L_i D_i L_i^T, compute the corresponding eigenvector by forming a rank-revealing twisted factorization. The desired accuracy of the output can be specified by the input parameter ABSTOL. For more details, see "A new O(n^2) algorithm for the symmetric tridiagonal eigenvalue/eigenvector problem", by Inderjit Dhillon, Computer Science Division Technical Report No. UCB/CSD-97-971, UC Berkeley, May 1997. Note 1 : Currently CSTEGR is only set up to find ALL the n eigenvalues and eigenvectors of T in O(n^2) time Note 2 : Currently the routine CSTEIN is called when an appropriate sigma_i cannot be chosen in step (c) above. CSTEIN invokes modified Gram-Schmidt when eigenvalues are close. Note 3 : CSTEGR works only on machines which follow ieee-754 floating-point standard in their handling of infinities and NaNs. 
Normal execution of CSTEGR may create NaNs and infinities and hence may abort due to a floating point exception in environments which do not conform to the ieee standard. Arguments ========= JOBZ (input) CHARACTER*1 = 'N': Compute eigenvalues only; = 'V': Compute eigenvalues and eigenvectors. RANGE (input) CHARACTER*1 = 'A': all eigenvalues will be found. = 'V': all eigenvalues in the half-open interval (VL,VU] will be found. = 'I': the IL-th through IU-th eigenvalues will be found. ********* Only RANGE = 'A' is currently supported ********************* N (input) INTEGER The order of the matrix. N >= 0. D (input/output) REAL array, dimension (N) On entry, the n diagonal elements of the tridiagonal matrix T. On exit, D is overwritten. E (input/output) REAL array, dimension (N) On entry, the (n-1) subdiagonal elements of the tridiagonal matrix T in elements 1 to N-1 of E; E(N) need not be set. On exit, E is overwritten. VL (input) REAL VU (input) REAL If RANGE='V', the lower and upper bounds of the interval to be searched for eigenvalues. VL < VU. Not referenced if RANGE = 'A' or 'I'. IL (input) INTEGER IU (input) INTEGER If RANGE='I', the indices (in ascending order) of the smallest and largest eigenvalues to be returned. 1 <= IL <= IU <= N, if N > 0; IL = 1 and IU = 0 if N = 0. Not referenced if RANGE = 'A' or 'V'. ABSTOL (input) REAL The absolute error tolerance for the eigenvalues/eigenvectors. IF JOBZ = 'V', the eigenvalues and eigenvectors output have residual norms bounded by ABSTOL, and the dot products between different eigenvectors are bounded by ABSTOL. If ABSTOL is less than N*EPS*|T|, then N*EPS*|T| will be used in its place, where EPS is the machine precision and |T| is the 1-norm of the tridiagonal matrix. The eigenvalues are computed to an accuracy of EPS*|T| irrespective of ABSTOL. If high relative accuracy is important, set ABSTOL to DLAMCH( 'Safe minimum' ). 
See Barlow and Demmel "Computing Accurate Eigensystems of Scaled Diagonally Dominant Matrices", LAPACK Working Note #7 for a discussion of which matrices define their eigenvalues to high relative accuracy. M (output) INTEGER The total number of eigenvalues found. 0 <= M <= N. If RANGE = 'A', M = N, and if RANGE = 'I', M = IU-IL+1. W (output) REAL array, dimension (N) The first M elements contain the selected eigenvalues in ascending order. Z (output) COMPLEX array, dimension (LDZ, max(1,M) ) If JOBZ = 'V', then if INFO = 0, the first M columns of Z contain the orthonormal eigenvectors of the matrix T corresponding to the selected eigenvalues, with the i-th column of Z holding the eigenvector associated with W(i). If JOBZ = 'N', then Z is not referenced. Note: the user must ensure that at least max(1,M) columns are supplied in the array Z; if RANGE = 'V', the exact value of M is not known in advance and an upper bound must be used. LDZ (input) INTEGER The leading dimension of the array Z. LDZ >= 1, and if JOBZ = 'V', LDZ >= max(1,N). ISUPPZ (output) INTEGER ARRAY, dimension ( 2*max(1,M) ) The support of the eigenvectors in Z, i.e., the indices indicating the nonzero elements in Z. The i-th eigenvector is nonzero only in elements ISUPPZ( 2*i-1 ) through ISUPPZ( 2*i ). WORK (workspace/output) REAL array, dimension (LWORK) On exit, if INFO = 0, WORK(1) returns the optimal (and minimal) LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,18*N) If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. IWORK (workspace/output) INTEGER array, dimension (LIWORK) On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. LIWORK (input) INTEGER The dimension of the array IWORK. 
LIWORK >= max(1,10*N) If LIWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the IWORK array, returns this value as the first entry of the IWORK array, and no error message related to LIWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: if INFO = 1, internal error in SLARRE, if INFO = 2, internal error in CLARRV. Further Details =============== Based on contributions by Inderjit Dhillon, IBM Almaden, USA Osni Marques, LBNL/NERSC, USA Ken Stanley, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. Parameter adjustments */ --d__; --e; --w; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --isuppz; --work; --iwork; /* Function Body */ wantz = lsame_(jobz, "V"); alleig = lsame_(range, "A"); valeig = lsame_(range, "V"); indeig = lsame_(range, "I"); lquery = *lwork == -1 || *liwork == -1; lwmin = *n * 18; liwmin = *n * 10; *info = 0; if (! (wantz || lsame_(jobz, "N"))) { *info = -1; } else if (! (alleig || valeig || indeig)) { *info = -2; /* The following two lines need to be removed once the RANGE = 'V' and RANGE = 'I' options are provided. */ } else if (valeig || indeig) { *info = -2; } else if (*n < 0) { *info = -3; } else if (valeig && *n > 0 && *vu <= *vl) { *info = -7; } else if (indeig && *il < 1) { *info = -8; /* The following change should be made in DSTEVX also, otherwise IL can be specified as N+1 and IU as N. ELSE IF( INDEIG .AND. ( IU.LT.MIN( N, IL ) .OR. IU.GT.N ) ) THEN */ } else if (indeig && (*iu < *il || *iu > *n)) { *info = -9; } else if (*ldz < 1 || wantz && *ldz < *n) { *info = -14; } else if (*lwork < lwmin && ! lquery) { *info = -17; } else if (*liwork < liwmin && ! 
lquery) { *info = -19; } if (*info == 0) { work[1] = (real) lwmin; iwork[1] = liwmin; } if (*info != 0) { i__1 = -(*info); xerbla_("CSTEGR", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ *m = 0; if (*n == 0) { return 0; } if (*n == 1) { if (alleig || indeig) { *m = 1; w[1] = d__[1]; } else { if (*vl < d__[1] && *vu >= d__[1]) { *m = 1; w[1] = d__[1]; } } if (wantz) { i__1 = z___subscr(1, 1); z__[i__1].r = 1.f, z__[i__1].i = 0.f; } return 0; } /* Get machine constants. */ safmin = slamch_("Safe minimum"); eps = slamch_("Precision"); smlnum = safmin / eps; bignum = 1.f / smlnum; rmin = sqrt(smlnum); /* Computing MIN */ r__1 = sqrt(bignum), r__2 = 1.f / sqrt(sqrt(safmin)); rmax = dmin(r__1,r__2); /* Scale matrix to allowable range, if necessary. */ scale = 1.f; tnrm = slanst_("M", n, &d__[1], &e[1]); if (tnrm > 0.f && tnrm < rmin) { scale = rmin / tnrm; } else if (tnrm > rmax) { scale = rmax / tnrm; } if (scale != 1.f) { sscal_(n, &scale, &d__[1], &c__1); i__1 = *n - 1; sscal_(&i__1, &scale, &e[1], &c__1); tnrm *= scale; } indgrs = 1; indwof = (*n << 1) + 1; indwrk = *n * 3 + 1; iinspl = 1; iindbl = *n + 1; iindwk = (*n << 1) + 1; claset_("Full", n, n, &c_b1, &c_b1, &z__[z_offset], ldz); /* Compute the desired eigenvalues of the tridiagonal after splitting into smaller subblocks if the corresponding of-diagonal elements are small */ thresh = eps * tnrm; slarre_(n, &d__[1], &e[1], &thresh, &nsplit, &iwork[iinspl], m, &w[1], & work[indwof], &work[indgrs], &work[indwrk], &iinfo); if (iinfo != 0) { *info = 1; return 0; } if (wantz) { /* Compute the desired eigenvectors corresponding to the computed eigenvalues Computing MAX */ r__1 = *abstol, r__2 = (real) (*n) * thresh; tol = dmax(r__1,r__2); ibegin = 1; i__1 = nsplit; for (i__ = 1; i__ <= i__1; ++i__) { iend = iwork[iinspl + i__ - 1]; i__2 = iend; for (j = ibegin; j <= i__2; ++j) { iwork[iindbl + j - 1] = i__; /* L10: */ } ibegin = iend + 1; /* L20: */ } clarrv_(n, &d__[1], &e[1], 
&iwork[iinspl], m, &w[1], &iwork[iindbl], & work[indgrs], &tol, &z__[z_offset], ldz, &isuppz[1], &work[ indwrk], &iwork[iindwk], &iinfo); if (iinfo != 0) { *info = 2; return 0; } } ibegin = 1; i__1 = nsplit; for (i__ = 1; i__ <= i__1; ++i__) { iend = iwork[iinspl + i__ - 1]; i__2 = iend; for (j = ibegin; j <= i__2; ++j) { w[j] += work[indwof + i__ - 1]; /* L30: */ } ibegin = iend + 1; /* L40: */ } /* If matrix was scaled, then rescale eigenvalues appropriately. */ if (scale != 1.f) { r__1 = 1.f / scale; sscal_(m, &r__1, &w[1], &c__1); } /* If eigenvalues are not in order, then sort them, along with eigenvectors. */ if (nsplit > 1) { i__1 = *m - 1; for (j = 1; j <= i__1; ++j) { i__ = 0; tmp = w[j]; i__2 = *m; for (jj = j + 1; jj <= i__2; ++jj) { if (w[jj] < tmp) { i__ = jj; tmp = w[jj]; } /* L50: */ } if (i__ != 0) { w[i__] = w[j]; w[j] = tmp; if (wantz) { cswap_(n, &z___ref(1, i__), &c__1, &z___ref(1, j), &c__1); itmp = isuppz[(i__ << 1) - 1]; isuppz[(i__ << 1) - 1] = isuppz[(j << 1) - 1]; isuppz[(j << 1) - 1] = itmp; itmp = isuppz[i__ * 2]; isuppz[i__ * 2] = isuppz[j * 2]; isuppz[j * 2] = itmp; } } /* L60: */ } } work[1] = (real) lwmin; iwork[1] = liwmin; return 0; /* End of CSTEGR */ } /* cstegr_ */