Example #1
int
f2c_zscal(integer* N,
          doublecomplex* alpha,
          doublecomplex* X, integer* incX)
{
    zscal_(N, alpha, X, incX);
    return 0;
}
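A minimal, hypothetical caller for the wrapper above (not part of the original source). It assumes the f2c.h typedefs (integer, doublecomplex) and a linked BLAS that provides zscal_; the wrapper just forwards the pointer arguments, so X is scaled in place.

#include "f2c.h"

extern "C" int f2c_zscal(integer* N, doublecomplex* alpha,
                         doublecomplex* X, integer* incX);

int main()
{
    doublecomplex x[3] = { {1.0, 0.0}, {0.0, 2.0}, {-1.0, 1.0} };
    doublecomplex alpha = { 0.0, 1.0 };   /* scale by i */
    integer n = 3, incx = 1;

    f2c_zscal(&n, &alpha, x, &incx);      /* x := alpha * x, performed by zscal_ */
    return 0;
}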
Example #2
/*! _zgbmatrix*std::complex<double> operator */
inline _zgbmatrix operator*(const _zgbmatrix& mat, const std::complex<double>& d)
{
#ifdef  CPPL_VERBOSE
  std::cerr << "# [MARK] operator*(const _zgbmatrix&, const std::complex<double>&)"
            << std::endl;
#endif//CPPL_VERBOSE
  
  zscal_((mat.KL+mat.KU+1)*mat.N, d, mat.Array, 1);
  return mat;
}
Example #3
/*! zssmatrix*=std::complex<double> operator */
inline zssmatrix& zssmatrix::operator*=(const std::complex<double>& d)
{
#ifdef  CPPL_VERBOSE
  std::cerr << "# [MARK] zssmatrix::operator*=(const std::complex<double>&)"
            << std::endl;
#endif//CPPL_VERBOSE
  
  zscal_(VOL, d, Array, 1);
  return *this;
}
Example #4
/*! zcovector*=std::complex<double> operator */
inline zcovector& zcovector::operator*=(const std::complex<double>& d)
{
#ifdef  CPPL_VERBOSE
  std::cerr << "# [MARK] zcovector::operator*=(const std::complex<double>&)"
            << std::endl;
#endif//CPPL_VERBOSE
  
  zscal_(L, d, Array, 1);
  return *this;
}
Example #5
/*! std::complex<double>*_zrovector operator */
inline _zrovector operator*(const std::complex<double>& d, const _zrovector& vec)
{
#ifdef  CPPL_VERBOSE
  std::cerr << "# [MARK] operator*(const std::complex<double>&, const _zrovector&)"
            << std::endl;
#endif//CPPL_VERBOSE
  
  zscal_(vec.L, d, vec.Array, 1);  
  return vec;
}
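Examples #2 through #5 are CPPLapack operator overloads; each one reduces to a single zscal_ call over the object's Array storage. Below is a hedged usage sketch (not from the original source), assuming the cpplapack.h header and the CPPL namespace; the _-prefixed parameter types above are CPPLapack's temporary-object classes, and the analogous overloads for the plain classes behave the same way.

#include <complex>
#include <cpplapack.h>

int main()
{
    const std::complex<double> d(2.0, -1.0);

    CPPL::zcovector v(4);                    // complex column vector of length 4
    v.zero();
    v(0) = std::complex<double>(1.0, 1.0);
    v *= d;                                  // zcovector::operator*= above: one zscal_ over v.Array
    return 0;
}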
Example #6
PyObject* scal(PyObject *self, PyObject *args)
{
  Py_complex alpha;
  PyArrayObject* x;
  if (!PyArg_ParseTuple(args, "DO", &alpha, &x))
    return NULL;
  /* Flatten the (possibly multi-dimensional) array into one contiguous vector. */
  int n = PyArray_DIMS(x)[0];
  for (int d = 1; d < PyArray_NDIM(x); d++)
    n *= PyArray_DIMS(x)[d];
  int incx = 1;

  /* Real arrays are scaled by the real part of alpha only;
     complex arrays use the full complex scalar. */
  if (PyArray_DESCR(x)->type_num == NPY_DOUBLE)
    dscal_(&n, &(alpha.real), DOUBLEP(x), &incx);
  else
    zscal_(&n, &alpha, (void*)COMPLEXP(x), &incx);

  Py_RETURN_NONE;
}
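A hedged sketch (not from the original source) of how a wrapper like scal() is typically exposed to Python through a module method table; the module name _blas_demo and the surrounding boilerplate are illustrative assumptions, not the original project's registration code.

static PyMethodDef demo_methods[] = {
    {"scal", scal, METH_VARARGS, "x *= alpha for a real or complex ndarray"},
    {NULL, NULL, 0, NULL}
};

static struct PyModuleDef demo_module = {
    PyModuleDef_HEAD_INIT, "_blas_demo",     /* hypothetical module name */
    NULL, -1, demo_methods
};

PyMODINIT_FUNC PyInit__blas_demo(void)
{
    return PyModule_Create(&demo_module);
}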
Example #7
/* Subroutine */
int zlarfgp_(integer *n, doublecomplex *alpha, doublecomplex *x, integer *incx, doublecomplex *tau)
{
    /* Table of constant values (ONE, used by zladiv_ below) */
    static doublecomplex c_b5 = {1.,0.};
    /* System generated locals */
    integer i__1, i__2;
    doublereal d__1, d__2;
    doublecomplex z__1, z__2;
    /* Builtin functions */
    double d_imag(doublecomplex *), d_sign(doublereal *, doublereal *), z_abs( doublecomplex *);
    /* Local variables */
    integer j;
    doublecomplex savealpha;
    integer knt;
    doublereal beta, alphi, alphr;
    extern /* Subroutine */
    int zscal_(integer *, doublecomplex *, doublecomplex *, integer *);
    doublereal xnorm;
    extern doublereal dlapy2_(doublereal *, doublereal *), dlapy3_(doublereal *, doublereal *, doublereal *), dznrm2_(integer *, doublecomplex * , integer *), dlamch_(char *);
    extern /* Subroutine */
    int zdscal_(integer *, doublereal *, doublecomplex *, integer *);
    doublereal bignum;
    extern /* Double Complex */
    VOID zladiv_(doublecomplex *, doublecomplex *, doublecomplex *);
    doublereal smlnum;
    /* -- LAPACK auxiliary routine (version 3.4.2) -- */
    /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
    /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
    /* September 2012 */
    /* .. Scalar Arguments .. */
    /* .. */
    /* .. Array Arguments .. */
    /* .. */
    /* ===================================================================== */
    /* .. Parameters .. */
    /* .. */
    /* .. Local Scalars .. */
    /* .. */
    /* .. External Functions .. */
    /* .. */
    /* .. Intrinsic Functions .. */
    /* .. */
    /* .. External Subroutines .. */
    /* .. */
    /* .. Executable Statements .. */
    /* Parameter adjustments */
    --x;
    /* Function Body */
    if (*n <= 0)
    {
        tau->r = 0., tau->i = 0.;
        return 0;
    }
    i__1 = *n - 1;
    xnorm = dznrm2_(&i__1, &x[1], incx);
    alphr = alpha->r;
    alphi = d_imag(alpha);
    if (xnorm == 0.)
    {
        /* H = [1-alpha/abs(alpha) 0;
        0 I], sign chosen so ALPHA >= 0. */
        if (alphi == 0.)
        {
            if (alphr >= 0.)
            {
                /* When TAU.eq.ZERO, the vector is special-cased to be */
                /* all zeros in the application routines. We do not need */
                /* to clear it. */
                tau->r = 0., tau->i = 0.;
            }
            else
            {
                /* However, the application routines rely on explicit */
                /* zero checks when TAU.ne.ZERO, and we must clear X. */
                tau->r = 2., tau->i = 0.;
                i__1 = *n - 1;
                for (j = 1;
                        j <= i__1;
                        ++j)
                {
                    i__2 = (j - 1) * *incx + 1;
                    x[i__2].r = 0.;
                    x[i__2].i = 0.; // , expr subst
                }
                z__1.r = -alpha->r;
                z__1.i = -alpha->i; // , expr subst
                alpha->r = z__1.r, alpha->i = z__1.i;
            }
        }
        else
        {
            /* Only "reflecting" the diagonal entry to be real and non-negative. */
            xnorm = dlapy2_(&alphr, &alphi);
            d__1 = 1. - alphr / xnorm;
            d__2 = -alphi / xnorm;
            z__1.r = d__1;
            z__1.i = d__2; // , expr subst
            tau->r = z__1.r, tau->i = z__1.i;
            i__1 = *n - 1;
            for (j = 1;
                    j <= i__1;
                    ++j)
            {
                i__2 = (j - 1) * *incx + 1;
                x[i__2].r = 0.;
                x[i__2].i = 0.; // , expr subst
            }
            alpha->r = xnorm, alpha->i = 0.;
        }
    }
    else
    {
        /* general case */
        d__1 = dlapy3_(&alphr, &alphi, &xnorm);
        beta = d_sign(&d__1, &alphr);
        smlnum = dlamch_("S") / dlamch_("E");
        bignum = 1. / smlnum;
        knt = 0;
        if (abs(beta) < smlnum)
        {
            /* XNORM, BETA may be inaccurate;
            scale X and recompute them */
L10:
            ++knt;
            i__1 = *n - 1;
            zdscal_(&i__1, &bignum, &x[1], incx);
            beta *= bignum;
            alphi *= bignum;
            alphr *= bignum;
            if (abs(beta) < smlnum)
            {
                goto L10;
            }
            /* New BETA is at most 1, at least SMLNUM */
            i__1 = *n - 1;
            xnorm = dznrm2_(&i__1, &x[1], incx);
            z__1.r = alphr;
            z__1.i = alphi; // , expr subst
            alpha->r = z__1.r, alpha->i = z__1.i;
            d__1 = dlapy3_(&alphr, &alphi, &xnorm);
            beta = d_sign(&d__1, &alphr);
        }
        savealpha.r = alpha->r;
        savealpha.i = alpha->i; // , expr subst
        z__1.r = alpha->r + beta;
        z__1.i = alpha->i; // , expr subst
        alpha->r = z__1.r, alpha->i = z__1.i;
        if (beta < 0.)
        {
            beta = -beta;
            z__2.r = -alpha->r;
            z__2.i = -alpha->i; // , expr subst
            z__1.r = z__2.r / beta;
            z__1.i = z__2.i / beta; // , expr subst
            tau->r = z__1.r, tau->i = z__1.i;
        }
        else
        {
            alphr = alphi * (alphi / alpha->r);
            alphr += xnorm * (xnorm / alpha->r);
            d__1 = alphr / beta;
            d__2 = -alphi / beta;
            z__1.r = d__1;
            z__1.i = d__2; // , expr subst
            tau->r = z__1.r, tau->i = z__1.i;
            d__1 = -alphr;
            z__1.r = d__1;
            z__1.i = alphi; // , expr subst
            alpha->r = z__1.r, alpha->i = z__1.i;
        }
        zladiv_(&z__1, &c_b5, alpha);
        alpha->r = z__1.r, alpha->i = z__1.i;
        if (z_abs(tau) <= smlnum)
        {
            /* In the case where the computed TAU ends up being a denormalized number, */
            /* it loses relative accuracy. This is a BIG problem. Solution: flush TAU */
            /* to ZERO (or TWO or whatever makes a nonnegative real number for BETA). */
            /* (Bug report provided by Pat Quillen from MathWorks on Jul 29, 2009.) */
            /* (Thanks Pat. Thanks MathWorks.) */
            alphr = savealpha.r;
            alphi = d_imag(&savealpha);
            if (alphi == 0.)
            {
                if (alphr >= 0.)
                {
                    tau->r = 0., tau->i = 0.;
                }
                else
                {
                    tau->r = 2., tau->i = 0.;
                    i__1 = *n - 1;
                    for (j = 1;
                            j <= i__1;
                            ++j)
                    {
                        i__2 = (j - 1) * *incx + 1;
                        x[i__2].r = 0.;
                        x[i__2].i = 0.; // , expr subst
                    }
                    z__1.r = -savealpha.r;
                    z__1.i = -savealpha.i; // , expr subst
                    beta = z__1.r;
                }
            }
            else
            {
                xnorm = dlapy2_(&alphr, &alphi);
                d__1 = 1. - alphr / xnorm;
                d__2 = -alphi / xnorm;
                z__1.r = d__1;
                z__1.i = d__2; // , expr subst
                tau->r = z__1.r, tau->i = z__1.i;
                i__1 = *n - 1;
                for (j = 1;
                        j <= i__1;
                        ++j)
                {
                    i__2 = (j - 1) * *incx + 1;
                    x[i__2].r = 0.;
                    x[i__2].i = 0.; // , expr subst
                }
                beta = xnorm;
            }
        }
        else
        {
            /* This is the general case. */
            i__1 = *n - 1;
            zscal_(&i__1, alpha, &x[1], incx);
        }
        /* If BETA is subnormal, it may lose relative accuracy */
        i__1 = knt;
        for (j = 1;
                j <= i__1;
                ++j)
        {
            beta *= smlnum;
            /* L20: */
        }
        alpha->r = beta, alpha->i = 0.;
    }
    return 0;
    /* End of ZLARFGP */
}
Example #8
/* Subroutine */ int zlarfg_(integer *n, doublecomplex *alpha, doublecomplex *
	x, integer *incx, doublecomplex *tau)
{
/*  -- LAPACK auxiliary routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    ZLARFG generates a complex elementary reflector H of order n, such   
    that   

          H' * ( alpha ) = ( beta ),   H' * H = I.   
               (   x   )   (   0  )   

    where alpha and beta are scalars, with beta real, and x is an   
    (n-1)-element complex vector. H is represented in the form   

          H = I - tau * ( 1 ) * ( 1 v' ) ,   
                        ( v )   

    where tau is a complex scalar and v is a complex (n-1)-element   
    vector. Note that H is not hermitian.   

    If the elements of x are all zero and alpha is real, then tau = 0   
    and H is taken to be the unit matrix.   

    Otherwise  1 <= real(tau) <= 2  and  abs(tau-1) <= 1 .   

    Arguments   
    =========   

    N       (input) INTEGER   
            The order of the elementary reflector.   

    ALPHA   (input/output) COMPLEX*16   
            On entry, the value alpha.   
            On exit, it is overwritten with the value beta.   

    X       (input/output) COMPLEX*16 array, dimension   
                           (1+(N-2)*abs(INCX))   
            On entry, the vector x.   
            On exit, it is overwritten with the vector v.   

    INCX    (input) INTEGER   
            The increment between elements of X. INCX > 0.   

    TAU     (output) COMPLEX*16   
            The value tau.   

    =====================================================================   


       Parameter adjustments */
    /* Table of constant values */
    static doublecomplex c_b5 = {1.,0.};
    
    /* System generated locals */
    integer i__1;
    doublereal d__1, d__2;
    doublecomplex z__1, z__2;
    /* Builtin functions */
    double d_imag(doublecomplex *), d_sign(doublereal *, doublereal *);
    /* Local variables */
    static doublereal beta;
    static integer j;
    static doublereal alphi, alphr;
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *);
    static doublereal xnorm;
    extern doublereal dlapy3_(doublereal *, doublereal *, doublereal *), 
	    dznrm2_(integer *, doublecomplex *, integer *), dlamch_(char *);
    static doublereal safmin;
    extern /* Subroutine */ int zdscal_(integer *, doublereal *, 
	    doublecomplex *, integer *);
    static doublereal rsafmn;
    extern /* Double Complex */ VOID zladiv_(doublecomplex *, doublecomplex *,
	     doublecomplex *);
    static integer knt;


    --x;

    /* Function Body */
    if (*n <= 0) {
	tau->r = 0., tau->i = 0.;
	return 0;
    }

    i__1 = *n - 1;
    xnorm = dznrm2_(&i__1, &x[1], incx);
    alphr = alpha->r;
    alphi = d_imag(alpha);

    if (xnorm == 0. && alphi == 0.) {

/*        H  =  I */

	tau->r = 0., tau->i = 0.;
    } else {

/*        general case */

	d__1 = dlapy3_(&alphr, &alphi, &xnorm);
	beta = -d_sign(&d__1, &alphr);
	safmin = dlamch_("S") / dlamch_("E");
	rsafmn = 1. / safmin;

	if (abs(beta) < safmin) {

/*           XNORM, BETA may be inaccurate; scale X and recompute them */

	    knt = 0;
L10:
	    ++knt;
	    i__1 = *n - 1;
	    zdscal_(&i__1, &rsafmn, &x[1], incx);
	    beta *= rsafmn;
	    alphi *= rsafmn;
	    alphr *= rsafmn;
	    if (abs(beta) < safmin) {
		goto L10;
	    }

/*           New BETA is at most 1, at least SAFMIN */

	    i__1 = *n - 1;
	    xnorm = dznrm2_(&i__1, &x[1], incx);
	    z__1.r = alphr, z__1.i = alphi;
	    alpha->r = z__1.r, alpha->i = z__1.i;
	    d__1 = dlapy3_(&alphr, &alphi, &xnorm);
	    beta = -d_sign(&d__1, &alphr);
	    d__1 = (beta - alphr) / beta;
	    d__2 = -alphi / beta;
	    z__1.r = d__1, z__1.i = d__2;
	    tau->r = z__1.r, tau->i = z__1.i;
	    z__2.r = alpha->r - beta, z__2.i = alpha->i;
	    zladiv_(&z__1, &c_b5, &z__2);
	    alpha->r = z__1.r, alpha->i = z__1.i;
	    i__1 = *n - 1;
	    zscal_(&i__1, alpha, &x[1], incx);

/*           If ALPHA is subnormal, it may lose relative accuracy */

	    alpha->r = beta, alpha->i = 0.;
	    i__1 = knt;
	    for (j = 1; j <= i__1; ++j) {
		z__1.r = safmin * alpha->r, z__1.i = safmin * alpha->i;
		alpha->r = z__1.r, alpha->i = z__1.i;
/* L20: */
	    }
	} else {
	    d__1 = (beta - alphr) / beta;
	    d__2 = -alphi / beta;
	    z__1.r = d__1, z__1.i = d__2;
	    tau->r = z__1.r, tau->i = z__1.i;
	    z__2.r = alpha->r - beta, z__2.i = alpha->i;
	    zladiv_(&z__1, &c_b5, &z__2);
	    alpha->r = z__1.r, alpha->i = z__1.i;
	    i__1 = *n - 1;
	    zscal_(&i__1, alpha, &x[1], incx);
	    alpha->r = beta, alpha->i = 0.;
	}
    }

    return 0;

/*     End of ZLARFG */

} /* zlarfg_ */
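A minimal hedged driver (not part of the original source) for the calling convention documented above: on exit ALPHA is overwritten with the real scalar beta, X holds v(2:n) of the reflector, and TAU holds its scalar factor, so that H' applied to the original vector gives (beta, 0, ..., 0)'. It assumes the f2c.h typedefs and a linked LAPACK that provides zlarfg_.

#include "f2c.h"

extern "C" int zlarfg_(integer *n, doublecomplex *alpha, doublecomplex *x,
                       integer *incx, doublecomplex *tau);

int main()
{
    integer n = 3, incx = 1;
    doublecomplex alpha = { 3.0, 4.0 };                /* first entry of the vector */
    doublecomplex x[2]  = { {1.0, 0.0}, {0.0, 2.0} };  /* the remaining n-1 entries */
    doublecomplex tau;

    zlarfg_(&n, &alpha, x, &incx, &tau);
    /* H = I - tau * v * v' with v = (1, x[0], x[1])'; alpha now holds beta (real). */
    return 0;
}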
Example #9
 int zlatrd_(char *uplo, int *n, int *nb, 
	doublecomplex *a, int *lda, double *e, doublecomplex *tau, 
	doublecomplex *w, int *ldw)
{
    /* Table of constant values (ZERO, ONE and the unit stride used by the Level-2 calls below) */
    static doublecomplex c_b1 = {0.,0.};
    static doublecomplex c_b2 = {1.,0.};
    static int c__1 = 1;

    /* System generated locals */
    int a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
    double d__1;
    doublecomplex z__1, z__2, z__3, z__4;

    /* Local variables */
    int i__, iw;
    doublecomplex alpha;
    extern int lsame_(char *, char *);
    extern  int zscal_(int *, doublecomplex *, 
	    doublecomplex *, int *);
    extern /* Double Complex */ VOID zdotc_(doublecomplex *, int *, 
	    doublecomplex *, int *, doublecomplex *, int *);
    extern  int zgemv_(char *, int *, int *, 
	    doublecomplex *, doublecomplex *, int *, doublecomplex *, 
	    int *, doublecomplex *, doublecomplex *, int *), 
	    zhemv_(char *, int *, doublecomplex *, doublecomplex *, 
	    int *, doublecomplex *, int *, doublecomplex *, 
	    doublecomplex *, int *), zaxpy_(int *, 
	    doublecomplex *, doublecomplex *, int *, doublecomplex *, 
	    int *), zlarfg_(int *, doublecomplex *, doublecomplex *, 
	    int *, doublecomplex *), zlacgv_(int *, doublecomplex *, 
	    int *);


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZLATRD reduces NB rows and columns of a complex Hermitian matrix A to */
/*  Hermitian tridiagonal form by a unitary similarity */
/*  transformation Q' * A * Q, and returns the matrices V and W which are */
/*  needed to apply the transformation to the unreduced part of A. */

/*  If UPLO = 'U', ZLATRD reduces the last NB rows and columns of a */
/*  matrix, of which the upper triangle is supplied; */
/*  if UPLO = 'L', ZLATRD reduces the first NB rows and columns of a */
/*  matrix, of which the lower triangle is supplied. */

/*  This is an auxiliary routine called by ZHETRD. */

/*  Arguments */
/*  ========= */

/*  UPLO    (input) CHARACTER*1 */
/*          Specifies whether the upper or lower triangular part of the */
/*          Hermitian matrix A is stored: */
/*          = 'U': Upper triangular */
/*          = 'L': Lower triangular */

/*  N       (input) INTEGER */
/*          The order of the matrix A. */

/*  NB      (input) INTEGER */
/*          The number of rows and columns to be reduced. */

/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
/*          On entry, the Hermitian matrix A.  If UPLO = 'U', the leading */
/*          n-by-n upper triangular part of A contains the upper */
/*          triangular part of the matrix A, and the strictly lower */
/*          triangular part of A is not referenced.  If UPLO = 'L', the */
/*          leading n-by-n lower triangular part of A contains the lower */
/*          triangular part of the matrix A, and the strictly upper */
/*          triangular part of A is not referenced. */
/*          On exit: */
/*          if UPLO = 'U', the last NB columns have been reduced to */
/*            tridiagonal form, with the diagonal elements overwriting */
/*            the diagonal elements of A; the elements above the diagonal */
/*            with the array TAU, represent the unitary matrix Q as a */
/*            product of elementary reflectors; */
/*          if UPLO = 'L', the first NB columns have been reduced to */
/*            tridiagonal form, with the diagonal elements overwriting */
/*            the diagonal elements of A; the elements below the diagonal */
/*            with the array TAU, represent the  unitary matrix Q as a */
/*            product of elementary reflectors. */
/*          See Further Details. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= MAX(1,N). */

/*  E       (output) DOUBLE PRECISION array, dimension (N-1) */
/*          If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal */
/*          elements of the last NB columns of the reduced matrix; */
/*          if UPLO = 'L', E(1:nb) contains the subdiagonal elements of */
/*          the first NB columns of the reduced matrix. */

/*  TAU     (output) COMPLEX*16 array, dimension (N-1) */
/*          The scalar factors of the elementary reflectors, stored in */
/*          TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'. */
/*          See Further Details. */

/*  W       (output) COMPLEX*16 array, dimension (LDW,NB) */
/*          The n-by-nb matrix W required to update the unreduced part */
/*          of A. */

/*  LDW     (input) INTEGER */
/*          The leading dimension of the array W. LDW >= MAX(1,N). */

/*  Further Details */
/*  =============== */

/*  If UPLO = 'U', the matrix Q is represented as a product of elementary */
/*  reflectors */

/*     Q = H(n) H(n-1) . . . H(n-nb+1). */

/*  Each H(i) has the form */

/*     H(i) = I - tau * v * v' */

/*  where tau is a complex scalar, and v is a complex vector with */
/*  v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), */
/*  and tau in TAU(i-1). */

/*  If UPLO = 'L', the matrix Q is represented as a product of elementary */
/*  reflectors */

/*     Q = H(1) H(2) . . . H(nb). */

/*  Each H(i) has the form */

/*     H(i) = I - tau * v * v' */

/*  where tau is a complex scalar, and v is a complex vector with */
/*  v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), */
/*  and tau in TAU(i). */

/*  The elements of the vectors v together form the n-by-nb matrix V */
/*  which is needed, with W, to apply the transformation to the unreduced */
/*  part of the matrix, using a Hermitian rank-2k update of the form: */
/*  A := A - V*W' - W*V'. */

/*  The contents of A on exit are illustrated by the following examples */
/*  with n = 5 and nb = 2: */

/*  if UPLO = 'U':                       if UPLO = 'L': */

/*    (  a   a   a   v4  v5 )              (  d                  ) */
/*    (      a   a   v4  v5 )              (  1   d              ) */
/*    (          a   1   v5 )              (  v1  1   a          ) */
/*    (              d   1  )              (  v1  v2  a   a      ) */
/*    (                  d  )              (  v1  v2  a   a   a  ) */

/*  where d denotes a diagonal element of the reduced matrix, a denotes */
/*  an element of the original matrix that is unchanged, and vi denotes */
/*  an element of the vector defining H(i). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Quick return if possible */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --e;
    --tau;
    w_dim1 = *ldw;
    w_offset = 1 + w_dim1;
    w -= w_offset;

    /* Function Body */
    if (*n <= 0) {
	return 0;
    }

    if (lsame_(uplo, "U")) {

/*        Reduce last NB columns of upper triangle */

	i__1 = *n - *nb + 1;
	for (i__ = *n; i__ >= i__1; --i__) {
	    iw = i__ - *n + *nb;
	    if (i__ < *n) {

/*              Update A(1:i,i) */

		i__2 = i__ + i__ * a_dim1;
		i__3 = i__ + i__ * a_dim1;
		d__1 = a[i__3].r;
		a[i__2].r = d__1, a[i__2].i = 0.;
		i__2 = *n - i__;
		zlacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
		i__2 = *n - i__;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__, &i__2, &z__1, &a[(i__ + 1) * 
			a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, &
			c_b2, &a[i__ * a_dim1 + 1], &c__1);
		i__2 = *n - i__;
		zlacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
		i__2 = *n - i__;
		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
		i__2 = *n - i__;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__, &i__2, &z__1, &w[(iw + 1) * 
			w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, &
			c_b2, &a[i__ * a_dim1 + 1], &c__1);
		i__2 = *n - i__;
		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
		i__2 = i__ + i__ * a_dim1;
		i__3 = i__ + i__ * a_dim1;
		d__1 = a[i__3].r;
		a[i__2].r = d__1, a[i__2].i = 0.;
	    }
	    if (i__ > 1) {

/*              Generate elementary reflector H(i) to annihilate */
/*              A(1:i-2,i) */

		i__2 = i__ - 1 + i__ * a_dim1;
		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
		i__2 = i__ - 1;
		zlarfg_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &tau[i__ 
			- 1]);
		i__2 = i__ - 1;
		e[i__2] = alpha.r;
		i__2 = i__ - 1 + i__ * a_dim1;
		a[i__2].r = 1., a[i__2].i = 0.;

/*              Compute W(1:i-1,i) */

		i__2 = i__ - 1;
		zhemv_("Upper", &i__2, &c_b2, &a[a_offset], lda, &a[i__ * 
			a_dim1 + 1], &c__1, &c_b1, &w[iw * w_dim1 + 1], &c__1);
		if (i__ < *n) {
		    i__2 = i__ - 1;
		    i__3 = *n - i__;
		    zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[(iw 
			    + 1) * w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &
			    c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1);
		    i__2 = i__ - 1;
		    i__3 = *n - i__;
		    z__1.r = -1., z__1.i = -0.;
		    zgemv_("No transpose", &i__2, &i__3, &z__1, &a[(i__ + 1) *
			     a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], &
			    c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1);
		    i__2 = i__ - 1;
		    i__3 = *n - i__;
		    zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[(
			    i__ + 1) * a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], 
			     &c__1, &c_b1, &w[i__ + 1 + iw * w_dim1], &c__1);
		    i__2 = i__ - 1;
		    i__3 = *n - i__;
		    z__1.r = -1., z__1.i = -0.;
		    zgemv_("No transpose", &i__2, &i__3, &z__1, &w[(iw + 1) * 
			    w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], &
			    c__1, &c_b2, &w[iw * w_dim1 + 1], &c__1);
		}
		i__2 = i__ - 1;
		zscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
		z__3.r = -.5, z__3.i = -0.;
		i__2 = i__ - 1;
		z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i, z__2.i =
			 z__3.r * tau[i__2].i + z__3.i * tau[i__2].r;
		i__3 = i__ - 1;
		zdotc_(&z__4, &i__3, &w[iw * w_dim1 + 1], &c__1, &a[i__ * 
			a_dim1 + 1], &c__1);
		z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * 
			z__4.i + z__2.i * z__4.r;
		alpha.r = z__1.r, alpha.i = z__1.i;
		i__2 = i__ - 1;
		zaxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw * 
			w_dim1 + 1], &c__1);
	    }

/* L10: */
	}
    } else {

/*        Reduce first NB columns of lower triangle */

	i__1 = *nb;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Update A(i:n,i) */

	    i__2 = i__ + i__ * a_dim1;
	    i__3 = i__ + i__ * a_dim1;
	    d__1 = a[i__3].r;
	    a[i__2].r = d__1, a[i__2].i = 0.;
	    i__2 = i__ - 1;
	    zlacgv_(&i__2, &w[i__ + w_dim1], ldw);
	    i__2 = *n - i__ + 1;
	    i__3 = i__ - 1;
	    z__1.r = -1., z__1.i = -0.;
	    zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + a_dim1], lda, 
		     &w[i__ + w_dim1], ldw, &c_b2, &a[i__ + i__ * a_dim1], &
		    c__1);
	    i__2 = i__ - 1;
	    zlacgv_(&i__2, &w[i__ + w_dim1], ldw);
	    i__2 = i__ - 1;
	    zlacgv_(&i__2, &a[i__ + a_dim1], lda);
	    i__2 = *n - i__ + 1;
	    i__3 = i__ - 1;
	    z__1.r = -1., z__1.i = -0.;
	    zgemv_("No transpose", &i__2, &i__3, &z__1, &w[i__ + w_dim1], ldw, 
		     &a[i__ + a_dim1], lda, &c_b2, &a[i__ + i__ * a_dim1], &
		    c__1);
	    i__2 = i__ - 1;
	    zlacgv_(&i__2, &a[i__ + a_dim1], lda);
	    i__2 = i__ + i__ * a_dim1;
	    i__3 = i__ + i__ * a_dim1;
	    d__1 = a[i__3].r;
	    a[i__2].r = d__1, a[i__2].i = 0.;
	    if (i__ < *n) {

/*              Generate elementary reflector H(i) to annihilate */
/*              A(i+2:n,i) */

		i__2 = i__ + 1 + i__ * a_dim1;
		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
		i__2 = *n - i__;
/* Computing MIN */
		i__3 = i__ + 2;
		zlarfg_(&i__2, &alpha, &a[MIN(i__3, *n)+ i__ * a_dim1], &c__1, 
			 &tau[i__]);
		i__2 = i__;
		e[i__2] = alpha.r;
		i__2 = i__ + 1 + i__ * a_dim1;
		a[i__2].r = 1., a[i__2].i = 0.;

/*              Compute W(i+1:n,i) */

		i__2 = *n - i__;
		zhemv_("Lower", &i__2, &c_b2, &a[i__ + 1 + (i__ + 1) * a_dim1]
, lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b1, &w[
			i__ + 1 + i__ * w_dim1], &c__1);
		i__2 = *n - i__;
		i__3 = i__ - 1;
		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &w[i__ + 1 
			+ w_dim1], ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &
			c_b1, &w[i__ * w_dim1 + 1], &c__1);
		i__2 = *n - i__;
		i__3 = i__ - 1;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 + 
			a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[
			i__ + 1 + i__ * w_dim1], &c__1);
		i__2 = *n - i__;
		i__3 = i__ - 1;
		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 
			+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
			c_b1, &w[i__ * w_dim1 + 1], &c__1);
		i__2 = *n - i__;
		i__3 = i__ - 1;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__2, &i__3, &z__1, &w[i__ + 1 + 
			w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b2, &w[
			i__ + 1 + i__ * w_dim1], &c__1);
		i__2 = *n - i__;
		zscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
		z__3.r = -.5, z__3.i = -0.;
		i__2 = i__;
		z__2.r = z__3.r * tau[i__2].r - z__3.i * tau[i__2].i, z__2.i =
			 z__3.r * tau[i__2].i + z__3.i * tau[i__2].r;
		i__3 = *n - i__;
		zdotc_(&z__4, &i__3, &w[i__ + 1 + i__ * w_dim1], &c__1, &a[
			i__ + 1 + i__ * a_dim1], &c__1);
		z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * 
			z__4.i + z__2.i * z__4.r;
		alpha.r = z__1.r, alpha.i = z__1.i;
		i__2 = *n - i__;
		zaxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[
			i__ + 1 + i__ * w_dim1], &c__1);
	    }

/* L20: */
	}
    }

    return 0;

/*     End of ZLATRD */

} /* zlatrd_ */
Example #10
/* Subroutine */ int zhgeqz_(char *job, char *compq, char *compz, integer *n, 
	integer *ilo, integer *ihi, doublecomplex *a, integer *lda, 
	doublecomplex *b, integer *ldb, doublecomplex *alpha, doublecomplex *
	beta, doublecomplex *q, integer *ldq, doublecomplex *z__, integer *
	ldz, doublecomplex *work, integer *lwork, doublereal *rwork, integer *
	info)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
	    z_offset, i__1, i__2, i__3, i__4, i__5, i__6;
    doublereal d__1, d__2, d__3, d__4, d__5, d__6;
    doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;

    /* Builtin functions */
    double z_abs(doublecomplex *);
    void d_cnjg(doublecomplex *, doublecomplex *);
    double d_imag(doublecomplex *);
    void z_div(doublecomplex *, doublecomplex *, doublecomplex *), pow_zi(
	    doublecomplex *, doublecomplex *, integer *), z_sqrt(
	    doublecomplex *, doublecomplex *);

    /* Local variables */
    static doublereal absb, atol, btol, temp, opst;
    extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublecomplex *);
    static doublereal temp2, c__;
    static integer j;
    static doublecomplex s, t;
    extern logical lsame_(char *, char *);
    static doublecomplex ctemp;
    static integer iiter, ilast, jiter;
    static doublereal anorm;
    static integer maxit;
    static doublereal bnorm;
    static doublecomplex shift;
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *);
    static doublereal tempr;
    static doublecomplex ctemp2, ctemp3;
    static logical ilazr2;
    static integer jc, in;
    static doublereal ascale, bscale;
    static doublecomplex u12;
    extern doublereal dlamch_(char *);
    static integer jr, nq;
    static doublecomplex signbc;
    static integer nz;
    static doublereal safmin;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    static doublecomplex eshift;
    static logical ilschr;
    static integer icompq, ilastm;
    static doublecomplex rtdisc;
    static integer ischur;
    extern doublereal zlanhs_(char *, integer *, doublecomplex *, integer *, 
	    doublereal *);
    static logical ilazro;
    static integer icompz, ifirst;
    extern /* Subroutine */ int zlartg_(doublecomplex *, doublecomplex *, 
	    doublereal *, doublecomplex *, doublecomplex *);
    static integer ifrstm;
    extern /* Subroutine */ int zlaset_(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, doublecomplex *, integer *);
    static integer istart;
    static logical lquery;
    static doublecomplex ad11, ad12, ad21, ad22;
    static integer jch;
    static logical ilq, ilz;
    static doublereal ulp;
    static doublecomplex abi22;


#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]
#define q_subscr(a_1,a_2) (a_2)*q_dim1 + a_1
#define q_ref(a_1,a_2) q[q_subscr(a_1,a_2)]
#define z___subscr(a_1,a_2) (a_2)*z_dim1 + a_1
#define z___ref(a_1,a_2) z__[z___subscr(a_1,a_2)]


/*  -- LAPACK routine (instrumented to count operations, version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   


       ----------------------- Begin Timing Code ------------------------   
       Common block to return operation count and iteration count   
       ITCNT is initialized to 0, OPS is only incremented   
       OPST is used to accumulate small contributions to OPS   
       to avoid roundoff error   
       ------------------------ End Timing Code -------------------------   


    Purpose   
    =======   

    ZHGEQZ implements a single-shift version of the QZ   
    method for finding the generalized eigenvalues w(i)=ALPHA(i)/BETA(i)   
    of the equation   

         det( A - w(i) B ) = 0   

    If JOB='S', then the pair (A,B) is simultaneously   
    reduced to Schur form (i.e., A and B are both upper triangular) by   
    applying one unitary transformation (usually called Q) on the left and   
    another (usually called Z) on the right.  The diagonal elements of   
    A are then ALPHA(1),...,ALPHA(N), and of B are BETA(1),...,BETA(N).   

    If JOB='S' and COMPQ and COMPZ are 'V' or 'I', then the unitary   
    transformations used to reduce (A,B) are accumulated into the arrays   
    Q and Z s.t.:   

         Q(in) A(in) Z(in)* = Q(out) A(out) Z(out)*   
         Q(in) B(in) Z(in)* = Q(out) B(out) Z(out)*   

    Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix   
         Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973),   
         pp. 241--256.   

    Arguments   
    =========   

    JOB     (input) CHARACTER*1   
            = 'E': compute only ALPHA and BETA.  A and B will not   
                   necessarily be put into generalized Schur form.   
            = 'S': put A and B into generalized Schur form, as well   
                   as computing ALPHA and BETA.   

    COMPQ   (input) CHARACTER*1   
            = 'N': do not modify Q.   
            = 'V': multiply the array Q on the right by the conjugate   
                   transpose of the unitary transformation that is   
                   applied to the left side of A and B to reduce them   
                   to Schur form.   
            = 'I': like COMPQ='V', except that Q will be initialized to   
                   the identity first.   

    COMPZ   (input) CHARACTER*1   
            = 'N': do not modify Z.   
            = 'V': multiply the array Z on the right by the unitary   
                   transformation that is applied to the right side of   
                   A and B to reduce them to Schur form.   
            = 'I': like COMPZ='V', except that Z will be initialized to   
                   the identity first.   

    N       (input) INTEGER   
            The order of the matrices A, B, Q, and Z.  N >= 0.   

    ILO     (input) INTEGER   
    IHI     (input) INTEGER   
            It is assumed that A is already upper triangular in rows and   
            columns 1:ILO-1 and IHI+1:N.   
            1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.   

    A       (input/output) COMPLEX*16 array, dimension (LDA, N)   
            On entry, the N-by-N upper Hessenberg matrix A.  Elements   
            below the subdiagonal must be zero.   
            If JOB='S', then on exit A and B will have been   
               simultaneously reduced to upper triangular form.   
            If JOB='E', then on exit A will have been destroyed.   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= max( 1, N ).   

    B       (input/output) COMPLEX*16 array, dimension (LDB, N)   
            On entry, the N-by-N upper triangular matrix B.  Elements   
            below the diagonal must be zero.   
            If JOB='S', then on exit A and B will have been   
               simultaneously reduced to upper triangular form.   
            If JOB='E', then on exit B will have been destroyed.   

    LDB     (input) INTEGER   
            The leading dimension of the array B.  LDB >= max( 1, N ).   

    ALPHA   (output) COMPLEX*16 array, dimension (N)   
            The diagonal elements of A when the pair (A,B) has been   
            reduced to Schur form.  ALPHA(i)/BETA(i) i=1,...,N   
            are the generalized eigenvalues.   

    BETA    (output) COMPLEX*16 array, dimension (N)   
            The diagonal elements of B when the pair (A,B) has been   
            reduced to Schur form.  ALPHA(i)/BETA(i) i=1,...,N   
            are the generalized eigenvalues.  A and B are normalized   
            so that BETA(1),...,BETA(N) are non-negative real numbers.   

    Q       (input/output) COMPLEX*16 array, dimension (LDQ, N)   
            If COMPQ='N', then Q will not be referenced.   
            If COMPQ='V' or 'I', then the conjugate transpose of the   
               unitary transformations which are applied to A and B on   
               the left will be applied to the array Q on the right.   

    LDQ     (input) INTEGER   
            The leading dimension of the array Q.  LDQ >= 1.   
            If COMPQ='V' or 'I', then LDQ >= N.   

    Z       (input/output) COMPLEX*16 array, dimension (LDZ, N)   
            If COMPZ='N', then Z will not be referenced.   
            If COMPZ='V' or 'I', then the unitary transformations which   
               are applied to A and B on the right will be applied to the   
               array Z on the right.   

    LDZ     (input) INTEGER   
            The leading dimension of the array Z.  LDZ >= 1.   
            If COMPZ='V' or 'I', then LDZ >= N.   

    WORK    (workspace/output) COMPLEX*16 array, dimension (LWORK)   
            On exit, if INFO >= 0, WORK(1) returns the optimal LWORK.   

    LWORK   (input) INTEGER   
            The dimension of the array WORK.  LWORK >= max(1,N).   

            If LWORK = -1, then a workspace query is assumed; the routine   
            only calculates the optimal size of the WORK array, returns   
            this value as the first entry of the WORK array, and no error   
            message related to LWORK is issued by XERBLA.   

    RWORK   (workspace) DOUBLE PRECISION array, dimension (N)   

    INFO    (output) INTEGER   
            = 0: successful exit   
            < 0: if INFO = -i, the i-th argument had an illegal value   
            = 1,...,N: the QZ iteration did not converge.  (A,B) is not   
                       in Schur form, but ALPHA(i) and BETA(i),   
                       i=INFO+1,...,N should be correct.   
            = N+1,...,2*N: the shift calculation failed.  (A,B) is not   
                       in Schur form, but ALPHA(i) and BETA(i),   
                       i=INFO-N+1,...,N should be correct.   
            > 2*N:     various "impossible" errors.   

    Further Details   
    ===============   

    We assume that complex ABS works as long as its value is less than   
    overflow.   

    =====================================================================   

       ----------------------- Begin Timing Code ------------------------   
       Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    --alpha;
    --beta;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1 * 1;
    z__ -= z_offset;
    --work;
    --rwork;

    /* Function Body */
    latime_1.itcnt = 0.;
/*     ------------------------ End Timing Code -------------------------   

       Decode JOB, COMPQ, COMPZ */

    if (lsame_(job, "E")) {
	ilschr = FALSE_;
	ischur = 1;
    } else if (lsame_(job, "S")) {
	ilschr = TRUE_;
	ischur = 2;
    } else {
	ischur = 0;
    }

    if (lsame_(compq, "N")) {
	ilq = FALSE_;
	icompq = 1;
	nq = 0;
    } else if (lsame_(compq, "V")) {
	ilq = TRUE_;
	icompq = 2;
	nq = *n;
    } else if (lsame_(compq, "I")) {
	ilq = TRUE_;
	icompq = 3;
	nq = *n;
    } else {
	icompq = 0;
    }

    if (lsame_(compz, "N")) {
	ilz = FALSE_;
	icompz = 1;
	nz = 0;
    } else if (lsame_(compz, "V")) {
	ilz = TRUE_;
	icompz = 2;
	nz = *n;
    } else if (lsame_(compz, "I")) {
	ilz = TRUE_;
	icompz = 3;
	nz = *n;
    } else {
	icompz = 0;
    }

/*     Check Argument Values */

    *info = 0;
    i__1 = max(1,*n);
    work[1].r = (doublereal) i__1, work[1].i = 0.;
    lquery = *lwork == -1;
    if (ischur == 0) {
	*info = -1;
    } else if (icompq == 0) {
	*info = -2;
    } else if (icompz == 0) {
	*info = -3;
    } else if (*n < 0) {
	*info = -4;
    } else if (*ilo < 1) {
	*info = -5;
    } else if (*ihi > *n || *ihi < *ilo - 1) {
	*info = -6;
    } else if (*lda < *n) {
	*info = -8;
    } else if (*ldb < *n) {
	*info = -10;
    } else if (*ldq < 1 || ilq && *ldq < *n) {
	*info = -14;
    } else if (*ldz < 1 || ilz && *ldz < *n) {
	*info = -16;
    } else if (*lwork < max(1,*n) && ! lquery) {
	*info = -18;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZHGEQZ", &i__1);
	return 0;
    } else if (lquery) {
	return 0;
    }

/*     Quick return if possible   

       WORK( 1 ) = CMPLX( 1 ) */
    if (*n <= 0) {
	work[1].r = 1., work[1].i = 0.;
	return 0;
    }

/*     Initialize Q and Z */

    if (icompq == 3) {
	zlaset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
    }
    if (icompz == 3) {
	zlaset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
    }

/*     Machine Constants */

    in = *ihi + 1 - *ilo;
    safmin = dlamch_("S");
    ulp = dlamch_("E") * dlamch_("B");
    anorm = zlanhs_("F", &in, &a_ref(*ilo, *ilo), lda, &rwork[1]);
    bnorm = zlanhs_("F", &in, &b_ref(*ilo, *ilo), ldb, &rwork[1]);
/* Computing MAX */
    d__1 = safmin, d__2 = ulp * anorm;
    atol = max(d__1,d__2);
/* Computing MAX */
    d__1 = safmin, d__2 = ulp * bnorm;
    btol = max(d__1,d__2);
    ascale = 1. / max(safmin,anorm);
    bscale = 1. / max(safmin,bnorm);

/*     ---------------------- Begin Timing Code -------------------------   
       Count ops for norms, etc. */
    opst = 0.;
/* Computing 2nd power */
    i__1 = *n;
    latime_1.ops += (doublereal) ((i__1 * i__1 << 2) + *n * 12 - 5);
/*     ----------------------- End Timing Code --------------------------   



       Set Eigenvalues IHI+1:N */

    i__1 = *n;
    for (j = *ihi + 1; j <= i__1; ++j) {
	absb = z_abs(&b_ref(j, j));
	if (absb > safmin) {
	    i__2 = b_subscr(j, j);
	    z__2.r = b[i__2].r / absb, z__2.i = b[i__2].i / absb;
	    d_cnjg(&z__1, &z__2);
	    signbc.r = z__1.r, signbc.i = z__1.i;
	    i__2 = b_subscr(j, j);
	    b[i__2].r = absb, b[i__2].i = 0.;
	    if (ilschr) {
		i__2 = j - 1;
		zscal_(&i__2, &signbc, &b_ref(1, j), &c__1);
		zscal_(&j, &signbc, &a_ref(1, j), &c__1);
/*              ----------------- Begin Timing Code --------------------- */
		opst += (doublereal) ((j - 1) * 12);
/*              ------------------ End Timing Code ---------------------- */
	    } else {
		i__2 = a_subscr(j, j);
		i__3 = a_subscr(j, j);
		z__1.r = a[i__3].r * signbc.r - a[i__3].i * signbc.i, z__1.i =
			 a[i__3].r * signbc.i + a[i__3].i * signbc.r;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
	    }
	    if (ilz) {
		zscal_(n, &signbc, &z___ref(1, j), &c__1);
	    }
/*           ------------------- Begin Timing Code ---------------------- */
	    opst += (doublereal) (nz * 6 + 13);
/*           -------------------- End Timing Code ----------------------- */
	} else {
	    i__2 = b_subscr(j, j);
	    b[i__2].r = 0., b[i__2].i = 0.;
	}
	i__2 = j;
	i__3 = a_subscr(j, j);
	alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
	i__2 = j;
	i__3 = b_subscr(j, j);
	beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;
/* L10: */
    }

/*     If IHI < ILO, skip QZ steps */

    if (*ihi < *ilo) {
	goto L190;
    }

/*     MAIN QZ ITERATION LOOP   

       Initialize dynamic indices   

       Eigenvalues ILAST+1:N have been found.   
          Column operations modify rows IFRSTM:whatever   
          Row operations modify columns whatever:ILASTM   

       If only eigenvalues are being computed, then   
          IFRSTM is the row of the last splitting row above row ILAST;   
          this is always at least ILO.   
       IITER counts iterations since the last eigenvalue was found,   
          to tell when to use an extraordinary shift.   
       MAXIT is the maximum number of QZ sweeps allowed. */

    ilast = *ihi;
    if (ilschr) {
	ifrstm = 1;
	ilastm = *n;
    } else {
	ifrstm = *ilo;
	ilastm = *ihi;
    }
    iiter = 0;
    eshift.r = 0., eshift.i = 0.;
    maxit = (*ihi - *ilo + 1) * 30;

    i__1 = maxit;
    for (jiter = 1; jiter <= i__1; ++jiter) {

/*        Check for too many iterations. */

	if (jiter > maxit) {
	    goto L180;
	}

/*        Split the matrix if possible.   

          Two tests:   
             1: A(j,j-1)=0  or  j=ILO   
             2: B(j,j)=0   

          Special case: j=ILAST */

	if (ilast == *ilo) {
	    goto L60;
	} else {
	    i__2 = a_subscr(ilast, ilast - 1);
	    if ((d__1 = a[i__2].r, abs(d__1)) + (d__2 = d_imag(&a_ref(ilast, 
		    ilast - 1)), abs(d__2)) <= atol) {
		i__2 = a_subscr(ilast, ilast - 1);
		a[i__2].r = 0., a[i__2].i = 0.;
		goto L60;
	    }
	}

	if (z_abs(&b_ref(ilast, ilast)) <= btol) {
	    i__2 = b_subscr(ilast, ilast);
	    b[i__2].r = 0., b[i__2].i = 0.;
	    goto L50;
	}

/*        General case: j<ILAST */

	i__2 = *ilo;
	for (j = ilast - 1; j >= i__2; --j) {

/*           Test 1: for A(j,j-1)=0 or j=ILO */

	    if (j == *ilo) {
		ilazro = TRUE_;
	    } else {
		i__3 = a_subscr(j, j - 1);
		if ((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a_ref(j, 
			j - 1)), abs(d__2)) <= atol) {
		    i__3 = a_subscr(j, j - 1);
		    a[i__3].r = 0., a[i__3].i = 0.;
		    ilazro = TRUE_;
		} else {
		    ilazro = FALSE_;
		}
	    }

/*           Test 2: for B(j,j)=0 */

	    if (z_abs(&b_ref(j, j)) < btol) {
		i__3 = b_subscr(j, j);
		b[i__3].r = 0., b[i__3].i = 0.;

/*              Test 1a: Check for 2 consecutive small subdiagonals in A */

		ilazr2 = FALSE_;
		if (! ilazro) {
		    i__3 = a_subscr(j, j - 1);
		    i__4 = a_subscr(j + 1, j);
		    i__5 = a_subscr(j, j);
		    if (((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&
			    a_ref(j, j - 1)), abs(d__2))) * (ascale * ((d__3 =
			     a[i__4].r, abs(d__3)) + (d__4 = d_imag(&a_ref(j 
			    + 1, j)), abs(d__4)))) <= ((d__5 = a[i__5].r, abs(
			    d__5)) + (d__6 = d_imag(&a_ref(j, j)), abs(d__6)))
			     * (ascale * atol)) {
			ilazr2 = TRUE_;
		    }
		}

/*              If both tests pass (1 & 2), i.e., the leading diagonal   
                element of B in the block is zero, split a 1x1 block off   
                at the top. (I.e., at the J-th row/column) The leading   
                diagonal element of the remainder can also be zero, so   
                this may have to be done repeatedly. */

		if (ilazro || ilazr2) {
		    i__3 = ilast - 1;
		    for (jch = j; jch <= i__3; ++jch) {
			i__4 = a_subscr(jch, jch);
			ctemp.r = a[i__4].r, ctemp.i = a[i__4].i;
			zlartg_(&ctemp, &a_ref(jch + 1, jch), &c__, &s, &
				a_ref(jch, jch));
			i__4 = a_subscr(jch + 1, jch);
			a[i__4].r = 0., a[i__4].i = 0.;
			i__4 = ilastm - jch;
			zrot_(&i__4, &a_ref(jch, jch + 1), lda, &a_ref(jch + 
				1, jch + 1), lda, &c__, &s);
			i__4 = ilastm - jch;
			zrot_(&i__4, &b_ref(jch, jch + 1), ldb, &b_ref(jch + 
				1, jch + 1), ldb, &c__, &s);
			if (ilq) {
			    d_cnjg(&z__1, &s);
			    zrot_(n, &q_ref(1, jch), &c__1, &q_ref(1, jch + 1)
				    , &c__1, &c__, &z__1);
			}
			if (ilazr2) {
			    i__4 = a_subscr(jch, jch - 1);
			    i__5 = a_subscr(jch, jch - 1);
			    z__1.r = c__ * a[i__5].r, z__1.i = c__ * a[i__5]
				    .i;
			    a[i__4].r = z__1.r, a[i__4].i = z__1.i;
			}
			ilazr2 = FALSE_;
/*                    --------------- Begin Timing Code ----------------- */
			opst += (doublereal) ((ilastm - jch) * 40 + 32 + nq * 
				20);
/*                    ---------------- End Timing Code ------------------ */
			i__4 = b_subscr(jch + 1, jch + 1);
			if ((d__1 = b[i__4].r, abs(d__1)) + (d__2 = d_imag(&
				b_ref(jch + 1, jch + 1)), abs(d__2)) >= btol) 
				{
			    if (jch + 1 >= ilast) {
				goto L60;
			    } else {
				ifirst = jch + 1;
				goto L70;
			    }
			}
			i__4 = b_subscr(jch + 1, jch + 1);
			b[i__4].r = 0., b[i__4].i = 0.;
/* L20: */
		    }
		    goto L50;
		} else {

/*                 Only test 2 passed -- chase the zero to B(ILAST,ILAST)   
                   Then process as in the case B(ILAST,ILAST)=0 */

		    i__3 = ilast - 1;
		    for (jch = j; jch <= i__3; ++jch) {
			i__4 = b_subscr(jch, jch + 1);
			ctemp.r = b[i__4].r, ctemp.i = b[i__4].i;
			zlartg_(&ctemp, &b_ref(jch + 1, jch + 1), &c__, &s, &
				b_ref(jch, jch + 1));
			i__4 = b_subscr(jch + 1, jch + 1);
			b[i__4].r = 0., b[i__4].i = 0.;
			if (jch < ilastm - 1) {
			    i__4 = ilastm - jch - 1;
			    zrot_(&i__4, &b_ref(jch, jch + 2), ldb, &b_ref(
				    jch + 1, jch + 2), ldb, &c__, &s);
			}
			i__4 = ilastm - jch + 2;
			zrot_(&i__4, &a_ref(jch, jch - 1), lda, &a_ref(jch + 
				1, jch - 1), lda, &c__, &s);
			if (ilq) {
			    d_cnjg(&z__1, &s);
			    zrot_(n, &q_ref(1, jch), &c__1, &q_ref(1, jch + 1)
				    , &c__1, &c__, &z__1);
			}
			i__4 = a_subscr(jch + 1, jch);
			ctemp.r = a[i__4].r, ctemp.i = a[i__4].i;
			zlartg_(&ctemp, &a_ref(jch + 1, jch - 1), &c__, &s, &
				a_ref(jch + 1, jch));
			i__4 = a_subscr(jch + 1, jch - 1);
			a[i__4].r = 0., a[i__4].i = 0.;
			i__4 = jch + 1 - ifrstm;
			zrot_(&i__4, &a_ref(ifrstm, jch), &c__1, &a_ref(
				ifrstm, jch - 1), &c__1, &c__, &s);
			i__4 = jch - ifrstm;
			zrot_(&i__4, &b_ref(ifrstm, jch), &c__1, &b_ref(
				ifrstm, jch - 1), &c__1, &c__, &s);
			if (ilz) {
			    zrot_(n, &z___ref(1, jch), &c__1, &z___ref(1, jch 
				    - 1), &c__1, &c__, &s);
			}
/* L30: */
		    }

/*                 ---------------- Begin Timing Code ------------------- */
		    opst += (doublereal) ((ilastm + 1 - ifrstm) * 40 + 64 + (
			    nq + nz) * 20) * (doublereal) (ilast - j);
/*                 ----------------- End Timing Code -------------------- */

		    goto L50;
		}
	    } else if (ilazro) {

/*              Only test 1 passed -- work on J:ILAST */

		ifirst = j;
		goto L70;
	    }

/*           Neither test passed -- try next J   

   L40: */
	}

/*        (Drop-through is "impossible") */

	*info = (*n << 1) + 1;
	goto L210;

/*        B(ILAST,ILAST)=0 -- clear A(ILAST,ILAST-1) to split off a   
          1x1 block. */

L50:
	i__2 = a_subscr(ilast, ilast);
	ctemp.r = a[i__2].r, ctemp.i = a[i__2].i;
	zlartg_(&ctemp, &a_ref(ilast, ilast - 1), &c__, &s, &a_ref(ilast, 
		ilast));
	i__2 = a_subscr(ilast, ilast - 1);
	a[i__2].r = 0., a[i__2].i = 0.;
	i__2 = ilast - ifrstm;
	zrot_(&i__2, &a_ref(ifrstm, ilast), &c__1, &a_ref(ifrstm, ilast - 1), 
		&c__1, &c__, &s);
	i__2 = ilast - ifrstm;
	zrot_(&i__2, &b_ref(ifrstm, ilast), &c__1, &b_ref(ifrstm, ilast - 1), 
		&c__1, &c__, &s);
	if (ilz) {
	    zrot_(n, &z___ref(1, ilast), &c__1, &z___ref(1, ilast - 1), &c__1,
		     &c__, &s);
	}
/*        --------------------- Begin Timing Code ----------------------- */
	opst += (doublereal) ((ilast - ifrstm) * 40 + 32 + nz * 20);
/*        ---------------------- End Timing Code ------------------------   

          A(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA */

L60:
	absb = z_abs(&b_ref(ilast, ilast));
	if (absb > safmin) {
	    i__2 = b_subscr(ilast, ilast);
	    z__2.r = b[i__2].r / absb, z__2.i = b[i__2].i / absb;
	    d_cnjg(&z__1, &z__2);
	    signbc.r = z__1.r, signbc.i = z__1.i;
	    i__2 = b_subscr(ilast, ilast);
	    b[i__2].r = absb, b[i__2].i = 0.;
	    if (ilschr) {
		i__2 = ilast - ifrstm;
		zscal_(&i__2, &signbc, &b_ref(ifrstm, ilast), &c__1);
		i__2 = ilast + 1 - ifrstm;
		zscal_(&i__2, &signbc, &a_ref(ifrstm, ilast), &c__1);
/*              ----------------- Begin Timing Code --------------------- */
		opst += (doublereal) ((ilast - ifrstm) * 12);
/*              ------------------ End Timing Code ---------------------- */
	    } else {
		i__2 = a_subscr(ilast, ilast);
		i__3 = a_subscr(ilast, ilast);
		z__1.r = a[i__3].r * signbc.r - a[i__3].i * signbc.i, z__1.i =
			 a[i__3].r * signbc.i + a[i__3].i * signbc.r;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
	    }
	    if (ilz) {
		zscal_(n, &signbc, &z___ref(1, ilast), &c__1);
	    }
/*           ------------------- Begin Timing Code ---------------------- */
	    opst += (doublereal) (nz * 6 + 13);
/*           -------------------- End Timing Code ----------------------- */
	} else {
	    i__2 = b_subscr(ilast, ilast);
	    b[i__2].r = 0., b[i__2].i = 0.;
	}
	i__2 = ilast;
	i__3 = a_subscr(ilast, ilast);
	alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
	i__2 = ilast;
	i__3 = b_subscr(ilast, ilast);
	beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;

/*        Go to next block -- exit if finished. */

	--ilast;
	if (ilast < *ilo) {
	    goto L190;
	}

/*        Reset counters */

	iiter = 0;
	eshift.r = 0., eshift.i = 0.;
	if (! ilschr) {
	    ilastm = ilast;
	    if (ifrstm > ilast) {
		ifrstm = *ilo;
	    }
	}
	goto L160;

/*        QZ step   

          This iteration only involves rows/columns IFIRST:ILAST.  We   
          assume IFIRST < ILAST, and that the diagonal of B is non-zero. */

L70:
	++iiter;
	if (! ilschr) {
	    ifrstm = ifirst;
	}

/*        Compute the Shift.   

          At this point, IFIRST < ILAST, and the diagonal elements of   
          B(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in   
          magnitude) */

	if (iiter / 10 * 10 != iiter) {

/*           The Wilkinson shift (AEP p.512), i.e., the eigenvalue of   
             the bottom-right 2x2 block of A inv(B) which is nearest to   
             the bottom-right element.   

             We factor B as U*D, where U has unit diagonals, and   
             compute (A*inv(D))*inv(U). */

	    i__2 = b_subscr(ilast - 1, ilast);
	    z__2.r = bscale * b[i__2].r, z__2.i = bscale * b[i__2].i;
	    i__3 = b_subscr(ilast, ilast);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    u12.r = z__1.r, u12.i = z__1.i;
	    i__2 = a_subscr(ilast - 1, ilast - 1);
	    z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast - 1, ilast - 1);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    ad11.r = z__1.r, ad11.i = z__1.i;
	    i__2 = a_subscr(ilast, ilast - 1);
	    z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast - 1, ilast - 1);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    ad21.r = z__1.r, ad21.i = z__1.i;
	    i__2 = a_subscr(ilast - 1, ilast);
	    z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast, ilast);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    ad12.r = z__1.r, ad12.i = z__1.i;
	    i__2 = a_subscr(ilast, ilast);
	    z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast, ilast);
	    z__3.r = bscale * b[i__3].r, z__3.i = bscale * b[i__3].i;
	    z_div(&z__1, &z__2, &z__3);
	    ad22.r = z__1.r, ad22.i = z__1.i;
	    z__2.r = u12.r * ad21.r - u12.i * ad21.i, z__2.i = u12.r * ad21.i 
		    + u12.i * ad21.r;
	    z__1.r = ad22.r - z__2.r, z__1.i = ad22.i - z__2.i;
	    abi22.r = z__1.r, abi22.i = z__1.i;

	    z__2.r = ad11.r + abi22.r, z__2.i = ad11.i + abi22.i;
	    z__1.r = z__2.r * .5, z__1.i = z__2.i * .5;
	    t.r = z__1.r, t.i = z__1.i;
	    pow_zi(&z__4, &t, &c__2);
	    z__5.r = ad12.r * ad21.r - ad12.i * ad21.i, z__5.i = ad12.r * 
		    ad21.i + ad12.i * ad21.r;
	    z__3.r = z__4.r + z__5.r, z__3.i = z__4.i + z__5.i;
	    z__6.r = ad11.r * ad22.r - ad11.i * ad22.i, z__6.i = ad11.r * 
		    ad22.i + ad11.i * ad22.r;
	    z__2.r = z__3.r - z__6.r, z__2.i = z__3.i - z__6.i;
	    z_sqrt(&z__1, &z__2);
	    rtdisc.r = z__1.r, rtdisc.i = z__1.i;
	    z__1.r = t.r - abi22.r, z__1.i = t.i - abi22.i;
	    z__2.r = t.r - abi22.r, z__2.i = t.i - abi22.i;
	    temp = z__1.r * rtdisc.r + d_imag(&z__2) * d_imag(&rtdisc);
	    if (temp <= 0.) {
		z__1.r = t.r + rtdisc.r, z__1.i = t.i + rtdisc.i;
		shift.r = z__1.r, shift.i = z__1.i;
	    } else {
		z__1.r = t.r - rtdisc.r, z__1.i = t.i - rtdisc.i;
		shift.r = z__1.r, shift.i = z__1.i;
	    }

/*           ------------------- Begin Timing Code ---------------------- */
	    opst += 116.;
/*           -------------------- End Timing Code ----------------------- */

	} else {

/*           Exceptional shift.  Chosen for no particularly good reason. */

	    i__2 = a_subscr(ilast - 1, ilast);
	    z__4.r = ascale * a[i__2].r, z__4.i = ascale * a[i__2].i;
	    i__3 = b_subscr(ilast - 1, ilast - 1);
	    z__5.r = bscale * b[i__3].r, z__5.i = bscale * b[i__3].i;
	    z_div(&z__3, &z__4, &z__5);
	    d_cnjg(&z__2, &z__3);
	    z__1.r = eshift.r + z__2.r, z__1.i = eshift.i + z__2.i;
	    eshift.r = z__1.r, eshift.i = z__1.i;
	    shift.r = eshift.r, shift.i = eshift.i;

/*           ------------------- Begin Timing Code ---------------------- */
	    opst += 15.;
/*           -------------------- End Timing Code ----------------------- */

	}

/*        Now check for two consecutive small subdiagonals. */

	i__2 = ifirst + 1;
	for (j = ilast - 1; j >= i__2; --j) {
	    istart = j;
	    i__3 = a_subscr(j, j);
	    z__2.r = ascale * a[i__3].r, z__2.i = ascale * a[i__3].i;
	    i__4 = b_subscr(j, j);
	    z__4.r = bscale * b[i__4].r, z__4.i = bscale * b[i__4].i;
	    z__3.r = shift.r * z__4.r - shift.i * z__4.i, z__3.i = shift.r * 
		    z__4.i + shift.i * z__4.r;
	    z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
	    ctemp.r = z__1.r, ctemp.i = z__1.i;
	    temp = (d__1 = ctemp.r, abs(d__1)) + (d__2 = d_imag(&ctemp), abs(
		    d__2));
	    i__3 = a_subscr(j + 1, j);
	    temp2 = ascale * ((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&
		    a_ref(j + 1, j)), abs(d__2)));
	    tempr = max(temp,temp2);
	    if (tempr < 1. && tempr != 0.) {
		temp /= tempr;
		temp2 /= tempr;
	    }
	    i__3 = a_subscr(j, j - 1);
	    if (((d__1 = a[i__3].r, abs(d__1)) + (d__2 = d_imag(&a_ref(j, j - 
		    1)), abs(d__2))) * temp2 <= temp * atol) {
		goto L90;
	    }
/* L80: */
	}

	istart = ifirst;
	i__2 = a_subscr(ifirst, ifirst);
	z__2.r = ascale * a[i__2].r, z__2.i = ascale * a[i__2].i;
	i__3 = b_subscr(ifirst, ifirst);
	z__4.r = bscale * b[i__3].r, z__4.i = bscale * b[i__3].i;
	z__3.r = shift.r * z__4.r - shift.i * z__4.i, z__3.i = shift.r * 
		z__4.i + shift.i * z__4.r;
	z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
	ctemp.r = z__1.r, ctemp.i = z__1.i;

/*        --------------------- Begin Timing Code ----------------------- */
	opst += -6.;
/*        ---------------------- End Timing Code ------------------------ */

L90:

/*        Do an implicit-shift QZ sweep.   

          Initial Q */

	i__2 = a_subscr(istart + 1, istart);
	z__1.r = ascale * a[i__2].r, z__1.i = ascale * a[i__2].i;
	ctemp2.r = z__1.r, ctemp2.i = z__1.i;

/*        --------------------- Begin Timing Code ----------------------- */
	opst += (doublereal) ((ilast - istart) * 18 + 2);
/*        ---------------------- End Timing Code ------------------------ */

	zlartg_(&ctemp, &ctemp2, &c__, &s, &ctemp3);
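/*        zlartg_ returns c (real) and s (complex) defining the plane rotation
          [ c  s; -conjg(s)  c ] that maps (ctemp,ctemp2) to (r,0).  The sweep
          below applies this rotation to rows j and j+1 of A and B (and
          accumulates it into Q when ILQ is set), then restores B to upper
          triangular form with a second rotation applied from the right. */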

/*        Sweep */

	i__2 = ilast - 1;
	for (j = istart; j <= i__2; ++j) {
	    if (j > istart) {
		i__3 = a_subscr(j, j - 1);
		ctemp.r = a[i__3].r, ctemp.i = a[i__3].i;
		zlartg_(&ctemp, &a_ref(j + 1, j - 1), &c__, &s, &a_ref(j, j - 
			1));
		i__3 = a_subscr(j + 1, j - 1);
		a[i__3].r = 0., a[i__3].i = 0.;
	    }

	    i__3 = ilastm;
	    for (jc = j; jc <= i__3; ++jc) {
		i__4 = a_subscr(j, jc);
		z__2.r = c__ * a[i__4].r, z__2.i = c__ * a[i__4].i;
		i__5 = a_subscr(j + 1, jc);
		z__3.r = s.r * a[i__5].r - s.i * a[i__5].i, z__3.i = s.r * a[
			i__5].i + s.i * a[i__5].r;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		ctemp.r = z__1.r, ctemp.i = z__1.i;
		i__4 = a_subscr(j + 1, jc);
		d_cnjg(&z__4, &s);
		z__3.r = -z__4.r, z__3.i = -z__4.i;
		i__5 = a_subscr(j, jc);
		z__2.r = z__3.r * a[i__5].r - z__3.i * a[i__5].i, z__2.i = 
			z__3.r * a[i__5].i + z__3.i * a[i__5].r;
		i__6 = a_subscr(j + 1, jc);
		z__5.r = c__ * a[i__6].r, z__5.i = c__ * a[i__6].i;
		z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		a[i__4].r = z__1.r, a[i__4].i = z__1.i;
		i__4 = a_subscr(j, jc);
		a[i__4].r = ctemp.r, a[i__4].i = ctemp.i;
		i__4 = b_subscr(j, jc);
		z__2.r = c__ * b[i__4].r, z__2.i = c__ * b[i__4].i;
		i__5 = b_subscr(j + 1, jc);
		z__3.r = s.r * b[i__5].r - s.i * b[i__5].i, z__3.i = s.r * b[
			i__5].i + s.i * b[i__5].r;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		ctemp2.r = z__1.r, ctemp2.i = z__1.i;
		i__4 = b_subscr(j + 1, jc);
		d_cnjg(&z__4, &s);
		z__3.r = -z__4.r, z__3.i = -z__4.i;
		i__5 = b_subscr(j, jc);
		z__2.r = z__3.r * b[i__5].r - z__3.i * b[i__5].i, z__2.i = 
			z__3.r * b[i__5].i + z__3.i * b[i__5].r;
		i__6 = b_subscr(j + 1, jc);
		z__5.r = c__ * b[i__6].r, z__5.i = c__ * b[i__6].i;
		z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		b[i__4].r = z__1.r, b[i__4].i = z__1.i;
		i__4 = b_subscr(j, jc);
		b[i__4].r = ctemp2.r, b[i__4].i = ctemp2.i;
/* L100: */
	    }
	    if (ilq) {
		i__3 = *n;
		for (jr = 1; jr <= i__3; ++jr) {
		    i__4 = q_subscr(jr, j);
		    z__2.r = c__ * q[i__4].r, z__2.i = c__ * q[i__4].i;
		    d_cnjg(&z__4, &s);
		    i__5 = q_subscr(jr, j + 1);
		    z__3.r = z__4.r * q[i__5].r - z__4.i * q[i__5].i, z__3.i =
			     z__4.r * q[i__5].i + z__4.i * q[i__5].r;
		    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		    ctemp.r = z__1.r, ctemp.i = z__1.i;
		    i__4 = q_subscr(jr, j + 1);
		    z__3.r = -s.r, z__3.i = -s.i;
		    i__5 = q_subscr(jr, j);
		    z__2.r = z__3.r * q[i__5].r - z__3.i * q[i__5].i, z__2.i =
			     z__3.r * q[i__5].i + z__3.i * q[i__5].r;
		    i__6 = q_subscr(jr, j + 1);
		    z__4.r = c__ * q[i__6].r, z__4.i = c__ * q[i__6].i;
		    z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
		    q[i__4].r = z__1.r, q[i__4].i = z__1.i;
		    i__4 = q_subscr(jr, j);
		    q[i__4].r = ctemp.r, q[i__4].i = ctemp.i;
/* L110: */
		}
	    }

	    i__3 = b_subscr(j + 1, j + 1);
	    ctemp.r = b[i__3].r, ctemp.i = b[i__3].i;
	    zlartg_(&ctemp, &b_ref(j + 1, j), &c__, &s, &b_ref(j + 1, j + 1));
	    i__3 = b_subscr(j + 1, j);
	    b[i__3].r = 0., b[i__3].i = 0.;
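/*           This second rotation acts on columns j and j+1 from the right:
             it annihilates B(j+1,j), keeping B upper triangular, and is
             applied to the same columns of A (and accumulated into Z when
             ILZ is set). */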

/* Computing MIN */
	    i__4 = j + 2;
	    i__3 = min(i__4,ilast);
	    for (jr = ifrstm; jr <= i__3; ++jr) {
		i__4 = a_subscr(jr, j + 1);
		z__2.r = c__ * a[i__4].r, z__2.i = c__ * a[i__4].i;
		i__5 = a_subscr(jr, j);
		z__3.r = s.r * a[i__5].r - s.i * a[i__5].i, z__3.i = s.r * a[
			i__5].i + s.i * a[i__5].r;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		ctemp.r = z__1.r, ctemp.i = z__1.i;
		i__4 = a_subscr(jr, j);
		d_cnjg(&z__4, &s);
		z__3.r = -z__4.r, z__3.i = -z__4.i;
		i__5 = a_subscr(jr, j + 1);
		z__2.r = z__3.r * a[i__5].r - z__3.i * a[i__5].i, z__2.i = 
			z__3.r * a[i__5].i + z__3.i * a[i__5].r;
		i__6 = a_subscr(jr, j);
		z__5.r = c__ * a[i__6].r, z__5.i = c__ * a[i__6].i;
		z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		a[i__4].r = z__1.r, a[i__4].i = z__1.i;
		i__4 = a_subscr(jr, j + 1);
		a[i__4].r = ctemp.r, a[i__4].i = ctemp.i;
/* L120: */
	    }
	    i__3 = j;
	    for (jr = ifrstm; jr <= i__3; ++jr) {
		i__4 = b_subscr(jr, j + 1);
		z__2.r = c__ * b[i__4].r, z__2.i = c__ * b[i__4].i;
		i__5 = b_subscr(jr, j);
		z__3.r = s.r * b[i__5].r - s.i * b[i__5].i, z__3.i = s.r * b[
			i__5].i + s.i * b[i__5].r;
		z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		ctemp.r = z__1.r, ctemp.i = z__1.i;
		i__4 = b_subscr(jr, j);
		d_cnjg(&z__4, &s);
		z__3.r = -z__4.r, z__3.i = -z__4.i;
		i__5 = b_subscr(jr, j + 1);
		z__2.r = z__3.r * b[i__5].r - z__3.i * b[i__5].i, z__2.i = 
			z__3.r * b[i__5].i + z__3.i * b[i__5].r;
		i__6 = b_subscr(jr, j);
		z__5.r = c__ * b[i__6].r, z__5.i = c__ * b[i__6].i;
		z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		b[i__4].r = z__1.r, b[i__4].i = z__1.i;
		i__4 = b_subscr(jr, j + 1);
		b[i__4].r = ctemp.r, b[i__4].i = ctemp.i;
/* L130: */
	    }
	    if (ilz) {
		i__3 = *n;
		for (jr = 1; jr <= i__3; ++jr) {
		    i__4 = z___subscr(jr, j + 1);
		    z__2.r = c__ * z__[i__4].r, z__2.i = c__ * z__[i__4].i;
		    i__5 = z___subscr(jr, j);
		    z__3.r = s.r * z__[i__5].r - s.i * z__[i__5].i, z__3.i = 
			    s.r * z__[i__5].i + s.i * z__[i__5].r;
		    z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
		    ctemp.r = z__1.r, ctemp.i = z__1.i;
		    i__4 = z___subscr(jr, j);
		    d_cnjg(&z__4, &s);
		    z__3.r = -z__4.r, z__3.i = -z__4.i;
		    i__5 = z___subscr(jr, j + 1);
		    z__2.r = z__3.r * z__[i__5].r - z__3.i * z__[i__5].i, 
			    z__2.i = z__3.r * z__[i__5].i + z__3.i * z__[i__5]
			    .r;
		    i__6 = z___subscr(jr, j);
		    z__5.r = c__ * z__[i__6].r, z__5.i = c__ * z__[i__6].i;
		    z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		    z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
		    i__4 = z___subscr(jr, j + 1);
		    z__[i__4].r = ctemp.r, z__[i__4].i = ctemp.i;
/* L140: */
		}
	    }
/* L150: */
	}

/*        --------------------- Begin Timing Code ----------------------- */
	opst += (doublereal) (ilast - istart) * (doublereal) ((ilastm - 
		ifrstm) * 40 + 184 + (nq + nz) * 20) - 20;
/*        ---------------------- End Timing Code ------------------------ */

L160:

/*        --------------------- Begin Timing Code -----------------------   
          End of iteration -- add in "small" contributions. */
	latime_1.ops += opst;
	opst = 0.;
/*        ---------------------- End Timing Code ------------------------   


   L170: */
    }

/*     Drop-through = non-convergence */

L180:
    *info = ilast;

/*     ---------------------- Begin Timing Code ------------------------- */
    latime_1.ops += opst;
    opst = 0.;
/*     ----------------------- End Timing Code -------------------------- */

    goto L210;

/*     Successful completion of all QZ steps */

L190:

/*     Set Eigenvalues 1:ILO-1 */
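/*     Each 1-by-1 block j < ILO is normalized so that BETA(j) = B(j,j) is
       real and non-negative: when the Schur form is accumulated (ILSCHR),
       zscal_ multiplies B(1:j-1,j) and A(1:j,j) by
       signbc = conjg( b(j,j)/|b(j,j)| ); otherwise only A(j,j) is scaled.
       Z, if requested, is scaled in the same way.  If |b(j,j)| <= SAFMIN,
       B(j,j) is simply set to zero. */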

    i__1 = *ilo - 1;
    for (j = 1; j <= i__1; ++j) {
	absb = z_abs(&b_ref(j, j));
	if (absb > safmin) {
	    i__2 = b_subscr(j, j);
	    z__2.r = b[i__2].r / absb, z__2.i = b[i__2].i / absb;
	    d_cnjg(&z__1, &z__2);
	    signbc.r = z__1.r, signbc.i = z__1.i;
	    i__2 = b_subscr(j, j);
	    b[i__2].r = absb, b[i__2].i = 0.;
	    if (ilschr) {
		i__2 = j - 1;
		zscal_(&i__2, &signbc, &b_ref(1, j), &c__1);
		zscal_(&j, &signbc, &a_ref(1, j), &c__1);
/*              ----------------- Begin Timing Code --------------------- */
		opst += (doublereal) ((j - 1) * 12);
/*              ------------------ End Timing Code ---------------------- */
	    } else {
		i__2 = a_subscr(j, j);
		i__3 = a_subscr(j, j);
		z__1.r = a[i__3].r * signbc.r - a[i__3].i * signbc.i, z__1.i =
			 a[i__3].r * signbc.i + a[i__3].i * signbc.r;
		a[i__2].r = z__1.r, a[i__2].i = z__1.i;
	    }
	    if (ilz) {
		zscal_(n, &signbc, &z___ref(1, j), &c__1);
	    }
/*           ------------------- Begin Timing Code ---------------------- */
	    opst += (doublereal) (nz * 6 + 13);
/*           -------------------- End Timing Code ----------------------- */
	} else {
	    i__2 = b_subscr(j, j);
	    b[i__2].r = 0., b[i__2].i = 0.;
	}
	i__2 = j;
	i__3 = a_subscr(j, j);
	alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
	i__2 = j;
	i__3 = b_subscr(j, j);
	beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;
/* L200: */
    }

/*     Normal Termination */

    *info = 0;

/*     Exit (other than argument error) -- return optimal workspace size */

L210:

/*     ---------------------- Begin Timing Code ------------------------- */
    latime_1.ops += opst;
    opst = 0.;
    latime_1.itcnt = (doublereal) jiter;
/*     ----------------------- End Timing Code -------------------------- */

    z__1.r = (doublereal) (*n), z__1.i = 0.;
    work[1].r = z__1.r, work[1].i = z__1.i;
    return 0;

/*     End of ZHGEQZ */

} /* zhgeqz_ */
Example #11
0
/* Subroutine */ int zlatdf_(integer *ijob, integer *n, doublecomplex *z__, 
	integer *ldz, doublecomplex *rhs, doublereal *rdsum, doublereal *
	rdscal, integer *ipiv, integer *jpiv)
{
    /* System generated locals */
    integer z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
    doublecomplex z__1, z__2, z__3;

    /* Builtin functions */
    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
    double z_abs(doublecomplex *);
    void z_sqrt(doublecomplex *, doublecomplex *);

    /* Local variables */
    integer i__, j, k;
    doublecomplex bm, bp, xm[2], xp[2];
    integer info;
    doublecomplex temp, work[8];
    doublereal scale;
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *);
    doublecomplex pmone;
    extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublecomplex *, integer *);
    doublereal rtemp, sminu, rwork[2];
    extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *);
    doublereal splus;
    extern /* Subroutine */ int zaxpy_(integer *, doublecomplex *, 
	    doublecomplex *, integer *, doublecomplex *, integer *), zgesc2_(
	    integer *, doublecomplex *, integer *, doublecomplex *, integer *, 
	     integer *, doublereal *), zgecon_(char *, integer *, 
	    doublecomplex *, integer *, doublereal *, doublereal *, 
	    doublecomplex *, doublereal *, integer *);
    extern doublereal dzasum_(integer *, doublecomplex *, integer *);
    extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *, 
	     doublereal *, doublereal *), zlaswp_(integer *, doublecomplex *, 
	    integer *, integer *, integer *, integer *, integer *);


/*  -- LAPACK auxiliary routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZLATDF computes the contribution to the reciprocal Dif-estimate */
/*  by solving for x in Z * x = b, where b is chosen such that the norm */
/*  of x is as large as possible. It is assumed that LU decomposition */
/*  of Z has been computed by ZGETC2. On entry RHS = f holds the */
/*  contribution from earlier solved sub-systems, and on return RHS = x. */

/*  The factorization of Z returned by ZGETC2 has the form */
/*  Z = P * L * U * Q, where P and Q are permutation matrices. L is lower */
/*  triangular with unit diagonal elements and U is upper triangular. */

/*  Arguments */
/*  ========= */

/*  IJOB    (input) INTEGER */
/*          IJOB = 2: First compute an approximate null-vector e */
/*              of Z using ZGECON, e is normalized, and solve for */
/*              Zx = +-e - f with the sign giving the greater value of */
/*              2-norm(x).  About 5 times as expensive as the default. */
/*          IJOB .ne. 2: Local look-ahead strategy where */
/*              all entries of the r.h.s. b are chosen as either +1 or */
/*              -1.  This is the default. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix Z. */

/*  Z       (input) COMPLEX*16 array, dimension (LDZ, N) */
/*          On entry, the LU part of the factorization of the n-by-n */
/*          matrix Z computed by ZGETC2:  Z = P * L * U * Q */

/*  LDZ     (input) INTEGER */
/*          The leading dimension of the array Z.  LDZ >= max(1, N). */

/*  RHS     (input/output) COMPLEX*16 array, dimension (N). */
/*          On entry, RHS contains contributions from other subsystems. */
/*          On exit, RHS contains the solution of the subsystem with */
/*          entries according to the value of IJOB (see above). */

/*  RDSUM   (input/output) DOUBLE PRECISION */
/*          On entry, the sum of squares of computed contributions to */
/*          the Dif-estimate under computation by ZTGSYL, where the */
/*          scaling factor RDSCAL (see below) has been factored out. */
/*          On exit, the corresponding sum of squares updated with the */
/*          contributions from the current sub-system. */
/*          If TRANS = 'T' RDSUM is not touched. */
/*          NOTE: RDSUM only makes sense when ZTGSY2 is called by ZTGSYL. */

/*  RDSCAL  (input/output) DOUBLE PRECISION */
/*          On entry, scaling factor used to prevent overflow in RDSUM. */
/*          On exit, RDSCAL is updated w.r.t. the current contributions */
/*          in RDSUM. */
/*          If TRANS = 'T', RDSCAL is not touched. */
/*          NOTE: RDSCAL only makes sense when ZTGSY2 is called by */
/*          ZTGSYL. */

/*  IPIV    (input) INTEGER array, dimension (N). */
/*          The pivot indices; for 1 <= i <= N, row i of the */
/*          matrix has been interchanged with row IPIV(i). */

/*  JPIV    (input) INTEGER array, dimension (N). */
/*          The pivot indices; for 1 <= j <= N, column j of the */
/*          matrix has been interchanged with column JPIV(j). */

/*  Further Details */
/*  =============== */

/*  Based on contributions by */
/*     Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
/*     Umea University, S-901 87 Umea, Sweden. */

/*  This routine is a further developed implementation of algorithm */
/*  BSOLVE in [1] using complete pivoting in the LU factorization. */

/*   [1]   Bo Kagstrom and Lars Westin, */
/*         Generalized Schur Methods with Condition Estimators for */
/*         Solving the Generalized Sylvester Equation, IEEE Transactions */
/*         on Automatic Control, Vol. 34, No. 7, July 1989, pp 745-751. */

/*   [2]   Peter Poromaa, */
/*         On Efficient and Robust Estimators for the Separation */
/*         between two Regular Matrix Pairs with Applications in */
/*         Condition Estimation. Report UMINF-95.05, Department of */
/*         Computing Science, Umea University, S-901 87 Umea, Sweden, */
/*         1995. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;
    --rhs;
    --ipiv;
    --jpiv;

    /* Function Body */
    if (*ijob != 2) {

/*        Apply permutations IPIV to RHS */

	i__1 = *n - 1;
	zlaswp_(&c__1, &rhs[1], ldz, &c__1, &i__1, &ipiv[1], &c__1);

/*        Solve for L-part, choosing the entries of RHS as either +1 or -1. */

	z__1.r = -1., z__1.i = -0.;
	pmone.r = z__1.r, pmone.i = z__1.i;
	i__1 = *n - 1;
	for (j = 1; j <= i__1; ++j) {
	    i__2 = j;
	    z__1.r = rhs[i__2].r + 1., z__1.i = rhs[i__2].i + 0.;
	    bp.r = z__1.r, bp.i = z__1.i;
	    i__2 = j;
	    z__1.r = rhs[i__2].r - 1., z__1.i = rhs[i__2].i - 0.;
	    bm.r = z__1.r, bm.i = z__1.i;
	    splus = 1.;

/*           Look-ahead for L-part RHS(1:N-1) = +-1 */
/*           SPLUS and SMINU computed more efficiently than in BSOLVE [1]. */
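/*           Here SPLUS = ( 1 + ||Z(j+1:n,j)||**2 ) * DBLE( RHS(j) ) and
             SMINU = DBLE( Z(j+1:n,j)**H * RHS(j+1:n) ); comparing the two
             decides whether RHS(j) is replaced by RHS(j)+1 or RHS(j)-1
             before the column is eliminated by the zaxpy_ call below. */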

	    i__2 = *n - j;
	    zdotc_(&z__1, &i__2, &z__[j + 1 + j * z_dim1], &c__1, &z__[j + 1 
		    + j * z_dim1], &c__1);
	    splus += z__1.r;
	    i__2 = *n - j;
	    zdotc_(&z__1, &i__2, &z__[j + 1 + j * z_dim1], &c__1, &rhs[j + 1], 
		     &c__1);
	    sminu = z__1.r;
	    i__2 = j;
	    splus *= rhs[i__2].r;
	    if (splus > sminu) {
		i__2 = j;
		rhs[i__2].r = bp.r, rhs[i__2].i = bp.i;
	    } else if (sminu > splus) {
		i__2 = j;
		rhs[i__2].r = bm.r, rhs[i__2].i = bm.i;
	    } else {

/*              In this case the updating sums are equal and we can */
/*              choose RHS(J) to be +1 or -1. The first time this happens we */
/*              choose -1, thereafter +1. This is a simple way to get */
/*              good estimates of matrices like Byers' well-known example */
/*              (see [1]). (Not done in BSOLVE.) */

		i__2 = j;
		i__3 = j;
		z__1.r = rhs[i__3].r + pmone.r, z__1.i = rhs[i__3].i + 
			pmone.i;
		rhs[i__2].r = z__1.r, rhs[i__2].i = z__1.i;
		pmone.r = 1., pmone.i = 0.;
	    }

/*           Compute the remaining r.h.s. */

	    i__2 = j;
	    z__1.r = -rhs[i__2].r, z__1.i = -rhs[i__2].i;
	    temp.r = z__1.r, temp.i = z__1.i;
	    i__2 = *n - j;
	    zaxpy_(&i__2, &temp, &z__[j + 1 + j * z_dim1], &c__1, &rhs[j + 1], 
		     &c__1);
/* L10: */
	}

/*        Solve for U-part, look-ahead for RHS(N) = +-1. This is not done */
/*        in BSOLVE and will hopefully give us a better estimate because */
/*        any ill-conditioning of the original matrix is transferred to U */
/*        and not to L. U(N, N) is an approximation to sigma_min(LU). */

	i__1 = *n - 1;
	zcopy_(&i__1, &rhs[1], &c__1, work, &c__1);
	i__1 = *n - 1;
	i__2 = *n;
	z__1.r = rhs[i__2].r + 1., z__1.i = rhs[i__2].i + 0.;
	work[i__1].r = z__1.r, work[i__1].i = z__1.i;
	i__1 = *n;
	i__2 = *n;
	z__1.r = rhs[i__2].r - 1., z__1.i = rhs[i__2].i - 0.;
	rhs[i__1].r = z__1.r, rhs[i__1].i = z__1.i;
	splus = 0.;
	sminu = 0.;
	for (i__ = *n; i__ >= 1; --i__) {
	    z_div(&z__1, &c_b1, &z__[i__ + i__ * z_dim1]);
	    temp.r = z__1.r, temp.i = z__1.i;
	    i__1 = i__ - 1;
	    i__2 = i__ - 1;
	    z__1.r = work[i__2].r * temp.r - work[i__2].i * temp.i, z__1.i = 
		    work[i__2].r * temp.i + work[i__2].i * temp.r;
	    work[i__1].r = z__1.r, work[i__1].i = z__1.i;
	    i__1 = i__;
	    i__2 = i__;
	    z__1.r = rhs[i__2].r * temp.r - rhs[i__2].i * temp.i, z__1.i = 
		    rhs[i__2].r * temp.i + rhs[i__2].i * temp.r;
	    rhs[i__1].r = z__1.r, rhs[i__1].i = z__1.i;
	    i__1 = *n;
	    for (k = i__ + 1; k <= i__1; ++k) {
		i__2 = i__ - 1;
		i__3 = i__ - 1;
		i__4 = k - 1;
		i__5 = i__ + k * z_dim1;
		z__3.r = z__[i__5].r * temp.r - z__[i__5].i * temp.i, z__3.i =
			 z__[i__5].r * temp.i + z__[i__5].i * temp.r;
		z__2.r = work[i__4].r * z__3.r - work[i__4].i * z__3.i, 
			z__2.i = work[i__4].r * z__3.i + work[i__4].i * 
			z__3.r;
		z__1.r = work[i__3].r - z__2.r, z__1.i = work[i__3].i - 
			z__2.i;
		work[i__2].r = z__1.r, work[i__2].i = z__1.i;
		i__2 = i__;
		i__3 = i__;
		i__4 = k;
		i__5 = i__ + k * z_dim1;
		z__3.r = z__[i__5].r * temp.r - z__[i__5].i * temp.i, z__3.i =
			 z__[i__5].r * temp.i + z__[i__5].i * temp.r;
		z__2.r = rhs[i__4].r * z__3.r - rhs[i__4].i * z__3.i, z__2.i =
			 rhs[i__4].r * z__3.i + rhs[i__4].i * z__3.r;
		z__1.r = rhs[i__3].r - z__2.r, z__1.i = rhs[i__3].i - z__2.i;
		rhs[i__2].r = z__1.r, rhs[i__2].i = z__1.i;
/* L20: */
	    }
	    splus += z_abs(&work[i__ - 1]);
	    sminu += z_abs(&rhs[i__]);
/* L30: */
	}
	if (splus > sminu) {
	    zcopy_(n, work, &c__1, &rhs[1], &c__1);
	}

/*        Apply the permutations JPIV to the computed solution (RHS) */

	i__1 = *n - 1;
	zlaswp_(&c__1, &rhs[1], ldz, &c__1, &i__1, &jpiv[1], &c_n1);

/*        Compute the sum of squares */

	zlassq_(n, &rhs[1], &c__1, rdscal, rdsum);
	return 0;
    }

/*     ENTRY IJOB = 2 */

/*     Compute approximate nullvector XM of Z */

    zgecon_("I", n, &z__[z_offset], ldz, &c_b24, &rtemp, work, rwork, &info);
    zcopy_(n, &work[*n], &c__1, xm, &c__1);

/*     Compute RHS */

    i__1 = *n - 1;
    zlaswp_(&c__1, xm, ldz, &c__1, &i__1, &ipiv[1], &c_n1);
    zdotc_(&z__3, n, xm, &c__1, xm, &c__1);
    z_sqrt(&z__2, &z__3);
    z_div(&z__1, &c_b1, &z__2);
    temp.r = z__1.r, temp.i = z__1.i;
    zscal_(n, &temp, xm, &c__1);
    zcopy_(n, xm, &c__1, xp, &c__1);
    zaxpy_(n, &c_b1, &rhs[1], &c__1, xp, &c__1);
    z__1.r = -1., z__1.i = -0.;
    zaxpy_(n, &z__1, xm, &c__1, &rhs[1], &c__1);
    zgesc2_(n, &z__[z_offset], ldz, &rhs[1], &ipiv[1], &jpiv[1], &scale);
    zgesc2_(n, &z__[z_offset], ldz, xp, &ipiv[1], &jpiv[1], &scale);
    if (dzasum_(n, xp, &c__1) > dzasum_(n, &rhs[1], &c__1)) {
	zcopy_(n, xp, &c__1, &rhs[1], &c__1);
    }

/*     Compute the sum of squares */

    zlassq_(n, &rhs[1], &c__1, rdscal, rdsum);
    return 0;

/*     End of ZLATDF */

} /* zlatdf_ */
Example #12
0
 int ztgsy2_(char *trans, int *ijob, int *m, int *
	n, doublecomplex *a, int *lda, doublecomplex *b, int *ldb, 
	doublecomplex *c__, int *ldc, doublecomplex *d__, int *ldd, 
	doublecomplex *e, int *lde, doublecomplex *f, int *ldf, 
	double *scale, double *rdsum, double *rdscal, int *
	info)
{
    /* System generated locals */
    int a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, d_dim1, 
	    d_offset, e_dim1, e_offset, f_dim1, f_offset, i__1, i__2, i__3, 
	    i__4;
    doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;

    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    int i__, j, k;
    doublecomplex z__[4]	/* was [2][2] */, rhs[2];
    int ierr, ipiv[2], jpiv[2];
    doublecomplex alpha;
    extern int lsame_(char *, char *);
    extern  int zscal_(int *, doublecomplex *, 
	    doublecomplex *, int *), zaxpy_(int *, doublecomplex *, 
	    doublecomplex *, int *, doublecomplex *, int *), zgesc2_(
	    int *, doublecomplex *, int *, doublecomplex *, int *, 
	     int *, double *), zgetc2_(int *, doublecomplex *, 
	    int *, int *, int *, int *);
    double scaloc;
    extern  int xerbla_(char *, int *), zlatdf_(
	    int *, int *, doublecomplex *, int *, doublecomplex *, 
	     double *, double *, int *, int *);
    int notran;


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZTGSY2 solves the generalized Sylvester equation */

/*              A * R - L * B = scale *   C               (1) */
/*              D * R - L * E = scale * F */

/*  using Level 1 and 2 BLAS, where R and L are unknown M-by-N matrices, */
/*  (A, D), (B, E) and (C, F) are given matrix pairs of size M-by-M, */
/*  N-by-N and M-by-N, respectively. A, B, D and E are upper triangular */
/*  (i.e., (A,D) and (B,E) in generalized Schur form). */

/*  The solution (R, L) overwrites (C, F). 0 <= SCALE <= 1 is an output */
/*  scaling factor chosen to avoid overflow. */

/*  In matrix notation solving equation (1) corresponds to solving */
/*  Zx = scale * b, where Z is defined as */

/*         Z = [ kron(In, A)  -kron(B', Im) ]             (2) */
/*             [ kron(In, D)  -kron(E', Im) ], */

/*  Ik is the identity matrix of size k and X' is the transpose of X. */
/*  kron(X, Y) is the Kronecker product between the matrices X and Y. */

/*  If TRANS = 'C', y in the conjugate transposed system Z'y = scale*b */
/*  is solved for, which is equivalent to solving for R and L in */

/*              A' * R  + D' * L   = scale *  C           (3) */
/*              R  * B' + L  * E'  = scale * -F */

/*  This case is used to compute an estimate of Dif[(A, D), (B, E)] = */
/*  = sigma_MIN(Z) using reverse communication with ZLACON. */

/*  ZTGSY2 also (IJOB >= 1) contributes to the computation in ZTGSYL */
/*  of an upper bound on the separation between two matrix pairs. Then */
/*  the input (A, D), (B, E) are sub-pencils of two matrix pairs in */
/*  ZTGSYL. */

/*  Arguments */
/*  ========= */

/*  TRANS   (input) CHARACTER*1 */
/*          = 'N': solve the generalized Sylvester equation (1). */
/*          = 'C': solve the 'conjugate transposed' system (3). */

/*  IJOB    (input) INTEGER */
/*          Specifies what kind of functionality to be performed. */
/*          =0: solve (1) only. */
/*          =1: A contribution from this subsystem to a Frobenius */
/*              norm-based estimate of the separation between two matrix */
/*              pairs is computed. (Look-ahead strategy is used.) */
/*          =2: A contribution from this subsystem to a Frobenius */
/*              norm-based estimate of the separation between two matrix */
/*              pairs is computed. (ZGECON on sub-systems is used.) */
/*          Not referenced if TRANS = 'C'. */

/*  M       (input) INTEGER */
/*          On entry, M specifies the order of A and D, and the row */
/*          dimension of C, F, R and L. */

/*  N       (input) INTEGER */
/*          On entry, N specifies the order of B and E, and the column */
/*          dimension of C, F, R and L. */

/*  A       (input) COMPLEX*16 array, dimension (LDA, M) */
/*          On entry, A contains an upper triangular matrix. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the matrix A. LDA >= MAX(1, M). */

/*  B       (input) COMPLEX*16 array, dimension (LDB, N) */
/*          On entry, B contains an upper triangular matrix. */

/*  LDB     (input) INTEGER */
/*          The leading dimension of the matrix B. LDB >= MAX(1, N). */

/*  C       (input/output) COMPLEX*16 array, dimension (LDC, N) */
/*          On entry, C contains the right-hand-side of the first matrix */
/*          equation in (1). */
/*          On exit, if IJOB = 0, C has been overwritten by the solution */
/*          R. */

/*  LDC     (input) INTEGER */
/*          The leading dimension of the matrix C. LDC >= MAX(1, M). */

/*  D       (input) COMPLEX*16 array, dimension (LDD, M) */
/*          On entry, D contains an upper triangular matrix. */

/*  LDD     (input) INTEGER */
/*          The leading dimension of the matrix D. LDD >= MAX(1, M). */

/*  E       (input) COMPLEX*16 array, dimension (LDE, N) */
/*          On entry, E contains an upper triangular matrix. */

/*  LDE     (input) INTEGER */
/*          The leading dimension of the matrix E. LDE >= MAX(1, N). */

/*  F       (input/output) COMPLEX*16 array, dimension (LDF, N) */
/*          On entry, F contains the right-hand-side of the second matrix */
/*          equation in (1). */
/*          On exit, if IJOB = 0, F has been overwritten by the solution */
/*          L. */

/*  LDF     (input) INTEGER */
/*          The leading dimension of the matrix F. LDF >= MAX(1, M). */

/*  SCALE   (output) DOUBLE PRECISION */
/*          On exit, 0 <= SCALE <= 1. If 0 < SCALE < 1, the solutions */
/*          R and L (C and F on entry) will hold the solutions to a */
/*          slightly perturbed system but the input matrices A, B, D and */
/*          E have not been changed. If SCALE = 0, R and L will hold the */
/*          solutions to the homogeneous system with C = F = 0. */
/*          Normally, SCALE = 1. */

/*  RDSUM   (input/output) DOUBLE PRECISION */
/*          On entry, the sum of squares of computed contributions to */
/*          the Dif-estimate under computation by ZTGSYL, where the */
/*          scaling factor RDSCAL (see below) has been factored out. */
/*          On exit, the corresponding sum of squares updated with the */
/*          contributions from the current sub-system. */
/*          If TRANS = 'T' RDSUM is not touched. */
/*          NOTE: RDSUM only makes sense when ZTGSY2 is called by */
/*          ZTGSYL. */

/*  RDSCAL  (input/output) DOUBLE PRECISION */
/*          On entry, scaling factor used to prevent overflow in RDSUM. */
/*          On exit, RDSCAL is updated w.r.t. the current contributions */
/*          in RDSUM. */
/*          If TRANS = 'T', RDSCAL is not touched. */
/*          NOTE: RDSCAL only makes sense when ZTGSY2 is called by */
/*          ZTGSYL. */

/*  INFO    (output) INTEGER */
/*          On exit, if INFO is set to */
/*            =0: Successful exit */
/*            <0: If INFO = -i, input argument number i is illegal. */
/*            >0: The matrix pairs (A, D) and (B, E) have common or very */
/*                close eigenvalues. */

/*  Further Details */
/*  =============== */

/*  Based on contributions by */
/*     Bo Kagstrom and Peter Poromaa, Department of Computing Science, */
/*     Umea University, S-901 87 Umea, Sweden. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Decode and test input parameters */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1;
    c__ -= c_offset;
    d_dim1 = *ldd;
    d_offset = 1 + d_dim1;
    d__ -= d_offset;
    e_dim1 = *lde;
    e_offset = 1 + e_dim1;
    e -= e_offset;
    f_dim1 = *ldf;
    f_offset = 1 + f_dim1;
    f -= f_offset;

    /* Function Body */
    *info = 0;
    ierr = 0;
    notran = lsame_(trans, "N");
    if (! notran && ! lsame_(trans, "C")) {
	*info = -1;
    } else if (notran) {
	if (*ijob < 0 || *ijob > 2) {
	    *info = -2;
	}
    }
    if (*info == 0) {
	if (*m <= 0) {
	    *info = -3;
	} else if (*n <= 0) {
	    *info = -4;
	} else if (*lda < MAX(1,*m)) {
	    *info = -5;
	} else if (*ldb < MAX(1,*n)) {
	    *info = -8;
	} else if (*ldc < MAX(1,*m)) {
	    *info = -10;
	} else if (*ldd < MAX(1,*m)) {
	    *info = -12;
	} else if (*lde < MAX(1,*n)) {
	    *info = -14;
	} else if (*ldf < MAX(1,*m)) {
	    *info = -16;
	}
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZTGSY2", &i__1);
	return 0;
    }

    if (notran) {

/*        Solve (I, J) - system */
/*           A(I, I) * R(I, J) - L(I, J) * B(J, J) = C(I, J) */
/*           D(I, I) * R(I, J) - L(I, J) * E(J, J) = F(I, J) */
/*        for I = M, M - 1, ..., 1; J = 1, 2, ..., N */

	*scale = 1.;
	scaloc = 1.;
	i__1 = *n;
	for (j = 1; j <= i__1; ++j) {
	    for (i__ = *m; i__ >= 1; --i__) {

/*              Build 2 by 2 system */

		i__2 = i__ + i__ * a_dim1;
		z__[0].r = a[i__2].r, z__[0].i = a[i__2].i;
		i__2 = i__ + i__ * d_dim1;
		z__[1].r = d__[i__2].r, z__[1].i = d__[i__2].i;
		i__2 = j + j * b_dim1;
		z__1.r = -b[i__2].r, z__1.i = -b[i__2].i;
		z__[2].r = z__1.r, z__[2].i = z__1.i;
		i__2 = j + j * e_dim1;
		z__1.r = -e[i__2].r, z__1.i = -e[i__2].i;
		z__[3].r = z__1.r, z__[3].i = z__1.i;

/*              Set up right hand side(s) */

		i__2 = i__ + j * c_dim1;
		rhs[0].r = c__[i__2].r, rhs[0].i = c__[i__2].i;
		i__2 = i__ + j * f_dim1;
		rhs[1].r = f[i__2].r, rhs[1].i = f[i__2].i;

/*              Solve Z * x = RHS */

		zgetc2_(&c__2, z__, &c__2, ipiv, jpiv, &ierr);
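/*              zgetc2_ factors the 2-by-2 matrix Z as P*L*U*Q using complete
                pivoting (row pivots in IPIV, column pivots in JPIV); a
                positive IERR means a too-small pivot was perturbed, i.e. the
                pairs (A,D) and (B,E) have common or very close eigenvalues. */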
		if (ierr > 0) {
		    *info = ierr;
		}
		if (*ijob == 0) {
		    zgesc2_(&c__2, z__, &c__2, rhs, ipiv, jpiv, &scaloc);
		    if (scaloc != 1.) {
			i__2 = *n;
			for (k = 1; k <= i__2; ++k) {
			    z__1.r = scaloc, z__1.i = 0.;
			    zscal_(m, &z__1, &c__[k * c_dim1 + 1], &c__1);
			    z__1.r = scaloc, z__1.i = 0.;
			    zscal_(m, &z__1, &f[k * f_dim1 + 1], &c__1);
/* L10: */
			}
			*scale *= scaloc;
		    }
		} else {
		    zlatdf_(ijob, &c__2, z__, &c__2, rhs, rdsum, rdscal, ipiv, 
			     jpiv);
		}

/*              Unpack solution vector(s) */

		i__2 = i__ + j * c_dim1;
		c__[i__2].r = rhs[0].r, c__[i__2].i = rhs[0].i;
		i__2 = i__ + j * f_dim1;
		f[i__2].r = rhs[1].r, f[i__2].i = rhs[1].i;

/*              Substitute R(I, J) and L(I, J) into remaining equation. */

		if (i__ > 1) {
		    z__1.r = -rhs[0].r, z__1.i = -rhs[0].i;
		    alpha.r = z__1.r, alpha.i = z__1.i;
		    i__2 = i__ - 1;
		    zaxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &c__[j 
			    * c_dim1 + 1], &c__1);
		    i__2 = i__ - 1;
		    zaxpy_(&i__2, &alpha, &d__[i__ * d_dim1 + 1], &c__1, &f[j 
			    * f_dim1 + 1], &c__1);
		}
		if (j < *n) {
		    i__2 = *n - j;
		    zaxpy_(&i__2, &rhs[1], &b[j + (j + 1) * b_dim1], ldb, &
			    c__[i__ + (j + 1) * c_dim1], ldc);
		    i__2 = *n - j;
		    zaxpy_(&i__2, &rhs[1], &e[j + (j + 1) * e_dim1], lde, &f[
			    i__ + (j + 1) * f_dim1], ldf);
		}

/* L20: */
	    }
/* L30: */
	}
    } else {

/*        Solve transposed (I, J) - system: */
/*           A(I, I)' * R(I, J) + D(I, I)' * L(I, J) = C(I, J) */
/*           R(I, J) * B(J, J) + L(I, J) * E(J, J)   = -F(I, J) */
/*        for I = 1, 2, ..., M, J = N, N - 1, ..., 1 */

	*scale = 1.;
	scaloc = 1.;
	i__1 = *m;
	for (i__ = 1; i__ <= i__1; ++i__) {
	    for (j = *n; j >= 1; --j) {

/*              Build 2 by 2 system Z' */

		d_cnjg(&z__1, &a[i__ + i__ * a_dim1]);
		z__[0].r = z__1.r, z__[0].i = z__1.i;
		d_cnjg(&z__2, &b[j + j * b_dim1]);
		z__1.r = -z__2.r, z__1.i = -z__2.i;
		z__[1].r = z__1.r, z__[1].i = z__1.i;
		d_cnjg(&z__1, &d__[i__ + i__ * d_dim1]);
		z__[2].r = z__1.r, z__[2].i = z__1.i;
		d_cnjg(&z__2, &e[j + j * e_dim1]);
		z__1.r = -z__2.r, z__1.i = -z__2.i;
		z__[3].r = z__1.r, z__[3].i = z__1.i;


/*              Set up right hand side(s) */

		i__2 = i__ + j * c_dim1;
		rhs[0].r = c__[i__2].r, rhs[0].i = c__[i__2].i;
		i__2 = i__ + j * f_dim1;
		rhs[1].r = f[i__2].r, rhs[1].i = f[i__2].i;

/*              Solve Z' * x = RHS */

		zgetc2_(&c__2, z__, &c__2, ipiv, jpiv, &ierr);
		if (ierr > 0) {
		    *info = ierr;
		}
		zgesc2_(&c__2, z__, &c__2, rhs, ipiv, jpiv, &scaloc);
		if (scaloc != 1.) {
		    i__2 = *n;
		    for (k = 1; k <= i__2; ++k) {
			z__1.r = scaloc, z__1.i = 0.;
			zscal_(m, &z__1, &c__[k * c_dim1 + 1], &c__1);
			z__1.r = scaloc, z__1.i = 0.;
			zscal_(m, &z__1, &f[k * f_dim1 + 1], &c__1);
/* L40: */
		    }
		    *scale *= scaloc;
		}

/*              Unpack solution vector(s) */

		i__2 = i__ + j * c_dim1;
		c__[i__2].r = rhs[0].r, c__[i__2].i = rhs[0].i;
		i__2 = i__ + j * f_dim1;
		f[i__2].r = rhs[1].r, f[i__2].i = rhs[1].i;

/*              Substitute R(I, J) and L(I, J) into remaining equation. */

		i__2 = j - 1;
		for (k = 1; k <= i__2; ++k) {
		    i__3 = i__ + k * f_dim1;
		    i__4 = i__ + k * f_dim1;
		    d_cnjg(&z__4, &b[k + j * b_dim1]);
		    z__3.r = rhs[0].r * z__4.r - rhs[0].i * z__4.i, z__3.i = 
			    rhs[0].r * z__4.i + rhs[0].i * z__4.r;
		    z__2.r = f[i__4].r + z__3.r, z__2.i = f[i__4].i + z__3.i;
		    d_cnjg(&z__6, &e[k + j * e_dim1]);
		    z__5.r = rhs[1].r * z__6.r - rhs[1].i * z__6.i, z__5.i = 
			    rhs[1].r * z__6.i + rhs[1].i * z__6.r;
		    z__1.r = z__2.r + z__5.r, z__1.i = z__2.i + z__5.i;
		    f[i__3].r = z__1.r, f[i__3].i = z__1.i;
/* L50: */
		}
		i__2 = *m;
		for (k = i__ + 1; k <= i__2; ++k) {
		    i__3 = k + j * c_dim1;
		    i__4 = k + j * c_dim1;
		    d_cnjg(&z__4, &a[i__ + k * a_dim1]);
		    z__3.r = z__4.r * rhs[0].r - z__4.i * rhs[0].i, z__3.i = 
			    z__4.r * rhs[0].i + z__4.i * rhs[0].r;
		    z__2.r = c__[i__4].r - z__3.r, z__2.i = c__[i__4].i - 
			    z__3.i;
		    d_cnjg(&z__6, &d__[i__ + k * d_dim1]);
		    z__5.r = z__6.r * rhs[1].r - z__6.i * rhs[1].i, z__5.i = 
			    z__6.r * rhs[1].i + z__6.i * rhs[1].r;
		    z__1.r = z__2.r - z__5.r, z__1.i = z__2.i - z__5.i;
		    c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
/* L60: */
		}

/* L70: */
	    }
/* L80: */
	}
    }
    return 0;

/*     End of ZTGSY2 */

} /* ztgsy2_ */
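A minimal usage sketch (not part of the original listing): it calls ztgsy2_ on a 1-by-1 pair to solve a*r - l*b = c, d*r - l*e = f with IJOB = 0. The typedef and the extern prototype below are assumptions written to match the listing above; an actual CLAPACK-style build would normally supply them via f2c.h/clapack.h, and the program must be linked against a library that provides ztgsy2_ and its dependencies.

#include <stdio.h>

/* Assumed f2c-compatible complex type (this listing passes plain int/double). */
typedef struct { double r, i; } doublecomplex;

extern int ztgsy2_(char *trans, int *ijob, int *m, int *n,
                   doublecomplex *a, int *lda, doublecomplex *b, int *ldb,
                   doublecomplex *c__, int *ldc, doublecomplex *d__, int *ldd,
                   doublecomplex *e, int *lde, doublecomplex *f, int *ldf,
                   double *scale, double *rdsum, double *rdscal, int *info);

int main(void)
{
    /* 1-by-1 data chosen so that the exact solution is R = 1, L = 1:
           A*R - L*B = 2 - 1 =  1 = C
           D*R - L*E = 1 - 3 = -2 = F                                    */
    doublecomplex a = {2., 0.}, b = {1., 0.}, d = {1., 0.}, e = {3., 0.};
    doublecomplex c = {1., 0.}, f = {-2., 0.};
    int m = 1, n = 1, ld = 1, ijob = 0, info = 0;
    double scale = 1., rdsum = 1., rdscal = 0.;  /* RDSUM/RDSCAL idle for IJOB = 0 */

    ztgsy2_("N", &ijob, &m, &n, &a, &ld, &b, &ld, &c, &ld,
            &d, &ld, &e, &ld, &f, &ld, &scale, &rdsum, &rdscal, &info);

    /* On exit C holds R and F holds L; expect (1,0), (1,0), SCALE = 1, INFO = 0. */
    printf("info=%d scale=%g R=(%g,%g) L=(%g,%g)\n",
           info, scale, c.r, c.i, f.r, f.i);
    return 0;
}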
Example #13
0
/* Subroutine */ int check1_(doublereal *sfac)
{
    /* Initialized data */

    static doublereal strue2[5] = { 0.,.5,.6,.7,.7 };
    static doublereal strue4[5] = { 0.,.7,1.,1.3,1.7 };
    static doublecomplex ctrue5[80]	/* was [8][5][2] */ = { {.1,.1},{
            1.,
            2.
        },{1.,2.},{1.,2.},{1.,2.},{1.,2.},{1.,2.},{1.,2.},{-.16,-.37},{
            3.,4.
        },{3.,4.},{3.,4.},{3.,4.},{3.,4.},{3.,4.},{3.,4.},{-.17,-.19}
        ,{.13,-.39},{5.,6.},{5.,6.},{5.,6.},{5.,6.},{5.,6.},{5.,6.},{
            .11,
            -.03
        },{-.17,.46},{-.17,-.19},{7.,8.},{7.,8.},{7.,8.},{7.,8.},{
            7.,
            8.
        },{.19,-.17},{.32,.09},{.23,-.24},{.18,.01},{2.,3.},{2.,3.},{
            2.,
            3.
        },{2.,3.},{.1,.1},{4.,5.},{4.,5.},{4.,5.},{4.,5.},{4.,5.},{
            4.,
            5.
        },{4.,5.},{-.16,-.37},{6.,7.},{6.,7.},{6.,7.},{6.,7.},{6.,7.},{
            6.,7.
        },{6.,7.},{-.17,-.19},{8.,9.},{.13,-.39},{2.,5.},{2.,5.},{
            2.,
            5.
        },{2.,5.},{2.,5.},{.11,-.03},{3.,6.},{-.17,.46},{4.,7.},{
            -.17,
            -.19
        },{7.,2.},{7.,2.},{7.,2.},{.19,-.17},{5.,8.},{.32,.09},{6.,9.}
        ,{.23,-.24},{8.,3.},{.18,.01},{9.,4.}
    };
    static doublecomplex ctrue6[80]	/* was [8][5][2] */ = { {.1,.1},{
            1.,
            2.
        },{1.,2.},{1.,2.},{1.,2.},{1.,2.},{1.,2.},{1.,2.},{.09,-.12},{
            3.,4.
        },{3.,4.},{3.,4.},{3.,4.},{3.,4.},{3.,4.},{3.,4.},{.03,-.09},
        {.15,-.03},{5.,6.},{5.,6.},{5.,6.},{5.,6.},{5.,6.},{5.,6.},{.03,
            .03
        },{-.18,.03},{.03,-.09},{7.,8.},{7.,8.},{7.,8.},{7.,8.},{7.,8.}
        ,{.09,.03},{.03,.12},{.12,.03},{.03,.06},{2.,3.},{2.,3.},{2.,3.},{
            2.,3.
        },{.1,.1},{4.,5.},{4.,5.},{4.,5.},{4.,5.},{4.,5.},{4.,5.},{
            4.,5.
        },{.09,-.12},{6.,7.},{6.,7.},{6.,7.},{6.,7.},{6.,7.},{6.,7.},
        {6.,7.},{.03,-.09},{8.,9.},{.15,-.03},{2.,5.},{2.,5.},{2.,5.},{2.,
            5.
        },{2.,5.},{.03,.03},{3.,6.},{-.18,.03},{4.,7.},{.03,-.09},{
            7.,
            2.
        },{7.,2.},{7.,2.},{.09,.03},{5.,8.},{.03,.12},{6.,9.},{.12,.03},
        {8.,3.},{.03,.06},{9.,4.}
    };
    static integer itrue3[5] = { 0,1,2,2,2 };
    static doublereal sa = .3;
    static doublecomplex ca = {.4,-.7};
    static doublecomplex cv[80]	/* was [8][5][2] */ = { {.1,.1},{1.,2.},{
            1.,
            2.
        },{1.,2.},{1.,2.},{1.,2.},{1.,2.},{1.,2.},{.3,-.4},{3.,4.},{
            3.,
            4.
        },{3.,4.},{3.,4.},{3.,4.},{3.,4.},{3.,4.},{.1,-.3},{.5,-.1},{
            5.,
            6.
        },{5.,6.},{5.,6.},{5.,6.},{5.,6.},{5.,6.},{.1,.1},{-.6,.1},{
            .1,
            -.3
        },{7.,8.},{7.,8.},{7.,8.},{7.,8.},{7.,8.},{.3,.1},{.1,.4},{
            .4,
            .1
        },{.1,.2},{2.,3.},{2.,3.},{2.,3.},{2.,3.},{.1,.1},{4.,5.},{
            4.,
            5.
        },{4.,5.},{4.,5.},{4.,5.},{4.,5.},{4.,5.},{.3,-.4},{6.,7.},{
            6.,
            7.
        },{6.,7.},{6.,7.},{6.,7.},{6.,7.},{6.,7.},{.1,-.3},{8.,9.},{
            .5,
            -.1
        },{2.,5.},{2.,5.},{2.,5.},{2.,5.},{2.,5.},{.1,.1},{3.,6.},{
            -.6,
            .1
        },{4.,7.},{.1,-.3},{7.,2.},{7.,2.},{7.,2.},{.3,.1},{5.,8.},{
            .1,
            .4
        },{6.,9.},{.4,.1},{8.,3.},{.1,.2},{9.,4.}
    };

    /* System generated locals */
    integer i__1, i__2, i__3;
    doublereal d__1;
    doublecomplex z__1;

    /* Builtin functions */
    integer s_wsle(cilist *), do_lio(integer *, integer *, char *, ftnlen),
            e_wsle(void);
    /* Subroutine */ int s_stop(char *, ftnlen);

    /* Local variables */
    static integer i__;
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
                                       doublecomplex *, integer *), ctest_(integer *, doublecomplex *,
                                               doublecomplex *, doublecomplex *, doublereal *);
    static doublecomplex mwpcs[5], mwpct[5];
    extern /* Subroutine */ int itest1_(integer *, integer *);
    extern doublereal dznrm2_(integer *, doublecomplex *, integer *);
    extern /* Subroutine */ int stest1_(doublereal *, doublereal *,
                                        doublereal *, doublereal *);
    static doublecomplex cx[8];
    extern /* Subroutine */ int zdscal_(integer *, doublereal *,
                                        doublecomplex *, integer *);
    extern integer izamax_(integer *, doublecomplex *, integer *);
    extern doublereal dzasum_(integer *, doublecomplex *, integer *);
    static integer np1, len;

    /* Fortran I/O blocks */
    static cilist io___19 = { 0, 6, 0, 0, 0 };



#define ctrue5_subscr(a_1,a_2,a_3) ((a_3)*5 + (a_2))*8 + a_1 - 49
#define ctrue5_ref(a_1,a_2,a_3) ctrue5[ctrue5_subscr(a_1,a_2,a_3)]
#define ctrue6_subscr(a_1,a_2,a_3) ((a_3)*5 + (a_2))*8 + a_1 - 49
#define ctrue6_ref(a_1,a_2,a_3) ctrue6[ctrue6_subscr(a_1,a_2,a_3)]
#define cv_subscr(a_1,a_2,a_3) ((a_3)*5 + (a_2))*8 + a_1 - 49
#define cv_ref(a_1,a_2,a_3) cv[cv_subscr(a_1,a_2,a_3)]
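/* The *_subscr macros translate the 1-based Fortran triple (i, np1, incx)
   of the 8 x 5 x 2 arrays CV, CTRUE5 and CTRUE6 into a 0-based C offset;
   the constant -49 maps the lower bound (1,1,1) to element 0. */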

    for (combla_1.incx = 1; combla_1.incx <= 2; ++combla_1.incx) {
        for (np1 = 1; np1 <= 5; ++np1) {
            combla_1.n = np1 - 1;
            len = max(combla_1.n,1) << 1;
            i__1 = len;
            for (i__ = 1; i__ <= i__1; ++i__) {
                i__2 = i__ - 1;
                i__3 = cv_subscr(i__, np1, combla_1.incx);
                cx[i__2].r = cv[i__3].r, cx[i__2].i = cv[i__3].i;
                /* L20: */
            }
            if (combla_1.icase == 6) {
                d__1 = dznrm2_(&combla_1.n, cx, &combla_1.incx);
                stest1_(&d__1, &strue2[np1 - 1], &strue2[np1 - 1], sfac);
            } else if (combla_1.icase == 7) {
                d__1 = dzasum_(&combla_1.n, cx, &combla_1.incx);
                stest1_(&d__1, &strue4[np1 - 1], &strue4[np1 - 1], sfac);
            } else if (combla_1.icase == 8) {
                zscal_(&combla_1.n, &ca, cx, &combla_1.incx);
                ctest_(&len, cx, &ctrue5_ref(1, np1, combla_1.incx), &
                       ctrue5_ref(1, np1, combla_1.incx), sfac);
            } else if (combla_1.icase == 9) {
                zdscal_(&combla_1.n, &sa, cx, &combla_1.incx);
                ctest_(&len, cx, &ctrue6_ref(1, np1, combla_1.incx), &
                       ctrue6_ref(1, np1, combla_1.incx), sfac);
            } else if (combla_1.icase == 10) {
                i__1 = izamax_(&combla_1.n, cx, &combla_1.incx);
                itest1_(&i__1, &itrue3[np1 - 1]);
            } else {
                s_wsle(&io___19);
                do_lio(&c__9, &c__1, " Shouldn't be here in CHECK1", (ftnlen)
                       28);
                e_wsle();
                s_stop("", (ftnlen)0);
            }

            /* L40: */
        }
        /* L60: */
    }

    combla_1.incx = 1;
    if (combla_1.icase == 8) {
        /*        ZSCAL
                  Add a test for alpha equal to zero. */
        ca.r = 0., ca.i = 0.;
        for (i__ = 1; i__ <= 5; ++i__) {
            i__1 = i__ - 1;
            mwpct[i__1].r = 0., mwpct[i__1].i = 0.;
            i__1 = i__ - 1;
            mwpcs[i__1].r = 1., mwpcs[i__1].i = 1.;
            /* L80: */
        }
        zscal_(&c__5, &ca, cx, &combla_1.incx);
        ctest_(&c__5, cx, mwpct, mwpcs, sfac);
    } else if (combla_1.icase == 9) {
        /*        ZDSCAL
                  Add a test for alpha equal to zero. */
        sa = 0.;
        for (i__ = 1; i__ <= 5; ++i__) {
            i__1 = i__ - 1;
            mwpct[i__1].r = 0., mwpct[i__1].i = 0.;
            i__1 = i__ - 1;
            mwpcs[i__1].r = 1., mwpcs[i__1].i = 1.;
            /* L100: */
        }
        zdscal_(&c__5, &sa, cx, &combla_1.incx);
        ctest_(&c__5, cx, mwpct, mwpcs, sfac);
        /*        Add a test for alpha equal to one. */
        sa = 1.;
        for (i__ = 1; i__ <= 5; ++i__) {
            i__1 = i__ - 1;
            i__2 = i__ - 1;
            mwpct[i__1].r = cx[i__2].r, mwpct[i__1].i = cx[i__2].i;
            i__1 = i__ - 1;
            i__2 = i__ - 1;
            mwpcs[i__1].r = cx[i__2].r, mwpcs[i__1].i = cx[i__2].i;
            /* L120: */
        }
        zdscal_(&c__5, &sa, cx, &combla_1.incx);
        ctest_(&c__5, cx, mwpct, mwpcs, sfac);
        /*        Add a test for alpha equal to minus one. */
        sa = -1.;
        for (i__ = 1; i__ <= 5; ++i__) {
            i__1 = i__ - 1;
            i__2 = i__ - 1;
            z__1.r = -cx[i__2].r, z__1.i = -cx[i__2].i;
            mwpct[i__1].r = z__1.r, mwpct[i__1].i = z__1.i;
            i__1 = i__ - 1;
            i__2 = i__ - 1;
            z__1.r = -cx[i__2].r, z__1.i = -cx[i__2].i;
            mwpcs[i__1].r = z__1.r, mwpcs[i__1].i = z__1.i;
            /* L140: */
        }
        zdscal_(&c__5, &sa, cx, &combla_1.incx);
        ctest_(&c__5, cx, mwpct, mwpcs, sfac);
    }
    return 0;
} /* check1_ */
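For reference, a minimal stand-alone call of zscal_ itself, mirroring what the test above exercises. This sketch is not part of the original listing; the typedefs and the extern prototype are assumptions in the f2c style (on some builds "integer" is long rather than int), and the program must be linked against a BLAS/CLAPACK library that exports zscal_.

#include <stdio.h>

typedef int integer;                         /* f2c integer (may be long on some builds) */
typedef struct { double r, i; } doublecomplex;

extern int zscal_(integer *n, doublecomplex *alpha, doublecomplex *x, integer *incx);

int main(void)
{
    doublecomplex x[4] = { {1., 1.}, {2., 0.}, {0., -3.}, {4., 2.} };
    doublecomplex alpha = {0., 1.};          /* scale by the imaginary unit i */
    integer n = 4, incx = 1;
    int k;

    zscal_(&n, &alpha, x, &incx);            /* x := alpha * x, in place */

    for (k = 0; k < 4; ++k)
        printf("x[%d] = (%g, %g)\n", k, x[k].r, x[k].i);
    /* Expected output: (-1,1), (0,2), (3,0), (-2,4). */
    return 0;
}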
Example #14
0
/* Subroutine */ int zlahr2_(integer *n, integer *k, integer *nb, 
	doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *t, 
	integer *ldt, doublecomplex *y, integer *ldy)
{
    /* System generated locals */
    integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2, 
	    i__3;
    doublecomplex z__1;

    /* Local variables */
    integer i__;
    doublecomplex ei;
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *), zgemm_(char *, char *, integer *, 
	    integer *, integer *, doublecomplex *, doublecomplex *, integer *, 
	     doublecomplex *, integer *, doublecomplex *, doublecomplex *, 
	    integer *), zgemv_(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
	    integer *, doublecomplex *, doublecomplex *, integer *), 
	    zcopy_(integer *, doublecomplex *, integer *, doublecomplex *, 
	    integer *), ztrmm_(char *, char *, char *, char *, integer *, 
	    integer *, doublecomplex *, doublecomplex *, integer *, 
	    doublecomplex *, integer *), 
	    zaxpy_(integer *, doublecomplex *, doublecomplex *, integer *, 
	    doublecomplex *, integer *), ztrmv_(char *, char *, char *, 
	    integer *, doublecomplex *, integer *, doublecomplex *, integer *), zlarfg_(integer *, doublecomplex *, 
	    doublecomplex *, integer *, doublecomplex *), zlacgv_(integer *, 
	    doublecomplex *, integer *), zlacpy_(char *, integer *, integer *, 
	     doublecomplex *, integer *, doublecomplex *, integer *);


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZLAHR2 reduces the first NB columns of a complex general n-by-(n-k+1) */
/*  matrix A so that elements below the k-th subdiagonal are zero. The */
/*  reduction is performed by a unitary similarity transformation */
/*  Q' * A * Q. The routine returns the matrices V and T which determine */
/*  Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. */

/*  This is an auxiliary routine called by ZGEHRD. */

/*  Arguments */
/*  ========= */

/*  N       (input) INTEGER */
/*          The order of the matrix A. */

/*  K       (input) INTEGER */
/*          The offset for the reduction. Elements below the k-th */
/*          subdiagonal in the first NB columns are reduced to zero. */
/*          K < N. */

/*  NB      (input) INTEGER */
/*          The number of columns to be reduced. */

/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N-K+1) */
/*          On entry, the n-by-(n-k+1) general matrix A. */
/*          On exit, the elements on and above the k-th subdiagonal in */
/*          the first NB columns are overwritten with the corresponding */
/*          elements of the reduced matrix; the elements below the k-th */
/*          subdiagonal, with the array TAU, represent the matrix Q as a */
/*          product of elementary reflectors. The other columns of A are */
/*          unchanged. See Further Details. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,N). */

/*  TAU     (output) COMPLEX*16 array, dimension (NB) */
/*          The scalar factors of the elementary reflectors. See Further */
/*          Details. */

/*  T       (output) COMPLEX*16 array, dimension (LDT,NB) */
/*          The upper triangular matrix T. */

/*  LDT     (input) INTEGER */
/*          The leading dimension of the array T.  LDT >= NB. */

/*  Y       (output) COMPLEX*16 array, dimension (LDY,NB) */
/*          The n-by-nb matrix Y. */

/*  LDY     (input) INTEGER */
/*          The leading dimension of the array Y. LDY >= N. */

/*  Further Details */
/*  =============== */

/*  The matrix Q is represented as a product of nb elementary reflectors */

/*     Q = H(1) H(2) . . . H(nb). */

/*  Each H(i) has the form */

/*     H(i) = I - tau * v * v' */

/*  where tau is a complex scalar, and v is a complex vector with */
/*  v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in */
/*  A(i+k+1:n,i), and tau in TAU(i). */

/*  The elements of the vectors v together form the (n-k+1)-by-nb matrix */
/*  V which is needed, with T and Y, to apply the transformation to the */
/*  unreduced part of the matrix, using an update of the form: */
/*  A := (I - V*T*V') * (A - Y*V'). */

/*  The contents of A on exit are illustrated by the following example */
/*  with n = 7, k = 3 and nb = 2: */

/*     ( a   a   a   a   a ) */
/*     ( a   a   a   a   a ) */
/*     ( a   a   a   a   a ) */
/*     ( h   h   a   a   a ) */
/*     ( v1  h   a   a   a ) */
/*     ( v1  v2  a   a   a ) */
/*     ( v1  v2  a   a   a ) */

/*  where a denotes an element of the original matrix A, h denotes a */
/*  modified element of the upper Hessenberg matrix H, and vi denotes an */
/*  element of the vector defining H(i). */

/*  This file is a slight modification of LAPACK-3.0's ZLAHRD */
/*  incorporating improvements proposed by Quintana-Orti and Van de */
/*  Geijn. Note that the entries of A(1:K,2:NB) differ from those */
/*  returned by the original LAPACK routine. This function is */
/*  not backward compatible with LAPACK 3.0. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Quick return if possible */

    /* Parameter adjustments */
    --tau;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    t_dim1 = *ldt;
    t_offset = 1 + t_dim1;
    t -= t_offset;
    y_dim1 = *ldy;
    y_offset = 1 + y_dim1;
    y -= y_offset;

    /* Function Body */
    if (*n <= 1) {
	return 0;
    }

    i__1 = *nb;
    for (i__ = 1; i__ <= i__1; ++i__) {
	if (i__ > 1) {

/*           Update A(K+1:N,I) */

/*           Update I-th column of A - Y * V' */

	    i__2 = i__ - 1;
	    zlacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
	    i__2 = *n - *k;
	    i__3 = i__ - 1;
	    z__1.r = -1., z__1.i = -0.;
	    zgemv_("NO TRANSPOSE", &i__2, &i__3, &z__1, &y[*k + 1 + y_dim1], 
		    ldy, &a[*k + i__ - 1 + a_dim1], lda, &c_b2, &a[*k + 1 + 
		    i__ * a_dim1], &c__1);
	    i__2 = i__ - 1;
	    zlacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);

/*           Apply I - V * T' * V' to this column (call it b) from the */
/*           left, using the last column of T as workspace */

/*           Let  V = ( V1 )   and   b = ( b1 )   (first I-1 rows) */
/*                    ( V2 )             ( b2 ) */

/*           where V1 is unit lower triangular */

/*           w := V1' * b1 */

	    i__2 = i__ - 1;
	    zcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 + 
		    1], &c__1);
	    i__2 = i__ - 1;
	    ztrmv_("Lower", "Conjugate transpose", "UNIT", &i__2, &a[*k + 1 + 
		    a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1);

/*           w := w + V2'*b2 */

	    i__2 = *n - *k - i__ + 1;
	    i__3 = i__ - 1;
	    zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ + 
		    a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b2, &
		    t[*nb * t_dim1 + 1], &c__1);

/*           w := T'*w */

	    i__2 = i__ - 1;
	    ztrmv_("Upper", "Conjugate transpose", "NON-UNIT", &i__2, &t[
		    t_offset], ldt, &t[*nb * t_dim1 + 1], &c__1);

/*           b2 := b2 - V2*w */

	    i__2 = *n - *k - i__ + 1;
	    i__3 = i__ - 1;
	    z__1.r = -1., z__1.i = -0.;
	    zgemv_("NO TRANSPOSE", &i__2, &i__3, &z__1, &a[*k + i__ + a_dim1], 
		     lda, &t[*nb * t_dim1 + 1], &c__1, &c_b2, &a[*k + i__ + 
		    i__ * a_dim1], &c__1);

/*           b1 := b1 - V1*w */

	    i__2 = i__ - 1;
	    ztrmv_("Lower", "NO TRANSPOSE", "UNIT", &i__2, &a[*k + 1 + a_dim1]
, lda, &t[*nb * t_dim1 + 1], &c__1);
	    i__2 = i__ - 1;
	    z__1.r = -1., z__1.i = -0.;
	    zaxpy_(&i__2, &z__1, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__ 
		    * a_dim1], &c__1);

	    i__2 = *k + i__ - 1 + (i__ - 1) * a_dim1;
	    a[i__2].r = ei.r, a[i__2].i = ei.i;
	}

/*        Generate the elementary reflector H(I) to annihilate */
/*        A(K+I+1:N,I) */

	i__2 = *n - *k - i__ + 1;
/* Computing MIN */
	i__3 = *k + i__ + 1;
	zlarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3, *n)+ i__ * 
		a_dim1], &c__1, &tau[i__]);
	i__2 = *k + i__ + i__ * a_dim1;
	ei.r = a[i__2].r, ei.i = a[i__2].i;
	i__2 = *k + i__ + i__ * a_dim1;
	a[i__2].r = 1., a[i__2].i = 0.;
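/*        The subdiagonal entry A(K+I,I) has been saved in EI and overwritten
          with 1 so that A(K+I:N,I) holds the complete reflector vector v for
          the zgemv_ calls below; the saved value is written back at the start
          of the next iteration and after the loop. */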

/*        Compute  Y(K+1:N,I) */

	i__2 = *n - *k;
	i__3 = *n - *k - i__ + 1;
	zgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b2, &a[*k + 1 + (i__ + 1) * 
		a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &y[*
		k + 1 + i__ * y_dim1], &c__1);
	i__2 = *n - *k - i__ + 1;
	i__3 = i__ - 1;
	zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i__ + 
		a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1, &t[
		i__ * t_dim1 + 1], &c__1);
	i__2 = *n - *k;
	i__3 = i__ - 1;
	z__1.r = -1., z__1.i = -0.;
	zgemv_("NO TRANSPOSE", &i__2, &i__3, &z__1, &y[*k + 1 + y_dim1], ldy, 
		&t[i__ * t_dim1 + 1], &c__1, &c_b2, &y[*k + 1 + i__ * y_dim1], 
		 &c__1);
	i__2 = *n - *k;
	zscal_(&i__2, &tau[i__], &y[*k + 1 + i__ * y_dim1], &c__1);

/*        Compute T(1:I,I) */

	i__2 = i__ - 1;
	i__3 = i__;
	z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i;
	zscal_(&i__2, &z__1, &t[i__ * t_dim1 + 1], &c__1);
	i__2 = i__ - 1;
	ztrmv_("Upper", "No Transpose", "NON-UNIT", &i__2, &t[t_offset], ldt, 
		&t[i__ * t_dim1 + 1], &c__1)
		;
	i__2 = i__ + i__ * t_dim1;
	i__3 = i__;
	t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;

/* L10: */
    }
    i__1 = *k + *nb + *nb * a_dim1;
    a[i__1].r = ei.r, a[i__1].i = ei.i;

/*     Compute Y(1:K,1:NB) */

    zlacpy_("ALL", k, nb, &a[(a_dim1 << 1) + 1], lda, &y[y_offset], ldy);
    ztrmm_("RIGHT", "Lower", "NO TRANSPOSE", "UNIT", k, nb, &c_b2, &a[*k + 1 
	    + a_dim1], lda, &y[y_offset], ldy);
    if (*n > *k + *nb) {
	i__1 = *n - *k - *nb;
	zgemm_("NO TRANSPOSE", "NO TRANSPOSE", k, nb, &i__1, &c_b2, &a[(*nb + 
		2) * a_dim1 + 1], lda, &a[*k + 1 + *nb + a_dim1], lda, &c_b2, 
		&y[y_offset], ldy);
    }
    ztrmm_("RIGHT", "Upper", "NO TRANSPOSE", "NON-UNIT", k, nb, &c_b2, &t[
	    t_offset], ldt, &y[y_offset], ldy);

    return 0;

/*     End of ZLAHR2 */

} /* zlahr2_ */
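For reference, the "apply I - V * T' * V' to b" sequence above (ZCOPY, two ZTRMV calls, two ZGEMV calls and a ZAXPY) is easier to follow when written against plain pointers. The sketch below is not part of ZLAHR2; it is a hypothetical helper that assumes the f2c types from f2c.h and the f2c-style BLAS prototypes used throughout these listings, with V holding the unit lower triangular block V1 stacked on V2 and T the triangular factor built so far.

#include "f2c.h"   /* integer, doublecomplex */

/* f2c-style BLAS prototypes, as declared in the surrounding listings */
extern int zcopy_(integer *, doublecomplex *, integer *, doublecomplex *, integer *);
extern int zaxpy_(integer *, doublecomplex *, doublecomplex *, integer *, doublecomplex *, integer *);
extern int zgemv_(char *, integer *, integer *, doublecomplex *, doublecomplex *, integer *,
                  doublecomplex *, integer *, doublecomplex *, doublecomplex *, integer *);
extern int ztrmv_(char *, char *, char *, integer *, doublecomplex *, integer *,
                  doublecomplex *, integer *);

/* Hypothetical restatement of the column update b := (I - V*T'*V') * b, where
   V = [ V1 ; V2 ] has nv rows and i1 columns, V1 (i1 x i1) is unit lower
   triangular, T is the i1 x i1 triangular factor, and w is a workspace of
   length i1 (ZLAHR2 uses the last column of T for this). */
static void apply_block_reflector_left(integer i1, integer nv,
                                       doublecomplex *v, integer ldv,
                                       doublecomplex *t, integer ldt,
                                       doublecomplex *b, doublecomplex *w)
{
    static doublecomplex one = {1., 0.}, mone = {-1., 0.};
    static integer c1 = 1;
    integer n2 = nv - i1;                                    /* rows of V2 and b2   */

    zcopy_(&i1, b, &c1, w, &c1);                             /* w  := b1            */
    ztrmv_("Lower", "Conjugate transpose", "Unit",
           &i1, v, &ldv, w, &c1);                            /* w  := V1' * w       */
    zgemv_("Conjugate transpose", &n2, &i1, &one,
           &v[i1], &ldv, &b[i1], &c1, &one, w, &c1);         /* w  := w + V2' * b2  */
    ztrmv_("Upper", "Conjugate transpose", "Non-unit",
           &i1, t, &ldt, w, &c1);                            /* w  := T' * w        */
    zgemv_("No transpose", &n2, &i1, &mone,
           &v[i1], &ldv, w, &c1, &one, &b[i1], &c1);         /* b2 := b2 - V2 * w   */
    ztrmv_("Lower", "No transpose", "Unit",
           &i1, v, &ldv, w, &c1);                            /* w  := V1 * w        */
    zaxpy_(&i1, &mone, w, &c1, b, &c1);                      /* b1 := b1 - w        */
}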
Example #15
0
/* Subroutine */ int zlavsy_(char *uplo, char *trans, char *diag, integer *n, 
	integer *nrhs, doublecomplex *a, integer *lda, integer *ipiv, 
	doublecomplex *b, integer *ldb, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
    doublecomplex z__1, z__2, z__3;

    /* Local variables */
    static integer j, k;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *), zgemv_(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
	    integer *, doublecomplex *, doublecomplex *, integer *), 
	    zgeru_(integer *, integer *, doublecomplex *, doublecomplex *, 
	    integer *, doublecomplex *, integer *, doublecomplex *, integer *)
	    , zswap_(integer *, doublecomplex *, integer *, doublecomplex *, 
	    integer *);
    static doublecomplex t1, t2, d11, d12, d21, d22;
    static integer kp;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    static logical nounit;


#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]


/*  -- LAPACK auxiliary routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

       ZLAVSY  performs one of the matrix-vector operations   
          x := A*x  or  x := A'*x,   
       where x is an N element vector and  A is one of the factors   
       from the symmetric factorization computed by ZSYTRF.   
       ZSYTRF produces a factorization of the form   
            U * D * U'      or     L * D * L' ,   
       where U (or L) is a product of permutation and unit upper (lower)   
       triangular matrices, U' (or L') is the transpose of   
       U (or L), and D is symmetric and block diagonal with 1 x 1 and   
       2 x 2 diagonal blocks.  The multipliers for the transformations   
       and the upper or lower triangular parts of the diagonal blocks   
       are stored in the leading upper or lower triangle of the 2-D   
       array A.   

       If TRANS = 'N' or 'n', ZLAVSY multiplies either by U or U * D   
       (or L or L * D).   
       If TRANS = 'T' or 't', ZLAVSY multiplies either by U' or D * U'   
       (or L' or D * L' ).   

    Arguments   
    ==========   

    UPLO   - CHARACTER*1   
             On entry, UPLO specifies whether the triangular matrix   
             stored in A is upper or lower triangular.   
                UPLO = 'U' or 'u'   The matrix is upper triangular.   
                UPLO = 'L' or 'l'   The matrix is lower triangular.   
             Unchanged on exit.   

    TRANS  - CHARACTER*1   
             On entry, TRANS specifies the operation to be performed as   
             follows:   
                TRANS = 'N' or 'n'   x := A*x.   
                TRANS = 'T' or 't'   x := A'*x.   
             Unchanged on exit.   

    DIAG   - CHARACTER*1   
             On entry, DIAG specifies whether the diagonal blocks are   
             assumed to be unit matrices:   
                DIAG = 'U' or 'u'   Diagonal blocks are unit matrices.   
                DIAG = 'N' or 'n'   Diagonal blocks are non-unit.   
             Unchanged on exit.   

    N      - INTEGER   
             On entry, N specifies the order of the matrix A.   
             N must be at least zero.   
             Unchanged on exit.   

    NRHS   - INTEGER   
             On entry, NRHS specifies the number of right hand sides,   
             i.e., the number of vectors x to be multiplied by A.   
             NRHS must be at least zero.   
             Unchanged on exit.   

    A      - COMPLEX*16 array, dimension( LDA, N )   
             On entry, A contains a block diagonal matrix and the   
             multipliers of the transformations used to obtain it,   
             stored as a 2-D triangular matrix.   
             Unchanged on exit.   

    LDA    - INTEGER   
             On entry, LDA specifies the first dimension of A as declared   
             in the calling ( sub ) program. LDA must be at least   
             max( 1, N ).   
             Unchanged on exit.   

    IPIV   - INTEGER array, dimension( N )   
             On entry, IPIV contains the vector of pivot indices as   
             determined by ZSYTRF or ZHETRF.   
             If IPIV( K ) = K, no interchange was done.   
             If IPIV( K ) <> K but IPIV( K ) > 0, then row K was inter-   
             changed with row IPIV( K ) and a 1 x 1 pivot block was used.   
             If IPIV( K ) < 0 and UPLO = 'U', then row K-1 was exchanged   
             with row | IPIV( K ) | and a 2 x 2 pivot block was used.   
             If IPIV( K ) < 0 and UPLO = 'L', then row K+1 was exchanged   
             with row | IPIV( K ) | and a 2 x 2 pivot block was used.   

    B      - COMPLEX*16 array, dimension( LDB, NRHS )   
             On entry, B contains NRHS vectors of length N.   
             On exit, B is overwritten with the product A * B.   

    LDB    - INTEGER   
             On entry, LDB contains the leading dimension of B as   
             declared in the calling program.  LDB must be at least   
             max( 1, N ).   
             Unchanged on exit.   

    INFO   - INTEGER   
             INFO is the error flag.   
             On exit, a value of 0 indicates a successful exit.   
             A negative value, say -K, indicates that the K-th argument   
             has an illegal value.   

    =====================================================================   


       Test the input parameters.   

       Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --ipiv;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;

    /* Function Body */
    *info = 0;
    if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
	*info = -1;
    } else if (! lsame_(trans, "N") && ! lsame_(trans, 
	    "T")) {
	*info = -2;
    } else if (! lsame_(diag, "U") && ! lsame_(diag, 
	    "N")) {
	*info = -3;
    } else if (*n < 0) {
	*info = -4;
    } else if (*lda < max(1,*n)) {
	*info = -6;
    } else if (*ldb < max(1,*n)) {
	*info = -9;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZLAVSY ", &i__1);
	return 0;
    }

/*     Quick return if possible. */

    if (*n == 0) {
	return 0;
    }

    nounit = lsame_(diag, "N");
/* ------------------------------------------   

       Compute  B := A * B  (No transpose)   

   ------------------------------------------ */
    if (lsame_(trans, "N")) {

/*        Compute  B := U*B   
          where U = P(m)*inv(U(m))* ... *P(1)*inv(U(1)) */

	if (lsame_(uplo, "U")) {

/*        Loop forward applying the transformations. */

	    k = 1;
L10:
	    if (k > *n) {
		goto L30;
	    }
	    if (ipiv[k] > 0) {

/*              1 x 1 pivot block   

                Multiply by the diagonal element if forming U * D. */

		if (nounit) {
		    zscal_(nrhs, &a_ref(k, k), &b_ref(k, 1), ldb);
		}

/*              Multiply by  P(K) * inv(U(K))  if K > 1. */

		if (k > 1) {

/*                 Apply the transformation. */

		    i__1 = k - 1;
		    zgeru_(&i__1, nrhs, &c_b1, &a_ref(1, k), &c__1, &b_ref(k, 
			    1), ldb, &b_ref(1, 1), ldb);

/*                 Interchange if P(K) != I. */

		    kp = ipiv[k];
		    if (kp != k) {
			zswap_(nrhs, &b_ref(k, 1), ldb, &b_ref(kp, 1), ldb);
		    }
		}
		++k;
	    } else {

/*              2 x 2 pivot block   

                Multiply by the diagonal block if forming U * D. */

		if (nounit) {
		    i__1 = a_subscr(k, k);
		    d11.r = a[i__1].r, d11.i = a[i__1].i;
		    i__1 = a_subscr(k + 1, k + 1);
		    d22.r = a[i__1].r, d22.i = a[i__1].i;
		    i__1 = a_subscr(k, k + 1);
		    d12.r = a[i__1].r, d12.i = a[i__1].i;
		    d21.r = d12.r, d21.i = d12.i;
		    i__1 = *nrhs;
		    for (j = 1; j <= i__1; ++j) {
			i__2 = b_subscr(k, j);
			t1.r = b[i__2].r, t1.i = b[i__2].i;
			i__2 = b_subscr(k + 1, j);
			t2.r = b[i__2].r, t2.i = b[i__2].i;
			i__2 = b_subscr(k, j);
			z__2.r = d11.r * t1.r - d11.i * t1.i, z__2.i = d11.r *
				 t1.i + d11.i * t1.r;
			z__3.r = d12.r * t2.r - d12.i * t2.i, z__3.i = d12.r *
				 t2.i + d12.i * t2.r;
			z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
			b[i__2].r = z__1.r, b[i__2].i = z__1.i;
			i__2 = b_subscr(k + 1, j);
			z__2.r = d21.r * t1.r - d21.i * t1.i, z__2.i = d21.r *
				 t1.i + d21.i * t1.r;
			z__3.r = d22.r * t2.r - d22.i * t2.i, z__3.i = d22.r *
				 t2.i + d22.i * t2.r;
			z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
			b[i__2].r = z__1.r, b[i__2].i = z__1.i;
/* L20: */
		    }
		}

/*              Multiply by  P(K) * inv(U(K))  if K > 1. */

		if (k > 1) {

/*                 Apply the transformations. */

		    i__1 = k - 1;
		    zgeru_(&i__1, nrhs, &c_b1, &a_ref(1, k), &c__1, &b_ref(k, 
			    1), ldb, &b_ref(1, 1), ldb);
		    i__1 = k - 1;
		    zgeru_(&i__1, nrhs, &c_b1, &a_ref(1, k + 1), &c__1, &
			    b_ref(k + 1, 1), ldb, &b_ref(1, 1), ldb);

/*                 Interchange if P(K) != I. */

		    kp = (i__1 = ipiv[k], abs(i__1));
		    if (kp != k) {
			zswap_(nrhs, &b_ref(k, 1), ldb, &b_ref(kp, 1), ldb);
		    }
		}
		k += 2;
	    }
	    goto L10;
L30:

/*        Compute  B := L*B   
          where L = P(1)*inv(L(1))* ... *P(m)*inv(L(m)) . */

	    ;
	} else {

/*           Loop backward applying the transformations to B. */

	    k = *n;
L40:
	    if (k < 1) {
		goto L60;
	    }

/*           Test the pivot index.  If greater than zero, a 1 x 1   
             pivot was used, otherwise a 2 x 2 pivot was used. */

	    if (ipiv[k] > 0) {

/*              1 x 1 pivot block:   

                Multiply by the diagonal element if forming L * D. */

		if (nounit) {
		    zscal_(nrhs, &a_ref(k, k), &b_ref(k, 1), ldb);
		}

/*              Multiply by  P(K) * inv(L(K))  if K < N. */

		if (k != *n) {
		    kp = ipiv[k];

/*                 Apply the transformation. */

		    i__1 = *n - k;
		    zgeru_(&i__1, nrhs, &c_b1, &a_ref(k + 1, k), &c__1, &
			    b_ref(k, 1), ldb, &b_ref(k + 1, 1), ldb);

/*                 Interchange if a permutation was applied at the   
                   K-th step of the factorization. */

		    if (kp != k) {
			zswap_(nrhs, &b_ref(k, 1), ldb, &b_ref(kp, 1), ldb);
		    }
		}
		--k;

	    } else {

/*              2 x 2 pivot block:   

                Multiply by the diagonal block if forming L * D. */

		if (nounit) {
		    i__1 = a_subscr(k - 1, k - 1);
		    d11.r = a[i__1].r, d11.i = a[i__1].i;
		    i__1 = a_subscr(k, k);
		    d22.r = a[i__1].r, d22.i = a[i__1].i;
		    i__1 = a_subscr(k, k - 1);
		    d21.r = a[i__1].r, d21.i = a[i__1].i;
		    d12.r = d21.r, d12.i = d21.i;
		    i__1 = *nrhs;
		    for (j = 1; j <= i__1; ++j) {
			i__2 = b_subscr(k - 1, j);
			t1.r = b[i__2].r, t1.i = b[i__2].i;
			i__2 = b_subscr(k, j);
			t2.r = b[i__2].r, t2.i = b[i__2].i;
			i__2 = b_subscr(k - 1, j);
			z__2.r = d11.r * t1.r - d11.i * t1.i, z__2.i = d11.r *
				 t1.i + d11.i * t1.r;
			z__3.r = d12.r * t2.r - d12.i * t2.i, z__3.i = d12.r *
				 t2.i + d12.i * t2.r;
			z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
			b[i__2].r = z__1.r, b[i__2].i = z__1.i;
			i__2 = b_subscr(k, j);
			z__2.r = d21.r * t1.r - d21.i * t1.i, z__2.i = d21.r *
				 t1.i + d21.i * t1.r;
			z__3.r = d22.r * t2.r - d22.i * t2.i, z__3.i = d22.r *
				 t2.i + d22.i * t2.r;
			z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
			b[i__2].r = z__1.r, b[i__2].i = z__1.i;
/* L50: */
		    }
		}

/*              Multiply by  P(K) * inv(L(K))  if K < N. */

		if (k != *n) {

/*                 Apply the transformation. */

		    i__1 = *n - k;
		    zgeru_(&i__1, nrhs, &c_b1, &a_ref(k + 1, k), &c__1, &
			    b_ref(k, 1), ldb, &b_ref(k + 1, 1), ldb);
		    i__1 = *n - k;
		    zgeru_(&i__1, nrhs, &c_b1, &a_ref(k + 1, k - 1), &c__1, &
			    b_ref(k - 1, 1), ldb, &b_ref(k + 1, 1), ldb);

/*                 Interchange if a permutation was applied at the   
                   K-th step of the factorization. */

		    kp = (i__1 = ipiv[k], abs(i__1));
		    if (kp != k) {
			zswap_(nrhs, &b_ref(k, 1), ldb, &b_ref(kp, 1), ldb);
		    }
		}
		k += -2;
	    }
	    goto L40;
L60:
	    ;
	}
/* ----------------------------------------   

       Compute  B := A' * B  (transpose)   

   ---------------------------------------- */
    } else if (lsame_(trans, "T")) {

/*        Form  B := U'*B   
          where U  = P(m)*inv(U(m))* ... *P(1)*inv(U(1))   
          and   U' = inv(U'(1))*P(1)* ... *inv(U'(m))*P(m) */

	if (lsame_(uplo, "U")) {

/*           Loop backward applying the transformations. */

	    k = *n;
L70:
	    if (k < 1) {
		goto L90;
	    }

/*           1 x 1 pivot block. */

	    if (ipiv[k] > 0) {
		if (k > 1) {

/*                 Interchange if P(K) != I. */

		    kp = ipiv[k];
		    if (kp != k) {
			zswap_(nrhs, &b_ref(k, 1), ldb, &b_ref(kp, 1), ldb);
		    }

/*                 Apply the transformation */

		    i__1 = k - 1;
		    zgemv_("Transpose", &i__1, nrhs, &c_b1, &b[b_offset], ldb,
			     &a_ref(1, k), &c__1, &c_b1, &b_ref(k, 1), ldb);
		}
		if (nounit) {
		    zscal_(nrhs, &a_ref(k, k), &b_ref(k, 1), ldb);
		}
		--k;

/*           2 x 2 pivot block. */

	    } else {
		if (k > 2) {

/*                 Interchange if P(K) != I. */

		    kp = (i__1 = ipiv[k], abs(i__1));
		    if (kp != k - 1) {
			zswap_(nrhs, &b_ref(k - 1, 1), ldb, &b_ref(kp, 1), 
				ldb);
		    }

/*                 Apply the transformations */

		    i__1 = k - 2;
		    zgemv_("Transpose", &i__1, nrhs, &c_b1, &b[b_offset], ldb,
			     &a_ref(1, k), &c__1, &c_b1, &b_ref(k, 1), ldb);
		    i__1 = k - 2;
		    zgemv_("Transpose", &i__1, nrhs, &c_b1, &b[b_offset], ldb,
			     &a_ref(1, k - 1), &c__1, &c_b1, &b_ref(k - 1, 1),
			     ldb);
		}

/*              Multiply by the diagonal block if non-unit. */

		if (nounit) {
		    i__1 = a_subscr(k - 1, k - 1);
		    d11.r = a[i__1].r, d11.i = a[i__1].i;
		    i__1 = a_subscr(k, k);
		    d22.r = a[i__1].r, d22.i = a[i__1].i;
		    i__1 = a_subscr(k - 1, k);
		    d12.r = a[i__1].r, d12.i = a[i__1].i;
		    d21.r = d12.r, d21.i = d12.i;
		    i__1 = *nrhs;
		    for (j = 1; j <= i__1; ++j) {
			i__2 = b_subscr(k - 1, j);
			t1.r = b[i__2].r, t1.i = b[i__2].i;
			i__2 = b_subscr(k, j);
			t2.r = b[i__2].r, t2.i = b[i__2].i;
			i__2 = b_subscr(k - 1, j);
			z__2.r = d11.r * t1.r - d11.i * t1.i, z__2.i = d11.r *
				 t1.i + d11.i * t1.r;
			z__3.r = d12.r * t2.r - d12.i * t2.i, z__3.i = d12.r *
				 t2.i + d12.i * t2.r;
			z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
			b[i__2].r = z__1.r, b[i__2].i = z__1.i;
			i__2 = b_subscr(k, j);
			z__2.r = d21.r * t1.r - d21.i * t1.i, z__2.i = d21.r *
				 t1.i + d21.i * t1.r;
			z__3.r = d22.r * t2.r - d22.i * t2.i, z__3.i = d22.r *
				 t2.i + d22.i * t2.r;
			z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
			b[i__2].r = z__1.r, b[i__2].i = z__1.i;
/* L80: */
		    }
		}
		k += -2;
	    }
	    goto L70;
L90:

/*        Form  B := L'*B   
          where L  = P(1)*inv(L(1))* ... *P(m)*inv(L(m))   
          and   L' = inv(L'(m))*P(m)* ... *inv(L'(1))*P(1) */

	    ;
	} else {

/*           Loop forward applying the L-transformations. */

	    k = 1;
L100:
	    if (k > *n) {
		goto L120;
	    }

/*           1 x 1 pivot block */

	    if (ipiv[k] > 0) {
		if (k < *n) {

/*                 Interchange if P(K) != I. */

		    kp = ipiv[k];
		    if (kp != k) {
			zswap_(nrhs, &b_ref(k, 1), ldb, &b_ref(kp, 1), ldb);
		    }

/*                 Apply the transformation */

		    i__1 = *n - k;
		    zgemv_("Transpose", &i__1, nrhs, &c_b1, &b_ref(k + 1, 1), 
			    ldb, &a_ref(k + 1, k), &c__1, &c_b1, &b_ref(k, 1),
			     ldb);
		}
		if (nounit) {
		    zscal_(nrhs, &a_ref(k, k), &b_ref(k, 1), ldb);
		}
		++k;

/*           2 x 2 pivot block. */

	    } else {
		if (k < *n - 1) {

/*              Interchange if P(K) != I. */

		    kp = (i__1 = ipiv[k], abs(i__1));
		    if (kp != k + 1) {
			zswap_(nrhs, &b_ref(k + 1, 1), ldb, &b_ref(kp, 1), 
				ldb);
		    }

/*                 Apply the transformation */

		    i__1 = *n - k - 1;
		    zgemv_("Transpose", &i__1, nrhs, &c_b1, &b_ref(k + 2, 1), 
			    ldb, &a_ref(k + 2, k + 1), &c__1, &c_b1, &b_ref(k 
			    + 1, 1), ldb);
		    i__1 = *n - k - 1;
		    zgemv_("Transpose", &i__1, nrhs, &c_b1, &b_ref(k + 2, 1), 
			    ldb, &a_ref(k + 2, k), &c__1, &c_b1, &b_ref(k, 1),
			     ldb);
		}

/*              Multiply by the diagonal block if non-unit. */

		if (nounit) {
		    i__1 = a_subscr(k, k);
		    d11.r = a[i__1].r, d11.i = a[i__1].i;
		    i__1 = a_subscr(k + 1, k + 1);
		    d22.r = a[i__1].r, d22.i = a[i__1].i;
		    i__1 = a_subscr(k + 1, k);
		    d21.r = a[i__1].r, d21.i = a[i__1].i;
		    d12.r = d21.r, d12.i = d21.i;
		    i__1 = *nrhs;
		    for (j = 1; j <= i__1; ++j) {
			i__2 = b_subscr(k, j);
			t1.r = b[i__2].r, t1.i = b[i__2].i;
			i__2 = b_subscr(k + 1, j);
			t2.r = b[i__2].r, t2.i = b[i__2].i;
			i__2 = b_subscr(k, j);
			z__2.r = d11.r * t1.r - d11.i * t1.i, z__2.i = d11.r *
				 t1.i + d11.i * t1.r;
			z__3.r = d12.r * t2.r - d12.i * t2.i, z__3.i = d12.r *
				 t2.i + d12.i * t2.r;
			z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
			b[i__2].r = z__1.r, b[i__2].i = z__1.i;
			i__2 = b_subscr(k + 1, j);
			z__2.r = d21.r * t1.r - d21.i * t1.i, z__2.i = d21.r *
				 t1.i + d21.i * t1.r;
			z__3.r = d22.r * t2.r - d22.i * t2.i, z__3.i = d22.r *
				 t2.i + d22.i * t2.r;
			z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
			b[i__2].r = z__1.r, b[i__2].i = z__1.i;
/* L110: */
		    }
		}
		k += 2;
	    }
	    goto L100;
L120:
	    ;
	}
    }
    return 0;

/*     End of ZLAVSY */

} /* zlavsy_ */
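The 2 x 2 pivot branches above expand the complex arithmetic by hand through the z__1/z__2/z__3 temporaries. The same row update, [ B(k,j) ; B(k+1,j) ] := [ d11 d12 ; d21 d22 ] * [ B(k,j) ; B(k+1,j) ], can be sketched more compactly; the helpers below (zmul, zadd, apply_2x2_block) are illustrative names, not LAPACK routines, and assume the f2c doublecomplex struct.

#include "f2c.h"   /* doublecomplex */

/* Illustrative complex helpers (not BLAS/LAPACK routines) */
static doublecomplex zmul(doublecomplex a, doublecomplex b)
{
    doublecomplex c;
    c.r = a.r * b.r - a.i * b.i;
    c.i = a.r * b.i + a.i * b.r;
    return c;
}

static doublecomplex zadd(doublecomplex a, doublecomplex b)
{
    doublecomplex c;
    c.r = a.r + b.r;
    c.i = a.i + b.i;
    return c;
}

/* Rows k and k+1 of B (nrhs columns, leading dimension ldb) are replaced by
   [ d11 d12 ; d21 d22 ] * [ B(k,j) ; B(k+1,j) ] for every column j.
   bk and bk1 point at B(k,1) and B(k+1,1) respectively. */
static void apply_2x2_block(int nrhs, int ldb,
                            doublecomplex d11, doublecomplex d12,
                            doublecomplex d21, doublecomplex d22,
                            doublecomplex *bk, doublecomplex *bk1)
{
    int j;
    for (j = 0; j < nrhs; ++j) {
        doublecomplex t1 = bk[j * ldb];                      /* B(k,   j+1) */
        doublecomplex t2 = bk1[j * ldb];                     /* B(k+1, j+1) */
        bk[j * ldb]  = zadd(zmul(d11, t1), zmul(d12, t2));   /* d11*t1 + d12*t2 */
        bk1[j * ldb] = zadd(zmul(d21, t1), zmul(d22, t2));   /* d21*t1 + d22*t2 */
    }
}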
Example #16
0
/*! comple*_zgbmatrix operator */
inline _zgbmatrix operator*(const comple& d, const _zgbmatrix& mat)
{VERBOSE_REPORT;
  zscal_((mat.kl+mat.ku+1)*mat.n, d, mat.array, 1);
  return mat;
}
Example #17
0
/* Subroutine */ int zgbbrd_(char *vect, integer *m, integer *n, integer *ncc, 
	 integer *kl, integer *ku, doublecomplex *ab, integer *ldab, 
	doublereal *d__, doublereal *e, doublecomplex *q, integer *ldq, 
	doublecomplex *pt, integer *ldpt, doublecomplex *c__, integer *ldc, 
	doublecomplex *work, doublereal *rwork, integer *info)
{
    /* System generated locals */
    integer ab_dim1, ab_offset, c_dim1, c_offset, pt_dim1, pt_offset, q_dim1, 
	    q_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
    doublecomplex z__1, z__2, z__3;

    /* Local variables */
    integer i__, j, l;
    doublecomplex t;
    integer j1, j2, kb;
    doublecomplex ra, rb;
    doublereal rc;
    integer kk, ml, nr, mu;
    doublecomplex rs;
    integer kb1, ml0, mu0, klm, kun, nrt, klu1, inca;
    doublereal abst;
    logical wantb, wantc;
    integer minmn;
    logical wantq;
    logical wantpt;

/*  -- LAPACK routine (version 3.2) -- */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  ZGBBRD reduces a complex general m-by-n band matrix A to real upper */
/*  bidiagonal form B by a unitary transformation: Q' * A * P = B. */

/*  The routine computes B, and optionally forms Q or P', or computes */
/*  Q'*C for a given matrix C. */

/*  Arguments */
/*  ========= */

/*  VECT    (input) CHARACTER*1 */
/*          Specifies whether or not the matrices Q and P' are to be */
/*          formed. */
/*          = 'N': do not form Q or P'; */
/*          = 'Q': form Q only; */
/*          = 'P': form P' only; */
/*          = 'B': form both. */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A.  M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A.  N >= 0. */

/*  NCC     (input) INTEGER */
/*          The number of columns of the matrix C.  NCC >= 0. */

/*  KL      (input) INTEGER */
/*          The number of subdiagonals of the matrix A. KL >= 0. */

/*  KU      (input) INTEGER */
/*          The number of superdiagonals of the matrix A. KU >= 0. */

/*  AB      (input/output) COMPLEX*16 array, dimension (LDAB,N) */
/*          On entry, the m-by-n band matrix A, stored in rows 1 to */
/*          KL+KU+1. The j-th column of A is stored in the j-th column of */
/*          the array AB as follows: */
/*          AB(ku+1+i-j,j) = A(i,j) for max(1,j-ku)<=i<=min(m,j+kl). */
/*          On exit, A is overwritten by values generated during the */
/*          reduction. */

/*  LDAB    (input) INTEGER */
/*          The leading dimension of the array A. LDAB >= KL+KU+1. */

/*  D       (output) DOUBLE PRECISION array, dimension (min(M,N)) */
/*          The diagonal elements of the bidiagonal matrix B. */

/*  E       (output) DOUBLE PRECISION array, dimension (min(M,N)-1) */
/*          The superdiagonal elements of the bidiagonal matrix B. */

/*  Q       (output) COMPLEX*16 array, dimension (LDQ,M) */
/*          If VECT = 'Q' or 'B', the m-by-m unitary matrix Q. */
/*          If VECT = 'N' or 'P', the array Q is not referenced. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. */
/*          LDQ >= max(1,M) if VECT = 'Q' or 'B'; LDQ >= 1 otherwise. */

/*  PT      (output) COMPLEX*16 array, dimension (LDPT,N) */
/*          If VECT = 'P' or 'B', the n-by-n unitary matrix P'. */
/*          If VECT = 'N' or 'Q', the array PT is not referenced. */

/*  LDPT    (input) INTEGER */
/*          The leading dimension of the array PT. */
/*          LDPT >= max(1,N) if VECT = 'P' or 'B'; LDPT >= 1 otherwise. */

/*  C       (input/output) COMPLEX*16 array, dimension (LDC,NCC) */
/*          On entry, an m-by-ncc matrix C. */
/*          On exit, C is overwritten by Q'*C. */
/*          C is not referenced if NCC = 0. */

/*  LDC     (input) INTEGER */
/*          The leading dimension of the array C. */
/*          LDC >= max(1,M) if NCC > 0; LDC >= 1 if NCC = 0. */

/*  WORK    (workspace) COMPLEX*16 array, dimension (max(M,N)) */

/*  RWORK   (workspace) DOUBLE PRECISION array, dimension (max(M,N)) */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit. */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */

/*  ===================================================================== */

/*     Test the input parameters */

    /* Parameter adjustments */
    ab_dim1 = *ldab;
    ab_offset = 1 + ab_dim1;
    ab -= ab_offset;
    --d__;
    --e;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    pt_dim1 = *ldpt;
    pt_offset = 1 + pt_dim1;
    pt -= pt_offset;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1;
    c__ -= c_offset;
    --work;
    --rwork;

    /* Function Body */
    wantb = lsame_(vect, "B");
    wantq = lsame_(vect, "Q") || wantb;
    wantpt = lsame_(vect, "P") || wantb;
    wantc = *ncc > 0;
    klu1 = *kl + *ku + 1;
    *info = 0;
    if (! wantq && ! wantpt && ! lsame_(vect, "N")) {
	*info = -1;
    } else if (*m < 0) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*ncc < 0) {
	*info = -4;
    } else if (*kl < 0) {
	*info = -5;
    } else if (*ku < 0) {
	*info = -6;
    } else if (*ldab < klu1) {
	*info = -8;
    } else if (*ldq < 1 || wantq && *ldq < max(1,*m)) {
	*info = -12;
    } else if (*ldpt < 1 || wantpt && *ldpt < max(1,*n)) {
	*info = -14;
    } else if (*ldc < 1 || wantc && *ldc < max(1,*m)) {
	*info = -16;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZGBBRD", &i__1);
	return 0;
    }

/*     Initialize Q and P' to the unit matrix, if needed */

    if (wantq) {
	zlaset_("Full", m, m, &c_b1, &c_b2, &q[q_offset], ldq);
    }
    if (wantpt) {
	zlaset_("Full", n, n, &c_b1, &c_b2, &pt[pt_offset], ldpt);
    }

/*     Quick return if possible. */

    if (*m == 0 || *n == 0) {
	return 0;
    }

    minmn = min(*m,*n);

    if (*kl + *ku > 1) {

/*        Reduce to upper bidiagonal form if KU > 0; if KU = 0, reduce */
/*        first to lower bidiagonal form and then transform to upper */
/*        bidiagonal */

	if (*ku > 0) {
	    ml0 = 1;
	    mu0 = 2;
	} else {
	    ml0 = 2;
	    mu0 = 1;
	}

/*        Wherever possible, plane rotations are generated and applied in */
/*        vector operations of length NR over the index set J1:J2:KLU1. */

/*        The complex sines of the plane rotations are stored in WORK, */
/*        and the real cosines in RWORK. */

/* Computing MIN */
	i__1 = *m - 1;
	klm = min(i__1,*kl);
/* Computing MIN */
	i__1 = *n - 1;
	kun = min(i__1,*ku);
	kb = klm + kun;
	kb1 = kb + 1;
	inca = kb1 * *ldab;
	nr = 0;
	j1 = klm + 2;
	j2 = 1 - kun;

	i__1 = minmn;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Reduce i-th column and i-th row of matrix to bidiagonal form */

	    ml = klm + 1;
	    mu = kun + 1;
	    i__2 = kb;
	    for (kk = 1; kk <= i__2; ++kk) {
		j1 += kb;
		j2 += kb;

/*              generate plane rotations to annihilate nonzero elements */
/*              which have been created below the band */

		if (nr > 0) {
		    zlargv_(&nr, &ab[klu1 + (j1 - klm - 1) * ab_dim1], &inca, 
			    &work[j1], &kb1, &rwork[j1], &kb1);
		}

/*              apply plane rotations from the left */

		i__3 = kb;
		for (l = 1; l <= i__3; ++l) {
		    if (j2 - klm + l - 1 > *n) {
			nrt = nr - 1;
		    } else {
			nrt = nr;
		    }
		    if (nrt > 0) {
			zlartv_(&nrt, &ab[klu1 - l + (j1 - klm + l - 1) * 
				ab_dim1], &inca, &ab[klu1 - l + 1 + (j1 - klm 
				+ l - 1) * ab_dim1], &inca, &rwork[j1], &work[
				j1], &kb1);
		    }
		}

		if (ml > ml0) {
		    if (ml <= *m - i__ + 1) {

/*                    generate plane rotation to annihilate a(i+ml-1,i) */
/*                    within the band, and apply rotation from the left */

			zlartg_(&ab[*ku + ml - 1 + i__ * ab_dim1], &ab[*ku + 
				ml + i__ * ab_dim1], &rwork[i__ + ml - 1], &
				work[i__ + ml - 1], &ra);
			i__3 = *ku + ml - 1 + i__ * ab_dim1;
			ab[i__3].r = ra.r, ab[i__3].i = ra.i;
			if (i__ < *n) {
/* Computing MIN */
			    i__4 = *ku + ml - 2, i__5 = *n - i__;
			    i__3 = min(i__4,i__5);
			    i__6 = *ldab - 1;
			    i__7 = *ldab - 1;
			    zrot_(&i__3, &ab[*ku + ml - 2 + (i__ + 1) * 
				    ab_dim1], &i__6, &ab[*ku + ml - 1 + (i__ 
				    + 1) * ab_dim1], &i__7, &rwork[i__ + ml - 
				    1], &work[i__ + ml - 1]);
			}
		    }
		    ++nr;
		    j1 -= kb1;
		}

		if (wantq) {

/*                 accumulate product of plane rotations in Q */

		    i__3 = j2;
		    i__4 = kb1;
		    for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) 
			    {
			d_cnjg(&z__1, &work[j]);
			zrot_(m, &q[(j - 1) * q_dim1 + 1], &c__1, &q[j * 
				q_dim1 + 1], &c__1, &rwork[j], &z__1);
		    }
		}

		if (wantc) {

/*                 apply plane rotations to C */

		    i__4 = j2;
		    i__3 = kb1;
		    for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) 
			    {
			zrot_(ncc, &c__[j - 1 + c_dim1], ldc, &c__[j + c_dim1]
, ldc, &rwork[j], &work[j]);
		    }
		}

		if (j2 + kun > *n) {

/*                 adjust J2 to keep within the bounds of the matrix */

		    --nr;
		    j2 -= kb1;
		}

		i__3 = j2;
		i__4 = kb1;
		for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {

/*                 create nonzero element a(j-1,j+ku) above the band */
/*                 and store it in WORK(n+1:2*n) */

		    i__5 = j + kun;
		    i__6 = j;
		    i__7 = (j + kun) * ab_dim1 + 1;
		    z__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[
			    i__7].i, z__1.i = work[i__6].r * ab[i__7].i + 
			    work[i__6].i * ab[i__7].r;
		    work[i__5].r = z__1.r, work[i__5].i = z__1.i;
		    i__5 = (j + kun) * ab_dim1 + 1;
		    i__6 = j;
		    i__7 = (j + kun) * ab_dim1 + 1;
		    z__1.r = rwork[i__6] * ab[i__7].r, z__1.i = rwork[i__6] * 
			    ab[i__7].i;
		    ab[i__5].r = z__1.r, ab[i__5].i = z__1.i;
		}

/*              generate plane rotations to annihilate nonzero elements */
/*              which have been generated above the band */

		if (nr > 0) {
		    zlargv_(&nr, &ab[(j1 + kun - 1) * ab_dim1 + 1], &inca, &
			    work[j1 + kun], &kb1, &rwork[j1 + kun], &kb1);
		}

/*              apply plane rotations from the right */

		i__4 = kb;
		for (l = 1; l <= i__4; ++l) {
		    if (j2 + l - 1 > *m) {
			nrt = nr - 1;
		    } else {
			nrt = nr;
		    }
		    if (nrt > 0) {
			zlartv_(&nrt, &ab[l + 1 + (j1 + kun - 1) * ab_dim1], &
				inca, &ab[l + (j1 + kun) * ab_dim1], &inca, &
				rwork[j1 + kun], &work[j1 + kun], &kb1);
		    }
		}

		if (ml == ml0 && mu > mu0) {
		    if (mu <= *n - i__ + 1) {

/*                    generate plane rotation to annihilate a(i,i+mu-1) */
/*                    within the band, and apply rotation from the right */

			zlartg_(&ab[*ku - mu + 3 + (i__ + mu - 2) * ab_dim1], 
				&ab[*ku - mu + 2 + (i__ + mu - 1) * ab_dim1], 
				&rwork[i__ + mu - 1], &work[i__ + mu - 1], &
				ra);
			i__4 = *ku - mu + 3 + (i__ + mu - 2) * ab_dim1;
			ab[i__4].r = ra.r, ab[i__4].i = ra.i;
/* Computing MIN */
			i__3 = *kl + mu - 2, i__5 = *m - i__;
			i__4 = min(i__3,i__5);
			zrot_(&i__4, &ab[*ku - mu + 4 + (i__ + mu - 2) * 
				ab_dim1], &c__1, &ab[*ku - mu + 3 + (i__ + mu 
				- 1) * ab_dim1], &c__1, &rwork[i__ + mu - 1], 
				&work[i__ + mu - 1]);
		    }
		    ++nr;
		    j1 -= kb1;
		}

		if (wantpt) {

/*                 accumulate product of plane rotations in P' */

		    i__4 = j2;
		    i__3 = kb1;
		    for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) 
			    {
			d_cnjg(&z__1, &work[j + kun]);
			zrot_(n, &pt[j + kun - 1 + pt_dim1], ldpt, &pt[j + 
				kun + pt_dim1], ldpt, &rwork[j + kun], &z__1);
		    }
		}

		if (j2 + kb > *m) {

/*                 adjust J2 to keep within the bounds of the matrix */

		    --nr;
		    j2 -= kb1;
		}

		i__3 = j2;
		i__4 = kb1;
		for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {

/*                 create nonzero element a(j+kl+ku,j+ku-1) below the */
/*                 band and store it in WORK(1:n) */

		    i__5 = j + kb;
		    i__6 = j + kun;
		    i__7 = klu1 + (j + kun) * ab_dim1;
		    z__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[
			    i__7].i, z__1.i = work[i__6].r * ab[i__7].i + 
			    work[i__6].i * ab[i__7].r;
		    work[i__5].r = z__1.r, work[i__5].i = z__1.i;
		    i__5 = klu1 + (j + kun) * ab_dim1;
		    i__6 = j + kun;
		    i__7 = klu1 + (j + kun) * ab_dim1;
		    z__1.r = rwork[i__6] * ab[i__7].r, z__1.i = rwork[i__6] * 
			    ab[i__7].i;
		    ab[i__5].r = z__1.r, ab[i__5].i = z__1.i;
		}

		if (ml > ml0) {
		    --ml;
		} else {
		    --mu;
		}
	    }
	}
    }

    if (*ku == 0 && *kl > 0) {

/*        A has been reduced to complex lower bidiagonal form */

/*        Transform lower bidiagonal form to upper bidiagonal by applying */
/*        plane rotations from the left, overwriting superdiagonal */
/*        elements on subdiagonal elements */

/* Computing MIN */
	i__2 = *m - 1;
	i__1 = min(i__2,*n);
	for (i__ = 1; i__ <= i__1; ++i__) {
	    zlartg_(&ab[i__ * ab_dim1 + 1], &ab[i__ * ab_dim1 + 2], &rc, &rs, 
		    &ra);
	    i__2 = i__ * ab_dim1 + 1;
	    ab[i__2].r = ra.r, ab[i__2].i = ra.i;
	    if (i__ < *n) {
		i__2 = i__ * ab_dim1 + 2;
		i__4 = (i__ + 1) * ab_dim1 + 1;
		z__1.r = rs.r * ab[i__4].r - rs.i * ab[i__4].i, z__1.i = rs.r 
			* ab[i__4].i + rs.i * ab[i__4].r;
		ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
		i__2 = (i__ + 1) * ab_dim1 + 1;
		i__4 = (i__ + 1) * ab_dim1 + 1;
		z__1.r = rc * ab[i__4].r, z__1.i = rc * ab[i__4].i;
		ab[i__2].r = z__1.r, ab[i__2].i = z__1.i;
	    }
	    if (wantq) {
		d_cnjg(&z__1, &rs);
		zrot_(m, &q[i__ * q_dim1 + 1], &c__1, &q[(i__ + 1) * q_dim1 + 
			1], &c__1, &rc, &z__1);
	    }
	    if (wantc) {
		zrot_(ncc, &c__[i__ + c_dim1], ldc, &c__[i__ + 1 + c_dim1], 
			ldc, &rc, &rs);
	    }
	}
    } else {

/*        A has been reduced to complex upper bidiagonal form or is */
/*        diagonal */

	if (*ku > 0 && *m < *n) {

/*           Annihilate a(m,m+1) by applying plane rotations from the */
/*           right */

	    i__1 = *ku + (*m + 1) * ab_dim1;
	    rb.r = ab[i__1].r, rb.i = ab[i__1].i;
	    for (i__ = *m; i__ >= 1; --i__) {
		zlartg_(&ab[*ku + 1 + i__ * ab_dim1], &rb, &rc, &rs, &ra);
		i__1 = *ku + 1 + i__ * ab_dim1;
		ab[i__1].r = ra.r, ab[i__1].i = ra.i;
		if (i__ > 1) {
		    d_cnjg(&z__3, &rs);
		    z__2.r = -z__3.r, z__2.i = -z__3.i;
		    i__1 = *ku + i__ * ab_dim1;
		    z__1.r = z__2.r * ab[i__1].r - z__2.i * ab[i__1].i, 
			    z__1.i = z__2.r * ab[i__1].i + z__2.i * ab[i__1]
			    .r;
		    rb.r = z__1.r, rb.i = z__1.i;
		    i__1 = *ku + i__ * ab_dim1;
		    i__2 = *ku + i__ * ab_dim1;
		    z__1.r = rc * ab[i__2].r, z__1.i = rc * ab[i__2].i;
		    ab[i__1].r = z__1.r, ab[i__1].i = z__1.i;
		}
		if (wantpt) {
		    d_cnjg(&z__1, &rs);
		    zrot_(n, &pt[i__ + pt_dim1], ldpt, &pt[*m + 1 + pt_dim1], 
			    ldpt, &rc, &z__1);
		}
	    }
	}
    }

/*     Make diagonal and superdiagonal elements real, storing them in D */
/*     and E */

    i__1 = *ku + 1 + ab_dim1;
    t.r = ab[i__1].r, t.i = ab[i__1].i;
    i__1 = minmn;
    for (i__ = 1; i__ <= i__1; ++i__) {
	abst = z_abs(&t);
	d__[i__] = abst;
	if (abst != 0.) {
	    z__1.r = t.r / abst, z__1.i = t.i / abst;
	    t.r = z__1.r, t.i = z__1.i;
	} else {
	    t.r = 1., t.i = 0.;
	}
	if (wantq) {
	    zscal_(m, &t, &q[i__ * q_dim1 + 1], &c__1);
	}
	if (wantc) {
	    d_cnjg(&z__1, &t);
	    zscal_(ncc, &z__1, &c__[i__ + c_dim1], ldc);
	}
	if (i__ < minmn) {
	    if (*ku == 0 && *kl == 0) {
		e[i__] = 0.;
		i__2 = (i__ + 1) * ab_dim1 + 1;
		t.r = ab[i__2].r, t.i = ab[i__2].i;
	    } else {
		if (*ku == 0) {
		    i__2 = i__ * ab_dim1 + 2;
		    d_cnjg(&z__2, &t);
		    z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, 
			    z__1.i = ab[i__2].r * z__2.i + ab[i__2].i * 
			    z__2.r;
		    t.r = z__1.r, t.i = z__1.i;
		} else {
		    i__2 = *ku + (i__ + 1) * ab_dim1;
		    d_cnjg(&z__2, &t);
		    z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, 
			    z__1.i = ab[i__2].r * z__2.i + ab[i__2].i * 
			    z__2.r;
		    t.r = z__1.r, t.i = z__1.i;
		}
		abst = z_abs(&t);
		e[i__] = abst;
		if (abst != 0.) {
		    z__1.r = t.r / abst, z__1.i = t.i / abst;
		    t.r = z__1.r, t.i = z__1.i;
		} else {
		    t.r = 1., t.i = 0.;
		}
		if (wantpt) {
		    zscal_(n, &t, &pt[i__ + 1 + pt_dim1], ldpt);
		}
		i__2 = *ku + 1 + (i__ + 1) * ab_dim1;
		d_cnjg(&z__2, &t);
		z__1.r = ab[i__2].r * z__2.r - ab[i__2].i * z__2.i, z__1.i = 
			ab[i__2].r * z__2.i + ab[i__2].i * z__2.r;
		t.r = z__1.r, t.i = z__1.i;
	    }
	}
    }
    return 0;

/*     End of ZGBBRD */

} /* zgbbrd_ */
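The AB argument description above fixes the band storage ZGBBRD works in: AB(ku+1+i-j, j) = A(i, j) for max(1,j-ku) <= i <= min(m,j+kl). A minimal packing helper, assuming column-major f2c arrays, might look as follows; pack_band is a hypothetical name, not part of LAPACK.

#include "f2c.h"   /* doublecomplex */

/* Hypothetical packing helper: copy a dense column-major m x n matrix A (leading
   dimension lda) into the band storage ZGBBRD expects, AB(ku+1+i-j, j) = A(i,j)
   for max(1,j-ku) <= i <= min(m,j+kl), with AB column-major and ldab >= kl+ku+1.
   Loop indices are 1-based to match the Fortran description above. */
static void pack_band(int m, int n, int kl, int ku,
                      const doublecomplex *a, int lda,
                      doublecomplex *ab, int ldab)
{
    int i, j;
    for (j = 1; j <= n; ++j) {
        int ilo = (j - ku > 1) ? j - ku : 1;
        int ihi = (j + kl < m) ? j + kl : m;
        for (i = ilo; i <= ihi; ++i) {
            /* AB(ku+1+i-j, j) = A(i, j), converted to 0-based offsets */
            ab[(ku + i - j) + (j - 1) * ldab] = a[(i - 1) + (j - 1) * lda];
        }
    }
}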
Example #18
0
/* Subroutine */ int zlahrd_(integer *n, integer *k, integer *nb, 
	doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *t, 
	integer *ldt, doublecomplex *y, integer *ldy)
{
/*  -- LAPACK auxiliary routine (version 2.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    ZLAHRD reduces the first NB columns of a complex general n-by-(n-k+1)   
    matrix A so that elements below the k-th subdiagonal are zero. The   
    reduction is performed by a unitary similarity transformation   
    Q' * A * Q. The routine returns the matrices V and T which determine   
    Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T.   

    This is an auxiliary routine called by ZGEHRD.   

    Arguments   
    =========   

    N       (input) INTEGER   
            The order of the matrix A.   

    K       (input) INTEGER   
            The offset for the reduction. Elements below the k-th   
            subdiagonal in the first NB columns are reduced to zero.   

    NB      (input) INTEGER   
            The number of columns to be reduced.   

    A       (input/output) COMPLEX*16 array, dimension (LDA,N-K+1)   
            On entry, the n-by-(n-k+1) general matrix A.   
            On exit, the elements on and above the k-th subdiagonal in   
            the first NB columns are overwritten with the corresponding   
            elements of the reduced matrix; the elements below the k-th   
            subdiagonal, with the array TAU, represent the matrix Q as a   
            product of elementary reflectors. The other columns of A are   
            unchanged. See Further Details.   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= max(1,N).   

    TAU     (output) COMPLEX*16 array, dimension (NB)   
            The scalar factors of the elementary reflectors. See Further   
            Details.   

    T       (output) COMPLEX*16 array, dimension (NB,NB)   
            The upper triangular matrix T.   

    LDT     (input) INTEGER   
            The leading dimension of the array T.  LDT >= NB.   

    Y       (output) COMPLEX*16 array, dimension (LDY,NB)   
            The n-by-nb matrix Y.   

    LDY     (input) INTEGER   
            The leading dimension of the array Y. LDY >= max(1,N).   

    Further Details   
    ===============   

    The matrix Q is represented as a product of nb elementary reflectors   

       Q = H(1) H(2) . . . H(nb).   

    Each H(i) has the form   

       H(i) = I - tau * v * v'   

    where tau is a complex scalar, and v is a complex vector with   
    v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in   
    A(i+k+1:n,i), and tau in TAU(i).   

    The elements of the vectors v together form the (n-k+1)-by-nb matrix   
    V which is needed, with T and Y, to apply the transformation to the   
    unreduced part of the matrix, using an update of the form:   
    A := (I - V*T*V') * (A - Y*V').   

    The contents of A on exit are illustrated by the following example   
    with n = 7, k = 3 and nb = 2:   

       ( a   h   a   a   a )   
       ( a   h   a   a   a )   
       ( a   h   a   a   a )   
       ( h   h   a   a   a )   
       ( v1  h   a   a   a )   
       ( v1  v2  a   a   a )   
       ( v1  v2  a   a   a )   

    where a denotes an element of the original matrix A, h denotes a   
    modified element of the upper Hessenberg matrix H, and vi denotes an   
    element of the vector defining H(i).   

    ===================================================================== 
  


       Quick return if possible   

    
   Parameter adjustments   
       Function Body */
    /* Table of constant values */
    static doublecomplex c_b1 = {0.,0.};
    static doublecomplex c_b2 = {1.,0.};
    static integer c__1 = 1;
    
    /* System generated locals */
    integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2, 
	    i__3;
    doublecomplex z__1;
    /* Local variables */
    static integer i;
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *), zgemv_(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
	    integer *, doublecomplex *, doublecomplex *, integer *), 
	    zcopy_(integer *, doublecomplex *, integer *, doublecomplex *, 
	    integer *), zaxpy_(integer *, doublecomplex *, doublecomplex *, 
	    integer *, doublecomplex *, integer *), ztrmv_(char *, char *, 
	    char *, integer *, doublecomplex *, integer *, doublecomplex *, 
	    integer *);
    static doublecomplex ei;
    extern /* Subroutine */ int zlarfg_(integer *, doublecomplex *, 
	    doublecomplex *, integer *, doublecomplex *), zlacgv_(integer *, 
	    doublecomplex *, integer *);



#define TAU(I) tau[(I)-1]

#define A(I,J) a[(I)-1 + ((J)-1)* ( *lda)]
#define T(I,J) t[(I)-1 + ((J)-1)* ( *ldt)]
#define Y(I,J) y[(I)-1 + ((J)-1)* ( *ldy)]

    if (*n <= 1) {
	return 0;
    }

    i__1 = *nb;
    for (i = 1; i <= *nb; ++i) {
	if (i > 1) {

/*           Update A(1:n,i)   

             Compute i-th column of A - Y * V' */

	    i__2 = i - 1;
	    zlacgv_(&i__2, &A(*k+i-1,1), lda);
	    i__2 = i - 1;
	    z__1.r = -1., z__1.i = 0.;
	    zgemv_("No transpose", n, &i__2, &z__1, &Y(1,1), ldy, &A(*k+i-1,1), lda, &c_b2, &A(1,i), &c__1);
	    i__2 = i - 1;
	    zlacgv_(&i__2, &A(*k+i-1,1), lda);

/*           Apply I - V * T' * V' to this column (call it b) from the   
             left, using the last column of T as workspace   

             Let  V = ( V1 )   and   b = ( b1 )   (first I-1 rows)   
                      ( V2 )             ( b2 )   

             where V1 is unit lower triangular   

             w := V1' * b1 */

	    i__2 = i - 1;
	    zcopy_(&i__2, &A(*k+1,i), &c__1, &T(1,*nb)
		    , &c__1);
	    i__2 = i - 1;
	    ztrmv_("Lower", "Conjugate transpose", "Unit", &i__2, &A(*k+1,1), lda, &T(1,*nb), &c__1);

/*           w := w + V2'*b2 */

	    i__2 = *n - *k - i + 1;
	    i__3 = i - 1;
	    zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &A(*k+i,1), lda, &A(*k+i,i), &c__1, &c_b2, &T(1,*nb), &c__1);

/*           w := T'*w */

	    i__2 = i - 1;
	    ztrmv_("Upper", "Conjugate transpose", "Non-unit", &i__2, &T(1,1), ldt, &T(1,*nb), &c__1);

/*           b2 := b2 - V2*w */

	    i__2 = *n - *k - i + 1;
	    i__3 = i - 1;
	    z__1.r = -1., z__1.i = 0.;
	    zgemv_("No transpose", &i__2, &i__3, &z__1, &A(*k+i,1), 
		    lda, &T(1,*nb), &c__1, &c_b2, &A(*k+i,i), &c__1);

/*           b1 := b1 - V1*w */

	    i__2 = i - 1;
	    ztrmv_("Lower", "No transpose", "Unit", &i__2, &A(*k+1,1)
		    , lda, &T(1,*nb), &c__1);
	    i__2 = i - 1;
	    z__1.r = -1., z__1.i = 0.;
	    zaxpy_(&i__2, &z__1, &T(1,*nb), &c__1, &A(*k+1,i), &c__1);

	    i__2 = *k + i - 1 + (i - 1) * a_dim1;
	    A(*k+i-1,i-1).r = ei.r, A(*k+i-1,i-1).i = ei.i;
	}

/*        Generate the elementary reflector H(i) to annihilate   
          A(k+i+1:n,i) */

	i__2 = *k + i + i * a_dim1;
	ei.r = A(*k+i,i).r, ei.i = A(*k+i,i).i;
	i__2 = *n - *k - i + 1;
/* Computing MIN */
	i__3 = *k + i + 1;
	zlarfg_(&i__2, &ei, &A(min(*k+i+1,*n),i), &c__1, &TAU(i));
	i__2 = *k + i + i * a_dim1;
	A(*k+i,i).r = 1., A(*k+i,i).i = 0.;

/*        Compute  Y(1:n,i) */

	i__2 = *n - *k - i + 1;
	zgemv_("No transpose", n, &i__2, &c_b2, &A(1,i+1), lda,
		 &A(*k+i,i), &c__1, &c_b1, &Y(1,i), &
		c__1);
	i__2 = *n - *k - i + 1;
	i__3 = i - 1;
	zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &A(*k+i,1)
		, lda, &A(*k+i,i), &c__1, &c_b1, &T(1,i), &c__1);
	i__2 = i - 1;
	z__1.r = -1., z__1.i = 0.;
	zgemv_("No transpose", n, &i__2, &z__1, &Y(1,1), ldy, &T(1,i), &c__1, &c_b2, &Y(1,i), &c__1);
	zscal_(n, &TAU(i), &Y(1,i), &c__1);

/*        Compute T(1:i,i) */

	i__2 = i - 1;
	i__3 = i;
	z__1.r = -TAU(i).r, z__1.i = -TAU(i).i;
	zscal_(&i__2, &z__1, &T(1,i), &c__1);
	i__2 = i - 1;
	ztrmv_("Upper", "No transpose", "Non-unit", &i__2, &T(1,1), ldt, 
		&T(1,i), &c__1);
	i__2 = i + i * t_dim1;
	i__3 = i;
	T(i,i).r = TAU(i).r, T(i,i).i = TAU(i).i;

/* L10: */
    }
    i__1 = *k + *nb + *nb * a_dim1;
    A(*k+*nb,*nb).r = ei.r, A(*k+*nb,*nb).i = ei.i;

    return 0;

/*     End of ZLAHRD */

} /* zlahrd_ */
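The Further Details section above defines each reflector as H(i) = I - tau * v * v'. Applied to a vector x, this is x := x - tau * (v' * x) * v; the sketch below spells that out with plain loops. apply_reflector is an illustrative helper name, not a LAPACK routine, and doublecomplex is the f2c struct type.

#include "f2c.h"   /* doublecomplex */

/* Illustrative helper: apply one elementary reflector H = I - tau * v * v'
   to a vector x of length n, i.e. x := x - tau * (v' * x) * v.
   Plain loops are used instead of ZDOTC/ZAXPY to keep the sketch self-contained. */
static void apply_reflector(int n, doublecomplex tau,
                            const doublecomplex *v, doublecomplex *x)
{
    doublecomplex s = {0., 0.};          /* s := v' * x (conjugated dot product) */
    doublecomplex w;
    int i;

    for (i = 0; i < n; ++i) {
        s.r += v[i].r * x[i].r + v[i].i * x[i].i;    /* Re( conj(v_i) * x_i ) */
        s.i += v[i].r * x[i].i - v[i].i * x[i].r;    /* Im( conj(v_i) * x_i ) */
    }
    w.r = tau.r * s.r - tau.i * s.i;                 /* w := tau * s */
    w.i = tau.r * s.i + tau.i * s.r;
    for (i = 0; i < n; ++i) {                        /* x := x - w * v */
        x[i].r -= w.r * v[i].r - w.i * v[i].i;
        x[i].i -= w.r * v[i].i + w.i * v[i].r;
    }
}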
Example #19
0
 int zlabrd_(int *m, int *n, int *nb, 
	doublecomplex *a, int *lda, double *d__, double *e, 
	doublecomplex *tauq, doublecomplex *taup, doublecomplex *x, int *
	ldx, doublecomplex *y, int *ldy)
{
    /* System generated locals */
    int a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2, 
	    i__3;
    doublecomplex z__1;

    /* Local variables */
    int i__;
    doublecomplex alpha;
    extern  int zscal_(int *, doublecomplex *, 
	    doublecomplex *, int *), zgemv_(char *, int *, int *, 
	    doublecomplex *, doublecomplex *, int *, doublecomplex *, 
	    int *, doublecomplex *, doublecomplex *, int *), 
	    zlarfg_(int *, doublecomplex *, doublecomplex *, int *, 
	    doublecomplex *), zlacgv_(int *, doublecomplex *, int *);


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZLABRD reduces the first NB rows and columns of a complex general */
/*  m by n matrix A to upper or lower real bidiagonal form by a unitary */
/*  transformation Q' * A * P, and returns the matrices X and Y which */
/*  are needed to apply the transformation to the unreduced part of A. */

/*  If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower */
/*  bidiagonal form. */

/*  This is an auxiliary routine called by ZGEBRD */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows in the matrix A. */

/*  N       (input) INTEGER */
/*          The number of columns in the matrix A. */

/*  NB      (input) INTEGER */
/*          The number of leading rows and columns of A to be reduced. */

/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
/*          On entry, the m by n general matrix to be reduced. */
/*          On exit, the first NB rows and columns of the matrix are */
/*          overwritten; the rest of the array is unchanged. */
/*          If m >= n, elements on and below the diagonal in the first NB */
/*            columns, with the array TAUQ, represent the unitary */
/*            matrix Q as a product of elementary reflectors; and */
/*            elements above the diagonal in the first NB rows, with the */
/*            array TAUP, represent the unitary matrix P as a product */
/*            of elementary reflectors. */
/*          If m < n, elements below the diagonal in the first NB */
/*            columns, with the array TAUQ, represent the unitary */
/*            matrix Q as a product of elementary reflectors, and */
/*            elements on and above the diagonal in the first NB rows, */
/*            with the array TAUP, represent the unitary matrix P as */
/*            a product of elementary reflectors. */
/*          See Further Details. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= MAX(1,M). */

/*  D       (output) DOUBLE PRECISION array, dimension (NB) */
/*          The diagonal elements of the first NB rows and columns of */
/*          the reduced matrix.  D(i) = A(i,i). */

/*  E       (output) DOUBLE PRECISION array, dimension (NB) */
/*          The off-diagonal elements of the first NB rows and columns of */
/*          the reduced matrix. */

/*  TAUQ    (output) COMPLEX*16 array dimension (NB) */
/*          The scalar factors of the elementary reflectors which */
/*          represent the unitary matrix Q. See Further Details. */

/*  TAUP    (output) COMPLEX*16 array, dimension (NB) */
/*          The scalar factors of the elementary reflectors which */
/*          represent the unitary matrix P. See Further Details. */

/*  X       (output) COMPLEX*16 array, dimension (LDX,NB) */
/*          The m-by-nb matrix X required to update the unreduced part */
/*          of A. */

/*  LDX     (input) INTEGER */
/*          The leading dimension of the array X. LDX >= MAX(1,M). */

/*  Y       (output) COMPLEX*16 array, dimension (LDY,NB) */
/*          The n-by-nb matrix Y required to update the unreduced part */
/*          of A. */

/*  LDY     (input) INTEGER */
/*          The leading dimension of the array Y. LDY >= MAX(1,N). */

/*  Further Details */
/*  =============== */

/*  The matrices Q and P are represented as products of elementary */
/*  reflectors: */

/*     Q = H(1) H(2) . . . H(nb)  and  P = G(1) G(2) . . . G(nb) */

/*  Each H(i) and G(i) has the form: */

/*     H(i) = I - tauq * v * v'  and G(i) = I - taup * u * u' */

/*  where tauq and taup are complex scalars, and v and u are complex */
/*  vectors. */

/*  If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in */
/*  A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in */
/*  A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */

/*  If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in */
/*  A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in */
/*  A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). */

/*  The elements of the vectors v and u together form the m-by-nb matrix */
/*  V and the nb-by-n matrix U' which are needed, with X and Y, to apply */
/*  the transformation to the unreduced part of the matrix, using a block */
/*  update of the form:  A := A - V*Y' - X*U'. */

/*  The contents of A on exit are illustrated by the following examples */
/*  with nb = 2: */

/*  m = 6 and n = 5 (m > n):          m = 5 and n = 6 (m < n): */

/*    (  1   1   u1  u1  u1 )           (  1   u1  u1  u1  u1  u1 ) */
/*    (  v1  1   1   u2  u2 )           (  1   1   u2  u2  u2  u2 ) */
/*    (  v1  v2  a   a   a  )           (  v1  1   a   a   a   a  ) */
/*    (  v1  v2  a   a   a  )           (  v1  v2  a   a   a   a  ) */
/*    (  v1  v2  a   a   a  )           (  v1  v2  a   a   a   a  ) */
/*    (  v1  v2  a   a   a  ) */

/*  where a denotes an element of the original matrix which is unchanged, */
/*  vi denotes an element of the vector defining H(i), and ui an element */
/*  of the vector defining G(i). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Quick return if possible */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --d__;
    --e;
    --tauq;
    --taup;
    x_dim1 = *ldx;
    x_offset = 1 + x_dim1;
    x -= x_offset;
    y_dim1 = *ldy;
    y_offset = 1 + y_dim1;
    y -= y_offset;

    /* Function Body */
    if (*m <= 0 || *n <= 0) {
	return 0;
    }

    if (*m >= *n) {

/*        Reduce to upper bidiagonal form */

	i__1 = *nb;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Update A(i:m,i) */

	    i__2 = i__ - 1;
	    zlacgv_(&i__2, &y[i__ + y_dim1], ldy);
	    i__2 = *m - i__ + 1;
	    i__3 = i__ - 1;
	    z__1.r = -1., z__1.i = -0.;
	    zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + a_dim1], lda, 
		     &y[i__ + y_dim1], ldy, &c_b2, &a[i__ + i__ * a_dim1], &
		    c__1);
	    i__2 = i__ - 1;
	    zlacgv_(&i__2, &y[i__ + y_dim1], ldy);
	    i__2 = *m - i__ + 1;
	    i__3 = i__ - 1;
	    z__1.r = -1., z__1.i = -0.;
	    zgemv_("No transpose", &i__2, &i__3, &z__1, &x[i__ + x_dim1], ldx, 
		     &a[i__ * a_dim1 + 1], &c__1, &c_b2, &a[i__ + i__ * 
		    a_dim1], &c__1);

/*           Generate reflection Q(i) to annihilate A(i+1:m,i) */

	    i__2 = i__ + i__ * a_dim1;
	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
	    i__2 = *m - i__ + 1;
/* Computing MIN */
	    i__3 = i__ + 1;
	    zlarfg_(&i__2, &alpha, &a[MIN(i__3, *m)+ i__ * a_dim1], &c__1, &
		    tauq[i__]);
	    i__2 = i__;
	    d__[i__2] = alpha.r;
	    if (i__ < *n) {
		i__2 = i__ + i__ * a_dim1;
		a[i__2].r = 1., a[i__2].i = 0.;

/*              Compute Y(i+1:n,i) */

		i__2 = *m - i__ + 1;
		i__3 = *n - i__;
		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + (
			i__ + 1) * a_dim1], lda, &a[i__ + i__ * a_dim1], &
			c__1, &c_b1, &y[i__ + 1 + i__ * y_dim1], &c__1);
		i__2 = *m - i__ + 1;
		i__3 = i__ - 1;
		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 
			a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b1, &
			y[i__ * y_dim1 + 1], &c__1);
		i__2 = *n - i__;
		i__3 = i__ - 1;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__2, &i__3, &z__1, &y[i__ + 1 + 
			y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b2, &y[
			i__ + 1 + i__ * y_dim1], &c__1);
		i__2 = *m - i__ + 1;
		i__3 = i__ - 1;
		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &x[i__ + 
			x_dim1], ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b1, &
			y[i__ * y_dim1 + 1], &c__1);
		i__2 = i__ - 1;
		i__3 = *n - i__;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &a[(i__ + 
			1) * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
			c_b2, &y[i__ + 1 + i__ * y_dim1], &c__1);
		i__2 = *n - i__;
		zscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);

/*              Update A(i,i+1:n) */

		i__2 = *n - i__;
		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
		zlacgv_(&i__, &a[i__ + a_dim1], lda);
		i__2 = *n - i__;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__2, &i__, &z__1, &y[i__ + 1 + 
			y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b2, &a[i__ + (
			i__ + 1) * a_dim1], lda);
		zlacgv_(&i__, &a[i__ + a_dim1], lda);
		i__2 = i__ - 1;
		zlacgv_(&i__2, &x[i__ + x_dim1], ldx);
		i__2 = i__ - 1;
		i__3 = *n - i__;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &a[(i__ + 
			1) * a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b2, &
			a[i__ + (i__ + 1) * a_dim1], lda);
		i__2 = i__ - 1;
		zlacgv_(&i__2, &x[i__ + x_dim1], ldx);

/*              Generate reflection P(i) to annihilate A(i,i+2:n) */

		i__2 = i__ + (i__ + 1) * a_dim1;
		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
		i__2 = *n - i__;
/* Computing MIN */
		i__3 = i__ + 2;
		zlarfg_(&i__2, &alpha, &a[i__ + MIN(i__3, *n)* a_dim1], lda, &
			taup[i__]);
		i__2 = i__;
		e[i__2] = alpha.r;
		i__2 = i__ + (i__ + 1) * a_dim1;
		a[i__2].r = 1., a[i__2].i = 0.;

/*              Compute X(i+1:m,i) */

		i__2 = *m - i__;
		i__3 = *n - i__;
		zgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + (i__ 
			+ 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1], 
			lda, &c_b1, &x[i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = *n - i__;
		zgemv_("Conjugate transpose", &i__2, &i__, &c_b2, &y[i__ + 1 
			+ y_dim1], ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &
			c_b1, &x[i__ * x_dim1 + 1], &c__1);
		i__2 = *m - i__;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__2, &i__, &z__1, &a[i__ + 1 + 
			a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
			i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = i__ - 1;
		i__3 = *n - i__;
		zgemv_("No transpose", &i__2, &i__3, &c_b2, &a[(i__ + 1) * 
			a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
			c_b1, &x[i__ * x_dim1 + 1], &c__1);
		i__2 = *m - i__;
		i__3 = i__ - 1;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__2, &i__3, &z__1, &x[i__ + 1 + 
			x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
			i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = *m - i__;
		zscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = *n - i__;
		zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
	    }
/* L10: */
	}
    } else {

/*        Reduce to lower bidiagonal form */

	i__1 = *nb;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Update A(i,i:n) */

	    i__2 = *n - i__ + 1;
	    zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
	    i__2 = i__ - 1;
	    zlacgv_(&i__2, &a[i__ + a_dim1], lda);
	    i__2 = *n - i__ + 1;
	    i__3 = i__ - 1;
	    z__1.r = -1., z__1.i = -0.;
	    zgemv_("No transpose", &i__2, &i__3, &z__1, &y[i__ + y_dim1], ldy, 
		     &a[i__ + a_dim1], lda, &c_b2, &a[i__ + i__ * a_dim1], 
		    lda);
	    i__2 = i__ - 1;
	    zlacgv_(&i__2, &a[i__ + a_dim1], lda);
	    i__2 = i__ - 1;
	    zlacgv_(&i__2, &x[i__ + x_dim1], ldx);
	    i__2 = i__ - 1;
	    i__3 = *n - i__ + 1;
	    z__1.r = -1., z__1.i = -0.;
	    zgemv_("Conjugate transpose", &i__2, &i__3, &z__1, &a[i__ * 
		    a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b2, &a[i__ + 
		    i__ * a_dim1], lda);
	    i__2 = i__ - 1;
	    zlacgv_(&i__2, &x[i__ + x_dim1], ldx);

/*           Generate reflection P(i) to annihilate A(i,i+1:n) */

	    i__2 = i__ + i__ * a_dim1;
	    alpha.r = a[i__2].r, alpha.i = a[i__2].i;
	    i__2 = *n - i__ + 1;
/* Computing MIN */
	    i__3 = i__ + 1;
	    zlarfg_(&i__2, &alpha, &a[i__ + MIN(i__3, *n)* a_dim1], lda, &
		    taup[i__]);
	    i__2 = i__;
	    d__[i__2] = alpha.r;
	    if (i__ < *m) {
		i__2 = i__ + i__ * a_dim1;
		a[i__2].r = 1., a[i__2].i = 0.;

/*              Compute X(i+1:m,i) */

		i__2 = *m - i__;
		i__3 = *n - i__ + 1;
		zgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 + i__ *
			 a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b1, &x[
			i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = *n - i__ + 1;
		i__3 = i__ - 1;
		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &y[i__ + 
			y_dim1], ldy, &a[i__ + i__ * a_dim1], lda, &c_b1, &x[
			i__ * x_dim1 + 1], &c__1);
		i__2 = *m - i__;
		i__3 = i__ - 1;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 + 
			a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
			i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = i__ - 1;
		i__3 = *n - i__ + 1;
		zgemv_("No transpose", &i__2, &i__3, &c_b2, &a[i__ * a_dim1 + 
			1], lda, &a[i__ + i__ * a_dim1], lda, &c_b1, &x[i__ * 
			x_dim1 + 1], &c__1);
		i__2 = *m - i__;
		i__3 = i__ - 1;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__2, &i__3, &z__1, &x[i__ + 1 + 
			x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b2, &x[
			i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = *m - i__;
		zscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
		i__2 = *n - i__ + 1;
		zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);

/*              Update A(i+1:m,i) */

		i__2 = i__ - 1;
		zlacgv_(&i__2, &y[i__ + y_dim1], ldy);
		i__2 = *m - i__;
		i__3 = i__ - 1;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__2, &i__3, &z__1, &a[i__ + 1 + 
			a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b2, &a[i__ + 
			1 + i__ * a_dim1], &c__1);
		i__2 = i__ - 1;
		zlacgv_(&i__2, &y[i__ + y_dim1], ldy);
		i__2 = *m - i__;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__2, &i__, &z__1, &x[i__ + 1 + 
			x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b2, &a[
			i__ + 1 + i__ * a_dim1], &c__1);

/*              Generate reflection Q(i) to annihilate A(i+2:m,i) */

		i__2 = i__ + 1 + i__ * a_dim1;
		alpha.r = a[i__2].r, alpha.i = a[i__2].i;
		i__2 = *m - i__;
/* Computing MIN */
		i__3 = i__ + 2;
		zlarfg_(&i__2, &alpha, &a[MIN(i__3, *m)+ i__ * a_dim1], &c__1, 
			 &tauq[i__]);
		i__2 = i__;
		e[i__2] = alpha.r;
		i__2 = i__ + 1 + i__ * a_dim1;
		a[i__2].r = 1., a[i__2].i = 0.;

/*              Compute Y(i+1:n,i) */

		i__2 = *m - i__;
		i__3 = *n - i__;
		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 
			+ (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1]
, &c__1, &c_b1, &y[i__ + 1 + i__ * y_dim1], &c__1);
		i__2 = *m - i__;
		i__3 = i__ - 1;
		zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[i__ + 1 
			+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
			c_b1, &y[i__ * y_dim1 + 1], &c__1);
		i__2 = *n - i__;
		i__3 = i__ - 1;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("No transpose", &i__2, &i__3, &z__1, &y[i__ + 1 + 
			y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b2, &y[
			i__ + 1 + i__ * y_dim1], &c__1);
		i__2 = *m - i__;
		zgemv_("Conjugate transpose", &i__2, &i__, &c_b2, &x[i__ + 1 
			+ x_dim1], ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &
			c_b1, &y[i__ * y_dim1 + 1], &c__1);
		i__2 = *n - i__;
		z__1.r = -1., z__1.i = -0.;
		zgemv_("Conjugate transpose", &i__, &i__2, &z__1, &a[(i__ + 1)
			 * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
			c_b2, &y[i__ + 1 + i__ * y_dim1], &c__1);
		i__2 = *n - i__;
		zscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
	    } else {
		i__2 = *n - i__ + 1;
		zlacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
	    }
/* L20: */
	}
    }
    return 0;

/*     End of ZLABRD */

} /* zlabrd_ */
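/* A minimal single-panel usage sketch for zlabrd_ above, not the full
   blocked reduction.  It assumes "f2c.h" types and the standard ZLABRD
   argument list M, N, NB, A, LDA, D, E, TAUQ, TAUP, X, LDX, Y, LDY
   (the routine's prototype is not repeated in this excerpt, so treat it
   as an assumption).  A real driver such as ZGEBRD would afterwards use
   the returned X and Y to update the trailing block A(NB+1:M,NB+1:N). */
#include "f2c.h"

extern int zlabrd_(integer *m, integer *n, integer *nb,
                   doublecomplex *a, integer *lda,
                   doublereal *d__, doublereal *e,
                   doublecomplex *tauq, doublecomplex *taup,
                   doublecomplex *x, integer *ldx,
                   doublecomplex *y, integer *ldy);

static void panel_bidiag_sketch(void)
{
    enum { M = 6, N = 5, NB = 2 };
    integer m = M, n = N, nb = NB, lda = M, ldx = M, ldy = N;
    doublecomplex a[M * N] = { {0., 0.} };  /* zero matrix keeps the sketch
                                               short; fill column-major in
                                               real use */
    doublereal d__[NB], e[NB];              /* leading bidiagonal entries  */
    doublecomplex tauq[NB], taup[NB];       /* reflector scalars for Q, P  */
    doublecomplex x[M * NB], y[N * NB];     /* block-update matrices       */

    zlabrd_(&m, &n, &nb, a, &lda, d__, e, tauq, taup, x, &ldx, y, &ldy);
}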
Example #20
0
/* Subroutine */ int zlaghe_(integer *n, integer *k, doublereal *d, 
	doublecomplex *a, integer *lda, integer *iseed, doublecomplex *work, 
	integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    doublereal d__1;
    doublecomplex z__1, z__2, z__3, z__4;

    /* Builtin functions */
    double z_abs(doublecomplex *);
    void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
	    doublecomplex *, doublecomplex *);

    /* Local variables */
    extern /* Subroutine */ int zher2_(char *, integer *, doublecomplex *, 
	    doublecomplex *, integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *);
    static integer i, j;
    static doublecomplex alpha;
    extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *, 
	    doublecomplex *, integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *), zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *);
    extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublecomplex *, integer *);
    extern /* Subroutine */ int zgemv_(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
	    integer *, doublecomplex *, doublecomplex *, integer *), 
	    zhemv_(char *, integer *, doublecomplex *, doublecomplex *, 
	    integer *, doublecomplex *, integer *, doublecomplex *, 
	    doublecomplex *, integer *), zaxpy_(integer *, 
	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
	    integer *);
    extern doublereal dznrm2_(integer *, doublecomplex *, integer *);
    static doublecomplex wa, wb;
    static doublereal wn;
    extern /* Subroutine */ int xerbla_(char *, integer *), zlarnv_(
	    integer *, integer *, integer *, doublecomplex *);
    static doublecomplex tau;


/*  -- LAPACK auxiliary test routine (version 2.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    ZLAGHE generates a complex hermitian matrix A, by pre- and post-   
    multiplying a real diagonal matrix D with a random unitary matrix:   
    A = U*D*U'. The semi-bandwidth may then be reduced to k by
    additional unitary transformations.

    Arguments   
    =========   

    N       (input) INTEGER   
            The order of the matrix A.  N >= 0.   

    K       (input) INTEGER   
            The number of nonzero subdiagonals within the band of A.   
            0 <= K <= N-1.   

    D       (input) DOUBLE PRECISION array, dimension (N)   
            The diagonal elements of the diagonal matrix D.   

    A       (output) COMPLEX*16 array, dimension (LDA,N)   
            The generated n by n hermitian matrix A (the full matrix is   
            stored).   

    LDA     (input) INTEGER   
            The leading dimension of the array A.  LDA >= N.   

    ISEED   (input/output) INTEGER array, dimension (4)   
            On entry, the seed of the random number generator; the array
            elements must be between 0 and 4095, and ISEED(4) must be
            odd.   
            On exit, the seed is updated.   

    WORK    (workspace) COMPLEX*16 array, dimension (2*N)   

    INFO    (output) INTEGER   
            = 0: successful exit   
            < 0: if INFO = -i, the i-th argument had an illegal value   

    ===================================================================== 
  


       Test the input arguments   

       Parameter adjustments */
    --d;
    a_dim1 = *lda;
    a_offset = a_dim1 + 1;
    a -= a_offset;
    --iseed;
    --work;

    /* Function Body */
    *info = 0;
    if (*n < 0) {
	*info = -1;
    } else if (*k < 0 || *k > *n - 1) {
	*info = -2;
    } else if (*lda < max(1,*n)) {
	*info = -5;
    }
    if (*info < 0) {
	i__1 = -(*info);
	xerbla_("ZLAGHE", &i__1);
	return 0;
    }

/*     initialize lower triangle of A to diagonal matrix */

    i__1 = *n;
    for (j = 1; j <= i__1; ++j) {
	i__2 = *n;
	for (i = j + 1; i <= i__2; ++i) {
	    i__3 = i + j * a_dim1;
	    a[i__3].r = 0., a[i__3].i = 0.;
/* L10: */
	}
/* L20: */
    }
    i__1 = *n;
    for (i = 1; i <= i__1; ++i) {
	i__2 = i + i * a_dim1;
	i__3 = i;
	a[i__2].r = d[i__3], a[i__2].i = 0.;
/* L30: */
    }

/*     Generate lower triangle of hermitian matrix */

    for (i = *n - 1; i >= 1; --i) {

/*        generate random reflection */

	i__1 = *n - i + 1;
	zlarnv_(&c__3, &iseed[1], &i__1, &work[1]);
	i__1 = *n - i + 1;
	wn = dznrm2_(&i__1, &work[1], &c__1);
	d__1 = wn / z_abs(&work[1]);
	z__1.r = d__1 * work[1].r, z__1.i = d__1 * work[1].i;
	wa.r = z__1.r, wa.i = z__1.i;
	if (wn == 0.) {
	    tau.r = 0., tau.i = 0.;
	} else {
	    z__1.r = work[1].r + wa.r, z__1.i = work[1].i + wa.i;
	    wb.r = z__1.r, wb.i = z__1.i;
	    i__1 = *n - i;
	    z_div(&z__1, &c_b2, &wb);
	    zscal_(&i__1, &z__1, &work[2], &c__1);
	    work[1].r = 1., work[1].i = 0.;
	    z_div(&z__1, &wb, &wa);
	    d__1 = z__1.r;
	    tau.r = d__1, tau.i = 0.;
	}

/*        apply random reflection to A(i:n,i:n) from the left   
          and the right   

          compute  y := tau * A * u */

	i__1 = *n - i + 1;
	zhemv_("Lower", &i__1, &tau, &a[i + i * a_dim1], lda, &work[1], &c__1,
		 &c_b1, &work[*n + 1], &c__1);

/*        compute  v := y - 1/2 * tau * ( y, u ) * u */

	z__3.r = -.5, z__3.i = 0.;
	z__2.r = z__3.r * tau.r - z__3.i * tau.i, z__2.i = z__3.r * tau.i + 
		z__3.i * tau.r;
	i__1 = *n - i + 1;
	zdotc_(&z__4, &i__1, &work[*n + 1], &c__1, &work[1], &c__1);
	z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i 
		+ z__2.i * z__4.r;
	alpha.r = z__1.r, alpha.i = z__1.i;
	i__1 = *n - i + 1;
	zaxpy_(&i__1, &alpha, &work[1], &c__1, &work[*n + 1], &c__1);

/*        apply the transformation as a rank-2 update to A(i:n,i:n) */

	i__1 = *n - i + 1;
	z__1.r = -1., z__1.i = 0.;
	zher2_("Lower", &i__1, &z__1, &work[1], &c__1, &work[*n + 1], &c__1, &
		a[i + i * a_dim1], lda);
/* L40: */
    }

/*     Reduce number of subdiagonals to K */

    i__1 = *n - 1 - *k;
    for (i = 1; i <= i__1; ++i) {

/*        generate reflection to annihilate A(k+i+1:n,i) */

	i__2 = *n - *k - i + 1;
	wn = dznrm2_(&i__2, &a[*k + i + i * a_dim1], &c__1);
	d__1 = wn / z_abs(&a[*k + i + i * a_dim1]);
	i__2 = *k + i + i * a_dim1;
	z__1.r = d__1 * a[i__2].r, z__1.i = d__1 * a[i__2].i;
	wa.r = z__1.r, wa.i = z__1.i;
	if (wn == 0.) {
	    tau.r = 0., tau.i = 0.;
	} else {
	    i__2 = *k + i + i * a_dim1;
	    z__1.r = a[i__2].r + wa.r, z__1.i = a[i__2].i + wa.i;
	    wb.r = z__1.r, wb.i = z__1.i;
	    i__2 = *n - *k - i;
	    z_div(&z__1, &c_b2, &wb);
	    zscal_(&i__2, &z__1, &a[*k + i + 1 + i * a_dim1], &c__1);
	    i__2 = *k + i + i * a_dim1;
	    a[i__2].r = 1., a[i__2].i = 0.;
	    z_div(&z__1, &wb, &wa);
	    d__1 = z__1.r;
	    tau.r = d__1, tau.i = 0.;
	}

/*        apply reflection to A(k+i:n,i+1:k+i-1) from the left */

	i__2 = *n - *k - i + 1;
	i__3 = *k - 1;
	zgemv_("Conjugate transpose", &i__2, &i__3, &c_b2, &a[*k + i + (i + 1)
		 * a_dim1], lda, &a[*k + i + i * a_dim1], &c__1, &c_b1, &work[
		1], &c__1);
	i__2 = *n - *k - i + 1;
	i__3 = *k - 1;
	z__1.r = -tau.r, z__1.i = -tau.i;
	zgerc_(&i__2, &i__3, &z__1, &a[*k + i + i * a_dim1], &c__1, &work[1], 
		&c__1, &a[*k + i + (i + 1) * a_dim1], lda);

/*        apply reflection to A(k+i:n,k+i:n) from the left and the right

          compute  y := tau * A * u */

	i__2 = *n - *k - i + 1;
	zhemv_("Lower", &i__2, &tau, &a[*k + i + (*k + i) * a_dim1], lda, &a[*
		k + i + i * a_dim1], &c__1, &c_b1, &work[1], &c__1);

/*        compute  v := y - 1/2 * tau * ( y, u ) * u */

	z__3.r = -.5, z__3.i = 0.;
	z__2.r = z__3.r * tau.r - z__3.i * tau.i, z__2.i = z__3.r * tau.i + 
		z__3.i * tau.r;
	i__2 = *n - *k - i + 1;
	zdotc_(&z__4, &i__2, &work[1], &c__1, &a[*k + i + i * a_dim1], &c__1);
	z__1.r = z__2.r * z__4.r - z__2.i * z__4.i, z__1.i = z__2.r * z__4.i 
		+ z__2.i * z__4.r;
	alpha.r = z__1.r, alpha.i = z__1.i;
	i__2 = *n - *k - i + 1;
	zaxpy_(&i__2, &alpha, &a[*k + i + i * a_dim1], &c__1, &work[1], &c__1)
		;

/*        apply hermitian rank-2 update to A(k+i:n,k+i:n) */

	i__2 = *n - *k - i + 1;
	z__1.r = -1., z__1.i = 0.;
	zher2_("Lower", &i__2, &z__1, &a[*k + i + i * a_dim1], &c__1, &work[1]
		, &c__1, &a[*k + i + (*k + i) * a_dim1], lda);

	i__2 = *k + i + i * a_dim1;
	z__1.r = -wa.r, z__1.i = -wa.i;
	a[i__2].r = z__1.r, a[i__2].i = z__1.i;
	i__2 = *n;
	for (j = *k + i + 1; j <= i__2; ++j) {
	    i__3 = j + i * a_dim1;
	    a[i__3].r = 0., a[i__3].i = 0.;
/* L50: */
	}
/* L60: */
    }

/*     Store full hermitian matrix */

    i__1 = *n;
    for (j = 1; j <= i__1; ++j) {
	i__2 = *n;
	for (i = j + 1; i <= i__2; ++i) {
	    i__3 = j + i * a_dim1;
	    d_cnjg(&z__1, &a[i + j * a_dim1]);
	    a[i__3].r = z__1.r, a[i__3].i = z__1.i;
/* L70: */
	}
/* L80: */
    }
    return 0;

/*     End of ZLAGHE */

} /* zlaghe_ */
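/* A minimal usage sketch for zlaghe_ as defined above: generate a random
   4-by-4 Hermitian matrix with prescribed eigenvalues 1..4 and full
   semi-bandwidth (K = N-1).  "f2c.h" is assumed to supply the integer,
   doublereal and doublecomplex types; the helper name is illustrative. */
#include "f2c.h"

extern int zlaghe_(integer *n, integer *k, doublereal *d__,
                   doublecomplex *a, integer *lda, integer *iseed,
                   doublecomplex *work, integer *info);

static integer random_hermitian_sketch(doublecomplex a[16])
{
    integer n = 4, k = 3, lda = 4, info = 0;
    integer iseed[4] = {0, 1, 2, 7};           /* ISEED(4) must be odd  */
    doublereal d__[4] = {1., 2., 3., 4.};      /* eigenvalues of A      */
    doublecomplex work[8];                     /* workspace of size 2*N */

    zlaghe_(&n, &k, d__, a, &lda, iseed, work, &info);
    return info;                               /* 0 on success          */
}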
Example #21
0
/* Subroutine */ int zung2r_(integer *m, integer *n, integer *k, 
	doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
	work, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    doublecomplex z__1;

    /* Local variables */
    integer i__, j, l;
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *), zlarf_(char *, integer *, integer *, 
	    doublecomplex *, integer *, doublecomplex *, doublecomplex *, 
	    integer *, doublecomplex *), xerbla_(char *, integer *);


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZUNG2R generates an m by n complex matrix Q with orthonormal columns, */
/*  which is defined as the first n columns of a product of k elementary */
/*  reflectors of order m */

/*        Q  =  H(1) H(2) . . . H(k) */

/*  as returned by ZGEQRF. */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix Q. M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix Q. M >= N >= 0. */

/*  K       (input) INTEGER */
/*          The number of elementary reflectors whose product defines the */
/*          matrix Q. N >= K >= 0. */

/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
/*          On entry, the i-th column must contain the vector which */
/*          defines the elementary reflector H(i), for i = 1,2,...,k, as */
/*          returned by ZGEQRF in the first k columns of its array */
/*          argument A. */
/*          On exit, the m by n matrix Q. */

/*  LDA     (input) INTEGER */
/*          The first dimension of the array A. LDA >= max(1,M). */

/*  TAU     (input) COMPLEX*16 array, dimension (K) */
/*          TAU(i) must contain the scalar factor of the elementary */
/*          reflector H(i), as returned by ZGEQRF. */

/*  WORK    (workspace) COMPLEX*16 array, dimension (N) */

/*  INFO    (output) INTEGER */
/*          = 0: successful exit */
/*          < 0: if INFO = -i, the i-th argument has an illegal value */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input arguments */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --tau;
    --work;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0 || *n > *m) {
	*info = -2;
    } else if (*k < 0 || *k > *n) {
	*info = -3;
    } else if (*lda < max(1,*m)) {
	*info = -5;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZUNG2R", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*n <= 0) {
	return 0;
    }

/*     Initialise columns k+1:n to columns of the unit matrix */

    i__1 = *n;
    for (j = *k + 1; j <= i__1; ++j) {
	i__2 = *m;
	for (l = 1; l <= i__2; ++l) {
	    i__3 = l + j * a_dim1;
	    a[i__3].r = 0., a[i__3].i = 0.;
/* L10: */
	}
	i__2 = j + j * a_dim1;
	a[i__2].r = 1., a[i__2].i = 0.;
/* L20: */
    }

    for (i__ = *k; i__ >= 1; --i__) {

/*        Apply H(i) to A(i:m,i:n) from the left */

	if (i__ < *n) {
	    i__1 = i__ + i__ * a_dim1;
	    a[i__1].r = 1., a[i__1].i = 0.;
	    i__1 = *m - i__ + 1;
	    i__2 = *n - i__;
	    zlarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[
		    i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
	}
	if (i__ < *m) {
	    i__1 = *m - i__;
	    i__2 = i__;
	    z__1.r = -tau[i__2].r, z__1.i = -tau[i__2].i;
	    zscal_(&i__1, &z__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
	}
	i__1 = i__ + i__ * a_dim1;
	i__2 = i__;
	z__1.r = 1. - tau[i__2].r, z__1.i = 0. - tau[i__2].i;
	a[i__1].r = z__1.r, a[i__1].i = z__1.i;

/*        Set A(1:i-1,i) to zero */

	i__1 = i__ - 1;
	for (l = 1; l <= i__1; ++l) {
	    i__2 = l + i__ * a_dim1;
	    a[i__2].r = 0., a[i__2].i = 0.;
/* L30: */
	}
/* L40: */
    }
    return 0;

/*     End of ZUNG2R */

} /* zung2r_ */
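/* A usage sketch for zung2r_: factor a tall matrix with ZGEQRF, then form
   the m-by-n Q explicitly from the reflectors.  The ZGEQRF argument list
   (M, N, A, LDA, TAU, WORK, LWORK, INFO) is assumed here, as is "f2c.h";
   the helper name and the 64-element buffers are illustrative. */
#include "f2c.h"

extern int zgeqrf_(integer *m, integer *n, doublecomplex *a, integer *lda,
                   doublecomplex *tau, doublecomplex *work, integer *lwork,
                   integer *info);
extern int zung2r_(integer *m, integer *n, integer *k, doublecomplex *a,
                   integer *lda, doublecomplex *tau, doublecomplex *work,
                   integer *info);

static integer explicit_q_sketch(doublecomplex *a, integer m, integer n)
{
    /* a holds an m-by-n column-major matrix with leading dimension m */
    integer lda = m, k = n, lwork = n, info = 0;
    doublecomplex tau[64], work[64];           /* enough for n <= 64   */

    if (n > 64 || n > m)
        return -1;
    zgeqrf_(&m, &n, a, &lda, tau, work, &lwork, &info);
    if (info == 0)
        zung2r_(&m, &n, &k, a, &lda, tau, work, &info);
    return info;                               /* a now holds Q if 0   */
}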
Example #22
0
/* Subroutine */ int ztptri_(char *uplo, char *diag, integer *n, 
	doublecomplex *ap, integer *info)
{
    /* System generated locals */
    integer i__1, i__2;
    doublecomplex z__1;

    /* Builtin functions */
    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);

    /* Local variables */
    integer j, jc, jj;
    doublecomplex ajj;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *);
    logical upper;
    extern /* Subroutine */ int ztpmv_(char *, char *, char *, integer *, 
	    doublecomplex *, doublecomplex *, integer *), xerbla_(char *, integer *);
    integer jclast;
    logical nounit;


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZTPTRI computes the inverse of a complex upper or lower triangular */
/*  matrix A stored in packed format. */

/*  Arguments */
/*  ========= */

/*  UPLO    (input) CHARACTER*1 */
/*          = 'U':  A is upper triangular; */
/*          = 'L':  A is lower triangular. */

/*  DIAG    (input) CHARACTER*1 */
/*          = 'N':  A is non-unit triangular; */
/*          = 'U':  A is unit triangular. */

/*  N       (input) INTEGER */
/*          The order of the matrix A.  N >= 0. */

/*  AP      (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
/*          On entry, the upper or lower triangular matrix A, stored */
/*          columnwise in a linear array.  The j-th column of A is stored */
/*          in the array AP as follows: */
/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
/*          if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n. */
/*          See below for further details. */
/*          On exit, the (triangular) inverse of the original matrix, in */
/*          the same packed storage format. */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value */
/*          > 0:  if INFO = i, A(i,i) is exactly zero.  The triangular */
/*                matrix is singular and its inverse can not be computed. */

/*  Further Details */
/*  =============== */

/*  A triangular matrix A can be transferred to packed storage using one */
/*  of the following program segments: */

/*  UPLO = 'U':                      UPLO = 'L': */

/*        JC = 1                           JC = 1 */
/*        DO 2 J = 1, N                    DO 2 J = 1, N */
/*           DO 1 I = 1, J                    DO 1 I = J, N */
/*              AP(JC+I-1) = A(I,J)              AP(JC+I-J) = A(I,J) */
/*      1    CONTINUE                    1    CONTINUE */
/*           JC = JC + J                      JC = JC + N - J + 1 */
/*      2 CONTINUE                       2 CONTINUE */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input parameters. */

    /* Parameter adjustments */
    --ap;

    /* Function Body */
    *info = 0;
    upper = lsame_(uplo, "U");
    nounit = lsame_(diag, "N");
    if (! upper && ! lsame_(uplo, "L")) {
	*info = -1;
    } else if (! nounit && ! lsame_(diag, "U")) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZTPTRI", &i__1);
	return 0;
    }

/*     Check for singularity if non-unit. */

    if (nounit) {
	if (upper) {
	    jj = 0;
	    i__1 = *n;
	    for (*info = 1; *info <= i__1; ++(*info)) {
		jj += *info;
		i__2 = jj;
		if (ap[i__2].r == 0. && ap[i__2].i == 0.) {
		    return 0;
		}
/* L10: */
	    }
	} else {
	    jj = 1;
	    i__1 = *n;
	    for (*info = 1; *info <= i__1; ++(*info)) {
		i__2 = jj;
		if (ap[i__2].r == 0. && ap[i__2].i == 0.) {
		    return 0;
		}
		jj = jj + *n - *info + 1;
/* L20: */
	    }
	}
	*info = 0;
    }

    if (upper) {

/*        Compute inverse of upper triangular matrix. */

	jc = 1;
	i__1 = *n;
	for (j = 1; j <= i__1; ++j) {
	    if (nounit) {
		i__2 = jc + j - 1;
		z_div(&z__1, &c_b1, &ap[jc + j - 1]);
		ap[i__2].r = z__1.r, ap[i__2].i = z__1.i;
		i__2 = jc + j - 1;
		z__1.r = -ap[i__2].r, z__1.i = -ap[i__2].i;
		ajj.r = z__1.r, ajj.i = z__1.i;
	    } else {
		z__1.r = -1., z__1.i = -0.;
		ajj.r = z__1.r, ajj.i = z__1.i;
	    }

/*           Compute elements 1:j-1 of j-th column. */

	    i__2 = j - 1;
	    ztpmv_("Upper", "No transpose", diag, &i__2, &ap[1], &ap[jc], &
		    c__1);
	    i__2 = j - 1;
	    zscal_(&i__2, &ajj, &ap[jc], &c__1);
	    jc += j;
/* L30: */
	}

    } else {

/*        Compute inverse of lower triangular matrix. */

	jc = *n * (*n + 1) / 2;
	for (j = *n; j >= 1; --j) {
	    if (nounit) {
		i__1 = jc;
		z_div(&z__1, &c_b1, &ap[jc]);
		ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
		i__1 = jc;
		z__1.r = -ap[i__1].r, z__1.i = -ap[i__1].i;
		ajj.r = z__1.r, ajj.i = z__1.i;
	    } else {
		z__1.r = -1., z__1.i = -0.;
		ajj.r = z__1.r, ajj.i = z__1.i;
	    }
	    if (j < *n) {

/*              Compute elements j+1:n of j-th column. */

		i__1 = *n - j;
		ztpmv_("Lower", "No transpose", diag, &i__1, &ap[jclast], &ap[
			jc + 1], &c__1);
		i__1 = *n - j;
		zscal_(&i__1, &ajj, &ap[jc + 1], &c__1);
	    }
	    jclast = jc;
	    jc = jc - *n + j - 2;
/* L40: */
	}
    }

    return 0;

/*     End of ZTPTRI */

} /* ztptri_ */
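/* A usage sketch for ztptri_: pack the upper triangle of a 3-by-3
   non-unit triangular matrix columnwise (AP(JC+I-1) = A(I,J), exactly as
   in the Further Details above) and invert it in place.  "f2c.h" is
   assumed; the helper name is illustrative. */
#include "f2c.h"

extern int ztptri_(char *uplo, char *diag, integer *n,
                   doublecomplex *ap, integer *info);

static integer invert_packed_upper_sketch(void)
{
    /* A = [ 1 2 3 ; 0 4 5 ; 0 0 6 ]  packed as  AP = {1, 2, 4, 3, 5, 6} */
    doublecomplex ap[6] = {
        {1., 0.}, {2., 0.}, {4., 0.}, {3., 0.}, {5., 0.}, {6., 0.}
    };
    integer n = 3, info = 0;

    ztptri_("U", "N", &n, ap, &info);   /* AP now holds inv(A), packed */
    return info;                        /* > 0 flags a zero A(i,i)     */
}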
Example #23
0
/* Subroutine */ int zpst01_(char *uplo, integer *n, doublecomplex *a, 
	integer *lda, doublecomplex *afac, integer *ldafac, doublecomplex *
	perm, integer *ldperm, integer *piv, doublereal *rwork, doublereal *
	resid, integer *rank)
{
    /* System generated locals */
    integer a_dim1, a_offset, afac_dim1, afac_offset, perm_dim1, perm_offset, 
	    i__1, i__2, i__3, i__4, i__5;
    doublereal d__1;
    doublecomplex z__1;

    /* Builtin functions */
    double d_imag(doublecomplex *);
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    integer i__, j, k;
    doublecomplex tc;
    doublereal tr, eps;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int zher_(char *, integer *, doublereal *, 
	    doublecomplex *, integer *, doublecomplex *, integer *), zscal_(
	    integer *, doublecomplex *, doublecomplex *, integer *), ztrmv_(
	    char *, char *, char *, integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *);
    extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *, 
	    doublecomplex *, integer *, doublecomplex *, integer *);
    extern doublereal dlamch_(char *), zlanhe_(char *, char *, integer *, 
	    doublecomplex *, integer *, doublereal *);
    doublereal anorm;


/*  -- LAPACK test routine (version 3.1) -- */
/*     Craig Lucas, University of Manchester / NAG Ltd. */
/*     October, 2008 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZPST01 reconstructs an Hermitian positive semidefinite matrix A */
/*  from its L or U factors and the permutation matrix P and computes */
/*  the residual */
/*     norm( P*L*L'*P' - A ) / ( N * norm(A) * EPS ) or */
/*     norm( P*U'*U*P' - A ) / ( N * norm(A) * EPS ), */
/*  where EPS is the machine epsilon, L' is the conjugate transpose of L, */
/*  and U' is the conjugate transpose of U. */

/*  Arguments */
/*  ========== */

/*  UPLO    (input) CHARACTER*1 */
/*          Specifies whether the upper or lower triangular part of the */
/*          Hermitian matrix A is stored: */
/*          = 'U':  Upper triangular */
/*          = 'L':  Lower triangular */

/*  N       (input) INTEGER */
/*          The number of rows and columns of the matrix A.  N >= 0. */

/*  A       (input) COMPLEX*16 array, dimension (LDA,N) */
/*          The original Hermitian matrix A. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,N) */

/*  AFAC    (input) COMPLEX*16 array, dimension (LDAFAC,N) */
/*          The factor L or U from the L*L' or U'*U */
/*          factorization of A. */

/*  LDAFAC  (input) INTEGER */
/*          The leading dimension of the array AFAC.  LDAFAC >= max(1,N). */

/*  PERM    (output) COMPLEX*16 array, dimension (LDPERM,N) */
/*          Overwritten with the reconstructed matrix, and then with the */
/*          difference P*L*L'*P' - A (or P*U'*U*P' - A) */

/*  LDPERM  (input) INTEGER */
/*          The leading dimension of the array PERM. */
/*          LDPERM >= max(1,N). */

/*  PIV     (input) INTEGER array, dimension (N) */
/*          PIV is such that the nonzero entries are */
/*          P( PIV( K ), K ) = 1. */

/*  RWORK   (workspace) DOUBLE PRECISION array, dimension (N) */

/*  RESID   (output) DOUBLE PRECISION */
/*          If UPLO = 'L', norm(L*L' - A) / ( N * norm(A) * EPS ) */
/*          If UPLO = 'U', norm(U'*U - A) / ( N * norm(A) * EPS ) */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Quick exit if N = 0. */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    afac_dim1 = *ldafac;
    afac_offset = 1 + afac_dim1;
    afac -= afac_offset;
    perm_dim1 = *ldperm;
    perm_offset = 1 + perm_dim1;
    perm -= perm_offset;
    --piv;
    --rwork;

    /* Function Body */
    if (*n <= 0) {
	*resid = 0.;
	return 0;
    }

/*     Exit with RESID = 1/EPS if ANORM = 0. */

    eps = dlamch_("Epsilon");
    anorm = zlanhe_("1", uplo, n, &a[a_offset], lda, &rwork[1]);
    if (anorm <= 0.) {
	*resid = 1. / eps;
	return 0;
    }

/*     Check the imaginary parts of the diagonal elements and return with */
/*     an error code if any are nonzero. */

    i__1 = *n;
    for (j = 1; j <= i__1; ++j) {
	if (d_imag(&afac[j + j * afac_dim1]) != 0.) {
	    *resid = 1. / eps;
	    return 0;
	}
/* L100: */
    }

/*     Compute the product U'*U, overwriting U. */

    if (lsame_(uplo, "U")) {

	if (*rank < *n) {
	    i__1 = *n;
	    for (j = *rank + 1; j <= i__1; ++j) {
		i__2 = j;
		for (i__ = *rank + 1; i__ <= i__2; ++i__) {
		    i__3 = i__ + j * afac_dim1;
		    afac[i__3].r = 0., afac[i__3].i = 0.;
/* L110: */
		}
/* L120: */
	    }
	}

	for (k = *n; k >= 1; --k) {

/*           Compute the (K,K) element of the result. */

	    zdotc_(&z__1, &k, &afac[k * afac_dim1 + 1], &c__1, &afac[k * 
		    afac_dim1 + 1], &c__1);
	    tr = z__1.r;
	    i__1 = k + k * afac_dim1;
	    afac[i__1].r = tr, afac[i__1].i = 0.;

/*           Compute the rest of column K. */

	    i__1 = k - 1;
	    ztrmv_("Upper", "Conjugate", "Non-unit", &i__1, &afac[afac_offset]
, ldafac, &afac[k * afac_dim1 + 1], &c__1);

/* L130: */
	}

/*     Compute the product L*L', overwriting L. */

    } else {

	if (*rank < *n) {
	    i__1 = *n;
	    for (j = *rank + 1; j <= i__1; ++j) {
		i__2 = *n;
		for (i__ = j; i__ <= i__2; ++i__) {
		    i__3 = i__ + j * afac_dim1;
		    afac[i__3].r = 0., afac[i__3].i = 0.;
/* L140: */
		}
/* L150: */
	    }
	}

	for (k = *n; k >= 1; --k) {
/*           Add a multiple of column K of the factor L to each of */
/*           columns K+1 through N. */

	    if (k + 1 <= *n) {
		i__1 = *n - k;
		zher_("Lower", &i__1, &c_b20, &afac[k + 1 + k * afac_dim1], &
			c__1, &afac[k + 1 + (k + 1) * afac_dim1], ldafac);
	    }

/*           Scale column K by the diagonal element. */

	    i__1 = k + k * afac_dim1;
	    tc.r = afac[i__1].r, tc.i = afac[i__1].i;
	    i__1 = *n - k + 1;
	    zscal_(&i__1, &tc, &afac[k + k * afac_dim1], &c__1);
/* L160: */
	}

    }

/*        Form P*L*L'*P' or P*U'*U*P' */

    if (lsame_(uplo, "U")) {

	i__1 = *n;
	for (j = 1; j <= i__1; ++j) {
	    i__2 = *n;
	    for (i__ = 1; i__ <= i__2; ++i__) {
		if (piv[i__] <= piv[j]) {
		    if (i__ <= j) {
			i__3 = piv[i__] + piv[j] * perm_dim1;
			i__4 = i__ + j * afac_dim1;
			perm[i__3].r = afac[i__4].r, perm[i__3].i = afac[i__4]
				.i;
		    } else {
			i__3 = piv[i__] + piv[j] * perm_dim1;
			d_cnjg(&z__1, &afac[j + i__ * afac_dim1]);
			perm[i__3].r = z__1.r, perm[i__3].i = z__1.i;
		    }
		}
/* L170: */
	    }
/* L180: */
	}


    } else {

	i__1 = *n;
	for (j = 1; j <= i__1; ++j) {
	    i__2 = *n;
	    for (i__ = 1; i__ <= i__2; ++i__) {
		if (piv[i__] >= piv[j]) {
		    if (i__ >= j) {
			i__3 = piv[i__] + piv[j] * perm_dim1;
			i__4 = i__ + j * afac_dim1;
			perm[i__3].r = afac[i__4].r, perm[i__3].i = afac[i__4]
				.i;
		    } else {
			i__3 = piv[i__] + piv[j] * perm_dim1;
			d_cnjg(&z__1, &afac[j + i__ * afac_dim1]);
			perm[i__3].r = z__1.r, perm[i__3].i = z__1.i;
		    }
		}
/* L190: */
	    }
/* L200: */
	}

    }

/*     Compute the difference  P*L*L'*P' - A (or P*U'*U*P' - A). */

    if (lsame_(uplo, "U")) {
	i__1 = *n;
	for (j = 1; j <= i__1; ++j) {
	    i__2 = j - 1;
	    for (i__ = 1; i__ <= i__2; ++i__) {
		i__3 = i__ + j * perm_dim1;
		i__4 = i__ + j * perm_dim1;
		i__5 = i__ + j * a_dim1;
		z__1.r = perm[i__4].r - a[i__5].r, z__1.i = perm[i__4].i - a[
			i__5].i;
		perm[i__3].r = z__1.r, perm[i__3].i = z__1.i;
/* L210: */
	    }
	    i__2 = j + j * perm_dim1;
	    i__3 = j + j * perm_dim1;
	    i__4 = j + j * a_dim1;
	    d__1 = a[i__4].r;
	    z__1.r = perm[i__3].r - d__1, z__1.i = perm[i__3].i;
	    perm[i__2].r = z__1.r, perm[i__2].i = z__1.i;
/* L220: */
	}
    } else {
	i__1 = *n;
	for (j = 1; j <= i__1; ++j) {
	    i__2 = j + j * perm_dim1;
	    i__3 = j + j * perm_dim1;
	    i__4 = j + j * a_dim1;
	    d__1 = a[i__4].r;
	    z__1.r = perm[i__3].r - d__1, z__1.i = perm[i__3].i;
	    perm[i__2].r = z__1.r, perm[i__2].i = z__1.i;
	    i__2 = *n;
	    for (i__ = j + 1; i__ <= i__2; ++i__) {
		i__3 = i__ + j * perm_dim1;
		i__4 = i__ + j * perm_dim1;
		i__5 = i__ + j * a_dim1;
		z__1.r = perm[i__4].r - a[i__5].r, z__1.i = perm[i__4].i - a[
			i__5].i;
		perm[i__3].r = z__1.r, perm[i__3].i = z__1.i;
/* L230: */
	    }
/* L240: */
	}
    }

/*     Compute norm( P*L*L'P - A ) / ( N * norm(A) * EPS ), or */
/*     ( P*U'*U*P' - A )/ ( N * norm(A) * EPS ). */

    *resid = zlanhe_("1", uplo, n, &perm[perm_offset], ldafac, &rwork[1]);

    *resid = *resid / (doublereal) (*n) / anorm / eps;

    return 0;

/*     End of ZPST01 */

} /* zpst01_ */
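/* A usage sketch for zpst01_: factor a copy of A with the pivoted
   Cholesky routine and measure the reconstruction residual.  The ZPSTRF
   argument list (UPLO, N, A, LDA, PIV, RANK, TOL, WORK, INFO) is an
   assumption here, as is "f2c.h"; the helper name and the NMAX bound are
   illustrative.  A is an n-by-n column-major Hermitian matrix with
   leading dimension n. */
#include <string.h>
#include "f2c.h"

extern int zpstrf_(char *uplo, integer *n, doublecomplex *a, integer *lda,
                   integer *piv, integer *rank, doublereal *tol,
                   doublereal *work, integer *info);
extern int zpst01_(char *uplo, integer *n, doublecomplex *a, integer *lda,
                   doublecomplex *afac, integer *ldafac, doublecomplex *perm,
                   integer *ldperm, integer *piv, doublereal *rwork,
                   doublereal *resid, integer *rank);

static doublereal check_pivoted_cholesky_sketch(doublecomplex *a, integer n)
{
    enum { NMAX = 8 };
    doublecomplex afac[NMAX * NMAX], perm[NMAX * NMAX];
    doublereal work[2 * NMAX], rwork[NMAX], tol = -1., resid = 0.;
    integer piv[NMAX], lda = n, rank = 0, info = 0;

    if (n < 1 || n > NMAX)
        return -1.;
    memcpy(afac, a, sizeof(doublecomplex) * (size_t) (n * n));
    zpstrf_("L", &n, afac, &lda, piv, &rank, &tol, work, &info);
    zpst01_("L", &n, a, &lda, afac, &lda, perm, &lda, piv, rwork,
            &resid, &rank);
    return resid;   /* should be of order 1 for a healthy factorization */
}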
Example #24
0
void zscal( int n, doublecomplex* alpha, doublecomplex *x, int incx)
{
    zscal_(&n, alpha, x, &incx);
}
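/* A short usage sketch for the zscal wrapper above: scale a small complex
   vector in place (types as provided by the surrounding headers/f2c.h). */
static void scale_vector_sketch(void)
{
    doublecomplex x[3] = { {1., 0.}, {2., -1.}, {0., 3.} };
    doublecomplex alpha = {0.5, 0.};

    zscal(3, &alpha, x, 1);    /* x := 0.5 * x */
}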
Example #25
0
/*! _zrovector*comple operator */
inline _zrovector operator*(const _zrovector& vec, const comple& d)
{VERBOSE_REPORT;
  zscal_(vec.l, d, vec.array, 1);  
  return vec;
}
Example #26
0
/* Subroutine */ int zsptrf_(char *uplo, integer *n, doublecomplex *ap, 
	integer *ipiv, integer *info)
{
    /* System generated locals */
    integer i__1, i__2, i__3, i__4, i__5, i__6;
    doublereal d__1, d__2, d__3, d__4;
    doublecomplex z__1, z__2, z__3, z__4;

    /* Builtin functions */
    double sqrt(doublereal), d_imag(doublecomplex *);
    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);

    /* Local variables */
    integer i__, j, k;
    doublecomplex t, r1, d11, d12, d21, d22;
    integer kc, kk, kp;
    doublecomplex wk;
    integer kx, knc, kpc, npp;
    doublecomplex wkm1, wkp1;
    integer imax, jmax;
    extern /* Subroutine */ int zspr_(char *, integer *, doublecomplex *, 
	    doublecomplex *, integer *, doublecomplex *);
    doublereal alpha;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *);
    integer kstep;
    logical upper;
    extern /* Subroutine */ int zswap_(integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *);
    doublereal absakk;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    doublereal colmax;
    extern integer izamax_(integer *, doublecomplex *, integer *);
    doublereal rowmax;


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZSPTRF computes the factorization of a complex symmetric matrix A */
/*  stored in packed format using the Bunch-Kaufman diagonal pivoting */
/*  method: */

/*     A = U*D*U**T  or  A = L*D*L**T */

/*  where U (or L) is a product of permutation and unit upper (lower) */
/*  triangular matrices, and D is symmetric and block diagonal with */
/*  1-by-1 and 2-by-2 diagonal blocks. */

/*  Arguments */
/*  ========= */

/*  UPLO    (input) CHARACTER*1 */
/*          = 'U':  Upper triangle of A is stored; */
/*          = 'L':  Lower triangle of A is stored. */

/*  N       (input) INTEGER */
/*          The order of the matrix A.  N >= 0. */

/*  AP      (input/output) COMPLEX*16 array, dimension (N*(N+1)/2) */
/*          On entry, the upper or lower triangle of the symmetric matrix */
/*          A, packed columnwise in a linear array.  The j-th column of A */
/*          is stored in the array AP as follows: */
/*          if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j; */
/*          if UPLO = 'L', AP(i + (j-1)*(2n-j)/2) = A(i,j) for j<=i<=n. */

/*          On exit, the block diagonal matrix D and the multipliers used */
/*          to obtain the factor U or L, stored as a packed triangular */
/*          matrix overwriting A (see below for further details). */

/*  IPIV    (output) INTEGER array, dimension (N) */
/*          Details of the interchanges and the block structure of D. */
/*          If IPIV(k) > 0, then rows and columns k and IPIV(k) were */
/*          interchanged and D(k,k) is a 1-by-1 diagonal block. */
/*          If UPLO = 'U' and IPIV(k) = IPIV(k-1) < 0, then rows and */
/*          columns k-1 and -IPIV(k) were interchanged and D(k-1:k,k-1:k) */
/*          is a 2-by-2 diagonal block.  If UPLO = 'L' and IPIV(k) = */
/*          IPIV(k+1) < 0, then rows and columns k+1 and -IPIV(k) were */
/*          interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. */

/*  INFO    (output) INTEGER */
/*          = 0: successful exit */
/*          < 0: if INFO = -i, the i-th argument had an illegal value */
/*          > 0: if INFO = i, D(i,i) is exactly zero.  The factorization */
/*               has been completed, but the block diagonal matrix D is */
/*               exactly singular, and division by zero will occur if it */
/*               is used to solve a system of equations. */

/*  Further Details */
/*  =============== */

/*  5-96 - Based on modifications by J. Lewis, Boeing Computer Services */
/*         Company */

/*  If UPLO = 'U', then A = U*D*U', where */
/*     U = P(n)*U(n)* ... *P(k)U(k)* ..., */
/*  i.e., U is a product of terms P(k)*U(k), where k decreases from n to */
/*  1 in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
/*  and 2-by-2 diagonal blocks D(k).  P(k) is a permutation matrix as */
/*  defined by IPIV(k), and U(k) is a unit upper triangular matrix, such */
/*  that if the diagonal block D(k) is of order s (s = 1 or 2), then */

/*             (   I    v    0   )   k-s */
/*     U(k) =  (   0    I    0   )   s */
/*             (   0    0    I   )   n-k */
/*                k-s   s   n-k */

/*  If s = 1, D(k) overwrites A(k,k), and v overwrites A(1:k-1,k). */
/*  If s = 2, the upper triangle of D(k) overwrites A(k-1,k-1), A(k-1,k), */
/*  and A(k,k), and v overwrites A(1:k-2,k-1:k). */

/*  If UPLO = 'L', then A = L*D*L', where */
/*     L = P(1)*L(1)* ... *P(k)*L(k)* ..., */
/*  i.e., L is a product of terms P(k)*L(k), where k increases from 1 to */
/*  n in steps of 1 or 2, and D is a block diagonal matrix with 1-by-1 */
/*  and 2-by-2 diagonal blocks D(k).  P(k) is a permutation matrix as */
/*  defined by IPIV(k), and L(k) is a unit lower triangular matrix, such */
/*  that if the diagonal block D(k) is of order s (s = 1 or 2), then */

/*             (   I    0     0   )  k-1 */
/*     L(k) =  (   0    I     0   )  s */
/*             (   0    v     I   )  n-k-s+1 */
/*                k-1   s  n-k-s+1 */

/*  If s = 1, D(k) overwrites A(k,k), and v overwrites A(k+1:n,k). */
/*  If s = 2, the lower triangle of D(k) overwrites A(k,k), A(k+1,k), */
/*  and A(k+1,k+1), and v overwrites A(k+2:n,k:k+1). */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Statement Functions .. */
/*     .. */
/*     .. Statement Function definitions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input parameters. */

    /* Parameter adjustments */
    --ipiv;
    --ap;

    /* Function Body */
    *info = 0;
    upper = lsame_(uplo, "U");
    if (! upper && ! lsame_(uplo, "L")) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZSPTRF", &i__1);
	return 0;
    }

/*     Initialize ALPHA for use in choosing pivot block size. */

    alpha = (sqrt(17.) + 1.) / 8.;

    if (upper) {

/*        Factorize A as U*D*U' using the upper triangle of A */

/*        K is the main loop index, decreasing from N to 1 in steps of */
/*        1 or 2 */

	k = *n;
	kc = (*n - 1) * *n / 2 + 1;
L10:
	knc = kc;

/*        If K < 1, exit from loop */

	if (k < 1) {
	    goto L110;
	}
	kstep = 1;

/*        Determine rows and columns to be interchanged and whether */
/*        a 1-by-1 or 2-by-2 pivot block will be used */

	i__1 = kc + k - 1;
	absakk = (d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[kc + k - 
		1]), abs(d__2));

/*        IMAX is the row-index of the largest off-diagonal element in */
/*        column K, and COLMAX is its absolute value */

	if (k > 1) {
	    i__1 = k - 1;
	    imax = izamax_(&i__1, &ap[kc], &c__1);
	    i__1 = kc + imax - 1;
	    colmax = (d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[kc + 
		    imax - 1]), abs(d__2));
	} else {
	    colmax = 0.;
	}

	if (max(absakk,colmax) == 0.) {

/*           Column K is zero: set INFO and continue */

	    if (*info == 0) {
		*info = k;
	    }
	    kp = k;
	} else {
	    if (absakk >= alpha * colmax) {

/*              no interchange, use 1-by-1 pivot block */

		kp = k;
	    } else {

/*              JMAX is the column-index of the largest off-diagonal */
/*              element in row IMAX, and ROWMAX is its absolute value */

		rowmax = 0.;
		jmax = imax;
		kx = imax * (imax + 1) / 2 + imax;
		i__1 = k;
		for (j = imax + 1; j <= i__1; ++j) {
		    i__2 = kx;
		    if ((d__1 = ap[i__2].r, abs(d__1)) + (d__2 = d_imag(&ap[
			    kx]), abs(d__2)) > rowmax) {
			i__2 = kx;
			rowmax = (d__1 = ap[i__2].r, abs(d__1)) + (d__2 = 
				d_imag(&ap[kx]), abs(d__2));
			jmax = j;
		    }
		    kx += j;
/* L20: */
		}
		kpc = (imax - 1) * imax / 2 + 1;
		if (imax > 1) {
		    i__1 = imax - 1;
		    jmax = izamax_(&i__1, &ap[kpc], &c__1);
/* Computing MAX */
		    i__1 = kpc + jmax - 1;
		    d__3 = rowmax, d__4 = (d__1 = ap[i__1].r, abs(d__1)) + (
			    d__2 = d_imag(&ap[kpc + jmax - 1]), abs(d__2));
		    rowmax = max(d__3,d__4);
		}

		if (absakk >= alpha * colmax * (colmax / rowmax)) {

/*                 no interchange, use 1-by-1 pivot block */

		    kp = k;
		} else /* if(complicated condition) */ {
		    i__1 = kpc + imax - 1;
		    if ((d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[
			    kpc + imax - 1]), abs(d__2)) >= alpha * rowmax) {

/*                 interchange rows and columns K and IMAX, use 1-by-1 */
/*                 pivot block */

			kp = imax;
		    } else {

/*                 interchange rows and columns K-1 and IMAX, use 2-by-2 */
/*                 pivot block */

			kp = imax;
			kstep = 2;
		    }
		}
	    }

	    kk = k - kstep + 1;
	    if (kstep == 2) {
		knc = knc - k + 1;
	    }
	    if (kp != kk) {

/*              Interchange rows and columns KK and KP in the leading */
/*              submatrix A(1:k,1:k) */

		i__1 = kp - 1;
		zswap_(&i__1, &ap[knc], &c__1, &ap[kpc], &c__1);
		kx = kpc + kp - 1;
		i__1 = kk - 1;
		for (j = kp + 1; j <= i__1; ++j) {
		    kx = kx + j - 1;
		    i__2 = knc + j - 1;
		    t.r = ap[i__2].r, t.i = ap[i__2].i;
		    i__2 = knc + j - 1;
		    i__3 = kx;
		    ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
		    i__2 = kx;
		    ap[i__2].r = t.r, ap[i__2].i = t.i;
/* L30: */
		}
		i__1 = knc + kk - 1;
		t.r = ap[i__1].r, t.i = ap[i__1].i;
		i__1 = knc + kk - 1;
		i__2 = kpc + kp - 1;
		ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
		i__1 = kpc + kp - 1;
		ap[i__1].r = t.r, ap[i__1].i = t.i;
		if (kstep == 2) {
		    i__1 = kc + k - 2;
		    t.r = ap[i__1].r, t.i = ap[i__1].i;
		    i__1 = kc + k - 2;
		    i__2 = kc + kp - 1;
		    ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
		    i__1 = kc + kp - 1;
		    ap[i__1].r = t.r, ap[i__1].i = t.i;
		}
	    }

/*           Update the leading submatrix */

	    if (kstep == 1) {

/*              1-by-1 pivot block D(k): column k now holds */

/*              W(k) = U(k)*D(k) */

/*              where U(k) is the k-th column of U */

/*              Perform a rank-1 update of A(1:k-1,1:k-1) as */

/*              A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' */

		z_div(&z__1, &c_b1, &ap[kc + k - 1]);
		r1.r = z__1.r, r1.i = z__1.i;
		i__1 = k - 1;
		z__1.r = -r1.r, z__1.i = -r1.i;
		zspr_(uplo, &i__1, &z__1, &ap[kc], &c__1, &ap[1]);

/*              Store U(k) in column k */

		i__1 = k - 1;
		zscal_(&i__1, &r1, &ap[kc], &c__1);
	    } else {

/*              2-by-2 pivot block D(k): columns k and k-1 now hold */

/*              ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) */

/*              where U(k) and U(k-1) are the k-th and (k-1)-th columns */
/*              of U */

/*              Perform a rank-2 update of A(1:k-2,1:k-2) as */

/*              A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' */
/*                 = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' */

		if (k > 2) {

		    i__1 = k - 1 + (k - 1) * k / 2;
		    d12.r = ap[i__1].r, d12.i = ap[i__1].i;
		    z_div(&z__1, &ap[k - 1 + (k - 2) * (k - 1) / 2], &d12);
		    d22.r = z__1.r, d22.i = z__1.i;
		    z_div(&z__1, &ap[k + (k - 1) * k / 2], &d12);
		    d11.r = z__1.r, d11.i = z__1.i;
		    z__3.r = d11.r * d22.r - d11.i * d22.i, z__3.i = d11.r * 
			    d22.i + d11.i * d22.r;
		    z__2.r = z__3.r - 1., z__2.i = z__3.i - 0.;
		    z_div(&z__1, &c_b1, &z__2);
		    t.r = z__1.r, t.i = z__1.i;
		    z_div(&z__1, &t, &d12);
		    d12.r = z__1.r, d12.i = z__1.i;

		    for (j = k - 2; j >= 1; --j) {
			i__1 = j + (k - 2) * (k - 1) / 2;
			z__3.r = d11.r * ap[i__1].r - d11.i * ap[i__1].i, 
				z__3.i = d11.r * ap[i__1].i + d11.i * ap[i__1]
				.r;
			i__2 = j + (k - 1) * k / 2;
			z__2.r = z__3.r - ap[i__2].r, z__2.i = z__3.i - ap[
				i__2].i;
			z__1.r = d12.r * z__2.r - d12.i * z__2.i, z__1.i = 
				d12.r * z__2.i + d12.i * z__2.r;
			wkm1.r = z__1.r, wkm1.i = z__1.i;
			i__1 = j + (k - 1) * k / 2;
			z__3.r = d22.r * ap[i__1].r - d22.i * ap[i__1].i, 
				z__3.i = d22.r * ap[i__1].i + d22.i * ap[i__1]
				.r;
			i__2 = j + (k - 2) * (k - 1) / 2;
			z__2.r = z__3.r - ap[i__2].r, z__2.i = z__3.i - ap[
				i__2].i;
			z__1.r = d12.r * z__2.r - d12.i * z__2.i, z__1.i = 
				d12.r * z__2.i + d12.i * z__2.r;
			wk.r = z__1.r, wk.i = z__1.i;
			for (i__ = j; i__ >= 1; --i__) {
			    i__1 = i__ + (j - 1) * j / 2;
			    i__2 = i__ + (j - 1) * j / 2;
			    i__3 = i__ + (k - 1) * k / 2;
			    z__3.r = ap[i__3].r * wk.r - ap[i__3].i * wk.i, 
				    z__3.i = ap[i__3].r * wk.i + ap[i__3].i * 
				    wk.r;
			    z__2.r = ap[i__2].r - z__3.r, z__2.i = ap[i__2].i 
				    - z__3.i;
			    i__4 = i__ + (k - 2) * (k - 1) / 2;
			    z__4.r = ap[i__4].r * wkm1.r - ap[i__4].i * 
				    wkm1.i, z__4.i = ap[i__4].r * wkm1.i + ap[
				    i__4].i * wkm1.r;
			    z__1.r = z__2.r - z__4.r, z__1.i = z__2.i - 
				    z__4.i;
			    ap[i__1].r = z__1.r, ap[i__1].i = z__1.i;
/* L40: */
			}
			i__1 = j + (k - 1) * k / 2;
			ap[i__1].r = wk.r, ap[i__1].i = wk.i;
			i__1 = j + (k - 2) * (k - 1) / 2;
			ap[i__1].r = wkm1.r, ap[i__1].i = wkm1.i;
/* L50: */
		    }

		}
	    }
	}

/*        Store details of the interchanges in IPIV */

	if (kstep == 1) {
	    ipiv[k] = kp;
	} else {
	    ipiv[k] = -kp;
	    ipiv[k - 1] = -kp;
	}

/*        Decrease K and return to the start of the main loop */

	k -= kstep;
	kc = knc - k;
	goto L10;

    } else {

/*        Factorize A as L*D*L' using the lower triangle of A */

/*        K is the main loop index, increasing from 1 to N in steps of */
/*        1 or 2 */

	k = 1;
	kc = 1;
	npp = *n * (*n + 1) / 2;
L60:
	knc = kc;

/*        If K > N, exit from loop */

	if (k > *n) {
	    goto L110;
	}
	kstep = 1;

/*        Determine rows and columns to be interchanged and whether */
/*        a 1-by-1 or 2-by-2 pivot block will be used */

	i__1 = kc;
	absakk = (d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[kc]), 
		abs(d__2));

/*        IMAX is the row-index of the largest off-diagonal element in */
/*        column K, and COLMAX is its absolute value */

	if (k < *n) {
	    i__1 = *n - k;
	    imax = k + izamax_(&i__1, &ap[kc + 1], &c__1);
	    i__1 = kc + imax - k;
	    colmax = (d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[kc + 
		    imax - k]), abs(d__2));
	} else {
	    colmax = 0.;
	}

	if (max(absakk,colmax) == 0.) {

/*           Column K is zero: set INFO and continue */

	    if (*info == 0) {
		*info = k;
	    }
	    kp = k;
	} else {
	    if (absakk >= alpha * colmax) {

/*              no interchange, use 1-by-1 pivot block */

		kp = k;
	    } else {

/*              JMAX is the column-index of the largest off-diagonal */
/*              element in row IMAX, and ROWMAX is its absolute value */

		rowmax = 0.;
		kx = kc + imax - k;
		i__1 = imax - 1;
		for (j = k; j <= i__1; ++j) {
		    i__2 = kx;
		    if ((d__1 = ap[i__2].r, abs(d__1)) + (d__2 = d_imag(&ap[
			    kx]), abs(d__2)) > rowmax) {
			i__2 = kx;
			rowmax = (d__1 = ap[i__2].r, abs(d__1)) + (d__2 = 
				d_imag(&ap[kx]), abs(d__2));
			jmax = j;
		    }
		    kx = kx + *n - j;
/* L70: */
		}
		kpc = npp - (*n - imax + 1) * (*n - imax + 2) / 2 + 1;
		if (imax < *n) {
		    i__1 = *n - imax;
		    jmax = imax + izamax_(&i__1, &ap[kpc + 1], &c__1);
/* Computing MAX */
		    i__1 = kpc + jmax - imax;
		    d__3 = rowmax, d__4 = (d__1 = ap[i__1].r, abs(d__1)) + (
			    d__2 = d_imag(&ap[kpc + jmax - imax]), abs(d__2));
		    rowmax = max(d__3,d__4);
		}

		if (absakk >= alpha * colmax * (colmax / rowmax)) {

/*                 no interchange, use 1-by-1 pivot block */

		    kp = k;
		} else /* if(complicated condition) */ {
		    i__1 = kpc;
		    if ((d__1 = ap[i__1].r, abs(d__1)) + (d__2 = d_imag(&ap[
			    kpc]), abs(d__2)) >= alpha * rowmax) {

/*                 interchange rows and columns K and IMAX, use 1-by-1 */
/*                 pivot block */

			kp = imax;
		    } else {

/*                 interchange rows and columns K+1 and IMAX, use 2-by-2 */
/*                 pivot block */

			kp = imax;
			kstep = 2;
		    }
		}
	    }

	    kk = k + kstep - 1;
	    if (kstep == 2) {
		knc = knc + *n - k + 1;
	    }
	    if (kp != kk) {

/*              Interchange rows and columns KK and KP in the trailing */
/*              submatrix A(k:n,k:n) */

		if (kp < *n) {
		    i__1 = *n - kp;
		    zswap_(&i__1, &ap[knc + kp - kk + 1], &c__1, &ap[kpc + 1], 
			     &c__1);
		}
		kx = knc + kp - kk;
		i__1 = kp - 1;
		for (j = kk + 1; j <= i__1; ++j) {
		    kx = kx + *n - j + 1;
		    i__2 = knc + j - kk;
		    t.r = ap[i__2].r, t.i = ap[i__2].i;
		    i__2 = knc + j - kk;
		    i__3 = kx;
		    ap[i__2].r = ap[i__3].r, ap[i__2].i = ap[i__3].i;
		    i__2 = kx;
		    ap[i__2].r = t.r, ap[i__2].i = t.i;
/* L80: */
		}
		i__1 = knc;
		t.r = ap[i__1].r, t.i = ap[i__1].i;
		i__1 = knc;
		i__2 = kpc;
		ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
		i__1 = kpc;
		ap[i__1].r = t.r, ap[i__1].i = t.i;
		if (kstep == 2) {
		    i__1 = kc + 1;
		    t.r = ap[i__1].r, t.i = ap[i__1].i;
		    i__1 = kc + 1;
		    i__2 = kc + kp - k;
		    ap[i__1].r = ap[i__2].r, ap[i__1].i = ap[i__2].i;
		    i__1 = kc + kp - k;
		    ap[i__1].r = t.r, ap[i__1].i = t.i;
		}
	    }

/*           Update the trailing submatrix */

	    if (kstep == 1) {

/*              1-by-1 pivot block D(k): column k now holds */

/*              W(k) = L(k)*D(k) */

/*              where L(k) is the k-th column of L */

		if (k < *n) {

/*                 Perform a rank-1 update of A(k+1:n,k+1:n) as */

/*                 A := A - L(k)*D(k)*L(k)' = A - W(k)*(1/D(k))*W(k)' */

		    z_div(&z__1, &c_b1, &ap[kc]);
		    r1.r = z__1.r, r1.i = z__1.i;
		    i__1 = *n - k;
		    z__1.r = -r1.r, z__1.i = -r1.i;
		    zspr_(uplo, &i__1, &z__1, &ap[kc + 1], &c__1, &ap[kc + *n 
			    - k + 1]);

/*                 Store L(k) in column K */

		    i__1 = *n - k;
		    zscal_(&i__1, &r1, &ap[kc + 1], &c__1);
		}
	    } else {

/*              2-by-2 pivot block D(k): columns K and K+1 now hold */

/*              ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k) */

/*              where L(k) and L(k+1) are the k-th and (k+1)-th columns */
/*              of L */

		if (k < *n - 1) {

/*                 Perform a rank-2 update of A(k+2:n,k+2:n) as */

/*                 A := A - ( L(k) L(k+1) )*D(k)*( L(k) L(k+1) )' */
/*                    = A - ( W(k) W(k+1) )*inv(D(k))*( W(k) W(k+1) )' */

/*                 where L(k) and L(k+1) are the k-th and (k+1)-th */
/*                 columns of L */

		    i__1 = k + 1 + (k - 1) * ((*n << 1) - k) / 2;
		    d21.r = ap[i__1].r, d21.i = ap[i__1].i;
		    z_div(&z__1, &ap[k + 1 + k * ((*n << 1) - k - 1) / 2], &
			    d21);
		    d11.r = z__1.r, d11.i = z__1.i;
		    z_div(&z__1, &ap[k + (k - 1) * ((*n << 1) - k) / 2], &d21)
			    ;
		    d22.r = z__1.r, d22.i = z__1.i;
		    z__3.r = d11.r * d22.r - d11.i * d22.i, z__3.i = d11.r * 
			    d22.i + d11.i * d22.r;
		    z__2.r = z__3.r - 1., z__2.i = z__3.i - 0.;
		    z_div(&z__1, &c_b1, &z__2);
		    t.r = z__1.r, t.i = z__1.i;
		    z_div(&z__1, &t, &d21);
		    d21.r = z__1.r, d21.i = z__1.i;

		    i__1 = *n;
		    for (j = k + 2; j <= i__1; ++j) {
			i__2 = j + (k - 1) * ((*n << 1) - k) / 2;
			z__3.r = d11.r * ap[i__2].r - d11.i * ap[i__2].i, 
				z__3.i = d11.r * ap[i__2].i + d11.i * ap[i__2]
				.r;
			i__3 = j + k * ((*n << 1) - k - 1) / 2;
			z__2.r = z__3.r - ap[i__3].r, z__2.i = z__3.i - ap[
				i__3].i;
			z__1.r = d21.r * z__2.r - d21.i * z__2.i, z__1.i = 
				d21.r * z__2.i + d21.i * z__2.r;
			wk.r = z__1.r, wk.i = z__1.i;
			i__2 = j + k * ((*n << 1) - k - 1) / 2;
			z__3.r = d22.r * ap[i__2].r - d22.i * ap[i__2].i, 
				z__3.i = d22.r * ap[i__2].i + d22.i * ap[i__2]
				.r;
			i__3 = j + (k - 1) * ((*n << 1) - k) / 2;
			z__2.r = z__3.r - ap[i__3].r, z__2.i = z__3.i - ap[
				i__3].i;
			z__1.r = d21.r * z__2.r - d21.i * z__2.i, z__1.i = 
				d21.r * z__2.i + d21.i * z__2.r;
			wkp1.r = z__1.r, wkp1.i = z__1.i;
			i__2 = *n;
			for (i__ = j; i__ <= i__2; ++i__) {
			    i__3 = i__ + (j - 1) * ((*n << 1) - j) / 2;
			    i__4 = i__ + (j - 1) * ((*n << 1) - j) / 2;
			    i__5 = i__ + (k - 1) * ((*n << 1) - k) / 2;
			    z__3.r = ap[i__5].r * wk.r - ap[i__5].i * wk.i, 
				    z__3.i = ap[i__5].r * wk.i + ap[i__5].i * 
				    wk.r;
			    z__2.r = ap[i__4].r - z__3.r, z__2.i = ap[i__4].i 
				    - z__3.i;
			    i__6 = i__ + k * ((*n << 1) - k - 1) / 2;
			    z__4.r = ap[i__6].r * wkp1.r - ap[i__6].i * 
				    wkp1.i, z__4.i = ap[i__6].r * wkp1.i + ap[
				    i__6].i * wkp1.r;
			    z__1.r = z__2.r - z__4.r, z__1.i = z__2.i - 
				    z__4.i;
			    ap[i__3].r = z__1.r, ap[i__3].i = z__1.i;
/* L90: */
			}
			i__2 = j + (k - 1) * ((*n << 1) - k) / 2;
			ap[i__2].r = wk.r, ap[i__2].i = wk.i;
			i__2 = j + k * ((*n << 1) - k - 1) / 2;
			ap[i__2].r = wkp1.r, ap[i__2].i = wkp1.i;
/* L100: */
		    }
		}
	    }
	}

/*        Store details of the interchanges in IPIV */

	if (kstep == 1) {
	    ipiv[k] = kp;
	} else {
	    ipiv[k] = -kp;
	    ipiv[k + 1] = -kp;
	}

/*        Increase K and return to the start of the main loop */

	k += kstep;
	kc = knc + *n - k + 2;
	goto L60;

    }

L110:
    return 0;

/*     End of ZSPTRF */

} /* zsptrf_ */
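/* A usage sketch for zsptrf_: factor a complex *symmetric* (not Hermitian)
   matrix stored in packed form, then solve A*x = b with the companion
   solver.  The ZSPTRS argument list (UPLO, N, NRHS, AP, IPIV, B, LDB,
   INFO) is assumed here, as is "f2c.h"; the helper name and the sample
   data are illustrative. */
#include "f2c.h"

extern int zsptrf_(char *uplo, integer *n, doublecomplex *ap,
                   integer *ipiv, integer *info);
extern int zsptrs_(char *uplo, integer *n, integer *nrhs, doublecomplex *ap,
                   integer *ipiv, doublecomplex *b, integer *ldb,
                   integer *info);

static integer packed_symmetric_solve_sketch(void)
{
    /* Upper triangle of a 3-by-3 symmetric A, packed columnwise:
       AP = {A11, A12, A22, A13, A23, A33} */
    doublecomplex ap[6] = {
        {4., 1.}, {1., 0.}, {3., -2.}, {0., 1.}, {2., 0.}, {5., 0.}
    };
    doublecomplex b[3] = { {1., 0.}, {0., 1.}, {1., 1.} };
    integer n = 3, nrhs = 1, ldb = 3, ipiv[3], info = 0;

    zsptrf_("U", &n, ap, ipiv, &info);     /* A = U*D*U**T              */
    if (info == 0)
        zsptrs_("U", &n, &nrhs, ap, ipiv, b, &ldb, &info);
    return info;                           /* b holds the solution if 0 */
}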
Example #27
0
/* Subroutine */ int zgetf2_(integer *m, integer *n, doublecomplex *a, 
	integer *lda, integer *ipiv, integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    doublecomplex z__1;

    /* Builtin functions */
    double z_abs(doublecomplex *);
    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);

    /* Local variables */
    integer i__, j, jp;
    doublereal sfmin;
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *), zgeru_(integer *, integer *, 
	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
	    integer *, doublecomplex *, integer *), zswap_(integer *, 
	    doublecomplex *, integer *, doublecomplex *, integer *);
    extern doublereal dlamch_(char *);
    extern /* Subroutine */ int xerbla_(char *, integer *);
    extern integer izamax_(integer *, doublecomplex *, integer *);


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZGETF2 computes an LU factorization of a general m-by-n matrix A */
/*  using partial pivoting with row interchanges. */

/*  The factorization has the form */
/*     A = P * L * U */
/*  where P is a permutation matrix, L is lower triangular with unit */
/*  diagonal elements (lower trapezoidal if m > n), and U is upper */
/*  triangular (upper trapezoidal if m < n). */

/*  This is the right-looking Level 2 BLAS version of the algorithm. */

/*  Arguments */
/*  ========= */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A.  M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A.  N >= 0. */

/*  A       (input/output) COMPLEX*16 array, dimension (LDA,N) */
/*          On entry, the m by n matrix to be factored. */
/*          On exit, the factors L and U from the factorization */
/*          A = P*L*U; the unit diagonal elements of L are not stored. */

/*  LDA     (input) INTEGER */
/*          The leading dimension of the array A.  LDA >= max(1,M). */

/*  IPIV    (output) INTEGER array, dimension (min(M,N)) */
/*          The pivot indices; for 1 <= i <= min(M,N), row i of the */
/*          matrix was interchanged with row IPIV(i). */

/*  INFO    (output) INTEGER */
/*          = 0: successful exit */
/*          < 0: if INFO = -k, the k-th argument had an illegal value */
/*          > 0: if INFO = k, U(k,k) is exactly zero. The factorization */
/*               has been completed, but the factor U is exactly */
/*               singular, and division by zero will occur if it is used */
/*               to solve a system of equations. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input parameters. */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --ipiv;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < 0) {
	*info = -2;
    } else if (*lda < max(1,*m)) {
	*info = -4;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZGETF2", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*m == 0 || *n == 0) {
	return 0;
    }

/*     Compute machine safe minimum */

    sfmin = dlamch_("S");

    i__1 = min(*m,*n);
    for (j = 1; j <= i__1; ++j) {

/*        Find pivot and test for singularity. */

	i__2 = *m - j + 1;
	jp = j - 1 + izamax_(&i__2, &a[j + j * a_dim1], &c__1);
	ipiv[j] = jp;
	i__2 = jp + j * a_dim1;
	if (a[i__2].r != 0. || a[i__2].i != 0.) {

/*           Apply the interchange to columns 1:N. */

	    if (jp != j) {
		zswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda);
	    }

/*           Compute elements J+1:M of J-th column. */

	    if (j < *m) {
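
/*              If |A(j,j)| is at least the safe minimum sfmin, scale
                A(j+1:m,j) by 1/A(j,j) with zscal_; otherwise divide the
                entries one at a time so that forming 1/A(j,j) cannot
                overflow. */
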
		if (z_abs(&a[j + j * a_dim1]) >= sfmin) {
		    i__2 = *m - j;
		    z_div(&z__1, &c_b1, &a[j + j * a_dim1]);
		    zscal_(&i__2, &z__1, &a[j + 1 + j * a_dim1], &c__1);
		} else {
		    i__2 = *m - j;
		    for (i__ = 1; i__ <= i__2; ++i__) {
			i__3 = j + i__ + j * a_dim1;
			z_div(&z__1, &a[j + i__ + j * a_dim1], &a[j + j * 
				a_dim1]);
			a[i__3].r = z__1.r, a[i__3].i = z__1.i;
/* L20: */
		    }
		}
	    }

	} else if (*info == 0) {

	    *info = j;
	}

	if (j < min(*m,*n)) {

/*           Update trailing submatrix. */

	    i__2 = *m - j;
	    i__3 = *n - j;
	    z__1.r = -1., z__1.i = -0.;
	    zgeru_(&i__2, &i__3, &z__1, &a[j + 1 + j * a_dim1], &c__1, &a[j + 
		    (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], lda)
		    ;
	}
/* L10: */
    }
    return 0;

/*     End of ZGETF2 */

} /* zgetf2_ */
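
A minimal calling sketch for the routine above (not part of the library itself). It assumes the usual f2c conventions: the typedefs from "f2c.h", column-major storage with a[i + j*lda] holding A(i+1,j+1), and 1-based pivot indices in ipiv. The 3x3 test matrix and the main() driver are illustrative only.

/* Hypothetical driver: factor a small 3x3 complex matrix with zgetf2_. */
#include <stdio.h>
#include "f2c.h"   /* integer, doublereal, doublecomplex */

extern int zgetf2_(integer *m, integer *n, doublecomplex *a, integer *lda,
                   integer *ipiv, integer *info);

int main(void)
{
    integer m = 3, n = 3, lda = 3, info = 0;
    integer ipiv[3];
    /* Column-major storage: a[i + j*lda] holds A(i+1, j+1). */
    doublecomplex a[9] = {
        {4.0, 0.0}, {2.0, 1.0}, {0.0, -1.0},   /* column 1 */
        {1.0, 0.0}, {3.0, 0.0}, {1.0,  2.0},   /* column 2 */
        {0.0, 2.0}, {1.0, 1.0}, {5.0,  0.0}    /* column 3 */
    };

    zgetf2_(&m, &n, a, &lda, ipiv, &info);

    if (info < 0)
        printf("argument %ld had an illegal value\n", (long)(-info));
    else if (info > 0)
        printf("U(%ld,%ld) is exactly zero\n", (long)info, (long)info);
    else
        printf("factored; pivots: %ld %ld %ld\n",
               (long)ipiv[0], (long)ipiv[1], (long)ipiv[2]);
    return 0;
}
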
Example #28
0
/* Subroutine */ int zungr2_(integer *m, integer *n, integer *k, 
	doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
	work, integer *info)
{
/*  -- LAPACK routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    ZUNGR2 generates an m by n complex matrix Q with orthonormal rows,   
    which is defined as the last m rows of a product of k elementary   
    reflectors of order n   

          Q  =  H(1)' H(2)' . . . H(k)'   

    as returned by ZGERQF.   

    Arguments   
    =========   

    M       (input) INTEGER   
            The number of rows of the matrix Q. M >= 0.   

    N       (input) INTEGER   
            The number of columns of the matrix Q. N >= M.   

    K       (input) INTEGER   
            The number of elementary reflectors whose product defines the   
            matrix Q. M >= K >= 0.   

    A       (input/output) COMPLEX*16 array, dimension (LDA,N)   
            On entry, the (m-k+i)-th row must contain the vector which   
            defines the elementary reflector H(i), for i = 1,2,...,k, as   
            returned by ZGERQF in the last k rows of its array argument   
            A.   
            On exit, the m-by-n matrix Q.   

    LDA     (input) INTEGER   
            The first dimension of the array A. LDA >= max(1,M).   

    TAU     (input) COMPLEX*16 array, dimension (K)   
            TAU(i) must contain the scalar factor of the elementary   
            reflector H(i), as returned by ZGERQF.   

    WORK    (workspace) COMPLEX*16 array, dimension (M)   

    INFO    (output) INTEGER   
            = 0: successful exit   
            < 0: if INFO = -i, the i-th argument has an illegal value   

    =====================================================================   


       Test the input arguments   

       Parameter adjustments */
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    doublecomplex z__1, z__2;
    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);
    /* Local variables */
    static integer i__, j, l;
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *), zlarf_(char *, integer *, integer *, 
	    doublecomplex *, integer *, doublecomplex *, doublecomplex *, 
	    integer *, doublecomplex *);
    static integer ii;
    extern /* Subroutine */ int xerbla_(char *, integer *), zlacgv_(
	    integer *, doublecomplex *, integer *);
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]

    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    --tau;
    --work;

    /* Function Body */
    *info = 0;
    if (*m < 0) {
	*info = -1;
    } else if (*n < *m) {
	*info = -2;
    } else if (*k < 0 || *k > *m) {
	*info = -3;
    } else if (*lda < max(1,*m)) {
	*info = -5;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZUNGR2", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*m <= 0) {
	return 0;
    }

    if (*k < *m) {

/*        Initialise rows 1:m-k to rows of the unit matrix */
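/*        (row l then carries a one in column n-m+l, so rows 1:m-k start
          out as the corresponding rows of the n-by-n identity matrix) */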

	i__1 = *n;
	for (j = 1; j <= i__1; ++j) {
	    i__2 = *m - *k;
	    for (l = 1; l <= i__2; ++l) {
		i__3 = a_subscr(l, j);
		a[i__3].r = 0., a[i__3].i = 0.;
/* L10: */
	    }
	    if (j > *n - *m && j <= *n - *k) {
		i__2 = a_subscr(*m - *n + j, j);
		a[i__2].r = 1., a[i__2].i = 0.;
	    }
/* L20: */
	}
    }

    i__1 = *k;
    for (i__ = 1; i__ <= i__1; ++i__) {
	ii = *m - *k + i__;

/*        Apply H(i)' to A(1:m-k+i,1:n-k+i) from the right */

	i__2 = *n - *m + ii - 1;
	zlacgv_(&i__2, &a_ref(ii, 1), lda);
	i__2 = a_subscr(ii, *n - *m + ii);
	a[i__2].r = 1., a[i__2].i = 0.;
	i__2 = ii - 1;
	i__3 = *n - *m + ii;
	d_cnjg(&z__1, &tau[i__]);
	zlarf_("Right", &i__2, &i__3, &a_ref(ii, 1), lda, &z__1, &a[a_offset],
		 lda, &work[1]);
	i__2 = *n - *m + ii - 1;
	i__3 = i__;
	z__1.r = -tau[i__3].r, z__1.i = -tau[i__3].i;
	zscal_(&i__2, &z__1, &a_ref(ii, 1), lda);
	i__2 = *n - *m + ii - 1;
	zlacgv_(&i__2, &a_ref(ii, 1), lda);
	i__2 = a_subscr(ii, *n - *m + ii);
	d_cnjg(&z__2, &tau[i__]);
	z__1.r = 1. - z__2.r, z__1.i = 0. - z__2.i;
	a[i__2].r = z__1.r, a[i__2].i = z__1.i;

/*        Set A(m-k+i,n-k+i+1:n) to zero */

	i__2 = *n;
	for (l = *n - *m + ii + 1; l <= i__2; ++l) {
	    i__3 = a_subscr(ii, l);
	    a[i__3].r = 0., a[i__3].i = 0.;
/* L30: */
	}
/* L40: */
    }
    return 0;

/*     End of ZUNGR2 */

} /* zungr2_ */
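
A hedged usage sketch: ZUNGR2 is normally preceded by an RQ factorization, so the driver below first calls a CLAPACK-style zgerqf_ (its prototype and availability are assumed here, as are the f2c.h typedefs) and then forms the m-by-n matrix Q with orthonormal rows in place. Sizes and data are illustrative only.

/* Hypothetical driver: RQ-factor a 2x4 matrix, then build Q with zungr2_. */
#include <stdio.h>
#include "f2c.h"

extern int zgerqf_(integer *m, integer *n, doublecomplex *a, integer *lda,
                   doublecomplex *tau, doublecomplex *work, integer *lwork,
                   integer *info);
extern int zungr2_(integer *m, integer *n, integer *k, doublecomplex *a,
                   integer *lda, doublecomplex *tau, doublecomplex *work,
                   integer *info);

int main(void)
{
    integer m = 2, n = 4, k = 2, lda = 2, lwork = 64, info = 0;
    doublecomplex a[8], tau[2], work[64];
    integer i;

    /* Fill A with arbitrary data (column-major, a[i + j*lda] = A(i+1,j+1)). */
    for (i = 0; i < m * n; ++i) {
        a[i].r = (doublereal)(i + 1);
        a[i].i = (doublereal)(m * n - i);
    }

    zgerqf_(&m, &n, a, &lda, tau, work, &lwork, &info);    /* A = R * Q */
    if (info == 0)
        zungr2_(&m, &n, &k, a, &lda, tau, work, &info);    /* A overwritten by Q */

    printf("info = %ld\n", (long)info);
    return 0;
}
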
Example #29
0
/* Subroutine */ int zlaror_slu(char *side, char *init, integer *m, integer *n, 
	doublecomplex *a, integer *lda, integer *iseed, doublecomplex *x, 
	integer *info)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3;
    doublecomplex z__1, z__2;

    /* Builtin functions */
    double z_abs(doublecomplex *);
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    static integer kbeg, jcol;
    static doublereal xabs;
    static integer irow, j;
    static doublecomplex csign;
    extern /* Subroutine */ int zgerc_(integer *, integer *, doublecomplex *, 
	    doublecomplex *, integer *, doublecomplex *, integer *, 
	    doublecomplex *, integer *), zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *);
    static integer ixfrm;
    extern /* Subroutine */ int zgemv_(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, integer *, doublecomplex *, 
	    integer *, doublecomplex *, doublecomplex *, integer *);
    static integer itype, nxfrm;
    static doublereal xnorm;
    extern doublereal dznrm2_(integer *, doublecomplex *, integer *);
    extern int input_error(char *, int *);
    static doublereal factor;
    extern /* Subroutine */ int zlacgv_slu(integer *, doublecomplex *, integer *)
	    ;
    extern /* Double Complex */ VOID zlarnd_slu(doublecomplex *, integer *, 
	    integer *);
    extern /* Subroutine */ int zlaset_slu(char *, integer *, integer *, 
	    doublecomplex *, doublecomplex *, doublecomplex *, integer *);
    static doublecomplex xnorms;


/*  -- LAPACK auxiliary test routine (version 2.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

       ZLAROR pre- or post-multiplies an M by N matrix A by a random   
       unitary matrix U, overwriting A. A may optionally be   
       initialized to the identity matrix before multiplying by U.   
       U is generated using the method of G.W. Stewart   
       ( SIAM J. Numer. Anal. 17, 1980, pp. 403-409 ).   
       (BLAS-2 version)   

    Arguments   
    =========   

    SIDE   - CHARACTER*1   
             SIDE specifies whether A is multiplied on the left or right   
             by U.   
         SIDE = 'L'   Multiply A on the left (premultiply) by U   
         SIDE = 'R'   Multiply A on the right (postmultiply) by U*   
         SIDE = 'C'   Multiply A on the left by U and the right by U*   
         SIDE = 'T'   Multiply A on the left by U and the right by U'   
             Not modified.   

    INIT   - CHARACTER*1   
             INIT specifies whether or not A should be initialized to   
             the identity matrix.   
                INIT = 'I'   Initialize A to (a section of) the   
                             identity matrix before applying U.   
                INIT = 'N'   No initialization.  Apply U to the   
                             input matrix A.   

             INIT = 'I' may be used to generate square (i.e., unitary)   
             or rectangular orthogonal matrices (orthogonality being   
             in the sense of ZDOTC):   

             For square matrices, M=N, and SIDE may be either 'L' or   
             'R'; the rows will be orthogonal to each other, as will the   
             columns.   
             For rectangular matrices where M < N, SIDE = 'R' will   
             produce a dense matrix whose rows will be orthogonal and   
             whose columns will not, while SIDE = 'L' will produce a   
             matrix whose rows will be orthogonal, and whose first M   
             columns will be orthogonal, the remaining columns being   
             zero.   
             For matrices where M > N, just use the previous   
             explanation, interchanging 'L' and 'R' and "rows" and   
             "columns".   

             Not modified.   

    M      - INTEGER   
             Number of rows of A. Not modified.   

    N      - INTEGER   
             Number of columns of A. Not modified.   

    A      - COMPLEX*16 array, dimension ( LDA, N )   
             Input and output array. Overwritten by U A ( if SIDE = 'L' )   
             or by A U ( if SIDE = 'R' )   
             or by U A U* ( if SIDE = 'C')   
             or by U A U' ( if SIDE = 'T') on exit.   

    LDA    - INTEGER   
             Leading dimension of A. Must be at least MAX ( 1, M ).   
             Not modified.   

    ISEED  - INTEGER array, dimension ( 4 )   
             On entry ISEED specifies the seed of the random number   
             generator. The array elements should be between 0 and 4095;   
             if not they will be reduced mod 4096.  Also, ISEED(4) must   
             be odd.  The random number generator uses a linear   
             congruential sequence limited to small integers, and so   
             should produce machine independent random numbers. The   
             values of ISEED are changed on exit, and can be used in the   
             next call to ZLAROR to continue the same random number   
             sequence.   
             Modified.   

    X      - COMPLEX*16 array, dimension ( 3*MAX( M, N ) )   
             Workspace. Of length:   
                 2*M + N if SIDE = 'L',   
                 2*N + M if SIDE = 'R',   
                 3*N     if SIDE = 'C' or 'T'.   
             Modified.   

    INFO   - INTEGER   
             An error flag.  It is set to:   
              0  if no error.   
              1  if ZLARND returned a bad random number (installation   
                 problem)   
             -1  if SIDE is not L, R, C, or T.   
             -3  if M is negative.   
             -4  if N is negative or if SIDE is C or T and N is not equal   
                 to M.   
             -6  if LDA is less than M.   

    =====================================================================   


       Parameter adjustments */
    a_dim1 = *lda;
    a_offset = a_dim1 + 1;
    a -= a_offset;
    --iseed;
    --x;

    /* Function Body */
    if (*n == 0 || *m == 0) {
	return 0;
    }

    itype = 0;
    if (strncmp(side, "L", 1)==0) {
	itype = 1;
    } else if (strncmp(side, "R", 1)==0) {
	itype = 2;
    } else if (strncmp(side, "C", 1)==0) {
	itype = 3;
    } else if (strncmp(side, "T", 1)==0) {
	itype = 4;
    }

/*     Check for argument errors. */

    *info = 0;
    if (itype == 0) {
	*info = -1;
    } else if (*m < 0) {
	*info = -3;
    } else if (*n < 0 || (itype == 3 && *n != *m)) {
	*info = -4;
    } else if (*lda < *m) {
	*info = -6;
    }
    if (*info != 0) {
	i__1 = -(*info);
	input_error("ZLAROR", &i__1);
	return 0;
    }

    if (itype == 1) {
	nxfrm = *m;
    } else {
	nxfrm = *n;
    }

/*     Initialize A to the identity matrix if desired */

    if (strncmp(init, "I", 1)==0) {
	zlaset_slu("Full", m, n, &c_b1, &c_b2, &a[a_offset], lda);
    }

/*     If no rotation possible, still multiply by   
       a random complex number from the circle |x| = 1   

        2)      Compute Rotation by computing Householder   
                Transformations H(2), H(3), ..., H(n).  Note that the   
                order in which they are computed is irrelevant. */

    i__1 = nxfrm;
    for (j = 1; j <= i__1; ++j) {
	i__2 = j;
	x[i__2].r = 0., x[i__2].i = 0.;
/* L10: */
    }

    i__1 = nxfrm;
    for (ixfrm = 2; ixfrm <= i__1; ++ixfrm) {
	kbeg = nxfrm - ixfrm + 1;

/*        Generate independent normal( 0, 1 ) random numbers */

	i__2 = nxfrm;
	for (j = kbeg; j <= i__2; ++j) {
	    i__3 = j;
	    zlarnd_slu(&z__1, &c__3, &iseed[1]);
	    x[i__3].r = z__1.r, x[i__3].i = z__1.i;
/* L20: */
	}

/*        Generate a Householder transformation from the random vector X */

	xnorm = dznrm2_(&ixfrm, &x[kbeg], &c__1);
	xabs = z_abs(&x[kbeg]);
	if (xabs != 0.) {
	    i__2 = kbeg;
	    z__1.r = x[i__2].r / xabs, z__1.i = x[i__2].i / xabs;
	    csign.r = z__1.r, csign.i = z__1.i;
	} else {
	    csign.r = 1., csign.i = 0.;
	}
	z__1.r = xnorm * csign.r, z__1.i = xnorm * csign.i;
	xnorms.r = z__1.r, xnorms.i = z__1.i;
	i__2 = nxfrm + kbeg;
	z__1.r = -csign.r, z__1.i = -csign.i;
	x[i__2].r = z__1.r, x[i__2].i = z__1.i;
	factor = xnorm * (xnorm + xabs);
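
/*        Once inverted below, factor equals 2 / ||v||**2 for the
          Householder vector v built in x(kbeg:nxfrm), so the reflection
          applied is H = I - factor * v * v'.  A vanishing factor flags a
          numerically zero (i.e. bad) random vector. */
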
	if (abs(factor) < 1e-20) {
	    *info = 1;
	    i__2 = -(*info);
	    input_error("ZLAROR", &i__2);
	    return 0;
	} else {
	    factor = 1. / factor;
	}
	i__2 = kbeg;
	i__3 = kbeg;
	z__1.r = x[i__3].r + xnorms.r, z__1.i = x[i__3].i + xnorms.i;
	x[i__2].r = z__1.r, x[i__2].i = z__1.i;

/*        Apply Householder transformation to A */

	if (itype == 1 || itype == 3 || itype == 4) {

/*           Apply H(k) on the left of A */

	    zgemv_("C", &ixfrm, n, &c_b2, &a[kbeg + a_dim1], lda, &x[kbeg], &
		    c__1, &c_b1, &x[(nxfrm << 1) + 1], &c__1);
	    z__2.r = factor, z__2.i = 0.;
	    z__1.r = -z__2.r, z__1.i = -z__2.i;
	    zgerc_(&ixfrm, n, &z__1, &x[kbeg], &c__1, &x[(nxfrm << 1) + 1], &
		    c__1, &a[kbeg + a_dim1], lda);

	}

	if (itype >= 2 && itype <= 4) {

/*           Apply H(k)* (or H(k)') on the right of A */

	    if (itype == 4) {
		zlacgv_slu(&ixfrm, &x[kbeg], &c__1);
	    }

	    zgemv_("N", m, &ixfrm, &c_b2, &a[kbeg * a_dim1 + 1], lda, &x[kbeg]
		    , &c__1, &c_b1, &x[(nxfrm << 1) + 1], &c__1);
	    z__2.r = factor, z__2.i = 0.;
	    z__1.r = -z__2.r, z__1.i = -z__2.i;
	    zgerc_(m, &ixfrm, &z__1, &x[(nxfrm << 1) + 1], &c__1, &x[kbeg], &
		    c__1, &a[kbeg * a_dim1 + 1], lda);

	}
/* L30: */
    }

    zlarnd_slu(&z__1, &c__3, &iseed[1]);
    x[1].r = z__1.r, x[1].i = z__1.i;
    xabs = z_abs(&x[1]);
    if (xabs != 0.) {
	z__1.r = x[1].r / xabs, z__1.i = x[1].i / xabs;
	csign.r = z__1.r, csign.i = z__1.i;
    } else {
	csign.r = 1., csign.i = 0.;
    }
    i__1 = nxfrm << 1;
    x[i__1].r = csign.r, x[i__1].i = csign.i;

/*     Scale the matrix A by D. */
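/*     D is the diagonal of unit-modulus factors saved in x(nxfrm+1:2*nxfrm)
       during the sweep above; applying D (or its conjugate) on the
       appropriate side completes the multiplication by the random
       unitary matrix. */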

    if (itype == 1 || itype == 3 || itype == 4) {
	i__1 = *m;
	for (irow = 1; irow <= i__1; ++irow) {
	    d_cnjg(&z__1, &x[nxfrm + irow]);
	    zscal_(n, &z__1, &a[irow + a_dim1], lda);
/* L40: */
	}
    }

    if (itype == 2 || itype == 3) {
	i__1 = *n;
	for (jcol = 1; jcol <= i__1; ++jcol) {
	    zscal_(m, &x[nxfrm + jcol], &a[jcol * a_dim1 + 1], &c__1);
/* L50: */
	}
    }

    if (itype == 4) {
	i__1 = *n;
	for (jcol = 1; jcol <= i__1; ++jcol) {
	    d_cnjg(&z__1, &x[nxfrm + jcol]);
	    zscal_(m, &z__1, &a[jcol * a_dim1 + 1], &c__1);
/* L60: */
	}
    }
    return 0;

/*     End of ZLAROR */

} /* zlaror_slu */
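
A hedged calling sketch for the routine above: with INIT = 'I' and SIDE = 'L' it overwrites A with a random m-by-m unitary matrix (U applied to the identity). The driver assumes the f2c.h typedefs and that this SuperLU-flavoured zlaror_slu is linked in; the workspace needs 2*M + N entries for SIDE = 'L', and ISEED(4) must be odd.

/* Hypothetical driver: generate a 4x4 random unitary matrix. */
#include <stdio.h>
#include "f2c.h"

extern int zlaror_slu(char *side, char *init, integer *m, integer *n,
                      doublecomplex *a, integer *lda, integer *iseed,
                      doublecomplex *x, integer *info);

int main(void)
{
    integer m = 4, n = 4, lda = 4, info = 0;
    integer iseed[4] = {1988, 1989, 1990, 1991};   /* iseed[3] must be odd */
    doublecomplex a[16] = {{0.0, 0.0}};            /* overwritten with U */
    doublecomplex x[12];                           /* 2*M + N = 12 workspace */

    zlaror_slu("L", "I", &m, &n, a, &lda, iseed, x, &info);
    printf("info = %ld\n", (long)info);            /* 0 on success */
    return 0;
}
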
Example #30
0
/* Subroutine */ int ztptri_(char *uplo, char *diag, integer *n, 
	doublecomplex *ap, integer *info)
{
/*  -- LAPACK routine (version 2.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       September 30, 1994   


    Purpose   
    =======   

    ZTPTRI computes the inverse of a complex upper or lower triangular   
    matrix A stored in packed format.   

    Arguments   
    =========   

    UPLO    (input) CHARACTER*1   
            = 'U':  A is upper triangular;   
            = 'L':  A is lower triangular.   

    DIAG    (input) CHARACTER*1   
            = 'N':  A is non-unit triangular;   
            = 'U':  A is unit triangular.   

    N       (input) INTEGER   
            The order of the matrix A.  N >= 0.   

    AP      (input/output) COMPLEX*16 array, dimension (N*(N+1)/2)   
            On entry, the upper or lower triangular matrix A, stored   
            columnwise in a linear array.  The j-th column of A is stored   
            in the array AP as follows:   
            if UPLO = 'U', AP(i + (j-1)*j/2) = A(i,j) for 1<=i<=j;   
            if UPLO = 'L', AP(i + (j-1)*(2*n-j)/2) = A(i,j) for j<=i<=n.   
            See below for further details.   
            On exit, the (triangular) inverse of the original matrix, in   
            the same packed storage format.   

    INFO    (output) INTEGER   
            = 0:  successful exit   
            < 0:  if INFO = -i, the i-th argument had an illegal value   
            > 0:  if INFO = i, A(i,i) is exactly zero.  The triangular   
                  matrix is singular and its inverse can not be computed.   

    Further Details   
    ===============   

    A triangular matrix A can be transferred to packed storage using one   
    of the following program segments:   

    UPLO = 'U':                      UPLO = 'L':   

          JC = 1                           JC = 1   
          DO 2 J = 1, N                    DO 2 J = 1, N   
             DO 1 I = 1, J                    DO 1 I = J, N   
                AP(JC+I-1) = A(I,J)              AP(JC+I-J) = A(I,J)   
        1    CONTINUE                    1    CONTINUE   
             JC = JC + J                      JC = JC + N - J + 1   
        2 CONTINUE                       2 CONTINUE   

    =====================================================================   


       Test the input parameters.   

    
   Parameter adjustments   
       Function Body */
    /* Table of constant values */
    static doublecomplex c_b1 = {1.,0.};
    static integer c__1 = 1;
    
    /* System generated locals */
    integer i__1, i__2;
    doublecomplex z__1;
    /* Builtin functions */
    void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
    /* Local variables */
    static integer j;
    extern logical lsame_(char *, char *);
    extern /* Subroutine */ int zscal_(integer *, doublecomplex *, 
	    doublecomplex *, integer *);
    static logical upper;
    extern /* Subroutine */ int ztpmv_(char *, char *, char *, integer *, 
	    doublecomplex *, doublecomplex *, integer *);
    static integer jc, jj;
    extern /* Subroutine */ int xerbla_(char *, integer *);
    static integer jclast;
    static logical nounit;
    static doublecomplex ajj;



#define AP(I) ap[(I)-1]


    *info = 0;
    upper = lsame_(uplo, "U");
    nounit = lsame_(diag, "N");
    if (! upper && ! lsame_(uplo, "L")) {
	*info = -1;
    } else if (! nounit && ! lsame_(diag, "U")) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("ZTPTRI", &i__1);
	return 0;
    }

/*     Check for singularity if non-unit. */

    if (nounit) {
	if (upper) {
	    jj = 0;
	    i__1 = *n;
	    for (*info = 1; *info <= i__1; ++(*info)) {
		jj += *info;
		i__2 = jj;
		if (AP(jj).r == 0. && AP(jj).i == 0.) {
		    return 0;
		}
/* L10: */
	    }
	} else {
	    jj = 1;
	    i__1 = *n;
	    for (*info = 1; *info <= i__1; ++(*info)) {
		i__2 = jj;
		if (AP(jj).r == 0. && AP(jj).i == 0.) {
		    return 0;
		}
		jj = jj + *n - *info + 1;
/* L20: */
	    }
	}
	*info = 0;
    }

    if (upper) {

/*        Compute inverse of upper triangular matrix. */

	jc = 1;
	i__1 = *n;
	for (j = 1; j <= *n; ++j) {
	    if (nounit) {
		i__2 = jc + j - 1;
		z_div(&z__1, &c_b1, &AP(jc + j - 1));
		AP(jc+j-1).r = z__1.r, AP(jc+j-1).i = z__1.i;
		i__2 = jc + j - 1;
		z__1.r = -AP(jc+j-1).r, z__1.i = -AP(jc+j-1).i;
		ajj.r = z__1.r, ajj.i = z__1.i;
	    } else {
		z__1.r = -1., z__1.i = 0.;
		ajj.r = z__1.r, ajj.i = z__1.i;
	    }

/*           Compute elements 1:j-1 of j-th column. */

	    i__2 = j - 1;
	    ztpmv_("Upper", "No transpose", diag, &i__2, &AP(1), &AP(jc), &
		    c__1);
	    i__2 = j - 1;
	    zscal_(&i__2, &ajj, &AP(jc), &c__1);
	    jc += j;
/* L30: */
	}

    } else {

/*        Compute inverse of lower triangular matrix. */

	jc = *n * (*n + 1) / 2;
	for (j = *n; j >= 1; --j) {
	    if (nounit) {
		i__1 = jc;
		z_div(&z__1, &c_b1, &AP(jc));
		AP(jc).r = z__1.r, AP(jc).i = z__1.i;
		i__1 = jc;
		z__1.r = -AP(jc).r, z__1.i = -AP(jc).i;
		ajj.r = z__1.r, ajj.i = z__1.i;
	    } else {
		z__1.r = -1., z__1.i = 0.;
		ajj.r = z__1.r, ajj.i = z__1.i;
	    }
	    if (j < *n) {

/*              Compute elements j+1:n of j-th column. */

		i__1 = *n - j;
		ztpmv_("Lower", "No transpose", diag, &i__1, &AP(jclast), &AP(
			jc + 1), &c__1);
		i__1 = *n - j;
		zscal_(&i__1, &ajj, &AP(jc + 1), &c__1);
	    }
	    jclast = jc;
	    jc = jc - *n + j - 2;
/* L40: */
	}
    }

    return 0;

/*     End of ZTPTRI */

} /* ztptri_ */
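
A minimal calling sketch for the routine above, including the C analogue of the UPLO = 'U' packing segment from the Further Details comment. It assumes the f2c.h typedefs; the matrix values and the main() driver are illustrative only.

/* Hypothetical driver: pack an upper triangular 3x3 matrix columnwise
   and invert it in place with ztptri_. */
#include <stdio.h>
#include "f2c.h"

extern int ztptri_(char *uplo, char *diag, integer *n, doublecomplex *ap,
                   integer *info);

int main(void)
{
    integer n = 3, info = 0;
    /* Full 3x3 upper triangular matrix, column-major: a[i + j*n] = A(i+1,j+1). */
    doublecomplex a[9] = {
        {2.0, 0.0}, {0.0, 0.0}, {0.0, 0.0},
        {1.0, 1.0}, {3.0, 0.0}, {0.0, 0.0},
        {0.0, 2.0}, {1.0,-1.0}, {4.0, 0.0}
    };
    doublecomplex ap[6];   /* n*(n+1)/2 packed entries */
    integer i, j, jc = 0;

    /* UPLO = 'U' packing: AP(jc+i) = A(i,j) for i = 1..j, column by column. */
    for (j = 0; j < n; ++j) {
        for (i = 0; i <= j; ++i)
            ap[jc + i] = a[i + j * n];
        jc += j + 1;
    }

    ztptri_("U", "N", &n, ap, &info);
    printf("info = %ld\n", (long)info);   /* 0: AP now holds inv(A), packed */
    return 0;
}
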