/* Subroutine */ int pdnaup2_(integer *comm, integer *ido, char *bmat, 
	integer *n, char *which, integer *nev, integer *np, doublereal *tol, 
	doublereal *resid, integer *mode, integer *iupd, integer *ishift, 
	integer *mxiter, doublereal *v, integer *ldv, doublereal *h__, 
	integer *ldh, doublereal *ritzr, doublereal *ritzi, doublereal *
	bounds, doublereal *q, integer *ldq, doublereal *workl, integer *
	ipntr, doublereal *workd, integer *info, ftnlen bmat_len, ftnlen 
	which_len)
{
    /* System generated locals */
    integer h_dim1, h_offset, q_dim1, q_offset, v_dim1, v_offset, i__1, i__2;
    doublereal d__1, d__2;

    /* Builtin functions */
    double pow_dd(doublereal *, doublereal *);
    integer s_cmp(char *, char *, ftnlen, ftnlen);
    /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen);
    double sqrt(doublereal);

    /* Local variables */
    static integer j;
    static real t0, t1, t2, t3;
    static doublereal rnorm_buf__;
    static integer kp[4], np0, nev0;
    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, 
	    integer *);
    static doublereal eps23;
    static integer ierr, iter;
    static doublereal temp;
    static logical getv0, cnorm;
    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, 
	    doublereal *, integer *);
    static integer nconv;
    static logical initv;
    static doublereal rnorm;
    extern /* Subroutine */ int dvout_(integer *, integer *, doublereal *, 
	    integer *, char *, ftnlen), mpi_allreduce__(doublereal *, 
	    doublereal *, integer *, integer *, integer *, integer *, integer 
	    *);
    extern doublereal dlapy2_(doublereal *, doublereal *);
    static integer nevbef;
    static char wprime[2];
    static logical update, ushift;
    static integer kplusp, msglvl, nptemp, numcnv;
    extern /* Subroutine */ int dnconv_(integer *, doublereal *, doublereal *,
	     doublereal *, doublereal *, integer *), pdvout_(integer *, 
	    integer *, integer *, doublereal *, integer *, char *, ftnlen), 
	    pivout_(integer *, integer *, integer *, integer *, integer *, 
	    char *, ftnlen), second_(real *), dsortc_(char *, logical *, 
	    integer *, doublereal *, doublereal *, doublereal *, ftnlen), 
	    pdmout_(integer *, integer *, integer *, integer *, doublereal *, 
	    integer *, integer *, char *, ftnlen), pdgetv0_(integer *, 
	    integer *, char *, integer *, logical *, integer *, integer *, 
	    doublereal *, integer *, doublereal *, doublereal *, integer *, 
	    doublereal *, doublereal *, integer *, ftnlen);
    extern doublereal pdnorm2_(integer *, integer *, doublereal *, integer *),
	     pdlamch_(integer *, char *, ftnlen);
    extern /* Subroutine */ int pdneigh_(integer *, doublereal *, integer *, 
	    doublereal *, integer *, doublereal *, doublereal *, doublereal *,
	     doublereal *, integer *, doublereal *, integer *), pdnaitr_(
	    integer *, integer *, char *, integer *, integer *, integer *, 
	    integer *, doublereal *, doublereal *, doublereal *, integer *, 
	    doublereal *, integer *, integer *, doublereal *, doublereal *, 
	    integer *, ftnlen), pdngets_(integer *, integer *, char *, 
	    integer *, integer *, doublereal *, doublereal *, doublereal *, 
	    doublereal *, doublereal *, ftnlen), pdnapps_(integer *, integer *
	    , integer *, integer *, doublereal *, doublereal *, doublereal *, 
	    integer *, doublereal *, integer *, doublereal *, doublereal *, 
	    integer *, doublereal *, doublereal *);



/*     %---------------% */
/*     | MPI Variables | */
/*     %---------------% */

/* /+ */
/* * */
/* *  (C) 1993 by Argonne National Laboratory and Mississippi State University. */
/* *      All rights reserved.  See COPYRIGHT in top-level directory. */
/* +/ */

/* /+ user include file for MPI programs, with no dependencies +/ */

/* /+ return codes +/ */







/*     We handle datatypes by putting the variables that hold them into */
/*     common.  This way, a Fortran program can directly use the various */
/*     datatypes and can even give them to C programs. */

/*     MPI_BOTTOM needs to be a known address; here we put it at the */
/*     beginning of the common block.  The point-to-point and collective */
/*     routines know about MPI_BOTTOM, but MPI_TYPE_STRUCT as yet does not. */

/*     The types MPI_INTEGER1,2,4 and MPI_REAL4,8 are OPTIONAL. */
/*     Their values are zero if they are not available.  Note that */
/*     using these reduces the portability of code (though may enhance */
/*     portability between Crays and other systems) */



/*     All other MPI routines are subroutines */

/*     The attribute copy/delete functions are symbols that can be passed */
/*     to MPI routines */

/*     %----------------------------------------------------% */
/*     | Include files for debugging and timing information | */
/*     %----------------------------------------------------% */


/* \SCCS Information: @(#) */
/* FILE: debug.h   SID: 2.3   DATE OF SID: 11/16/95   RELEASE: 2 */

/*     %---------------------------------% */
/*     | See debug.doc for documentation | */
/*     %---------------------------------% */

/*     %------------------% */
/*     | Scalar Arguments | */
/*     %------------------% */

/*     %--------------------------------% */
/*     | See stat.doc for documentation | */
/*     %--------------------------------% */

/* \SCCS Information: @(#) */
/* FILE: stat.h   SID: 2.2   DATE OF SID: 11/16/95   RELEASE: 2 */



/*     %-----------------% */
/*     | Array Arguments | */
/*     %-----------------% */


/*     %------------% */
/*     | Parameters | */
/*     %------------% */


/*     %---------------% */
/*     | Local Scalars | */
/*     %---------------% */



/*     %-----------------------% */
/*     | Local array arguments | */
/*     %-----------------------% */


/*     %----------------------% */
/*     | External Subroutines | */
/*     %----------------------% */


/*     %--------------------% */
/*     | External Functions | */
/*     %--------------------% */


/*     %---------------------% */
/*     | Intrinsic Functions | */
/*     %---------------------% */


/*     %-----------------------% */
/*     | Executable Statements | */
/*     %-----------------------% */

    /* Parameter adjustments */
    --workd;
    --resid;
    --workl;
    --bounds;
    --ritzi;
    --ritzr;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;
    h_dim1 = *ldh;
    h_offset = 1 + h_dim1;
    h__ -= h_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    --ipntr;

    /* Function Body */
    if (*ido == 0) {

	second_(&t0);

	msglvl = debug_1.mnaup2;

/*        %-------------------------------------% */
/*        | Get the machine dependent constant. | */
/*        %-------------------------------------% */

	eps23 = pdlamch_(comm, "Epsilon-Machine", (ftnlen)15);
	eps23 = pow_dd(&eps23, &c_b3);
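/*        eps23 = (relative machine precision)**c_b3; in the reference    */
/*        ARPACK sources c_b3 is the constant 2/3.  It is used further    */
/*        below to scale the Ritz estimates by 1/max(eps23, |Ritz value|).*/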

	nev0 = *nev;
	np0 = *np;

/*        %-------------------------------------% */
/*        | kplusp is the bound on the largest  | */
/*        |        Arnoldi factorization built. | */
/*        | nconv is the current number of      | */
/*        |        "converged" eigenvalues.     | */
/*        | iter is the counter on the current  | */
/*        |      iteration step.                | */
/*        %-------------------------------------% */

	kplusp = *nev + *np;
	nconv = 0;
	iter = 0;

/*        %---------------------------------------% */
/*        | Set flags for computing the first NEV | */
/*        | steps of the Arnoldi factorization.   | */
/*        %---------------------------------------% */

	getv0 = TRUE_;
	update = FALSE_;
	ushift = FALSE_;
	cnorm = FALSE_;

	if (*info != 0) {

/*           %--------------------------------------------% */
/*           | User provides the initial residual vector. | */
/*           %--------------------------------------------% */

	    initv = TRUE_;
	    *info = 0;
	} else {
	    initv = FALSE_;
	}
    }
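
/*     Note: the local control variables above are declared static, so    */
/*     their values persist across reverse-communication re-entries; the  */
/*     flags getv0, update, ushift and cnorm record which phase of the    */
/*     iteration to resume when the caller re-enters after performing     */
/*     the requested operator application.                                */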

/*     %---------------------------------------------% */
/*     | Get a possibly random starting vector and   | */
/*     | force it into the range of the operator OP. | */
/*     %---------------------------------------------% */

/* L10: */

    if (getv0) {
	pdgetv0_(comm, ido, bmat, &c__1, &initv, n, &c__1, &v[v_offset], ldv, 
		&resid[1], &rnorm, &ipntr[1], &workd[1], &workl[1], info, (
		ftnlen)1);

	if (*ido != 99) {
	    goto L9000;
	}

	if (rnorm == 0.) {

/*           %-----------------------------------------% */
/*           | The initial vector is zero. Error exit. | */
/*           %-----------------------------------------% */

	    *info = -9;
	    goto L1100;
	}
	getv0 = FALSE_;
	*ido = 0;
    }

/*     %-----------------------------------% */
/*     | Back from reverse communication : | */
/*     | continue with update step         | */
/*     %-----------------------------------% */

    if (update) {
	goto L20;
    }

/*     %-------------------------------------------% */
/*     | Back from computing user specified shifts | */
/*     %-------------------------------------------% */

    if (ushift) {
	goto L50;
    }

/*     %-------------------------------------% */
/*     | Back from computing residual norm   | */
/*     | at the end of the current iteration | */
/*     %-------------------------------------% */

    if (cnorm) {
	goto L100;
    }

/*     %----------------------------------------------------------% */
/*     | Compute the first NEV steps of the Arnoldi factorization | */
/*     %----------------------------------------------------------% */

    pdnaitr_(comm, ido, bmat, n, &c__0, nev, mode, &resid[1], &rnorm, &v[
	    v_offset], ldv, &h__[h_offset], ldh, &ipntr[1], &workd[1], &workl[
	    1], info, (ftnlen)1);

/*     %---------------------------------------------------% */
/*     | ido .ne. 99 implies use of reverse communication  | */
/*     | to compute operations involving OP and possibly B | */
/*     %---------------------------------------------------% */

    if (*ido != 99) {
	goto L9000;
    }

    if (*info > 0) {
	*np = *info;
	*mxiter = iter;
	*info = -9999;
	goto L1200;
    }
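
/*     pdnaitr  could not build an Arnoldi factorization of the requested */
/*     size: INFO returned the size actually built, which is passed back  */
/*     to the caller in NP, and INFO = -9999 flags the failure.           */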

/*     %--------------------------------------------------------------% */
/*     |                                                              | */
/*     |           M A I N  ARNOLDI  I T E R A T I O N  L O O P       | */
/*     |           Each iteration implicitly restarts the Arnoldi     | */
/*     |           factorization in place.                            | */
/*     |                                                              | */
/*     %--------------------------------------------------------------% */

L1000:

    ++iter;

    if (msglvl > 0) {
	pivout_(comm, &debug_1.logfil, &c__1, &iter, &debug_1.ndigit, "_naup"
		"2: **** Start of major iteration number ****", (ftnlen)49);
    }

/*        %-----------------------------------------------------------% */
/*        | Compute NP additional steps of the Arnoldi factorization. | */
/*        | Adjust NP since NEV might have been updated by last call  | */
/*        | to the shift application routine pdnapps .                 | */
/*        %-----------------------------------------------------------% */

    *np = kplusp - *nev;

    if (msglvl > 1) {
	pivout_(comm, &debug_1.logfil, &c__1, nev, &debug_1.ndigit, "_naup2:"
		" The length of the current Arnoldi factorization", (ftnlen)55)
		;
	pivout_(comm, &debug_1.logfil, &c__1, np, &debug_1.ndigit, "_naup2: "
		"Extend the Arnoldi factorization by", (ftnlen)43);
    }

/*        %-----------------------------------------------------------% */
/*        | Compute NP additional steps of the Arnoldi factorization. | */
/*        %-----------------------------------------------------------% */

    *ido = 0;
L20:
    update = TRUE_;

    pdnaitr_(comm, ido, bmat, n, nev, np, mode, &resid[1], &rnorm, &v[
	    v_offset], ldv, &h__[h_offset], ldh, &ipntr[1], &workd[1], &workl[
	    1], info, (ftnlen)1);

/*        %---------------------------------------------------% */
/*        | ido .ne. 99 implies use of reverse communication  | */
/*        | to compute operations involving OP and possibly B | */
/*        %---------------------------------------------------% */

    if (*ido != 99) {
	goto L9000;
    }

    if (*info > 0) {
	*np = *info;
	*mxiter = iter;
	*info = -9999;
	goto L1200;
    }
    update = FALSE_;

    if (msglvl > 1) {
	pdvout_(comm, &debug_1.logfil, &c__1, &rnorm, &debug_1.ndigit, "_nau"
		"p2: Corresponding B-norm of the residual", (ftnlen)44);
    }

/*        %--------------------------------------------------------% */
/*        | Compute the eigenvalues and corresponding error bounds | */
/*        | of the current upper Hessenberg matrix.                | */
/*        %--------------------------------------------------------% */

    pdneigh_(comm, &rnorm, &kplusp, &h__[h_offset], ldh, &ritzr[1], &ritzi[1],
	     &bounds[1], &q[q_offset], ldq, &workl[1], &ierr);

    if (ierr != 0) {
	*info = -8;
	goto L1200;
    }

/*        %----------------------------------------------------% */
/*        | Make a copy of eigenvalues and corresponding error | */
/*        | bounds obtained from pdneigh .                      | */
/*        %----------------------------------------------------% */

/* Computing 2nd power */
    i__1 = kplusp;
    dcopy_(&kplusp, &ritzr[1], &c__1, &workl[i__1 * i__1 + 1], &c__1);
/* Computing 2nd power */
    i__1 = kplusp;
    dcopy_(&kplusp, &ritzi[1], &c__1, &workl[i__1 * i__1 + kplusp + 1], &c__1)
	    ;
/* Computing 2nd power */
    i__1 = kplusp;
    dcopy_(&kplusp, &bounds[1], &c__1, &workl[i__1 * i__1 + (kplusp << 1) + 1]
	    , &c__1);
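
/*        The copies stashed at workl(kplusp**2+1 : kplusp**2+3*kplusp)   */
/*        keep the Ritz values and bounds in their unsorted order; the    */
/*        arrays ritzr, ritzi and bounds themselves are reordered by      */
/*        pdngets  and dsortc  below, while the copies are the ones       */
/*        printed at high debug levels when the routine prepares to exit. */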

/*        %---------------------------------------------------% */
/*        | Select the wanted Ritz values and their bounds    | */
/*        | to be used in the convergence test.               | */
/*        | The wanted part of the spectrum and corresponding | */
/*        | error bounds are in the last NEV loc. of RITZR,   | */
/*        | RITZI and BOUNDS respectively. The variables NEV  | */
/*        | and NP may be updated if the NEV-th wanted Ritz   | */
/*        | value has a non zero imaginary part. In this case | */
/*        | NEV is increased by one and NP decreased by one.  | */
/*        | NOTE: The last two arguments of pdngets  are no    | */
/*        | longer used as of version 2.1.                    | */
/*        %---------------------------------------------------% */

    *nev = nev0;
    *np = np0;
    numcnv = *nev;
    pdngets_(comm, ishift, which, nev, np, &ritzr[1], &ritzi[1], &bounds[1], &
	    workl[1], &workl[*np + 1], (ftnlen)2);
    if (*nev == nev0 + 1) {
	numcnv = nev0 + 1;
    }

/*        %-------------------% */
/*        | Convergence test. | */
/*        %-------------------% */

    dcopy_(nev, &bounds[*np + 1], &c__1, &workl[(*np << 1) + 1], &c__1);
    dnconv_(nev, &ritzr[*np + 1], &ritzi[*np + 1], &workl[(*np << 1) + 1], 
	    tol, &nconv);
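
/*        In the reference dnconv , a Ritz value is counted as converged  */
/*        when its estimate satisfies                                     */
/*           bounds(i) .le. tol * max( eps23, |ritz(i)| ).                */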

    if (msglvl > 2) {
	kp[0] = *nev;
	kp[1] = *np;
	kp[2] = numcnv;
	kp[3] = nconv;
	pivout_(comm, &debug_1.logfil, &c__4, kp, &debug_1.ndigit, "_naup2: "
		"NEV, NP, NUMCNV, NCONV are", (ftnlen)34);
	pdvout_(comm, &debug_1.logfil, &kplusp, &ritzr[1], &debug_1.ndigit, 
		"_naup2: Real part of the eigenvalues of H", (ftnlen)41);
	pdvout_(comm, &debug_1.logfil, &kplusp, &ritzi[1], &debug_1.ndigit, 
		"_naup2: Imaginary part of the eigenvalues of H", (ftnlen)46);
	pdvout_(comm, &debug_1.logfil, &kplusp, &bounds[1], &debug_1.ndigit, 
		"_naup2: Ritz estimates of the current NCV Ritz values", (
		ftnlen)53);
    }

/*        %---------------------------------------------------------% */
/*        | Count the number of unwanted Ritz values that have zero | */
/*        | Ritz estimates. If any Ritz estimates are equal to zero | */
/*        | then a leading block of H of order equal to at least    | */
/*        | the number of Ritz values with zero Ritz estimates has  | */
/*        | split off. None of these Ritz values may be removed by  | */
/*        | shifting. Decrease NP the number of shifts to apply. If | */
/*        | no shifts may be applied, then prepare to exit          | */
/*        %---------------------------------------------------------% */

    nptemp = *np;
    i__1 = nptemp;
    for (j = 1; j <= i__1; ++j) {
	if (bounds[j] == 0.) {
	    --(*np);
	    ++(*nev);
	}
/* L30: */
    }

    if (nconv >= numcnv || iter > *mxiter || *np == 0) {

	if (msglvl > 4) {
/* Computing 2nd power */
	    i__1 = kplusp;
	    dvout_(&debug_1.logfil, &kplusp, &workl[i__1 * i__1 + 1], &
		    debug_1.ndigit, "_naup2: Real part of the eig computed b"
		    "y _neigh:", (ftnlen)48);
/* Computing 2nd power */
	    i__1 = kplusp;
	    dvout_(&debug_1.logfil, &kplusp, &workl[i__1 * i__1 + kplusp + 1],
		     &debug_1.ndigit, "_naup2: Imag part of the eig computed"
		    " by _neigh:", (ftnlen)48);
/* Computing 2nd power */
	    i__1 = kplusp;
	    dvout_(&debug_1.logfil, &kplusp, &workl[i__1 * i__1 + (kplusp << 
		    1) + 1], &debug_1.ndigit, "_naup2: Ritz estimates comput"
		    "ed by _neigh:", (ftnlen)42);
	}

/*           %------------------------------------------------% */
/*           | Prepare to exit. Put the converged Ritz values | */
/*           | and corresponding bounds in RITZ(1:NCONV) and  | */
/*           | BOUNDS(1:NCONV) respectively. Then sort. Be    | */
/*           | careful when NCONV > NP                        | */
/*           %------------------------------------------------% */

/*           %------------------------------------------% */
/*           |  Use h( 3,1 ) as storage to communicate  | */
/*           |  rnorm to _neupd if needed               | */
/*           %------------------------------------------% */
	h__[h_dim1 + 3] = rnorm;

/*           %----------------------------------------------% */
/*           | To be consistent with dngets , we first do a  | */
/*           | pre-processing sort in order to keep complex | */
/*           | conjugate pairs together.  This is similar   | */
/*           | to the pre-processing sort used in dngets     | */
/*           | except that the sort is done in the opposite | */
/*           | order.                                       | */
/*           %----------------------------------------------% */

	if (s_cmp(which, "LM", (ftnlen)2, (ftnlen)2) == 0) {
	    s_copy(wprime, "SR", (ftnlen)2, (ftnlen)2);
	}
	if (s_cmp(which, "SM", (ftnlen)2, (ftnlen)2) == 0) {
	    s_copy(wprime, "LR", (ftnlen)2, (ftnlen)2);
	}
	if (s_cmp(which, "LR", (ftnlen)2, (ftnlen)2) == 0) {
	    s_copy(wprime, "SM", (ftnlen)2, (ftnlen)2);
	}
	if (s_cmp(which, "SR", (ftnlen)2, (ftnlen)2) == 0) {
	    s_copy(wprime, "LM", (ftnlen)2, (ftnlen)2);
	}
	if (s_cmp(which, "LI", (ftnlen)2, (ftnlen)2) == 0) {
	    s_copy(wprime, "SM", (ftnlen)2, (ftnlen)2);
	}
	if (s_cmp(which, "SI", (ftnlen)2, (ftnlen)2) == 0) {
	    s_copy(wprime, "LM", (ftnlen)2, (ftnlen)2);
	}

	dsortc_(wprime, &c_true, &kplusp, &ritzr[1], &ritzi[1], &bounds[1], (
		ftnlen)2);

/*           %----------------------------------------------% */
/*           | Now sort Ritz values so that converged Ritz  | */
/*           | values appear within the first NEV locations | */
/*           | of ritzr, ritzi and bounds, and the most     | */
/*           | desired one appears at the front.            | */
/*           %----------------------------------------------% */

	if (s_cmp(which, "LM", (ftnlen)2, (ftnlen)2) == 0) {
	    s_copy(wprime, "SM", (ftnlen)2, (ftnlen)2);
	}
	if (s_cmp(which, "SM", (ftnlen)2, (ftnlen)2) == 0) {
	    s_copy(wprime, "LM", (ftnlen)2, (ftnlen)2);
	}
	if (s_cmp(which, "LR", (ftnlen)2, (ftnlen)2) == 0) {
	    s_copy(wprime, "SR", (ftnlen)2, (ftnlen)2);
	}
	if (s_cmp(which, "SR", (ftnlen)2, (ftnlen)2) == 0) {
	    s_copy(wprime, "LR", (ftnlen)2, (ftnlen)2);
	}
	if (s_cmp(which, "LI", (ftnlen)2, (ftnlen)2) == 0) {
	    s_copy(wprime, "SI", (ftnlen)2, (ftnlen)2);
	}
	if (s_cmp(which, "SI", (ftnlen)2, (ftnlen)2) == 0) {
	    s_copy(wprime, "LI", (ftnlen)2, (ftnlen)2);
	}

	dsortc_(wprime, &c_true, &kplusp, &ritzr[1], &ritzi[1], &bounds[1], (
		ftnlen)2);

/*           %--------------------------------------------------% */
/*           | Scale the Ritz estimate of each Ritz value       | */
/*           | by 1 / max(eps23,magnitude of the Ritz value).   | */
/*           %--------------------------------------------------% */

	i__1 = numcnv;
	for (j = 1; j <= i__1; ++j) {
/* Computing MAX */
	    d__1 = eps23, d__2 = dlapy2_(&ritzr[j], &ritzi[j]);
	    temp = max(d__1,d__2);
	    bounds[j] /= temp;
/* L35: */
	}

/*           %----------------------------------------------------% */
/*           | Sort the Ritz values according to the scaled Ritz  | */
/*           | estimates.  This will push all the converged ones  | */
/*           | towards the front of ritzr, ritzi, bounds          | */
/*           | (in the case when NCONV < NEV.)                    | */
/*           %----------------------------------------------------% */

	s_copy(wprime, "LR", (ftnlen)2, (ftnlen)2);
	dsortc_(wprime, &c_true, &numcnv, &bounds[1], &ritzr[1], &ritzi[1], (
		ftnlen)2);

/*           %----------------------------------------------% */
/*           | Scale the Ritz estimate back to its original | */
/*           | value.                                       | */
/*           %----------------------------------------------% */

	i__1 = numcnv;
	for (j = 1; j <= i__1; ++j) {
/* Computing MAX */
	    d__1 = eps23, d__2 = dlapy2_(&ritzr[j], &ritzi[j]);
	    temp = max(d__1,d__2);
	    bounds[j] *= temp;
/* L40: */
	}

/*           %------------------------------------------------% */
/*           | Sort the converged Ritz values again so that   | */
/*           | the "threshold" value appears at the front of  | */
/*           | ritzr, ritzi and bounds.                       | */
/*           %------------------------------------------------% */

	dsortc_(which, &c_true, &nconv, &ritzr[1], &ritzi[1], &bounds[1], (
		ftnlen)2);


	if (msglvl > 1) {
	    dvout_(&debug_1.logfil, &kplusp, &ritzr[1], &debug_1.ndigit, 
		    "_naup2: Sorted real part of the eigenvalues", (ftnlen)43)
		    ;
	    dvout_(&debug_1.logfil, &kplusp, &ritzi[1], &debug_1.ndigit, 
		    "_naup2: Sorted imaginary part of the eigenvalues", (
		    ftnlen)48);
	    dvout_(&debug_1.logfil, &kplusp, &bounds[1], &debug_1.ndigit, 
		    "_naup2: Sorted ritz estimates.", (ftnlen)30);
	}

/*           %------------------------------------% */
/*           | Max iterations have been exceeded. | */
/*           %------------------------------------% */

	if (iter > *mxiter && nconv < numcnv) {
	    *info = 1;
	}

/*           %---------------------% */
/*           | No shifts to apply. | */
/*           %---------------------% */

	if (*np == 0 && nconv < numcnv) {
	    *info = 2;
	}

	*np = nconv;
	goto L1100;

    } else if (nconv < numcnv && *ishift == 1) {

/*           %-------------------------------------------------% */
/*           | Do not have all the requested eigenvalues yet.  | */
/*           | To prevent possible stagnation, adjust the size | */
/*           | of NEV.                                         | */
/*           %-------------------------------------------------% */

	nevbef = *nev;
/* Computing MIN */
	i__1 = nconv, i__2 = *np / 2;
	*nev += min(i__1,i__2);
	if (*nev == 1 && kplusp >= 6) {
	    *nev = kplusp / 2;
	} else if (*nev == 1 && kplusp > 3) {
	    *nev = 2;
	}
	*np = kplusp - *nev;
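
/*           For example (ignoring the complex-pair and zero-estimate     */
/*           adjustments above): with nev0 = 4, np0 = 8 and nconv = 2,    */
/*           nev grows to 4 + min(2, 8/2) = 6 and np becomes              */
/*           kplusp - nev = 12 - 6 = 6 for the next restart.              */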

/*           %---------------------------------------% */
/*           | If the size of NEV was just increased | */
/*           | resort the eigenvalues.               | */
/*           %---------------------------------------% */

	if (nevbef < *nev) {
	    pdngets_(comm, ishift, which, nev, np, &ritzr[1], &ritzi[1], &
		    bounds[1], &workl[1], &workl[*np + 1], (ftnlen)2);
	}

    }

    if (msglvl > 0) {
	pivout_(comm, &debug_1.logfil, &c__1, &nconv, &debug_1.ndigit, "_nau"
		"p2: no. of \"converged\" Ritz values at this iter.", (ftnlen)
		52);
	if (msglvl > 1) {
	    kp[0] = *nev;
	    kp[1] = *np;
	    pivout_(comm, &debug_1.logfil, &c__2, kp, &debug_1.ndigit, "_nau"
		    "p2: NEV and NP are", (ftnlen)22);
	    pdvout_(comm, &debug_1.logfil, nev, &ritzr[*np + 1], &
		    debug_1.ndigit, "_naup2: \"wanted\" Ritz values -- real "
		    "part", (ftnlen)41);
	    pdvout_(comm, &debug_1.logfil, nev, &ritzi[*np + 1], &
		    debug_1.ndigit, "_naup2: \"wanted\" Ritz values -- imag "
		    "part", (ftnlen)41);
	    pdvout_(comm, &debug_1.logfil, nev, &bounds[*np + 1], &
		    debug_1.ndigit, "_naup2: Ritz estimates of the \"wante"
		    "d\" values ", (ftnlen)46);
	}
    }

    if (*ishift == 0) {

/*           %-------------------------------------------------------% */
/*           | User specified shifts: reverse communication to       | */
/*           | compute the shifts. They are returned in the first    | */
/*           | 2*NP locations of WORKL.                              | */
/*           %-------------------------------------------------------% */

	ushift = TRUE_;
	*ido = 3;
	goto L9000;
    }

L50:

/*        %------------------------------------% */
/*        | Back from reverse communication;   | */
/*        | User specified shifts are returned | */
/*        | in WORKL(1:2*NP)                   | */
/*        %------------------------------------% */

    ushift = FALSE_;

    if (*ishift == 0) {

/*            %----------------------------------% */
/*            | Move the NP shifts from WORKL to | */
/*            | RITZR, RITZI to free up WORKL    | */
/*            | for non-exact shift case.        | */
/*            %----------------------------------% */

	dcopy_(np, &workl[1], &c__1, &ritzr[1], &c__1);
	dcopy_(np, &workl[*np + 1], &c__1, &ritzi[1], &c__1);
    }

    if (msglvl > 2) {
	pivout_(comm, &debug_1.logfil, &c__1, np, &debug_1.ndigit, "_naup2: "
		"The number of shifts to apply ", (ftnlen)38);
	pdvout_(comm, &debug_1.logfil, np, &ritzr[1], &debug_1.ndigit, "_nau"
		"p2: Real part of the shifts", (ftnlen)31);
	pdvout_(comm, &debug_1.logfil, np, &ritzi[1], &debug_1.ndigit, "_nau"
		"p2: Imaginary part of the shifts", (ftnlen)36);
	if (*ishift == 1) {
	    pdvout_(comm, &debug_1.logfil, np, &bounds[1], &debug_1.ndigit, 
		    "_naup2: Ritz estimates of the shifts", (ftnlen)36);
	}
    }

/*        %---------------------------------------------------------% */
/*        | Apply the NP implicit shifts by QR bulge chasing.       | */
/*        | Each shift is applied to the whole upper Hessenberg     | */
/*        | matrix H.                                               | */
/*        | The first 2*N locations of WORKD are used as workspace. | */
/*        %---------------------------------------------------------% */

    pdnapps_(comm, n, nev, np, &ritzr[1], &ritzi[1], &v[v_offset], ldv, &h__[
	    h_offset], ldh, &resid[1], &q[q_offset], ldq, &workl[1], &workd[1]
	    );

/*        %---------------------------------------------% */
/*        | Compute the B-norm of the updated residual. | */
/*        | Keep B*RESID in WORKD(1:N) to be used in    | */
/*        | the first step of the next call to pdnaitr . | */
/*        %---------------------------------------------% */

    cnorm = TRUE_;
    second_(&t2);
    if (*(unsigned char *)bmat == 'G') {
	++timing_1.nbx;
	dcopy_(n, &resid[1], &c__1, &workd[*n + 1], &c__1);
	ipntr[1] = *n + 1;
	ipntr[2] = 1;
	*ido = 2;

/*           %----------------------------------% */
/*           | Exit in order to compute B*RESID | */
/*           %----------------------------------% */

	goto L9000;
    } else if (*(unsigned char *)bmat == 'I') {
	dcopy_(n, &resid[1], &c__1, &workd[1], &c__1);
    }

L100:

/*        %----------------------------------% */
/*        | Back from reverse communication; | */
/*        | WORKD(1:N) := B*RESID            | */
/*        %----------------------------------% */

    if (*(unsigned char *)bmat == 'G') {
	second_(&t3);
	timing_1.tmvbx += t3 - t2;
    }

    if (*(unsigned char *)bmat == 'G') {
	rnorm_buf__ = ddot_(n, &resid[1], &c__1, &workd[1], &c__1);
	mpi_allreduce__(&rnorm_buf__, &rnorm, &c__1, &
		mpipriv_1.mpi_double_precision__, &mpipriv_1.mpi_sum__, comm, 
		&ierr);
	rnorm = sqrt((abs(rnorm)));
    } else if (*(unsigned char *)bmat == 'I') {
	rnorm = pdnorm2_(comm, n, &resid[1], &c__1);
    }
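
/*        The B-norm is assembled in parallel: each process forms its     */
/*        local piece of resid'*(B*resid), MPI_Allreduce sums the pieces  */
/*        over the communicator, and rnorm = sqrt(|global sum|).  For     */
/*        bmat = 'I', pdnorm2  computes the parallel 2-norm of resid      */
/*        directly.                                                       */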
    cnorm = FALSE_;

    if (msglvl > 2) {
	pdvout_(comm, &debug_1.logfil, &c__1, &rnorm, &debug_1.ndigit, "_nau"
		"p2: B-norm of residual for compressed factorization", (ftnlen)
		55);
	pdmout_(comm, &debug_1.logfil, nev, nev, &h__[h_offset], ldh, &
		debug_1.ndigit, "_naup2: Compressed upper Hessenberg matrix H"
		, (ftnlen)44);
    }

    goto L1000;

/*     %---------------------------------------------------------------% */
/*     |                                                               | */
/*     |  E N D     O F     M A I N     I T E R A T I O N     L O O P  | */
/*     |                                                               | */
/*     %---------------------------------------------------------------% */

L1100:

    *mxiter = iter;
    *nev = numcnv;

L1200:
    *ido = 99;

/*     %------------% */
/*     | Error Exit | */
/*     %------------% */

    second_(&t1);
    timing_1.tnaup2 = t1 - t0;

L9000:

/*     %----------------% */
/*     | End of pdnaup2  | */
/*     %----------------% */

    return 0;
} /* pdnaup2_ */
/* File: diag.c   Project: qsnake/gpaw */
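/* Headers and helpers assumed by this snippet (the file's original preamble
   is not shown here); the BLACS/ScaLAPACK prototypes (Csys2blacs_handle_,
   Cblacs_gridinit_, numroc_, descinit_, pdlamch_, pdlaset_, pdsyevd_, ...)
   are expected to come from the project's own headers. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>

#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))   /* assumed definition of MAX */
#endif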
int main(int argc, char *argv[]) {

     // Some constants
     int minusone = -1;
     int zero = 0;
     int one = 1;
     double dzero = 0.0; 

     // ConText
     int ConTxt = minusone;

     // order
     char order = 'R';
     char scope = 'A';

     // root process
     int root = zero;

     // BLACS/SCALAPACK parameters
     // the size of the blocks the distributed matrix is split into
     // (applies to both rows and columns)
     int mb = 32;
     int nb = mb; // PDSYEVxxx constraint

     // the number of rows and columns in the processor grid
     // only square processor grids due to C vs. Fortran ordering
     int nprow = 2;
     int npcol = nprow; // only square processor grids

     // starting row and column in grid, do not change
     int rsrc = zero; 
     int csrc = zero;

     // dimensions of the matrix to diagonalize
     int m = 1000;
     int n = m; // only square matrices

     int info = zero;

     // Rest of code will only work for:
     // nprow = npcol
     // mb = nb;
     // m = n;
     // rsrc = csrc;

     // Parameters for the trivial test matrix
     double alpha = 0.1; // off-diagonal
     double beta = 75.0; // diagonal
     
     // For timing:
     double tdiag0, tdiag, ttotal0, ttotal;

     // BLACS Communicator
     MPI_Comm blacs_comm;
     int nprocs;
     int iam;
     int myrow, mycol;

     MPI_Init(&argc, &argv);
     MPI_Barrier(MPI_COMM_WORLD);
     ttotal0 = MPI_Wtime();
     MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
     MPI_Comm_rank(MPI_COMM_WORLD, &iam);

     if (argc > 2) {
       nprow = atoi(argv[1]);
       m = atoi(argv[2]);
       npcol = nprow;
       n = m;
     }
    
     if (iam == root) {
       printf("world size %d \n",nprocs);
       printf("n %d \n", n);
       printf("nprow %d \n", nprow);
       printf("npcol %d \n", npcol);
     }

     // We can do this on any subcommunicator.
#ifdef CartComm
     int dim[2];
     int pbc[2];
     dim[0] = nprow;
     dim[1] = npcol;
     pbc[0] = 0;
     pbc[1] = 0;
     MPI_Cart_create(MPI_COMM_WORLD, 2, dim, pbc, 1, &blacs_comm);
#else
     blacs_comm = MPI_COMM_WORLD;
#endif

     // initialize the grid:
     // map the MPI communicator to a BLACS handle and set up the process grid
     if (blacs_comm != MPI_COMM_NULL) {
       ConTxt = Csys2blacs_handle_(blacs_comm);
       Cblacs_gridinit_(&ConTxt, &order, nprow, npcol);

       // get information back about the grid
       Cblacs_gridinfo_(ConTxt, &nprow, &npcol, &myrow, &mycol);
     }

     if (ConTxt != minusone) {

          int desc[9];

          // get the size of the distributed matrix
          int locM = numroc_(&m, &mb, &myrow, &rsrc, &nprow);
          int locN = numroc_(&n, &nb, &mycol, &csrc, &npcol);
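
          // numroc_ ("number of rows or columns") returns how many of the
          // global rows (columns) of the block-cyclically distributed matrix
          // are stored on this process row (column) for block size mb (nb).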

	  // printf ("locM = %d \n", locM);
	  // printf ("locN = %d \n", locN);

          int lld = MAX(one,locM);

          // build the descriptor
          descinit_(desc, &m, &n, &mb, &nb, &rsrc, &csrc, &ConTxt, &lld, &info);
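          // desc now holds the standard 9-integer ScaLAPACK descriptor
          // {dtype, ctxt, m, n, mb, nb, rsrc, csrc, lld}; a nonzero info
          // would indicate an illegal argument.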
          // Allocate arrays
	  // eigenvalues
	  double* eigvals = malloc(n * sizeof(double));

          // allocate the distributed matrices
          double* mata = malloc(locM*locN * sizeof(double));
          // allocate the distributed matrix of eigenvectors
          double* z = malloc(locM*locN * sizeof(double));

          // Eigensolver parameters
          int ibtype = one;
          char jobz = 'V'; // eigenvectors also
          char range = 'A'; // all eigenvalues
          char uplo = 'L'; // work with the lower triangle

          double vl, vu;
          int il, iu;

          char cmach = 'U';

          double abstol = 2.0 * pdlamch_(&ConTxt, &cmach);

          int eigvalm, nz;

          double orfac = -1.0;
          //double orfac = 0.001;

          int* ifail;
          ifail = malloc(m * sizeof(int));

          int* iclustr;
          iclustr =  malloc(2*nprow*npcol * sizeof(int));

          double* gap;
          gap =  malloc(nprow*npcol * sizeof(double));

          double* work;
          work = malloc(3 * sizeof(double));
          int querylwork = minusone;
          int* iwork;
          iwork = malloc(1 * sizeof(int));
          int queryliwork = minusone;
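
          // Note: range, vl/vu, il/iu, abstol, orfac, eigvalm, nz, ifail,
          // iclustr and gap are only needed by the pdsyevx expert driver
          // (kept below as commented-out calls); pdsyevd ignores them.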

          // Build a trivial distributed matrix: Diagonal matrix
	  pdlaset_(&uplo, &m, &n, &alpha, &beta, mata, &one, &one, desc);

	  // First there is a workspace query

          // pdsyevx_(&jobz, &range, &uplo, &n, mata, &one, &one, desc, &vl,
          //          &vu, &il, &iu, &abstol, &eigvalm, &nz, eigvals, &orfac, z, &one,
          //          &one, desc, work, &querylwork, iwork, &queryliwork, ifail, iclustr, gap, &info);
          pdsyevd_(&jobz, &uplo, &n, mata, &one, &one, desc, eigvals,
		   z, &one, &one, desc,
		   work, &querylwork, iwork, &queryliwork, &info);
          //pdsyev_(&jobz, &uplo, &m, mata, &one, &one, desc, eigvals,
          //        z, &one, &one, desc, work, &querylwork, &info);

          int lwork = (int)work[0];
          //printf("lwork %d\n", lwork);
          free(work);
          int liwork = (int)iwork[0];
          //printf("liwork %d\n", liwork);
          free(iwork);

          work = (double*)malloc(lwork * sizeof(double));
          iwork = (int*)malloc(liwork * sizeof(int));

	  // This call actually diagonalizes the matrix
          // pdsyevx_(&jobz, &range, &uplo, &n, mata, &one, &one, desc, &vl,
          //          &vu, &il, &iu, &abstol, &eigvalm, &nz, eigvals, &orfac, z, &one,
          //          &one, desc, work, &lwork, iwork, &liwork, ifail, iclustr, gap, &info);
  
          Cblacs_barrier(ConTxt, &scope);
          tdiag0 = MPI_Wtime();
          pdsyevd_(&jobz, &uplo, &n, mata, &one, &one, desc, eigvals,
                   z, &one, &one, desc,
                   work, &lwork, iwork, &liwork, &info);

          //pdsyev_(&jobz, &uplo, &m, mata, &one, &one, desc, eigvals,
          //        z, &one, &one, desc, work, &lwork, &info);
          Cblacs_barrier(ConTxt, &scope);
          tdiag = MPI_Wtime() - tdiag0;

          free(work);
          free(iwork);
          free(gap);
          free(iclustr);
          free(ifail);
          free(z);
          free(mata);

          // Destroy BLACS grid
          Cblacs_gridexit_(ConTxt);

	  // Check eigenvalues
	  if (myrow == zero && mycol == zero) {
	    for (int i = 0; i < n; i++)
	      {
                if (fabs(eigvals[i] - beta) > 0.0001) 
		    printf("Problem: eigval %d != %f5.2 but %f\n", 
                            i, beta, eigvals[i]);
	      }
	    
	    if (info != zero) {
	      printf("info = %d \n", info);
	    }
	    
	    printf("Time (s) diag: %f\n", tdiag);
	  }

	  free(eigvals);
     }

     MPI_Barrier(MPI_COMM_WORLD);
     ttotal = MPI_Wtime() - ttotal0;
     if (iam == 0)
          printf("Time (s) total: %f\n", ttotal);
     MPI_Finalize();
}
/* Subroutine */ int pdsaupd_(integer *comm, integer *ido, char *bmat, 
	integer *n, char *which, integer *nev, doublereal *tol, doublereal *
	resid, integer *ncv, doublereal *v, integer *ldv, integer *iparam, 
	integer *ipntr, doublereal *workd, doublereal *workl, integer *lworkl,
	 integer *info, ftnlen bmat_len, ftnlen which_len)
{
    /* Format strings */
    static char fmt_1000[] = "(//,5x,\002==================================="
	    "=======\002,/5x,\002= Symmetric implicit Arnoldi update code "
	    "=\002,/5x,\002= Version Number:\002,\002 2.1\002,19x,\002 =\002,"
	    "/5x,\002= Version Date:  \002,\002 3/19/97\002,14x,\002 =\002,/5"
	    "x,\002==========================================\002,/5x,\002= S"
	    "ummary of timing statistics           =\002,/5x,\002============"
	    "==============================\002,//)";
    static char fmt_1100[] = "(5x,\002Total number update iterations        "
	    "     = \002,i5,/5x,\002Total number of OP*x operations          "
	    "  = \002,i5,/5x,\002Total number of B*x operations             = "
	    "\002,i5,/5x,\002Total number of reorthogonalization steps  = "
	    "\002,i5,/5x,\002Total number of iterative refinement steps = "
	    "\002,i5,/5x,\002Total number of restart steps              = "
	    "\002,i5,/5x,\002Total time in user OP*x operation          = "
	    "\002,f12.6,/5x,\002Total time in user B*x operation           ="
	    " \002,f12.6,/5x,\002Total time in Arnoldi update routine       = "
	    "\002,f12.6,/5x,\002Total time in p_saup2 routine              ="
	    " \002,f12.6,/5x,\002Total time in basic Arnoldi iteration loop = "
	    "\002,f12.6,/5x,\002Total time in reorthogonalization phase    ="
	    " \002,f12.6,/5x,\002Total time in (re)start vector generation  = "
	    "\002,f12.6,/5x,\002Total time in trid eigenvalue subproblem   ="
	    " \002,f12.6,/5x,\002Total time in getting the shifts           = "
	    "\002,f12.6,/5x,\002Total time in applying the shifts          ="
	    " \002,f12.6,/5x,\002Total time in convergence testing          = "
	    "\002,f12.6)";

    /* System generated locals */
    integer v_dim1, v_offset, i__1, i__2;

    /* Builtin functions */
    integer s_cmp(char *, char *, ftnlen, ftnlen), s_wsfe(cilist *), e_wsfe(
	    void), do_fio(integer *, char *, ftnlen);

    /* Local variables */
    static integer j;
    static real t0, t1;
    static integer nb, ih, iq, np, iw, ldh, ldq, nev0, mode, ierr, myid, iupd,
	     next, ritz;
    extern /* Subroutine */ int mpi_comm_rank__(integer *, integer *, integer 
	    *);
    static integer bounds, ishift, msglvl, mxiter;
    extern /* Subroutine */ int pdvout_(integer *, integer *, integer *, 
	    doublereal *, integer *, char *, ftnlen), pivout_(integer *, 
	    integer *, integer *, integer *, integer *, char *, ftnlen), 
	    second_(real *), dstats_(void), pdsaup2_(integer *, integer *, 
	    char *, integer *, char *, integer *, integer *, doublereal *, 
	    doublereal *, integer *, integer *, integer *, integer *, 
	    doublereal *, integer *, doublereal *, integer *, doublereal *, 
	    doublereal *, doublereal *, integer *, doublereal *, integer *, 
	    doublereal *, integer *, ftnlen, ftnlen);
    extern doublereal pdlamch_(integer *, char *, ftnlen);

    /* Fortran I/O blocks */
    static cilist io___22 = { 0, 6, 0, fmt_1000, 0 };
    static cilist io___23 = { 0, 6, 0, fmt_1100, 0 };




/*     %------------------% */
/*     | MPI Variables    | */
/*     %------------------% */

/* /+ */
/* * */
/* *  (C) 1993 by Argonne National Laboratory and Mississippi State University. */
/* *      All rights reserved.  See COPYRIGHT in top-level directory. */
/* +/ */

/* /+ user include file for MPI programs, with no dependencies +/ */

/* /+ return codes +/ */







/*     We handle datatypes by putting the variables that hold them into */
/*     common.  This way, a Fortran program can directly use the various */
/*     datatypes and can even give them to C programs. */

/*     MPI_BOTTOM needs to be a known address; here we put it at the */
/*     beginning of the common block.  The point-to-point and collective */
/*     routines know about MPI_BOTTOM, but MPI_TYPE_STRUCT as yet does not. */

/*     The types MPI_INTEGER1,2,4 and MPI_REAL4,8 are OPTIONAL. */
/*     Their values are zero if they are not available.  Note that */
/*     using these reduces the portability of code (though may enhance */
/*     portability between Crays and other systems) */



/*     All other MPI routines are subroutines */

/*     The attribute copy/delete functions are symbols that can be passed */
/*     to MPI routines */

/*     %----------------------------------------------------% */
/*     | Include files for debugging and timing information | */
/*     %----------------------------------------------------% */


/* \SCCS Information: @(#) */
/* FILE: debug.h   SID: 2.3   DATE OF SID: 11/16/95   RELEASE: 2 */

/*     %---------------------------------% */
/*     | See debug.doc for documentation | */
/*     %---------------------------------% */

/*     %------------------% */
/*     | Scalar Arguments | */
/*     %------------------% */

/*     %--------------------------------% */
/*     | See stat.doc for documentation | */
/*     %--------------------------------% */

/* \SCCS Information: @(#) */
/* FILE: stat.h   SID: 2.2   DATE OF SID: 11/16/95   RELEASE: 2 */



/*     %-----------------% */
/*     | Array Arguments | */
/*     %-----------------% */


/*     %------------% */
/*     | Parameters | */
/*     %------------% */


/*     %---------------% */
/*     | Local Scalars | */
/*     %---------------% */


/*     %----------------------% */
/*     | External Subroutines | */
/*     %----------------------% */


/*     %--------------------% */
/*     | External Functions | */
/*     %--------------------% */


/*     %-----------------------% */
/*     | Executable Statements | */
/*     %-----------------------% */

    /* Parameter adjustments */
    --workd;
    --resid;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;
    --iparam;
    --ipntr;
    --workl;

    /* Function Body */
    if (*ido == 0) {

/*        %-------------------------------% */
/*        | Initialize timing statistics  | */
/*        | & message level for debugging | */
/*        %-------------------------------% */

	dstats_();
	second_(&t0);
	msglvl = debug_1.msaupd;

	ierr = 0;
	ishift = iparam[1];
	mxiter = iparam[3];
/*         nb     = iparam(4) */
	nb = 1;

/*        %--------------------------------------------% */
/*        | Revision 2 performs only implicit restart. | */
/*        %--------------------------------------------% */

	iupd = 1;
	mode = iparam[7];
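
/*        In the dsaupd  documentation iparam(7) selects the mode:        */
/*        1 = regular, 2 = regular inverse (generalized), 3 = shift and   */
/*        invert, 4 = buckling, 5 = Cayley.                               */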

/*        %----------------% */
/*        | Error checking | */
/*        %----------------% */

	if (*n <= 0) {
	    ierr = -1;
	} else if (*nev <= 0) {
	    ierr = -2;
	} else if (*ncv <= *nev) {
	    ierr = -3;
	}

/*        %----------------------------------------------% */
/*        | NP is the number of additional steps to      | */
/*        | extend the length NEV Lanczos factorization. | */
/*        %----------------------------------------------% */

	np = *ncv - *nev;

	if (mxiter <= 0) {
	    ierr = -4;
	}
	if (s_cmp(which, "LM", (ftnlen)2, (ftnlen)2) != 0 && s_cmp(which, 
		"SM", (ftnlen)2, (ftnlen)2) != 0 && s_cmp(which, "LA", (
		ftnlen)2, (ftnlen)2) != 0 && s_cmp(which, "SA", (ftnlen)2, (
		ftnlen)2) != 0 && s_cmp(which, "BE", (ftnlen)2, (ftnlen)2) != 
		0) {
	    ierr = -5;
	}
	if (*(unsigned char *)bmat != 'I' && *(unsigned char *)bmat != 'G') {
	    ierr = -6;
	}

/* Computing 2nd power */
	i__1 = *ncv;
	if (*lworkl < i__1 * i__1 + (*ncv << 3)) {
	    ierr = -7;
	}
	if (mode < 1 || mode > 5) {
	    ierr = -10;
	} else if (mode == 1 && *(unsigned char *)bmat == 'G') {
	    ierr = -11;
	} else if (ishift < 0 || ishift > 1) {
	    ierr = -12;
	} else if (*nev == 1 && s_cmp(which, "BE", (ftnlen)2, (ftnlen)2) == 0)
		 {
	    ierr = -13;
	}

/*        %------------% */
/*        | Error Exit | */
/*        %------------% */

	if (ierr != 0) {
	    *info = ierr;
	    *ido = 99;
	    goto L9000;
	}

/*        %------------------------% */
/*        | Set default parameters | */
/*        %------------------------% */

	if (nb <= 0) {
	    nb = 1;
	}
	if (*tol <= 0.) {
	    *tol = pdlamch_(comm, "EpsMach", (ftnlen)7);
	}

/*        %----------------------------------------------% */
/*        | NP is the number of additional steps to      | */
/*        | extend the length NEV Lanczos factorization. | */
/*        | NEV0 is the local variable designating the   | */
/*        | size of the invariant subspace desired.      | */
/*        %----------------------------------------------% */

	np = *ncv - *nev;
	nev0 = *nev;

/*        %-----------------------------% */
/*        | Zero out internal workspace | */
/*        %-----------------------------% */

/* Computing 2nd power */
	i__2 = *ncv;
	i__1 = i__2 * i__2 + (*ncv << 3);
	for (j = 1; j <= i__1; ++j) {
	    workl[j] = 0.;
/* L10: */
	}

/*        %-------------------------------------------------------% */
/*        | Pointer into WORKL for address of H, RITZ, BOUNDS, Q  | */
/*        | etc... and the remaining workspace.                   | */
/*        | Also update pointer to be used on output.             | */
/*        | Memory is laid out as follows:                        | */
/*        | workl(1:2*ncv) := generated tridiagonal matrix        | */
/*        | workl(2*ncv+1:2*ncv+ncv) := ritz values               | */
/*        | workl(3*ncv+1:3*ncv+ncv) := computed error bounds     | */
/*        | workl(4*ncv+1:4*ncv+ncv*ncv) := rotation matrix Q     | */
/*        | workl(4*ncv+ncv*ncv+1:7*ncv+ncv*ncv) := workspace     | */
/*        %-------------------------------------------------------% */

	ldh = *ncv;
	ldq = *ncv;
	ih = 1;
	ritz = ih + (ldh << 1);
	bounds = ritz + *ncv;
	iq = bounds + *ncv;
/* Computing 2nd power */
	i__1 = *ncv;
	iw = iq + i__1 * i__1;
	next = iw + *ncv * 3;

	ipntr[4] = next;
	ipntr[5] = ih;
	ipntr[6] = ritz;
	ipntr[7] = bounds;
	ipntr[11] = iw;
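
/*        For example, with ncv = 10: the tridiagonal H occupies          */
/*        workl(1:20) (two columns of length ncv), the Ritz values        */
/*        workl(21:30), the bounds workl(31:40), Q workl(41:140) and the  */
/*        3*ncv workspace workl(141:170); next = 171 is the first unused  */
/*        element.  The input check above requires                        */
/*        lworkl .ge. ncv**2 + 8*ncv = 180.                               */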
    }

/*     %-------------------------------------------------------% */
/*     | Carry out the Implicitly restarted Lanczos Iteration. | */
/*     %-------------------------------------------------------% */

    pdsaup2_(comm, ido, bmat, n, which, &nev0, &np, tol, &resid[1], &mode, &
	    iupd, &ishift, &mxiter, &v[v_offset], ldv, &workl[ih], &ldh, &
	    workl[ritz], &workl[bounds], &workl[iq], &ldq, &workl[iw], &ipntr[
	    1], &workd[1], info, (ftnlen)1, (ftnlen)2);

/*     %--------------------------------------------------% */
/*     | ido .ne. 99 implies use of reverse communication | */
/*     | to compute operations involving OP or shifts.    | */
/*     %--------------------------------------------------% */

    if (*ido == 3) {
	iparam[8] = np;
    }
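
/*     When pdsaup2  requests user-supplied shifts (ido = 3), iparam(8)   */
/*     tells the caller how many shifts (NP) to place in WORKL before     */
/*     re-entering (see the dsaupd  documentation of ipntr(11)).          */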
    if (*ido != 99) {
	goto L9000;
    }

    iparam[3] = mxiter;
    iparam[5] = np;
    iparam[9] = timing_1.nopx;
    iparam[10] = timing_1.nbx;
    iparam[11] = timing_1.nrorth;

/*     %------------------------------------% */
/*     | Exit if there was an informational | */
/*     | error within pdsaup2 .              | */
/*     %------------------------------------% */

    if (*info < 0) {
	goto L9000;
    }
    if (*info == 2) {
	*info = 3;
    }

    if (msglvl > 0) {
	pivout_(comm, &debug_1.logfil, &c__1, &mxiter, &debug_1.ndigit, "_sa"
		"upd: number of update iterations taken", (ftnlen)41);
	pivout_(comm, &debug_1.logfil, &c__1, &np, &debug_1.ndigit, "_saupd:"
		" number of \"converged\" Ritz values", (ftnlen)41);
	pdvout_(comm, &debug_1.logfil, &np, &workl[ritz], &debug_1.ndigit, 
		"_saupd: final Ritz values", (ftnlen)25);
	pdvout_(comm, &debug_1.logfil, &np, &workl[bounds], &debug_1.ndigit, 
		"_saupd: corresponding error bounds", (ftnlen)34);
    }

    second_(&t1);
    timing_1.tsaupd = t1 - t0;

    if (msglvl > 0) {
	mpi_comm_rank__(comm, &myid, &ierr);
	if (myid == 0) {

/*        %--------------------------------------------------------% */
/*        | Version Number & Version Date are defined in version.h | */
/*        %--------------------------------------------------------% */

	    s_wsfe(&io___22);
	    e_wsfe();
	    s_wsfe(&io___23);
	    do_fio(&c__1, (char *)&mxiter, (ftnlen)sizeof(integer));
	    do_fio(&c__1, (char *)&timing_1.nopx, (ftnlen)sizeof(integer));
	    do_fio(&c__1, (char *)&timing_1.nbx, (ftnlen)sizeof(integer));
	    do_fio(&c__1, (char *)&timing_1.nrorth, (ftnlen)sizeof(integer));
	    do_fio(&c__1, (char *)&timing_1.nitref, (ftnlen)sizeof(integer));
	    do_fio(&c__1, (char *)&timing_1.nrstrt, (ftnlen)sizeof(integer));
	    do_fio(&c__1, (char *)&timing_1.tmvopx, (ftnlen)sizeof(real));
	    do_fio(&c__1, (char *)&timing_1.tmvbx, (ftnlen)sizeof(real));
	    do_fio(&c__1, (char *)&timing_1.tsaupd, (ftnlen)sizeof(real));
	    do_fio(&c__1, (char *)&timing_1.tsaup2, (ftnlen)sizeof(real));
	    do_fio(&c__1, (char *)&timing_1.tsaitr, (ftnlen)sizeof(real));
	    do_fio(&c__1, (char *)&timing_1.titref, (ftnlen)sizeof(real));
	    do_fio(&c__1, (char *)&timing_1.tgetv0, (ftnlen)sizeof(real));
	    do_fio(&c__1, (char *)&timing_1.tseigt, (ftnlen)sizeof(real));
	    do_fio(&c__1, (char *)&timing_1.tsgets, (ftnlen)sizeof(real));
	    do_fio(&c__1, (char *)&timing_1.tsapps, (ftnlen)sizeof(real));
	    do_fio(&c__1, (char *)&timing_1.tsconv, (ftnlen)sizeof(real));
	    e_wsfe();
	}
    }

L9000:

    return 0;

/*     %----------------% */
/*     | End of pdsaupd  | */
/*     %----------------% */

} /* pdsaupd_ */
/* Subroutine */ int pdsapps_(integer *comm, integer *n, integer *kev, 
	integer *np, doublereal *shift, doublereal *v, integer *ldv, 
	doublereal *h__, integer *ldh, doublereal *resid, doublereal *q, 
	integer *ldq, doublereal *workd)
{
    /* Initialized data */

    static logical first = TRUE_;

    /* System generated locals */
    integer h_dim1, h_offset, q_dim1, q_offset, v_dim1, v_offset, i__1, i__2, 
	    i__3, i__4;
    doublereal d__1, d__2;

    /* Local variables */
    static doublereal c__, f, g;
    static integer i__, j;
    static doublereal r__, s, a1, a2, a3, a4;
    static real t0, t1;
    static integer jj;
    static doublereal big;
    static integer iend, itop;
    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, 
	    integer *), dgemv_(char *, integer *, integer *, doublereal *, 
	    doublereal *, integer *, doublereal *, integer *, doublereal *, 
	    doublereal *, integer *, ftnlen), dcopy_(integer *, doublereal *, 
	    integer *, doublereal *, integer *), daxpy_(integer *, doublereal 
	    *, doublereal *, integer *, doublereal *, integer *), second_(
	    real *);
    static doublereal epsmch;
    static integer istart, kplusp, msglvl;
    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, 
	    doublereal *, integer *, doublereal *, integer *, ftnlen), 
	    dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, 
	    doublereal *), dlaset_(char *, integer *, integer *, doublereal *,
	     doublereal *, doublereal *, integer *, ftnlen), pdvout_(integer *
	    , integer *, integer *, doublereal *, integer *, char *, ftnlen), 
	    pivout_(integer *, integer *, integer *, integer *, integer *, 
	    char *, ftnlen);
    extern doublereal pdlamch_(integer *, char *, ftnlen);


/*     %--------------------% */
/*     | MPI Communicator   | */
/*     %--------------------% */


/*     %----------------------------------------------------% */
/*     | Include files for debugging and timing information | */
/*     %----------------------------------------------------% */


/* \SCCS Information: @(#) */
/* FILE: debug.h   SID: 2.3   DATE OF SID: 11/16/95   RELEASE: 2 */

/*     %---------------------------------% */
/*     | See debug.doc for documentation | */
/*     %---------------------------------% */

/*     %------------------% */
/*     | Scalar Arguments | */
/*     %------------------% */

/*     %--------------------------------% */
/*     | See stat.doc for documentation | */
/*     %--------------------------------% */

/* \SCCS Information: @(#) */
/* FILE: stat.h   SID: 2.2   DATE OF SID: 11/16/95   RELEASE: 2 */



/*     %-----------------% */
/*     | Array Arguments | */
/*     %-----------------% */


/*     %------------% */
/*     | Parameters | */
/*     %------------% */


/*     %---------------% */
/*     | Local Scalars | */
/*     %---------------% */



/*     %----------------------% */
/*     | External Subroutines | */
/*     %----------------------% */


/*     %--------------------% */
/*     | External Functions | */
/*     %--------------------% */


/*     %----------------------% */
/*     | Intrinsic Functions  | */
/*     %----------------------% */


/*     %-----------------% */
/*     | Data statements | */
/*     %-----------------% */

    /* Parameter adjustments */
    --workd;
    --resid;
    --shift;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;
    h_dim1 = *ldh;
    h_offset = 1 + h_dim1;
    h__ -= h_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;

    /* Function Body */

/*     %-----------------------% */
/*     | Executable Statements | */
/*     %-----------------------% */

    if (first) {
	epsmch = pdlamch_(comm, "Epsilon-Machine", (ftnlen)15);
	first = FALSE_;
    }
    itop = 1;

/*     %-------------------------------% */
/*     | Initialize timing statistics  | */
/*     | & message level for debugging | */
/*     %-------------------------------% */

    second_(&t0);
    msglvl = debug_1.msapps;

    kplusp = *kev + *np;

/*     %----------------------------------------------% */
/*     | Initialize Q to the identity matrix of order | */
/*     | kplusp used to accumulate the rotations.     | */
/*     %----------------------------------------------% */

    dlaset_("All", &kplusp, &kplusp, &c_b4, &c_b5, &q[q_offset], ldq, (ftnlen)
	    3);

/*     %----------------------------------------------% */
/*     | Quick return if there are no shifts to apply | */
/*     %----------------------------------------------% */

    if (*np == 0) {
	goto L9000;
    }

/*     %----------------------------------------------------------% */
/*     | Apply the np shifts implicitly. Apply each shift to the  | */
/*     | whole matrix and not just to the submatrix from which it | */
/*     | comes.                                                   | */
/*     %----------------------------------------------------------% */

    i__1 = *np;
    for (jj = 1; jj <= i__1; ++jj) {

	istart = itop;

/*        %----------------------------------------------------------% */
/*        | Check for splitting and deflation. Currently we consider | */
/*        | an off-diagonal element h(i+1,1) negligible if           | */
/*        |         h(i+1,1) .le. epsmch*( |h(i,2)| + |h(i+1,2)| )   | */
/*        | for i=1:KEV+NP-1.                                        | */
/*        | If above condition tests true then we set h(i+1,1) = 0.  | */
/*        | Note that h(1:KEV+NP,1) are assumed to be non negative.  | */
/*        %----------------------------------------------------------% */

L20:

/*        %------------------------------------------------% */
/*        | The following loop exits early if we encounter | */
/*        | a negligible off diagonal element.             | */
/*        %------------------------------------------------% */

	i__2 = kplusp - 1;
	for (i__ = istart; i__ <= i__2; ++i__) {
	    big = (d__1 = h__[i__ + (h_dim1 << 1)], abs(d__1)) + (d__2 = h__[
		    i__ + 1 + (h_dim1 << 1)], abs(d__2));
	    if (h__[i__ + 1 + h_dim1] <= epsmch * big) {
		if (msglvl > 0) {
		    pivout_(comm, &debug_1.logfil, &c__1, &i__, &
			    debug_1.ndigit, "_sapps: deflation at row/column"
			    " no.", (ftnlen)35);
		    pivout_(comm, &debug_1.logfil, &c__1, &jj, &
			    debug_1.ndigit, "_sapps: occurred before shift n"
			    "umber.", (ftnlen)37);
		    pdvout_(comm, &debug_1.logfil, &c__1, &h__[i__ + 1 + 
			    h_dim1], &debug_1.ndigit, "_sapps: the correspon"
			    "ding off diagonal element", (ftnlen)46);
		}
		h__[i__ + 1 + h_dim1] = 0.;
		iend = i__;
		goto L40;
	    }
/* L30: */
	}
	iend = kplusp;
L40:

	if (istart < iend) {

/*           %--------------------------------------------------------% */
/*           | Construct the plane rotation G'(istart,istart+1,theta) | */
/*           | that attempts to drive h(istart+1,1) to zero.          | */
/*           %--------------------------------------------------------% */

	    f = h__[istart + (h_dim1 << 1)] - shift[jj];
	    g = h__[istart + 1 + h_dim1];
	    dlartg_(&f, &g, &c__, &s, &r__);

/*            %-------------------------------------------------------% */
/*            | Apply rotation to the left and right of H;            | */
/*            | H <- G' * H * G,  where G = G(istart,istart+1,theta). | */
/*            | This will create a "bulge".                           | */
/*            %-------------------------------------------------------% */

	    a1 = c__ * h__[istart + (h_dim1 << 1)] + s * h__[istart + 1 + 
		    h_dim1];
	    a2 = c__ * h__[istart + 1 + h_dim1] + s * h__[istart + 1 + (
		    h_dim1 << 1)];
	    a4 = c__ * h__[istart + 1 + (h_dim1 << 1)] - s * h__[istart + 1 + 
		    h_dim1];
	    a3 = c__ * h__[istart + 1 + h_dim1] - s * h__[istart + (h_dim1 << 
		    1)];
	    h__[istart + (h_dim1 << 1)] = c__ * a1 + s * a2;
	    h__[istart + 1 + (h_dim1 << 1)] = c__ * a4 - s * a3;
	    h__[istart + 1 + h_dim1] = c__ * a3 + s * a4;

/*            %----------------------------------------------------% */
/*            | Accumulate the rotation in the matrix Q;  Q <- Q*G | */
/*            %----------------------------------------------------% */

/* Computing MIN */
	    i__3 = istart + jj;
	    i__2 = min(i__3,kplusp);
	    for (j = 1; j <= i__2; ++j) {
		a1 = c__ * q[j + istart * q_dim1] + s * q[j + (istart + 1) * 
			q_dim1];
		q[j + (istart + 1) * q_dim1] = -s * q[j + istart * q_dim1] + 
			c__ * q[j + (istart + 1) * q_dim1];
		q[j + istart * q_dim1] = a1;
/* L60: */
	    }


/*            %----------------------------------------------% */
/*            | The following loop chases the bulge created. | */
/*            | Note that the previous rotation may also be  | */
/*            | done within the following loop. But it is    | */
/*            | kept separate to make the distinction among  | */
/*            | the bulge chasing sweeps and the first plane | */
/*            | rotation designed to drive h(istart+1,1) to  | */
/*            | zero.                                        | */
/*            %----------------------------------------------% */

	    i__2 = iend - 1;
	    for (i__ = istart + 1; i__ <= i__2; ++i__) {

/*               %----------------------------------------------% */
/*               | Construct the plane rotation G'(i,i+1,theta) | */
/*               | that zeros the i-th bulge that was created   | */
/*               | by G(i-1,i,theta). g represents the bulge.   | */
/*               %----------------------------------------------% */

		f = h__[i__ + h_dim1];
		g = s * h__[i__ + 1 + h_dim1];

/*               %----------------------------------% */
/*               | Final update with G(i-1,i,theta) | */
/*               %----------------------------------% */

		h__[i__ + 1 + h_dim1] = c__ * h__[i__ + 1 + h_dim1];
		dlartg_(&f, &g, &c__, &s, &r__);

/*               %-------------------------------------------% */
/*               | The following ensures that h(1:iend-1,1), | */
/*               | the first iend-2 off-diagonal elements of | */
/*               | H remain non-negative.                    | */
/*               %-------------------------------------------% */

		if (r__ < 0.) {
		    r__ = -r__;
		    c__ = -c__;
		    s = -s;
		}

/*               %--------------------------------------------% */
/*               | Apply rotation to the left and right of H; | */
/*               | H <- G * H * G',  where G = G(i,i+1,theta) | */
/*               %--------------------------------------------% */

		h__[i__ + h_dim1] = r__;

		a1 = c__ * h__[i__ + (h_dim1 << 1)] + s * h__[i__ + 1 + 
			h_dim1];
		a2 = c__ * h__[i__ + 1 + h_dim1] + s * h__[i__ + 1 + (h_dim1 
			<< 1)];
		a3 = c__ * h__[i__ + 1 + h_dim1] - s * h__[i__ + (h_dim1 << 1)
			];
		a4 = c__ * h__[i__ + 1 + (h_dim1 << 1)] - s * h__[i__ + 1 + 
			h_dim1];

		h__[i__ + (h_dim1 << 1)] = c__ * a1 + s * a2;
		h__[i__ + 1 + (h_dim1 << 1)] = c__ * a4 - s * a3;
		h__[i__ + 1 + h_dim1] = c__ * a3 + s * a4;

/*               %----------------------------------------------------% */
/*               | Accumulate the rotation in the matrix Q;  Q <- Q*G | */
/*               %----------------------------------------------------% */

/* Computing MIN */
		i__4 = i__ + jj;
		i__3 = min(i__4,kplusp);
		for (j = 1; j <= i__3; ++j) {
		    a1 = c__ * q[j + i__ * q_dim1] + s * q[j + (i__ + 1) * 
			    q_dim1];
		    q[j + (i__ + 1) * q_dim1] = -s * q[j + i__ * q_dim1] + 
			    c__ * q[j + (i__ + 1) * q_dim1];
		    q[j + i__ * q_dim1] = a1;
/* L50: */
		}

/* L70: */
	    }

	}

/*        %--------------------------% */
/*        | Update the block pointer | */
/*        %--------------------------% */

	istart = iend + 1;

/*        %------------------------------------------% */
/*        | Make sure that h(iend,1) is non-negative | */
/*        | If not then set h(iend,1) <-- -h(iend,1) | */
/*        | and negate the last column of Q.         | */
/*        | We have effectively carried out a        | */
/*        | similarity transformation on H           | */
/*        %------------------------------------------% */

	if (h__[iend + h_dim1] < 0.) {
	    h__[iend + h_dim1] = -h__[iend + h_dim1];
	    dscal_(&kplusp, &c_b20, &q[iend * q_dim1 + 1], &c__1);
	}

/*        %--------------------------------------------------------% */
/*        | Apply the same shift to the next block if there is any | */
/*        %--------------------------------------------------------% */

	if (iend < kplusp) {
	    goto L20;
	}

/*        %-----------------------------------------------------% */
/*        | Check if we can increase the start of the block     | */
/*        %-----------------------------------------------------% */

	i__2 = kplusp - 1;
	for (i__ = itop; i__ <= i__2; ++i__) {
	    if (h__[i__ + 1 + h_dim1] > 0.) {
		goto L90;
	    }
	    ++itop;
/* L80: */
	}

/*        %-----------------------------------% */
/*        | Finished applying the jj-th shift | */
/*        %-----------------------------------% */

L90:
	;
    }

/*     %------------------------------------------% */
/*     | All shifts have been applied. Check for  | */
/*     | more possible deflation that might occur | */
/*     | after the last shift is applied.         | */
/*     %------------------------------------------% */

    i__1 = kplusp - 1;
    for (i__ = itop; i__ <= i__1; ++i__) {
	big = (d__1 = h__[i__ + (h_dim1 << 1)], abs(d__1)) + (d__2 = h__[i__ 
		+ 1 + (h_dim1 << 1)], abs(d__2));
	if (h__[i__ + 1 + h_dim1] <= epsmch * big) {
	    if (msglvl > 0) {
		pivout_(comm, &debug_1.logfil, &c__1, &i__, &debug_1.ndigit, 
			"_sapps: deflation at row/column no.", (ftnlen)35);
		pdvout_(comm, &debug_1.logfil, &c__1, &h__[i__ + 1 + h_dim1], 
			&debug_1.ndigit, "_sapps: the corresponding off diag"
			"onal element", (ftnlen)46);
	    }
	    h__[i__ + 1 + h_dim1] = 0.;
	}
/* L100: */
    }

/*     %-------------------------------------------------% */
/*     | Compute the (kev+1)-st column of (V*Q) and      | */
/*     | temporarily store the result in WORKD(N+1:2*N). | */
/*     | This is not necessary if h(kev+1,1) = 0.         | */
/*     %-------------------------------------------------% */

    if (h__[*kev + 1 + h_dim1] > 0.) {
	dgemv_("N", n, &kplusp, &c_b5, &v[v_offset], ldv, &q[(*kev + 1) * 
		q_dim1 + 1], &c__1, &c_b4, &workd[*n + 1], &c__1, (ftnlen)1);
    }

/*     %-------------------------------------------------------% */
/*     | Compute column 1 to kev of (V*Q) in backward order    | */
/*     | taking advantage that Q is an upper triangular matrix | */
/*     | with lower bandwidth np.                              | */
/*     | Place results in v(:,kplusp-kev:kplusp) temporarily.  | */
/*     %-------------------------------------------------------% */

    i__1 = *kev;
    for (i__ = 1; i__ <= i__1; ++i__) {
	i__2 = kplusp - i__ + 1;
	dgemv_("N", n, &i__2, &c_b5, &v[v_offset], ldv, &q[(*kev - i__ + 1) * 
		q_dim1 + 1], &c__1, &c_b4, &workd[1], &c__1, (ftnlen)1);
	dcopy_(n, &workd[1], &c__1, &v[(kplusp - i__ + 1) * v_dim1 + 1], &
		c__1);
/* L130: */
    }

/*     %-------------------------------------------------% */
/*     |  Move v(:,kplusp-kev+1:kplusp) into v(:,1:kev). | */
/*     %-------------------------------------------------% */

    dlacpy_("All", n, kev, &v[(*np + 1) * v_dim1 + 1], ldv, &v[v_offset], ldv,
	     (ftnlen)3);

/*     %--------------------------------------------% */
/*     | Copy the (kev+1)-st column of (V*Q) in the | */
/*     | appropriate place if h(kev+1,1) .ne. zero. | */
/*     %--------------------------------------------% */

    if (h__[*kev + 1 + h_dim1] > 0.) {
	dcopy_(n, &workd[*n + 1], &c__1, &v[(*kev + 1) * v_dim1 + 1], &c__1);
    }

/*     %-------------------------------------% */
/*     | Update the residual vector:         | */
/*     |    r <- sigmak*r + betak*v(:,kev+1) | */
/*     | where                               | */
/*     |    sigmak = (e_{kev+p}'*Q)*e_{kev}  | */
/*     |    betak = e_{kev+1}'*H*e_{kev}     | */
/*     %-------------------------------------% */

    dscal_(n, &q[kplusp + *kev * q_dim1], &resid[1], &c__1);
    if (h__[*kev + 1 + h_dim1] > 0.) {
	daxpy_(n, &h__[*kev + 1 + h_dim1], &v[(*kev + 1) * v_dim1 + 1], &c__1,
		 &resid[1], &c__1);
    }

    if (msglvl > 1) {
	pdvout_(comm, &debug_1.logfil, &c__1, &q[kplusp + *kev * q_dim1], &
		debug_1.ndigit, "_sapps: sigmak of the updated residual vect"
		"or", (ftnlen)45);
	pdvout_(comm, &debug_1.logfil, &c__1, &h__[*kev + 1 + h_dim1], &
		debug_1.ndigit, "_sapps: betak of the updated residual vector"
		, (ftnlen)44);
	pdvout_(comm, &debug_1.logfil, kev, &h__[(h_dim1 << 1) + 1], &
		debug_1.ndigit, "_sapps: updated main diagonal of H for next"
		" iteration", (ftnlen)53);
	if (*kev > 1) {
	    i__1 = *kev - 1;
	    pdvout_(comm, &debug_1.logfil, &i__1, &h__[h_dim1 + 2], &
		    debug_1.ndigit, "_sapps: updated sub diagonal of H for n"
		    "ext iteration", (ftnlen)52);
	}
    }

    second_(&t1);
    timing_1.tsapps += t1 - t0;

L9000:
    return 0;

/*     %----------------% */
/*     | End of pdsapps | */
/*     %----------------% */

} /* pdsapps_ */
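
The two-sided plane-rotation update used above (apply the rotation to the left and right of H, then accumulate it into Q) is easiest to see on a single symmetric 2x2 block. The following standalone sketch is not part of the translated ARPACK source: plane_rotation is a hypothetical stand-in for the generic branch of LAPACK's dlartg (no underflow/overflow safeguards), and the names f, g, c, s, r, a1..a4 only mirror the local variables of pdsapps_; the block entries and the shift are arbitrary test values.

#include <math.h>
#include <stdio.h>

/* Build c, s, r with  [c s; -s c] * [f; g] = [r; 0]  (generic case only). */
static void plane_rotation(double f, double g, double *c, double *s, double *r)
{
    *r = hypot(f, g);
    *c = f / *r;
    *s = g / *r;
}

int main(void)
{
    /* Symmetric 2x2 block [a b; b d] and a shift, as in the single-shift
       step above: the rotation is built from (a - sigma, b). */
    double a = 4.0, b = 1.0, d = 3.0, sigma = 2.5;
    double c, s, r;
    plane_rotation(a - sigma, b, &c, &s, &r);

    /* Two-sided similarity update written out with the same a1..a4
       temporaries as pdsapps_: rotate on the left, then on the right. */
    double a1 = c * a + s * b;
    double a2 = c * b + s * d;
    double a3 = c * b - s * a;
    double a4 = c * d - s * b;
    double new_a = c * a1 + s * a2;   /* (1,1) */
    double new_d = c * a4 - s * a3;   /* (2,2) */
    double new_b = c * a3 + s * a4;   /* (2,1) = (1,2) by symmetry */

    printf("rotated block: [% .6f % .6f; % .6f % .6f]\n",
           new_a, new_b, new_b, new_d);
    return 0;
}

Compiled with a C99 compiler and -lm, this prints the transformed block; in the full routine the same update is swept along the matrix while the accumulated rotations are stored in Q.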
/* Subroutine */ int pdnapps_(integer *comm, integer *n, integer *kev, 
	integer *np, doublereal *shiftr, doublereal *shifti, doublereal *v, 
	integer *ldv, doublereal *h__, integer *ldh, doublereal *resid, 
	doublereal *q, integer *ldq, doublereal *workl, doublereal *workd)
{
    /* Initialized data */

    static logical first = TRUE_;

    /* System generated locals */
    integer h_dim1, h_offset, v_dim1, v_offset, q_dim1, q_offset, i__1, i__2, 
	    i__3, i__4;
    doublereal d__1, d__2;

    /* Local variables */
    static doublereal c__, f, g;
    static integer i__, j;
    static doublereal r__, s, t, u[3];
    static real t0, t1;
    static doublereal h11, h12, h21, h22, h32;
    static integer jj, ir, nr;
    static doublereal tau, ulp, tst1;
    static integer iend;
    static doublereal unfl, ovfl;
    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, 
	    integer *), dlarf_(char *, integer *, integer *, doublereal *, 
	    integer *, doublereal *, doublereal *, integer *, doublereal *, 
	    ftnlen);
    static logical cconj;
    extern /* Subroutine */ int dgemv_(char *, integer *, integer *, 
	    doublereal *, doublereal *, integer *, doublereal *, integer *, 
	    doublereal *, doublereal *, integer *, ftnlen), dcopy_(integer *, 
	    doublereal *, integer *, doublereal *, integer *), daxpy_(integer 
	    *, doublereal *, doublereal *, integer *, doublereal *, integer *)
	    ;
    extern doublereal dlapy2_(doublereal *, doublereal *);
    extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dlarfg_(
	    integer *, doublereal *, doublereal *, integer *, doublereal *);
    static doublereal sigmai;
    extern /* Subroutine */ int second_(real *);
    static doublereal sigmar;
    static integer istart, kplusp, msglvl;
    static doublereal smlnum;
    extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, 
	    doublereal *, integer *, doublereal *, integer *, ftnlen), 
	    dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, 
	    doublereal *), dlaset_(char *, integer *, integer *, doublereal *,
	     doublereal *, doublereal *, integer *, ftnlen), pivout_(integer *
	    , integer *, integer *, integer *, integer *, char *, ftnlen), 
	    pdvout_(integer *, integer *, integer *, doublereal *, integer *, 
	    char *, ftnlen), pdmout_(integer *, integer *, integer *, integer 
	    *, doublereal *, integer *, integer *, char *, ftnlen);
    extern doublereal dlanhs_(char *, integer *, doublereal *, integer *, 
	    doublereal *, ftnlen), pdlamch_(integer *, char *, ftnlen);


/*     %--------------------% */
/*     | MPI Communicator   | */
/*     %--------------------% */


/*     %----------------------------------------------------% */
/*     | Include files for debugging and timing information | */
/*     %----------------------------------------------------% */


/* \SCCS Information: @(#) */
/* FILE: debug.h   SID: 2.3   DATE OF SID: 11/16/95   RELEASE: 2 */

/*     %---------------------------------% */
/*     | See debug.doc for documentation | */
/*     %---------------------------------% */

/*     %------------------% */
/*     | Scalar Arguments | */
/*     %------------------% */

/*     %--------------------------------% */
/*     | See stat.doc for documentation | */
/*     %--------------------------------% */

/* \SCCS Information: @(#) */
/* FILE: stat.h   SID: 2.2   DATE OF SID: 11/16/95   RELEASE: 2 */



/*     %-----------------% */
/*     | Array Arguments | */
/*     %-----------------% */


/*     %------------% */
/*     | Parameters | */
/*     %------------% */


/*     %------------------------% */
/*     | Local Scalars & Arrays | */
/*     %------------------------% */


/*     %----------------------% */
/*     | External Subroutines | */
/*     %----------------------% */


/*     %--------------------% */
/*     | External Functions | */
/*     %--------------------% */


/*     %----------------------% */
/*     | Intrinsics Functions | */
/*     %----------------------% */


/*     %-----------------% */
/*     | Data statements | */
/*     %-----------------% */

    /* Parameter adjustments */
    --workd;
    --resid;
    --workl;
    --shifti;
    --shiftr;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;
    h_dim1 = *ldh;
    h_offset = 1 + h_dim1;
    h__ -= h_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;

    /* Function Body */

/*     %-----------------------% */
/*     | Executable Statements | */
/*     %-----------------------% */

    if (first) {

/*        %-----------------------------------------------% */
/*        | Set machine-dependent constants for the       | */
/*        | stopping criterion. If norm(H) <= sqrt(OVFL), | */
/*        | overflow should not occur.                    | */
/*        | REFERENCE: LAPACK subroutine dlahqr           | */
/*        %-----------------------------------------------% */

	unfl = pdlamch_(comm, "safe minimum", (ftnlen)12);
	ovfl = 1. / unfl;
	dlabad_(&unfl, &ovfl);
	ulp = pdlamch_(comm, "precision", (ftnlen)9);
	smlnum = unfl * (*n / ulp);
	first = FALSE_;
    }

/*     %-------------------------------% */
/*     | Initialize timing statistics  | */
/*     | & message level for debugging | */
/*     %-------------------------------% */

    second_(&t0);
    msglvl = debug_1.mnapps;

    kplusp = *kev + *np;

/*     %--------------------------------------------% */
/*     | Initialize Q to the identity to accumulate | */
/*     | the rotations and reflections              | */
/*     %--------------------------------------------% */

    dlaset_("All", &kplusp, &kplusp, &c_b5, &c_b6, &q[q_offset], ldq, (ftnlen)
	    3);

/*     %----------------------------------------------% */
/*     | Quick return if there are no shifts to apply | */
/*     %----------------------------------------------% */

    if (*np == 0) {
	goto L9000;
    }

/*     %----------------------------------------------% */
/*     | Chase the bulge with the application of each | */
/*     | implicit shift. Each shift is applied to the | */
/*     | whole matrix including each block.           | */
/*     %----------------------------------------------% */

    cconj = FALSE_;
    i__1 = *np;
    for (jj = 1; jj <= i__1; ++jj) {
	sigmar = shiftr[jj];
	sigmai = shifti[jj];

	if (msglvl > 2) {
	    pivout_(comm, &debug_1.logfil, &c__1, &jj, &debug_1.ndigit, "_na"
		    "pps: shift number.", (ftnlen)21);
	    pdvout_(comm, &debug_1.logfil, &c__1, &sigmar, &debug_1.ndigit, 
		    "_napps: The real part of the shift ", (ftnlen)35);
	    pdvout_(comm, &debug_1.logfil, &c__1, &sigmai, &debug_1.ndigit, 
		    "_napps: The imaginary part of the shift ", (ftnlen)40);
	}

/*        %-------------------------------------------------% */
/*        | The following set of conditionals is necessary  | */
/*        | in order that complex conjugate pairs of shifts | */
/*        | are applied together or not at all.             | */
/*        %-------------------------------------------------% */

	if (cconj) {

/*           %-----------------------------------------% */
/*           | cconj = .true. means the previous shift | */
/*           | had non-zero imaginary part.            | */
/*           %-----------------------------------------% */

	    cconj = FALSE_;
	    goto L110;
	} else if (jj < *np && abs(sigmai) > 0.) {

/*           %------------------------------------% */
/*           | Start of a complex conjugate pair. | */
/*           %------------------------------------% */

	    cconj = TRUE_;
	} else if (jj == *np && abs(sigmai) > 0.) {

/*           %----------------------------------------------% */
/*           | The last shift has a nonzero imaginary part. | */
/*           | Don't apply it; thus the order of the        | */
/*           | compressed H is order KEV+1 since only np-1  | */
/*           | were applied.                                | */
/*           %----------------------------------------------% */

	    ++(*kev);
	    goto L110;
	}
	istart = 1;
L20:

/*        %--------------------------------------------------% */
/*        | if sigmai = 0 then                               | */
/*        |    Apply the jj-th shift ...                     | */
/*        | else                                             | */
/*        |    Apply the jj-th and (jj+1)-th together ...    | */
/*        |    (Note that jj < np at this point in the code) | */
/*        | end                                              | */
/*        | to the current block of H. The next do loop      | */
/*        | determines the current block ;                   | */
/*        %--------------------------------------------------% */

	i__2 = kplusp - 1;
	for (i__ = istart; i__ <= i__2; ++i__) {

/*           %----------------------------------------% */
/*           | Check for splitting and deflation. Use | */
/*           | a standard test as in the QR algorithm | */
/*           | REFERENCE: LAPACK subroutine dlahqr    | */
/*           %----------------------------------------% */

	    tst1 = (d__1 = h__[i__ + i__ * h_dim1], abs(d__1)) + (d__2 = h__[
		    i__ + 1 + (i__ + 1) * h_dim1], abs(d__2));
	    if (tst1 == 0.) {
		i__3 = kplusp - jj + 1;
		tst1 = dlanhs_("1", &i__3, &h__[h_offset], ldh, &workl[1], (
			ftnlen)1);
	    }
/* Computing MAX */
	    d__2 = ulp * tst1;
	    if ((d__1 = h__[i__ + 1 + i__ * h_dim1], abs(d__1)) <= max(d__2,
		    smlnum)) {
		if (msglvl > 0) {
		    pivout_(comm, &debug_1.logfil, &c__1, &i__, &
			    debug_1.ndigit, "_napps: matrix splitting at row"
			    "/column no.", (ftnlen)42);
		    pivout_(comm, &debug_1.logfil, &c__1, &jj, &
			    debug_1.ndigit, "_napps: matrix splitting with s"
			    "hift number.", (ftnlen)43);
		    pdvout_(comm, &debug_1.logfil, &c__1, &h__[i__ + 1 + i__ *
			     h_dim1], &debug_1.ndigit, "_napps: off diagonal"
			    " element.", (ftnlen)29);
		}
		iend = i__;
		h__[i__ + 1 + i__ * h_dim1] = 0.;
		goto L40;
	    }
/* L30: */
	}
	iend = kplusp;
L40:

	if (msglvl > 2) {
	    pivout_(comm, &debug_1.logfil, &c__1, &istart, &debug_1.ndigit, 
		    "_napps: Start of current block ", (ftnlen)31);
	    pivout_(comm, &debug_1.logfil, &c__1, &iend, &debug_1.ndigit, 
		    "_napps: End of current block ", (ftnlen)29);
	}

/*        %------------------------------------------------% */
/*        | No reason to apply a shift to block of order 1 | */
/*        %------------------------------------------------% */

	if (istart == iend) {
	    goto L100;
	}

/*        %------------------------------------------------------% */
/*        | If istart + 1 = iend then no reason to apply a       | */
/*        | complex conjugate pair of shifts on a 2 by 2 matrix. | */
/*        %------------------------------------------------------% */

	if (istart + 1 == iend && abs(sigmai) > 0.) {
	    goto L100;
	}

	h11 = h__[istart + istart * h_dim1];
	h21 = h__[istart + 1 + istart * h_dim1];
	if (abs(sigmai) <= 0.) {

/*           %---------------------------------------------% */
/*           | Real-valued shift ==> apply single shift QR | */
/*           %---------------------------------------------% */

	    f = h11 - sigmar;
	    g = h21;

	    i__2 = iend - 1;
	    for (i__ = istart; i__ <= i__2; ++i__) {

/*              %-----------------------------------------------------% */
/*              | Construct the plane rotation G to zero the bulge    | */
/*              %-----------------------------------------------------% */

		dlartg_(&f, &g, &c__, &s, &r__);
		if (i__ > istart) {

/*                 %-------------------------------------------% */
/*                 | The following ensures that h(1:iend-1,1), | */
/*                 | the first iend-2 off-diagonal elements of | */
/*                 | H remain non-negative.                    | */
/*                 %-------------------------------------------% */

		    if (r__ < 0.) {
			r__ = -r__;
			c__ = -c__;
			s = -s;
		    }
		    h__[i__ + (i__ - 1) * h_dim1] = r__;
		    h__[i__ + 1 + (i__ - 1) * h_dim1] = 0.;
		}

/*              %---------------------------------------------% */
/*              | Apply rotation to the left of H;  H <- G'*H | */
/*              %---------------------------------------------% */

		i__3 = kplusp;
		for (j = i__; j <= i__3; ++j) {
		    t = c__ * h__[i__ + j * h_dim1] + s * h__[i__ + 1 + j * 
			    h_dim1];
		    h__[i__ + 1 + j * h_dim1] = -s * h__[i__ + j * h_dim1] + 
			    c__ * h__[i__ + 1 + j * h_dim1];
		    h__[i__ + j * h_dim1] = t;
/* L50: */
		}

/*              %---------------------------------------------% */
/*              | Apply rotation to the right of H;  H <- H*G | */
/*              %---------------------------------------------% */

/* Computing MIN */
		i__4 = i__ + 2;
		i__3 = min(i__4,iend);
		for (j = 1; j <= i__3; ++j) {
		    t = c__ * h__[j + i__ * h_dim1] + s * h__[j + (i__ + 1) * 
			    h_dim1];
		    h__[j + (i__ + 1) * h_dim1] = -s * h__[j + i__ * h_dim1] 
			    + c__ * h__[j + (i__ + 1) * h_dim1];
		    h__[j + i__ * h_dim1] = t;
/* L60: */
		}

/*              %----------------------------------------------------% */
/*              | Accumulate the rotation in the matrix Q;  Q <- Q*G | */
/*              %----------------------------------------------------% */

/* Computing MIN */
		i__4 = i__ + jj;
		i__3 = min(i__4,kplusp);
		for (j = 1; j <= i__3; ++j) {
		    t = c__ * q[j + i__ * q_dim1] + s * q[j + (i__ + 1) * 
			    q_dim1];
		    q[j + (i__ + 1) * q_dim1] = -s * q[j + i__ * q_dim1] + 
			    c__ * q[j + (i__ + 1) * q_dim1];
		    q[j + i__ * q_dim1] = t;
/* L70: */
		}

/*              %---------------------------% */
/*              | Prepare for next rotation | */
/*              %---------------------------% */

		if (i__ < iend - 1) {
		    f = h__[i__ + 1 + i__ * h_dim1];
		    g = h__[i__ + 2 + i__ * h_dim1];
		}
/* L80: */
	    }

/*           %-----------------------------------% */
/*           | Finished applying the real shift. | */
/*           %-----------------------------------% */

	} else {

/*           %----------------------------------------------------% */
/*           | Complex conjugate shifts ==> apply double shift QR | */
/*           %----------------------------------------------------% */

	    h12 = h__[istart + (istart + 1) * h_dim1];
	    h22 = h__[istart + 1 + (istart + 1) * h_dim1];
	    h32 = h__[istart + 2 + (istart + 1) * h_dim1];

/*           %---------------------------------------------------------% */
/*           | Compute 1st column of (H - shift*I)*(H - conj(shift)*I) | */
/*           %---------------------------------------------------------% */

	    s = sigmar * 2.f;
	    t = dlapy2_(&sigmar, &sigmai);
	    u[0] = (h11 * (h11 - s) + t * t) / h21 + h12;
	    u[1] = h11 + h22 - s;
	    u[2] = h32;

	    i__2 = iend - 1;
	    for (i__ = istart; i__ <= i__2; ++i__) {

/* Computing MIN */
		i__3 = 3, i__4 = iend - i__ + 1;
		nr = min(i__3,i__4);

/*              %-----------------------------------------------------% */
/*              | Construct Householder reflector G to zero out u(1). | */
/*              | G is of the form I - tau*( 1 u )' * ( 1 u' ).       | */
/*              %-----------------------------------------------------% */

		dlarfg_(&nr, u, &u[1], &c__1, &tau);

		if (i__ > istart) {
		    h__[i__ + (i__ - 1) * h_dim1] = u[0];
		    h__[i__ + 1 + (i__ - 1) * h_dim1] = 0.;
		    if (i__ < iend - 1) {
			h__[i__ + 2 + (i__ - 1) * h_dim1] = 0.;
		    }
		}
		u[0] = 1.;

/*              %--------------------------------------% */
/*              | Apply the reflector to the left of H | */
/*              %--------------------------------------% */

		i__3 = kplusp - i__ + 1;
		dlarf_("Left", &nr, &i__3, u, &c__1, &tau, &h__[i__ + i__ * 
			h_dim1], ldh, &workl[1], (ftnlen)4);

/*              %---------------------------------------% */
/*              | Apply the reflector to the right of H | */
/*              %---------------------------------------% */

/* Computing MIN */
		i__3 = i__ + 3;
		ir = min(i__3,iend);
		dlarf_("Right", &ir, &nr, u, &c__1, &tau, &h__[i__ * h_dim1 + 
			1], ldh, &workl[1], (ftnlen)5);

/*              %-----------------------------------------------------% */
/*              | Accumulate the reflector in the matrix Q;  Q <- Q*G | */
/*              %-----------------------------------------------------% */

		dlarf_("Right", &kplusp, &nr, u, &c__1, &tau, &q[i__ * q_dim1 
			+ 1], ldq, &workl[1], (ftnlen)5);

/*              %----------------------------% */
/*              | Prepare for next reflector | */
/*              %----------------------------% */

		if (i__ < iend - 1) {
		    u[0] = h__[i__ + 1 + i__ * h_dim1];
		    u[1] = h__[i__ + 2 + i__ * h_dim1];
		    if (i__ < iend - 2) {
			u[2] = h__[i__ + 3 + i__ * h_dim1];
		    }
		}

/* L90: */
	    }

/*           %--------------------------------------------% */
/*           | Finished applying a complex pair of shifts | */
/*           | to the current block                       | */
/*           %--------------------------------------------% */

	}

L100:

/*        %---------------------------------------------------------% */
/*        | Apply the same shift to the next block if there is any. | */
/*        %---------------------------------------------------------% */

	istart = iend + 1;
	if (iend < kplusp) {
	    goto L20;
	}

/*        %---------------------------------------------% */
/*        | Loop back to the top to get the next shift. | */
/*        %---------------------------------------------% */

L110:
	;
    }

/*     %--------------------------------------------------% */
/*     | Perform a similarity transformation that makes   | */
/*     | sure that H will have non-negative subdiagonals  | */
/*     %--------------------------------------------------% */

    i__1 = *kev;
    for (j = 1; j <= i__1; ++j) {
	if (h__[j + 1 + j * h_dim1] < 0.) {
	    i__2 = kplusp - j + 1;
	    dscal_(&i__2, &c_b43, &h__[j + 1 + j * h_dim1], ldh);
/* Computing MIN */
	    i__3 = j + 2;
	    i__2 = min(i__3,kplusp);
	    dscal_(&i__2, &c_b43, &h__[(j + 1) * h_dim1 + 1], &c__1);
/* Computing MIN */
	    i__3 = j + *np + 1;
	    i__2 = min(i__3,kplusp);
	    dscal_(&i__2, &c_b43, &q[(j + 1) * q_dim1 + 1], &c__1);
	}
/* L120: */
    }

    i__1 = *kev;
    for (i__ = 1; i__ <= i__1; ++i__) {

/*        %--------------------------------------------% */
/*        | Final check for splitting and deflation.   | */
/*        | Use a standard test as in the QR algorithm | */
/*        | REFERENCE: LAPACK subroutine dlahqr        | */
/*        %--------------------------------------------% */

	tst1 = (d__1 = h__[i__ + i__ * h_dim1], abs(d__1)) + (d__2 = h__[i__ 
		+ 1 + (i__ + 1) * h_dim1], abs(d__2));
	if (tst1 == 0.) {
	    tst1 = dlanhs_("1", kev, &h__[h_offset], ldh, &workl[1], (ftnlen)
		    1);
	}
/* Computing MAX */
	d__1 = ulp * tst1;
	if (h__[i__ + 1 + i__ * h_dim1] <= max(d__1,smlnum)) {
	    h__[i__ + 1 + i__ * h_dim1] = 0.;
	}
/* L130: */
    }

/*     %-------------------------------------------------% */
/*     | Compute the (kev+1)-st column of (V*Q) and      | */
/*     | temporarily store the result in WORKD(N+1:2*N). | */
/*     | This is needed in the residual update since we  | */
/*     | cannot GUARANTEE that the corresponding entry   | */
/*     | of H would be zero as in exact arithmetic.      | */
/*     %-------------------------------------------------% */

    if (h__[*kev + 1 + *kev * h_dim1] > 0.) {
	dgemv_("N", n, &kplusp, &c_b6, &v[v_offset], ldv, &q[(*kev + 1) * 
		q_dim1 + 1], &c__1, &c_b5, &workd[*n + 1], &c__1, (ftnlen)1);
    }

/*     %----------------------------------------------------------% */
/*     | Compute column 1 to kev of (V*Q) in backward order       | */
/*     | taking advantage of the upper Hessenberg structure of Q. | */
/*     %----------------------------------------------------------% */

    i__1 = *kev;
    for (i__ = 1; i__ <= i__1; ++i__) {
	i__2 = kplusp - i__ + 1;
	dgemv_("N", n, &i__2, &c_b6, &v[v_offset], ldv, &q[(*kev - i__ + 1) * 
		q_dim1 + 1], &c__1, &c_b5, &workd[1], &c__1, (ftnlen)1);
	dcopy_(n, &workd[1], &c__1, &v[(kplusp - i__ + 1) * v_dim1 + 1], &
		c__1);
/* L140: */
    }

/*     %-------------------------------------------------% */
/*     |  Move v(:,kplusp-kev+1:kplusp) into v(:,1:kev). | */
/*     %-------------------------------------------------% */

    dlacpy_("A", n, kev, &v[(kplusp - *kev + 1) * v_dim1 + 1], ldv, &v[
	    v_offset], ldv, (ftnlen)1);

/*     %--------------------------------------------------------------% */
/*     | Copy the (kev+1)-st column of (V*Q) in the appropriate place | */
/*     %--------------------------------------------------------------% */

    if (h__[*kev + 1 + *kev * h_dim1] > 0.) {
	dcopy_(n, &workd[*n + 1], &c__1, &v[(*kev + 1) * v_dim1 + 1], &c__1);
    }

/*     %---------------------------------------% */
/*     | Update the residual vector:           | */
/*     |    r <- sigmak*r + betak*v(:,kev+1)   | */
/*     | where                                 | */
/*     |    sigmak = (e_{kplusp}'*Q)*e_{kev}   | */
/*     |    betak = e_{kev+1}'*H*e_{kev}       | */
/*     %---------------------------------------% */

    dscal_(n, &q[kplusp + *kev * q_dim1], &resid[1], &c__1);
    if (h__[*kev + 1 + *kev * h_dim1] > 0.) {
	daxpy_(n, &h__[*kev + 1 + *kev * h_dim1], &v[(*kev + 1) * v_dim1 + 1],
		 &c__1, &resid[1], &c__1);
    }

    if (msglvl > 1) {
	pdvout_(comm, &debug_1.logfil, &c__1, &q[kplusp + *kev * q_dim1], &
		debug_1.ndigit, "_napps: sigmak = (e_{kev+p}^T*Q)*e_{kev}", (
		ftnlen)40);
	pdvout_(comm, &debug_1.logfil, &c__1, &h__[*kev + 1 + *kev * h_dim1], 
		&debug_1.ndigit, "_napps: betak = e_{kev+1}^T*H*e_{kev}", (
		ftnlen)37);
	pivout_(comm, &debug_1.logfil, &c__1, kev, &debug_1.ndigit, "_napps:"
		" Order of the final Hessenberg matrix ", (ftnlen)45);
	if (msglvl > 2) {
	    pdmout_(comm, &debug_1.logfil, kev, kev, &h__[h_offset], ldh, &
		    debug_1.ndigit, "_napps: updated Hessenberg matrix H for"
		    " next iteration", (ftnlen)54);
	}

    }

L9000:
    second_(&t1);
    timing_1.tnapps += t1 - t0;

    return 0;

/*     %----------------% */
/*     | End of pdnapps | */
/*     %----------------% */

} /* pdnapps_ */
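
In the double-shift branch above, the starting vector u is (up to a scalar) the first column of (H - sigma*I)*(H - conj(sigma)*I), which is real because the imaginary parts cancel: that product equals H^2 - 2*Re(sigma)*H + |sigma|^2*I. The standalone sketch below is not ARPACK code; the Hessenberg entries h11, h12, h21, h22, h32 and the shift are arbitrary test values. It evaluates both the scaled formula used in pdnapps_ and the exact first column, and shows they differ only by the factor h21, so they define the same Householder reflector.

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* Leading entries of a real upper Hessenberg matrix H and a complex
       shift sigma = sigmar + i*sigmai (illustrative values only). */
    double h11 = 4.0, h12 = 2.0, h21 = 1.5, h22 = 3.0, h32 = 0.5;
    double sigmar = 1.0, sigmai = 2.0;

    double s = 2.0 * sigmar;            /* sigma + conj(sigma)        */
    double t = hypot(sigmar, sigmai);   /* |sigma|, as dlapy2 returns */

    /* Scaled first column used to start the bulge chase (same formulas
       as u[0], u[1], u[2] in pdnapps_). */
    double u0 = (h11 * (h11 - s) + t * t) / h21 + h12;
    double u1 = h11 + h22 - s;
    double u2 = h32;

    /* Exact first column of H^2 - s*H + t^2*I, i.e. of
       (H - sigma*I)(H - conj(sigma)*I). */
    double m0 = h11 * h11 + h21 * h12 - s * h11 + t * t;
    double m1 = h21 * (h11 + h22 - s);
    double m2 = h21 * h32;

    printf("u = [% .6f % .6f % .6f]\n", u0, u1, u2);
    printf("m = [% .6f % .6f % .6f]\n", m0, m1, m2);
    printf("m/u (each should equal h21 = %.2f): %.6f %.6f %.6f\n",
           h21, m0 / u0, m1 / u1, m2 / u2);
    return 0;
}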
Example #6
/*==== MAIN FUNCTION =================================================*/
int main( int argc, char *argv[] ){

/*  ==== Declarations =================================================== */

/*  File variables */
    FILE    *fin;

/*  Matrix descriptors */
    MDESC   descA, descB, descC, descA_local, descB_local;

/*  Local scalars */
    MKL_INT iam, nprocs, ictxt, myrow, mycol, nprow, npcol;
    MKL_INT n, nb, mp, nq, lld, lld_local;
    MKL_INT i, j, info;
    int     n_int, nb_int, nprow_int, npcol_int;
    double  thresh, diffnorm, anorm, bnorm, residual, eps;

/*  Local arrays */
    double  *A_local, *B_local, *A, *B, *C, *work;
    MKL_INT iwork[ 4 ];


/*  ==== Executable statements ========================================== */

/*  Get information about how many processes are used for program execution
    and number of current process */
    blacs_pinfo_( &iam, &nprocs );

/*  Init temporary 1D process grid */
    blacs_get_( &i_negone, &i_zero, &ictxt );
    blacs_gridinit_( &ictxt, "C", &nprocs, &i_one );

/*  Open input file */
    if ( iam == 0 ) {
        fin = fopen( "../in/pblas3ex.in", "r" );
        if ( fin == NULL ) {
            printf( "Error while open input file." );
            return 2;
        }
    }

/*  Read data and send it to all processes */
    if ( iam == 0 ) {

/*      Read parameters */
        fscanf( fin, "%d n, dimension of vectors, must be > 0 ", &n_int );
        fscanf( fin, "%d nb, size of blocks, must be > 0 ", &nb_int );
        fscanf( fin, "%d p, number of rows in the process grid, must be > 0", &nprow_int );
        fscanf( fin, "%d q, number of columns in the process grid, must be > 0, p*q = number of processes", &npcol_int );
        fscanf( fin, "%lf threshold for residual check (to switch off check set it < 0.0) ", &thresh );
        n = (MKL_INT) n_int;
        nb = (MKL_INT) nb_int;
        nprow = (MKL_INT) nprow_int;
        npcol = (MKL_INT) npcol_int;

/*      Check if all parameters are correct */
        if( ( n<=0 )||( nb<=0 )||( nprow<=0 )||( npcol<=0 )||( nprow*npcol != nprocs ) ) {
            printf( "One or several input parameters has incorrect value. Limitations:\n" );
            printf( "n > 0, nb > 0, p > 0, q > 0 - integer\n" );
            printf( "p*q = number of processes\n" );
            printf( "threshold - double (set to negative to swicth off check)\n");
            return 2;
        }

/*      Pack data into array and send it to other processes */
        iwork[ 0 ] = n;
        iwork[ 1 ] = nb;
        iwork[ 2 ] = nprow;
        iwork[ 3 ] = npcol;
        igebs2d_( &ictxt, "All", " ", &i_four, &i_one, iwork, &i_four );
        dgebs2d_( &ictxt, "All", " ", &i_one, &i_one, &thresh, &i_one );
    } else {

/*      Receive and unpack data */
        igebr2d_( &ictxt, "All", " ", &i_four, &i_one, iwork, &i_four, &i_zero, &i_zero );
        dgebr2d_( &ictxt, "All", " ", &i_one, &i_one, &thresh, &i_one, &i_zero, &i_zero );
        n = iwork[ 0 ];
        nb = iwork[ 1 ];
        nprow = iwork[ 2 ];
        npcol = iwork[ 3 ];
    }
    if ( iam == 0 ) { fclose( fin ); }

/*  Destroy temporary process grid */
    blacs_gridexit_( &ictxt );

/*  Init working 2D process grid */
    blacs_get_( &i_negone, &i_zero, &ictxt );
    blacs_gridinit_( &ictxt, "R", &nprow, &npcol );
    blacs_gridinfo_( &ictxt, &nprow, &npcol, &myrow, &mycol );

/*  Create on process 0 two matrices: A - orthonormal, B - random */
    if ( ( myrow == 0 ) && ( mycol == 0 ) ){

/*      Allocate arrays */
        A_local = (double*) calloc( n*n, sizeof( double ) );
        B_local = (double*) calloc( n*n, sizeof( double ) );

/*      Set arrays */
        for ( i=0; i<n; i++ ){
            for ( j=0; j<n; j++ ){
                B_local[ i+n*j ] = one*rand()/RAND_MAX;
            }
            B_local[ i+n*i ] += two;
        }
        for ( j=0; j<n; j++ ){
            for ( i=0; i<n; i++ ){
                if ( j < n-1 ){
                    if ( i <= j ){
                        A_local[ i+n*j ] = one / sqrt( ( double )( (j+1)*(j+2) ) );
                    } else if ( i == j+1 ) {
                        A_local[ i+n*j ] = -one / sqrt( one + one/( double )(j+1) );
                    } else {
                        A_local[ i+n*j ] = zero;
                    }
                } else {
                    A_local[ i+n*(n-1) ] = one / sqrt( ( double )n );
                }
            }
        }

/*      Print information of task */
        printf( "=== START OF EXAMPLE ===================\n" );
        printf( "Matrix-matrix multiplication: A*B = C\n\n" );
        printf( "/  1/q_1 ........   1/q_n-1     1/q_n  \\ \n" );
        printf( "|        .                             | \n" );
        printf( "|         `.           :         :     | \n" );
        printf( "| -1/q_1    `.         :         :     | \n" );
        printf( "|        .    `.       :         :     |  =  A \n" );
        printf( "|   0     `.    `                      | \n" );
        printf( "|   : `.    `.      1/q_n-1     1/q_n  | \n" );
        printf( "|   :   `.    `.                       | \n" );
        printf( "\\   0 .... 0     -(n-1)/q_n-1   1/q_n  / \n\n" );
        printf( "q_i = sqrt( i^2 + i ), i=1..n-1, q_n = sqrt( n )\n\n" );
        printf( "A  -  n*n real matrix (orthonormal) \n" );
        printf( "B  -  random n*n real matrix\n\n" );
        printf( "n = %d, nb = %d; %dx%d - process grid\n\n", n, nb, nprow, npcol );
        printf( "=== PROGRESS ===========================\n" );
    } else {

/*      Other processes don't contain parts of initial arrays */
        A_local = NULL;
        B_local = NULL;
    }

/*  Compute precise length of local pieces and allocate array on
    each process for parts of distributed matrices */
    mp = numroc_( &n, &nb, &myrow, &i_zero, &nprow );
    nq = numroc_( &n, &nb, &mycol, &i_zero, &npcol );
    A = (double*) calloc( mp*nq, sizeof( double ) );
    B = (double*) calloc( mp*nq, sizeof( double ) );
    C = (double*) calloc( mp*nq, sizeof( double ) );

/*  Compute leading dimensions */
    lld_local = MAX( numroc_( &n, &n, &myrow, &i_zero, &nprow ), 1 );
    lld = MAX( mp, 1 );

/*  Initialize descriptors for initial arrays located on 0 process */
    descinit_( descA_local, &n, &n, &n, &n, &i_zero, &i_zero, &ictxt, &lld_local, &info );
    descinit_( descB_local, &n, &n, &n, &n, &i_zero, &i_zero, &ictxt, &lld_local, &info );

/*  Initialize descriptors for distributed arrays */
    descinit_( descA, &n, &n, &nb, &nb, &i_zero, &i_zero, &ictxt, &lld, &info );
    descinit_( descB, &n, &n, &nb, &nb, &i_zero, &i_zero, &ictxt, &lld, &info );
    descinit_( descC, &n, &n, &nb, &nb, &i_zero, &i_zero, &ictxt, &lld, &info );

/*  Distribute matrices from 0 process over process grid */
    pdgeadd_( &trans, &n, &n, &one, A_local, &i_one, &i_one, descA_local, &zero, A, &i_one, &i_one, descA );
    pdgeadd_( &trans, &n, &n, &one, B_local, &i_one, &i_one, descB_local, &zero, B, &i_one, &i_one, descB );
    if( iam == 0 ){ printf( ".. Arrays are distributed ( p?geadd ) ..\n" ); }

/*  Destroy arrays on 0 process - they are not necessary anymore */
    if( ( myrow == 0 ) && ( mycol == 0 ) ){
        free( A_local );
        free( B_local );
    }

/*  Compute norm of A and B */
    work = (double*) calloc( mp, sizeof( double ) );
    anorm = pdlange_( "I", &n, &n, A, &i_one, &i_one, descA, work );
    bnorm = pdlange_( "I", &n, &n, B, &i_one, &i_one, descB, work );
    if( iam == 0 ){ printf( ".. Norms of A and B are computed ( p?lange ) ..\n" ); }

/*  Compute product C = A*B */
    pdgemm_( "N", "N", &n, &n, &n, &one, A, &i_one, &i_one, descA, B, &i_one, &i_one, descB,
             &zero, C, &i_one, &i_one, descC );
    if( iam == 0 ){ printf( ".. Multiplication A*B=C is done ( p?gemm ) ..\n" ); }

/*  Compute difference  B - inv_A*C (inv_A = transpose(A) because A is orthonormal) */
    pdgemm_( "T", "N", &n, &n, &n, &one, A, &i_one, &i_one, descA, C, &i_one, &i_one, descC,
             &negone, B, &i_one, &i_one, descB );
    if( iam == 0 ){ printf( ".. Difference is computed ( p?gemm ) ..\n" ); }

/*  Compute norm of B - inv_A*C (which is contained in B) */
    diffnorm = pdlange_( "I", &n, &n, B, &i_one, &i_one, descB, work );
    free( work );
    if( iam == 0 ){ printf( ".. Norms of the difference B-inv_A*C is computed ( p?lange ) ..\n" ); }

/*  Print results */
    if( iam == 0 ){
        printf( ".. Solutions are compared ..\n" );
        printf( "== Results ==\n" );
        printf( "||A|| = %03.11f\n", anorm );
        printf( "||B|| = %03.11f\n", bnorm );
        printf( "=== END OF EXAMPLE =====================\n" );
    }

/*  Compute machine epsilon */
    eps = pdlamch_( &ictxt, "e" );

/*  Compute residual */
    residual = diffnorm /( two*anorm*bnorm*eps );

/*  Destroy arrays */
    free( A );
    free( B );
    free( C );

/*  Destroy process grid */    
    blacs_gridexit_( &ictxt );
    blacs_exit_( &i_zero );
    
/*  Check if residual passed or failed the threshold */
    if ( ( iam == 0 ) && ( thresh >= zero ) && !( residual <= thresh ) ){
        printf( "FAILED. Residual = %05.16f\n", residual );
        return 1;
    } else {
        return 0;
    }

/*========================================================================
  ====== End of PBLAS Level 3 example ====================================
  ======================================================================*/
}
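
The distributed example above verifies p?gemm by exploiting the fact that A is orthonormal, so inv(A) = A^T and B - A^T*(A*B) should vanish up to roundoff; the residual it reports is ||B - A^T*C|| / (2*||A||*||B||*eps) in the infinity norm. The serial sketch below reproduces that check without BLACS/ScaLAPACK; N, inf_norm, and the use of DBL_EPSILON in place of pdlamch_ are illustrative assumptions, not part of the original example.

#include <float.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define N 4   /* small fixed size; the PBLAS example reads n from a file */

/* Infinity norm (maximum absolute row sum) of a column-major N x N matrix. */
static double inf_norm(const double *m)
{
    double norm = 0.0;
    for (int i = 0; i < N; i++) {
        double row = 0.0;
        for (int j = 0; j < N; j++) row += fabs(m[i + N * j]);
        if (row > norm) norm = row;
    }
    return norm;
}

int main(void)
{
    double A[N * N], B[N * N], C[N * N], D[N * N];

    /* Same orthonormal A and diagonally dominant random B as the
       distributed example, built serially. */
    for (int j = 0; j < N; j++) {
        for (int i = 0; i < N; i++) {
            if (j < N - 1) {
                if (i <= j)          A[i + N * j] =  1.0 / sqrt((double)((j + 1) * (j + 2)));
                else if (i == j + 1) A[i + N * j] = -1.0 / sqrt(1.0 + 1.0 / (double)(j + 1));
                else                 A[i + N * j] =  0.0;
            } else {
                A[i + N * (N - 1)] = 1.0 / sqrt((double)N);
            }
            B[i + N * j] = (double)rand() / RAND_MAX;
        }
        B[j + N * j] += 2.0;
    }

    /* C = A*B and D = B - A^T*C, the serial counterparts of the two
       pdgemm_ calls in the example. */
    for (int j = 0; j < N; j++)
        for (int i = 0; i < N; i++) {
            double s = 0.0;
            for (int k = 0; k < N; k++) s += A[i + N * k] * B[k + N * j];
            C[i + N * j] = s;
        }
    for (int j = 0; j < N; j++)
        for (int i = 0; i < N; i++) {
            double s = 0.0;
            for (int k = 0; k < N; k++) s += A[k + N * i] * C[k + N * j];
            D[i + N * j] = B[i + N * j] - s;
        }

    /* Same residual formula as the example, with DBL_EPSILON standing in
       for the machine epsilon returned by pdlamch_. */
    double residual = inf_norm(D) / (2.0 * inf_norm(A) * inf_norm(B) * DBL_EPSILON);
    printf("residual = %.6f\n", residual);
    return 0;
}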
/* Subroutine */ int pdnaitr_(integer *comm, integer *ido, char *bmat, 
	integer *n, integer *k, integer *np, integer *nb, doublereal *resid, 
	doublereal *rnorm, doublereal *v, integer *ldv, doublereal *h__, 
	integer *ldh, integer *ipntr, doublereal *workd, doublereal *workl, 
	integer *info, ftnlen bmat_len)
{
    /* Initialized data */

    static logical first = TRUE_;

    /* System generated locals */
    integer h_dim1, h_offset, v_dim1, v_offset, i__1, i__2;
    doublereal d__1, d__2;

    /* Builtin functions */
    double sqrt(doublereal);

    /* Local variables */
    static integer i__, j;
    static real t0, t1, t2, t3, t4, t5;
    static doublereal rnorm_buf__;
    static integer jj, ipj, irj, ivj;
    static doublereal ulp, tst1;
    extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, 
	    integer *);
    static integer ierr, iter;
    static doublereal unfl, ovfl;
    static integer itry;
    static doublereal temp1;
    static logical orth1, orth2, step3, step4;
    static doublereal betaj;
    extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, 
	    integer *), dgemv_(char *, integer *, integer *, doublereal *, 
	    doublereal *, integer *, doublereal *, integer *, doublereal *, 
	    doublereal *, integer *, ftnlen);
    static integer infol;
    extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, 
	    doublereal *, integer *), daxpy_(integer *, doublereal *, 
	    doublereal *, integer *, doublereal *, integer *);
    static doublereal xtemp[2], wnorm;
    extern /* Subroutine */ int mpi_allreduce__(doublereal *, doublereal *, 
	    integer *, integer *, integer *, integer *, integer *), dlabad_(
	    doublereal *, doublereal *);
    static doublereal rnorm1;
    extern /* Subroutine */ int dlascl_(char *, integer *, integer *, 
	    doublereal *, doublereal *, integer *, integer *, doublereal *, 
	    integer *, integer *, ftnlen);
    extern doublereal dlanhs_(char *, integer *, doublereal *, integer *, 
	    doublereal *, ftnlen);
    static logical rstart;
    static integer msglvl;
    static doublereal smlnum;
    extern /* Subroutine */ int pdvout_(integer *, integer *, integer *, 
	    doublereal *, integer *, char *, ftnlen), pdmout_(integer *, 
	    integer *, integer *, integer *, doublereal *, integer *, integer 
	    *, char *, ftnlen), pivout_(integer *, integer *, integer *, 
	    integer *, integer *, char *, ftnlen), second_(real *), pdgetv0_(
	    integer *, integer *, char *, integer *, logical *, integer *, 
	    integer *, doublereal *, integer *, doublereal *, doublereal *, 
	    integer *, doublereal *, doublereal *, integer *, ftnlen);
    extern doublereal pdnorm2_(integer *, integer *, doublereal *, integer *),
	     pdlamch_(integer *, char *, ftnlen);



/*     %---------------% */
/*     | MPI Variables | */
/*     %---------------% */

/* /+ */
/* * */
/* *  (C) 1993 by Argonne National Laboratory and Mississippi State University. */
/* *      All rights reserved.  See COPYRIGHT in top-level directory. */
/* +/ */

/* /+ user include file for MPI programs, with no dependencies +/ */

/* /+ return codes +/ */







/*     We handle datatypes by putting the variables that hold them into */
/*     common.  This way, a Fortran program can directly use the various */
/*     datatypes and can even give them to C programs. */

/*     MPI_BOTTOM needs to be a known address; here we put it at the */
/*     beginning of the common block.  The point-to-point and collective */
/*     routines know about MPI_BOTTOM, but MPI_TYPE_STRUCT as yet does not. */

/*     The types MPI_INTEGER1,2,4 and MPI_REAL4,8 are OPTIONAL. */
/*     Their values are zero if they are not available.  Note that */
/*     using these reduces the portability of code (though may enhance */
/*     portability between Crays and other systems) */



/*     All other MPI routines are subroutines */

/*     The attribute copy/delete functions are symbols that can be passed */
/*     to MPI routines */

/*     %----------------------------------------------------% */
/*     | Include files for debugging and timing information | */
/*     %----------------------------------------------------% */


/* \SCCS Information: @(#) */
/* FILE: debug.h   SID: 2.3   DATE OF SID: 11/16/95   RELEASE: 2 */

/*     %---------------------------------% */
/*     | See debug.doc for documentation | */
/*     %---------------------------------% */

/*     %------------------% */
/*     | Scalar Arguments | */
/*     %------------------% */

/*     %--------------------------------% */
/*     | See stat.doc for documentation | */
/*     %--------------------------------% */

/* \SCCS Information: @(#) */
/* FILE: stat.h   SID: 2.2   DATE OF SID: 11/16/95   RELEASE: 2 */



/*     %-----------------% */
/*     | Array Arguments | */
/*     %-----------------% */


/*     %------------% */
/*     | Parameters | */
/*     %------------% */


/*     %---------------% */
/*     | Local Scalars | */
/*     %---------------% */




/*     %-----------------------% */
/*     | Local Array Arguments | */
/*     %-----------------------% */


/*     %----------------------% */
/*     | External Subroutines | */
/*     %----------------------% */


/*     %--------------------% */
/*     | External Functions | */
/*     %--------------------% */


/*     %---------------------% */
/*     | Intrinsic Functions | */
/*     %---------------------% */


/*     %-----------------% */
/*     | Data statements | */
/*     %-----------------% */

    /* Parameter adjustments */
    --workd;
    --resid;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;
    --workl;
    h_dim1 = *ldh;
    h_offset = 1 + h_dim1;
    h__ -= h_offset;
    --ipntr;

    /* Function Body */

/*     %-----------------------% */
/*     | Executable Statements | */
/*     %-----------------------% */

    if (first) {

/*        %-----------------------------------------% */
/*        | Set machine-dependent constants for the | */
/*        | the splitting and deflation criterion.  | */
/*        | If norm(H) <= sqrt(OVFL),               | */
/*        | overflow should not occur.              | */
/*        | REFERENCE: LAPACK subroutine dlahqr     | */
/*        %-----------------------------------------% */

	unfl = pdlamch_(comm, "safe minimum", (ftnlen)12);
	ovfl = 1. / unfl;
	dlabad_(&unfl, &ovfl);
	ulp = pdlamch_(comm, "precision", (ftnlen)9);
	smlnum = unfl * (*n / ulp);
	first = FALSE_;
    }

    if (*ido == 0) {

/*        %-------------------------------% */
/*        | Initialize timing statistics  | */
/*        | & message level for debugging | */
/*        %-------------------------------% */

	second_(&t0);
	msglvl = debug_1.mnaitr;

/*        %------------------------------% */
/*        | Initial call to this routine | */
/*        %------------------------------% */

	*info = 0;
	step3 = FALSE_;
	step4 = FALSE_;
	rstart = FALSE_;
	orth1 = FALSE_;
	orth2 = FALSE_;
	j = *k + 1;
	ipj = 1;
	irj = ipj + *n;
	ivj = irj + *n;
    }

/*     %-------------------------------------------------% */
/*     | When in reverse communication mode one of:      | */
/*     | STEP3, STEP4, ORTH1, ORTH2, RSTART              | */
/*     | will be .true. when ....                        | */
/*     | STEP3: return from computing OP*v_{j}.          | */
/*     | STEP4: return from computing B-norm of OP*v_{j} | */
/*     | ORTH1: return from computing B-norm of r_{j+1}  | */
/*     | ORTH2: return from computing B-norm of          | */
/*     |        correction to the residual vector.       | */
/*     | RSTART: return from OP computations needed by   | */
/*     |         pdgetv0.                                | */
/*     %-------------------------------------------------% */

    if (step3) {
	goto L50;
    }
    if (step4) {
	goto L60;
    }
    if (orth1) {
	goto L70;
    }
    if (orth2) {
	goto L90;
    }
    if (rstart) {
	goto L30;
    }

/*     %-----------------------------% */
/*     | Else this is the first step | */
/*     %-----------------------------% */

/*     %--------------------------------------------------------------% */
/*     |                                                              | */
/*     |        A R N O L D I     I T E R A T I O N     L O O P       | */
/*     |                                                              | */
/*     | Note:  B*r_{j-1} is already in WORKD(1:N)=WORKD(IPJ:IPJ+N-1) | */
/*     %--------------------------------------------------------------% */
L1000:

    if (msglvl > 1) {
	pivout_(comm, &debug_1.logfil, &c__1, &j, &debug_1.ndigit, "_naitr: "
		"generating Arnoldi vector number", (ftnlen)40);
	pdvout_(comm, &debug_1.logfil, &c__1, rnorm, &debug_1.ndigit, "_nait"
		"r: B-norm of the current residual is", (ftnlen)41);
    }

/*        %---------------------------------------------------% */
/*        | STEP 1: Check if the B norm of j-th residual      | */
/*        | vector is zero. Equivalent to determining whether | */
/*        | an exact j-step Arnoldi factorization is present. | */
/*        %---------------------------------------------------% */

    betaj = *rnorm;
    if (*rnorm > 0.) {
	goto L40;
    }

/*           %---------------------------------------------------% */
/*           | Invariant subspace found, generate a new starting | */
/*           | vector which is orthogonal to the current Arnoldi | */
/*           | basis and continue the iteration.                 | */
/*           %---------------------------------------------------% */

    if (msglvl > 0) {
	pivout_(comm, &debug_1.logfil, &c__1, &j, &debug_1.ndigit, "_naitr: "
		"****** RESTART AT STEP ******", (ftnlen)37);
    }

/*           %---------------------------------------------% */
/*           | ITRY is the loop variable that controls the | */
/*           | maximum number of times that a restart is   | */
/*           | attempted. NRSTRT is used by stat.h         | */
/*           %---------------------------------------------% */

    betaj = 0.;
    ++timing_1.nrstrt;
    itry = 1;
L20:
    rstart = TRUE_;
    *ido = 0;
L30:

/*           %--------------------------------------% */
/*           | If in reverse communication mode and | */
/*           | RSTART = .true. flow returns here.   | */
/*           %--------------------------------------% */

    pdgetv0_(comm, ido, bmat, &itry, &c_false, n, &j, &v[v_offset], ldv, &
	    resid[1], rnorm, &ipntr[1], &workd[1], &workl[1], &ierr, (ftnlen)
	    1);
    if (*ido != 99) {
	goto L9000;
    }
    if (ierr < 0) {
	++itry;
	if (itry <= 3) {
	    goto L20;
	}

/*              %------------------------------------------------% */
/*              | Give up after several restart attempts.        | */
/*              | Set INFO to the size of the invariant subspace | */
/*              | which spans OP and exit.                       | */
/*              %------------------------------------------------% */

	*info = j - 1;
	second_(&t1);
	timing_1.tnaitr += t1 - t0;
	*ido = 99;
	goto L9000;
    }

L40:

/*        %---------------------------------------------------------% */
/*        | STEP 2:  v_{j} = r_{j-1}/rnorm and p_{j} = p_{j}/rnorm  | */
/*        | Note that p_{j} = B*r_{j-1}. In order to avoid overflow | */
/*        | when reciprocating a small RNORM, test against lower    | */
/*        | machine bound.                                          | */
/*        %---------------------------------------------------------% */

    dcopy_(n, &resid[1], &c__1, &v[j * v_dim1 + 1], &c__1);
    if (*rnorm >= unfl) {
	temp1 = 1. / *rnorm;
	dscal_(n, &temp1, &v[j * v_dim1 + 1], &c__1);
	dscal_(n, &temp1, &workd[ipj], &c__1);
    } else {

/*            %-----------------------------------------% */
/*            | To scale both v_{j} and p_{j} carefully | */
/*            | use LAPACK routine DLASCL               | */
/*            %-----------------------------------------% */

	dlascl_("General", &i__, &i__, rnorm, &c_b25, n, &c__1, &v[j * v_dim1 
		+ 1], n, &infol, (ftnlen)7);
	dlascl_("General", &i__, &i__, rnorm, &c_b25, n, &c__1, &workd[ipj], 
		n, &infol, (ftnlen)7);
    }

/*        %------------------------------------------------------% */
/*        | STEP 3:  r_{j} = OP*v_{j}; Note that p_{j} = B*v_{j} | */
/*        | Note that this is not quite yet r_{j}. See STEP 4    | */
/*        %------------------------------------------------------% */

    step3 = TRUE_;
    ++timing_1.nopx;
    second_(&t2);
    dcopy_(n, &v[j * v_dim1 + 1], &c__1, &workd[ivj], &c__1);
    ipntr[1] = ivj;
    ipntr[2] = irj;
    ipntr[3] = ipj;
    *ido = 1;

/*        %-----------------------------------% */
/*        | Exit in order to compute OP*v_{j} | */
/*        %-----------------------------------% */

    goto L9000;
L50:

/*        %----------------------------------% */
/*        | Back from reverse communication; | */
/*        | WORKD(IRJ:IRJ+N-1) := OP*v_{j}   | */
/*        | if step3 = .true.                | */
/*        %----------------------------------% */

    second_(&t3);
    timing_1.tmvopx += t3 - t2;
    step3 = FALSE_;

/*        %------------------------------------------% */
/*        | Put another copy of OP*v_{j} into RESID. | */
/*        %------------------------------------------% */

    dcopy_(n, &workd[irj], &c__1, &resid[1], &c__1);

/*        %---------------------------------------% */
/*        | STEP 4:  Finish extending the Arnoldi | */
/*        |          factorization to length j.   | */
/*        %---------------------------------------% */

    second_(&t2);
    if (*(unsigned char *)bmat == 'G') {
	++timing_1.nbx;
	step4 = TRUE_;
	ipntr[1] = irj;
	ipntr[2] = ipj;
	*ido = 2;

/*           %-------------------------------------% */
/*           | Exit in order to compute B*OP*v_{j} | */
/*           %-------------------------------------% */

	goto L9000;
    } else if (*(unsigned char *)bmat == 'I') {
	dcopy_(n, &resid[1], &c__1, &workd[ipj], &c__1);
    }
L60:

/*        %----------------------------------% */
/*        | Back from reverse communication; | */
/*        | WORKD(IPJ:IPJ+N-1) := B*OP*v_{j} | */
/*        | if step4 = .true.                | */
/*        %----------------------------------% */

    if (*(unsigned char *)bmat == 'G') {
	second_(&t3);
	timing_1.tmvbx += t3 - t2;
    }

    step4 = FALSE_;

/*        %-------------------------------------% */
/*        | The following is needed for STEP 5. | */
/*        | Compute the B-norm of OP*v_{j}.     | */
/*        %-------------------------------------% */

    if (*(unsigned char *)bmat == 'G') {
	rnorm_buf__ = ddot_(n, &resid[1], &c__1, &workd[ipj], &c__1);
	mpi_allreduce__(&rnorm_buf__, &wnorm, &c__1, &
		mpipriv_1.mpi_double_precision__, &mpipriv_1.mpi_sum__, comm, 
		&ierr);
	wnorm = sqrt((abs(wnorm)));
    } else if (*(unsigned char *)bmat == 'I') {
	wnorm = pdnorm2_(comm, n, &resid[1], &c__1);
    }
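
/*        %----------------------------------------------------------% */
/*        | WNORM now holds the B-norm of OP*v_{j}. For BMAT = 'G'   | */
/*        | the local dot product (OP*v_{j})'*(B*OP*v_{j}) is summed | */
/*        | over all processes with MPI_ALLREDUCE before the square  | */
/*        | root is taken; for BMAT = 'I' the parallel two-norm      | */
/*        | PDNORM2 is used. WNORM is saved for the test in STEP 5.  | */
/*        %----------------------------------------------------------% */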

/*        %-----------------------------------------% */
/*        | Compute the j-th residual corresponding | */
/*        | to the j step factorization.            | */
/*        | Use Classical Gram Schmidt and compute: | */
/*        | w_{j} <-  V_{j}^T * B * OP * v_{j}      | */
/*        | r_{j} <-  OP*v_{j} - V_{j} * w_{j}      | */
/*        %-----------------------------------------% */


/*        %------------------------------------------% */
/*        | Compute the j Fourier coefficients w_{j} | */
/*        | WORKD(IPJ:IPJ+N-1) contains B*OP*v_{j}.  | */
/*        %------------------------------------------% */

    dgemv_("T", n, &j, &c_b25, &v[v_offset], ldv, &workd[ipj], &c__1, &c_b48, 
	    &workl[1], &c__1, (ftnlen)1);
    mpi_allreduce__(&workl[1], &h__[j * h_dim1 + 1], &j, &
	    mpipriv_1.mpi_double_precision__, &mpipriv_1.mpi_sum__, comm, &
	    ierr);
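
/*        %----------------------------------------------------------% */
/*        | Each process owns a block of rows of V and RESID, so the | */
/*        | local DGEMV above yields only a partial sum of the       | */
/*        | Fourier coefficients; MPI_ALLREDUCE sums the local       | */
/*        | contributions and places the global coefficients         | */
/*        | h(1:j,j) = V_{j}^T * B * OP * v_{j} on every process.    | */
/*        %----------------------------------------------------------% */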

/*        %--------------------------------------% */
/*        | Orthogonalize r_{j} against V_{j}.   | */
/*        | RESID contains OP*v_{j}. See STEP 3. | */
/*        %--------------------------------------% */

    dgemv_("N", n, &j, &c_b51, &v[v_offset], ldv, &h__[j * h_dim1 + 1], &c__1,
	     &c_b25, &resid[1], &c__1, (ftnlen)1);

    if (j > 1) {
	h__[j + (j - 1) * h_dim1] = betaj;
    }
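
/*        %--------------------------------------------------------% */
/*        | BETAJ is the B-norm of the previous residual r_{j-1};  | */
/*        | it becomes the subdiagonal entry H(j,j-1) of the upper | */
/*        | Hessenberg matrix of the Arnoldi factorization.        | */
/*        %--------------------------------------------------------% */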

    second_(&t4);

    orth1 = TRUE_;

    second_(&t2);
    if (*(unsigned char *)bmat == 'G') {
	++timing_1.nbx;
	dcopy_(n, &resid[1], &c__1, &workd[irj], &c__1);
	ipntr[1] = irj;
	ipntr[2] = ipj;
	*ido = 2;

/*           %----------------------------------% */
/*           | Exit in order to compute B*r_{j} | */
/*           %----------------------------------% */

	goto L9000;
    } else if (*(unsigned char *)bmat == 'I') {
	dcopy_(n, &resid[1], &c__1, &workd[ipj], &c__1);
    }
L70:

/*        %---------------------------------------------------% */
/*        | Back from reverse communication if ORTH1 = .true. | */
/*        | WORKD(IPJ:IPJ+N-1) := B*r_{j}.                    | */
/*        %---------------------------------------------------% */

    if (*(unsigned char *)bmat == 'G') {
	second_(&t3);
	timing_1.tmvbx += t3 - t2;
    }

    orth1 = FALSE_;

/*        %------------------------------% */
/*        | Compute the B-norm of r_{j}. | */
/*        %------------------------------% */

    if (*(unsigned char *)bmat == 'G') {
	rnorm_buf__ = ddot_(n, &resid[1], &c__1, &workd[ipj], &c__1);
	mpi_allreduce__(&rnorm_buf__, rnorm, &c__1, &
		mpipriv_1.mpi_double_precision__, &mpipriv_1.mpi_sum__, comm, 
		&ierr);
	*rnorm = sqrt((abs(*rnorm)));
    } else if (*(unsigned char *)bmat == 'I') {
	*rnorm = pdnorm2_(comm, n, &resid[1], &c__1);
    }

/*        %-----------------------------------------------------------% */
/*        | STEP 5: Re-orthogonalization / Iterative refinement phase | */
/*        | Maximum NITER_REF tries.                                  | */
/*        |                                                           | */
/*        |          s      = V_{j}^T * B * r_{j}                     | */
/*        |          r_{j}  = r_{j} - V_{j}*s                         | */
/*        |          alphaj = alphaj + s_{j}                          | */
/*        |                                                           | */
/*        | The stopping criterion used for iterative refinement is   | */
/*        | discussed in Parlett's book SEP, page 107 and in Gragg &  | */
/*        | Reichel ACM TOMS paper; Algorithm 686, Dec. 1990.         | */
/*        | Determine if we need to correct the residual. The goal is | */
/*        | to enforce ||v(:,1:j)^T * r_{j}|| .le. eps * || r_{j} ||  | */
/*        | The following test checks whether the computed residual   | */
/*        | retains at least the fraction 0.717 of the B-norm of      | */
/*        | OP*v_{j}; if it does, no re-orthogonalization is needed.  | */
/*        %-----------------------------------------------------------% */

    if (*rnorm > wnorm * .717f) {
	goto L100;
    }
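
/*        %---------------------------------------------------------% */
/*        | RNORM/WNORM is the fraction of the B-norm of OP*v_{j}   | */
/*        | that survived the Gram-Schmidt projection. If more than | */
/*        | 0.717 (roughly 1/sqrt(2)) survived, little cancellation | */
/*        | occurred and the orthogonalization is accepted as is;   | */
/*        | otherwise the iterative refinement below (the DGKS      | */
/*        | correction of Daniel, Gragg, Kaufman & Stewart) is run. | */
/*        %---------------------------------------------------------% */
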
    iter = 0;
    ++timing_1.nrorth;

/*        %---------------------------------------------------% */
/*        | Enter the Iterative refinement phase. If further  | */
/*        | refinement is necessary, loop back here. The loop | */
/*        | variable is ITER. Perform a step of Classical     | */
/*        | Gram-Schmidt using all the Arnoldi vectors V_{j}  | */
/*        %---------------------------------------------------% */

L80:

    if (msglvl > 2) {
	xtemp[0] = wnorm;
	xtemp[1] = *rnorm;
	pdvout_(comm, &debug_1.logfil, &c__2, xtemp, &debug_1.ndigit, "_nait"
		"r: re-orthogonalization; wnorm and rnorm are", (ftnlen)49);
	pdvout_(comm, &debug_1.logfil, &j, &h__[j * h_dim1 + 1], &
		debug_1.ndigit, "_naitr: j-th column of H", (ftnlen)24);
    }

/*        %----------------------------------------------------% */
/*        | Compute V_{j}^T * B * r_{j}.                       | */
/*        | WORKL(1:J) = v(:,1:J)'*WORKD(IPJ:IPJ+N-1), summed   | */
/*        | across all processes.                                | */
/*        %----------------------------------------------------% */

    dgemv_("T", n, &j, &c_b25, &v[v_offset], ldv, &workd[ipj], &c__1, &c_b48, 
	    &workl[j + 1], &c__1, (ftnlen)1);
    mpi_allreduce__(&workl[j + 1], &workl[1], &j, &
	    mpipriv_1.mpi_double_precision__, &mpipriv_1.mpi_sum__, comm, &
	    ierr);

/*        %---------------------------------------------% */
/*        | Compute the correction to the residual:     | */
/*        | r_{j} = r_{j} - V_{j} * WORKL(1:J).         | */
/*        | The correction to H is                      | */
/*        | H(1:J,J) = H(1:J,J) + WORKL(1:J).           | */
/*        %---------------------------------------------% */

    dgemv_("N", n, &j, &c_b51, &v[v_offset], ldv, &workl[1], &c__1, &c_b25, &
	    resid[1], &c__1, (ftnlen)1);
    daxpy_(&j, &c_b25, &workl[1], &c__1, &h__[j * h_dim1 + 1], &c__1);
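
/*        %---------------------------------------------------------% */
/*        | The coefficients just removed from the residual are     | */
/*        | accumulated into the j-th column of H, so the Arnoldi   | */
/*        | relation  OP*V_{j} = V_{j}*H_{j} + r_{j}*e_{j}^T  still | */
/*        | holds unchanged for the refined residual vector.        | */
/*        %---------------------------------------------------------% */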

    orth2 = TRUE_;
    second_(&t2);
    if (*(unsigned char *)bmat == 'G') {
	++timing_1.nbx;
	dcopy_(n, &resid[1], &c__1, &workd[irj], &c__1);
	ipntr[1] = irj;
	ipntr[2] = ipj;
	*ido = 2;

/*           %-----------------------------------% */
/*           | Exit in order to compute B*r_{j}. | */
/*           | r_{j} is the corrected residual.  | */
/*           %-----------------------------------% */

	goto L9000;
    } else if (*(unsigned char *)bmat == 'I') {
	dcopy_(n, &resid[1], &c__1, &workd[ipj], &c__1);
    }
L90:

/*        %---------------------------------------------------% */
/*        | Back from reverse communication if ORTH2 = .true. | */
/*        %---------------------------------------------------% */

    if (*(unsigned char *)bmat == 'G') {
	second_(&t3);
	timing_1.tmvbx += t3 - t2;
    }

/*        %-----------------------------------------------------% */
/*        | Compute the B-norm of the corrected residual r_{j}. | */
/*        %-----------------------------------------------------% */

    if (*(unsigned char *)bmat == 'G') {
	rnorm_buf__ = ddot_(n, &resid[1], &c__1, &workd[ipj], &c__1);
	mpi_allreduce__(&rnorm_buf__, &rnorm1, &c__1, &
		mpipriv_1.mpi_double_precision__, &mpipriv_1.mpi_sum__, comm, 
		&ierr);
	rnorm1 = sqrt((abs(rnorm1)));
    } else if (*(unsigned char *)bmat == 'I') {
	rnorm1 = pdnorm2_(comm, n, &resid[1], &c__1);
    }

    if (msglvl > 0 && iter > 0) {
	pivout_(comm, &debug_1.logfil, &c__1, &j, &debug_1.ndigit, "_naitr: "
		"Iterative refinement for Arnoldi residual", (ftnlen)49);
	if (msglvl > 2) {
	    xtemp[0] = *rnorm;
	    xtemp[1] = rnorm1;
	    pdvout_(comm, &debug_1.logfil, &c__2, xtemp, &debug_1.ndigit, 
		    "_naitr: iterative refinement ; rnorm and rnorm1 are", (
		    ftnlen)51);
	}
    }

/*        %-----------------------------------------% */
/*        | Determine if we need to perform another | */
/*        | step of re-orthogonalization.           | */
/*        %-----------------------------------------% */

    if (rnorm1 > *rnorm * .717f) {

/*           %---------------------------------------% */
/*           | No need for further refinement.       | */
/*           | The cosine of the angle between the   | */
/*           | corrected residual vector and the old | */
/*           | residual vector is greater than       | */
/*           | 0.717. In other words, the corrected  | */
/*           | and the old residual vectors make an  | */
/*           | angle of less than arccos(0.717).     | */
/*           %---------------------------------------% */

	*rnorm = rnorm1;

    } else {

/*           %-------------------------------------------% */
/*           | Another step of iterative refinement is   | */
/*           | required. NITREF (see stat.h) is updated. | */
/*           %-------------------------------------------% */

	++timing_1.nitref;
	*rnorm = rnorm1;
	++iter;
	if (iter <= 1) {
	    goto L80;
	}

/*           %-------------------------------------------------% */
/*           | Otherwise RESID is numerically in the span of V | */
/*           %-------------------------------------------------% */

	i__1 = *n;
	for (jj = 1; jj <= i__1; ++jj) {
	    resid[jj] = 0.;
/* L95: */
	}
	*rnorm = 0.;
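
/*           %----------------------------------------------------% */
/*           | An exactly zero residual signals that an invariant | */
/*           | subspace has been found; the restart logic at the  | */
/*           | top of the main loop then generates a new starting | */
/*           | vector orthogonal to the current Arnoldi basis.    | */
/*           %----------------------------------------------------% */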
    }

/*        %----------------------------------------------% */
/*        | Branch here directly if iterative refinement | */
/*        | wasn't necessary or after at most NITER_REF  | */
/*        | steps of iterative refinement.               | */
/*        %----------------------------------------------% */

L100:

    rstart = FALSE_;
    orth2 = FALSE_;

    second_(&t5);
    timing_1.titref += t5 - t4;

/*        %------------------------------------% */
/*        | STEP 6: Update  j = j+1;  Continue | */
/*        %------------------------------------% */

    ++j;
    if (j > *k + *np) {
	second_(&t1);
	timing_1.tnaitr += t1 - t0;
	*ido = 99;
	i__1 = *k + *np - 1;
	for (i__ = max(1,*k); i__ <= i__1; ++i__) {

/*              %--------------------------------------------% */
/*              | Check for splitting and deflation.         | */
/*              | Use a standard test as in the QR algorithm | */
/*              | (REFERENCE: LAPACK subroutine dlahqr).     | */
/*              %--------------------------------------------% */

	    tst1 = (d__1 = h__[i__ + i__ * h_dim1], abs(d__1)) + (d__2 = h__[
		    i__ + 1 + (i__ + 1) * h_dim1], abs(d__2));
	    if (tst1 == 0.) {
		i__2 = *k + *np;
		tst1 = dlanhs_("1", &i__2, &h__[h_offset], ldh, &workd[*n + 1]
			, (ftnlen)1);
	    }
/* Computing MAX */
	    d__2 = ulp * tst1;
	    if ((d__1 = h__[i__ + 1 + i__ * h_dim1], abs(d__1)) <= max(d__2,
		    smlnum)) {
		h__[i__ + 1 + i__ * h_dim1] = 0.;
	    }
/* L110: */
	}
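
/*              %------------------------------------------------------% */
/*              | A subdiagonal entry H(i+1,i) is set to zero when it  | */
/*              | is negligible relative to its diagonal neighbours:   | */
/*              |    |H(i+1,i)| <= max( ulp*tst1, smlnum ),            | */
/*              | where tst1 = |H(i,i)| + |H(i+1,i+1)|, or the 1-norm  | */
/*              | of H when that sum is zero, exactly as in the LAPACK | */
/*              | routine dlahqr.                                      | */
/*              %------------------------------------------------------% */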

	if (msglvl > 2) {
	    i__1 = *k + *np;
	    i__2 = *k + *np;
	    pdmout_(comm, &debug_1.logfil, &i__1, &i__2, &h__[h_offset], ldh, 
		    &debug_1.ndigit, "_naitr: Final upper Hessenberg matrix "
		    "H of order K+NP", (ftnlen)53);
	}

	goto L9000;
    }

/*        %--------------------------------------------------------% */
/*        | Loop back to extend the factorization by another step. | */
/*        %--------------------------------------------------------% */

    goto L1000;

/*     %---------------------------------------------------------------% */
/*     |                                                               | */
/*     |  E N D     O F     M A I N     I T E R A T I O N     L O O P  | */
/*     |                                                               | */
/*     %---------------------------------------------------------------% */

L9000:
    return 0;

/*     %----------------% */
/*     | End of pdnaitr | */
/*     %----------------% */

} /* pdnaitr_ */