Example #1
float clangs(char *norm, SuperMatrix *A)
{
    
    /* Local variables */
    NCformat *Astore;
    complex   *Aval;
    int      i, j, irow;
    float   value, sum;
    float   *rwork;

    Astore = A->Store;
    Aval   = Astore->nzval;
    
    if ( SUPERLU_MIN(A->nrow, A->ncol) == 0) {
	value = 0.;
	
    } else if (strncmp(norm, "M", 1)==0) {
	/* Find max(abs(A(i,j))). */
	value = 0.;
	for (j = 0; j < A->ncol; ++j)
	    for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++)
		value = SUPERLU_MAX( value, c_abs( &Aval[i]) );
	
    } else if (strncmp(norm, "O", 1)==0 || *(unsigned char *)norm == '1') {
	/* Find norm1(A). */
	value = 0.;
	for (j = 0; j < A->ncol; ++j) {
	    sum = 0.;
	    for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) 
		sum += c_abs( &Aval[i] );
	    value = SUPERLU_MAX(value,sum);
	}
	
    } else if (strncmp(norm, "I", 1)==0) {
	/* Find normI(A). */
	if ( !(rwork = (float *) SUPERLU_MALLOC(A->nrow * sizeof(float))) )
	    ABORT("SUPERLU_MALLOC fails for rwork.");
	for (i = 0; i < A->nrow; ++i) rwork[i] = 0.;
	for (j = 0; j < A->ncol; ++j)
	    for (i = Astore->colptr[j]; i < Astore->colptr[j+1]; i++) {
		irow = Astore->rowind[i];
		rwork[irow] += c_abs( &Aval[i] );
	    }
	value = 0.;
	for (i = 0; i < A->nrow; ++i)
	    value = SUPERLU_MAX(value, rwork[i]);
	
	SUPERLU_FREE (rwork);
	
    } else if (strncmp(norm, "F", 1)==0 || strncmp(norm, "E", 1)==0) {
	/* Find normF(A). */
	ABORT("Not implemented.");
    } else
	ABORT("Illegal norm specified.");

    return (value);

} /* clangs */
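Every SuperLU-style snippet on this page passes a complex value by pointer and relies on c_abs for the modulus. For readers without the library at hand, here is a minimal, hypothetical stand-in (the {float r, i;} layout follows the .r/.i accesses used elsewhere on this page; the scaling trick avoids intermediate overflow, which the underlying library helpers typically also do):

#include <math.h>

typedef struct { float r, i; } fcomplex;     /* hypothetical stand-in for the library's complex type */

/* Modulus |z|, computed with scaling to avoid overflow/underflow. */
static float my_c_abs(const fcomplex *z)
{
    float ar = fabsf(z->r), ai = fabsf(z->i);
    if (ar < ai) { float t = ar; ar = ai; ai = t; }   /* ensure ar >= ai */
    if (ar == 0.f) return 0.f;
    {
        float q = ai / ar;
        return ar * sqrtf(1.f + q * q);
    }
}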
Example #2
/*! \brief

<pre>
    Purpose
    =======

    SCSUM1 takes the sum of the absolute values of a complex
    vector and returns a single precision result.

    Based on SCASUM from the Level 1 BLAS.
    The change is to use the 'genuine' absolute value.

    Contributed by Nick Higham for use with CLACON.

    Arguments
    =========

    N       (input) INT
            The number of elements in the vector CX.

    CX      (input) COMPLEX array, dimension (N)
            The vector whose elements will be summed.

    INCX    (input) INT
            The spacing between successive values of CX.  INCX > 0.

    =====================================================================
</pre>
*/
double scsum1_(int *n, complex *cx, int *incx)
{
    /* System generated locals */
    int i__1, i__2;
    float ret_val;
    /* Builtin functions */
    double c_abs(complex *);
    /* Local variables */
    static int i, nincx;
    static float stemp;


#define CX(I) cx[(I)-1]


    ret_val = 0.f;
    stemp = 0.f;
    if (*n <= 0) {
        return ret_val;
    }
    if (*incx == 1) {
        goto L20;
    }

/*     CODE FOR INCREMENT NOT EQUAL TO 1 */

    nincx = *n * *incx;
    i__1 = nincx;
    i__2 = *incx;
    for (i = 1; *incx < 0 ? i >= nincx : i <= nincx; i += *incx) {

/*        NEXT LINE MODIFIED. */

        stemp += c_abs(&CX(i));
/* L10: */
    }
    ret_val = stemp;
    return ret_val;

/*     CODE FOR INCREMENT EQUAL TO 1 */

L20:
    i__2 = *n;
    for (i = 1; i <= *n; ++i) {

/*        NEXT LINE MODIFIED. */

        stemp += c_abs(&CX(i));
/* L30: */
    }
    ret_val = stemp;
    return ret_val;

/*     End of SCSUM1 */

} /* scsum1_ */
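A hypothetical driver for scsum1_, assuming f2c.h is available for the complex type (with .r/.i members) and that libf2c's c_abs is linked in. For the two entries below it should print sqrt(5) + 5, roughly 7.236068:

#include <stdio.h>
#include "f2c.h"                 /* assumed: provides the f2c `complex` type */

extern double scsum1_(int *n, complex *cx, int *incx);

int main(void)
{
    complex cx[2];
    int n = 2, incx = 1;

    cx[0].r = 1.f; cx[0].i = 2.f;    /* |1+2i| = sqrt(5) */
    cx[1].r = 3.f; cx[1].i = -4.f;   /* |3-4i| = 5       */

    printf("scsum1 = %f\n", scsum1_(&n, cx, &incx));
    return 0;
}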
Example #3
long RoundDoubleLong(double val)
{
    long ival = c_abs((long)val);        /* integer part of |val|; c_abs is assumed to act as a plain abs here */
    if ((c_abs(val) - ival) >= 0.5)      /* round half away from zero */
        ival++;

    if (val > 0)
        return ival;
    else
        return -ival;
}
Example #4
struct c_float mult_cc(struct c_float a, struct c_float b)
{
    struct c_float c;
    int sign_c = 1;

    if (a.mantisse < 0)
    {
        sign_c = -1;
        a.mantisse = -a.mantisse;
    }

    if (b.mantisse < 0)
    {
        sign_c = -sign_c;
        b.mantisse = -b.mantisse;
    }

    a.mantisse = a.mantisse >> (INT_MAXE / 2);
    b.mantisse = b.mantisse >> (INT_MAXE / 2);

    c.exponent = a.exponent + b.exponent;
    c.mantisse = a.mantisse * b.mantisse * sign_c;

    if (c_abs(c.mantisse) < INT_MAXX)
    {
        c.exponent--;
        c.mantisse = c.mantisse << 1;
    }

    return c;
}
Example #5
void MainContentComponent::calc_tuning() {								//calculate tuning

	//initialise fft i/o-buffers:									
	in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * file_len);
	out = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * file_len);
	p = fftw_plan_dft_1d(file_len, in, out, FFTW_FORWARD, FFTW_ESTIMATE);
	for(long n=0; n<file_len; n++) {									// fill fft input buffer:
		in[n][0] = file_buf[n];		//	real part
		in[n][1] = 0;				//	imag part
	}
	fftw_execute(p);													// execute fft

	file_mX = new float[file_len];										//get mag and phase:
	file_pX = new float[file_len];
	for(long n=0; n<file_len/2; n++) {
		file_mX[n] = c_abs(out[n][0], out[n][1]);
		file_pX[n] = c_arg(out[n][0], out[n][1]);
/*
		std::cout<<"magnitude: "<<file_mX[n];
		std::cout<<"\tphase: "<<file_pX[n]<<std::endl;
*/
	}
	fftw_destroy_plan(p);
	fftw_free(in); fftw_free(out); 
	ladder = new tuning(file_mX, file_len/2, key_count, mirror_tuning, peak_tresh, file_fs);	//(spectrum, length of spectrum (N/2 due to symmetry),
																								//	number of keys, mirror intervals around 3/2?, 
																								//		peak detection threshold)  
	

}
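Note that this example uses two-argument c_abs/c_arg helpers rather than the struct-based ones elsewhere on this page. A minimal sketch of what such helpers might look like (names and signatures are inferred from the call sites above, so treat them as assumptions):

#include <math.h>

/* Magnitude and phase from separate real/imaginary parts (illustrative stand-ins). */
static float my_c_abs(double re, double im) { return (float)sqrt(re * re + im * im); }
static float my_c_arg(double re, double im) { return (float)atan2(im, re); }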
Example #6
// Only accepts arrays of length up to 128
// Inputs shorter than 128 are zero-padded
double* fftMe(double list[],int length) {
    complex theList[128];
    int i;
    for (i = 0; i < length; i++) {
        complex co;
        co.real=list[i];
        co.imag=0;
        theList[i] = co;
    }
    for (i = length; i < 128; i++) {
        complex co;
        co.imag=0;
        co.real=0;
        theList[i] = co;
    }

    // fft
    fft(length,theList);

    double alpha=1.0/(double)length;
    for (i = 0; i < length; i++) {
        complex co;
        co.real = alpha*theList[i].real;
        co.imag = alpha*theList[i].imag;
        theList[i] = co;
    }
    float fftSeries[length];
    double *out = malloc((length/2) * sizeof(double));   /* heap-allocated so it survives the return; caller must free(); requires <stdlib.h> */
    c_abs(theList,fftSeries,length);
    for(i=0;i<length/2;i++){
       out[i] = fftSeries[i+1]*2;
       //printf("%.10f\n",out[i]);
    }
    return out;
}
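The returned spectrum is a heap buffer of length/2 doubles, so the caller owns it and must free it. A hypothetical caller:

#include <stdio.h>
#include <stdlib.h>

double *fftMe(double list[], int length);        /* the function above */

void demo(void)
{
    double samples[64];
    int i, n = 64;

    for (i = 0; i < n; i++)
        samples[i] = (i % 8 < 4) ? 1.0 : -1.0;   /* simple square-wave test input */

    double *spectrum = fftMe(samples, n);        /* length n/2 */
    for (i = 0; i < n / 2; i++)
        printf("bin %d: %.6f\n", i, spectrum[i]);
    free(spectrum);                              /* buffer was malloc'ed inside fftMe */
}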
Example #7
/*! \brief SIGN functions for complex number. Returns z/abs(z) */
complex c_sgn(complex *z)
{
    register float t = c_abs(z);
    register complex retval;

    if (t == 0.0) {
	retval.r = 1.0, retval.i = 0.0;
    } else {
	retval.r = z->r / t, retval.i = z->i / t;
    }

    return retval;
}
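A quick hypothetical check for c_sgn: for z = 3 + 4i the modulus is 5, so the result should be 0.6 + 0.8i. The complex type with .r/.i members is assumed to come from the SuperLU headers (e.g. slu_scomplex.h):

#include <stdio.h>
#include "slu_scomplex.h"      /* assumed: defines `complex` and declares c_abs() */

complex c_sgn(complex *z);     /* defined above */

void demo(void)
{
    complex z, s;
    z.r = 3.f; z.i = 4.f;

    s = c_sgn(&z);
    printf("%.1f + %.1fi\n", s.r, s.i);   /* expected: 0.6 + 0.8i */
}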
Example #8
/*! \brief Check the inf-norm of the error vector 
 */
void cinf_norm_error(int nrhs, SuperMatrix *X, complex *xtrue)
{
    DNformat *Xstore;
    float err, xnorm;
    complex *Xmat, *soln_work;
    complex temp;
    int i, j;

    Xstore = X->Store;
    Xmat = Xstore->nzval;

    for (j = 0; j < nrhs; j++) {
      soln_work = &Xmat[j*Xstore->lda];
      err = xnorm = 0.0;
      for (i = 0; i < X->nrow; i++) {
        c_sub(&temp, &soln_work[i], &xtrue[i]);
	err = SUPERLU_MAX(err, c_abs(&temp));
	xnorm = SUPERLU_MAX(xnorm, c_abs(&soln_work[i]));
      }
      err = err / xnorm;
      printf("||X - Xtrue||/||X|| = %e\n", err);
    }
}
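A hypothetical way to exercise cinf_norm_error, assuming the usual SuperLU helpers (cCreate_Dense_Matrix, Destroy_SuperMatrix_Store) and the slu_cdefs.h header are available. It compares a 2x1 "solution" against a reference and prints the relative infinity-norm error:

#include "slu_cdefs.h"    /* assumed SuperLU header: SuperMatrix, complex, creation helpers */

void demo(void)
{
    complex x[2], xtrue[2];
    SuperMatrix X;

    x[0].r = 1.f; x[0].i = 0.f;    xtrue[0].r = 1.f; xtrue[0].i = 0.f;
    x[1].r = 2.f; x[1].i = 0.5f;   xtrue[1].r = 2.f; xtrue[1].i = 0.f;

    cCreate_Dense_Matrix(&X, 2, 1, x, 2, SLU_DN, SLU_C, SLU_GE);
    cinf_norm_error(1, &X, xtrue);            /* prints ||X - Xtrue||/||X|| */
    Destroy_SuperMatrix_Store(&X);
}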
Example #9
struct c_complex Lentz_Dn(struct c_complex z,long n)

/*:10*/
#line 126 "./mie.w"

{
struct c_complex alpha_j1,alpha_j2,zinv,aj;
struct c_complex alpha,result,ratio,runratio;

/*12:*/
#line 156 "./mie.w"


zinv= c_sdiv(2.0,z);
alpha= c_smul(n+0.5,zinv);
aj= c_smul(-n-1.5,zinv);
alpha_j1= c_add(aj,c_inv(alpha));
alpha_j2= aj;
ratio= c_div(alpha_j1,alpha_j2);
runratio= c_mul(alpha,ratio);


/*:12*/
#line 131 "./mie.w"


do
/*13:*/
#line 179 "./mie.w"

{
aj.re= zinv.re-aj.re;
aj.im= zinv.im-aj.im;
alpha_j1= c_add(c_inv(alpha_j1),aj);
alpha_j2= c_add(c_inv(alpha_j2),aj);
ratio= c_div(alpha_j1,alpha_j2);
zinv.re*= -1;
zinv.im*= -1;
runratio= c_mul(ratio,runratio);
}

/*:13*/
#line 134 "./mie.w"


while(fabs(c_abs(ratio)-1.0)> 1e-12);

result= c_add(c_sdiv((double)-n,z),runratio);
return result;
}
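The CWEB code above leans on a small complex-arithmetic library (c_add, c_mul, c_div, c_inv, c_smul, c_sdiv, c_abs) that is not shown. As a rough illustration of the conventions it assumes, here are hypothetical versions of two of those helpers using the same by-value struct c_complex (not the mie.w originals):

#include <math.h>

struct c_complex { double re, im; };

/* |z| (illustrative; the real library may scale to avoid overflow). */
static double my_c_abs(struct c_complex z)
{
    return sqrt(z.re * z.re + z.im * z.im);
}

/* 1/z via the conjugate divided by the squared modulus. */
static struct c_complex my_c_inv(struct c_complex z)
{
    double d = z.re * z.re + z.im * z.im;
    struct c_complex w = { z.re / d, -z.im / d };
    return w;
}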
Example #10
complex c_sqrt(complex x)  /* square root $\sqrt{x}$ */
{
    double r, w;

    r = c_abs(x);
    w = sqrt(r + fabs(x.re));
    if (x.re >= 0) {
        x.re = SQRT05 * w;
        x.im = SQRT05 * x.im / w;
    } else {
        x.re = SQRT05 * fabs(x.im) / w;
        x.im = (x.im >= 0) ? SQRT05 * w : -SQRT05 * w;
    }
    return x;
}
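A hypothetical sanity check for this c_sqrt: the square root of -4 should come out as 0 + 2i (up to rounding). It assumes the same header that defines this double-precision complex type (with .re/.im members), SQRT05, and c_abs is included:

#include <stdio.h>

void demo(void)
{
    complex z, w;
    z.re = -4.0; z.im = 0.0;

    w = c_sqrt(z);
    printf("%g + %gi\n", w.re, w.im);   /* expected: 0 + 2i */
}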
Example #11
static PyObject *
complex_abs(PyComplexObject *v)
{
	double result;

	PyFPE_START_PROTECT("complex_abs", return 0)
	result = c_abs(v->cval);
	PyFPE_END_PROTECT(result)

	if (errno == ERANGE) {
		PyErr_SetString(PyExc_OverflowError,
				"absolute value too large");
		return NULL;
	}
	return PyFloat_FromDouble(result);
}
Example #12
/* Subroutine */ int PASTEF77(c,rotg)(singlecomplex *ca, singlecomplex *cb, real *c__, singlecomplex *s)
{
    /* System generated locals */
    real r__1, r__2;
    singlecomplex q__1, q__2, q__3;

    /* Builtin functions */
    double c_abs(singlecomplex *), sqrt(doublereal);
    void bla_r_cnjg(singlecomplex *, singlecomplex *);

    /* Local variables */
    real norm;
    singlecomplex alpha;
    real scale;

    if (c_abs(ca) != 0.f) {
	goto L10;
    }
    *c__ = 0.f;
    s->real = 1.f, s->imag = 0.f;
    ca->real = cb->real, ca->imag = cb->imag;
    goto L20;
L10:
    scale = c_abs(ca) + c_abs(cb);
    q__1.real = ca->real / scale, q__1.imag = ca->imag / scale;
/* Computing 2nd power */
    r__1 = c_abs(&q__1);
    q__2.real = cb->real / scale, q__2.imag = cb->imag / scale;
/* Computing 2nd power */
    r__2 = c_abs(&q__2);
    norm = scale * sqrt(r__1 * r__1 + r__2 * r__2);
    r__1 = c_abs(ca);
    q__1.real = ca->real / r__1, q__1.imag = ca->imag / r__1;
    alpha.real = q__1.real, alpha.imag = q__1.imag;
    *c__ = c_abs(ca) / norm;
    bla_r_cnjg(&q__3, cb);
    q__2.real = alpha.real * q__3.real - alpha.imag * q__3.imag, q__2.imag = alpha.real * q__3.imag + 
	    alpha.imag * q__3.real;
    q__1.real = q__2.real / norm, q__1.imag = q__2.imag / norm;
    s->real = q__1.real, s->imag = q__1.imag;
    q__1.real = norm * alpha.real, q__1.imag = norm * alpha.imag;
    ca->real = q__1.real, ca->imag = q__1.imag;
L20:
    return 0;
} /* crotg_ */
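A hypothetical driver for this rotation generator. It assumes PASTEF77(c,rotg) expands to the conventional Fortran-style symbol crotg_, that singlecomplex is a {float real, imag;} struct, and that real is float. For the purely real pair a = 3, b = 4 it should produce c = 0.6, s = 0.8 and overwrite a with r = 5:

#include <stdio.h>

typedef struct { float real, imag; } singlecomplex;   /* assumed layout; normally comes from the BLAS/BLIS headers */

extern int crotg_(singlecomplex *ca, singlecomplex *cb, float *c, singlecomplex *s);  /* assumed expansion of PASTEF77(c,rotg) */

void demo(void)
{
    singlecomplex a, b, s;
    float c;

    a.real = 3.f; a.imag = 0.f;
    b.real = 4.f; b.imag = 0.f;

    crotg_(&a, &b, &c, &s);
    printf("c=%g  s=(%g, %g)  r=(%g, %g)\n", c, s.real, s.imag, a.real, a.imag);
}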
Example #13
int fft(float *cbd,float *dx2,int *idxcnt)
{

    static integer nbin = 64;
    /*static integer nxx = (6*64)+150;*/

    /* Local variables */
    complex chat[64];
    float magn[64],wk[534];
    integer iwk[534],nbinx=nbin;
    float a, x, dx;
    integer icbd,icnt,iok;

/*       ** perform FFT analysis on data */
    for (icbd = 0; icbd < nbin; ++icbd) {
	chat[icbd].r = cbd[icbd], chat[icbd].i = (float)0.;
    }
    iok=fftcc_(chat, &nbinx, iwk, wk);
    if (iok!=0) return -1;
    for (icbd = 0; icbd < nbin; ++icbd) {
	magn[icbd] = c_abs(&chat[icbd]) / (double)nbin;
    }
    dx = 0.0F;
    icnt = 0;
    for (icbd = 1; icbd < (nbin/2)-1; ++icbd) {
	a = magn[icbd-1];
	x = magn[icbd];
	dx += (x + a) / 2.0F;
	if ((magn[icbd] > magn[icbd-1])&&(magn[icbd] > magn[icbd+1])) {
	    ++icnt;
	}
    }
/* added to "normalize" fft for various satellite resolutions */
    *dx2 = dx / magn[0];
    *idxcnt=icnt;

    return 0;
} /* fft */
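A hypothetical driver for this fft() wrapper: it expects exactly 64 samples in cbd and returns the normalized integral dx2 and the local-peak count through the output pointers (the fftcc_ routine from the original Fortran library must be linked for this to run):

#include <stdio.h>

int fft(float *cbd, float *dx2, int *idxcnt);   /* the wrapper above */

void demo(void)
{
    float cbd[64], dx2;
    int idxcnt, i;

    for (i = 0; i < 64; i++)
        cbd[i] = (i % 8 < 4) ? 1.0f : 0.0f;     /* simple square-wave test input */

    if (fft(cbd, &dx2, &idxcnt) == 0)
        printf("dx2 = %g, peaks = %d\n", dx2, idxcnt);
    else
        printf("fftcc_ failed\n");
}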
Example #14
/////////////////////////////////////////////////////////////////////////////////////////////
//int main(int argc, char *argv[ ])
//{
 int main(void)
 {
   	sortingindex = 0;
	if((fpp = fopen(INPUT_FILE, "r")) == NULL)
	{
		printf("Cannot open 'parameter_file'.\n");
	}

	for(j = 0; j < 11; j++)
	{
		if(fscanf(fpp, "%lf", &tmp) != EOF) 
		{
				my_array[j] = tmp;
				
		} else 		{
			printf("Not enough data in 'input_parameter'!");
		}
	}

	fclose(fpp);
 

   	In_n= (int) my_array[0];
	In_vect_n= (int) my_array[1];
	Out_n= (int) my_array[2];
	Mf_n= (int) my_array[3];
	training_data_n= (int) my_array[4];
	checking_data_n= (int) my_array[5];
	epoch_n= (int) my_array[6];
	step_size=my_array[7];
	increase_rate=my_array[8];
	decrease_rate=my_array[9];
	threshold = my_array[10];
		
	Rule_n = (int)pow((double)Mf_n, (double)In_n); //number of rules 
	Node_n = In_n + In_n*Mf_n + 3*Rule_n + In_n*Rule_n + Out_n;

	/* allocate matrices and memories */
	int trnnumcheck[training_data_n + 1];
	int trnnumchecku[training_data_n + 1];
	for(i=0; i<training_data_n +1; i++)
	{
	trnnumcheck[i]=0;
	trnnumchecku[i]=0;
	}
	
	diff =(double **)create_matrix(Out_n, training_data_n, sizeof(double)); 
	double chkvar[checking_data_n];
	double cdavg[checking_data_n];
	double chkvar_un[checking_data_n];
	double cdavg_un[checking_data_n];
	target = calloc(Out_n, sizeof(double));
	de_out = calloc(Out_n, sizeof(double));
	node_p = (NODE_T **)create_array(Node_n, sizeof(NODE_T *)); 
	config = (int **)create_matrix(Node_n, Node_n, sizeof(int)); 
	training_data_matrix = (double **)create_matrix(training_data_n, In_n*In_vect_n + Out_n, sizeof(double));
	if(checking_data_n > 0)
	{
		checking_data_matrix = (double **)create_matrix(checking_data_n, In_n*In_vect_n +Out_n, sizeof(double));
		checking_data_matrix_un = (double **)create_matrix(checking_data_n, Out_n, sizeof(double));
		chk_output =  (double **)create_matrix(checking_data_n, Out_n, sizeof(double));
	}
	layer_1_to_4_output = (COMPLEX_T **)create_matrix(training_data_n, In_n*Mf_n + 3*Rule_n, sizeof(COMPLEX_T));
	trn_rmse_error = calloc(epoch_n, sizeof(double));
	trnNMSE = calloc(epoch_n, sizeof(double));
	chk_rmse_error = calloc(epoch_n, sizeof(double));
	kalman_parameter = (double **)create_matrix(Out_n ,(In_n*In_vect_n + 1)*Rule_n, sizeof(double)); 
	kalman_data = (double **)create_matrix(Out_n ,(In_n*In_vect_n + 1)*Rule_n, sizeof(double));
	step_size_array = calloc(epoch_n, sizeof(double));
	ancfis_output = (double **)create_matrix(training_data_n , Out_n, sizeof(double)); 
	trn_error =calloc(Out_n +1, sizeof(double));
	chk_error_n = calloc(Out_n +1, sizeof(double));// changing size for adding new error measures
	chk_error_un = calloc(Out_n +1, sizeof(double));// changing size for adding new error measures
	trn_datapair_error = calloc(training_data_n, sizeof(double));
	trn_datapair_error_sorted = (double **)create_matrix(2,training_data_n, sizeof(double));
	NMSE = calloc(Out_n, sizeof(double));
	NDEI = calloc(Out_n, sizeof(double));
	unNMSE = calloc(Out_n, sizeof(double));
	unNDEI = calloc(Out_n, sizeof(double));
	//Build Matrix of 0 nd 1 to show the connected nodes
	gen_config(In_n, Mf_n,Out_n, config);//gen_config.c
	//With the above matrix, build ANCFIS connected nodes
	build_ancfis(config); //datastru.c
	//Find total number of nodes in layer 1 and 5
	parameter_n = set_parameter_mode(); //datastru.c
	parameter_array = calloc(parameter_n, sizeof(double));
	initpara(TRAIN_DATA_FILE, training_data_n, In_n, In_vect_n+1, Mf_n); // initpara.c
// after this step, the parameters (present in layer 1 and layer 5 only) are assigned random initial values
// using some basic algebra, and these values are then stored in "para.ini"
	get_parameter(node_p,Node_n,INIT_PARA_FILE); //input.c
// after this step, the initial random values of the parameters are read from "para.ini" and assigned to the appropriate nodes in the node structure by accessing their parameter lists.
	//Get training and testing data
	get_data(TRAIN_DATA_FILE, training_data_n, training_data_matrix); //input.c
// after this step, the training input data is read from the "data.trn" file and stored in the training data matrix.
	get_data(CHECK_DATA_FILE, checking_data_n, checking_data_matrix); //input.c
// after the above step, the checking data is read from the "data.chk" file and then stored in the checking data matrix.

	for(i=0; i< Out_n; i++)
	{
	for(j=0; j<training_data_n; j++)
	{
	trnavg = trnavg + training_data_matrix[j][(i+1)*In_vect_n +i];
	}
	}
	trnavg = trnavg /(Out_n * training_data_n);
	
	for(i=0; i< Out_n; i++)
	{
	for(j=0; j<training_data_n; j++)
	{
	temp = training_data_matrix[j][(i+1)*In_vect_n +i]- trnavg;
	temp = temp*temp;
	trnvariance = trnvariance + temp;
	}
	}
	trnvariance = trnvariance /((Out_n * training_data_n)-1);

	temp = 0.0;
	for(i=0; i< Out_n; i++)
	{
	for(j=0; j< checking_data_n; j++)
	{
	chkavg = chkavg + checking_data_matrix[j][(i+1)*In_vect_n +i];
	}
	}
	chkavg = chkavg /(Out_n * checking_data_n);
	
	for(i=0; i< Out_n; i++)
	{
	for(j=0; j<checking_data_n; j++)
	{
	temp = checking_data_matrix[j][(i+1)*In_vect_n +i]- chkavg;
	temp = temp*temp;
	chkvariance = chkvariance + temp;
	}
	}
	chkvariance = chkvariance /((Out_n * checking_data_n)-1);
	printf("epochs \t trn error \t tst error\n");
	printf("------ \t --------- \t ---------\n");
	//printf("not entering the epoch loop and the i loop yoyoyo\n");
////////////////////////////////////////////////////////////

fppp = (FILE *)open_file("status.txt", "w");
fpppp = (FILE *)open_file("trn.txt", "w");


	ep_n=0;

	do
	{
		//step_size_pointer= &step_size;		
		printf("epoch numbernumber %d \n", ep_n+1);	
		//step_size_array[ep_n] = step_size_pointer;
		step_size_array[ep_n] = step_size;
	// after the above step, the updated step size at the end of the last loop is stored in step_size_array.
	// this keeps happening every time we start an epoch, so at the end of the loop step_size_array will
	// hold a list of all the updated step sizes. Since this is an offline version, step sizes are updated only
	// at the end of an epoch. 
		for(m = 0; m < Out_n; m++)
		{ 	
			//printf("m loop number %d \n", m);	
			for(j = 0; j < training_data_n; j++)
			{ 
				//printf("j loop number %d \n", j);				
				//copy the input vector(s) to input node(s)
				put_input_data(node_p,j, training_data_matrix); //input.c
	// after this (above) step, the input data is transferred from the training data matrix to the "node" structure.
				//printf("testing \n");	
				//printf("reeeetesting \n");	
				target[m] = training_data_matrix[j][(m+1)*In_vect_n+m]; // *** 
	// this step assigns the value of the "m"th output of the "j"th training data pair to target.
				//printf("testing \n");	
				//forward pass, get node outputs from layer 1 to layer 4
				calculate_output(In_n, In_n + In_n*Mf_n + 3*Rule_n, j); //forward.c
	// after this step, the output of nodes in layers 1 to 4 is calculated. Please note that when this happens for the first
	// time, i.e. when ep_n=0, our network parameters are already initialized. Thus, it is possible to get the
	// output of each node using the function definitions provided in forward.c. After the first epoch, our parameters get 
	// updated and this output is then calculated using the new parameters. The essential point to note here is that
	// we can always calculate the output of each node since we have already initialized our parameters.
				//printf("testing \n");	
				//put outputs of layer 1 to 4 into layer_1_to_4_output
		
				for(k = 0; k < Mf_n*In_n + 3*Rule_n; k++)
				{
				//printf("testing \n");	
				layer_1_to_4_output[j][k] = *node_p[k + In_n]->value;
				//fprintf(fppp, "%lf \t %lf \t \n", (layer_1_to_4_output[j][k]).real, (layer_1_to_4_output[j][k]).imag);
				}
	// the above loop simply puts the values of nodes from layer 1 to layer 4 in the layer_1_to_4_output matrix.

				//identify layer 5 params using LSE (Kalman filter)
				//printf("testing \n");	
				get_kalman_data(kalman_data, target); //kalman.c
	// this function call finds the values of O4iXnl; these are basically the coefficients
	// of the Kalman parameters for a given training data pair
	// and puts them in the kalman_data matrix.
	// this kalman_data matrix has In_n rows and a number of columns equal to the number of parameters that are
	// responsible for determining each output... as stated above, the outputs are actually the coefficients of the
	// parameters.

				//printf("testing \n");	
				//calculate Kalman parameters
				
				kalman(ep_n, j+(m*training_data_n), m, kalman_data, kalman_parameter,target); //kalman.c
	// this function call evaluates the Kalman parameters for a given output, for a given epoch. That is, it takes the epoch 
	// number from us, takes the info about how many times Kalman has been invoked before, also takes in the
	// output number (row number) for which the parameters are to be found... it also takes kalman_data and reads 
	// from it to estimate the Kalman parameters... it also takes target... and stores the output in the mth row of 
	// kalman_parameter.
				//printf("testing \n");	
			}
	// what the above loop does: for a given output node, one by one, all the training data pairs are passed through the ANCFIS structure, outputs
	// are calculated for layers 1 to 4, and then a recursive Kalman filter is used to identify the Kalman
	// parameters corresponding to that output node. These Kalman parameters are updated after every training data pair,
	// and finally, at the end of all the training data, we have an estimate of the Kalman parameters corresponding to that output node.
		}
	// thus, at the end of the above loop, the Kalman parameters for all the output nodes have been evaluated...

	// now, we are ready to actually calculate the outputs. Please remember that, all this while, the actual 
	// values of the parameters of the nodes in layer 1 and layer 5 are the ones that were randomly initialized.

		for(j = 0; j < training_data_n; j++)
		{ 
			//printf("testing 1\n");	
			put_input_data(node_p,j, training_data_matrix); //input.c
			//printf("testing 2 \n");	
			for(k = 0; k < Mf_n*In_n + 3*Rule_n; k++)
			{
				*node_p[k + In_n]->value = layer_1_to_4_output[j][k];
				/*if(ep_n==1)
				{
				fprintf(fppp, "%d.\t %lf \t + \t i%lf \n", k, (layer_1_to_4_output[j][k]).real,(layer_1_to_4_output[j][k]).imag);
				}*/
			}
	// note that in the above two loops, each time, whatever output we got for a given training 
	// data pair was safely stored in the layer_1_to_4_output array, while the values on the actual nodes in the
	// structure got overwritten by each new incoming training data pair (periodic with period training_data_n);
	// that is, for each output node we got the same results for a given training data pair, so the node values
	// were independent of m. Now, for a given training data pair, we restore those values to the actual
	// node structure from the layer_1_to_4_output matrix.

			//printf("testing 3\n");	
			put_kalman_parameter(Out_n,kalman_parameter); //kalman.c
			//printf("hihahahha \n");
	// using this function call, we are placing the estimated values of the layer 5 parameters in the node structure
	// by accessing each node and its parameter list.
			// Do forward pass for L5 and L6
			calculate_output(In_n + In_n*Mf_n + 3*Rule_n, Node_n, j); //forward.c
	// for a given value of the training data pair, this function calculates the output of layer 5 and layer 6 nodes 
	// and places them in the node structure.
			//printf("hihahahha  no 2 \n");
			calculate_root(training_data_matrix,diff,j,node_p); //debug.c
	// this function call calculates the square of the error between the predicted value of an output node and the 
	// corresponding actual value in the training data matrix (the actual output) and stores it in diff.
	// it does this for a given training data pair and for all output nodes.

			// Do backward pass and calculate all error rate for each node
			calculate_de_do(j,Out_n,target,ancfis_output); //backward.c
			//printf("hihahahha no 3 \n");
	// calculates de_do for each node for a given training data pair
			update_de_dp(j); //de_dp.c	
	// updates de_dp for each node....
		}
	// thus, at the end of this loop, estimated outputs for all the training data are calculated; back propagation 
	// is also done and de_dp for all the nodes is updated.
		
		//printf("testing 1\n");	
		calculate_trn_err(diff,trn_error, trn_datapair_error, training_data_n); //debug.c
		//printf("testing 2 \n");	
		//training_error_measure(target,ancfis_output,diff, training_data_n, trn_error,out_n); //trn_err.c
		trn_rmse_error[ep_n] = trn_error[Out_n];
		trnNMSE[ep_n] = trn_rmse_error[ep_n]*trn_rmse_error[ep_n]/trnvariance;
		fprintf(fppp, "epoch number is %d \t trn RMSE is %.11f \t trn NMSE is  %lf \t \n", ep_n + 1,  trn_rmse_error[ep_n], trnNMSE[ep_n]);
		//fprintf(fpppp, "\n");
		fprintf(fpppp, "epoch number is %d \t trn RMSE is %.11f \t trn NMSE is  %lf \t \n", ep_n + 1,  trn_rmse_error[ep_n], trnNMSE[ep_n]);
		printf("trn RMSE is \t %lf \n", trn_rmse_error[ep_n]);
		printf("trn NMSE is \t %lf \n", trnNMSE[ep_n]);
		for(i=0; i<training_data_n; i++)
		{
		trn_datapair_error_sorted[0][i]=trn_datapair_error[i];
		trn_datapair_error_sorted[1][i]= i+1;
		}

		for(j=1; j<training_data_n; j++)
		{		
		for(i=0; i<training_data_n-j; i++)
		{
		if(trn_datapair_error_sorted[0][i]>trn_datapair_error_sorted[0][i+1])
		{	
		sorting=trn_datapair_error_sorted[0][i+1];
		trn_datapair_error_sorted[0][i+1]=trn_datapair_error_sorted[0][i];
		trn_datapair_error_sorted[0][i]=sorting;
		sortingindex = trn_datapair_error_sorted[1][i+1];
		trn_datapair_error_sorted[1][i+1]=trn_datapair_error_sorted[1][i];
		trn_datapair_error_sorted[1][i]=sortingindex;
		}
		}
		}

		for(j=0; j<training_data_n; j++)
		{
		fprintf(fppp, "\n");		
		fprintf(fppp, "training data pair sorted number \t %d \n", j+1);
		fprintf(fppp, "training data pair original number \t %d \n", (int)(trn_datapair_error_sorted[1][j]));
		fprintf(fppp, "training data pair sorted error in RMSE is \t %lf \n",trn_datapair_error_sorted[0][j]);
		fprintf(fpppp, "%d \t", (int)(trn_datapair_error_sorted[1][j]));
		complexsum = complex(0.0, 0.0);
		fprintf(fppp,"Normalized layer 3 outputs are as follows \n");
		for(k= In_n*Mf_n + Rule_n; k< In_n*Mf_n + 2*Rule_n; k++)
		{
		fprintf(fppp, "%d.\t %lf + i%lf \t %lf < %lf \n", k, (layer_1_to_4_output[j][k]).real,(layer_1_to_4_output[j][k]).imag, c_abs(layer_1_to_4_output[j][k]), c_phase(layer_1_to_4_output[j][k])*180/PI);
		complexsum = c_add(complexsum, layer_1_to_4_output[j][k]);
		}
		
		
		fprintf(fppp, "Sum of the outputs of layer 3 is \t %lf+i%lf \t %lf<%lf \n", complexsum.real, complexsum.imag, c_abs(complexsum), c_phase(complexsum)*180/PI);
		complexsum = complex(0.0, 0.0);
		fprintf(fppp,"dot producted layer 4 outputs are as follows \n");
		for(k=In_n*Mf_n + 2*Rule_n; k< In_n*Mf_n + 3*Rule_n; k++)
		{
		
		fprintf(fppp, "%d.\t %lf + i%lf \t %lf < %lf \n", k, (layer_1_to_4_output[j][k]).real,(layer_1_to_4_output[j][k]).imag, c_abs(layer_1_to_4_output[j][k]), c_phase(layer_1_to_4_output[j][k])*180/PI);
		complexsum = c_add(complexsum, layer_1_to_4_output[j][k]);
		}

		fprintf(fppp, "sum of the outputs of layer 4 is \t %lf +i%lf \t %lf<%lf \n", complexsum.real, complexsum.imag, c_abs(complexsum), c_phase(complexsum)*180/PI);
		if(j> training_data_n -6 )
		{
		trnnumcheck[(int)(trn_datapair_error_sorted[1][j])]= trnnumcheck[(int)(trn_datapair_error_sorted[1][j])] +1;
		}
		if(j<5 )
		{
		trnnumchecku[(int)(trn_datapair_error_sorted[1][j])]= trnnumchecku[(int)(trn_datapair_error_sorted[1][j])] +1;
		}

		}
		fprintf(fpppp, "\n");
		
		//Find RMSE of testing error
/********************************************************************************
if(checking_data_n != 0) 
		{
			printf("testing 3 \n");	
			epoch_checking_error(checking_data_matrix, checking_data_n, chk_error, training_data_n, chk_output, ep_n); //chk_err.c  writes to tracking.txt
			printf("testing 4 \n");	
			chk_rmse_error[ep_n] = chk_error[Out_n];
			for (i=0; i<Out_n; i++)
			printf("%3d \t %.11f \t %.11f\n", ep_n+1, trn_error[i], chk_error[i]);
			printf("%.11f\t %.11f\n", trn_error[Out_n],chk_error[Out_n]);
			write_result(ep_n+1,Out_n,trn_error,chk_error);  //debug.c writes to result.txt
		} 
		else 
		{
			for (i=0; i<Out_n; i++)	
			printf("%4d \t %.11f\n", ep_n+1, trn_error[i]);
		}
**************************************************************************************/

		// check whether the current training RMSE is less than the threshold and store its epoch number and parameters
		
		if(trn_rmse_error[ep_n] < min_trn_RMSE) 
		{
			min_trn_RMSE_epoch = ep_n +1;
			min_trn_RMSE = trn_rmse_error[ep_n];
			min_trnNMSE = trnNMSE[ep_n];
			record_parameter(parameter_array);
		}

		if(ep_n < epoch_n-1)
		{ 
			//update parameters in 1st layer (Using VNCSA)
			update_parameter(1, step_size); //new_para.c
			//update stepsize
			update_step_size(trn_rmse_error, ep_n, &step_size, decrease_rate, increase_rate); //stepsize.c
		}
		ep_n++;
		
	} while((trnNMSE[ep_n -1]>= threshold) && (ep_n <= epoch_n -1));

for(i=1; i<=training_data_n; i++)
{
	fprintf(fpppp, "%d \t %d \n", i, trnnumcheck[i]);
}
for(i=1; i<=training_data_n; i++)
{
	fprintf(fpppp, "%d \t %d \n", i, trnnumchecku[i]);
}


if(trnNMSE[ep_n -1]< threshold)
{
fprintf(fppp, "\n");
fprintf(fppp, "We have gone below the threshold value \n");
fprintf(fppp, "the epoch number in which this happened is %d \n", min_trn_RMSE_epoch);
}
else
{
fprintf(fppp, "\n");
fprintf(fppp, "We exhausted the available epochs and threshold was not broken :( \n");
fprintf(fppp, "the epoch number which yielded minimum training RMSE is %d \n", min_trn_RMSE_epoch);
}


fclose(fppp);
fclose(fpppp);

double *minmaxc;
minmaxc= (double *)calloc(2*In_n, sizeof(double));
	
	if((fpp = fopen("minmax.txt", "r")) == NULL)
	{
		printf("Cannot open 'parameter_file'.\n");
	}

	for(j = 0; j < 2*In_n; j++)
	{
		if(fscanf(fpp, "%lf", &tmp) != EOF) 
		{
				minmaxc[j] = tmp;
				
		} else 		{
			printf("Not enough data in 'input_parameter'!");
		}
	}

	fclose(fpp);
//////////////////////////////////////////////////////////////


	restore_parameter(parameter_array); //output.c
	write_parameter(FINA_PARA_FILE); //output.c
	write_array(trnNMSE, epoch_n, TRAIN_ERR_FILE); //lib.c
	if (checking_data_n != 0)
	{
		//printf("testing 3 \n");	
		epoch_checking_error(checking_data_matrix, checking_data_n, chk_error_n, chk_error_un, training_data_n, chk_output, ep_n -1, minmaxc); //chk_err.c  writes to tracking.txt
		//printf("testing 4 \n");	
		//chk_rmse_error[ep_n] = chk_error[Out_n];
		min_chk_RMSE_n = chk_error_n[Out_n];
		printf(" initial checking RMSE is %lf \n ", min_chk_RMSE_n);
		min_chk_RMSE_un = chk_error_un[Out_n];
		//for (i=0; i<Out_n; i++)
		//printf("%3d \t %.11f \t %.11f\n", ep_n+1, trn_error[i], chk_error[i]);
		//printf("%3d \t %.11f \n", ep_n+1, trn_error[i]);
			//printf("%.11f\t %.11f\n", trn_error[Out_n],chk_error[Out_n]);
		//printf("%.11f\t \n", trn_error[Out_n]);
		//write_result(min_trn_RMSE_epoch ,Out_n,trn_rmse_error,chk_error);  //debug.c writes to result.txt about the epoch number at which the stopping was done and the corresponding training RMSE and checking RMSE
	} 
	//write_array(chk_rmse_error, epoch_n, CHECK_ERR_FILE); //lib.c
	//}
	
	write_array(step_size_array, epoch_n, STEP_SIZE_FILE); //lib.c

/**************************************************************************
	min_chk_RMSE = chk_rmse_error[epoch_n -1];
	min_chk_RMSE_epoch = epoch_n -1;	
	for(j=0; j< epoch_n; j++)
	{
	if(chk_rmse_error[j]< min_chk_RMSE)
	{
	min_chk_RMSE = chk_rmse_error[j];
	min_chk_RMSE_epoch = j;
	}
	}
*************************************************************************/
/**************************************************************
	double minmaxc[2*In_n];
	
	if((fpp = fopen("minmax.txt", "r")) == NULL)
	{
		printf("Cannot open 'parameter_file'.\n");
	}

	for(j = 0; j < 2*In_n; j++)
	{
		if(fscanf(fpp, "%lf", &tmp) != EOF) 
		{
				minmaxc[j] = tmp;
				
		} else 		{
			printf("Not enough data in 'input_parameter'!");
		}
	}

	fclose(fpp);
***************************************************************************/
	for(k=0; k< Out_n; k++)
	{
	for(j=0; j< checking_data_n; j++)
		{
		 checking_data_matrix_un[j][k]= (checking_data_matrix[j][(k+1)*In_vect_n +k])* (minmaxc[(2*k) +1] - minmaxc[2*k]) + minmaxc[2*k];
		}
	}





// the following code calculates cdavg_un and the checking data average, both un-normalized
for(k=0; k< Out_n; k++)
	{
	for(j=0; j< checking_data_n; j++)
		{
		checking_data_average_un = checking_data_average_un + checking_data_matrix_un[j][k];
		}
	cdavg_un[k]=checking_data_average_un/checking_data_n;
	checking_data_average_un_temp=checking_data_average_un_temp+checking_data_average_un;
	checking_data_average_un=0;
		}
	
	checking_data_average_un = checking_data_average_un_temp/(Out_n*checking_data_n);
	printf("%lf is the checking datat average non normalized\n", checking_data_average_un);





// the following code calculates chkvar_un, un-normalized 
for(k=0; k< Out_n; k++)
	{	
	for(j=0; j< checking_data_n; j++)
		{				
		temp= temp + (checking_data_matrix_un[j][k] - cdavg_un[k])*(checking_data_matrix_un[j][k] - cdavg_un[k]);
		}
	chkvar_un[k]=temp/(checking_data_n-1);
	//checking_variance_un = checking_variance_un + temp;
	temp=0;
	}




temp =0.0;
// the following code calculates the un-normalized checking variance
for(j=0; j< checking_data_n; j++)
	{
	for(k=0; k< Out_n; k++)
		{
		temp = checking_data_matrix_un[j][k] - checking_data_average_un;
		temp = temp*temp;
		checking_variance_un = checking_variance_un + temp;
		}
	}
	checking_variance_un = checking_variance_un/((Out_n*checking_data_n)-1);
	printf("%lf is the checking variance non normalized \n", checking_variance_un);

temp =0.0;




checking_data_average_n=0.0;
checking_data_average_n_temp=0.0;
// the following code calculates the cdavg and checking data average both normalized
for(k=0; k< Out_n; k++)
	{	
for(j=0; j< checking_data_n; j++)
		{		
		checking_data_average_n = checking_data_average_n + checking_data_matrix[j][(k+1)*In_vect_n +k];
		}
		cdavg[k]=checking_data_average_n/checking_data_n;
		checking_data_average_n_temp=checking_data_average_n_temp+checking_data_average_n;
		checking_data_average_n=0;
	}
	checking_data_average_n = checking_data_average_n_temp/(Out_n*checking_data_n);
	printf("%lf is the checking datat average  normalized\n", checking_data_average_n);


temp =0.0;
checking_variance_n =0.0;
// the following code calculates the normalized checking variance
for(j=0; j< checking_data_n; j++)
	{
	for(k=0; k< Out_n; k++)
		{
		temp = checking_data_matrix[j][(k+1)*In_vect_n +k] - checking_data_average_n;
		temp = temp*temp;
		checking_variance_n = checking_variance_n + temp;
		}
	}
checking_variance_n = checking_variance_n/((Out_n*checking_data_n)-1);
temp = 0.0;
printf("%lf is the checking variance normalized \n", checking_variance_n);



// the following code calculates the normalized chkvar[k]
temp=0.0;
for(k=0; k< Out_n; k++)
	{
	for(j=0; j< checking_data_n; j++)
		{	
		temp= temp + (checking_data_matrix[j][(k+1)*In_vect_n +k] - cdavg[k])*(checking_data_matrix[j][(k+1)*In_vect_n +k] - cdavg[k]);
	}
	chkvar[k]=temp/(checking_data_n-1);
	//checking_variance_n = checking_variance_n + temp;
	temp=0;
	}


	
	
	

	NMSE_un = min_chk_RMSE_un * min_chk_RMSE_un / checking_variance_un;
	NMSE_n = min_chk_RMSE_n * min_chk_RMSE_n / checking_variance_n;
	NMSE_n2 = min_chk_RMSE_n * min_chk_RMSE_n / chkvariance;
	NDEI_un = sqrt(NMSE_un);
	NDEI_n = sqrt(NMSE_n);




	for(k=0;k<Out_n;k++)
	{
	NMSE[k]=chk_error_n[k]*chk_error_n[k]/chkvar[k];
	NDEI[k]=sqrt(NMSE[k]);
	unNMSE[k]=chk_error_un[k]*chk_error_un[k]/chkvar_un[k];
	unNDEI[k]=sqrt(unNMSE[k]);
	}






	write_result(min_trn_RMSE_epoch ,Out_n,trn_rmse_error,chk_error_un, chk_error_n, NDEI_un, NMSE_un, NDEI_n, NMSE_n, NMSE, NDEI, unNMSE, unNDEI); //debug.c writes to result.txt about the epoch number at which the stopping was done and the corresponding training RMSE and checking RMSE
	printf("Minimum training RMSE is \t %f \t \n",min_trn_RMSE); 
	printf("Minimum training RMSE epoch is \t %d \n",min_trn_RMSE_epoch); 
	printf("Minimum training NMSE is \t %f \t \n",min_trnNMSE); 
	//printf("Minimum training RMSE epoch is \t %d \n",min_trnNMSE_epoch); 
	//printf("Minimum training RMSE is \t %f \t \n",min_trn_RMSE); 
	//printf("Minimum training RMSE epoch is \t %d \n",min_trn_RMSE_epoch); 
	printf("%f \t is the checking RMSE non normalized\n",min_chk_RMSE_un);
	printf("%f \t is the checking RMSE normalized\n",min_chk_RMSE_n);
	//printf("%f \t is the checking RMSE normalized22222222 \n",min_chk_RMSE_n2);
	printf(" checking NMSE non normlized is %f \t NDEI non normalized is %f \n",NMSE_un, NDEI_un); 
	printf("checking NMSE normalized is %f \t NDEI normalized is %f \n",NMSE_n, NDEI_n); 
	printf("checking NMSE2 normalized is %f \n",NMSE_n2); 
	printf("traning data variance is  %f \n",trnvariance); 
	return(0);
Example #15
 int chgeqz_(char *job, char *compq, char *compz, int *n, 
	int *ilo, int *ihi, complex *h__, int *ldh, complex *t, 
	int *ldt, complex *alpha, complex *beta, complex *q, int *ldq, 
	 complex *z__, int *ldz, complex *work, int *lwork, float *
	rwork, int *info)
{
    /* System generated locals */
    int h_dim1, h_offset, q_dim1, q_offset, t_dim1, t_offset, z_dim1, 
	    z_offset, i__1, i__2, i__3, i__4, i__5, i__6;
    float r__1, r__2, r__3, r__4, r__5, r__6;
    complex q__1, q__2, q__3, q__4, q__5, q__6;

    /* Builtin functions */
    double c_abs(complex *);
    void r_cnjg(complex *, complex *);
    double r_imag(complex *);
    void c_div(complex *, complex *, complex *), pow_ci(complex *, complex *, 
	    int *), c_sqrt(complex *, complex *);

    /* Local variables */
    float c__;
    int j;
    complex s, t1;
    int jc, in;
    complex u12;
    int jr;
    complex ad11, ad12, ad21, ad22;
    int jch;
    int ilq, ilz;
    float ulp;
    complex abi22;
    float absb, atol, btol, temp;
    extern  int crot_(int *, complex *, int *, 
	    complex *, int *, float *, complex *);
    float temp2;
    extern  int cscal_(int *, complex *, complex *, 
	    int *);
    extern int lsame_(char *, char *);
    complex ctemp;
    int iiter, ilast, jiter;
    float anorm, bnorm;
    int maxit;
    complex shift;
    float tempr;
    complex ctemp2, ctemp3;
    int ilazr2;
    float ascale, bscale;
    complex signbc;
    extern double slamch_(char *), clanhs_(char *, int *, 
	    complex *, int *, float *);
    extern  int claset_(char *, int *, int *, complex 
	    *, complex *, complex *, int *), clartg_(complex *, 
	    complex *, float *, complex *, complex *);
    float safmin;
    extern  int xerbla_(char *, int *);
    complex eshift;
    int ilschr;
    int icompq, ilastm;
    complex rtdisc;
    int ischur;
    int ilazro;
    int icompz, ifirst, ifrstm, istart;
    int lquery;


/*  -- LAPACK routine (version 3.2) -- */
/*  -- LAPACK is a software package provided by Univ. of Tennessee,    -- */
/*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CHGEQZ computes the eigenvalues of a complex matrix pair (H,T), */
/*  where H is an upper Hessenberg matrix and T is upper triangular, */
/*  using the single-shift QZ method. */
/*  Matrix pairs of this type are produced by the reduction to */
/*  generalized upper Hessenberg form of a complex matrix pair (A,B): */

/*     A = Q1*H*Z1**H,  B = Q1*T*Z1**H, */

/*  as computed by CGGHRD. */

/*  If JOB='S', then the Hessenberg-triangular pair (H,T) is */
/*  also reduced to generalized Schur form, */

/*     H = Q*S*Z**H,  T = Q*P*Z**H, */

/*  where Q and Z are unitary matrices and S and P are upper triangular. */

/*  Optionally, the unitary matrix Q from the generalized Schur */
/*  factorization may be postmultiplied into an input matrix Q1, and the */
/*  unitary matrix Z may be postmultiplied into an input matrix Z1. */
/*  If Q1 and Z1 are the unitary matrices from CGGHRD that reduced */
/*  the matrix pair (A,B) to generalized Hessenberg form, then the output */
/*  matrices Q1*Q and Z1*Z are the unitary factors from the generalized */
/*  Schur factorization of (A,B): */

/*     A = (Q1*Q)*S*(Z1*Z)**H,  B = (Q1*Q)*P*(Z1*Z)**H. */

/*  To avoid overflow, eigenvalues of the matrix pair (H,T) */
/*  (equivalently, of (A,B)) are computed as a pair of complex values */
/*  (alpha,beta).  If beta is nonzero, lambda = alpha / beta is an */
/*  eigenvalue of the generalized nonsymmetric eigenvalue problem (GNEP) */
/*     A*x = lambda*B*x */
/*  and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the */
/*  alternate form of the GNEP */
/*     mu*A*y = B*y. */
/*  The values of alpha and beta for the i-th eigenvalue can be read */
/*  directly from the generalized Schur form:  alpha = S(i,i), */
/*  beta = P(i,i). */

/*  Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix */
/*       Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), */
/*       pp. 241--256. */

/*  Arguments */
/*  ========= */

/*  JOB     (input) CHARACTER*1 */
/*          = 'E': Compute eigenvalues only; */
/*          = 'S': Compute eigenvalues and the Schur form. */

/*  COMPQ   (input) CHARACTER*1 */
/*          = 'N': Left Schur vectors (Q) are not computed; */
/*          = 'I': Q is initialized to the unit matrix and the matrix Q */
/*                 of left Schur vectors of (H,T) is returned; */
/*          = 'V': Q must contain a unitary matrix Q1 on entry and */
/*                 the product Q1*Q is returned. */

/*  COMPZ   (input) CHARACTER*1 */
/*          = 'N': Right Schur vectors (Z) are not computed; */
/*          = 'I': Z is initialized to the unit matrix and the matrix Z */
/*                 of right Schur vectors of (H,T) is returned; */
/*          = 'V': Z must contain a unitary matrix Z1 on entry and */
/*                 the product Z1*Z is returned. */

/*  N       (input) INTEGER */
/*          The order of the matrices H, T, Q, and Z.  N >= 0. */

/*  ILO     (input) INTEGER */
/*  IHI     (input) INTEGER */
/*          ILO and IHI mark the rows and columns of H which are in */
/*          Hessenberg form.  It is assumed that A is already upper */
/*          triangular in rows and columns 1:ILO-1 and IHI+1:N. */
/*          If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. */

/*  H       (input/output) COMPLEX array, dimension (LDH, N) */
/*          On entry, the N-by-N upper Hessenberg matrix H. */
/*          On exit, if JOB = 'S', H contains the upper triangular */
/*          matrix S from the generalized Schur factorization. */
/*          If JOB = 'E', the diagonal of H matches that of S, but */
/*          the rest of H is unspecified. */

/*  LDH     (input) INTEGER */
/*          The leading dimension of the array H.  LDH >= MAX( 1, N ). */

/*  T       (input/output) COMPLEX array, dimension (LDT, N) */
/*          On entry, the N-by-N upper triangular matrix T. */
/*          On exit, if JOB = 'S', T contains the upper triangular */
/*          matrix P from the generalized Schur factorization. */
/*          If JOB = 'E', the diagonal of T matches that of P, but */
/*          the rest of T is unspecified. */

/*  LDT     (input) INTEGER */
/*          The leading dimension of the array T.  LDT >= MAX( 1, N ). */

/*  ALPHA   (output) COMPLEX array, dimension (N) */
/*          The complex scalars alpha that define the eigenvalues of */
/*          GNEP.  ALPHA(i) = S(i,i) in the generalized Schur */
/*          factorization. */

/*  BETA    (output) COMPLEX array, dimension (N) */
/*          The float non-negative scalars beta that define the */
/*          eigenvalues of GNEP.  BETA(i) = P(i,i) in the generalized */
/*          Schur factorization. */

/*          Together, the quantities alpha = ALPHA(j) and beta = BETA(j) */
/*          represent the j-th eigenvalue of the matrix pair (A,B), in */
/*          one of the forms lambda = alpha/beta or mu = beta/alpha. */
/*          Since either lambda or mu may overflow, they should not, */
/*          in general, be computed. */

/*  Q       (input/output) COMPLEX array, dimension (LDQ, N) */
/*          On entry, if COMPQ = 'V', the unitary matrix Q1 used in the */
/*          reduction of (A,B) to generalized Hessenberg form. */
/*          On exit, if COMPQ = 'I', the unitary matrix of left Schur */
/*          vectors of (H,T), and if COMPQ = 'V', the unitary matrix of */
/*          left Schur vectors of (A,B). */
/*          Not referenced if COMPQ = 'N'. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q.  LDQ >= 1. */
/*          If COMPQ='V' or 'I', then LDQ >= N. */

/*  Z       (input/output) COMPLEX array, dimension (LDZ, N) */
/*          On entry, if COMPZ = 'V', the unitary matrix Z1 used in the */
/*          reduction of (A,B) to generalized Hessenberg form. */
/*          On exit, if COMPZ = 'I', the unitary matrix of right Schur */
/*          vectors of (H,T), and if COMPZ = 'V', the unitary matrix of */
/*          right Schur vectors of (A,B). */
/*          Not referenced if COMPZ = 'N'. */

/*  LDZ     (input) INTEGER */
/*          The leading dimension of the array Z.  LDZ >= 1. */
/*          If COMPZ='V' or 'I', then LDZ >= N. */

/*  WORK    (workspace/output) COMPLEX array, dimension (MAX(1,LWORK)) */
/*          On exit, if INFO >= 0, WORK(1) returns the optimal LWORK. */

/*  LWORK   (input) INTEGER */
/*          The dimension of the array WORK.  LWORK >= MAX(1,N). */

/*          If LWORK = -1, then a workspace query is assumed; the routine */
/*          only calculates the optimal size of the WORK array, returns */
/*          this value as the first entry of the WORK array, and no error */
/*          message related to LWORK is issued by XERBLA. */

/*  RWORK   (workspace) REAL array, dimension (N) */

/*  INFO    (output) INTEGER */
/*          = 0: successful exit */
/*          < 0: if INFO = -i, the i-th argument had an illegal value */
/*          = 1,...,N: the QZ iteration did not converge.  (H,T) is not */
/*                     in Schur form, but ALPHA(i) and BETA(i), */
/*                     i=INFO+1,...,N should be correct. */
/*          = N+1,...,2*N: the shift calculation failed.  (H,T) is not */
/*                     in Schur form, but ALPHA(i) and BETA(i), */
/*                     i=INFO-N+1,...,N should be correct. */

/*  Further Details */
/*  =============== */

/*  We assume that complex ABS works as long as its value is less than */
/*  overflow. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Statement Functions .. */
/*     .. */
/*     .. Statement Function definitions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Decode JOB, COMPQ, COMPZ */

    /* Parameter adjustments */
    h_dim1 = *ldh;
    h_offset = 1 + h_dim1;
    h__ -= h_offset;
    t_dim1 = *ldt;
    t_offset = 1 + t_dim1;
    t -= t_offset;
    --alpha;
    --beta;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;
    --work;
    --rwork;

    /* Function Body */
    if (lsame_(job, "E")) {
	ilschr = FALSE;
	ischur = 1;
    } else if (lsame_(job, "S")) {
	ilschr = TRUE;
	ischur = 2;
    } else {
	ischur = 0;
    }

    if (lsame_(compq, "N")) {
	ilq = FALSE;
	icompq = 1;
    } else if (lsame_(compq, "V")) {
	ilq = TRUE;
	icompq = 2;
    } else if (lsame_(compq, "I")) {
	ilq = TRUE;
	icompq = 3;
    } else {
	icompq = 0;
    }

    if (lsame_(compz, "N")) {
	ilz = FALSE;
	icompz = 1;
    } else if (lsame_(compz, "V")) {
	ilz = TRUE;
	icompz = 2;
    } else if (lsame_(compz, "I")) {
	ilz = TRUE;
	icompz = 3;
    } else {
	icompz = 0;
    }

/*     Check Argument Values */

    *info = 0;
    i__1 = MAX(1,*n);
    work[1].r = (float) i__1, work[1].i = 0.f;
    lquery = *lwork == -1;
    if (ischur == 0) {
	*info = -1;
    } else if (icompq == 0) {
	*info = -2;
    } else if (icompz == 0) {
	*info = -3;
    } else if (*n < 0) {
	*info = -4;
    } else if (*ilo < 1) {
	*info = -5;
    } else if (*ihi > *n || *ihi < *ilo - 1) {
	*info = -6;
    } else if (*ldh < *n) {
	*info = -8;
    } else if (*ldt < *n) {
	*info = -10;
    } else if (*ldq < 1 || ilq && *ldq < *n) {
	*info = -14;
    } else if (*ldz < 1 || ilz && *ldz < *n) {
	*info = -16;
    } else if (*lwork < MAX(1,*n) && ! lquery) {
	*info = -18;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CHGEQZ", &i__1);
	return 0;
    } else if (lquery) {
	return 0;
    }

/*     Quick return if possible */

/*     WORK( 1 ) = CMPLX( 1 ) */
    if (*n <= 0) {
	work[1].r = 1.f, work[1].i = 0.f;
	return 0;
    }

/*     Initialize Q and Z */

    if (icompq == 3) {
	claset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
    }
    if (icompz == 3) {
	claset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
    }

/*     Machine Constants */

    in = *ihi + 1 - *ilo;
    safmin = slamch_("S");
    ulp = slamch_("E") * slamch_("B");
    anorm = clanhs_("F", &in, &h__[*ilo + *ilo * h_dim1], ldh, &rwork[1]);
    bnorm = clanhs_("F", &in, &t[*ilo + *ilo * t_dim1], ldt, &rwork[1]);
/* Computing MAX */
    r__1 = safmin, r__2 = ulp * anorm;
    atol = MAX(r__1,r__2);
/* Computing MAX */
    r__1 = safmin, r__2 = ulp * bnorm;
    btol = MAX(r__1,r__2);
    ascale = 1.f / MAX(safmin,anorm);
    bscale = 1.f / MAX(safmin,bnorm);


/*     Set Eigenvalues IHI+1:N */

    i__1 = *n;
    for (j = *ihi + 1; j <= i__1; ++j) {
	absb = c_abs(&t[j + j * t_dim1]);
	if (absb > safmin) {
	    i__2 = j + j * t_dim1;
	    q__2.r = t[i__2].r / absb, q__2.i = t[i__2].i / absb;
	    r_cnjg(&q__1, &q__2);
	    signbc.r = q__1.r, signbc.i = q__1.i;
	    i__2 = j + j * t_dim1;
	    t[i__2].r = absb, t[i__2].i = 0.f;
	    if (ilschr) {
		i__2 = j - 1;
		cscal_(&i__2, &signbc, &t[j * t_dim1 + 1], &c__1);
		cscal_(&j, &signbc, &h__[j * h_dim1 + 1], &c__1);
	    } else {
		i__2 = j + j * h_dim1;
		i__3 = j + j * h_dim1;
		q__1.r = h__[i__3].r * signbc.r - h__[i__3].i * signbc.i, 
			q__1.i = h__[i__3].r * signbc.i + h__[i__3].i * 
			signbc.r;
		h__[i__2].r = q__1.r, h__[i__2].i = q__1.i;
	    }
	    if (ilz) {
		cscal_(n, &signbc, &z__[j * z_dim1 + 1], &c__1);
	    }
	} else {
	    i__2 = j + j * t_dim1;
	    t[i__2].r = 0.f, t[i__2].i = 0.f;
	}
	i__2 = j;
	i__3 = j + j * h_dim1;
	alpha[i__2].r = h__[i__3].r, alpha[i__2].i = h__[i__3].i;
	i__2 = j;
	i__3 = j + j * t_dim1;
	beta[i__2].r = t[i__3].r, beta[i__2].i = t[i__3].i;
/* L10: */
    }

/*     If IHI < ILO, skip QZ steps */

    if (*ihi < *ilo) {
	goto L190;
    }

/*     MAIN QZ ITERATION LOOP */

/*     Initialize dynamic indices */

/*     Eigenvalues ILAST+1:N have been found. */
/*        Column operations modify rows IFRSTM:whatever */
/*        Row operations modify columns whatever:ILASTM */

/*     If only eigenvalues are being computed, then */
/*        IFRSTM is the row of the last splitting row above row ILAST; */
/*        this is always at least ILO. */
/*     IITER counts iterations since the last eigenvalue was found, */
/*        to tell when to use an extraordinary shift. */
/*     MAXIT is the maximum number of QZ sweeps allowed. */

    ilast = *ihi;
    if (ilschr) {
	ifrstm = 1;
	ilastm = *n;
    } else {
	ifrstm = *ilo;
	ilastm = *ihi;
    }
    iiter = 0;
    eshift.r = 0.f, eshift.i = 0.f;
    maxit = (*ihi - *ilo + 1) * 30;

    i__1 = maxit;
    for (jiter = 1; jiter <= i__1; ++jiter) {

/*        Check for too many iterations. */

	if (jiter > maxit) {
	    goto L180;
	}

/*        Split the matrix if possible. */

/*        Two tests: */
/*           1: H(j,j-1)=0  or  j=ILO */
/*           2: T(j,j)=0 */

/*        Special case: j=ILAST */

	if (ilast == *ilo) {
	    goto L60;
	} else {
	    i__2 = ilast + (ilast - 1) * h_dim1;
	    if ((r__1 = h__[i__2].r, ABS(r__1)) + (r__2 = r_imag(&h__[ilast 
		    + (ilast - 1) * h_dim1]), ABS(r__2)) <= atol) {
		i__2 = ilast + (ilast - 1) * h_dim1;
		h__[i__2].r = 0.f, h__[i__2].i = 0.f;
		goto L60;
	    }
	}

	if (c_abs(&t[ilast + ilast * t_dim1]) <= btol) {
	    i__2 = ilast + ilast * t_dim1;
	    t[i__2].r = 0.f, t[i__2].i = 0.f;
	    goto L50;
	}

/*        General case: j<ILAST */

	i__2 = *ilo;
	for (j = ilast - 1; j >= i__2; --j) {

/*           Test 1: for H(j,j-1)=0 or j=ILO */

	    if (j == *ilo) {
		ilazro = TRUE;
	    } else {
		i__3 = j + (j - 1) * h_dim1;
		if ((r__1 = h__[i__3].r, ABS(r__1)) + (r__2 = r_imag(&h__[j 
			+ (j - 1) * h_dim1]), ABS(r__2)) <= atol) {
		    i__3 = j + (j - 1) * h_dim1;
		    h__[i__3].r = 0.f, h__[i__3].i = 0.f;
		    ilazro = TRUE;
		} else {
		    ilazro = FALSE;
		}
	    }

/*           Test 2: for T(j,j)=0 */

	    if (c_abs(&t[j + j * t_dim1]) < btol) {
		i__3 = j + j * t_dim1;
		t[i__3].r = 0.f, t[i__3].i = 0.f;

/*              Test 1a: Check for 2 consecutive small subdiagonals in A */

		ilazr2 = FALSE;
		if (! ilazro) {
		    i__3 = j + (j - 1) * h_dim1;
		    i__4 = j + 1 + j * h_dim1;
		    i__5 = j + j * h_dim1;
		    if (((r__1 = h__[i__3].r, ABS(r__1)) + (r__2 = r_imag(&
			    h__[j + (j - 1) * h_dim1]), ABS(r__2))) * (
			    ascale * ((r__3 = h__[i__4].r, ABS(r__3)) + (
			    r__4 = r_imag(&h__[j + 1 + j * h_dim1]), ABS(
			    r__4)))) <= ((r__5 = h__[i__5].r, ABS(r__5)) + (
			    r__6 = r_imag(&h__[j + j * h_dim1]), ABS(r__6))) 
			    * (ascale * atol)) {
			ilazr2 = TRUE;
		    }
		}

/*              If both tests pass (1 & 2), i.e., the leading diagonal */
/*              element of B in the block is zero, split a 1x1 block off */
/*              at the top. (I.e., at the J-th row/column) The leading */
/*              diagonal element of the remainder can also be zero, so */
/*              this may have to be done repeatedly. */

		if (ilazro || ilazr2) {
		    i__3 = ilast - 1;
		    for (jch = j; jch <= i__3; ++jch) {
			i__4 = jch + jch * h_dim1;
			ctemp.r = h__[i__4].r, ctemp.i = h__[i__4].i;
			clartg_(&ctemp, &h__[jch + 1 + jch * h_dim1], &c__, &
				s, &h__[jch + jch * h_dim1]);
			i__4 = jch + 1 + jch * h_dim1;
			h__[i__4].r = 0.f, h__[i__4].i = 0.f;
			i__4 = ilastm - jch;
			crot_(&i__4, &h__[jch + (jch + 1) * h_dim1], ldh, &
				h__[jch + 1 + (jch + 1) * h_dim1], ldh, &c__, 
				&s);
			i__4 = ilastm - jch;
			crot_(&i__4, &t[jch + (jch + 1) * t_dim1], ldt, &t[
				jch + 1 + (jch + 1) * t_dim1], ldt, &c__, &s);
			if (ilq) {
			    r_cnjg(&q__1, &s);
			    crot_(n, &q[jch * q_dim1 + 1], &c__1, &q[(jch + 1)
				     * q_dim1 + 1], &c__1, &c__, &q__1);
			}
			if (ilazr2) {
			    i__4 = jch + (jch - 1) * h_dim1;
			    i__5 = jch + (jch - 1) * h_dim1;
			    q__1.r = c__ * h__[i__5].r, q__1.i = c__ * h__[
				    i__5].i;
			    h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
			}
			ilazr2 = FALSE;
			i__4 = jch + 1 + (jch + 1) * t_dim1;
			if ((r__1 = t[i__4].r, ABS(r__1)) + (r__2 = r_imag(&
				t[jch + 1 + (jch + 1) * t_dim1]), ABS(r__2)) 
				>= btol) {
			    if (jch + 1 >= ilast) {
				goto L60;
			    } else {
				ifirst = jch + 1;
				goto L70;
			    }
			}
			i__4 = jch + 1 + (jch + 1) * t_dim1;
			t[i__4].r = 0.f, t[i__4].i = 0.f;
/* L20: */
		    }
		    goto L50;
		} else {

/*                 Only test 2 passed -- chase the zero to T(ILAST,ILAST) */
/*                 Then process as in the case T(ILAST,ILAST)=0 */

		    i__3 = ilast - 1;
		    for (jch = j; jch <= i__3; ++jch) {
			i__4 = jch + (jch + 1) * t_dim1;
			ctemp.r = t[i__4].r, ctemp.i = t[i__4].i;
			clartg_(&ctemp, &t[jch + 1 + (jch + 1) * t_dim1], &
				c__, &s, &t[jch + (jch + 1) * t_dim1]);
			i__4 = jch + 1 + (jch + 1) * t_dim1;
			t[i__4].r = 0.f, t[i__4].i = 0.f;
			if (jch < ilastm - 1) {
			    i__4 = ilastm - jch - 1;
			    crot_(&i__4, &t[jch + (jch + 2) * t_dim1], ldt, &
				    t[jch + 1 + (jch + 2) * t_dim1], ldt, &
				    c__, &s);
			}
			i__4 = ilastm - jch + 2;
			crot_(&i__4, &h__[jch + (jch - 1) * h_dim1], ldh, &
				h__[jch + 1 + (jch - 1) * h_dim1], ldh, &c__, 
				&s);
			if (ilq) {
			    r_cnjg(&q__1, &s);
			    crot_(n, &q[jch * q_dim1 + 1], &c__1, &q[(jch + 1)
				     * q_dim1 + 1], &c__1, &c__, &q__1);
			}
			i__4 = jch + 1 + jch * h_dim1;
			ctemp.r = h__[i__4].r, ctemp.i = h__[i__4].i;
			clartg_(&ctemp, &h__[jch + 1 + (jch - 1) * h_dim1], &
				c__, &s, &h__[jch + 1 + jch * h_dim1]);
			i__4 = jch + 1 + (jch - 1) * h_dim1;
			h__[i__4].r = 0.f, h__[i__4].i = 0.f;
			i__4 = jch + 1 - ifrstm;
			crot_(&i__4, &h__[ifrstm + jch * h_dim1], &c__1, &h__[
				ifrstm + (jch - 1) * h_dim1], &c__1, &c__, &s)
				;
			i__4 = jch - ifrstm;
			crot_(&i__4, &t[ifrstm + jch * t_dim1], &c__1, &t[
				ifrstm + (jch - 1) * t_dim1], &c__1, &c__, &s)
				;
			if (ilz) {
			    crot_(n, &z__[jch * z_dim1 + 1], &c__1, &z__[(jch 
				    - 1) * z_dim1 + 1], &c__1, &c__, &s);
			}
/* L30: */
		    }
		    goto L50;
		}
	    } else if (ilazro) {

/*              Only test 1 passed -- work on J:ILAST */

		ifirst = j;
		goto L70;
	    }

/*           Neither test passed -- try next J */

/* L40: */
	}

/*        (Drop-through is "impossible") */

	*info = (*n << 1) + 1;
	goto L210;

/*        T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a */
/*        1x1 block. */

L50:
	i__2 = ilast + ilast * h_dim1;
	ctemp.r = h__[i__2].r, ctemp.i = h__[i__2].i;
	clartg_(&ctemp, &h__[ilast + (ilast - 1) * h_dim1], &c__, &s, &h__[
		ilast + ilast * h_dim1]);
	i__2 = ilast + (ilast - 1) * h_dim1;
	h__[i__2].r = 0.f, h__[i__2].i = 0.f;
	i__2 = ilast - ifrstm;
	crot_(&i__2, &h__[ifrstm + ilast * h_dim1], &c__1, &h__[ifrstm + (
		ilast - 1) * h_dim1], &c__1, &c__, &s);
	i__2 = ilast - ifrstm;
	crot_(&i__2, &t[ifrstm + ilast * t_dim1], &c__1, &t[ifrstm + (ilast - 
		1) * t_dim1], &c__1, &c__, &s);
	if (ilz) {
	    crot_(n, &z__[ilast * z_dim1 + 1], &c__1, &z__[(ilast - 1) * 
		    z_dim1 + 1], &c__1, &c__, &s);
	}

/*        H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA */

L60:
	absb = c_abs(&t[ilast + ilast * t_dim1]);
	if (absb > safmin) {
	    i__2 = ilast + ilast * t_dim1;
	    q__2.r = t[i__2].r / absb, q__2.i = t[i__2].i / absb;
	    r_cnjg(&q__1, &q__2);
	    signbc.r = q__1.r, signbc.i = q__1.i;
	    i__2 = ilast + ilast * t_dim1;
	    t[i__2].r = absb, t[i__2].i = 0.f;
	    if (ilschr) {
		i__2 = ilast - ifrstm;
		cscal_(&i__2, &signbc, &t[ifrstm + ilast * t_dim1], &c__1);
		i__2 = ilast + 1 - ifrstm;
		cscal_(&i__2, &signbc, &h__[ifrstm + ilast * h_dim1], &c__1);
	    } else {
		i__2 = ilast + ilast * h_dim1;
		i__3 = ilast + ilast * h_dim1;
		q__1.r = h__[i__3].r * signbc.r - h__[i__3].i * signbc.i, 
			q__1.i = h__[i__3].r * signbc.i + h__[i__3].i * 
			signbc.r;
		h__[i__2].r = q__1.r, h__[i__2].i = q__1.i;
	    }
	    if (ilz) {
		cscal_(n, &signbc, &z__[ilast * z_dim1 + 1], &c__1);
	    }
	} else {
	    i__2 = ilast + ilast * t_dim1;
	    t[i__2].r = 0.f, t[i__2].i = 0.f;
	}
	i__2 = ilast;
	i__3 = ilast + ilast * h_dim1;
	alpha[i__2].r = h__[i__3].r, alpha[i__2].i = h__[i__3].i;
	i__2 = ilast;
	i__3 = ilast + ilast * t_dim1;
	beta[i__2].r = t[i__3].r, beta[i__2].i = t[i__3].i;

/*        Go to next block -- exit if finished. */

	--ilast;
	if (ilast < *ilo) {
	    goto L190;
	}

/*        Reset counters */

	iiter = 0;
	eshift.r = 0.f, eshift.i = 0.f;
	if (! ilschr) {
	    ilastm = ilast;
	    if (ifrstm > ilast) {
		ifrstm = *ilo;
	    }
	}
	goto L160;

/*        QZ step */

/*        This iteration only involves rows/columns IFIRST:ILAST.  We */
/*        assume IFIRST < ILAST, and that the diagonal of B is non-zero. */

L70:
	++iiter;
	if (! ilschr) {
	    ifrstm = ifirst;
	}

/*        Compute the Shift. */

/*        At this point, IFIRST < ILAST, and the diagonal elements of */
/*        T(IFIRST:ILAST,IFIRST:ILAST) are larger than BTOL (in */
/*        magnitude) */

	if (iiter / 10 * 10 != iiter) {

/*           The Wilkinson shift (AEP p.512), i.e., the eigenvalue of */
/*           the bottom-right 2x2 block of A inv(B) which is nearest to */
/*           the bottom-right element. */

/*           We factor B as U*D, where U has unit diagonals, and */
/*           compute (A*inv(D))*inv(U). */

	    i__2 = ilast - 1 + ilast * t_dim1;
	    q__2.r = bscale * t[i__2].r, q__2.i = bscale * t[i__2].i;
	    i__3 = ilast + ilast * t_dim1;
	    q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
	    c_div(&q__1, &q__2, &q__3);
	    u12.r = q__1.r, u12.i = q__1.i;
	    i__2 = ilast - 1 + (ilast - 1) * h_dim1;
	    q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
	    i__3 = ilast - 1 + (ilast - 1) * t_dim1;
	    q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
	    c_div(&q__1, &q__2, &q__3);
	    ad11.r = q__1.r, ad11.i = q__1.i;
	    i__2 = ilast + (ilast - 1) * h_dim1;
	    q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
	    i__3 = ilast - 1 + (ilast - 1) * t_dim1;
	    q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
	    c_div(&q__1, &q__2, &q__3);
	    ad21.r = q__1.r, ad21.i = q__1.i;
	    i__2 = ilast - 1 + ilast * h_dim1;
	    q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
	    i__3 = ilast + ilast * t_dim1;
	    q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
	    c_div(&q__1, &q__2, &q__3);
	    ad12.r = q__1.r, ad12.i = q__1.i;
	    i__2 = ilast + ilast * h_dim1;
	    q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
	    i__3 = ilast + ilast * t_dim1;
	    q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
	    c_div(&q__1, &q__2, &q__3);
	    ad22.r = q__1.r, ad22.i = q__1.i;
	    q__2.r = u12.r * ad21.r - u12.i * ad21.i, q__2.i = u12.r * ad21.i 
		    + u12.i * ad21.r;
	    q__1.r = ad22.r - q__2.r, q__1.i = ad22.i - q__2.i;
	    abi22.r = q__1.r, abi22.i = q__1.i;

	    q__2.r = ad11.r + abi22.r, q__2.i = ad11.i + abi22.i;
	    q__1.r = q__2.r * .5f, q__1.i = q__2.i * .5f;
	    t1.r = q__1.r, t1.i = q__1.i;
	    pow_ci(&q__4, &t1, &c__2);
	    q__5.r = ad12.r * ad21.r - ad12.i * ad21.i, q__5.i = ad12.r * 
		    ad21.i + ad12.i * ad21.r;
	    q__3.r = q__4.r + q__5.r, q__3.i = q__4.i + q__5.i;
	    q__6.r = ad11.r * ad22.r - ad11.i * ad22.i, q__6.i = ad11.r * 
		    ad22.i + ad11.i * ad22.r;
	    q__2.r = q__3.r - q__6.r, q__2.i = q__3.i - q__6.i;
	    c_sqrt(&q__1, &q__2);
	    rtdisc.r = q__1.r, rtdisc.i = q__1.i;
	    q__1.r = t1.r - abi22.r, q__1.i = t1.i - abi22.i;
	    q__2.r = t1.r - abi22.r, q__2.i = t1.i - abi22.i;
	    temp = q__1.r * rtdisc.r + r_imag(&q__2) * r_imag(&rtdisc);
	    if (temp <= 0.f) {
		q__1.r = t1.r + rtdisc.r, q__1.i = t1.i + rtdisc.i;
		shift.r = q__1.r, shift.i = q__1.i;
	    } else {
		q__1.r = t1.r - rtdisc.r, q__1.i = t1.i - rtdisc.i;
		shift.r = q__1.r, shift.i = q__1.i;
	    }
	} else {

/*           Exceptional shift.  Chosen for no particularly good reason. */

	    i__2 = ilast - 1 + ilast * h_dim1;
	    q__4.r = ascale * h__[i__2].r, q__4.i = ascale * h__[i__2].i;
	    i__3 = ilast - 1 + (ilast - 1) * t_dim1;
	    q__5.r = bscale * t[i__3].r, q__5.i = bscale * t[i__3].i;
	    c_div(&q__3, &q__4, &q__5);
	    r_cnjg(&q__2, &q__3);
	    q__1.r = eshift.r + q__2.r, q__1.i = eshift.i + q__2.i;
	    eshift.r = q__1.r, eshift.i = q__1.i;
	    shift.r = eshift.r, shift.i = eshift.i;
	}

/*        Now check for two consecutive small subdiagonals. */

	i__2 = ifirst + 1;
	for (j = ilast - 1; j >= i__2; --j) {
	    istart = j;
	    i__3 = j + j * h_dim1;
	    q__2.r = ascale * h__[i__3].r, q__2.i = ascale * h__[i__3].i;
	    i__4 = j + j * t_dim1;
	    q__4.r = bscale * t[i__4].r, q__4.i = bscale * t[i__4].i;
	    q__3.r = shift.r * q__4.r - shift.i * q__4.i, q__3.i = shift.r * 
		    q__4.i + shift.i * q__4.r;
	    q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
	    ctemp.r = q__1.r, ctemp.i = q__1.i;
	    temp = (r__1 = ctemp.r, ABS(r__1)) + (r__2 = r_imag(&ctemp), 
		    ABS(r__2));
	    i__3 = j + 1 + j * h_dim1;
	    temp2 = ascale * ((r__1 = h__[i__3].r, ABS(r__1)) + (r__2 = 
		    r_imag(&h__[j + 1 + j * h_dim1]), ABS(r__2)));
	    tempr = MAX(temp,temp2);
	    if (tempr < 1.f && tempr != 0.f) {
		temp /= tempr;
		temp2 /= tempr;
	    }
	    i__3 = j + (j - 1) * h_dim1;
	    if (((r__1 = h__[i__3].r, ABS(r__1)) + (r__2 = r_imag(&h__[j + (
		    j - 1) * h_dim1]), ABS(r__2))) * temp2 <= temp * atol) {
		goto L90;
	    }
/* L80: */
	}

	istart = ifirst;
	i__2 = ifirst + ifirst * h_dim1;
	q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
	i__3 = ifirst + ifirst * t_dim1;
	q__4.r = bscale * t[i__3].r, q__4.i = bscale * t[i__3].i;
	q__3.r = shift.r * q__4.r - shift.i * q__4.i, q__3.i = shift.r * 
		q__4.i + shift.i * q__4.r;
	q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
	ctemp.r = q__1.r, ctemp.i = q__1.i;
L90:

/*        Do an implicit-shift QZ sweep. */

/*        Initial Q */

	i__2 = istart + 1 + istart * h_dim1;
	q__1.r = ascale * h__[i__2].r, q__1.i = ascale * h__[i__2].i;
	ctemp2.r = q__1.r, ctemp2.i = q__1.i;
	clartg_(&ctemp, &ctemp2, &c__, &s, &ctemp3);

/*        Sweep */

	i__2 = ilast - 1;
	for (j = istart; j <= i__2; ++j) {
	    if (j > istart) {
		i__3 = j + (j - 1) * h_dim1;
		ctemp.r = h__[i__3].r, ctemp.i = h__[i__3].i;
		clartg_(&ctemp, &h__[j + 1 + (j - 1) * h_dim1], &c__, &s, &
			h__[j + (j - 1) * h_dim1]);
		i__3 = j + 1 + (j - 1) * h_dim1;
		h__[i__3].r = 0.f, h__[i__3].i = 0.f;
	    }

	    i__3 = ilastm;
	    for (jc = j; jc <= i__3; ++jc) {
		i__4 = j + jc * h_dim1;
		q__2.r = c__ * h__[i__4].r, q__2.i = c__ * h__[i__4].i;
		i__5 = j + 1 + jc * h_dim1;
		q__3.r = s.r * h__[i__5].r - s.i * h__[i__5].i, q__3.i = s.r *
			 h__[i__5].i + s.i * h__[i__5].r;
		q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
		ctemp.r = q__1.r, ctemp.i = q__1.i;
		i__4 = j + 1 + jc * h_dim1;
		r_cnjg(&q__4, &s);
		q__3.r = -q__4.r, q__3.i = -q__4.i;
		i__5 = j + jc * h_dim1;
		q__2.r = q__3.r * h__[i__5].r - q__3.i * h__[i__5].i, q__2.i =
			 q__3.r * h__[i__5].i + q__3.i * h__[i__5].r;
		i__6 = j + 1 + jc * h_dim1;
		q__5.r = c__ * h__[i__6].r, q__5.i = c__ * h__[i__6].i;
		q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
		h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
		i__4 = j + jc * h_dim1;
		h__[i__4].r = ctemp.r, h__[i__4].i = ctemp.i;
		i__4 = j + jc * t_dim1;
		q__2.r = c__ * t[i__4].r, q__2.i = c__ * t[i__4].i;
		i__5 = j + 1 + jc * t_dim1;
		q__3.r = s.r * t[i__5].r - s.i * t[i__5].i, q__3.i = s.r * t[
			i__5].i + s.i * t[i__5].r;
		q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
		ctemp2.r = q__1.r, ctemp2.i = q__1.i;
		i__4 = j + 1 + jc * t_dim1;
		r_cnjg(&q__4, &s);
		q__3.r = -q__4.r, q__3.i = -q__4.i;
		i__5 = j + jc * t_dim1;
		q__2.r = q__3.r * t[i__5].r - q__3.i * t[i__5].i, q__2.i = 
			q__3.r * t[i__5].i + q__3.i * t[i__5].r;
		i__6 = j + 1 + jc * t_dim1;
		q__5.r = c__ * t[i__6].r, q__5.i = c__ * t[i__6].i;
		q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
		t[i__4].r = q__1.r, t[i__4].i = q__1.i;
		i__4 = j + jc * t_dim1;
		t[i__4].r = ctemp2.r, t[i__4].i = ctemp2.i;
/* L100: */
	    }
	    if (ilq) {
		i__3 = *n;
		for (jr = 1; jr <= i__3; ++jr) {
		    i__4 = jr + j * q_dim1;
		    q__2.r = c__ * q[i__4].r, q__2.i = c__ * q[i__4].i;
		    r_cnjg(&q__4, &s);
		    i__5 = jr + (j + 1) * q_dim1;
		    q__3.r = q__4.r * q[i__5].r - q__4.i * q[i__5].i, q__3.i =
			     q__4.r * q[i__5].i + q__4.i * q[i__5].r;
		    q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
		    ctemp.r = q__1.r, ctemp.i = q__1.i;
		    i__4 = jr + (j + 1) * q_dim1;
		    q__3.r = -s.r, q__3.i = -s.i;
		    i__5 = jr + j * q_dim1;
		    q__2.r = q__3.r * q[i__5].r - q__3.i * q[i__5].i, q__2.i =
			     q__3.r * q[i__5].i + q__3.i * q[i__5].r;
		    i__6 = jr + (j + 1) * q_dim1;
		    q__4.r = c__ * q[i__6].r, q__4.i = c__ * q[i__6].i;
		    q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
		    q[i__4].r = q__1.r, q[i__4].i = q__1.i;
		    i__4 = jr + j * q_dim1;
		    q[i__4].r = ctemp.r, q[i__4].i = ctemp.i;
/* L110: */
		}
	    }

	    i__3 = j + 1 + (j + 1) * t_dim1;
	    ctemp.r = t[i__3].r, ctemp.i = t[i__3].i;
	    clartg_(&ctemp, &t[j + 1 + j * t_dim1], &c__, &s, &t[j + 1 + (j + 
		    1) * t_dim1]);
	    i__3 = j + 1 + j * t_dim1;
	    t[i__3].r = 0.f, t[i__3].i = 0.f;

/* Computing MIN */
	    i__4 = j + 2;
	    i__3 = MIN(i__4,ilast);
	    for (jr = ifrstm; jr <= i__3; ++jr) {
		i__4 = jr + (j + 1) * h_dim1;
		q__2.r = c__ * h__[i__4].r, q__2.i = c__ * h__[i__4].i;
		i__5 = jr + j * h_dim1;
		q__3.r = s.r * h__[i__5].r - s.i * h__[i__5].i, q__3.i = s.r *
			 h__[i__5].i + s.i * h__[i__5].r;
		q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
		ctemp.r = q__1.r, ctemp.i = q__1.i;
		i__4 = jr + j * h_dim1;
		r_cnjg(&q__4, &s);
		q__3.r = -q__4.r, q__3.i = -q__4.i;
		i__5 = jr + (j + 1) * h_dim1;
		q__2.r = q__3.r * h__[i__5].r - q__3.i * h__[i__5].i, q__2.i =
			 q__3.r * h__[i__5].i + q__3.i * h__[i__5].r;
		i__6 = jr + j * h_dim1;
		q__5.r = c__ * h__[i__6].r, q__5.i = c__ * h__[i__6].i;
		q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
		h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
		i__4 = jr + (j + 1) * h_dim1;
		h__[i__4].r = ctemp.r, h__[i__4].i = ctemp.i;
/* L120: */
	    }
	    i__3 = j;
	    for (jr = ifrstm; jr <= i__3; ++jr) {
		i__4 = jr + (j + 1) * t_dim1;
		q__2.r = c__ * t[i__4].r, q__2.i = c__ * t[i__4].i;
		i__5 = jr + j * t_dim1;
		q__3.r = s.r * t[i__5].r - s.i * t[i__5].i, q__3.i = s.r * t[
			i__5].i + s.i * t[i__5].r;
		q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
		ctemp.r = q__1.r, ctemp.i = q__1.i;
		i__4 = jr + j * t_dim1;
		r_cnjg(&q__4, &s);
		q__3.r = -q__4.r, q__3.i = -q__4.i;
		i__5 = jr + (j + 1) * t_dim1;
		q__2.r = q__3.r * t[i__5].r - q__3.i * t[i__5].i, q__2.i = 
			q__3.r * t[i__5].i + q__3.i * t[i__5].r;
		i__6 = jr + j * t_dim1;
		q__5.r = c__ * t[i__6].r, q__5.i = c__ * t[i__6].i;
		q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
		t[i__4].r = q__1.r, t[i__4].i = q__1.i;
		i__4 = jr + (j + 1) * t_dim1;
		t[i__4].r = ctemp.r, t[i__4].i = ctemp.i;
/* L130: */
	    }
	    if (ilz) {
		i__3 = *n;
		for (jr = 1; jr <= i__3; ++jr) {
		    i__4 = jr + (j + 1) * z_dim1;
		    q__2.r = c__ * z__[i__4].r, q__2.i = c__ * z__[i__4].i;
		    i__5 = jr + j * z_dim1;
		    q__3.r = s.r * z__[i__5].r - s.i * z__[i__5].i, q__3.i = 
			    s.r * z__[i__5].i + s.i * z__[i__5].r;
		    q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
		    ctemp.r = q__1.r, ctemp.i = q__1.i;
		    i__4 = jr + j * z_dim1;
		    r_cnjg(&q__4, &s);
		    q__3.r = -q__4.r, q__3.i = -q__4.i;
		    i__5 = jr + (j + 1) * z_dim1;
		    q__2.r = q__3.r * z__[i__5].r - q__3.i * z__[i__5].i, 
			    q__2.i = q__3.r * z__[i__5].i + q__3.i * z__[i__5]
			    .r;
		    i__6 = jr + j * z_dim1;
		    q__5.r = c__ * z__[i__6].r, q__5.i = c__ * z__[i__6].i;
		    q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
		    z__[i__4].r = q__1.r, z__[i__4].i = q__1.i;
		    i__4 = jr + (j + 1) * z_dim1;
		    z__[i__4].r = ctemp.r, z__[i__4].i = ctemp.i;
/* L140: */
		}
	    }
/* L150: */
	}

L160:

/* L170: */
	;
    }

/*     Drop-through = non-convergence */

L180:
    *info = ilast;
    goto L210;

/*     Successful completion of all QZ steps */

L190:

/*     Set Eigenvalues 1:ILO-1 */

    i__1 = *ilo - 1;
    for (j = 1; j <= i__1; ++j) {
	absb = c_abs(&t[j + j * t_dim1]);
	if (absb > safmin) {
	    i__2 = j + j * t_dim1;
	    q__2.r = t[i__2].r / absb, q__2.i = t[i__2].i / absb;
	    r_cnjg(&q__1, &q__2);
	    signbc.r = q__1.r, signbc.i = q__1.i;
	    i__2 = j + j * t_dim1;
	    t[i__2].r = absb, t[i__2].i = 0.f;
	    if (ilschr) {
		i__2 = j - 1;
		cscal_(&i__2, &signbc, &t[j * t_dim1 + 1], &c__1);
		cscal_(&j, &signbc, &h__[j * h_dim1 + 1], &c__1);
	    } else {
		i__2 = j + j * h_dim1;
		i__3 = j + j * h_dim1;
		q__1.r = h__[i__3].r * signbc.r - h__[i__3].i * signbc.i, 
			q__1.i = h__[i__3].r * signbc.i + h__[i__3].i * 
			signbc.r;
		h__[i__2].r = q__1.r, h__[i__2].i = q__1.i;
	    }
	    if (ilz) {
		cscal_(n, &signbc, &z__[j * z_dim1 + 1], &c__1);
	    }
	} else {
	    i__2 = j + j * t_dim1;
	    t[i__2].r = 0.f, t[i__2].i = 0.f;
	}
	i__2 = j;
	i__3 = j + j * h_dim1;
	alpha[i__2].r = h__[i__3].r, alpha[i__2].i = h__[i__3].i;
	i__2 = j;
	i__3 = j + j * t_dim1;
	beta[i__2].r = t[i__3].r, beta[i__2].i = t[i__3].i;
/* L200: */
    }

/*     Normal Termination */

    *info = 0;

/*     Exit (other than argument error) -- return optimal workspace size */

L210:
    q__1.r = (float) (*n), q__1.i = 0.f;
    work[1].r = q__1.r, work[1].i = q__1.i;
    return 0;

/*     End of CHGEQZ */

} /* chgeqz_ */
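
The shift selection in the QZ sweep above is only described in comments inside chgeqz_. As a standalone illustration (a sketch, not part of the translated routine: C99 <complex.h> is assumed and the helper name qz_wilkinson_shift is made up), the same computation in plain complex arithmetic looks like this: build the trailing 2x2 block of (A*inv(D))*inv(U) from the scaled entries ad11, ad12, ad21, ad22 and u12, and return the eigenvalue nearest its bottom-right entry.

#include <complex.h>

static float complex qz_wilkinson_shift(float complex ad11, float complex ad12,
                                        float complex ad21, float complex ad22,
                                        float complex u12)
{
    /* Bottom-right entry of (A*inv(D))*inv(U), with B = U*D and U unit
       upper triangular: abi22 = ad22 - u12*ad21. */
    float complex abi22 = ad22 - u12 * ad21;

    /* The 2x2 block (A*inv(D))*inv(U) has trace ad11 + abi22 and
       determinant ad11*ad22 - ad12*ad21 (right-multiplication by inv(U)
       leaves the determinant unchanged), so its eigenvalues are
       t1 +/- sqrt(t1^2 + ad12*ad21 - ad11*ad22), with t1 = trace/2. */
    float complex t1 = 0.5f * (ad11 + abi22);
    float complex rtdisc = csqrtf(t1 * t1 + ad12 * ad21 - ad11 * ad22);

    /* Return the root nearer to the bottom-right entry abi22, which is
       what the comments in CHGEQZ prescribe. */
    float complex r1 = t1 + rtdisc;
    float complex r2 = t1 - rtdisc;
    return cabsf(r1 - abi22) <= cabsf(r2 - abi22) ? r1 : r2;
}

The translated routine makes the same choice without forming both roots: it tests the sign of Re(t1 - abi22)*Re(rtdisc) + Im(t1 - abi22)*Im(rtdisc), which decides which of t1 +/- rtdisc lies closer to abi22.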
Exemplo n.º 16
0
/* Subroutine */ int ctgsen_(integer *ijob, logical *wantq, logical *wantz, 
	logical *select, integer *n, complex *a, integer *lda, complex *b, 
	integer *ldb, complex *alpha, complex *beta, complex *q, integer *ldq,
	 complex *z__, integer *ldz, integer *m, real *pl, real *pr, real *
	dif, complex *work, integer *lwork, integer *iwork, integer *liwork, 
	integer *info)
{
/*  -- LAPACK routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   


    Purpose   
    =======   

    CTGSEN reorders the generalized Schur decomposition of a complex   
    matrix pair (A, B) (in terms of a unitary equivalence
    transformation Q' * (A, B) * Z), so that a selected cluster of eigenvalues
    appears in the leading diagonal blocks of the pair (A,B). The leading   
    columns of Q and Z form unitary bases of the corresponding left and   
    right eigenspaces (deflating subspaces). (A, B) must be in   
    generalized Schur canonical form, that is, A and B are both upper   
    triangular.   

    CTGSEN also computes the generalized eigenvalues   

             w(j)= ALPHA(j) / BETA(j)   

    of the reordered matrix pair (A, B).   

    Optionally, the routine computes estimates of reciprocal condition   
    numbers for eigenvalues and eigenspaces. These are Difu[(A11,B11),   
    (A22,B22)] and Difl[(A11,B11), (A22,B22)], i.e. the separation(s)   
    between the matrix pairs (A11, B11) and (A22,B22) that correspond to   
    the selected cluster and the eigenvalues outside the cluster, resp.,   
    and norms of "projections" onto left and right eigenspaces w.r.t.   
    the selected cluster in the (1,1)-block.   


    Arguments   
    =========   

    IJOB    (input) integer   
            Specifies whether condition numbers are required for the   
            cluster of eigenvalues (PL and PR) or the deflating subspaces   
            (Difu and Difl):   
             =0: Only reorder w.r.t. SELECT. No extras.   
             =1: Reciprocal of norms of "projections" onto left and right   
                 eigenspaces w.r.t. the selected cluster (PL and PR).   
             =2: Upper bounds on Difu and Difl. F-norm-based estimate   
                 (DIF(1:2)).   
             =3: Estimate of Difu and Difl. 1-norm-based estimate   
                 (DIF(1:2)).   
                 About 5 times as expensive as IJOB = 2.   
             =4: Compute PL, PR and DIF (i.e. 0, 1 and 2 above): Economic   
                 version to get it all.   
             =5: Compute PL, PR and DIF (i.e. 0, 1 and 3 above)   

    WANTQ   (input) LOGICAL   
            .TRUE. : update the left transformation matrix Q;   
            .FALSE.: do not update Q.   

    WANTZ   (input) LOGICAL   
            .TRUE. : update the right transformation matrix Z;   
            .FALSE.: do not update Z.   

    SELECT  (input) LOGICAL array, dimension (N)   
            SELECT specifies the eigenvalues in the selected cluster. To   
            select an eigenvalue w(j), SELECT(j) must be set to   
            .TRUE..   

    N       (input) INTEGER   
            The order of the matrices A and B. N >= 0.   

    A       (input/output) COMPLEX array, dimension(LDA,N)   
            On entry, the upper triangular matrix A, in generalized   
            Schur canonical form.   
            On exit, A is overwritten by the reordered matrix A.   

    LDA     (input) INTEGER   
            The leading dimension of the array A. LDA >= max(1,N).   

    B       (input/output) COMPLEX array, dimension(LDB,N)   
            On entry, the upper triangular matrix B, in generalized   
            Schur canonical form.   
            On exit, B is overwritten by the reordered matrix B.   

    LDB     (input) INTEGER   
            The leading dimension of the array B. LDB >= max(1,N).   

    ALPHA   (output) COMPLEX array, dimension (N)   
    BETA    (output) COMPLEX array, dimension (N)   
            The diagonal elements of A and B, respectively,   
            when the pair (A,B) has been reduced to generalized Schur   
            form.  ALPHA(i)/BETA(i) i=1,...,N are the generalized   
            eigenvalues.   

    Q       (input/output) COMPLEX array, dimension (LDQ,N)   
            On entry, if WANTQ = .TRUE., Q is an N-by-N matrix.   
            On exit, Q has been postmultiplied by the left unitary   
            transformation matrix which reorders (A, B); The leading M
            columns of Q form orthonormal bases for the specified pair of   
            left eigenspaces (deflating subspaces).   
            If WANTQ = .FALSE., Q is not referenced.   

    LDQ     (input) INTEGER   
            The leading dimension of the array Q. LDQ >= 1.   
            If WANTQ = .TRUE., LDQ >= N.   

    Z       (input/output) COMPLEX array, dimension (LDZ,N)   
            On entry, if WANTZ = .TRUE., Z is an N-by-N matrix.   
            On exit, Z has been postmultiplied by the left unitary   
            transformation matrix which reorders (A, B); The leading M
            columns of Z form orthonormal bases for the specified pair of   
            left eigenspaces (deflating subspaces).   
            If WANTZ = .FALSE., Z is not referenced.   

    LDZ     (input) INTEGER   
            The leading dimension of the array Z. LDZ >= 1.   
            If WANTZ = .TRUE., LDZ >= N.   

    M       (output) INTEGER   
            The dimension of the specified pair of left and right   
            eigenspaces (deflating subspaces), 0 <= M <= N.

    PL, PR  (output) REAL   
            If IJOB = 1, 4 or 5, PL, PR are lower bounds on the   
            reciprocal  of the norm of "projections" onto left and right   
            eigenspace with respect to the selected cluster.   
            0 < PL, PR <= 1.   
            If M = 0 or M = N, PL = PR  = 1.   
            If IJOB = 0, 2 or 3, PL and PR are not referenced.

    DIF     (output) REAL array, dimension (2).   
            If IJOB >= 2, DIF(1:2) store the estimates of Difu and Difl.   
            If IJOB = 2 or 4, DIF(1:2) are F-norm-based upper bounds on   
            Difu and Difl. If IJOB = 3 or 5, DIF(1:2) are 1-norm-based   
            estimates of Difu and Difl, computed using reversed   
            communication with CLACON.   
            If M = 0 or N, DIF(1:2) = F-norm([A, B]).   
            If IJOB = 0 or 1, DIF is not referenced.   

    WORK    (workspace/output) COMPLEX array, dimension (LWORK)   
            IF IJOB = 0, WORK is not referenced.  Otherwise,   
            on exit, if INFO = 0, WORK(1) returns the optimal LWORK.   

    LWORK   (input) INTEGER   
            The dimension of the array WORK. LWORK >=  1   
            If IJOB = 1, 2 or 4, LWORK >=  2*M*(N-M)   
            If IJOB = 3 or 5, LWORK >=  4*M*(N-M)   

            If LWORK = -1, then a workspace query is assumed; the routine   
            only calculates the optimal size of the WORK array, returns   
            this value as the first entry of the WORK array, and no error   
            message related to LWORK is issued by XERBLA.   

    IWORK   (workspace/output) INTEGER, dimension (LIWORK)   
            IF IJOB = 0, IWORK is not referenced.  Otherwise,   
            on exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.   

    LIWORK  (input) INTEGER   
            The dimension of the array IWORK. LIWORK >= 1.   
            If IJOB = 1, 2 or 4, LIWORK >=  N+2;   
            If IJOB = 3 or 5, LIWORK >= MAX(N+2, 2*M*(N-M));   

            If LIWORK = -1, then a workspace query is assumed; the   
            routine only calculates the optimal size of the IWORK array,   
            returns this value as the first entry of the IWORK array, and   
            no error message related to LIWORK is issued by XERBLA.   

    INFO    (output) INTEGER   
              =0: Successful exit.   
              <0: If INFO = -i, the i-th argument had an illegal value.   
              =1: Reordering of (A, B) failed because the transformed   
                  matrix pair (A, B) would be too far from generalized   
                  Schur form; the problem is very ill-conditioned.   
                  (A, B) may have been partially reordered.   
                  If requested, 0 is returned in DIF(*), PL and PR.   


    Further Details   
    ===============   

    CTGSEN first collects the selected eigenvalues by computing unitary   
    U and W that move them to the top left corner of (A, B). In other   
    words, the selected eigenvalues are the eigenvalues of (A11, B11) in   

                  U'*(A, B)*W = (A11 A12) (B11 B12) n1   
                                ( 0  A22),( 0  B22) n2   
                                  n1  n2    n1  n2   

    where N = n1+n2 and U' means the conjugate transpose of U. The first   
    n1 columns of U and W span the specified pair of left and right   
    eigenspaces (deflating subspaces) of (A, B).   

    If (A, B) has been obtained from the generalized real Schur   
    decomposition of a matrix pair (C, D) = Q*(A, B)*Z', then the   
    reordered generalized Schur form of (C, D) is given by   

             (C, D) = (Q*U)*(U'*(A, B)*W)*(Z*W)',   

    and the first n1 columns of Q*U and Z*W span the corresponding   
    deflating subspaces of (C, D) (Q and Z store Q*U and Z*W, resp.).   

    Note that if the selected eigenvalue is sufficiently ill-conditioned,   
    then its value may differ significantly from its value before   
    reordering.   

    The reciprocal condition numbers of the left and right eigenspaces   
    spanned by the first n1 columns of U and W (or Q*U and Z*W) may   
    be returned in DIF(1:2), corresponding to Difu and Difl, resp.   

    The Difu and Difl are defined as:   

         Difu[(A11, B11), (A22, B22)] = sigma-min( Zu )   
    and   
         Difl[(A11, B11), (A22, B22)] = Difu[(A22, B22), (A11, B11)],   

    where sigma-min(Zu) is the smallest singular value of the   
    (2*n1*n2)-by-(2*n1*n2) matrix   

         Zu = [ kron(In2, A11)  -kron(A22', In1) ]   
              [ kron(In2, B11)  -kron(B22', In1) ].   

    Here, Inx is the identity matrix of size nx and A22' is the   
    transpose of A22. kron(X, Y) is the Kronecker product between   
    the matrices X and Y.   

    When DIF(2) is small, small changes in (A, B) can cause large changes   
    in the deflating subspace. An approximate (asymptotic) bound on the   
    maximum angular error in the computed deflating subspaces is   

         EPS * norm((A, B)) / DIF(2),   

    where EPS is the machine precision.   

    The reciprocal norm of the projectors on the left and right   
    eigenspaces associated with (A11, B11) may be returned in PL and PR.   
    They are computed as follows. First we compute L and R so that   
    P*(A, B)*Q is block diagonal, where   

         P = ( I -L ) n1           Q = ( I R ) n1   
             ( 0  I ) n2    and        ( 0 I ) n2   
               n1 n2                    n1 n2   

    and (L, R) is the solution to the generalized Sylvester equation   

         A11*R - L*A22 = -A12   
         B11*R - L*B22 = -B12   

    Then PL = (F-norm(L)**2+1)**(-1/2) and PR = (F-norm(R)**2+1)**(-1/2).   
    An approximate (asymptotic) bound on the average absolute error of   
    the selected eigenvalues is   

         EPS * norm((A, B)) / PL.   

    There are also global error bounds which are valid for perturbations up
    to a certain restriction:  A lower bound (x) on the smallest   
    F-norm(E,F) for which an eigenvalue of (A11, B11) may move and   
    coalesce with an eigenvalue of (A22, B22) under perturbation (E,F),   
    (i.e., (A + E, B + F)), is

     x = min(Difu,Difl)/((1/(PL*PL)+1/(PR*PR))**(1/2)+2*max(1/PL,1/PR)).   

    An approximate bound on x can be computed from DIF(1:2), PL and PR.   

    If y = ( F-norm(E,F) / x) <= 1, the angles between the perturbed   
    (L', R') and unperturbed (L, R) left and right deflating subspaces   
    associated with the selected cluster in the (1,1)-blocks can be   
    bounded as   

     max-angle(L, L') <= arctan( y * PL / (1 - y * (1 - PL * PL)**(1/2)) )
     max-angle(R, R') <= arctan( y * PR / (1 - y * (1 - PR * PR)**(1/2)) )

    See LAPACK User's Guide section 4.11 or the following references   
    for more information.   

    Note that if the default method for computing the Frobenius-norm-   
    based estimate DIF is not wanted (see CLATDF), then the parameter   
    IDIFJB (see below) should be changed from 3 to 4 (routine CLATDF   
    (IJOB = 2 will be used)). See CTGSYL for more details.   

    Based on contributions by   
       Bo Kagstrom and Peter Poromaa, Department of Computing Science,   
       Umea University, S-901 87 Umea, Sweden.   

    References   
    ==========   

    [1] B. Kagstrom; A Direct Method for Reordering Eigenvalues in the   
        Generalized Real Schur Form of a Regular Matrix Pair (A, B), in   
        M.S. Moonen et al (eds), Linear Algebra for Large Scale and   
        Real-Time Applications, Kluwer Academic Publ. 1993, pp 195-218.   

    [2] B. Kagstrom and P. Poromaa; Computing Eigenspaces with Specified   
        Eigenvalues of a Regular Matrix Pair (A, B) and Condition   
        Estimation: Theory, Algorithms and Software, Report   
        UMINF - 94.04, Department of Computing Science, Umea University,   
        S-901 87 Umea, Sweden, 1994. Also as LAPACK Working Note 87.   
        To appear in Numerical Algorithms, 1996.   

    [3] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software   
        for Solving the Generalized Sylvester Equation and Estimating the   
        Separation between Regular Matrix Pairs, Report UMINF - 93.23,   
        Department of Computing Science, Umea University, S-901 87 Umea,   
        Sweden, December 1993, Revised April 1994, Also as LAPACK working   
        Note 75. To appear in ACM Trans. on Math. Software, Vol 22, No 1,   
        1996.   

    =====================================================================   


       Decode and test the input parameters   

       Parameter adjustments */
    /* Table of constant values */
    static integer c__1 = 1;
    
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1, 
	    z_offset, i__1, i__2, i__3;
    complex q__1, q__2;
    /* Builtin functions */
    double sqrt(doublereal), c_abs(complex *);
    void r_cnjg(complex *, complex *);
    /* Local variables */
    static integer kase, ierr;
    static real dsum;
    static logical swap;
    static integer i__, k;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
	    integer *);
    static logical wantd;
    static integer lwmin;
    static logical wantp;
    static integer n1, n2;
    static logical wantd1, wantd2;
    static real dscale;
    static integer ks;
    extern /* Subroutine */ int clacon_(integer *, complex *, complex *, real 
	    *, integer *);
    extern doublereal slamch_(char *);
    static real rdscal;
    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex 
	    *, integer *, complex *, integer *);
    static real safmin;
    extern /* Subroutine */ int ctgexc_(logical *, logical *, integer *, 
	    complex *, integer *, complex *, integer *, complex *, integer *, 
	    complex *, integer *, integer *, integer *, integer *), xerbla_(
	    char *, integer *), classq_(integer *, complex *, integer 
	    *, real *, real *);
    static integer liwmin;
    extern /* Subroutine */ int ctgsyl_(char *, integer *, integer *, integer 
	    *, complex *, integer *, complex *, integer *, complex *, integer 
	    *, complex *, integer *, complex *, integer *, complex *, integer 
	    *, real *, real *, complex *, integer *, integer *, integer *);
    static integer mn2;
    static logical lquery;
    static integer ijb;
#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]
#define q_subscr(a_1,a_2) (a_2)*q_dim1 + a_1
#define q_ref(a_1,a_2) q[q_subscr(a_1,a_2)]


    --select;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    b_dim1 = *ldb;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    --alpha;
    --beta;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1 * 1;
    z__ -= z_offset;
    --dif;
    --work;
    --iwork;

    /* Function Body */
    *info = 0;
    lquery = *lwork == -1 || *liwork == -1;

    if (*ijob < 0 || *ijob > 5) {
	*info = -1;
    } else if (*n < 0) {
	*info = -5;
    } else if (*lda < max(1,*n)) {
	*info = -7;
    } else if (*ldb < max(1,*n)) {
	*info = -9;
    } else if (*ldq < 1 || *wantq && *ldq < *n) {
	*info = -13;
    } else if (*ldz < 1 || *wantz && *ldz < *n) {
	*info = -15;
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CTGSEN", &i__1);
	return 0;
    }

    ierr = 0;

    wantp = *ijob == 1 || *ijob >= 4;
    wantd1 = *ijob == 2 || *ijob == 4;
    wantd2 = *ijob == 3 || *ijob == 5;
    wantd = wantd1 || wantd2;

/*     Set M to the dimension of the specified pair of deflating   
       subspaces. */

    *m = 0;
    i__1 = *n;
    for (k = 1; k <= i__1; ++k) {
	i__2 = k;
	i__3 = a_subscr(k, k);
	alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
	i__2 = k;
	i__3 = b_subscr(k, k);
	beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;
	if (k < *n) {
	    if (select[k]) {
		++(*m);
	    }
	} else {
	    if (select[*n]) {
		++(*m);
	    }
	}
/* L10: */
    }

    if (*ijob == 1 || *ijob == 2 || *ijob == 4) {
/* Computing MAX */
	i__1 = 1, i__2 = (*m << 1) * (*n - *m);
	lwmin = max(i__1,i__2);
/* Computing MAX */
	i__1 = 1, i__2 = *n + 2;
	liwmin = max(i__1,i__2);
    } else if (*ijob == 3 || *ijob == 5) {
/* Computing MAX */
	i__1 = 1, i__2 = (*m << 2) * (*n - *m);
	lwmin = max(i__1,i__2);
/* Computing MAX */
	i__1 = 1, i__2 = (*m << 1) * (*n - *m), i__1 = max(i__1,i__2), i__2 = 
		*n + 2;
	liwmin = max(i__1,i__2);
    } else {
	lwmin = 1;
	liwmin = 1;
    }

    work[1].r = (real) lwmin, work[1].i = 0.f;
    iwork[1] = liwmin;

    if (*lwork < lwmin && ! lquery) {
	*info = -21;
    } else if (*liwork < liwmin && ! lquery) {
	*info = -23;
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CTGSEN", &i__1);
	return 0;
    } else if (lquery) {
	return 0;
    }

/*     Quick return if possible. */

    if (*m == *n || *m == 0) {
	if (wantp) {
	    *pl = 1.f;
	    *pr = 1.f;
	}
	if (wantd) {
	    dscale = 0.f;
	    dsum = 1.f;
	    i__1 = *n;
	    for (i__ = 1; i__ <= i__1; ++i__) {
		classq_(n, &a_ref(1, i__), &c__1, &dscale, &dsum);
		classq_(n, &b_ref(1, i__), &c__1, &dscale, &dsum);
/* L20: */
	    }
	    dif[1] = dscale * sqrt(dsum);
	    dif[2] = dif[1];
	}
	goto L70;
    }

/*     Get machine constant */

    safmin = slamch_("S");

/*     Collect the selected blocks at the top-left corner of (A, B). */

    ks = 0;
    i__1 = *n;
    for (k = 1; k <= i__1; ++k) {
	swap = select[k];
	if (swap) {
	    ++ks;

/*           Swap the K-th block to position KS. Compute unitary Q   
             and Z that will swap adjacent diagonal blocks in (A, B). */

	    if (k != ks) {
		ctgexc_(wantq, wantz, n, &a[a_offset], lda, &b[b_offset], ldb,
			 &q[q_offset], ldq, &z__[z_offset], ldz, &k, &ks, &
			ierr);
	    }

	    if (ierr > 0) {

/*              Swap is rejected: exit. */

		*info = 1;
		if (wantp) {
		    *pl = 0.f;
		    *pr = 0.f;
		}
		if (wantd) {
		    dif[1] = 0.f;
		    dif[2] = 0.f;
		}
		goto L70;
	    }
	}
/* L30: */
    }
    if (wantp) {

/*        Solve generalized Sylvester equation for R and L:   
                     A11 * R - L * A22 = A12   
                     B11 * R - L * B22 = B12 */

	n1 = *m;
	n2 = *n - *m;
	i__ = n1 + 1;
	clacpy_("Full", &n1, &n2, &a_ref(1, i__), lda, &work[1], &n1);
	clacpy_("Full", &n1, &n2, &b_ref(1, i__), ldb, &work[n1 * n2 + 1], &
		n1);
	ijb = 0;
	i__1 = *lwork - (n1 << 1) * n2;
	ctgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a_ref(i__, i__), lda,
		 &work[1], &n1, &b[b_offset], ldb, &b_ref(i__, i__), ldb, &
		work[n1 * n2 + 1], &n1, &dscale, &dif[1], &work[(n1 * n2 << 1)
		 + 1], &i__1, &iwork[1], &ierr);

/*        Estimate the reciprocal of norms of "projections" onto   
          left and right eigenspaces */

	rdscal = 0.f;
	dsum = 1.f;
	i__1 = n1 * n2;
	classq_(&i__1, &work[1], &c__1, &rdscal, &dsum);
	*pl = rdscal * sqrt(dsum);
	if (*pl == 0.f) {
	    *pl = 1.f;
	} else {
	    *pl = dscale / (sqrt(dscale * dscale / *pl + *pl) * sqrt(*pl));
	}
	rdscal = 0.f;
	dsum = 1.f;
	i__1 = n1 * n2;
	classq_(&i__1, &work[n1 * n2 + 1], &c__1, &rdscal, &dsum);
	*pr = rdscal * sqrt(dsum);
	if (*pr == 0.f) {
	    *pr = 1.f;
	} else {
	    *pr = dscale / (sqrt(dscale * dscale / *pr + *pr) * sqrt(*pr));
	}
    }
    if (wantd) {

/*        Compute estimates Difu and Difl. */

	if (wantd1) {
	    n1 = *m;
	    n2 = *n - *m;
	    i__ = n1 + 1;
	    ijb = 3;

/*           Frobenius norm-based Difu estimate. */

	    i__1 = *lwork - (n1 << 1) * n2;
	    ctgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a_ref(i__, i__), 
		    lda, &work[1], &n1, &b[b_offset], ldb, &b_ref(i__, i__), 
		    ldb, &work[n1 * n2 + 1], &n1, &dscale, &dif[1], &work[(n1 
		    * n2 << 1) + 1], &i__1, &iwork[1], &ierr);

/*           Frobenius norm-based Difl estimate. */

	    i__1 = *lwork - (n1 << 1) * n2;
	    ctgsyl_("N", &ijb, &n2, &n1, &a_ref(i__, i__), lda, &a[a_offset], 
		    lda, &work[1], &n2, &b_ref(i__, i__), ldb, &b[b_offset], 
		    ldb, &work[n1 * n2 + 1], &n2, &dscale, &dif[2], &work[(n1 
		    * n2 << 1) + 1], &i__1, &iwork[1], &ierr);
	} else {

/*           Compute 1-norm-based estimates of Difu and Difl using   
             reversed communication with CLACON. In each step a   
             generalized Sylvester equation or a transposed variant   
             is solved. */

	    kase = 0;
	    n1 = *m;
	    n2 = *n - *m;
	    i__ = n1 + 1;
	    ijb = 0;
	    mn2 = (n1 << 1) * n2;

/*           1-norm-based estimate of Difu. */

L40:
	    clacon_(&mn2, &work[mn2 + 1], &work[1], &dif[1], &kase);
	    if (kase != 0) {
		if (kase == 1) {

/*                 Solve generalized Sylvester equation */

		    i__1 = *lwork - (n1 << 1) * n2;
		    ctgsyl_("N", &ijb, &n1, &n2, &a[a_offset], lda, &a_ref(
			    i__, i__), lda, &work[1], &n1, &b[b_offset], ldb, 
			    &b_ref(i__, i__), ldb, &work[n1 * n2 + 1], &n1, &
			    dscale, &dif[1], &work[(n1 * n2 << 1) + 1], &i__1,
			     &iwork[1], &ierr);
		} else {

/*                 Solve the transposed variant. */

		    i__1 = *lwork - (n1 << 1) * n2;
		    ctgsyl_("C", &ijb, &n1, &n2, &a[a_offset], lda, &a_ref(
			    i__, i__), lda, &work[1], &n1, &b[b_offset], ldb, 
			    &b_ref(i__, i__), ldb, &work[n1 * n2 + 1], &n1, &
			    dscale, &dif[1], &work[(n1 * n2 << 1) + 1], &i__1,
			     &iwork[1], &ierr);
		}
		goto L40;
	    }
	    dif[1] = dscale / dif[1];

/*           1-norm-based estimate of Difl. */

L50:
	    clacon_(&mn2, &work[mn2 + 1], &work[1], &dif[2], &kase);
	    if (kase != 0) {
		if (kase == 1) {

/*                 Solve generalized Sylvester equation */

		    i__1 = *lwork - (n1 << 1) * n2;
		    ctgsyl_("N", &ijb, &n2, &n1, &a_ref(i__, i__), lda, &a[
			    a_offset], lda, &work[1], &n2, &b_ref(i__, i__), 
			    ldb, &b[b_offset], ldb, &work[n1 * n2 + 1], &n2, &
			    dscale, &dif[2], &work[(n1 * n2 << 1) + 1], &i__1,
			     &iwork[1], &ierr);
		} else {

/*                 Solve the transposed variant. */

		    i__1 = *lwork - (n1 << 1) * n2;
		    ctgsyl_("C", &ijb, &n2, &n1, &a_ref(i__, i__), lda, &a[
			    a_offset], lda, &work[1], &n2, &b[b_offset], ldb, 
			    &b_ref(i__, i__), ldb, &work[n1 * n2 + 1], &n2, &
			    dscale, &dif[2], &work[(n1 * n2 << 1) + 1], &i__1,
			     &iwork[1], &ierr);
		}
		goto L50;
	    }
	    dif[2] = dscale / dif[2];
	}
    }

/*     If B(K,K) is complex, make it real and positive (normalization   
       of the generalized Schur form) and Store the generalized   
       eigenvalues of reordered pair (A, B) */

    i__1 = *n;
    for (k = 1; k <= i__1; ++k) {
	dscale = c_abs(&b_ref(k, k));
	if (dscale > safmin) {
	    i__2 = b_subscr(k, k);
	    q__2.r = b[i__2].r / dscale, q__2.i = b[i__2].i / dscale;
	    r_cnjg(&q__1, &q__2);
	    work[1].r = q__1.r, work[1].i = q__1.i;
	    i__2 = b_subscr(k, k);
	    q__1.r = b[i__2].r / dscale, q__1.i = b[i__2].i / dscale;
	    work[2].r = q__1.r, work[2].i = q__1.i;
	    i__2 = b_subscr(k, k);
	    b[i__2].r = dscale, b[i__2].i = 0.f;
	    i__2 = *n - k;
	    cscal_(&i__2, &work[1], &b_ref(k, k + 1), ldb);
	    i__2 = *n - k + 1;
	    cscal_(&i__2, &work[1], &a_ref(k, k), lda);
	    if (*wantq) {
		cscal_(n, &work[2], &q_ref(1, k), &c__1);
	    }
	} else {
	    i__2 = b_subscr(k, k);
	    b[i__2].r = 0.f, b[i__2].i = 0.f;
	}

	i__2 = k;
	i__3 = a_subscr(k, k);
	alpha[i__2].r = a[i__3].r, alpha[i__2].i = a[i__3].i;
	i__2 = k;
	i__3 = b_subscr(k, k);
	beta[i__2].r = b[i__3].r, beta[i__2].i = b[i__3].i;

/* L60: */
    }

L70:

    work[1].r = (real) lwmin, work[1].i = 0.f;
    iwork[1] = liwmin;

    return 0;

/*     End of CTGSEN */

} /* ctgsen_ */
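
The Further Details block of ctgsen_ gives closed-form expressions for the projector norms PL, PR and for the smallest perturbation x at which eigenvalues of the two diagonal blocks can coalesce. A minimal standalone sketch of that bookkeeping (C99 assumed; the helper names are made up, and CTGSEN itself derives PL/PR from scaled CLASSQ norms rather than calling anything like this):

#include <math.h>

/* PL or PR as defined in the comments above: (F-norm(L)**2 + 1)**(-1/2),
   given the Frobenius norm of the Sylvester solution L (or R). */
static float projector_recip_norm(float fnorm)
{
    return 1.0f / sqrtf(fnorm * fnorm + 1.0f);
}

/* Lower bound x = min(Difu,Difl) /
   ( sqrt(1/PL**2 + 1/PR**2) + 2*max(1/PL, 1/PR) )
   on F-norm(E,F) below which no eigenvalue of (A11, B11) can move and
   coalesce with an eigenvalue of (A22, B22) under the perturbation
   (A + E, B + F). */
static float coalescence_bound(float difu, float difl, float pl, float pr)
{
    float dmin   = difu < difl ? difu : difl;
    float invpl  = 1.0f / pl;
    float invpr  = 1.0f / pr;
    float maxinv = invpl > invpr ? invpl : invpr;

    return dmin / (sqrtf(invpl * invpl + invpr * invpr) + 2.0f * maxinv);
}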
Exemplo n.º 17
0
/* Subroutine */ int cchkgl_(integer *nin, integer *nout)
{
    /* Format strings */
    static char fmt_9999[] = "(\002 .. test output of CGGBAL .. \002)";
    static char fmt_9998[] = "(\002 ratio of largest test error             "
	    " = \002,e12.3)";
    static char fmt_9997[] = "(\002 example number where info is not zero   "
	    " = \002,i4)";
    static char fmt_9996[] = "(\002 example number where ILO or IHI is wrong"
	    " = \002,i4)";
    static char fmt_9995[] = "(\002 example number having largest error     "
	    " = \002,i4)";
    static char fmt_9994[] = "(\002 number of examples where info is not 0  "
	    " = \002,i4)";
    static char fmt_9993[] = "(\002 total number of examples tested         "
	    " = \002,i4)";

    /* System generated locals */
    integer i__1, i__2, i__3, i__4;
    real r__1, r__2, r__3;
    complex q__1;

    /* Builtin functions */
    integer s_rsle(cilist *), do_lio(integer *, integer *, char *, ftnlen), 
	    e_rsle(void);
    double c_abs(complex *);
    integer s_wsfe(cilist *), e_wsfe(void), do_fio(integer *, char *, ftnlen);

    /* Local variables */
    complex a[400]	/* was [20][20] */, b[400]	/* was [20][20] */;
    integer i__, j, n;
    complex ain[400]	/* was [20][20] */, bin[400]	/* was [20][20] */;
    integer ihi, ilo;
    real eps;
    integer knt, info, lmax[3];
    real rmax, vmax, work[120];
    integer ihiin, ninfo, iloin;
    real anorm, bnorm;
    extern /* Subroutine */ int cggbal_(char *, integer *, complex *, integer 
	    *, complex *, integer *, integer *, integer *, real *, real *, 
	    real *, integer *);
    extern doublereal clange_(char *, integer *, integer *, complex *, 
	    integer *, real *);
    real lscale[20];
    extern doublereal slamch_(char *);
    real rscale[20], lsclin[20], rsclin[20];

    /* Fortran I/O blocks */
    static cilist io___6 = { 0, 0, 0, 0, 0 };
    static cilist io___9 = { 0, 0, 0, 0, 0 };
    static cilist io___12 = { 0, 0, 0, 0, 0 };
    static cilist io___14 = { 0, 0, 0, 0, 0 };
    static cilist io___17 = { 0, 0, 0, 0, 0 };
    static cilist io___19 = { 0, 0, 0, 0, 0 };
    static cilist io___21 = { 0, 0, 0, 0, 0 };
    static cilist io___23 = { 0, 0, 0, 0, 0 };
    static cilist io___34 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___35 = { 0, 0, 0, fmt_9998, 0 };
    static cilist io___36 = { 0, 0, 0, fmt_9997, 0 };
    static cilist io___37 = { 0, 0, 0, fmt_9996, 0 };
    static cilist io___38 = { 0, 0, 0, fmt_9995, 0 };
    static cilist io___39 = { 0, 0, 0, fmt_9994, 0 };
    static cilist io___40 = { 0, 0, 0, fmt_9993, 0 };



/*  -- LAPACK test routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CCHKGL tests CGGBAL, a routine for balancing a matrix pair (A, B). */

/*  Arguments */
/*  ========= */

/*  NIN     (input) INTEGER */
/*          The logical unit number for input.  NIN > 0. */

/*  NOUT    (input) INTEGER */
/*          The logical unit number for output.  NOUT > 0. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

    lmax[0] = 0;
    lmax[1] = 0;
    lmax[2] = 0;
    ninfo = 0;
    knt = 0;
    rmax = 0.f;

    eps = slamch_("Precision");

L10:

    io___6.ciunit = *nin;
    s_rsle(&io___6);
    do_lio(&c__3, &c__1, (char *)&n, (ftnlen)sizeof(integer));
    e_rsle();
    if (n == 0) {
	goto L90;
    }
    i__1 = n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	io___9.ciunit = *nin;
	s_rsle(&io___9);
	i__2 = n;
	for (j = 1; j <= i__2; ++j) {
	    do_lio(&c__6, &c__1, (char *)&a[i__ + j * 20 - 21], (ftnlen)
		    sizeof(complex));
	}
	e_rsle();
/* L20: */
    }

    i__1 = n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	io___12.ciunit = *nin;
	s_rsle(&io___12);
	i__2 = n;
	for (j = 1; j <= i__2; ++j) {
	    do_lio(&c__6, &c__1, (char *)&b[i__ + j * 20 - 21], (ftnlen)
		    sizeof(complex));
	}
	e_rsle();
/* L30: */
    }

    io___14.ciunit = *nin;
    s_rsle(&io___14);
    do_lio(&c__3, &c__1, (char *)&iloin, (ftnlen)sizeof(integer));
    do_lio(&c__3, &c__1, (char *)&ihiin, (ftnlen)sizeof(integer));
    e_rsle();
    i__1 = n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	io___17.ciunit = *nin;
	s_rsle(&io___17);
	i__2 = n;
	for (j = 1; j <= i__2; ++j) {
	    do_lio(&c__6, &c__1, (char *)&ain[i__ + j * 20 - 21], (ftnlen)
		    sizeof(complex));
	}
	e_rsle();
/* L40: */
    }
    i__1 = n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	io___19.ciunit = *nin;
	s_rsle(&io___19);
	i__2 = n;
	for (j = 1; j <= i__2; ++j) {
	    do_lio(&c__6, &c__1, (char *)&bin[i__ + j * 20 - 21], (ftnlen)
		    sizeof(complex));
	}
	e_rsle();
/* L50: */
    }

    io___21.ciunit = *nin;
    s_rsle(&io___21);
    i__1 = n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	do_lio(&c__4, &c__1, (char *)&lsclin[i__ - 1], (ftnlen)sizeof(real));
    }
    e_rsle();
    io___23.ciunit = *nin;
    s_rsle(&io___23);
    i__1 = n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	do_lio(&c__4, &c__1, (char *)&rsclin[i__ - 1], (ftnlen)sizeof(real));
    }
    e_rsle();

    anorm = clange_("M", &n, &n, a, &c__20, work);
    bnorm = clange_("M", &n, &n, b, &c__20, work);

    ++knt;

    cggbal_("B", &n, a, &c__20, b, &c__20, &ilo, &ihi, lscale, rscale, work, &
	    info);

    if (info != 0) {
	++ninfo;
	lmax[0] = knt;
    }

    if (ilo != iloin || ihi != ihiin) {
	++ninfo;
	lmax[1] = knt;
    }

    vmax = 0.f;
    i__1 = n;
    for (i__ = 1; i__ <= i__1; ++i__) {
	i__2 = n;
	for (j = 1; j <= i__2; ++j) {
/* Computing MAX */
	    i__3 = i__ + j * 20 - 21;
	    i__4 = i__ + j * 20 - 21;
	    q__1.r = a[i__3].r - ain[i__4].r, q__1.i = a[i__3].i - ain[i__4]
		    .i;
	    r__1 = vmax, r__2 = c_abs(&q__1);
	    vmax = dmax(r__1,r__2);
/* Computing MAX */
	    i__3 = i__ + j * 20 - 21;
	    i__4 = i__ + j * 20 - 21;
	    q__1.r = b[i__3].r - bin[i__4].r, q__1.i = b[i__3].i - bin[i__4]
		    .i;
	    r__1 = vmax, r__2 = c_abs(&q__1);
	    vmax = dmax(r__1,r__2);
/* L60: */
	}
/* L70: */
    }

    i__1 = n;
    for (i__ = 1; i__ <= i__1; ++i__) {
/* Computing MAX */
	r__2 = vmax, r__3 = (r__1 = lscale[i__ - 1] - lsclin[i__ - 1], dabs(
		r__1));
	vmax = dmax(r__2,r__3);
/* Computing MAX */
	r__2 = vmax, r__3 = (r__1 = rscale[i__ - 1] - rsclin[i__ - 1], dabs(
		r__1));
	vmax = dmax(r__2,r__3);
/* L80: */
    }

    vmax /= eps * dmax(anorm,bnorm);

    if (vmax > rmax) {
	lmax[2] = knt;
	rmax = vmax;
    }

    goto L10;

L90:

    io___34.ciunit = *nout;
    s_wsfe(&io___34);
    e_wsfe();

    io___35.ciunit = *nout;
    s_wsfe(&io___35);
    do_fio(&c__1, (char *)&rmax, (ftnlen)sizeof(real));
    e_wsfe();
    io___36.ciunit = *nout;
    s_wsfe(&io___36);
    do_fio(&c__1, (char *)&lmax[0], (ftnlen)sizeof(integer));
    e_wsfe();
    io___37.ciunit = *nout;
    s_wsfe(&io___37);
    do_fio(&c__1, (char *)&lmax[1], (ftnlen)sizeof(integer));
    e_wsfe();
    io___38.ciunit = *nout;
    s_wsfe(&io___38);
    do_fio(&c__1, (char *)&lmax[2], (ftnlen)sizeof(integer));
    e_wsfe();
    io___39.ciunit = *nout;
    s_wsfe(&io___39);
    do_fio(&c__1, (char *)&ninfo, (ftnlen)sizeof(integer));
    e_wsfe();
    io___40.ciunit = *nout;
    s_wsfe(&io___40);
    do_fio(&c__1, (char *)&knt, (ftnlen)sizeof(integer));
    e_wsfe();

    return 0;

/*     End of CCHKGL */

} /* cchkgl_ */
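
cchkgl_ condenses each test case into one ratio: the largest entrywise deviation from the expected output, divided by eps times the larger of the two input norms. A small standalone sketch of the per-matrix piece of that ratio (C99 assumed; the function name and the column-major/leading-dimension convention mirror the f2c arrays above, but the helper itself is illustrative only):

#include <complex.h>

/* Largest |a(i,j) - ain(i,j)| over an n-by-n block of column-major arrays
   with leading dimension lda, scaled the same way the driver scales its
   accumulated maximum: divide by eps*max(anorm, bnorm). */
static float balance_test_ratio(int n, int lda,
                                const float complex *a,
                                const float complex *ain,
                                float anorm, float bnorm, float eps)
{
    float vmax = 0.0f;
    for (int j = 0; j < n; ++j) {
        for (int i = 0; i < n; ++i) {
            float d = cabsf(a[i + j * lda] - ain[i + j * lda]);
            if (d > vmax)
                vmax = d;
        }
    }
    return vmax / (eps * (anorm > bnorm ? anorm : bnorm));
}

The driver itself folds the A and B deviations and the scale-factor differences into a single vmax before dividing, but the scaling of the final ratio is the same.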
Exemplo n.º 18
0
/* Subroutine */ int cdrvgb_(logical *dotype, integer *nn, integer *nval, 
	integer *nrhs, real *thresh, logical *tsterr, complex *a, integer *la, 
	 complex *afb, integer *lafb, complex *asav, complex *b, complex *
	bsav, complex *x, complex *xact, real *s, complex *work, real *rwork, 
	integer *iwork, integer *nout)
{
    /* Initialized data */

    static integer iseedy[4] = { 1988,1989,1990,1991 };
    static char transs[1*3] = "N" "T" "C";
    static char facts[1*3] = "F" "N" "E";
    static char equeds[1*4] = "N" "R" "C" "B";

    /* Format strings */
    static char fmt_9999[] = "(\002 *** In CDRVGB, LA=\002,i5,\002 is too sm"
	    "all for N=\002,i5,\002, KU=\002,i5,\002, KL=\002,i5,/\002 ==> In"
	    "crease LA to at least \002,i5)";
    static char fmt_9998[] = "(\002 *** In CDRVGB, LAFB=\002,i5,\002 is too "
	    "small for N=\002,i5,\002, KU=\002,i5,\002, KL=\002,i5,/\002 ==> "
	    "Increase LAFB to at least \002,i5)";
    static char fmt_9997[] = "(1x,a6,\002, N=\002,i5,\002, KL=\002,i5,\002, "
	    "KU=\002,i5,\002, type \002,i1,\002, test(\002,i1,\002)=\002,g12."
	    "5)";
    static char fmt_9995[] = "(1x,a6,\002( '\002,a1,\002','\002,a1,\002',"
	    "\002,i5,\002,\002,i5,\002,\002,i5,\002,...), EQUED='\002,a1,\002"
	    "', type \002,i1,\002, test(\002,i1,\002)=\002,g12.5)";
    static char fmt_9996[] = "(1x,a6,\002( '\002,a1,\002','\002,a1,\002',"
	    "\002,i5,\002,\002,i5,\002,\002,i5,\002,...), type \002,i1,\002, "
	    "test(\002,i1,\002)=\002,g12.5)";

    /* System generated locals */
    address a__1[2];
    integer i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8, i__9, i__10, 
	    i__11[2];
    real r__1, r__2;
    char ch__1[2];

    /* Builtin functions */
    /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen);
    integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void);
    /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
    double c_abs(complex *);

    /* Local variables */
    integer i__, j, k, n, i1, i2, k1, nb, in, kl, ku, nt, lda, ldb, ikl, nkl, 
	    iku, nku;
    char fact[1];
    integer ioff, mode;
    real amax;
    char path[3];
    integer imat, info;
    char dist[1];
    real rdum[1];
    char type__[1];
    integer nrun, ldafb;
    extern /* Subroutine */ int cgbt01_(integer *, integer *, integer *, 
	    integer *, complex *, integer *, complex *, integer *, integer *, 
	    complex *, real *), cgbt02_(char *, integer *, integer *, integer 
	    *, integer *, integer *, complex *, integer *, complex *, integer 
	    *, complex *, integer *, real *), cgbt05_(char *, integer 
	    *, integer *, integer *, integer *, complex *, integer *, complex 
	    *, integer *, complex *, integer *, complex *, integer *, real *, 
	    real *, real *);
    integer ifact;
    extern /* Subroutine */ int cget04_(integer *, integer *, complex *, 
	    integer *, complex *, integer *, real *, real *);
    integer nfail, iseed[4], nfact;
    extern logical lsame_(char *, char *);
    char equed[1];
    integer nbmin;
    real rcond, roldc;
    extern /* Subroutine */ int cgbsv_(integer *, integer *, integer *, 
	    integer *, complex *, integer *, integer *, complex *, integer *, 
	    integer *);
    integer nimat;
    real roldi;
    extern doublereal sget06_(real *, real *);
    real anorm;
    integer itran;
    logical equil;
    real roldo;
    char trans[1];
    integer izero, nerrs;
    logical zerot;
    char xtype[1];
    extern /* Subroutine */ int clatb4_(char *, integer *, integer *, integer 
	    *, char *, integer *, integer *, real *, integer *, real *, char *
), aladhd_(integer *, char *);
    extern doublereal clangb_(char *, integer *, integer *, integer *, 
	    complex *, integer *, real *), clange_(char *, integer *, 
	    integer *, complex *, integer *, real *);
    extern /* Subroutine */ int claqgb_(integer *, integer *, integer *, 
	    integer *, complex *, integer *, real *, real *, real *, real *, 
	    real *, char *), alaerh_(char *, char *, integer *, 
	    integer *, char *, integer *, integer *, integer *, integer *, 
	    integer *, integer *, integer *, integer *, integer *);
    logical prefac;
    real colcnd;
    extern doublereal clantb_(char *, char *, char *, integer *, integer *, 
	    complex *, integer *, real *);
    extern /* Subroutine */ int cgbequ_(integer *, integer *, integer *, 
	    integer *, complex *, integer *, real *, real *, real *, real *, 
	    real *, integer *);
    real rcondc;
    extern doublereal slamch_(char *);
    logical nofact;
    extern /* Subroutine */ int cgbtrf_(integer *, integer *, integer *, 
	    integer *, complex *, integer *, integer *, integer *);
    integer iequed;
    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex 
	    *, integer *, complex *, integer *);
    real rcondi;
    extern /* Subroutine */ int clarhs_(char *, char *, char *, char *, 
	    integer *, integer *, integer *, integer *, integer *, complex *, 
	    integer *, complex *, integer *, complex *, integer *, integer *, 
	    integer *), claset_(char *, 
	    integer *, integer *, complex *, complex *, complex *, integer *), alasvm_(char *, integer *, integer *, integer *, integer 
	    *);
    real cndnum, anormi, rcondo, ainvnm;
    extern /* Subroutine */ int cgbtrs_(char *, integer *, integer *, integer 
	    *, integer *, complex *, integer *, integer *, complex *, integer 
	    *, integer *), clatms_(integer *, integer *, char *, 
	    integer *, char *, real *, integer *, real *, real *, integer *, 
	    integer *, char *, complex *, integer *, complex *, integer *);
    logical trfcon;
    real anormo, rowcnd;
    extern /* Subroutine */ int cgbsvx_(char *, char *, integer *, integer *, 
	    integer *, integer *, complex *, integer *, complex *, integer *, 
	    integer *, char *, real *, real *, complex *, integer *, complex *
, integer *, real *, real *, real *, complex *, real *, integer *), xlaenv_(integer *, integer *);
    real anrmpv;
    extern /* Subroutine */ int cerrvx_(char *, integer *);
    real result[7], rpvgrw;

    /* Fortran I/O blocks */
    static cilist io___26 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___27 = { 0, 0, 0, fmt_9998, 0 };
    static cilist io___65 = { 0, 0, 0, fmt_9997, 0 };
    static cilist io___73 = { 0, 0, 0, fmt_9995, 0 };
    static cilist io___74 = { 0, 0, 0, fmt_9996, 0 };
    static cilist io___75 = { 0, 0, 0, fmt_9995, 0 };
    static cilist io___76 = { 0, 0, 0, fmt_9996, 0 };
    static cilist io___77 = { 0, 0, 0, fmt_9995, 0 };
    static cilist io___78 = { 0, 0, 0, fmt_9996, 0 };
    static cilist io___79 = { 0, 0, 0, fmt_9995, 0 };
    static cilist io___80 = { 0, 0, 0, fmt_9996, 0 };



/*  -- LAPACK test routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CDRVGB tests the driver routines CGBSV and CGBSVX. */

/*  Arguments */
/*  ========= */

/*  DOTYPE  (input) LOGICAL array, dimension (NTYPES) */
/*          The matrix types to be used for testing.  Matrices of type j */
/*          (for 1 <= j <= NTYPES) are used for testing if DOTYPE(j) = */
/*          .TRUE.; if DOTYPE(j) = .FALSE., then type j is not used. */

/*  NN      (input) INTEGER */
/*          The number of values of N contained in the vector NVAL. */

/*  NVAL    (input) INTEGER array, dimension (NN) */
/*          The values of the matrix column dimension N. */

/*  NRHS    (input) INTEGER */
/*          The number of right hand side vectors to be generated for */
/*          each linear system. */

/*  THRESH  (input) REAL */
/*          The threshold value for the test ratios.  A result is */
/*          included in the output file if RESULT >= THRESH.  To have */
/*          every test ratio printed, use THRESH = 0. */

/*  TSTERR  (input) LOGICAL */
/*          Flag that indicates whether error exits are to be tested. */

/*  A       (workspace) COMPLEX array, dimension (LA) */

/*  LA      (input) INTEGER */
/*          The length of the array A.  LA >= (2*NMAX-1)*NMAX */
/*          where NMAX is the largest entry in NVAL. */

/*  AFB     (workspace) COMPLEX array, dimension (LAFB) */

/*  LAFB    (input) INTEGER */
/*          The length of the array AFB.  LAFB >= (3*NMAX-2)*NMAX */
/*          where NMAX is the largest entry in NVAL. */

/*  ASAV    (workspace) COMPLEX array, dimension (LA) */

/*  B       (workspace) COMPLEX array, dimension (NMAX*NRHS) */

/*  BSAV    (workspace) COMPLEX array, dimension (NMAX*NRHS) */

/*  X       (workspace) COMPLEX array, dimension (NMAX*NRHS) */

/*  XACT    (workspace) COMPLEX array, dimension (NMAX*NRHS) */

/*  S       (workspace) REAL array, dimension (2*NMAX) */

/*  WORK    (workspace) COMPLEX array, dimension */
/*                      (NMAX*max(3,NRHS,NMAX)) */

/*  RWORK   (workspace) REAL array, dimension */
/*                      (max(NMAX,2*NRHS)) */

/*  IWORK   (workspace) INTEGER array, dimension (NMAX) */

/*  NOUT    (input) INTEGER */
/*          The unit number for output. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Scalars in Common .. */
/*     .. */
/*     .. Common blocks .. */
/*     .. */
/*     .. Data statements .. */
    /* Parameter adjustments */
    --iwork;
    --rwork;
    --work;
    --s;
    --xact;
    --x;
    --bsav;
    --b;
    --asav;
    --afb;
    --a;
    --nval;
    --dotype;

    /* Function Body */
/*     .. */
/*     .. Executable Statements .. */

/*     Initialize constants and the random number seed. */

    s_copy(path, "Complex precision", (ftnlen)1, (ftnlen)17);
    s_copy(path + 1, "GB", (ftnlen)2, (ftnlen)2);
    nrun = 0;
    nfail = 0;
    nerrs = 0;
    for (i__ = 1; i__ <= 4; ++i__) {
	iseed[i__ - 1] = iseedy[i__ - 1];
/* L10: */
    }

/*     Test the error exits */

    if (*tsterr) {
	cerrvx_(path, nout);
    }
    infoc_1.infot = 0;

/*     Set the block size and minimum block size for testing. */

    nb = 1;
    nbmin = 2;
    xlaenv_(&c__1, &nb);
    xlaenv_(&c__2, &nbmin);

/*     Do for each value of N in NVAL */

    i__1 = *nn;
    for (in = 1; in <= i__1; ++in) {
	n = nval[in];
	ldb = max(n,1);
	*(unsigned char *)xtype = 'N';

/*        Set limits on the number of loop iterations. */

/* Computing MAX */
	i__2 = 1, i__3 = min(n,4);
	nkl = max(i__2,i__3);
	if (n == 0) {
	    nkl = 1;
	}
	nku = nkl;
	nimat = 8;
	if (n <= 0) {
	    nimat = 1;
	}

	i__2 = nkl;
	for (ikl = 1; ikl <= i__2; ++ikl) {

/*           Do for KL = 0, N-1, (3N-1)/4, and (N+1)/4. This order makes */
/*           it easier to skip redundant values for small values of N. */

	    if (ikl == 1) {
		kl = 0;
	    } else if (ikl == 2) {
/* Computing MAX */
		i__3 = n - 1;
		kl = max(i__3,0);
	    } else if (ikl == 3) {
		kl = (n * 3 - 1) / 4;
	    } else if (ikl == 4) {
		kl = (n + 1) / 4;
	    }
	    i__3 = nku;
	    for (iku = 1; iku <= i__3; ++iku) {

/*              Do for KU = 0, N-1, (3N-1)/4, and (N+1)/4. This order */
/*              makes it easier to skip redundant values for small */
/*              values of N. */

		if (iku == 1) {
		    ku = 0;
		} else if (iku == 2) {
/* Computing MAX */
		    i__4 = n - 1;
		    ku = max(i__4,0);
		} else if (iku == 3) {
		    ku = (n * 3 - 1) / 4;
		} else if (iku == 4) {
		    ku = (n + 1) / 4;
		}

/*              Check that A and AFB are big enough to generate this */
/*              matrix. */

		lda = kl + ku + 1;
		ldafb = (kl << 1) + ku + 1;
		if (lda * n > *la || ldafb * n > *lafb) {
		    if (nfail == 0 && nerrs == 0) {
			aladhd_(nout, path);
		    }
		    if (lda * n > *la) {
			io___26.ciunit = *nout;
			s_wsfe(&io___26);
			do_fio(&c__1, (char *)&(*la), (ftnlen)sizeof(integer))
				;
			do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&kl, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&ku, (ftnlen)sizeof(integer));
			i__4 = n * (kl + ku + 1);
			do_fio(&c__1, (char *)&i__4, (ftnlen)sizeof(integer));
			e_wsfe();
			++nerrs;
		    }
		    if (ldafb * n > *lafb) {
			io___27.ciunit = *nout;
			s_wsfe(&io___27);
			do_fio(&c__1, (char *)&(*lafb), (ftnlen)sizeof(
				integer));
			do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&kl, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&ku, (ftnlen)sizeof(integer));
			i__4 = n * ((kl << 1) + ku + 1);
			do_fio(&c__1, (char *)&i__4, (ftnlen)sizeof(integer));
			e_wsfe();
			++nerrs;
		    }
		    goto L130;
		}

		i__4 = nimat;
		for (imat = 1; imat <= i__4; ++imat) {

/*                 Do the tests only if DOTYPE( IMAT ) is true. */

		    if (! dotype[imat]) {
			goto L120;
		    }

/*                 Skip types 2, 3, or 4 if the matrix is too small. */

		    zerot = imat >= 2 && imat <= 4;
		    if (zerot && n < imat - 1) {
			goto L120;
		    }

/*                 Set up parameters with CLATB4 and generate a */
/*                 test matrix with CLATMS. */

		    clatb4_(path, &imat, &n, &n, type__, &kl, &ku, &anorm, &
			    mode, &cndnum, dist);
		    rcondc = 1.f / cndnum;

		    s_copy(srnamc_1.srnamt, "CLATMS", (ftnlen)6, (ftnlen)6);
		    clatms_(&n, &n, dist, iseed, type__, &rwork[1], &mode, &
			    cndnum, &anorm, &kl, &ku, "Z", &a[1], &lda, &work[
			    1], &info);

/*                 Check the error code from CLATMS. */

		    if (info != 0) {
			alaerh_(path, "CLATMS", &info, &c__0, " ", &n, &n, &
				kl, &ku, &c_n1, &imat, &nfail, &nerrs, nout);
			goto L120;
		    }

/*                 For types 2, 3, and 4, zero one or more columns of */
/*                 the matrix to test that INFO is returned correctly. */

		    izero = 0;
		    if (zerot) {
			if (imat == 2) {
			    izero = 1;
			} else if (imat == 3) {
			    izero = n;
			} else {
			    izero = n / 2 + 1;
			}
			ioff = (izero - 1) * lda;
			if (imat < 4) {
/* Computing MAX */
			    i__5 = 1, i__6 = ku + 2 - izero;
			    i1 = max(i__5,i__6);
/* Computing MIN */
			    i__5 = kl + ku + 1, i__6 = ku + 1 + (n - izero);
			    i2 = min(i__5,i__6);
			    i__5 = i2;
			    for (i__ = i1; i__ <= i__5; ++i__) {
				i__6 = ioff + i__;
				a[i__6].r = 0.f, a[i__6].i = 0.f;
/* L20: */
			    }
			} else {
			    i__5 = n;
			    for (j = izero; j <= i__5; ++j) {
/* Computing MAX */
				i__6 = 1, i__7 = ku + 2 - j;
/* Computing MIN */
				i__9 = kl + ku + 1, i__10 = ku + 1 + (n - j);
				i__8 = min(i__9,i__10);
				for (i__ = max(i__6,i__7); i__ <= i__8; ++i__)
					 {
				    i__6 = ioff + i__;
				    a[i__6].r = 0.f, a[i__6].i = 0.f;
/* L30: */
				}
				ioff += lda;
/* L40: */
			    }
			}
		    }

/*                 Save a copy of the matrix A in ASAV. */

		    i__5 = kl + ku + 1;
		    clacpy_("Full", &i__5, &n, &a[1], &lda, &asav[1], &lda);

		    for (iequed = 1; iequed <= 4; ++iequed) {
			*(unsigned char *)equed = *(unsigned char *)&equeds[
				iequed - 1];
			if (iequed == 1) {
			    nfact = 3;
			} else {
			    nfact = 1;
			}

			i__5 = nfact;
			for (ifact = 1; ifact <= i__5; ++ifact) {
			    *(unsigned char *)fact = *(unsigned char *)&facts[
				    ifact - 1];
			    prefac = lsame_(fact, "F");
			    nofact = lsame_(fact, "N");
			    equil = lsame_(fact, "E");

			    if (zerot) {
				if (prefac) {
				    goto L100;
				}
				rcondo = 0.f;
				rcondi = 0.f;

			    } else if (! nofact) {

/*                          Compute the condition number for comparison */
/*                          with the value returned by CGBSVX (FACT = */
/*                          'N' reuses the condition number from the */
/*                          previous iteration with FACT = 'F'). */

				i__8 = kl + ku + 1;
				clacpy_("Full", &i__8, &n, &asav[1], &lda, &
					afb[kl + 1], &ldafb);
				if (equil || iequed > 1) {

/*                             Compute row and column scale factors to */
/*                             equilibrate the matrix A. */

				    cgbequ_(&n, &n, &kl, &ku, &afb[kl + 1], &
					    ldafb, &s[1], &s[n + 1], &rowcnd, 
					    &colcnd, &amax, &info);
				    if (info == 0 && n > 0) {
					if (lsame_(equed, "R")) {
					    rowcnd = 0.f;
					    colcnd = 1.f;
					} else if (lsame_(equed, "C")) {
					    rowcnd = 1.f;
					    colcnd = 0.f;
					} else if (lsame_(equed, "B")) {
					    rowcnd = 0.f;
					    colcnd = 0.f;
					}

/*                                Equilibrate the matrix. */

					claqgb_(&n, &n, &kl, &ku, &afb[kl + 1]
, &ldafb, &s[1], &s[n + 1], &
						rowcnd, &colcnd, &amax, equed);
				    }
				}

/*                          Save the condition number of the */
/*                          non-equilibrated system for use in CGET04. */

				if (equil) {
				    roldo = rcondo;
				    roldi = rcondi;
				}

/*                          Compute the 1-norm and infinity-norm of A. */

				anormo = clangb_("1", &n, &kl, &ku, &afb[kl + 
					1], &ldafb, &rwork[1]);
				anormi = clangb_("I", &n, &kl, &ku, &afb[kl + 
					1], &ldafb, &rwork[1]);

/*                          Factor the matrix A. */

				cgbtrf_(&n, &n, &kl, &ku, &afb[1], &ldafb, &
					iwork[1], &info);

/*                          Form the inverse of A. */

				claset_("Full", &n, &n, &c_b48, &c_b49, &work[
					1], &ldb);
				s_copy(srnamc_1.srnamt, "CGBTRS", (ftnlen)6, (
					ftnlen)6);
				cgbtrs_("No transpose", &n, &kl, &ku, &n, &
					afb[1], &ldafb, &iwork[1], &work[1], &
					ldb, &info);

/*                          Compute the 1-norm condition number of A. */

				ainvnm = clange_("1", &n, &n, &work[1], &ldb, 
					&rwork[1]);
				if (anormo <= 0.f || ainvnm <= 0.f) {
				    rcondo = 1.f;
				} else {
				    rcondo = 1.f / anormo / ainvnm;
				}

/*                          Compute the infinity-norm condition number */
/*                          of A. */

				ainvnm = clange_("I", &n, &n, &work[1], &ldb, 
					&rwork[1]);
				if (anormi <= 0.f || ainvnm <= 0.f) {
				    rcondi = 1.f;
				} else {
				    rcondi = 1.f / anormi / ainvnm;
				}
			    }

			    for (itran = 1; itran <= 3; ++itran) {

/*                          Do for each value of TRANS. */

				*(unsigned char *)trans = *(unsigned char *)&
					transs[itran - 1];
				if (itran == 1) {
				    rcondc = rcondo;
				} else {
				    rcondc = rcondi;
				}

/*                          Restore the matrix A. */

				i__8 = kl + ku + 1;
				clacpy_("Full", &i__8, &n, &asav[1], &lda, &a[
					1], &lda);

/*                          Form an exact solution and set the right hand */
/*                          side. */

				s_copy(srnamc_1.srnamt, "CLARHS", (ftnlen)6, (
					ftnlen)6);
				clarhs_(path, xtype, "Full", trans, &n, &n, &
					kl, &ku, nrhs, &a[1], &lda, &xact[1], 
					&ldb, &b[1], &ldb, iseed, &info);
				*(unsigned char *)xtype = 'C';
				clacpy_("Full", &n, nrhs, &b[1], &ldb, &bsav[
					1], &ldb);

				if (nofact && itran == 1) {

/*                             --- Test CGBSV  --- */

/*                             Compute the LU factorization of the matrix */
/*                             and solve the system. */

				    i__8 = kl + ku + 1;
				    clacpy_("Full", &i__8, &n, &a[1], &lda, &
					    afb[kl + 1], &ldafb);
				    clacpy_("Full", &n, nrhs, &b[1], &ldb, &x[
					    1], &ldb);

				    s_copy(srnamc_1.srnamt, "CGBSV ", (ftnlen)
					    6, (ftnlen)6);
				    cgbsv_(&n, &kl, &ku, nrhs, &afb[1], &
					    ldafb, &iwork[1], &x[1], &ldb, &
					    info);

/*                             Check error code from CGBSV . */

				    if (info != izero) {
					alaerh_(path, "CGBSV ", &info, &izero, 
						 " ", &n, &n, &kl, &ku, nrhs, 
						&imat, &nfail, &nerrs, nout);
				    }

/*                             Reconstruct matrix from factors and */
/*                             compute residual. */

				    cgbt01_(&n, &n, &kl, &ku, &a[1], &lda, &
					    afb[1], &ldafb, &iwork[1], &work[
					    1], result);
				    nt = 1;
				    if (izero == 0) {

/*                                Compute residual of the computed */
/*                                solution. */

					clacpy_("Full", &n, nrhs, &b[1], &ldb, 
						 &work[1], &ldb);
					cgbt02_("No transpose", &n, &n, &kl, &
						ku, nrhs, &a[1], &lda, &x[1], 
						&ldb, &work[1], &ldb, &result[
						1]);

/*                                Check solution from generated exact */
/*                                solution. */

					cget04_(&n, nrhs, &x[1], &ldb, &xact[
						1], &ldb, &rcondc, &result[2])
						;
					nt = 3;
				    }

/*                             Print information about the tests that did */
/*                             not pass the threshold. */

				    i__8 = nt;
				    for (k = 1; k <= i__8; ++k) {
					if (result[k - 1] >= *thresh) {
					    if (nfail == 0 && nerrs == 0) {
			  aladhd_(nout, path);
					    }
					    io___65.ciunit = *nout;
					    s_wsfe(&io___65);
					    do_fio(&c__1, "CGBSV ", (ftnlen)6)
						    ;
					    do_fio(&c__1, (char *)&n, (ftnlen)
						    sizeof(integer));
					    do_fio(&c__1, (char *)&kl, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&ku, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&imat, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&k, (ftnlen)
						    sizeof(integer));
					    do_fio(&c__1, (char *)&result[k - 
						    1], (ftnlen)sizeof(real));
					    e_wsfe();
					    ++nfail;
					}
/* L50: */
				    }
				    nrun += nt;
				}

/*                          --- Test CGBSVX --- */

				if (! prefac) {
				    i__8 = (kl << 1) + ku + 1;
				    claset_("Full", &i__8, &n, &c_b48, &c_b48, 
					     &afb[1], &ldafb);
				}
				claset_("Full", &n, nrhs, &c_b48, &c_b48, &x[
					1], &ldb);
				if (iequed > 1 && n > 0) {

/*                             Equilibrate the matrix if FACT = 'F' and */
/*                             EQUED = 'R', 'C', or 'B'. */

				    claqgb_(&n, &n, &kl, &ku, &a[1], &lda, &s[
					    1], &s[n + 1], &rowcnd, &colcnd, &
					    amax, equed);
				}

/*                          Solve the system and compute the condition */
/*                          number and error bounds using CGBSVX. */

				s_copy(srnamc_1.srnamt, "CGBSVX", (ftnlen)6, (
					ftnlen)6);
				cgbsvx_(fact, trans, &n, &kl, &ku, nrhs, &a[1]
, &lda, &afb[1], &ldafb, &iwork[1], 
					equed, &s[1], &s[ldb + 1], &b[1], &
					ldb, &x[1], &ldb, &rcond, &rwork[1], &
					rwork[*nrhs + 1], &work[1], &rwork[(*
					nrhs << 1) + 1], &info);

/*                          Check the error code from CGBSVX. */

				if (info != izero) {
/* Writing concatenation */
				    i__11[0] = 1, a__1[0] = fact;
				    i__11[1] = 1, a__1[1] = trans;
				    s_cat(ch__1, a__1, i__11, &c__2, (ftnlen)
					    2);
				    alaerh_(path, "CGBSVX", &info, &izero, 
					    ch__1, &n, &n, &kl, &ku, nrhs, &
					    imat, &nfail, &nerrs, nout);
				}
/*                          Compare RWORK(2*NRHS+1) from CGBSVX with the */
/*                          computed reciprocal pivot growth RPVGRW */

				if (info != 0) {
				    anrmpv = 0.f;
				    i__8 = info;
				    for (j = 1; j <= i__8; ++j) {
/* Computing MAX */
					i__6 = ku + 2 - j;
/* Computing MIN */
					i__9 = n + ku + 1 - j, i__10 = kl + 
						ku + 1;
					i__7 = min(i__9,i__10);
					for (i__ = max(i__6,1); i__ <= i__7; 
						++i__) {
/* Computing MAX */
					    r__1 = anrmpv, r__2 = c_abs(&a[
						    i__ + (j - 1) * lda]);
					    anrmpv = dmax(r__1,r__2);
/* L60: */
					}
/* L70: */
				    }
/* Computing MIN */
				    i__7 = info - 1, i__6 = kl + ku;
				    i__8 = min(i__7,i__6);
/* Computing MAX */
				    i__9 = 1, i__10 = kl + ku + 2 - info;
				    rpvgrw = clantb_("M", "U", "N", &info, &
					    i__8, &afb[max(i__9, i__10)], &
					    ldafb, rdum);
				    if (rpvgrw == 0.f) {
					rpvgrw = 1.f;
				    } else {
					rpvgrw = anrmpv / rpvgrw;
				    }
				} else {
				    i__8 = kl + ku;
				    rpvgrw = clantb_("M", "U", "N", &n, &i__8, 
					     &afb[1], &ldafb, rdum);
				    if (rpvgrw == 0.f) {
					rpvgrw = 1.f;
				    } else {
					rpvgrw = clangb_("M", &n, &kl, &ku, &
						a[1], &lda, rdum) /
						 rpvgrw;
				    }
				}
/* Computing MAX */
				r__2 = rwork[(*nrhs << 1) + 1];
				result[6] = (r__1 = rpvgrw - rwork[(*nrhs << 
					1) + 1], dabs(r__1)) / dmax(r__2,
					rpvgrw) / slamch_("E");

				if (! prefac) {

/*                             Reconstruct matrix from factors and */
/*                             compute residual. */

				    cgbt01_(&n, &n, &kl, &ku, &a[1], &lda, &
					    afb[1], &ldafb, &iwork[1], &work[
					    1], result);
				    k1 = 1;
				} else {
				    k1 = 2;
				}

				if (info == 0) {
				    trfcon = FALSE_;

/*                             Compute residual of the computed solution. */

				    clacpy_("Full", &n, nrhs, &bsav[1], &ldb, 
					    &work[1], &ldb);
				    cgbt02_(trans, &n, &n, &kl, &ku, nrhs, &
					    asav[1], &lda, &x[1], &ldb, &work[
					    1], &ldb, &result[1]);

/*                             Check solution from generated exact */
/*                             solution. */

				    if (nofact || prefac && lsame_(equed, 
					    "N")) {
					cget04_(&n, nrhs, &x[1], &ldb, &xact[
						1], &ldb, &rcondc, &result[2])
						;
				    } else {
					if (itran == 1) {
					    roldc = roldo;
					} else {
					    roldc = roldi;
					}
					cget04_(&n, nrhs, &x[1], &ldb, &xact[
						1], &ldb, &roldc, &result[2]);
				    }

/*                             Check the error bounds from iterative */
/*                             refinement. */

				    cgbt05_(trans, &n, &kl, &ku, nrhs, &asav[
					    1], &lda, &bsav[1], &ldb, &x[1], &
					    ldb, &xact[1], &ldb, &rwork[1], &
					    rwork[*nrhs + 1], &result[3]);
				} else {
				    trfcon = TRUE_;
				}

/*                          Compare RCOND from CGBSVX with the computed */
/*                          value in RCONDC. */

				result[5] = sget06_(&rcond, &rcondc);

/*                          Print information about the tests that did */
/*                          not pass the threshold. */

				if (! trfcon) {
				    for (k = k1; k <= 7; ++k) {
					if (result[k - 1] >= *thresh) {
					    if (nfail == 0 && nerrs == 0) {
			  aladhd_(nout, path);
					    }
					    if (prefac) {
			  io___73.ciunit = *nout;
			  s_wsfe(&io___73);
			  do_fio(&c__1, "CGBSVX", (ftnlen)6);
			  do_fio(&c__1, fact, (ftnlen)1);
			  do_fio(&c__1, trans, (ftnlen)1);
			  do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
			  do_fio(&c__1, (char *)&kl, (ftnlen)sizeof(integer));
			  do_fio(&c__1, (char *)&ku, (ftnlen)sizeof(integer));
			  do_fio(&c__1, equed, (ftnlen)1);
			  do_fio(&c__1, (char *)&imat, (ftnlen)sizeof(integer)
				  );
			  do_fio(&c__1, (char *)&k, (ftnlen)sizeof(integer));
			  do_fio(&c__1, (char *)&result[k - 1], (ftnlen)
				  sizeof(real));
			  e_wsfe();
					    } else {
			  io___74.ciunit = *nout;
			  s_wsfe(&io___74);
			  do_fio(&c__1, "CGBSVX", (ftnlen)6);
			  do_fio(&c__1, fact, (ftnlen)1);
			  do_fio(&c__1, trans, (ftnlen)1);
			  do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
			  do_fio(&c__1, (char *)&kl, (ftnlen)sizeof(integer));
			  do_fio(&c__1, (char *)&ku, (ftnlen)sizeof(integer));
			  do_fio(&c__1, (char *)&imat, (ftnlen)sizeof(integer)
				  );
			  do_fio(&c__1, (char *)&k, (ftnlen)sizeof(integer));
			  do_fio(&c__1, (char *)&result[k - 1], (ftnlen)
				  sizeof(real));
			  e_wsfe();
					    }
					    ++nfail;
					}
/* L80: */
				    }
				    nrun = nrun + 7 - k1;
				} else {
				    if (result[0] >= *thresh && ! prefac) {
					if (nfail == 0 && nerrs == 0) {
					    aladhd_(nout, path);
					}
					if (prefac) {
					    io___75.ciunit = *nout;
					    s_wsfe(&io___75);
					    do_fio(&c__1, "CGBSVX", (ftnlen)6)
						    ;
					    do_fio(&c__1, fact, (ftnlen)1);
					    do_fio(&c__1, trans, (ftnlen)1);
					    do_fio(&c__1, (char *)&n, (ftnlen)
						    sizeof(integer));
					    do_fio(&c__1, (char *)&kl, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&ku, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, equed, (ftnlen)1);
					    do_fio(&c__1, (char *)&imat, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&c__1, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&result[0], 
						    (ftnlen)sizeof(real));
					    e_wsfe();
					} else {
					    io___76.ciunit = *nout;
					    s_wsfe(&io___76);
					    do_fio(&c__1, "CGBSVX", (ftnlen)6)
						    ;
					    do_fio(&c__1, fact, (ftnlen)1);
					    do_fio(&c__1, trans, (ftnlen)1);
					    do_fio(&c__1, (char *)&n, (ftnlen)
						    sizeof(integer));
					    do_fio(&c__1, (char *)&kl, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&ku, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&imat, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&c__1, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&result[0], 
						    (ftnlen)sizeof(real));
					    e_wsfe();
					}
					++nfail;
					++nrun;
				    }
				    if (result[5] >= *thresh) {
					if (nfail == 0 && nerrs == 0) {
					    aladhd_(nout, path);
					}
					if (prefac) {
					    io___77.ciunit = *nout;
					    s_wsfe(&io___77);
					    do_fio(&c__1, "CGBSVX", (ftnlen)6)
						    ;
					    do_fio(&c__1, fact, (ftnlen)1);
					    do_fio(&c__1, trans, (ftnlen)1);
					    do_fio(&c__1, (char *)&n, (ftnlen)
						    sizeof(integer));
					    do_fio(&c__1, (char *)&kl, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&ku, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, equed, (ftnlen)1);
					    do_fio(&c__1, (char *)&imat, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&c__6, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&result[5], 
						    (ftnlen)sizeof(real));
					    e_wsfe();
					} else {
					    io___78.ciunit = *nout;
					    s_wsfe(&io___78);
					    do_fio(&c__1, "CGBSVX", (ftnlen)6)
						    ;
					    do_fio(&c__1, fact, (ftnlen)1);
					    do_fio(&c__1, trans, (ftnlen)1);
					    do_fio(&c__1, (char *)&n, (ftnlen)
						    sizeof(integer));
					    do_fio(&c__1, (char *)&kl, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&ku, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&imat, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&c__6, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&result[5], 
						    (ftnlen)sizeof(real));
					    e_wsfe();
					}
					++nfail;
					++nrun;
				    }
				    if (result[6] >= *thresh) {
					if (nfail == 0 && nerrs == 0) {
					    aladhd_(nout, path);
					}
					if (prefac) {
					    io___79.ciunit = *nout;
					    s_wsfe(&io___79);
					    do_fio(&c__1, "CGBSVX", (ftnlen)6)
						    ;
					    do_fio(&c__1, fact, (ftnlen)1);
					    do_fio(&c__1, trans, (ftnlen)1);
					    do_fio(&c__1, (char *)&n, (ftnlen)
						    sizeof(integer));
					    do_fio(&c__1, (char *)&kl, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&ku, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, equed, (ftnlen)1);
					    do_fio(&c__1, (char *)&imat, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&c__7, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&result[6], 
						    (ftnlen)sizeof(real));
					    e_wsfe();
					} else {
					    io___80.ciunit = *nout;
					    s_wsfe(&io___80);
					    do_fio(&c__1, "CGBSVX", (ftnlen)6)
						    ;
					    do_fio(&c__1, fact, (ftnlen)1);
					    do_fio(&c__1, trans, (ftnlen)1);
					    do_fio(&c__1, (char *)&n, (ftnlen)
						    sizeof(integer));
					    do_fio(&c__1, (char *)&kl, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&ku, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&imat, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&c__7, (
						    ftnlen)sizeof(integer));
					    do_fio(&c__1, (char *)&result[6], 
						    (ftnlen)sizeof(real));
					    e_wsfe();
					}
					++nfail;
					++nrun;
				    }
				}
/* L90: */
			    }
L100:
			    ;
			}
/* L110: */
		    }
L120:
		    ;
		}
L130:
		;
	    }
/* L140: */
	}
/* L150: */
    }

/*     Print a summary of the results. */

    alasvm_(path, nout, &nfail, &nrun, &nerrs);


    return 0;

/*     End of CDRVGB */

} /* cdrvgb_ */
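
CDRVGB's seventh test ratio (result[6] above) checks the reciprocal pivot growth reported by CGBSVX in RWORK(2*NRHS+1) against a value recomputed from the factored matrix. A minimal sketch of that comparison for scalar inputs is shown below, assuming machine epsilon in place of slamch_("E"); the helper name pivot_growth_ratio is hypothetical.

#include <float.h>
#include <math.h>

/* |rpvgrw_ref - rpvgrw_svx| / max(rpvgrw_ref, rpvgrw_svx) / eps, mirroring
   the expression that fills result[6] in CDRVGB.  Both inputs are expected
   to be positive (the driver substitutes 1.0 when a growth factor is 0).   */
static float pivot_growth_ratio(float rpvgrw_ref, float rpvgrw_svx)
{
    float denom = rpvgrw_ref > rpvgrw_svx ? rpvgrw_ref : rpvgrw_svx;
    return fabsf(rpvgrw_ref - rpvgrw_svx) / denom / FLT_EPSILON;
}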
Example No. 19
0
void
cgsrfs(trans_t trans, SuperMatrix *A, SuperMatrix *L, SuperMatrix *U,
       int *perm_r, int *perm_c, equed_t equed, float *R, float *C,
       SuperMatrix *B, SuperMatrix *X, float *ferr, float *berr,
       Gstat_t *Gstat, int *info)
{
/*
 * -- SuperLU MT routine (version 2.0) --
 * Lawrence Berkeley National Lab, Univ. of California Berkeley,
 * and Xerox Palo Alto Research Center.
 * September 10, 2007
 *
 *
 * Purpose
 * =======   
 *
 * cgsrfs improves the computed solution to a system of linear
 * equations and provides error bounds and backward error estimates for
 * the solution.
 *
 * See supermatrix.h for the definition of 'SuperMatrix' structure.
 *
 * Arguments
 * =========
 *
 * trans   (input) trans_t
 *         Specifies the form of the system of equations:
 *         = NOTRANS:  A * X = B     (No transpose)
 *         = TRANS:    A**T * X = B  (Transpose)
 *         = CONJ:     A**H * X = B  (Conjugate transpose)
 *
 * A       (input) SuperMatrix*
 *         The original matrix A in the system, or the scaled A if
 *         equilibration was done. The type of A can be:
 *         Stype = SLU_NC, Dtype = SLU_C, Mtype = SLU_GE.
 *
 * L       (input) SuperMatrix*
 *         The factor L from the factorization Pr*A*Pc=L*U. Use
 *         compressed row subscripts storage for supernodes,
 *         i.e., L has types: Stype = SLU_SCP, Dtype = SLU_C, Mtype = SLU_TRLU.
 *
 * U       (input) SuperMatrix*
 *         The factor U from the factorization Pr*A*Pc=L*U as computed by
 *         cgstrf(). Use column-wise storage scheme,
 *         i.e., U has types: Stype = SLU_NCP, Dtype = SLU_C, Mtype = SLU_TRU.
 *
 * perm_r  (input) int*, dimension (A->nrow)
 *         Row permutation vector, which defines the permutation matrix Pr;
 *         perm_r[i] = j means row i of A is in position j in Pr*A.
 *
 * perm_c  (input) int*, dimension (A->ncol)
 *         Column permutation vector, which defines the
 *         permutation matrix Pc; perm_c[i] = j means column i of A is 
 *         in position j in A*Pc.
 *
 * equed   (input) equed_t
 *         Specifies the form of equilibration that was done.
 *         = NOEQUIL: No equilibration.
 *         = ROW:  Row equilibration, i.e., A was premultiplied by diag(R).
 *         = COL:  Column equilibration, i.e., A was postmultiplied by
 *                 diag(C).
 *         = BOTH: Both row and column equilibration, i.e., A was replaced
 *                 by diag(R)*A*diag(C).
 *
 * R       (input) float*, dimension (A->nrow)
 *         The row scale factors for A.
 *         If equed = ROW or BOTH, A is premultiplied by diag(R).
 *         If equed = NOEQUIL or COL, R is not accessed.
 *
 * C       (input) float*, dimension (A->ncol)
 *         The column scale factors for A.
 *         If equed = COL or BOTH, A is postmultiplied by diag(C).
 *         If equed = NOEQUIL or ROW, C is not accessed.
 *
 * B       (input) SuperMatrix*
 *         B has types: Stype = SLU_DN, Dtype = SLU_C, Mtype = SLU_GE.
 *         The right hand side matrix B.
 *
 * X       (input/output) SuperMatrix*
 *         X has types: Stype = SLU_DN, Dtype = SLU_C, Mtype = SLU_GE.
 *         On entry, the solution matrix X, as computed by cgstrs().
 *         On exit, the improved solution matrix X.
 *
 * FERR    (output) float*, dimension (B->ncol)
 *         The estimated forward error bound for each solution vector
 *         X(j) (the j-th column of the solution matrix X).
 *         If XTRUE is the true solution corresponding to X(j), FERR(j)
 *         is an estimated upper bound for the magnitude of the largest
 *         element in (X(j) - XTRUE) divided by the magnitude of the
 *         largest element in X(j).  The estimate is as reliable as
 *         the estimate for RCOND, and is almost always a slight
 *         overestimate of the true error.
 *
 * BERR    (output) float*, dimension (B->ncol)
 *         The componentwise relative backward error of each solution
 *         vector X(j) (i.e., the smallest relative change in
 *         any element of A or B that makes X(j) an exact solution).
 *
 * info    (output) int*
 *         = 0:  successful exit
 *         < 0:  if INFO = -i, the i-th argument had an illegal value
 *
 * Internal Parameters
 * ===================
 *
 * ITMAX is the maximum number of steps of iterative refinement.
 *
 */

#define ITMAX 5
    
    /* Table of constant values */
    int    ione = 1;
    complex ndone = {-1., 0.};
    complex done = {1., 0.};
    
    /* Local variables */
    NCformat *Astore;
    complex   *Aval;
    SuperMatrix Bjcol;
    DNformat *Bstore, *Xstore, *Bjcol_store;
    complex   *Bmat, *Xmat, *Bptr, *Xptr;
    int      kase;
    float   safe1, safe2;
    int      i, j, k, irow, nz, count, notran, rowequ, colequ;
    int      ldb, ldx, nrhs;
    float   s, xk, lstres, eps, safmin;
    char     transc[1];
    trans_t  transt;
    complex   *work;
    float   *rwork;
    int      *iwork;
    extern double slamch_(char *);
    extern int clacon_(int *, complex *, complex *, float *, int *);
#ifdef _CRAY
    extern int CCOPY(int *, complex *, int *, complex *, int *);
    extern int CSAXPY(int *, complex *, complex *, int *, complex *, int *);
#else
    extern int ccopy_(int *, complex *, int *, complex *, int *);
    extern int caxpy_(int *, complex *, complex *, int *, complex *, int *);
#endif

    Astore = A->Store;
    Aval   = Astore->nzval;
    Bstore = B->Store;
    Xstore = X->Store;
    Bmat   = Bstore->nzval;
    Xmat   = Xstore->nzval;
    ldb    = Bstore->lda;
    ldx    = Xstore->lda;
    nrhs   = B->ncol;
    
    /* Test the input parameters */
    *info = 0;
    notran = (trans == NOTRANS);
    if ( !notran && trans != TRANS && trans != CONJ ) *info = -1;
    else if ( A->nrow != A->ncol || A->nrow < 0 ||
	      A->Stype != SLU_NC || A->Dtype != SLU_C || A->Mtype != SLU_GE )
	*info = -2;
    else if ( L->nrow != L->ncol || L->nrow < 0 ||
 	      L->Stype != SLU_SCP || L->Dtype != SLU_C || L->Mtype != SLU_TRLU )
	*info = -3;
    else if ( U->nrow != U->ncol || U->nrow < 0 ||
 	      U->Stype != SLU_NCP || U->Dtype != SLU_C || U->Mtype != SLU_TRU )
	*info = -4;
    else if ( ldb < SUPERLU_MAX(0, A->nrow) ||
 	      B->Stype != SLU_DN || B->Dtype != SLU_C || B->Mtype != SLU_GE )
        *info = -10;
    else if ( ldx < SUPERLU_MAX(0, A->nrow) ||
 	      X->Stype != SLU_DN || X->Dtype != SLU_C || X->Mtype != SLU_GE )
	*info = -11;
    if (*info != 0) {
	i = -(*info);
	xerbla_("cgsrfs", &i);
	return;
    }

    /* Quick return if possible */
    if ( A->nrow == 0 || nrhs == 0) {
	for (j = 0; j < nrhs; ++j) {
	    ferr[j] = 0.;
	    berr[j] = 0.;
	}
	return;
    }

    rowequ = (equed == ROW) || (equed == BOTH);
    colequ = (equed == COL) || (equed == BOTH);
    
    /* Allocate working space */
    work = complexMalloc(2*A->nrow);
    rwork = (float *) SUPERLU_MALLOC( (size_t) A->nrow * sizeof(float) );
    iwork = intMalloc(A->nrow);
    if ( !work || !rwork || !iwork ) 
        SUPERLU_ABORT("Malloc fails for work/rwork/iwork.");
    
    if ( notran ) {
	*(unsigned char *)transc = 'N';
        transt = TRANS;
    } else {
	*(unsigned char *)transc = 'T';
	transt = NOTRANS;
    }

    /* NZ = maximum number of nonzero elements in each row of A, plus 1 */
    nz     = A->ncol + 1;
    eps    = slamch_("Epsilon");
    safmin = slamch_("Safe minimum");
    /* Set SAFE1 essentially to be the underflow threshold times the
       number of additions in each row. */
    safe1  = nz * safmin;
    safe2  = safe1 / eps;

    /* Compute the number of nonzeros in each row (or column) of A */
    for (i = 0; i < A->nrow; ++i) iwork[i] = 0;
    if ( notran ) {
	for (k = 0; k < A->ncol; ++k)
	    for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) 
		++iwork[Astore->rowind[i]];
    } else {
	for (k = 0; k < A->ncol; ++k)
	    iwork[k] = Astore->colptr[k+1] - Astore->colptr[k];
    }	

    /* Copy one column of RHS B into Bjcol. */
    Bjcol.Stype = B->Stype;
    Bjcol.Dtype = B->Dtype;
    Bjcol.Mtype = B->Mtype;
    Bjcol.nrow  = B->nrow;
    Bjcol.ncol  = 1;
    Bjcol.Store = (void *) SUPERLU_MALLOC( sizeof(DNformat) );
    if ( !Bjcol.Store ) SUPERLU_ABORT("SUPERLU_MALLOC fails for Bjcol.Store");
    Bjcol_store = Bjcol.Store;
    Bjcol_store->lda = ldb;
    Bjcol_store->nzval = work; /* address aliasing */
	
    /* Do for each right hand side ... */
    for (j = 0; j < nrhs; ++j) {
	count = 0;
	lstres = 3.;
	Bptr = &Bmat[j*ldb];
	Xptr = &Xmat[j*ldx];

	while (1) { /* Loop until stopping criterion is satisfied. */

	    /* Compute residual R = B - op(A) * X,   
	       where op(A) = A, A**T, or A**H, depending on TRANS. */
	    
#ifdef _CRAY
	    CCOPY(&A->nrow, Bptr, &ione, work, &ione);
#else
	    ccopy_(&A->nrow, Bptr, &ione, work, &ione);
#endif
	    sp_cgemv(transc, ndone, A, Xptr, ione, done, work, ione);

	    /* Compute componentwise relative backward error from formula 
	       max(i) ( abs(R(i)) / ( abs(op(A))*abs(X) + abs(B) )(i) )   
	       where abs(Z) is the componentwise absolute value of the matrix
	       or vector Z.  If the i-th component of the denominator is less
	       than SAFE2, then SAFE1 is added to the i-th component of the   
	       numerator before dividing. */

	    for (i = 0; i < A->nrow; ++i) rwork[i] = c_abs1( &Bptr[i] );
	    
	    /* Compute abs(op(A))*abs(X) + abs(B). */
	    if (notran) {
		for (k = 0; k < A->ncol; ++k) {
		    xk = c_abs1( &Xptr[k] );
		    for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i)
			rwork[Astore->rowind[i]] += c_abs1(&Aval[i]) * xk;
		}
	    } else {
		for (k = 0; k < A->ncol; ++k) {
		    s = 0.;
		    for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) {
			irow = Astore->rowind[i];
			s += c_abs1(&Aval[i]) * c_abs1(&Xptr[irow]);
		    }
		    rwork[k] += s;
		}
	    }
	    s = 0.;
	    for (i = 0; i < A->nrow; ++i) {
		if (rwork[i] > safe2) {
		    s = SUPERLU_MAX( s, c_abs1(&work[i]) / rwork[i] );
		} else if ( rwork[i] != 0.0 ) {
		    s = SUPERLU_MAX( s, (c_abs1(&work[i]) + safe1) / rwork[i] );
                }
                /* If rwork[i] is exactly 0.0, then we know the true 
                   residual also must be exactly 0.0. */
	    }
	    berr[j] = s;

	    /* Test stopping criterion. Continue iterating if   
	       1) The residual BERR(J) is larger than machine epsilon, and   
	       2) BERR(J) decreased by at least a factor of 2 during the   
	          last iteration, and   
	       3) At most ITMAX iterations tried. */

	    if (berr[j] > eps && berr[j] * 2. <= lstres && count < ITMAX) {
		/* Update solution and try again. */
		cgstrs (trans, L, U, perm_r, perm_c, &Bjcol, Gstat, info);
		
#ifdef _CRAY
		CAXPY(&A->nrow, &done, work, &ione,
		       &Xmat[j*ldx], &ione);
#else
		caxpy_(&A->nrow, &done, work, &ione,
		       &Xmat[j*ldx], &ione);
#endif
		lstres = berr[j];
		++count;
	    } else {
		break;
	    }
        
	} /* end while */

	/* Bound error from formula:
	   norm(X - XTRUE) / norm(X) .le. FERR = norm( abs(inv(op(A)))*   
	   ( abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) / norm(X)   
          where   
            norm(Z) is the magnitude of the largest component of Z   
            inv(op(A)) is the inverse of op(A)   
            abs(Z) is the componentwise absolute value of the matrix or
	       vector Z   
            NZ is the maximum number of nonzeros in any row of A, plus 1   
            EPS is machine epsilon   

          The i-th component of abs(R)+NZ*EPS*(abs(op(A))*abs(X)+abs(B))   
          is incremented by SAFE1 if the i-th component of   
          abs(op(A))*abs(X) + abs(B) is less than SAFE2.   

          Use CLACON to estimate the infinity-norm of the matrix   
             inv(op(A)) * diag(W),   
          where W = abs(R) + NZ*EPS*( abs(op(A))*abs(X)+abs(B) ))) */
	
	for (i = 0; i < A->nrow; ++i) rwork[i] = c_abs1( &Bptr[i] );
	
	/* Compute abs(op(A))*abs(X) + abs(B). */
	if ( notran ) {
	    for (k = 0; k < A->ncol; ++k) {
		xk = c_abs1( &Xptr[k] );
		for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i)
		    rwork[Astore->rowind[i]] += c_abs1(&Aval[i]) * xk;
	    }
	} else {
	    for (k = 0; k < A->ncol; ++k) {
		s = 0.;
		for (i = Astore->colptr[k]; i < Astore->colptr[k+1]; ++i) {
		    irow = Astore->rowind[i];
		    xk = c_abs1( &Xptr[irow] );
		    s += c_abs1(&Aval[i]) * xk;
		}
		rwork[k] += s;
	    }
	}
	
	for (i = 0; i < A->nrow; ++i)
	    if (rwork[i] > safe2)
		rwork[i] = c_abs(&work[i]) + (iwork[i]+1)*eps*rwork[i];
	    else
		rwork[i] = c_abs(&work[i])+(iwork[i]+1)*eps*rwork[i]+safe1;
	kase = 0;

	do {
	    clacon_(&A->nrow, &work[A->nrow], work,
		    &ferr[j], &kase);
	    if (kase == 0) break;

	    if (kase == 1) {
		/* Multiply by diag(W)*inv(op(A)**T)*(diag(C) or diag(R)). */
		if ( notran && colequ )
		    for (i = 0; i < A->ncol; ++i) {
		        cs_mult(&work[i], &work[i], C[i]);
	            }
		else if ( !notran && rowequ )
		    for (i = 0; i < A->nrow; ++i) {
		        cs_mult(&work[i], &work[i], R[i]);
                    }

		cgstrs (transt, L, U, perm_r, perm_c, &Bjcol, Gstat, info);
		
		for (i = 0; i < A->nrow; ++i) {
		    cs_mult(&work[i], &work[i], rwork[i]);
	 	}
	    } else {
		/* Multiply by (diag(C) or diag(R))*inv(op(A))*diag(W). */
		for (i = 0; i < A->nrow; ++i) {
		    cs_mult(&work[i], &work[i], rwork[i]);
		}
		
		cgstrs (trans, L, U, perm_r, perm_c, &Bjcol, Gstat, info);
		
		if ( notran && colequ )
		    for (i = 0; i < A->ncol; ++i) {
		        cs_mult(&work[i], &work[i], C[i]);
		    }
		else if ( !notran && rowequ )
		    for (i = 0; i < A->ncol; ++i) {
		        cs_mult(&work[i], &work[i], R[i]);  
		    }
	    }
	    
	} while ( kase != 0 );

	/* Normalize error. */
	lstres = 0.;
 	if ( notran && colequ ) {
	    for (i = 0; i < A->nrow; ++i)
	    	lstres = SUPERLU_MAX( lstres, C[i] * c_abs1( &Xptr[i]) );
  	} else if ( !notran && rowequ ) {
	    for (i = 0; i < A->nrow; ++i)
	    	lstres = SUPERLU_MAX( lstres, R[i] * c_abs1( &Xptr[i]) );
	} else {
	    for (i = 0; i < A->nrow; ++i)
	    	lstres = SUPERLU_MAX( lstres, c_abs1( &Xptr[i]) );
	}
	if ( lstres != 0. )
	    ferr[j] /= lstres;

    } /* for each RHS j ... */
    
    SUPERLU_FREE(work);
    SUPERLU_FREE(rwork);
    SUPERLU_FREE(iwork);
    SUPERLU_FREE(Bjcol.Store);

    return;

} /* cgsrfs */
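
The componentwise backward error that drives the refinement loop above is berr = max_i |r_i| / ( |op(A)|*|x| + |b| )_i. A simplified illustration for a small dense real system follows; it is only a sketch (the name dense_backward_error is hypothetical) and it omits the SAFE1/SAFE2 underflow guards used in cgsrfs.

#include <math.h>

/* Componentwise backward error  max_i |b - A*x|_i / (|A|*|x| + |b|)_i
   for a dense n-by-n system stored row-major.  No underflow safeguards. */
static double dense_backward_error(int n, const double *A,
                                   const double *x, const double *b)
{
    double berr = 0.0;
    for (int i = 0; i < n; ++i) {
        double r = b[i];               /* residual component  b_i - (A*x)_i  */
        double denom = fabs(b[i]);     /* denominator  (|A|*|x| + |b|)_i     */
        for (int j = 0; j < n; ++j) {
            r     -= A[i * n + j] * x[j];
            denom += fabs(A[i * n + j]) * fabs(x[j]);
        }
        if (denom > 0.0 && fabs(r) / denom > berr)
            berr = fabs(r) / denom;
    }
    return berr;
}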
Example No. 20
0
/*<       subroutine csvdc(x,ldx,n,p,s,e,u,ldu,v,ldv,work,job,info) >*/
/* Subroutine */ int csvdc_(complex *x, integer *ldx, integer *n, integer *p, 
        complex *s, complex *e, complex *u, integer *ldu, complex *v, integer 
        *ldv, complex *work, integer *job, integer *info)
{
    /* System generated locals */
    integer x_dim1, x_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2, 
            i__3, i__4;
    real r__1, r__2, r__3, r__4;
    complex q__1, q__2, q__3;

    /* Builtin functions */
    double r_imag(complex *), c_abs(complex *);
    void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *);
    double sqrt(doublereal);

    /* Local variables */
    real b, c__, f, g;
    integer i__, j, k, l=0, m;
    complex r__, t;
    real t1, el;
    integer kk;
    real cs;
    integer ll, mm, ls=0;
    real sl;
    integer lu;
    real sm, sn;
    integer lm1, mm1, lp1, mp1, nct, ncu, lls, nrt;
    real emm1, smm1;
    integer kase, jobu, iter;
    real test;
    integer nctp1, nrtp1;
    extern /* Subroutine */ int cscal_(integer *, complex *, complex *, 
            integer *);
    real scale;
    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer 
            *, complex *, integer *);
    real shift;
    extern /* Subroutine */ int cswap_(integer *, complex *, integer *, 
            complex *, integer *);
    integer maxit;
    extern /* Subroutine */ int caxpy_(integer *, complex *, complex *, 
            integer *, complex *, integer *), csrot_(integer *, complex *, 
            integer *, complex *, integer *, real *, real *);
    logical wantu, wantv;
    extern /* Subroutine */ int srotg_(real *, real *, real *, real *);
    real ztest;
    extern doublereal scnrm2_(integer *, complex *, integer *);

/*<       integer ldx,n,p,ldu,ldv,job,info >*/
/*<       complex x(ldx,1),s(1),e(1),u(ldu,1),v(ldv,1),work(1) >*/


/*     csvdc is a subroutine to reduce a complex nxp matrix x by */
/*     unitary transformations u and v to diagonal form.  the */
/*     diagonal elements s(i) are the singular values of x.  the */
/*     columns of u are the corresponding left singular vectors, */
/*     and the columns of v the right singular vectors. */

/*     on entry */

/*         x         complex(ldx,p), where ldx.ge.n. */
/*                   x contains the matrix whose singular value */
/*                   decomposition is to be computed.  x is */
/*                   destroyed by csvdc. */

/*         ldx       integer. */
/*                   ldx is the leading dimension of the array x. */

/*         n         integer. */
/*                   n is the number of rows of the matrix x. */

/*         p         integer. */
/*                   p is the number of columns of the matrix x. */

/*         ldu       integer. */
/*                   ldu is the leading dimension of the array u */
/*                   (see below). */

/*         ldv       integer. */
/*                   ldv is the leading dimension of the array v */
/*                   (see below). */

/*         work      complex(n). */
/*                   work is a scratch array. */

/*         job       integer. */
/*                   job controls the computation of the singular */
/*                   vectors.  it has the decimal expansion ab */
/*                   with the following meaning */

/*                        a.eq.0    do not compute the left singular */
/*                                  vectors. */
/*                        a.eq.1    return the n left singular vectors */
/*                                  in u. */
/*                        a.ge.2    returns the first min(n,p) */
/*                                  left singular vectors in u. */
/*                        b.eq.0    do not compute the right singular */
/*                                  vectors. */
/*                        b.eq.1    return the right singular vectors */
/*                                  in v. */

/*     on return */

/*         s         complex(mm), where mm=min(n+1,p). */
/*                   the first min(n,p) entries of s contain the */
/*                   singular values of x arranged in descending */
/*                   order of magnitude. */

/*         e         complex(p). */
/*                   e ordinarily contains zeros.  however see the */
/*                   discussion of info for exceptions. */

/*         u         complex(ldu,k), where ldu.ge.n.  if joba.eq.1 then */
/*                                   k.eq.n, if joba.ge.2 then */
/*                                   k.eq.min(n,p). */
/*                   u contains the matrix of left singular vectors. */
/*                   u is not referenced if joba.eq.0.  if n.le.p */
/*                   or if joba.gt.2, then u may be identified with x */
/*                   in the subroutine call. */

/*         v         complex(ldv,p), where ldv.ge.p. */
/*                   v contains the matrix of right singular vectors. */
/*                   v is not referenced if jobb.eq.0.  if p.le.n, */
/*                   then v may be identified with x in the */
/*                   subroutine call. */

/*         info      integer. */
/*                   the singular values (and their corresponding */
/*                   singular vectors) s(info+1),s(info+2),...,s(m) */
/*                   are correct (here m=min(n,p)).  thus if */
/*                   info.eq.0, all the singular values and their */
/*                   vectors are correct.  in any event, the matrix */
/*                   b = ctrans(u)*x*v is the bidiagonal matrix */
/*                   with the elements of s on its diagonal and the */
/*                   elements of e on its super-diagonal (ctrans(u) */
/*                   is the conjugate-transpose of u).  thus the */
/*                   singular values of x and b are the same. */

/*     linpack. this version dated 03/19/79 . */
/*              correction to shift calculation made 2/85. */
/*     g.w. stewart, university of maryland, argonne national lab. */

/*     csvdc uses the following functions and subprograms. */

/*     external csrot */
/*     blas caxpy,cdotc,cscal,cswap,scnrm2,srotg */
/*     fortran abs,aimag,amax1,cabs,cmplx */
/*     fortran conjg,max0,min0,mod,real,sqrt */

/*     internal variables */

/*<    >*/
/*<       complex cdotc,t,r >*/
/*<    >*/
/*<       logical wantu,wantv >*/

/*<       complex csign,zdum,zdum1,zdum2 >*/
/*<       real cabs1 >*/
/*<       cabs1(zdum) = abs(real(zdum)) + abs(aimag(zdum)) >*/
/*<       csign(zdum1,zdum2) = cabs(zdum1)*(zdum2/cabs(zdum2)) >*/
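/*     In the C translation below these statement functions are inlined:   */
/*       cabs1(z)     ->  dabs(z.r) + dabs(r_imag(&z))                      */
/*       csign(z1,z2) ->  c_abs(&z1) * (z2 / c_abs(&z2)), i.e. z2 rescaled  */
/*                        to have modulus |z1|                              */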

/*     set the maximum number of iterations. */

/*<       maxit = 1000 >*/
    /* Parameter adjustments */
    x_dim1 = *ldx;
    x_offset = 1 + x_dim1;
    x -= x_offset;
    --s;
    --e;
    u_dim1 = *ldu;
    u_offset = 1 + u_dim1;
    u -= u_offset;
    v_dim1 = *ldv;
    v_offset = 1 + v_dim1;
    v -= v_offset;
    --work;

    /* Function Body */
    maxit = 1000;

/*     determine what is to be computed. */

/*<       wantu = .false. >*/
    wantu = FALSE_;
/*<       wantv = .false. >*/
    wantv = FALSE_;
/*<       jobu = mod(job,100)/10 >*/
    jobu = *job % 100 / 10;
/*<       ncu = n >*/
    ncu = *n;
/*<       if (jobu .gt. 1) ncu = min0(n,p) >*/
    if (jobu > 1) {
        ncu = min(*n,*p);
    }
/*<       if (jobu .ne. 0) wantu = .true. >*/
    if (jobu != 0) {
        wantu = TRUE_;
    }
/*<       if (mod(job,10) .ne. 0) wantv = .true. >*/
    if (*job % 10 != 0) {
        wantv = TRUE_;
    }

/*     reduce x to bidiagonal form, storing the diagonal elements */
/*     in s and the super-diagonal elements in e. */

/*<       info = 0 >*/
    *info = 0;
/*<       nct = min0(n-1,p) >*/
/* Computing MIN */
    i__1 = *n - 1;
    nct = min(i__1,*p);
/*<       nrt = max0(0,min0(p-2,n)) >*/
/* Computing MAX */
/* Computing MIN */
    i__3 = *p - 2;
    i__1 = 0, i__2 = min(i__3,*n);
    nrt = max(i__1,i__2);
/*<       lu = max0(nct,nrt) >*/
    lu = max(nct,nrt);
/*<       if (lu .lt. 1) go to 170 >*/
    if (lu < 1) {
        goto L170;
    }
/*<       do 160 l = 1, lu >*/
    i__1 = lu;
    for (l = 1; l <= i__1; ++l) {
/*<          lp1 = l + 1 >*/
        lp1 = l + 1;
/*<          if (l .gt. nct) go to 20 >*/
        if (l > nct) {
            goto L20;
        }

/*           compute the transformation for the l-th column and */
/*           place the l-th diagonal in s(l). */

/*<             s(l) = cmplx(scnrm2(n-l+1,x(l,l),1),0.0e0) >*/
        i__2 = l;
        i__3 = *n - l + 1;
        r__1 = scnrm2_(&i__3, &x[l + l * x_dim1], &c__1);
        q__1.r = r__1, q__1.i = (float)0.;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<             if (cabs1(s(l)) .eq. 0.0e0) go to 10 >*/
        i__2 = l;
        if ((r__1 = s[i__2].r, dabs(r__1)) + (r__2 = r_imag(&s[l]), dabs(r__2)
                ) == (float)0.) {
            goto L10;
        }
/*<                if (cabs1(x(l,l)) .ne. 0.0e0) s(l) = csign(s(l),x(l,l)) >*/
        i__2 = l + l * x_dim1;
        if ((r__1 = x[i__2].r, dabs(r__1)) + (r__2 = r_imag(&x[l + l * x_dim1]
                ), dabs(r__2)) != (float)0.) {
            i__3 = l;
            r__3 = c_abs(&s[l]);
            i__4 = l + l * x_dim1;
            r__4 = c_abs(&x[l + l * x_dim1]);
            q__2.r = x[i__4].r / r__4, q__2.i = x[i__4].i / r__4;
            q__1.r = r__3 * q__2.r, q__1.i = r__3 * q__2.i;
            s[i__3].r = q__1.r, s[i__3].i = q__1.i;
        }
/*<                call cscal(n-l+1,1.0e0/s(l),x(l,l),1) >*/
        i__2 = *n - l + 1;
        c_div(&q__1, &c_b8, &s[l]);
        cscal_(&i__2, &q__1, &x[l + l * x_dim1], &c__1);
/*<                x(l,l) = (1.0e0,0.0e0) + x(l,l) >*/
        i__2 = l + l * x_dim1;
        i__3 = l + l * x_dim1;
        q__1.r = x[i__3].r + (float)1., q__1.i = x[i__3].i + (float)0.;
        x[i__2].r = q__1.r, x[i__2].i = q__1.i;
/*<    10       continue >*/
L10:
/*<             s(l) = -s(l) >*/
        i__2 = l;
        i__3 = l;
        q__1.r = -s[i__3].r, q__1.i = -s[i__3].i;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<    20    continue >*/
L20:
/*<          if (p .lt. lp1) go to 50 >*/
        if (*p < lp1) {
            goto L50;
        }
/*<          do 40 j = lp1, p >*/
        i__2 = *p;
        for (j = lp1; j <= i__2; ++j) {
/*<             if (l .gt. nct) go to 30 >*/
            if (l > nct) {
                goto L30;
            }
/*<             if (cabs1(s(l)) .eq. 0.0e0) go to 30 >*/
            i__3 = l;
            if ((r__1 = s[i__3].r, dabs(r__1)) + (r__2 = r_imag(&s[l]), dabs(
                    r__2)) == (float)0.) {
                goto L30;
            }

/*              apply the transformation. */

/*<                t = -cdotc(n-l+1,x(l,l),1,x(l,j),1)/x(l,l) >*/
            i__3 = *n - l + 1;
            cdotc_(&q__3, &i__3, &x[l + l * x_dim1], &c__1, &x[l + j * x_dim1]
                    , &c__1);
            q__2.r = -q__3.r, q__2.i = -q__3.i;
            c_div(&q__1, &q__2, &x[l + l * x_dim1]);
            t.r = q__1.r, t.i = q__1.i;
/*<                call caxpy(n-l+1,t,x(l,l),1,x(l,j),1) >*/
            i__3 = *n - l + 1;
            caxpy_(&i__3, &t, &x[l + l * x_dim1], &c__1, &x[l + j * x_dim1], &
                    c__1);
/*<    30       continue >*/
L30:

/*           place the l-th row of x into  e for the */
/*           subsequent calculation of the row transformation. */

/*<             e(j) = conjg(x(l,j)) >*/
            i__3 = j;
            r_cnjg(&q__1, &x[l + j * x_dim1]);
            e[i__3].r = q__1.r, e[i__3].i = q__1.i;
/*<    40    continue >*/
/* L40: */
        }
/*<    50    continue >*/
L50:
/*<          if (.not.wantu .or. l .gt. nct) go to 70 >*/
        if (! wantu || l > nct) {
            goto L70;
        }

/*           place the transformation in u for subsequent back */
/*           multiplication. */

/*<             do 60 i = l, n >*/
        i__2 = *n;
        for (i__ = l; i__ <= i__2; ++i__) {
/*<                u(i,l) = x(i,l) >*/
            i__3 = i__ + l * u_dim1;
            i__4 = i__ + l * x_dim1;
            u[i__3].r = x[i__4].r, u[i__3].i = x[i__4].i;
/*<    60       continue >*/
/* L60: */
        }
/*<    70    continue >*/
L70:
/*<          if (l .gt. nrt) go to 150 >*/
        if (l > nrt) {
            goto L150;
        }

/*           compute the l-th row transformation and place the */
/*           l-th super-diagonal in e(l). */

/*<             e(l) = cmplx(scnrm2(p-l,e(lp1),1),0.0e0) >*/
        i__2 = l;
        i__3 = *p - l;
        r__1 = scnrm2_(&i__3, &e[lp1], &c__1);
        q__1.r = r__1, q__1.i = (float)0.;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<             if (cabs1(e(l)) .eq. 0.0e0) go to 80 >*/
        i__2 = l;
        if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[l]), dabs(r__2)
                ) == (float)0.) {
            goto L80;
        }
/*<                if (cabs1(e(lp1)) .ne. 0.0e0) e(l) = csign(e(l),e(lp1)) >*/
        i__2 = lp1;
        if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[lp1]), dabs(
                r__2)) != (float)0.) {
            i__3 = l;
            r__3 = c_abs(&e[l]);
            i__4 = lp1;
            r__4 = c_abs(&e[lp1]);
            q__2.r = e[i__4].r / r__4, q__2.i = e[i__4].i / r__4;
            q__1.r = r__3 * q__2.r, q__1.i = r__3 * q__2.i;
            e[i__3].r = q__1.r, e[i__3].i = q__1.i;
        }
/*<                call cscal(p-l,1.0e0/e(l),e(lp1),1) >*/
        i__2 = *p - l;
        c_div(&q__1, &c_b8, &e[l]);
        cscal_(&i__2, &q__1, &e[lp1], &c__1);
/*<                e(lp1) = (1.0e0,0.0e0) + e(lp1) >*/
        i__2 = lp1;
        i__3 = lp1;
        q__1.r = e[i__3].r + (float)1., q__1.i = e[i__3].i + (float)0.;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<    80       continue >*/
L80:
/*<             e(l) = -conjg(e(l)) >*/
        i__2 = l;
        r_cnjg(&q__2, &e[l]);
        q__1.r = -q__2.r, q__1.i = -q__2.i;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<             if (lp1 .gt. n .or. cabs1(e(l)) .eq. 0.0e0) go to 120 >*/
        i__2 = l;
        if (lp1 > *n || (r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[l])
                , dabs(r__2)) == (float)0.) {
            goto L120;
        }

/*              apply the transformation. */

/*<                do 90 i = lp1, n >*/
        i__2 = *n;
        for (i__ = lp1; i__ <= i__2; ++i__) {
/*<                   work(i) = (0.0e0,0.0e0) >*/
            i__3 = i__;
            work[i__3].r = (float)0., work[i__3].i = (float)0.;
/*<    90          continue >*/
/* L90: */
        }
/*<                do 100 j = lp1, p >*/
        i__2 = *p;
        for (j = lp1; j <= i__2; ++j) {
/*<                   call caxpy(n-l,e(j),x(lp1,j),1,work(lp1),1) >*/
            i__3 = *n - l;
            caxpy_(&i__3, &e[j], &x[lp1 + j * x_dim1], &c__1, &work[lp1], &
                    c__1);
/*<   100          continue >*/
/* L100: */
        }
/*<                do 110 j = lp1, p >*/
        i__2 = *p;
        for (j = lp1; j <= i__2; ++j) {
/*<    >*/
            i__3 = *n - l;
            i__4 = j;
            q__3.r = -e[i__4].r, q__3.i = -e[i__4].i;
            c_div(&q__2, &q__3, &e[lp1]);
            r_cnjg(&q__1, &q__2);
            caxpy_(&i__3, &q__1, &work[lp1], &c__1, &x[lp1 + j * x_dim1], &
                    c__1);
/*<   110          continue >*/
/* L110: */
        }
/*<   120       continue >*/
L120:
/*<             if (.not.wantv) go to 140 >*/
        if (! wantv) {
            goto L140;
        }

/*              place the transformation in v for subsequent */
/*              back multiplication. */

/*<                do 130 i = lp1, p >*/
        i__2 = *p;
        for (i__ = lp1; i__ <= i__2; ++i__) {
/*<                   v(i,l) = e(i) >*/
            i__3 = i__ + l * v_dim1;
            i__4 = i__;
            v[i__3].r = e[i__4].r, v[i__3].i = e[i__4].i;
/*<   130          continue >*/
/* L130: */
        }
/*<   140       continue >*/
L140:
/*<   150    continue >*/
L150:
/*<   160 continue >*/
/* L160: */
        ;
    }
/*<   170 continue >*/
L170:

/*     set up the final bidiagonal matrix of order m. */

/*<       m = min0(p,n+1) >*/
/* Computing MIN */
    i__1 = *p, i__2 = *n + 1;
    m = min(i__1,i__2);
/*<       nctp1 = nct + 1 >*/
    nctp1 = nct + 1;
/*<       nrtp1 = nrt + 1 >*/
    nrtp1 = nrt + 1;
/*<       if (nct .lt. p) s(nctp1) = x(nctp1,nctp1) >*/
    if (nct < *p) {
        i__1 = nctp1;
        i__2 = nctp1 + nctp1 * x_dim1;
        s[i__1].r = x[i__2].r, s[i__1].i = x[i__2].i;
    }
/*<       if (n .lt. m) s(m) = (0.0e0,0.0e0) >*/
    if (*n < m) {
        i__1 = m;
        s[i__1].r = (float)0., s[i__1].i = (float)0.;
    }
/*<       if (nrtp1 .lt. m) e(nrtp1) = x(nrtp1,m) >*/
    if (nrtp1 < m) {
        i__1 = nrtp1;
        i__2 = nrtp1 + m * x_dim1;
        e[i__1].r = x[i__2].r, e[i__1].i = x[i__2].i;
    }
/*<       e(m) = (0.0e0,0.0e0) >*/
    i__1 = m;
    e[i__1].r = (float)0., e[i__1].i = (float)0.;

/*     if required, generate u. */

/*<       if (.not.wantu) go to 300 >*/
    if (! wantu) {
        goto L300;
    }
/*<          if (ncu .lt. nctp1) go to 200 >*/
    if (ncu < nctp1) {
        goto L200;
    }
/*<          do 190 j = nctp1, ncu >*/
    i__1 = ncu;
    for (j = nctp1; j <= i__1; ++j) {
/*<             do 180 i = 1, n >*/
        i__2 = *n;
        for (i__ = 1; i__ <= i__2; ++i__) {
/*<                u(i,j) = (0.0e0,0.0e0) >*/
            i__3 = i__ + j * u_dim1;
            u[i__3].r = (float)0., u[i__3].i = (float)0.;
/*<   180       continue >*/
/* L180: */
        }
/*<             u(j,j) = (1.0e0,0.0e0) >*/
        i__2 = j + j * u_dim1;
        u[i__2].r = (float)1., u[i__2].i = (float)0.;
/*<   190    continue >*/
/* L190: */
    }
/*<   200    continue >*/
L200:
/*<          if (nct .lt. 1) go to 290 >*/
    if (nct < 1) {
        goto L290;
    }
/*<          do 280 ll = 1, nct >*/
    i__1 = nct;
    for (ll = 1; ll <= i__1; ++ll) {
/*<             l = nct - ll + 1 >*/
        l = nct - ll + 1;
/*<             if (cabs1(s(l)) .eq. 0.0e0) go to 250 >*/
        i__2 = l;
        if ((r__1 = s[i__2].r, dabs(r__1)) + (r__2 = r_imag(&s[l]), dabs(r__2)
                ) == (float)0.) {
            goto L250;
        }
/*<                lp1 = l + 1 >*/
        lp1 = l + 1;
/*<                if (ncu .lt. lp1) go to 220 >*/
        if (ncu < lp1) {
            goto L220;
        }
/*<                do 210 j = lp1, ncu >*/
        i__2 = ncu;
        for (j = lp1; j <= i__2; ++j) {
/*<                   t = -cdotc(n-l+1,u(l,l),1,u(l,j),1)/u(l,l) >*/
            i__3 = *n - l + 1;
            cdotc_(&q__3, &i__3, &u[l + l * u_dim1], &c__1, &u[l + j * u_dim1]
                    , &c__1);
            q__2.r = -q__3.r, q__2.i = -q__3.i;
            c_div(&q__1, &q__2, &u[l + l * u_dim1]);
            t.r = q__1.r, t.i = q__1.i;
/*<                   call caxpy(n-l+1,t,u(l,l),1,u(l,j),1) >*/
            i__3 = *n - l + 1;
            caxpy_(&i__3, &t, &u[l + l * u_dim1], &c__1, &u[l + j * u_dim1], &
                    c__1);
/*<   210          continue >*/
/* L210: */
        }
/*<   220          continue >*/
L220:
/*<                call cscal(n-l+1,(-1.0e0,0.0e0),u(l,l),1) >*/
        i__2 = *n - l + 1;
        cscal_(&i__2, &c_b53, &u[l + l * u_dim1], &c__1);
/*<                u(l,l) = (1.0e0,0.0e0) + u(l,l) >*/
        i__2 = l + l * u_dim1;
        i__3 = l + l * u_dim1;
        q__1.r = u[i__3].r + (float)1., q__1.i = u[i__3].i + (float)0.;
        u[i__2].r = q__1.r, u[i__2].i = q__1.i;
/*<                lm1 = l - 1 >*/
        lm1 = l - 1;
/*<                if (lm1 .lt. 1) go to 240 >*/
        if (lm1 < 1) {
            goto L240;
        }
/*<                do 230 i = 1, lm1 >*/
        i__2 = lm1;
        for (i__ = 1; i__ <= i__2; ++i__) {
/*<                   u(i,l) = (0.0e0,0.0e0) >*/
            i__3 = i__ + l * u_dim1;
            u[i__3].r = (float)0., u[i__3].i = (float)0.;
/*<   230          continue >*/
/* L230: */
        }
/*<   240          continue >*/
L240:
/*<             go to 270 >*/
        goto L270;
/*<   250       continue >*/
L250:
/*<                do 260 i = 1, n >*/
        i__2 = *n;
        for (i__ = 1; i__ <= i__2; ++i__) {
/*<                   u(i,l) = (0.0e0,0.0e0) >*/
            i__3 = i__ + l * u_dim1;
            u[i__3].r = (float)0., u[i__3].i = (float)0.;
/*<   260          continue >*/
/* L260: */
        }
/*<                u(l,l) = (1.0e0,0.0e0) >*/
        i__2 = l + l * u_dim1;
        u[i__2].r = (float)1., u[i__2].i = (float)0.;
/*<   270       continue >*/
L270:
/*<   280    continue >*/
/* L280: */
        ;
    }
/*<   290    continue >*/
L290:
/*<   300 continue >*/
L300:

/*     if it is required, generate v. */

/*<       if (.not.wantv) go to 350 >*/
    if (! wantv) {
        goto L350;
    }
/*<          do 340 ll = 1, p >*/
    i__1 = *p;
    for (ll = 1; ll <= i__1; ++ll) {
/*<             l = p - ll + 1 >*/
        l = *p - ll + 1;
/*<             lp1 = l + 1 >*/
        lp1 = l + 1;
/*<             if (l .gt. nrt) go to 320 >*/
        if (l > nrt) {
            goto L320;
        }
/*<             if (cabs1(e(l)) .eq. 0.0e0) go to 320 >*/
        i__2 = l;
        if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[l]), dabs(r__2)
                ) == (float)0.) {
            goto L320;
        }
/*<                do 310 j = lp1, p >*/
        i__2 = *p;
        for (j = lp1; j <= i__2; ++j) {
/*<                   t = -cdotc(p-l,v(lp1,l),1,v(lp1,j),1)/v(lp1,l) >*/
            i__3 = *p - l;
            cdotc_(&q__3, &i__3, &v[lp1 + l * v_dim1], &c__1, &v[lp1 + j * 
                    v_dim1], &c__1);
            q__2.r = -q__3.r, q__2.i = -q__3.i;
            c_div(&q__1, &q__2, &v[lp1 + l * v_dim1]);
            t.r = q__1.r, t.i = q__1.i;
/*<                   call caxpy(p-l,t,v(lp1,l),1,v(lp1,j),1) >*/
            i__3 = *p - l;
            caxpy_(&i__3, &t, &v[lp1 + l * v_dim1], &c__1, &v[lp1 + j * 
                    v_dim1], &c__1);
/*<   310          continue >*/
/* L310: */
        }
/*<   320       continue >*/
L320:
/*<             do 330 i = 1, p >*/
        i__2 = *p;
        for (i__ = 1; i__ <= i__2; ++i__) {
/*<                v(i,l) = (0.0e0,0.0e0) >*/
            i__3 = i__ + l * v_dim1;
            v[i__3].r = (float)0., v[i__3].i = (float)0.;
/*<   330       continue >*/
/* L330: */
        }
/*<             v(l,l) = (1.0e0,0.0e0) >*/
        i__2 = l + l * v_dim1;
        v[i__2].r = (float)1., v[i__2].i = (float)0.;
/*<   340    continue >*/
/* L340: */
    }
/*<   350 continue >*/
L350:

/*     transform s and e so that they are real. */

/*<       do 380 i = 1, m >*/
    i__1 = m;
    for (i__ = 1; i__ <= i__1; ++i__) {
/*<          if (cabs1(s(i)) .eq. 0.0e0) go to 360 >*/
        i__2 = i__;
        if ((r__1 = s[i__2].r, dabs(r__1)) + (r__2 = r_imag(&s[i__]), dabs(
                r__2)) == (float)0.) {
            goto L360;
        }
/*<             t = cmplx(cabs(s(i)),0.0e0) >*/
        r__1 = c_abs(&s[i__]);
        q__1.r = r__1, q__1.i = (float)0.;
        t.r = q__1.r, t.i = q__1.i;
/*<             r = s(i)/t >*/
        c_div(&q__1, &s[i__], &t);
        r__.r = q__1.r, r__.i = q__1.i;
/*<             s(i) = t >*/
        i__2 = i__;
        s[i__2].r = t.r, s[i__2].i = t.i;
/*<             if (i .lt. m) e(i) = e(i)/r >*/
        if (i__ < m) {
            i__2 = i__;
            c_div(&q__1, &e[i__], &r__);
            e[i__2].r = q__1.r, e[i__2].i = q__1.i;
        }
/*<             if (wantu) call cscal(n,r,u(1,i),1) >*/
        if (wantu) {
            cscal_(n, &r__, &u[i__ * u_dim1 + 1], &c__1);
        }
/*<   360    continue >*/
L360:
/*     ...exit */
/*<          if (i .eq. m) go to 390 >*/
        if (i__ == m) {
            goto L390;
        }
/*<          if (cabs1(e(i)) .eq. 0.0e0) go to 370 >*/
        i__2 = i__;
        if ((r__1 = e[i__2].r, dabs(r__1)) + (r__2 = r_imag(&e[i__]), dabs(
                r__2)) == (float)0.) {
            goto L370;
        }
/*<             t = cmplx(cabs(e(i)),0.0e0) >*/
        r__1 = c_abs(&e[i__]);
        q__1.r = r__1, q__1.i = (float)0.;
        t.r = q__1.r, t.i = q__1.i;
/*<             r = t/e(i) >*/
        c_div(&q__1, &t, &e[i__]);
        r__.r = q__1.r, r__.i = q__1.i;
/*<             e(i) = t >*/
        i__2 = i__;
        e[i__2].r = t.r, e[i__2].i = t.i;
/*<             s(i+1) = s(i+1)*r >*/
        i__2 = i__ + 1;
        i__3 = i__ + 1;
        q__1.r = s[i__3].r * r__.r - s[i__3].i * r__.i, q__1.i = s[i__3].r * 
                r__.i + s[i__3].i * r__.r;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<             if (wantv) call cscal(p,r,v(1,i+1),1) >*/
        if (wantv) {
            cscal_(p, &r__, &v[(i__ + 1) * v_dim1 + 1], &c__1);
        }
/*<   370    continue >*/
L370:
/*<   380 continue >*/
/* L380: */
        ;
    }
/*<   390 continue >*/
L390:

/*     main iteration loop for the singular values. */

/*<       mm = m >*/
    mm = m;
/*<       iter = 0 >*/
    iter = 0;
/*<   400 continue >*/
L400:

/*        quit if all the singular values have been found. */

/*     ...exit */
/*<          if (m .eq. 0) go to 660 >*/
    if (m == 0) {
        goto L660;
    }

/*        if too many iterations have been performed, set */
/*        flag and return. */

/*<          if (iter .lt. maxit) go to 410 >*/
    if (iter < maxit) {
        goto L410;
    }
/*<             info = m >*/
    *info = m;
/*     ......exit */
/*<             go to 660 >*/
    goto L660;
/*<   410    continue >*/
L410:

/*        this section of the program inspects for */
/*        negligible elements in the s and e arrays.  on */
/*        completion the variables kase and l are set as follows. */

/*           kase = 1     if s(m) and e(l-1) are negligible and l.lt.m */
/*           kase = 2     if s(l) is negligible and l.lt.m */
/*           kase = 3     if e(l-1) is negligible, l.lt.m, and */
/*                        s(l), ..., s(m) are not negligible (qr step). */
/*           kase = 4     if e(m-1) is negligible (convergence). */

/*<          do 430 ll = 1, m >*/
    i__1 = m;
    for (ll = 1; ll <= i__1; ++ll) {
/*<             l = m - ll >*/
        l = m - ll;
/*        ...exit */
/*<             if (l .eq. 0) go to 440 >*/
        if (l == 0) {
            goto L440;
        }
/*<             test = cabs(s(l)) + cabs(s(l+1)) >*/
        test = c_abs(&s[l]) + c_abs(&s[l + 1]);
/*<             ztest = test + cabs(e(l)) >*/
        ztest = test + c_abs(&e[l]);
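        /* e(l) is negligible exactly when adding cabs(e(l)) to test does
           not change test in floating point, i.e. when it is below roundoff
           relative to its neighboring singular values */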
/*<             if (ztest .ne. test) go to 420 >*/
        if (ztest != test) {
            goto L420;
        }
/*<                e(l) = (0.0e0,0.0e0) >*/
        i__2 = l;
        e[i__2].r = (float)0., e[i__2].i = (float)0.;
/*        ......exit */
/*<                go to 440 >*/
        goto L440;
/*<   420       continue >*/
L420:
/*<   430    continue >*/
/* L430: */
        ;
    }
/*<   440    continue >*/
L440:
/*<          if (l .ne. m - 1) go to 450 >*/
    if (l != m - 1) {
        goto L450;
    }
/*<             kase = 4 >*/
    kase = 4;
/*<          go to 520 >*/
    goto L520;
/*<   450    continue >*/
L450:
/*<             lp1 = l + 1 >*/
    lp1 = l + 1;
/*<             mp1 = m + 1 >*/
    mp1 = m + 1;
/*<             do 470 lls = lp1, mp1 >*/
    i__1 = mp1;
    for (lls = lp1; lls <= i__1; ++lls) {
/*<                ls = m - lls + lp1 >*/
        ls = m - lls + lp1;
/*           ...exit */
/*<                if (ls .eq. l) go to 480 >*/
        if (ls == l) {
            goto L480;
        }
/*<                test = 0.0e0 >*/
        test = (float)0.;
/*<                if (ls .ne. m) test = test + cabs(e(ls)) >*/
        if (ls != m) {
            test += c_abs(&e[ls]);
        }
/*<                if (ls .ne. l + 1) test = test + cabs(e(ls-1)) >*/
        if (ls != l + 1) {
            test += c_abs(&e[ls - 1]);
        }
/*<                ztest = test + cabs(s(ls)) >*/
        ztest = test + c_abs(&s[ls]);
/*<                if (ztest .ne. test) go to 460 >*/
        if (ztest != test) {
            goto L460;
        }
/*<                   s(ls) = (0.0e0,0.0e0) >*/
        i__2 = ls;
        s[i__2].r = (float)0., s[i__2].i = (float)0.;
/*           ......exit */
/*<                   go to 480 >*/
        goto L480;
/*<   460          continue >*/
L460:
/*<   470       continue >*/
/* L470: */
        ;
    }
/*<   480       continue >*/
L480:
/*<             if (ls .ne. l) go to 490 >*/
    if (ls != l) {
        goto L490;
    }
/*<                kase = 3 >*/
    kase = 3;
/*<             go to 510 >*/
    goto L510;
/*<   490       continue >*/
L490:
/*<             if (ls .ne. m) go to 500 >*/
    if (ls != m) {
        goto L500;
    }
/*<                kase = 1 >*/
    kase = 1;
/*<             go to 510 >*/
    goto L510;
/*<   500       continue >*/
L500:
/*<                kase = 2 >*/
    kase = 2;
/*<                l = ls >*/
    l = ls;
/*<   510       continue >*/
L510:
/*<   520    continue >*/
L520:
/*<          l = l + 1 >*/
    ++l;

/*        perform the task indicated by kase. */

/*<          go to (530, 560, 580, 610), kase >*/
    switch (kase) {
        case 1:  goto L530;
        case 2:  goto L560;
        case 3:  goto L580;
        case 4:  goto L610;
    }

/*        deflate negligible s(m). */

/*<   530    continue >*/
L530:
/*<             mm1 = m - 1 >*/
    mm1 = m - 1;
/*<             f = real(e(m-1)) >*/
    i__1 = m - 1;
    f = e[i__1].r;
/*<             e(m-1) = (0.0e0,0.0e0) >*/
    i__1 = m - 1;
    e[i__1].r = (float)0., e[i__1].i = (float)0.;
/*<             do 550 kk = l, mm1 >*/
    i__1 = mm1;
    for (kk = l; kk <= i__1; ++kk) {
/*<                k = mm1 - kk + l >*/
        k = mm1 - kk + l;
/*<                t1 = real(s(k)) >*/
        i__2 = k;
        t1 = s[i__2].r;
/*<                call srotg(t1,f,cs,sn) >*/
        srotg_(&t1, &f, &cs, &sn);
/*<                s(k) = cmplx(t1,0.0e0) >*/
        i__2 = k;
        q__1.r = t1, q__1.i = (float)0.;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<                if (k .eq. l) go to 540 >*/
        if (k == l) {
            goto L540;
        }
/*<                   f = -sn*real(e(k-1)) >*/
        i__2 = k - 1;
        f = -sn * e[i__2].r;
/*<                   e(k-1) = cs*e(k-1) >*/
        i__2 = k - 1;
        i__3 = k - 1;
        q__1.r = cs * e[i__3].r, q__1.i = cs * e[i__3].i;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<   540          continue >*/
L540:
/*<                if (wantv) call csrot(p,v(1,k),1,v(1,m),1,cs,sn) >*/
        if (wantv) {
            csrot_(p, &v[k * v_dim1 + 1], &c__1, &v[m * v_dim1 + 1], &c__1, &
                    cs, &sn);
        }
/*<   550       continue >*/
/* L550: */
    }
/*<          go to 650 >*/
    goto L650;

/*        split at negligible s(l). */

/*<   560    continue >*/
L560:
/*<             f = real(e(l-1)) >*/
    i__1 = l - 1;
    f = e[i__1].r;
/*<             e(l-1) = (0.0e0,0.0e0) >*/
    i__1 = l - 1;
    e[i__1].r = (float)0., e[i__1].i = (float)0.;
/*<             do 570 k = l, m >*/
    i__1 = m;
    for (k = l; k <= i__1; ++k) {
/*<                t1 = real(s(k)) >*/
        i__2 = k;
        t1 = s[i__2].r;
/*<                call srotg(t1,f,cs,sn) >*/
        srotg_(&t1, &f, &cs, &sn);
/*<                s(k) = cmplx(t1,0.0e0) >*/
        i__2 = k;
        q__1.r = t1, q__1.i = (float)0.;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<                f = -sn*real(e(k)) >*/
        i__2 = k;
        f = -sn * e[i__2].r;
/*<                e(k) = cs*e(k) >*/
        i__2 = k;
        i__3 = k;
        q__1.r = cs * e[i__3].r, q__1.i = cs * e[i__3].i;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<                if (wantu) call csrot(n,u(1,k),1,u(1,l-1),1,cs,sn) >*/
        if (wantu) {
            csrot_(n, &u[k * u_dim1 + 1], &c__1, &u[(l - 1) * u_dim1 + 1], &
                    c__1, &cs, &sn);
        }
/*<   570       continue >*/
/* L570: */
    }
/*<          go to 650 >*/
    goto L650;

/*        perform one qr step. */

/*<   580    continue >*/
L580:

/*           calculate the shift. */

/*<    >*/
/* Computing MAX */
    r__1 = c_abs(&s[m]), r__2 = c_abs(&s[m - 1]), r__1 = max(r__1,r__2), r__2 
            = c_abs(&e[m - 1]), r__1 = max(r__1,r__2), r__2 = c_abs(&s[l]), 
            r__1 = max(r__1,r__2), r__2 = c_abs(&e[l]);
    scale = dmax(r__1,r__2);
/*<             sm = real(s(m))/scale >*/
    i__1 = m;
    sm = s[i__1].r / scale;
/*<             smm1 = real(s(m-1))/scale >*/
    i__1 = m - 1;
    smm1 = s[i__1].r / scale;
/*<             emm1 = real(e(m-1))/scale >*/
    i__1 = m - 1;
    emm1 = e[i__1].r / scale;
/*<             sl = real(s(l))/scale >*/
    i__1 = l;
    sl = s[i__1].r / scale;
/*<             el = real(e(l))/scale >*/
    i__1 = l;
    el = e[i__1].r / scale;
/*<             b = ((smm1 + sm)*(smm1 - sm) + emm1**2)/2.0e0 >*/
/* Computing 2nd power */
    r__1 = emm1;
    b = ((smm1 + sm) * (smm1 - sm) + r__1 * r__1) / (float)2.;
/*<             c = (sm*emm1)**2 >*/
/* Computing 2nd power */
    r__1 = sm * emm1;
    c__ = r__1 * r__1;
/*<             shift = 0.0e0 >*/
    shift = (float)0.;
/*<             if (b .eq. 0.0e0 .and. c .eq. 0.0e0) go to 590 >*/
    if (b == (float)0. && c__ == (float)0.) {
        goto L590;
    }
/*<                shift = sqrt(b**2+c) >*/
/* Computing 2nd power */
    r__1 = b;
    shift = sqrt(r__1 * r__1 + c__);
/*<                if (b .lt. 0.0e0) shift = -shift >*/
    if (b < (float)0.) {
        shift = -shift;
    }
/*<                shift = c/(b + shift) >*/
    shift = c__ / (b + shift);
/*<   590       continue >*/
L590:
/*<             f = (sl + sm)*(sl - sm) + shift >*/
    f = (sl + sm) * (sl - sm) + shift;
/*<             g = sl*el >*/
    g = sl * el;

/*           chase zeros. */

/*<             mm1 = m - 1 >*/
    mm1 = m - 1;
/*<             do 600 k = l, mm1 >*/
    i__1 = mm1;
    for (k = l; k <= i__1; ++k) {
/*<                call srotg(f,g,cs,sn) >*/
        srotg_(&f, &g, &cs, &sn);
/*<                if (k .ne. l) e(k-1) = cmplx(f,0.0e0) >*/
        if (k != l) {
            i__2 = k - 1;
            q__1.r = f, q__1.i = (float)0.;
            e[i__2].r = q__1.r, e[i__2].i = q__1.i;
        }
/*<                f = cs*real(s(k)) + sn*real(e(k)) >*/
        i__2 = k;
        i__3 = k;
        f = cs * s[i__2].r + sn * e[i__3].r;
/*<                e(k) = cs*e(k) - sn*s(k) >*/
        i__2 = k;
        i__3 = k;
        q__2.r = cs * e[i__3].r, q__2.i = cs * e[i__3].i;
        i__4 = k;
        q__3.r = sn * s[i__4].r, q__3.i = sn * s[i__4].i;
        q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<                g = sn*real(s(k+1)) >*/
        i__2 = k + 1;
        g = sn * s[i__2].r;
/*<                s(k+1) = cs*s(k+1) >*/
        i__2 = k + 1;
        i__3 = k + 1;
        q__1.r = cs * s[i__3].r, q__1.i = cs * s[i__3].i;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<                if (wantv) call csrot(p,v(1,k),1,v(1,k+1),1,cs,sn) >*/
        if (wantv) {
            csrot_(p, &v[k * v_dim1 + 1], &c__1, &v[(k + 1) * v_dim1 + 1], &
                    c__1, &cs, &sn);
        }
/*<                call srotg(f,g,cs,sn) >*/
        srotg_(&f, &g, &cs, &sn);
/*<                s(k) = cmplx(f,0.0e0) >*/
        i__2 = k;
        q__1.r = f, q__1.i = (float)0.;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<                f = cs*real(e(k)) + sn*real(s(k+1)) >*/
        i__2 = k;
        i__3 = k + 1;
        f = cs * e[i__2].r + sn * s[i__3].r;
/*<                s(k+1) = -sn*e(k) + cs*s(k+1) >*/
        i__2 = k + 1;
        r__1 = -sn;
        i__3 = k;
        q__2.r = r__1 * e[i__3].r, q__2.i = r__1 * e[i__3].i;
        i__4 = k + 1;
        q__3.r = cs * s[i__4].r, q__3.i = cs * s[i__4].i;
        q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
        s[i__2].r = q__1.r, s[i__2].i = q__1.i;
/*<                g = sn*real(e(k+1)) >*/
        i__2 = k + 1;
        g = sn * e[i__2].r;
/*<                e(k+1) = cs*e(k+1) >*/
        i__2 = k + 1;
        i__3 = k + 1;
        q__1.r = cs * e[i__3].r, q__1.i = cs * e[i__3].i;
        e[i__2].r = q__1.r, e[i__2].i = q__1.i;
/*<    >*/
        if (wantu && k < *n) {
            csrot_(n, &u[k * u_dim1 + 1], &c__1, &u[(k + 1) * u_dim1 + 1], &
                    c__1, &cs, &sn);
        }
/*<   600       continue >*/
/* L600: */
    }
/*<             e(m-1) = cmplx(f,0.0e0) >*/
    i__1 = m - 1;
    q__1.r = f, q__1.i = (float)0.;
    e[i__1].r = q__1.r, e[i__1].i = q__1.i;
/*<             iter = iter + 1 >*/
    ++iter;
/*<          go to 650 >*/
    goto L650;

/*        convergence. */

/*<   610    continue >*/
L610:

/*           make the singular value positive. */

/*<             if (real(s(l)) .ge. 0.0e0) go to 620 >*/
    i__1 = l;
    if (s[i__1].r >= (float)0.) {
        goto L620;
    }
/*<                s(l) = -s(l) >*/
    i__1 = l;
    i__2 = l;
    q__1.r = -s[i__2].r, q__1.i = -s[i__2].i;
    s[i__1].r = q__1.r, s[i__1].i = q__1.i;
/*<                if (wantv) call cscal(p,(-1.0e0,0.0e0),v(1,l),1) >*/
    if (wantv) {
        cscal_(p, &c_b53, &v[l * v_dim1 + 1], &c__1);
    }
/*<   620       continue >*/
L620:

/*           order the singular value. */

/*<   630       if (l .eq. mm) go to 640 >*/
L630:
    if (l == mm) {
        goto L640;
    }
/*           ...exit */
/*<                if (real(s(l)) .ge. real(s(l+1))) go to 640 >*/
    i__1 = l;
    i__2 = l + 1;
    if (s[i__1].r >= s[i__2].r) {
        goto L640;
    }
/*<                t = s(l) >*/
    i__1 = l;
    t.r = s[i__1].r, t.i = s[i__1].i;
/*<                s(l) = s(l+1) >*/
    i__1 = l;
    i__2 = l + 1;
    s[i__1].r = s[i__2].r, s[i__1].i = s[i__2].i;
/*<                s(l+1) = t >*/
    i__1 = l + 1;
    s[i__1].r = t.r, s[i__1].i = t.i;
/*<    >*/
    if (wantv && l < *p) {
        cswap_(p, &v[l * v_dim1 + 1], &c__1, &v[(l + 1) * v_dim1 + 1], &c__1);
    }
/*<    >*/
    if (wantu && l < *n) {
        cswap_(n, &u[l * u_dim1 + 1], &c__1, &u[(l + 1) * u_dim1 + 1], &c__1);
    }
/*<                l = l + 1 >*/
    ++l;
/*<             go to 630 >*/
    goto L630;
/*<   640       continue >*/
L640:
/*<             iter = 0 >*/
    iter = 0;
/*<             m = m - 1 >*/
    --m;
/*<   650    continue >*/
L650:
/*<       go to 400 >*/
    goto L400;
/*<   660 continue >*/
L660:
/*<       return >*/
    return 0;
/*<       end >*/
} /* csvdc_ */
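
A minimal driver for the routine above is sketched next. The prototype, the f2c.h typedefs (integer, real, complex) and the 3-by-2 test matrix are assumptions for illustration only; the LINPACK job encoding (job = 11: return all n columns of u and also v) and the workspace sizes (s: min(n+1,p), e: p, work: n) follow the LINPACK documentation.

#include <stdio.h>
#include "f2c.h"   /* assumed: supplies the integer/real/complex typedefs */

/* Assumed f2c-style prototype of the LINPACK routine translated above;
   every argument is passed by pointer. */
extern int csvdc_(complex *x, integer *ldx, integer *n, integer *p,
                  complex *s, complex *e, complex *u, integer *ldu,
                  complex *v, integer *ldv, complex *work,
                  integer *job, integer *info);

int main(void)
{
    integer n = 3, p = 2, ldx = 3, ldu = 3, ldv = 2, job = 11, info = 0;
    /* 3-by-2 complex test matrix, stored column-major as Fortran expects */
    complex x[6] = { {1.f, 0.f}, {0.f, 1.f}, {2.f,  0.f},    /* column 1 */
                     {0.f, 0.f}, {3.f, 0.f}, {0.f, -1.f} };  /* column 2 */
    complex s[3], e[2], u[9], v[4], work[3];
    integer j;

    csvdc_(x, &ldx, &n, &p, s, e, u, &ldu, v, &ldv, work, &job, &info);

    if (info == 0) {
        /* the singular values are returned in decreasing order in the
           real parts of s(1..min(n,p)); u and v hold the singular vectors */
        for (j = 0; j < 2; ++j)
            printf("sigma[%ld] = %g\n", (long) j, s[j].r);
    } else {
        printf("csvdc_ did not converge: info = %ld\n", (long) info);
    }
    return 0;
}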
Exemplo n.º 21
 int cgbbrd_(char *vect, int *m, int *n, int *ncc, 
	 int *kl, int *ku, complex *ab, int *ldab, float *d__, 
	float *e, complex *q, int *ldq, complex *pt, int *ldpt, 
	complex *c__, int *ldc, complex *work, float *rwork, int *info)
{
    /* System generated locals */
    int ab_dim1, ab_offset, c_dim1, c_offset, pt_dim1, pt_offset, q_dim1, 
	    q_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7;
    complex q__1, q__2, q__3;

    /* Builtin functions */
    void r_cnjg(complex *, complex *);
    double c_abs(complex *);

    /* Local variables */
    int i__, j, l;
    complex t;
    int j1, j2, kb;
    complex ra, rb;
    float rc;
    int kk, ml, nr, mu;
    complex rs;
    int kb1, ml0, mu0, klm, kun, nrt, klu1, inca;
    float abst;
    extern  int crot_(int *, complex *, int *, 
	    complex *, int *, float *, complex *), cscal_(int *, 
	    complex *, complex *, int *);
    extern int lsame_(char *, char *);
    int wantb, wantc;
    int minmn;
    int wantq;
    extern  int claset_(char *, int *, int *, complex 
	    *, complex *, complex *, int *), clartg_(complex *, 
	    complex *, float *, complex *, complex *), xerbla_(char *, int 
	    *), clargv_(int *, complex *, int *, complex *, 
	    int *, float *, int *), clartv_(int *, complex *, 
	    int *, complex *, int *, float *, complex *, int *);
    int wantpt;


/*  -- LAPACK routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CGBBRD reduces a complex general m-by-n band matrix A to real upper */
/*  bidiagonal form B by a unitary transformation: Q' * A * P = B. */

/*  The routine computes B, and optionally forms Q or P', or computes */
/*  Q'*C for a given matrix C. */

/*  Arguments */
/*  ========= */

/*  VECT    (input) CHARACTER*1 */
/*          Specifies whether or not the matrices Q and P' are to be */
/*          formed. */
/*          = 'N': do not form Q or P'; */
/*          = 'Q': form Q only; */
/*          = 'P': form P' only; */
/*          = 'B': form both. */

/*  M       (input) INTEGER */
/*          The number of rows of the matrix A.  M >= 0. */

/*  N       (input) INTEGER */
/*          The number of columns of the matrix A.  N >= 0. */

/*  NCC     (input) INTEGER */
/*          The number of columns of the matrix C.  NCC >= 0. */

/*  KL      (input) INTEGER */
/*          The number of subdiagonals of the matrix A. KL >= 0. */

/*  KU      (input) INTEGER */
/*          The number of superdiagonals of the matrix A. KU >= 0. */

/*  AB      (input/output) COMPLEX array, dimension (LDAB,N) */
/*          On entry, the m-by-n band matrix A, stored in rows 1 to */
/*          KL+KU+1. The j-th column of A is stored in the j-th column of */
/*          the array AB as follows: */
/*          AB(ku+1+i-j,j) = A(i,j) for MAX(1,j-ku)<=i<=MIN(m,j+kl). */
/*          On exit, A is overwritten by values generated during the */
/*          reduction. */

/*  LDAB    (input) INTEGER */
/*          The leading dimension of the array AB. LDAB >= KL+KU+1. */

/*  D       (output) REAL array, dimension (MIN(M,N)) */
/*          The diagonal elements of the bidiagonal matrix B. */

/*  E       (output) REAL array, dimension (MIN(M,N)-1) */
/*          The superdiagonal elements of the bidiagonal matrix B. */

/*  Q       (output) COMPLEX array, dimension (LDQ,M) */
/*          If VECT = 'Q' or 'B', the m-by-m unitary matrix Q. */
/*          If VECT = 'N' or 'P', the array Q is not referenced. */

/*  LDQ     (input) INTEGER */
/*          The leading dimension of the array Q. */
/*          LDQ >= MAX(1,M) if VECT = 'Q' or 'B'; LDQ >= 1 otherwise. */

/*  PT      (output) COMPLEX array, dimension (LDPT,N) */
/*          If VECT = 'P' or 'B', the n-by-n unitary matrix P'. */
/*          If VECT = 'N' or 'Q', the array PT is not referenced. */

/*  LDPT    (input) INTEGER */
/*          The leading dimension of the array PT. */
/*          LDPT >= MAX(1,N) if VECT = 'P' or 'B'; LDPT >= 1 otherwise. */

/*  C       (input/output) COMPLEX array, dimension (LDC,NCC) */
/*          On entry, an m-by-ncc matrix C. */
/*          On exit, C is overwritten by Q'*C. */
/*          C is not referenced if NCC = 0. */

/*  LDC     (input) INTEGER */
/*          The leading dimension of the array C. */
/*          LDC >= MAX(1,M) if NCC > 0; LDC >= 1 if NCC = 0. */

/*  WORK    (workspace) COMPLEX array, dimension (MAX(M,N)) */

/*  RWORK   (workspace) REAL array, dimension (MAX(M,N)) */

/*  INFO    (output) INTEGER */
/*          = 0:  successful exit. */
/*          < 0:  if INFO = -i, the i-th argument had an illegal value. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Executable Statements .. */

/*     Test the input parameters */

    /* Parameter adjustments */
    ab_dim1 = *ldab;
    ab_offset = 1 + ab_dim1;
    ab -= ab_offset;
    --d__;
    --e;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1;
    q -= q_offset;
    pt_dim1 = *ldpt;
    pt_offset = 1 + pt_dim1;
    pt -= pt_offset;
    c_dim1 = *ldc;
    c_offset = 1 + c_dim1;
    c__ -= c_offset;
    --work;
    --rwork;

    /* Function Body */
    wantb = lsame_(vect, "B");
    wantq = lsame_(vect, "Q") || wantb;
    wantpt = lsame_(vect, "P") || wantb;
    wantc = *ncc > 0;
    klu1 = *kl + *ku + 1;
    *info = 0;
    if (! wantq && ! wantpt && ! lsame_(vect, "N")) {
	*info = -1;
    } else if (*m < 0) {
	*info = -2;
    } else if (*n < 0) {
	*info = -3;
    } else if (*ncc < 0) {
	*info = -4;
    } else if (*kl < 0) {
	*info = -5;
    } else if (*ku < 0) {
	*info = -6;
    } else if (*ldab < klu1) {
	*info = -8;
    } else if (*ldq < 1 || wantq && *ldq < MAX(1,*m)) {
	*info = -12;
    } else if (*ldpt < 1 || wantpt && *ldpt < MAX(1,*n)) {
	*info = -14;
    } else if (*ldc < 1 || wantc && *ldc < MAX(1,*m)) {
	*info = -16;
    }
    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CGBBRD", &i__1);
	return 0;
    }

/*     Initialize Q and P' to the unit matrix, if needed */

    if (wantq) {
	claset_("Full", m, m, &c_b1, &c_b2, &q[q_offset], ldq);
    }
    if (wantpt) {
	claset_("Full", n, n, &c_b1, &c_b2, &pt[pt_offset], ldpt);
    }

/*     Quick return if possible. */

    if (*m == 0 || *n == 0) {
	return 0;
    }

    minmn = MIN(*m,*n);

    if (*kl + *ku > 1) {

/*        Reduce to upper bidiagonal form if KU > 0; if KU = 0, reduce */
/*        first to lower bidiagonal form and then transform to upper */
/*        bidiagonal */

	if (*ku > 0) {
	    ml0 = 1;
	    mu0 = 2;
	} else {
	    ml0 = 2;
	    mu0 = 1;
	}

/*        Wherever possible, plane rotations are generated and applied in */
/*        vector operations of length NR over the index set J1:J2:KLU1. */

/*        The complex sines of the plane rotations are stored in WORK, */
/*        and the real cosines in RWORK. */

/* Computing MIN */
	i__1 = *m - 1;
	klm = MIN(i__1,*kl);
/* Computing MIN */
	i__1 = *n - 1;
	kun = MIN(i__1,*ku);
	kb = klm + kun;
	kb1 = kb + 1;
	inca = kb1 * *ldab;
	nr = 0;
	j1 = klm + 2;
	j2 = 1 - kun;

	i__1 = minmn;
	for (i__ = 1; i__ <= i__1; ++i__) {

/*           Reduce i-th column and i-th row of matrix to bidiagonal form */

	    ml = klm + 1;
	    mu = kun + 1;
	    i__2 = kb;
	    for (kk = 1; kk <= i__2; ++kk) {
		j1 += kb;
		j2 += kb;

/*              generate plane rotations to annihilate nonzero elements */
/*              which have been created below the band */

		if (nr > 0) {
		    clargv_(&nr, &ab[klu1 + (j1 - klm - 1) * ab_dim1], &inca, 
			    &work[j1], &kb1, &rwork[j1], &kb1);
		}

/*              apply plane rotations from the left */

		i__3 = kb;
		for (l = 1; l <= i__3; ++l) {
		    if (j2 - klm + l - 1 > *n) {
			nrt = nr - 1;
		    } else {
			nrt = nr;
		    }
		    if (nrt > 0) {
			clartv_(&nrt, &ab[klu1 - l + (j1 - klm + l - 1) * 
				ab_dim1], &inca, &ab[klu1 - l + 1 + (j1 - klm 
				+ l - 1) * ab_dim1], &inca, &rwork[j1], &work[
				j1], &kb1);
		    }
/* L10: */
		}

		if (ml > ml0) {
		    if (ml <= *m - i__ + 1) {

/*                    generate plane rotation to annihilate a(i+ml-1,i) */
/*                    within the band, and apply rotation from the left */

			clartg_(&ab[*ku + ml - 1 + i__ * ab_dim1], &ab[*ku + 
				ml + i__ * ab_dim1], &rwork[i__ + ml - 1], &
				work[i__ + ml - 1], &ra);
			i__3 = *ku + ml - 1 + i__ * ab_dim1;
			ab[i__3].r = ra.r, ab[i__3].i = ra.i;
			if (i__ < *n) {
/* Computing MIN */
			    i__4 = *ku + ml - 2, i__5 = *n - i__;
			    i__3 = MIN(i__4,i__5);
			    i__6 = *ldab - 1;
			    i__7 = *ldab - 1;
			    crot_(&i__3, &ab[*ku + ml - 2 + (i__ + 1) * 
				    ab_dim1], &i__6, &ab[*ku + ml - 1 + (i__ 
				    + 1) * ab_dim1], &i__7, &rwork[i__ + ml - 
				    1], &work[i__ + ml - 1]);
			}
		    }
		    ++nr;
		    j1 -= kb1;
		}

		if (wantq) {

/*                 accumulate product of plane rotations in Q */

		    i__3 = j2;
		    i__4 = kb1;
		    for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) 
			    {
			r_cnjg(&q__1, &work[j]);
			crot_(m, &q[(j - 1) * q_dim1 + 1], &c__1, &q[j * 
				q_dim1 + 1], &c__1, &rwork[j], &q__1);
/* L20: */
		    }
		}

		if (wantc) {

/*                 apply plane rotations to C */

		    i__4 = j2;
		    i__3 = kb1;
		    for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) 
			    {
			crot_(ncc, &c__[j - 1 + c_dim1], ldc, &c__[j + c_dim1],
				ldc, &rwork[j], &work[j]);
/* L30: */
		    }
		}

		if (j2 + kun > *n) {

/*                 adjust J2 to keep within the bounds of the matrix */

		    --nr;
		    j2 -= kb1;
		}

		i__3 = j2;
		i__4 = kb1;
		for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {

/*                 create nonzero element a(j-1,j+ku) above the band */
/*                 and store it in WORK(n+1:2*n) */

		    i__5 = j + kun;
		    i__6 = j;
		    i__7 = (j + kun) * ab_dim1 + 1;
		    q__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[
			    i__7].i, q__1.i = work[i__6].r * ab[i__7].i + 
			    work[i__6].i * ab[i__7].r;
		    work[i__5].r = q__1.r, work[i__5].i = q__1.i;
		    i__5 = (j + kun) * ab_dim1 + 1;
		    i__6 = j;
		    i__7 = (j + kun) * ab_dim1 + 1;
		    q__1.r = rwork[i__6] * ab[i__7].r, q__1.i = rwork[i__6] * 
			    ab[i__7].i;
		    ab[i__5].r = q__1.r, ab[i__5].i = q__1.i;
/* L40: */
		}

/*              generate plane rotations to annihilate nonzero elements */
/*              which have been generated above the band */

		if (nr > 0) {
		    clargv_(&nr, &ab[(j1 + kun - 1) * ab_dim1 + 1], &inca, &
			    work[j1 + kun], &kb1, &rwork[j1 + kun], &kb1);
		}

/*              apply plane rotations from the right */

		i__4 = kb;
		for (l = 1; l <= i__4; ++l) {
		    if (j2 + l - 1 > *m) {
			nrt = nr - 1;
		    } else {
			nrt = nr;
		    }
		    if (nrt > 0) {
			clartv_(&nrt, &ab[l + 1 + (j1 + kun - 1) * ab_dim1], &
				inca, &ab[l + (j1 + kun) * ab_dim1], &inca, &
				rwork[j1 + kun], &work[j1 + kun], &kb1);
		    }
/* L50: */
		}

		if (ml == ml0 && mu > mu0) {
		    if (mu <= *n - i__ + 1) {

/*                    generate plane rotation to annihilate a(i,i+mu-1) */
/*                    within the band, and apply rotation from the right */

			clartg_(&ab[*ku - mu + 3 + (i__ + mu - 2) * ab_dim1], 
				&ab[*ku - mu + 2 + (i__ + mu - 1) * ab_dim1], 
				&rwork[i__ + mu - 1], &work[i__ + mu - 1], &
				ra);
			i__4 = *ku - mu + 3 + (i__ + mu - 2) * ab_dim1;
			ab[i__4].r = ra.r, ab[i__4].i = ra.i;
/* Computing MIN */
			i__3 = *kl + mu - 2, i__5 = *m - i__;
			i__4 = MIN(i__3,i__5);
			crot_(&i__4, &ab[*ku - mu + 4 + (i__ + mu - 2) * 
				ab_dim1], &c__1, &ab[*ku - mu + 3 + (i__ + mu 
				- 1) * ab_dim1], &c__1, &rwork[i__ + mu - 1], 
				&work[i__ + mu - 1]);
		    }
		    ++nr;
		    j1 -= kb1;
		}

		if (wantpt) {

/*                 accumulate product of plane rotations in P' */

		    i__4 = j2;
		    i__3 = kb1;
		    for (j = j1; i__3 < 0 ? j >= i__4 : j <= i__4; j += i__3) 
			    {
			r_cnjg(&q__1, &work[j + kun]);
			crot_(n, &pt[j + kun - 1 + pt_dim1], ldpt, &pt[j + 
				kun + pt_dim1], ldpt, &rwork[j + kun], &q__1);
/* L60: */
		    }
		}

		if (j2 + kb > *m) {

/*                 adjust J2 to keep within the bounds of the matrix */

		    --nr;
		    j2 -= kb1;
		}

		i__3 = j2;
		i__4 = kb1;
		for (j = j1; i__4 < 0 ? j >= i__3 : j <= i__3; j += i__4) {

/*                 create nonzero element a(j+kl+ku,j+ku-1) below the */
/*                 band and store it in WORK(1:n) */

		    i__5 = j + kb;
		    i__6 = j + kun;
		    i__7 = klu1 + (j + kun) * ab_dim1;
		    q__1.r = work[i__6].r * ab[i__7].r - work[i__6].i * ab[
			    i__7].i, q__1.i = work[i__6].r * ab[i__7].i + 
			    work[i__6].i * ab[i__7].r;
		    work[i__5].r = q__1.r, work[i__5].i = q__1.i;
		    i__5 = klu1 + (j + kun) * ab_dim1;
		    i__6 = j + kun;
		    i__7 = klu1 + (j + kun) * ab_dim1;
		    q__1.r = rwork[i__6] * ab[i__7].r, q__1.i = rwork[i__6] * 
			    ab[i__7].i;
		    ab[i__5].r = q__1.r, ab[i__5].i = q__1.i;
/* L70: */
		}

		if (ml > ml0) {
		    --ml;
		} else {
		    --mu;
		}
/* L80: */
	    }
/* L90: */
	}
    }

    if (*ku == 0 && *kl > 0) {

/*        A has been reduced to complex lower bidiagonal form */

/*        Transform lower bidiagonal form to upper bidiagonal by applying */
/*        plane rotations from the left, overwriting superdiagonal */
/*        elements on subdiagonal elements */

/* Computing MIN */
	i__2 = *m - 1;
	i__1 = MIN(i__2,*n);
	for (i__ = 1; i__ <= i__1; ++i__) {
	    clartg_(&ab[i__ * ab_dim1 + 1], &ab[i__ * ab_dim1 + 2], &rc, &rs, 
		    &ra);
	    i__2 = i__ * ab_dim1 + 1;
	    ab[i__2].r = ra.r, ab[i__2].i = ra.i;
	    if (i__ < *n) {
		i__2 = i__ * ab_dim1 + 2;
		i__4 = (i__ + 1) * ab_dim1 + 1;
		q__1.r = rs.r * ab[i__4].r - rs.i * ab[i__4].i, q__1.i = rs.r 
			* ab[i__4].i + rs.i * ab[i__4].r;
		ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
		i__2 = (i__ + 1) * ab_dim1 + 1;
		i__4 = (i__ + 1) * ab_dim1 + 1;
		q__1.r = rc * ab[i__4].r, q__1.i = rc * ab[i__4].i;
		ab[i__2].r = q__1.r, ab[i__2].i = q__1.i;
	    }
	    if (wantq) {
		r_cnjg(&q__1, &rs);
		crot_(m, &q[i__ * q_dim1 + 1], &c__1, &q[(i__ + 1) * q_dim1 + 
			1], &c__1, &rc, &q__1);
	    }
	    if (wantc) {
		crot_(ncc, &c__[i__ + c_dim1], ldc, &c__[i__ + 1 + c_dim1], 
			ldc, &rc, &rs);
	    }
/* L100: */
	}
    } else {

/*        A has been reduced to complex upper bidiagonal form or is */
/*        diagonal */

	if (*ku > 0 && *m < *n) {

/*           Annihilate a(m,m+1) by applying plane rotations from the */
/*           right */

	    i__1 = *ku + (*m + 1) * ab_dim1;
	    rb.r = ab[i__1].r, rb.i = ab[i__1].i;
	    for (i__ = *m; i__ >= 1; --i__) {
		clartg_(&ab[*ku + 1 + i__ * ab_dim1], &rb, &rc, &rs, &ra);
		i__1 = *ku + 1 + i__ * ab_dim1;
		ab[i__1].r = ra.r, ab[i__1].i = ra.i;
		if (i__ > 1) {
		    r_cnjg(&q__3, &rs);
		    q__2.r = -q__3.r, q__2.i = -q__3.i;
		    i__1 = *ku + i__ * ab_dim1;
		    q__1.r = q__2.r * ab[i__1].r - q__2.i * ab[i__1].i, 
			    q__1.i = q__2.r * ab[i__1].i + q__2.i * ab[i__1]
			    .r;
		    rb.r = q__1.r, rb.i = q__1.i;
		    i__1 = *ku + i__ * ab_dim1;
		    i__2 = *ku + i__ * ab_dim1;
		    q__1.r = rc * ab[i__2].r, q__1.i = rc * ab[i__2].i;
		    ab[i__1].r = q__1.r, ab[i__1].i = q__1.i;
		}
		if (wantpt) {
		    r_cnjg(&q__1, &rs);
		    crot_(n, &pt[i__ + pt_dim1], ldpt, &pt[*m + 1 + pt_dim1], 
			    ldpt, &rc, &q__1);
		}
/* L110: */
	    }
	}
    }

/*     Make diagonal and superdiagonal elements real, storing them in D */
/*     and E */
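/*     (the variable t below carries a running unit-modulus phase factor;
       scaling the matching column of Q, row of P' and row of C by this
       phase or its conjugate preserves the factorization while the
       bidiagonal entries become real) */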

    i__1 = *ku + 1 + ab_dim1;
    t.r = ab[i__1].r, t.i = ab[i__1].i;
    i__1 = minmn;
    for (i__ = 1; i__ <= i__1; ++i__) {
	abst = c_abs(&t);
	d__[i__] = abst;
	if (abst != 0.f) {
	    q__1.r = t.r / abst, q__1.i = t.i / abst;
	    t.r = q__1.r, t.i = q__1.i;
	} else {
	    t.r = 1.f, t.i = 0.f;
	}
	if (wantq) {
	    cscal_(m, &t, &q[i__ * q_dim1 + 1], &c__1);
	}
	if (wantc) {
	    r_cnjg(&q__1, &t);
	    cscal_(ncc, &q__1, &c__[i__ + c_dim1], ldc);
	}
	if (i__ < minmn) {
	    if (*ku == 0 && *kl == 0) {
		e[i__] = 0.f;
		i__2 = (i__ + 1) * ab_dim1 + 1;
		t.r = ab[i__2].r, t.i = ab[i__2].i;
	    } else {
		if (*ku == 0) {
		    i__2 = i__ * ab_dim1 + 2;
		    r_cnjg(&q__2, &t);
		    q__1.r = ab[i__2].r * q__2.r - ab[i__2].i * q__2.i, 
			    q__1.i = ab[i__2].r * q__2.i + ab[i__2].i * 
			    q__2.r;
		    t.r = q__1.r, t.i = q__1.i;
		} else {
		    i__2 = *ku + (i__ + 1) * ab_dim1;
		    r_cnjg(&q__2, &t);
		    q__1.r = ab[i__2].r * q__2.r - ab[i__2].i * q__2.i, 
			    q__1.i = ab[i__2].r * q__2.i + ab[i__2].i * 
			    q__2.r;
		    t.r = q__1.r, t.i = q__1.i;
		}
		abst = c_abs(&t);
		e[i__] = abst;
		if (abst != 0.f) {
		    q__1.r = t.r / abst, q__1.i = t.i / abst;
		    t.r = q__1.r, t.i = q__1.i;
		} else {
		    t.r = 1.f, t.i = 0.f;
		}
		if (wantpt) {
		    cscal_(n, &t, &pt[i__ + 1 + pt_dim1], ldpt);
		}
		i__2 = *ku + 1 + (i__ + 1) * ab_dim1;
		r_cnjg(&q__2, &t);
		q__1.r = ab[i__2].r * q__2.r - ab[i__2].i * q__2.i, q__1.i = 
			ab[i__2].r * q__2.i + ab[i__2].i * q__2.r;
		t.r = q__1.r, t.i = q__1.i;
	    }
	}
/* L120: */
    }
    return 0;

/*     End of CGBBRD */

} /* cgbbrd_ */
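
The AB storage rule quoted in the comments above, AB(ku+1+i-j,j) = A(i,j) for MAX(1,j-ku) <= i <= MIN(m,j+kl), is easy to get wrong when the caller works with 0-based C arrays. The helper below is a minimal sketch of that packing step; the name pack_band and the use of the f2c.h complex type are illustrative assumptions, not part of the original source.

#include "f2c.h"   /* assumed: complex = { real r, i; }, as used by cgbbrd_ */

/* Copy a dense column-major m-by-n matrix A (leading dimension lda) into
   the band layout expected by CGBBRD's AB argument (ldab >= kl+ku+1). */
static void pack_band(integer m, integer n, integer kl, integer ku,
                      const complex *a, integer lda,
                      complex *ab, integer ldab)
{
    integer i, j, ilo, ihi;

    for (j = 1; j <= n; ++j) {              /* 1-based, mirroring Fortran */
        ilo = (j - ku > 1) ? j - ku : 1;    /* MAX(1, j-ku) */
        ihi = (j + kl < m) ? j + kl : m;    /* MIN(m, j+kl) */
        for (i = ilo; i <= ihi; ++i)
            /* AB(ku+1+i-j, j) <- A(i, j), shifted to 0-based C offsets */
            ab[(ku + i - j) + (j - 1) * ldab] = a[(i - 1) + (j - 1) * lda];
    }
}

Only in-band entries are written here; as the comments above note, CGBBRD overwrites AB with intermediate values during the reduction.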
Exemplo n.º 22
/* Subroutine */ int claic1_(integer *job, integer *j, complex *x, real *sest,
	 complex *w, complex *gamma, real *sestpr, complex *s, complex *c__)
{
/*  -- LAPACK auxiliary routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       October 31, 1992   


    Purpose   
    =======   

    CLAIC1 applies one step of incremental condition estimation in   
    its simplest version:   

    Let x, twonorm(x) = 1, be an approximate singular vector of a j-by-j   
    lower triangular matrix L, such that   
             twonorm(L*x) = sest   
    Then CLAIC1 computes sestpr, s, c such that   
    the vector   
                    [ s*x ]   
             xhat = [  c  ]   
    is an approximate singular vector of   
                    [ L     0  ]   
             Lhat = [ w' gamma ]   
    in the sense that   
             twonorm(Lhat*xhat) = sestpr.   

    Depending on JOB, an estimate for the largest or smallest singular   
    value is computed.   

    Note that [s c]' and sestpr**2 form an eigenpair of the system   

        diag(sest*sest, 0) + [alpha  gamma] * [ conjg(alpha) ]   
                                              [ conjg(gamma) ]   

    where  alpha =  conjg(x)'*w.   

    Arguments   
    =========   

    JOB     (input) INTEGER   
            = 1: an estimate for the largest singular value is computed.   
            = 2: an estimate for the smallest singular value is computed.   

    J       (input) INTEGER   
            Length of X and W   

    X       (input) COMPLEX array, dimension (J)   
            The j-vector x.   

    SEST    (input) REAL   
            Estimated singular value of j by j matrix L   

    W       (input) COMPLEX array, dimension (J)   
            The j-vector w.   

    GAMMA   (input) COMPLEX   
            The diagonal element gamma.   

    SESTPR  (output) REAL   
            Estimated singular value of (j+1) by (j+1) matrix Lhat.   

    S       (output) COMPLEX   
            Sine needed in forming xhat.   

    C       (output) COMPLEX   
            Cosine needed in forming xhat.   

    =====================================================================   


       Parameter adjustments */
    /* Table of constant values */
    static integer c__1 = 1;
    
    /* System generated locals */
    real r__1, r__2;
    complex q__1, q__2, q__3, q__4, q__5, q__6;
    /* Builtin functions */
    double c_abs(complex *);
    void r_cnjg(complex *, complex *), c_sqrt(complex *, complex *);
    double sqrt(doublereal);
    void c_div(complex *, complex *, complex *);
    /* Local variables */
    static complex sine;
    static real test, zeta1, zeta2, b, t;
    static complex alpha;
    extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer 
	    *, complex *, integer *);
    static real norma, s1, s2, absgam, absalp;
    extern doublereal slamch_(char *);
    static complex cosine;
    static real absest, scl, eps, tmp;


    --w;
    --x;

    /* Function Body */
    eps = slamch_("Epsilon");
    cdotc_(&q__1, j, &x[1], &c__1, &w[1], &c__1);
    alpha.r = q__1.r, alpha.i = q__1.i;

    absalp = c_abs(&alpha);
    absgam = c_abs(gamma);
    absest = dabs(*sest);

    if (*job == 1) {

/*        Estimating largest singular value   

          special cases */

	if (*sest == 0.f) {
	    s1 = dmax(absgam,absalp);
	    if (s1 == 0.f) {
		s->r = 0.f, s->i = 0.f;
		c__->r = 1.f, c__->i = 0.f;
		*sestpr = 0.f;
	    } else {
		q__1.r = alpha.r / s1, q__1.i = alpha.i / s1;
		s->r = q__1.r, s->i = q__1.i;
		q__1.r = gamma->r / s1, q__1.i = gamma->i / s1;
		c__->r = q__1.r, c__->i = q__1.i;
		r_cnjg(&q__4, s);
		q__3.r = s->r * q__4.r - s->i * q__4.i, q__3.i = s->r * 
			q__4.i + s->i * q__4.r;
		r_cnjg(&q__6, c__);
		q__5.r = c__->r * q__6.r - c__->i * q__6.i, q__5.i = c__->r * 
			q__6.i + c__->i * q__6.r;
		q__2.r = q__3.r + q__5.r, q__2.i = q__3.i + q__5.i;
		c_sqrt(&q__1, &q__2);
		tmp = q__1.r;
		q__1.r = s->r / tmp, q__1.i = s->i / tmp;
		s->r = q__1.r, s->i = q__1.i;
		q__1.r = c__->r / tmp, q__1.i = c__->i / tmp;
		c__->r = q__1.r, c__->i = q__1.i;
		*sestpr = s1 * tmp;
	    }
	    return 0;
	} else if (absgam <= eps * absest) {
	    s->r = 1.f, s->i = 0.f;
	    c__->r = 0.f, c__->i = 0.f;
	    tmp = dmax(absest,absalp);
	    s1 = absest / tmp;
	    s2 = absalp / tmp;
	    *sestpr = tmp * sqrt(s1 * s1 + s2 * s2);
	    return 0;
	} else if (absalp <= eps * absest) {
	    s1 = absgam;
	    s2 = absest;
	    if (s1 <= s2) {
		s->r = 1.f, s->i = 0.f;
		c__->r = 0.f, c__->i = 0.f;
		*sestpr = s2;
	    } else {
		s->r = 0.f, s->i = 0.f;
		c__->r = 1.f, c__->i = 0.f;
		*sestpr = s1;
	    }
	    return 0;
	} else if (absest <= eps * absalp || absest <= eps * absgam) {
	    s1 = absgam;
	    s2 = absalp;
	    if (s1 <= s2) {
		tmp = s1 / s2;
		scl = sqrt(tmp * tmp + 1.f);
		*sestpr = s2 * scl;
		q__2.r = alpha.r / s2, q__2.i = alpha.i / s2;
		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
		s->r = q__1.r, s->i = q__1.i;
		q__2.r = gamma->r / s2, q__2.i = gamma->i / s2;
		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
		c__->r = q__1.r, c__->i = q__1.i;
	    } else {
		tmp = s2 / s1;
		scl = sqrt(tmp * tmp + 1.f);
		*sestpr = s1 * scl;
		q__2.r = alpha.r / s1, q__2.i = alpha.i / s1;
		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
		s->r = q__1.r, s->i = q__1.i;
		q__2.r = gamma->r / s1, q__2.i = gamma->i / s1;
		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
		c__->r = q__1.r, c__->i = q__1.i;
	    }
	    return 0;
	} else {

/*           normal case */

	    zeta1 = absalp / absest;
	    zeta2 = absgam / absest;

	    b = (1.f - zeta1 * zeta1 - zeta2 * zeta2) * .5f;
	    r__1 = zeta1 * zeta1;
	    c__->r = r__1, c__->i = 0.f;
	    if (b > 0.f) {
		r__1 = b * b;
		q__4.r = r__1 + c__->r, q__4.i = c__->i;
		c_sqrt(&q__3, &q__4);
		q__2.r = b + q__3.r, q__2.i = q__3.i;
		c_div(&q__1, c__, &q__2);
		t = q__1.r;
	    } else {
		r__1 = b * b;
		q__3.r = r__1 + c__->r, q__3.i = c__->i;
		c_sqrt(&q__2, &q__3);
		q__1.r = q__2.r - b, q__1.i = q__2.i;
		t = q__1.r;
	    }

	    q__3.r = alpha.r / absest, q__3.i = alpha.i / absest;
	    q__2.r = -q__3.r, q__2.i = -q__3.i;
	    q__1.r = q__2.r / t, q__1.i = q__2.i / t;
	    sine.r = q__1.r, sine.i = q__1.i;
	    q__3.r = gamma->r / absest, q__3.i = gamma->i / absest;
	    q__2.r = -q__3.r, q__2.i = -q__3.i;
	    r__1 = t + 1.f;
	    q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
	    cosine.r = q__1.r, cosine.i = q__1.i;
	    r_cnjg(&q__4, &sine);
	    q__3.r = sine.r * q__4.r - sine.i * q__4.i, q__3.i = sine.r * 
		    q__4.i + sine.i * q__4.r;
	    r_cnjg(&q__6, &cosine);
	    q__5.r = cosine.r * q__6.r - cosine.i * q__6.i, q__5.i = cosine.r 
		    * q__6.i + cosine.i * q__6.r;
	    q__2.r = q__3.r + q__5.r, q__2.i = q__3.i + q__5.i;
	    c_sqrt(&q__1, &q__2);
	    tmp = q__1.r;
	    q__1.r = sine.r / tmp, q__1.i = sine.i / tmp;
	    s->r = q__1.r, s->i = q__1.i;
	    q__1.r = cosine.r / tmp, q__1.i = cosine.i / tmp;
	    c__->r = q__1.r, c__->i = q__1.i;
	    *sestpr = sqrt(t + 1.f) * absest;
	    return 0;
	}

    } else if (*job == 2) {

/*        Estimating smallest singular value   

          special cases */

	if (*sest == 0.f) {
	    *sestpr = 0.f;
	    if (dmax(absgam,absalp) == 0.f) {
		sine.r = 1.f, sine.i = 0.f;
		cosine.r = 0.f, cosine.i = 0.f;
	    } else {
		r_cnjg(&q__2, gamma);
		q__1.r = -q__2.r, q__1.i = -q__2.i;
		sine.r = q__1.r, sine.i = q__1.i;
		r_cnjg(&q__1, &alpha);
		cosine.r = q__1.r, cosine.i = q__1.i;
	    }
/* Computing MAX */
	    r__1 = c_abs(&sine), r__2 = c_abs(&cosine);
	    s1 = dmax(r__1,r__2);
	    q__1.r = sine.r / s1, q__1.i = sine.i / s1;
	    s->r = q__1.r, s->i = q__1.i;
	    q__1.r = cosine.r / s1, q__1.i = cosine.i / s1;
	    c__->r = q__1.r, c__->i = q__1.i;
	    r_cnjg(&q__4, s);
	    q__3.r = s->r * q__4.r - s->i * q__4.i, q__3.i = s->r * q__4.i + 
		    s->i * q__4.r;
	    r_cnjg(&q__6, c__);
	    q__5.r = c__->r * q__6.r - c__->i * q__6.i, q__5.i = c__->r * 
		    q__6.i + c__->i * q__6.r;
	    q__2.r = q__3.r + q__5.r, q__2.i = q__3.i + q__5.i;
	    c_sqrt(&q__1, &q__2);
	    tmp = q__1.r;
	    q__1.r = s->r / tmp, q__1.i = s->i / tmp;
	    s->r = q__1.r, s->i = q__1.i;
	    q__1.r = c__->r / tmp, q__1.i = c__->i / tmp;
	    c__->r = q__1.r, c__->i = q__1.i;
	    return 0;
	} else if (absgam <= eps * absest) {
	    s->r = 0.f, s->i = 0.f;
	    c__->r = 1.f, c__->i = 0.f;
	    *sestpr = absgam;
	    return 0;
	} else if (absalp <= eps * absest) {
	    s1 = absgam;
	    s2 = absest;
	    if (s1 <= s2) {
		s->r = 0.f, s->i = 0.f;
		c__->r = 1.f, c__->i = 0.f;
		*sestpr = s1;
	    } else {
		s->r = 1.f, s->i = 0.f;
		c__->r = 0.f, c__->i = 0.f;
		*sestpr = s2;
	    }
	    return 0;
	} else if (absest <= eps * absalp || absest <= eps * absgam) {
	    s1 = absgam;
	    s2 = absalp;
	    if (s1 <= s2) {
		tmp = s1 / s2;
		scl = sqrt(tmp * tmp + 1.f);
		*sestpr = absest * (tmp / scl);
		r_cnjg(&q__4, gamma);
		q__3.r = q__4.r / s2, q__3.i = q__4.i / s2;
		q__2.r = -q__3.r, q__2.i = -q__3.i;
		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
		s->r = q__1.r, s->i = q__1.i;
		r_cnjg(&q__3, &alpha);
		q__2.r = q__3.r / s2, q__2.i = q__3.i / s2;
		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
		c__->r = q__1.r, c__->i = q__1.i;
	    } else {
		tmp = s2 / s1;
		scl = sqrt(tmp * tmp + 1.f);
		*sestpr = absest / scl;
		r_cnjg(&q__4, gamma);
		q__3.r = q__4.r / s1, q__3.i = q__4.i / s1;
		q__2.r = -q__3.r, q__2.i = -q__3.i;
		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
		s->r = q__1.r, s->i = q__1.i;
		r_cnjg(&q__3, &alpha);
		q__2.r = q__3.r / s1, q__2.i = q__3.i / s1;
		q__1.r = q__2.r / scl, q__1.i = q__2.i / scl;
		c__->r = q__1.r, c__->i = q__1.i;
	    }
	    return 0;
	} else {

/*           normal case */

	    zeta1 = absalp / absest;
	    zeta2 = absgam / absest;

/* Computing MAX */
	    r__1 = zeta1 * zeta1 + 1.f + zeta1 * zeta2, r__2 = zeta1 * zeta2 
		    + zeta2 * zeta2;
	    norma = dmax(r__1,r__2);

/*           See if root is closer to zero or to ONE */

	    test = (zeta1 - zeta2) * 2.f * (zeta1 + zeta2) + 1.f;
	    if (test >= 0.f) {

/*              root is close to zero, compute directly */

		b = (zeta1 * zeta1 + zeta2 * zeta2 + 1.f) * .5f;
		r__1 = zeta2 * zeta2;
		c__->r = r__1, c__->i = 0.f;
		r__2 = b * b;
		q__2.r = r__2 - c__->r, q__2.i = -c__->i;
		r__1 = b + sqrt(c_abs(&q__2));
		q__1.r = c__->r / r__1, q__1.i = c__->i / r__1;
		t = q__1.r;
		q__2.r = alpha.r / absest, q__2.i = alpha.i / absest;
		r__1 = 1.f - t;
		q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
		sine.r = q__1.r, sine.i = q__1.i;
		q__3.r = gamma->r / absest, q__3.i = gamma->i / absest;
		q__2.r = -q__3.r, q__2.i = -q__3.i;
		q__1.r = q__2.r / t, q__1.i = q__2.i / t;
		cosine.r = q__1.r, cosine.i = q__1.i;
		*sestpr = sqrt(t + eps * 4.f * eps * norma) * absest;
	    } else {

/*              root is closer to ONE, shift by that amount */

		b = (zeta2 * zeta2 + zeta1 * zeta1 - 1.f) * .5f;
		r__1 = zeta1 * zeta1;
		c__->r = r__1, c__->i = 0.f;
		if (b >= 0.f) {
		    q__2.r = -c__->r, q__2.i = -c__->i;
		    r__1 = b * b;
		    q__5.r = r__1 + c__->r, q__5.i = c__->i;
		    c_sqrt(&q__4, &q__5);
		    q__3.r = b + q__4.r, q__3.i = q__4.i;
		    c_div(&q__1, &q__2, &q__3);
		    t = q__1.r;
		} else {
		    r__1 = b * b;
		    q__3.r = r__1 + c__->r, q__3.i = c__->i;
		    c_sqrt(&q__2, &q__3);
		    q__1.r = b - q__2.r, q__1.i = -q__2.i;
		    t = q__1.r;
		}
		q__3.r = alpha.r / absest, q__3.i = alpha.i / absest;
		q__2.r = -q__3.r, q__2.i = -q__3.i;
		q__1.r = q__2.r / t, q__1.i = q__2.i / t;
		sine.r = q__1.r, sine.i = q__1.i;
		q__3.r = gamma->r / absest, q__3.i = gamma->i / absest;
		q__2.r = -q__3.r, q__2.i = -q__3.i;
		r__1 = t + 1.f;
		q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
		cosine.r = q__1.r, cosine.i = q__1.i;
		*sestpr = sqrt(t + 1.f + eps * 4.f * eps * norma) * absest;
	    }
	    r_cnjg(&q__4, &sine);
	    q__3.r = sine.r * q__4.r - sine.i * q__4.i, q__3.i = sine.r * 
		    q__4.i + sine.i * q__4.r;
	    r_cnjg(&q__6, &cosine);
	    q__5.r = cosine.r * q__6.r - cosine.i * q__6.i, q__5.i = cosine.r 
		    * q__6.i + cosine.i * q__6.r;
	    q__2.r = q__3.r + q__5.r, q__2.i = q__3.i + q__5.i;
	    c_sqrt(&q__1, &q__2);
	    tmp = q__1.r;
	    q__1.r = sine.r / tmp, q__1.i = sine.i / tmp;
	    s->r = q__1.r, s->i = q__1.i;
	    q__1.r = cosine.r / tmp, q__1.i = cosine.i / tmp;
	    c__->r = q__1.r, c__->i = q__1.i;
	    return 0;

	}
    }
    return 0;

/*     End of CLAIC1 */

} /* claic1_ */
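/* The block below is an editorial sketch, not part of the original listing:
   it restates the two-stage normalization used in CLAIC1 above -- first
   dividing the raw (sine, cosine) pair by its larger modulus to avoid
   overflow, then by sqrt(|s|^2 + |c|^2) so the pair has unit 2-norm.
   The cpx struct and helpers are local illustrations, not the f2c runtime
   types. */
#include <math.h>

typedef struct { float r, i; } cpx;

static float cpx_abs(cpx z) { return hypotf(z.r, z.i); }

static void normalize_pair(cpx *s, cpx *c)
{
    /* stage 1: scale by the larger modulus (overflow guard) */
    float s1 = fmaxf(cpx_abs(*s), cpx_abs(*c));
    s->r /= s1; s->i /= s1;
    c->r /= s1; c->i /= s1;
    /* stage 2: s*conj(s) + c*conj(c) is real, so its square root is the
       2-norm of the pair; dividing by it leaves |s|^2 + |c|^2 = 1 */
    float tmp = sqrtf(s->r * s->r + s->i * s->i + c->r * c->r + c->i * c->i);
    s->r /= tmp; s->i /= tmp;
    c->r /= tmp; c->i /= tmp;
}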
Exemplo n.º 23
0
/* Subroutine */ int cdrvpt_(logical *dotype, integer *nn, integer *nval, 
	integer *nrhs, real *thresh, logical *tsterr, complex *a, real *d__, 
	complex *e, complex *b, complex *x, complex *xact, complex *work, 
	real *rwork, integer *nout)
{
    /* Initialized data */

    static integer iseedy[4] = { 0,0,0,1 };

    /* Format strings */
    static char fmt_9999[] = "(1x,a,\002, N =\002,i5,\002, type \002,i2,\002"
	    ", test \002,i2,\002, ratio = \002,g12.5)";
    static char fmt_9998[] = "(1x,a,\002, FACT='\002,a1,\002', N =\002,i5"
	    ",\002, type \002,i2,\002, test \002,i2,\002, ratio = \002,g12.5)";

    /* System generated locals */
    integer i__1, i__2, i__3, i__4, i__5;
    real r__1, r__2;

    /* Local variables */
    integer i__, j, k, n;
    real z__[3];
    integer k1, ia, in, kl, ku, ix, nt, lda;
    char fact[1];
    real cond;
    integer mode;
    real dmax__;
    integer imat, info;
    char path[3], dist[1], type__[1];
    integer nrun, ifact;
    integer nfail, iseed[4];
    real rcond;
    integer nimat;
    real anorm;
    integer izero, nerrs;
    logical zerot;
    real rcondc;
    real ainvnm;
    real result[6];

    /* Fortran I/O blocks */
    static cilist io___35 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___38 = { 0, 0, 0, fmt_9998, 0 };



/*  -- LAPACK test routine (version 3.1) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CDRVPT tests CPTSV and -SVX. */

/*  Arguments */
/*  ========= */

/*  DOTYPE  (input) LOGICAL array, dimension (NTYPES) */
/*          The matrix types to be used for testing.  Matrices of type j */
/*          (for 1 <= j <= NTYPES) are used for testing if DOTYPE(j) = */
/*          .TRUE.; if DOTYPE(j) = .FALSE., then type j is not used. */

/*  NN      (input) INTEGER */
/*          The number of values of N contained in the vector NVAL. */

/*  NVAL    (input) INTEGER array, dimension (NN) */
/*          The values of the matrix dimension N. */

/*  NRHS    (input) INTEGER */
/*          The number of right hand side vectors to be generated for */
/*          each linear system. */

/*  THRESH  (input) REAL */
/*          The threshold value for the test ratios.  A result is */
/*          included in the output file if RESULT >= THRESH.  To have */
/*          every test ratio printed, use THRESH = 0. */

/*  TSTERR  (input) LOGICAL */
/*          Flag that indicates whether error exits are to be tested. */

/*  A       (workspace) COMPLEX array, dimension (NMAX*2) */

/*  D       (workspace) REAL array, dimension (NMAX*2) */

/*  E       (workspace) COMPLEX array, dimension (NMAX*2) */

/*  B       (workspace) COMPLEX array, dimension (NMAX*NRHS) */

/*  X       (workspace) COMPLEX array, dimension (NMAX*NRHS) */

/*  XACT    (workspace) COMPLEX array, dimension (NMAX*NRHS) */

/*  WORK    (workspace) COMPLEX array, dimension */
/*                      (NMAX*max(3,NRHS)) */

/*  RWORK   (workspace) REAL array, dimension (NMAX+2*NRHS) */

/*  NOUT    (input) INTEGER */
/*          The unit number for output. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. Local Arrays .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Scalars in Common .. */
/*     .. */
/*     .. Common blocks .. */
/*     .. */
/*     .. Data statements .. */
    /* Parameter adjustments */
    --rwork;
    --work;
    --xact;
    --x;
    --b;
    --e;
    --d__;
    --a;
    --nval;
    --dotype;

    /* Function Body */
/*     .. */
/*     .. Executable Statements .. */

    s_copy(path, "Complex precision", (ftnlen)1, (ftnlen)17);
    s_copy(path + 1, "PT", (ftnlen)2, (ftnlen)2);
    nrun = 0;
    nfail = 0;
    nerrs = 0;
    for (i__ = 1; i__ <= 4; ++i__) {
	iseed[i__ - 1] = iseedy[i__ - 1];
/* L10: */
    }

/*     Test the error exits */

    if (*tsterr) {
	cerrvx_(path, nout);
    }
    infoc_1.infot = 0;

    i__1 = *nn;
    for (in = 1; in <= i__1; ++in) {

/*        Do for each value of N in NVAL. */

	n = nval[in];
	lda = max(1,n);
	nimat = 12;
	if (n <= 0) {
	    nimat = 1;
	}

	i__2 = nimat;
	for (imat = 1; imat <= i__2; ++imat) {

/*           Do the tests only if DOTYPE( IMAT ) is true. */

	    if (n > 0 && ! dotype[imat]) {
		goto L110;
	    }

/*           Set up parameters with CLATB4. */

	    clatb4_(path, &imat, &n, &n, type__, &kl, &ku, &anorm, &mode, &
		    cond, dist);

	    zerot = imat >= 8 && imat <= 10;
	    if (imat <= 6) {

/*              Type 1-6:  generate a symmetric tridiagonal matrix of */
/*              known condition number in lower triangular band storage. */

		s_copy(srnamc_1.srnamt, "CLATMS", (ftnlen)32, (ftnlen)6);
		clatms_(&n, &n, dist, iseed, type__, &rwork[1], &mode, &cond, 
			&anorm, &kl, &ku, "B", &a[1], &c__2, &work[1], &info);

/*              Check the error code from CLATMS. */

		if (info != 0) {
		    alaerh_(path, "CLATMS", &info, &c__0, " ", &n, &n, &kl, &
			    ku, &c_n1, &imat, &nfail, &nerrs, nout);
		    goto L110;
		}
		izero = 0;

/*              Copy the matrix to D and E. */

		ia = 1;
		i__3 = n - 1;
		for (i__ = 1; i__ <= i__3; ++i__) {
		    i__4 = i__;
		    i__5 = ia;
		    d__[i__4] = a[i__5].r;
		    i__4 = i__;
		    i__5 = ia + 1;
		    e[i__4].r = a[i__5].r, e[i__4].i = a[i__5].i;
		    ia += 2;
/* L20: */
		}
		if (n > 0) {
		    i__3 = n;
		    i__4 = ia;
		    d__[i__3] = a[i__4].r;
		}
	    } else {

/*              Type 7-12:  generate a diagonally dominant matrix with */
/*              unknown condition number in the vectors D and E. */

		if (! zerot || ! dotype[7]) {

/*                 Let D and E have values from [-1,1]. */

		    slarnv_(&c__2, iseed, &n, &d__[1]);
		    i__3 = n - 1;
		    clarnv_(&c__2, iseed, &i__3, &e[1]);

/*                 Make the tridiagonal matrix diagonally dominant. */

		    if (n == 1) {
			d__[1] = dabs(d__[1]);
		    } else {
			d__[1] = dabs(d__[1]) + c_abs(&e[1]);
			d__[n] = (r__1 = d__[n], dabs(r__1)) + c_abs(&e[n - 1]
				);
			i__3 = n - 1;
			for (i__ = 2; i__ <= i__3; ++i__) {
			    d__[i__] = (r__1 = d__[i__], dabs(r__1)) + c_abs(&
				    e[i__]) + c_abs(&e[i__ - 1]);
/* L30: */
			}
		    }

/*                 Scale D and E so the maximum element is ANORM. */

		    ix = isamax_(&n, &d__[1], &c__1);
		    dmax__ = d__[ix];
		    r__1 = anorm / dmax__;
		    sscal_(&n, &r__1, &d__[1], &c__1);
		    if (n > 1) {
			i__3 = n - 1;
			r__1 = anorm / dmax__;
			csscal_(&i__3, &r__1, &e[1], &c__1);
		    }

		} else if (izero > 0) {

/*                 Reuse the last matrix by copying back the zeroed out */
/*                 elements. */

		    if (izero == 1) {
			d__[1] = z__[1];
			if (n > 1) {
			    e[1].r = z__[2], e[1].i = 0.f;
			}
		    } else if (izero == n) {
			i__3 = n - 1;
			e[i__3].r = z__[0], e[i__3].i = 0.f;
			d__[n] = z__[1];
		    } else {
			i__3 = izero - 1;
			e[i__3].r = z__[0], e[i__3].i = 0.f;
			d__[izero] = z__[1];
			i__3 = izero;
			e[i__3].r = z__[2], e[i__3].i = 0.f;
		    }
		}

/*              For types 8-10, set one row and column of the matrix to */
/*              zero. */

		izero = 0;
		if (imat == 8) {
		    izero = 1;
		    z__[1] = d__[1];
		    d__[1] = 0.f;
		    if (n > 1) {
			z__[2] = e[1].r;
			e[1].r = 0.f, e[1].i = 0.f;
		    }
		} else if (imat == 9) {
		    izero = n;
		    if (n > 1) {
			i__3 = n - 1;
			z__[0] = e[i__3].r;
			i__3 = n - 1;
			e[i__3].r = 0.f, e[i__3].i = 0.f;
		    }
		    z__[1] = d__[n];
		    d__[n] = 0.f;
		} else if (imat == 10) {
		    izero = (n + 1) / 2;
		    if (izero > 1) {
			i__3 = izero - 1;
			z__[0] = e[i__3].r;
			i__3 = izero - 1;
			e[i__3].r = 0.f, e[i__3].i = 0.f;
			i__3 = izero;
			z__[2] = e[i__3].r;
			i__3 = izero;
			e[i__3].r = 0.f, e[i__3].i = 0.f;
		    }
		    z__[1] = d__[izero];
		    d__[izero] = 0.f;
		}
	    }

/*           Generate NRHS random solution vectors. */

	    ix = 1;
	    i__3 = *nrhs;
	    for (j = 1; j <= i__3; ++j) {
		clarnv_(&c__2, iseed, &n, &xact[ix]);
		ix += lda;
/* L40: */
	    }

/*           Set the right hand side. */

	    claptm_("Lower", &n, nrhs, &c_b24, &d__[1], &e[1], &xact[1], &lda, 
		     &c_b25, &b[1], &lda);

	    for (ifact = 1; ifact <= 2; ++ifact) {
		if (ifact == 1) {
		    *(unsigned char *)fact = 'F';
		} else {
		    *(unsigned char *)fact = 'N';
		}

/*              Compute the condition number for comparison with */
/*              the value returned by CPTSVX. */

		if (zerot) {
		    if (ifact == 1) {
			goto L100;
		    }
		    rcondc = 0.f;

		} else if (ifact == 1) {

/*                 Compute the 1-norm of A. */

		    anorm = clanht_("1", &n, &d__[1], &e[1]);

		    scopy_(&n, &d__[1], &c__1, &d__[n + 1], &c__1);
		    if (n > 1) {
			i__3 = n - 1;
			ccopy_(&i__3, &e[1], &c__1, &e[n + 1], &c__1);
		    }

/*                 Factor the matrix A. */

		    cpttrf_(&n, &d__[n + 1], &e[n + 1], &info);

/*                 Use CPTTRS to solve for one column at a time of */
/*                 inv(A), computing the maximum column sum as we go. */

		    ainvnm = 0.f;
		    i__3 = n;
		    for (i__ = 1; i__ <= i__3; ++i__) {
			i__4 = n;
			for (j = 1; j <= i__4; ++j) {
			    i__5 = j;
			    x[i__5].r = 0.f, x[i__5].i = 0.f;
/* L50: */
			}
			i__4 = i__;
			x[i__4].r = 1.f, x[i__4].i = 0.f;
			cpttrs_("Lower", &n, &c__1, &d__[n + 1], &e[n + 1], &
				x[1], &lda, &info);
/* Computing MAX */
			r__1 = ainvnm, r__2 = scasum_(&n, &x[1], &c__1);
			ainvnm = dmax(r__1,r__2);
/* L60: */
		    }

/*                 Compute the 1-norm condition number of A. */

		    if (anorm <= 0.f || ainvnm <= 0.f) {
			rcondc = 1.f;
		    } else {
			rcondc = 1.f / anorm / ainvnm;
		    }
		}

		if (ifact == 2) {

/*                 --- Test CPTSV -- */

		    scopy_(&n, &d__[1], &c__1, &d__[n + 1], &c__1);
		    if (n > 1) {
			i__3 = n - 1;
			ccopy_(&i__3, &e[1], &c__1, &e[n + 1], &c__1);
		    }
		    clacpy_("Full", &n, nrhs, &b[1], &lda, &x[1], &lda);

/*                 Factor A as L*D*L' and solve the system A*X = B. */

		    s_copy(srnamc_1.srnamt, "CPTSV ", (ftnlen)32, (ftnlen)6);
		    cptsv_(&n, nrhs, &d__[n + 1], &e[n + 1], &x[1], &lda, &
			    info);

/*                 Check error code from CPTSV . */

		    if (info != izero) {
			alaerh_(path, "CPTSV ", &info, &izero, " ", &n, &n, &
				c__1, &c__1, nrhs, &imat, &nfail, &nerrs, 
				nout);
		    }
		    nt = 0;
		    if (izero == 0) {

/*                    Check the factorization by computing the ratio */
/*                       norm(L*D*L' - A) / (n * norm(A) * EPS ) */

			cptt01_(&n, &d__[1], &e[1], &d__[n + 1], &e[n + 1], &
				work[1], result);

/*                    Compute the residual in the solution. */

			clacpy_("Full", &n, nrhs, &b[1], &lda, &work[1], &lda);
			cptt02_("Lower", &n, nrhs, &d__[1], &e[1], &x[1], &
				lda, &work[1], &lda, &result[1]);

/*                    Check solution from generated exact solution. */

			cget04_(&n, nrhs, &x[1], &lda, &xact[1], &lda, &
				rcondc, &result[2]);
			nt = 3;
		    }

/*                 Print information about the tests that did not pass */
/*                 the threshold. */

		    i__3 = nt;
		    for (k = 1; k <= i__3; ++k) {
			if (result[k - 1] >= *thresh) {
			    if (nfail == 0 && nerrs == 0) {
				aladhd_(nout, path);
			    }
			    io___35.ciunit = *nout;
			    s_wsfe(&io___35);
			    do_fio(&c__1, "CPTSV ", (ftnlen)6);
			    do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer))
				    ;
			    do_fio(&c__1, (char *)&imat, (ftnlen)sizeof(
				    integer));
			    do_fio(&c__1, (char *)&k, (ftnlen)sizeof(integer))
				    ;
			    do_fio(&c__1, (char *)&result[k - 1], (ftnlen)
				    sizeof(real));
			    e_wsfe();
			    ++nfail;
			}
/* L70: */
		    }
		    nrun += nt;
		}

/*              --- Test CPTSVX --- */

		if (ifact > 1) {

/*                 Initialize D( N+1:2*N ) and E( N+1:2*N ) to zero. */

		    i__3 = n - 1;
		    for (i__ = 1; i__ <= i__3; ++i__) {
			d__[n + i__] = 0.f;
			i__4 = n + i__;
			e[i__4].r = 0.f, e[i__4].i = 0.f;
/* L80: */
		    }
		    if (n > 0) {
			d__[n + n] = 0.f;
		    }
		}

		claset_("Full", &n, nrhs, &c_b62, &c_b62, &x[1], &lda);

/*              Solve the system and compute the condition number and */
/*              error bounds using CPTSVX. */

		s_copy(srnamc_1.srnamt, "CPTSVX", (ftnlen)32, (ftnlen)6);
		cptsvx_(fact, &n, nrhs, &d__[1], &e[1], &d__[n + 1], &e[n + 1], 
			&b[1], &lda, &x[1], &lda, &rcond, &rwork[1], 
			&rwork[*nrhs + 1], &work[1], &rwork[(*nrhs << 1) + 1], 
			&info);

/*              Check the error code from CPTSVX. */

		if (info != izero) {
		    alaerh_(path, "CPTSVX", &info, &izero, fact, &n, &n, &
			    c__1, &c__1, nrhs, &imat, &nfail, &nerrs, nout);
		}
		if (izero == 0) {
		    if (ifact == 2) {

/*                    Check the factorization by computing the ratio */
/*                       norm(L*D*L' - A) / (n * norm(A) * EPS ) */

			k1 = 1;
			cptt01_(&n, &d__[1], &e[1], &d__[n + 1], &e[n + 1], &
				work[1], result);
		    } else {
			k1 = 2;
		    }

/*                 Compute the residual in the solution. */

		    clacpy_("Full", &n, nrhs, &b[1], &lda, &work[1], &lda);
		    cptt02_("Lower", &n, nrhs, &d__[1], &e[1], &x[1], &lda, &
			    work[1], &lda, &result[1]);

/*                 Check solution from generated exact solution. */

		    cget04_(&n, nrhs, &x[1], &lda, &xact[1], &lda, &rcondc, &
			    result[2]);

/*                 Check error bounds from iterative refinement. */

		    cptt05_(&n, nrhs, &d__[1], &e[1], &b[1], &lda, &x[1], &
			    lda, &xact[1], &lda, &rwork[1], &rwork[*nrhs + 1], 
			     &result[3]);
		} else {
		    k1 = 6;
		}

/*              Check the reciprocal of the condition number. */

		result[5] = sget06_(&rcond, &rcondc);

/*              Print information about the tests that did not pass */
/*              the threshold. */

		for (k = k1; k <= 6; ++k) {
		    if (result[k - 1] >= *thresh) {
			if (nfail == 0 && nerrs == 0) {
			    aladhd_(nout, path);
			}
			io___38.ciunit = *nout;
			s_wsfe(&io___38);
			do_fio(&c__1, "CPTSVX", (ftnlen)6);
			do_fio(&c__1, fact, (ftnlen)1);
			do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&imat, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&k, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&result[k - 1], (ftnlen)sizeof(
				real));
			e_wsfe();
			++nfail;
		    }
/* L90: */
		}
		nrun = nrun + 7 - k1;
L100:
		;
	    }
L110:
	    ;
	}
/* L120: */
    }

/*     Print a summary of the results. */

    alasvm_(path, nout, &nfail, &nrun, &nerrs);

    return 0;

/*     End of CDRVPT */

} /* cdrvpt_ */
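/* Editorial sketch, not part of the original test suite: the diagonal-
   dominance construction used above for matrix types 7-12, written here
   for a real-valued tridiagonal (d, e) pair to keep it short.  d[] and
   e[] are assumed to already hold random values in [-1,1]. */
#include <math.h>

static void make_diag_dominant(int n, float *d, const float *e)
{
    if (n == 1) { d[0] = fabsf(d[0]); return; }
    d[0]     = fabsf(d[0])     + fabsf(e[0]);
    d[n - 1] = fabsf(d[n - 1]) + fabsf(e[n - 2]);
    /* each interior diagonal entry dominates its two off-diagonal neighbors */
    for (int i = 1; i < n - 1; ++i)
        d[i] = fabsf(d[i]) + fabsf(e[i]) + fabsf(e[i - 1]);
}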
Exemplo n.º 24
0
int icmax1_(int *n, complex *cx, int *incx)
{
    /* System generated locals */
    int ret_val, i__1;

    /* Builtin functions */
    double c_abs(complex *);

    /* Local variables */
    int i__, ix;
    float smax;


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ICMAX1 finds the index of the first element of maximum absolute */
/*  value (measured with the true complex modulus). */

/*  Based on ICAMAX from Level 1 BLAS. */
/*  The change is to use the 'genuine' absolute value. */

/*  Contributed by Nick Higham for use with CLACON. */

/*  Arguments */
/*  ========= */

/*  N       (input) INTEGER */
/*          The number of elements in the vector CX. */

/*  CX      (input) COMPLEX array, dimension (N) */
/*          The vector to be searched for the element of maximum */
/*          absolute value. */

/*  INCX    (input) INTEGER */
/*          The spacing between successive values of CX.  INCX >= 1. */

/* ===================================================================== */

/*     .. Local Scalars .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Statement Functions .. */
/*     .. */
/*     .. Statement Function definitions .. */

/*     NEXT LINE IS THE ONLY MODIFICATION. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    --cx;

    /* Function Body */
    ret_val = 0;
    if (*n < 1) {
	return ret_val;
    }
    ret_val = 1;
    if (*n == 1) {
	return ret_val;
    }
    if (*incx == 1) {
	goto L30;
    }

/*     CODE FOR INCREMENT NOT EQUAL TO 1 */

    ix = 1;
    smax = c_abs(&cx[1]);
    ix += *incx;
    i__1 = *n;
    for (i__ = 2; i__ <= i__1; ++i__) {
	if (c_abs(&cx[ix]) <= smax) {
	    goto L10;
	}
	ret_val = i__;
	smax = c_abs(&cx[ix]);
L10:
	ix += *incx;
/* L20: */
    }
    return ret_val;

/*     CODE FOR INCREMENT EQUAL TO 1 */

L30:
    smax = c_abs(&cx[1]);
    i__1 = *n;
    for (i__ = 2; i__ <= i__1; ++i__) {
	if (c_abs(&cx[i__]) <= smax) {
	    goto L40;
	}
	ret_val = i__;
	smax = c_abs(&cx[i__]);
L40:
	;
    }
    return ret_val;

/*     End of ICMAX1 */

} /* icmax1_ */
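/* Editorial usage sketch, not from the original source: driving the
   ICMAX1-style search above from plain C with unit stride.  The struct
   mirrors the f2c complex layout, and the extern declaration assumes the
   icmax1_ translation above is compiled and linked into the same program. */
#include <stdio.h>

typedef struct { float r, i; } f2c_complex;

extern int icmax1_(int *n, f2c_complex *cx, int *incx);

static int demo_icmax1(void)
{
    f2c_complex v[4] = { {1.f, 0.f}, {0.f, 3.f}, {-2.f, 2.f}, {.5f, 0.f} };
    int n = 4, incx = 1;
    int k = icmax1_(&n, v, &incx);   /* 1-based index of the largest modulus */
    printf("largest modulus at 1-based index %d\n", k);   /* prints 2 here */
    return k;
}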
Exemplo n.º 25
0
/* DECK CDSTP */
/* Subroutine */ int cdstp_(real *eps, S_fp f, U_fp fa, real *hmax, integer *
	impl, integer *ierror, U_fp jacobn, integer *matdim, integer *maxord, 
	integer *mint, integer *miter, integer *ml, integer *mu, integer *n, 
	integer *nde, complex *ywt, real *uround, U_fp users, real *avgh, 
	real *avgord, real *h__, real *hused, integer *jtask, integer *mntold,
	 integer *mtrold, integer *nfe, integer *nje, integer *nqused, 
	integer *nstep, real *t, complex *y, complex *yh, complex *a, logical 
	*convrg, complex *dfdy, real *el, complex *fac, real *hold, integer *
	ipvt, integer *jstate, integer *jstepl, integer *nq, integer *nwait, 
	real *rc, real *rmax, complex *save1, complex *save2, real *tq, real *
	trend, integer *iswflg, integer *mtrsv, integer *mxrdsv)
{
    /* Initialized data */

    static logical ier = FALSE_;

    /* System generated locals */
    integer a_dim1, a_offset, dfdy_dim1, dfdy_offset, yh_dim1, yh_offset, 
	    i__1, i__2, i__3, i__4, i__5, i__6;
    real r__1, r__2, r__3;
    doublereal d__1, d__2;
    complex q__1, q__2;

    /* Local variables */
    static real d__;
    static integer i__, j;
    static real d1, hn, rh, hs, rh1, rh2, rh3, bnd;
    static integer nsv;
    static real erdn, told;
    static integer iter;
    static real erup;
    static integer ntry;
    static real y0nrm;
    extern /* Subroutine */ int cdscl_(real *, integer *, integer *, real *, 
	    real *, real *, real *, complex *);
    static integer nfail;
    extern /* Subroutine */ int cdcor_(complex *, real *, U_fp, real *, 
	    integer *, integer *, integer *, integer *, integer *, integer *, 
	    integer *, integer *, integer *, integer *, real *, U_fp, complex 
	    *, complex *, complex *, logical *, complex *, complex *, complex 
	    *, real *, integer *), cdpsc_(integer *, integer *, integer *, 
	    complex *), cdcst_(integer *, integer *, integer *, real *, real *
	    );
    static real denom;
    extern /* Subroutine */ int cdntl_(real *, S_fp, U_fp, real *, real *, 
	    integer *, integer *, integer *, integer *, integer *, integer *, 
	    integer *, integer *, integer *, integer *, complex *, real *, 
	    real *, U_fp, complex *, complex *, real *, integer *, integer *, 
	    integer *, real *, complex *, complex *, logical *, real *, 
	    complex *, logical *, integer *, integer *, integer *, real *, 
	    real *, complex *, real *, real *, integer *, integer *), cdpst_(
	    real *, S_fp, U_fp, real *, integer *, U_fp, integer *, integer *,
	     integer *, integer *, integer *, integer *, integer *, complex *,
	     real *, U_fp, complex *, complex *, complex *, real *, integer *,
	     integer *, complex *, complex *, complex *, logical *, integer *,
	     complex *, integer *, real *, integer *);
    static real ctest, etest, numer;
    extern doublereal scnrm2_(integer *, complex *, integer *);
    static logical evalfa, evaljc, switch__;

/* ***BEGIN PROLOGUE  CDSTP */
/* ***SUBSIDIARY */
/* ***PURPOSE  CDSTP performs one step of the integration of an initial */
/*            value problem for a system of ordinary differential */
/*            equations. */
/* ***LIBRARY   SLATEC (SDRIVE) */
/* ***TYPE      COMPLEX (SDSTP-S, DDSTP-D, CDSTP-C) */
/* ***AUTHOR  Kahaner, D. K., (NIST) */
/*             National Institute of Standards and Technology */
/*             Gaithersburg, MD  20899 */
/*           Sutherland, C. D., (LANL) */
/*             Mail Stop D466 */
/*             Los Alamos National Laboratory */
/*             Los Alamos, NM  87545 */
/* ***DESCRIPTION */

/*  Communication with CDSTP is done with the following variables: */

/*    YH      An N by MAXORD+1 array containing the dependent variables */
/*              and their scaled derivatives.  MAXORD, the maximum order */
/*              used, is currently 12 for the Adams methods and 5 for the */
/*              Gear methods.  YH(I,J+1) contains the J-th derivative of */
/*              Y(I), scaled by H**J/factorial(J).  Only Y(I), */
/*              1 .LE. I .LE. N, need be set by the calling program on */
/*              the first entry.  The YH array should not be altered by */
/*              the calling program.  When referencing YH as a */
/*              2-dimensional array, use a column length of N, as this is */
/*              the value used in CDSTP. */
/*    DFDY    A block of locations used for partial derivatives if MITER */
/*              is not 0.  If MITER is 1 or 2 its length must be at least */
/*              N*N.  If MITER is 4 or 5 its length must be at least */
/*              (2*ML+MU+1)*N. */
/*    YWT     An array of N locations used in convergence and error tests */
/*    SAVE1 */
/*    SAVE2   Arrays of length N used for temporary storage. */
/*    IPVT    An integer array of length N used by the linear system */
/*              solvers for the storage of row interchange information. */
/*    A       A block of locations used to store the matrix A, when using */
/*              the implicit method.  If IMPL is 1, A is a MATDIM by N */
/*              array.  If MITER is 1 or 2 MATDIM is N, and if MITER is 4 */
/*              or 5 MATDIM is 2*ML+MU+1.  If IMPL is 2 its length is N. */
/*              If IMPL is 3, A is a MATDIM by NDE array. */
/*    JTASK   An integer used on input. */
/*              It has the following values and meanings: */
/*                 .EQ. 0  Perform the first step.  This value enables */
/*                         the subroutine to initialize itself. */
/*                .GT. 0  Take a new step continuing from the last. */
/*                         Assumes the last step was successful and */
/*                         user has not changed any parameters. */
/*                 .LT. 0  Take a new step with a new value of H and/or */
/*                         MINT and/or MITER. */
/*    JSTATE  A completion code with the following meanings: */
/*                1  The step was successful. */
/*                2  A solution could not be obtained with H .NE. 0. */
/*                3  A solution was not obtained in MXTRY attempts. */
/*                4  For IMPL .NE. 0, the matrix A is singular. */
/*              On a return with JSTATE .GT. 1, the values of T and */
/*              the YH array are as of the beginning of the last */
/*              step, and H is the last step size attempted. */

/* ***ROUTINES CALLED  CDCOR, CDCST, CDNTL, CDPSC, CDPST, CDSCL, SCNRM2 */
/* ***REVISION HISTORY  (YYMMDD) */
/*   790601  DATE WRITTEN */
/*   900329  Initial submission to SLATEC. */
/* ***END PROLOGUE  CDSTP */
    /* Parameter adjustments */
    dfdy_dim1 = *matdim;
    dfdy_offset = 1 + dfdy_dim1;
    dfdy -= dfdy_offset;
    a_dim1 = *matdim;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    yh_dim1 = *n;
    yh_offset = 1 + yh_dim1;
    yh -= yh_offset;
    --ywt;
    --y;
    el -= 14;
    --fac;
    --ipvt;
    --save1;
    --save2;
    tq -= 4;

    /* Function Body */
/* ***FIRST EXECUTABLE STATEMENT  CDSTP */
    nsv = *n;
    bnd = 0.f;
    switch__ = FALSE_;
    ntry = 0;
    told = *t;
    nfail = 0;
    if (*jtask <= 0) {
	cdntl_(eps, (S_fp)f, (U_fp)fa, hmax, hold, impl, jtask, matdim, 
		maxord, mint, miter, ml, mu, n, nde, &save1[1], t, uround, (
		U_fp)users, &y[1], &ywt[1], h__, mntold, mtrold, nfe, rc, &yh[
		yh_offset], &a[a_offset], convrg, &el[14], &fac[1], &ier, &
		ipvt[1], nq, nwait, &rh, rmax, &save2[1], &tq[4], trend, 
		iswflg, jstate);
	if (*n == 0) {
	    goto L440;
	}
	if (*h__ == 0.f) {
	    goto L400;
	}
	if (ier) {
	    goto L420;
	}
    }
L100:
    ++ntry;
    if (ntry > 50) {
	goto L410;
    }
    *t += *h__;
    cdpsc_(&c__1, n, nq, &yh[yh_offset]);
    evaljc = ((r__1 = *rc - 1.f, dabs(r__1)) > .3f || *nstep >= *jstepl + 10) 
	    && *miter != 0;
    evalfa = ! evaljc;

L110:
    iter = 0;
    i__1 = *n;
    for (i__ = 1; i__ <= i__1; ++i__) {
/* L115: */
	i__2 = i__;
	i__3 = i__ + yh_dim1;
	y[i__2].r = yh[i__3].r, y[i__2].i = yh[i__3].i;
    }
    (*f)(n, t, &y[1], &save2[1]);
    if (*n == 0) {
	*jstate = 6;
	goto L430;
    }
    ++(*nfe);
    if (evaljc || ier) {
	cdpst_(&el[14], (S_fp)f, (U_fp)fa, h__, impl, (U_fp)jacobn, matdim, 
		miter, ml, mu, n, nde, nq, &save2[1], t, (U_fp)users, &y[1], &
		yh[yh_offset], &ywt[1], uround, nfe, nje, &a[a_offset], &dfdy[
		dfdy_offset], &fac[1], &ier, &ipvt[1], &save1[1], iswflg, &
		bnd, jstate);
	if (*n == 0) {
	    goto L430;
	}
	if (ier) {
	    goto L160;
	}
	*convrg = FALSE_;
	*rc = 1.f;
	*jstepl = *nstep;
    }
    i__2 = *n;
    for (i__ = 1; i__ <= i__2; ++i__) {
/* L125: */
	i__3 = i__;
	save1[i__3].r = 0.f, save1[i__3].i = 0.f;
    }
/*                      Up to MXITER corrector iterations are taken. */
/*                      Convergence is tested by requiring the r.m.s. */
/*                      norm of changes to be less than EPS.  The sum of */
/*                      the corrections is accumulated in the vector */
/*                      SAVE1(I).  It is approximately equal to the L-th */
/*                      derivative of Y multiplied by */
/*                      H**L/(factorial(L-1)*EL(L,NQ)), and is thus */
/*                      proportional to the actual errors to the lowest */
/*                      power of H present (H**L).  The YH array is not */
/*                      altered in the correction loop.  The norm of the */
/*                      iterate difference is stored in D.  If */
/*                      ITER .GT. 0, an estimate of the convergence rate */
/*                      constant is stored in TREND, and this is used in */
/*                      the convergence test. */

L130:
    cdcor_(&dfdy[dfdy_offset], &el[14], (U_fp)fa, h__, ierror, impl, &ipvt[1],
	     matdim, miter, ml, mu, n, nde, nq, t, (U_fp)users, &y[1], &yh[
	    yh_offset], &ywt[1], &evalfa, &save1[1], &save2[1], &a[a_offset], 
	    &d__, jstate);
    if (*n == 0) {
	goto L430;
    }
    if (*iswflg == 3 && *mint == 1) {
	if (iter == 0) {
	    numer = scnrm2_(n, &save1[1], &c__1);
	    i__3 = *n;
	    for (i__ = 1; i__ <= i__3; ++i__) {
/* L132: */
		i__2 = i__ * dfdy_dim1 + 1;
		i__1 = i__;
		dfdy[i__2].r = save1[i__1].r, dfdy[i__2].i = save1[i__1].i;
	    }
	    y0nrm = scnrm2_(n, &yh[yh_offset], &c__1);
	} else {
	    denom = numer;
	    i__2 = *n;
	    for (i__ = 1; i__ <= i__2; ++i__) {
/* L134: */
		i__1 = i__ * dfdy_dim1 + 1;
		i__3 = i__;
		i__4 = i__ * dfdy_dim1 + 1;
		q__1.r = save1[i__3].r - dfdy[i__4].r, q__1.i = save1[i__3].i 
			- dfdy[i__4].i;
		dfdy[i__1].r = q__1.r, dfdy[i__1].i = q__1.i;
	    }
	    numer = scnrm2_(n, &dfdy[dfdy_offset], matdim);
	    if (el[*nq * 13 + 1] * numer <= *uround * 100.f * y0nrm) {
		if (*rmax == 2.f) {
		    switch__ = TRUE_;
		    goto L170;
		}
	    }
	    i__1 = *n;
	    for (i__ = 1; i__ <= i__1; ++i__) {
/* L136: */
		i__3 = i__ * dfdy_dim1 + 1;
		i__4 = i__;
		dfdy[i__3].r = save1[i__4].r, dfdy[i__3].i = save1[i__4].i;
	    }
	    if (denom != 0.f) {
/* Computing MAX */
		r__1 = bnd, r__2 = numer / (denom * dabs(*h__) * el[*nq * 13 
			+ 1]);
		bnd = dmax(r__1,r__2);
	    }
	}
    }
    if (iter > 0) {
/* Computing MAX */
	r__1 = *trend * .9f, r__2 = d__ / d1;
	*trend = dmax(r__1,r__2);
    }
    d1 = d__;
/* Computing MIN */
    r__1 = *trend * 2.f;
    ctest = dmin(r__1,1.f) * d__;
    if (ctest <= *eps) {
	goto L170;
    }
    ++iter;
    if (iter < 3) {
	i__3 = *n;
	for (i__ = 1; i__ <= i__3; ++i__) {
/* L140: */
	    i__4 = i__;
	    i__1 = i__ + yh_dim1;
	    i__2 = *nq * 13 + 1;
	    i__5 = i__;
	    q__2.r = el[i__2] * save1[i__5].r, q__2.i = el[i__2] * save1[i__5]
		    .i;
	    q__1.r = yh[i__1].r + q__2.r, q__1.i = yh[i__1].i + q__2.i;
	    y[i__4].r = q__1.r, y[i__4].i = q__1.i;
	}
	(*f)(n, t, &y[1], &save2[1]);
	if (*n == 0) {
	    *jstate = 6;
	    goto L430;
	}
	++(*nfe);
	goto L130;
    }
/*                     The corrector iteration failed to converge in */
/*                     MXITER tries.  If partials are involved but are */
/*                     not up to date, they are reevaluated for the next */
/*                     try.  Otherwise the YH array is retracted to its */
/*                     values before prediction, and H is reduced, if */
/*                     possible.  If not, a no-convergence exit is taken. */
    if (*convrg) {
	evaljc = TRUE_;
	evalfa = FALSE_;
	goto L110;
    }
L160:
    *t = told;
    cdpsc_(&c_n1, n, nq, &yh[yh_offset]);
    *nwait = *nq + 2;
    if (*jtask != 0 && *jtask != 2) {
	*rmax = 2.f;
    }
    if (iter == 0) {
	rh = .3f;
    } else {
	d__1 = (doublereal) (*eps / ctest);
	rh = pow_dd(&d__1, &c_b22) * .9f;
    }
    if (rh * *h__ == 0.f) {
	goto L400;
    }
    cdscl_(hmax, n, nq, rmax, h__, rc, &rh, &yh[yh_offset]);
    goto L100;
/*                          The corrector has converged.  CONVRG is set */
/*                          to .TRUE. if partial derivatives were used, */
/*                          to indicate that they may need updating on */
/*                          subsequent steps.  The error test is made. */
L170:
    *convrg = *miter != 0;
    if (*ierror == 1 || *ierror == 5) {
	i__4 = *nde;
	for (i__ = 1; i__ <= i__4; ++i__) {
/* L180: */
	    i__1 = i__;
	    c_div(&q__1, &save1[i__], &ywt[i__]);
	    save2[i__1].r = q__1.r, save2[i__1].i = q__1.i;
	}
    } else {
	i__1 = *nde;
	for (i__ = 1; i__ <= i__1; ++i__) {
/* L185: */
	    i__4 = i__;
	    i__2 = i__;
/* Computing MAX */
	    r__2 = c_abs(&y[i__]), r__3 = c_abs(&ywt[i__]);
	    r__1 = dmax(r__2,r__3);
	    q__1.r = save1[i__2].r / r__1, q__1.i = save1[i__2].i / r__1;
	    save2[i__4].r = q__1.r, save2[i__4].i = q__1.i;
	}
    }
    etest = scnrm2_(nde, &save2[1], &c__1) / (tq[*nq * 3 + 2] * sqrt((real) (*
	    nde)));

/*                           The error test failed.  NFAIL keeps track of */
/*                           multiple failures.  Restore T and the YH */
/*                           array to their previous values, and prepare */
/*                           to try the step again.  Compute the optimum */
/*                           step size for this or one lower order. */
    if (etest > *eps) {
	*t = told;
	cdpsc_(&c_n1, n, nq, &yh[yh_offset]);
	++nfail;
	if (nfail < 3 || *nq == 1) {
	    if (*jtask != 0 && *jtask != 2) {
		*rmax = 2.f;
	    }
	    d__1 = (doublereal) (etest / *eps);
	    d__2 = (doublereal) (1.f / (*nq + 1));
	    rh2 = 1.f / (pow_dd(&d__1, &d__2) * 1.2f);
	    if (*nq > 1) {
		if (*ierror == 1 || *ierror == 5) {
		    i__4 = *nde;
		    for (i__ = 1; i__ <= i__4; ++i__) {
/* L190: */
			i__2 = i__;
			c_div(&q__1, &yh[i__ + (*nq + 1) * yh_dim1], &ywt[i__]
				);
			save2[i__2].r = q__1.r, save2[i__2].i = q__1.i;
		    }
		} else {
		    i__2 = *nde;
		    for (i__ = 1; i__ <= i__2; ++i__) {
/* L195: */
			i__4 = i__;
			i__1 = i__ + (*nq + 1) * yh_dim1;
/* Computing MAX */
			r__2 = c_abs(&y[i__]), r__3 = c_abs(&ywt[i__]);
			r__1 = dmax(r__2,r__3);
			q__1.r = yh[i__1].r / r__1, q__1.i = yh[i__1].i / 
				r__1;
			save2[i__4].r = q__1.r, save2[i__4].i = q__1.i;
		    }
		}
		erdn = scnrm2_(nde, &save2[1], &c__1) / (tq[*nq * 3 + 1] * 
			sqrt((real) (*nde)));
/* Computing MAX */
		d__1 = (doublereal) (erdn / *eps);
		d__2 = (doublereal) (1.f / *nq);
		r__1 = 1.f, r__2 = pow_dd(&d__1, &d__2) * 1.3f;
		rh1 = 1.f / dmax(r__1,r__2);
		if (rh2 < rh1) {
		    --(*nq);
		    *rc = *rc * el[*nq * 13 + 1] / el[(*nq + 1) * 13 + 1];
		    rh = rh1;
		} else {
		    rh = rh2;
		}
	    } else {
		rh = rh2;
	    }
	    *nwait = *nq + 2;
	    if (rh * *h__ == 0.f) {
		goto L400;
	    }
	    cdscl_(hmax, n, nq, rmax, h__, rc, &rh, &yh[yh_offset]);
	    goto L100;
	}
/*                Control reaches this section if the error test has */
/*                failed MXFAIL or more times.  It is assumed that the */
/*                derivatives that have accumulated in the YH array have */
/*                errors of the wrong order.  Hence the first derivative */
/*                is recomputed, the order is set to 1, and the step is */
/*                retried. */
	nfail = 0;
	*jtask = 2;
	i__4 = *n;
	for (i__ = 1; i__ <= i__4; ++i__) {
/* L215: */
	    i__1 = i__;
	    i__2 = i__ + yh_dim1;
	    y[i__1].r = yh[i__2].r, y[i__1].i = yh[i__2].i;
	}
	cdntl_(eps, (S_fp)f, (U_fp)fa, hmax, hold, impl, jtask, matdim, 
		maxord, mint, miter, ml, mu, n, nde, &save1[1], t, uround, (
		U_fp)users, &y[1], &ywt[1], h__, mntold, mtrold, nfe, rc, &yh[
		yh_offset], &a[a_offset], convrg, &el[14], &fac[1], &ier, &
		ipvt[1], nq, nwait, &rh, rmax, &save2[1], &tq[4], trend, 
		iswflg, jstate);
	*rmax = 10.f;
	if (*n == 0) {
	    goto L440;
	}
	if (*h__ == 0.f) {
	    goto L400;
	}
	if (ier) {
	    goto L420;
	}
	goto L100;
    }
/*                          After a successful step, update the YH array. */
    ++(*nstep);
    *hused = *h__;
    *nqused = *nq;
    *avgh = ((*nstep - 1) * *avgh + *h__) / *nstep;
    *avgord = ((*nstep - 1) * *avgord + *nq) / *nstep;
    i__1 = *nq + 1;
    for (j = 1; j <= i__1; ++j) {
	i__2 = *n;
	for (i__ = 1; i__ <= i__2; ++i__) {
/* L230: */
	    i__4 = i__ + j * yh_dim1;
	    i__5 = i__ + j * yh_dim1;
	    i__3 = j + *nq * 13;
	    i__6 = i__;
	    q__2.r = el[i__3] * save1[i__6].r, q__2.i = el[i__3] * save1[i__6]
		    .i;
	    q__1.r = yh[i__5].r + q__2.r, q__1.i = yh[i__5].i + q__2.i;
	    yh[i__4].r = q__1.r, yh[i__4].i = q__1.i;
	}
    }
    i__4 = *n;
    for (i__ = 1; i__ <= i__4; ++i__) {
/* L235: */
	i__5 = i__;
	i__3 = i__ + yh_dim1;
	y[i__5].r = yh[i__3].r, y[i__5].i = yh[i__3].i;
    }
/*                                          If ISWFLG is 3, consider */
/*                                          changing integration methods. */
    if (*iswflg == 3) {
	if (bnd != 0.f) {
	    if (*mint == 1 && *nq <= 5) {
/* Computing MAX */
		d__1 = (doublereal) (etest / *eps);
		d__2 = (doublereal) (1.f / (*nq + 1));
		r__1 = *uround, r__2 = pow_dd(&d__1, &d__2);
		hn = dabs(*h__) / dmax(r__1,r__2);
/* Computing MIN */
		r__1 = hn, r__2 = 1.f / (el[*nq * 13 + 1] * 2.f * bnd);
		hn = dmin(r__1,r__2);
/* Computing MAX */
		d__1 = (doublereal) (etest / (*eps * el[*nq + 14]));
		d__2 = (doublereal) (1.f / (*nq + 1));
		r__1 = *uround, r__2 = pow_dd(&d__1, &d__2);
		hs = dabs(*h__) / dmax(r__1,r__2);
		if (hs > hn * 1.2f) {
		    *mint = 2;
		    *mntold = *mint;
		    *miter = *mtrsv;
		    *mtrold = *miter;
		    *maxord = min(*mxrdsv,5);
		    *rc = 0.f;
		    *rmax = 10.f;
		    *trend = 1.f;
		    cdcst_(maxord, mint, iswflg, &el[14], &tq[4]);
		    *nwait = *nq + 2;
		}
	    } else if (*mint == 2) {
/* Computing MAX */
		d__1 = (doublereal) (etest / *eps);
		d__2 = (doublereal) (1.f / (*nq + 1));
		r__1 = *uround, r__2 = pow_dd(&d__1, &d__2);
		hs = dabs(*h__) / dmax(r__1,r__2);
/* Computing MAX */
		d__1 = (doublereal) (etest * el[*nq + 14] / *eps);
		d__2 = (doublereal) (1.f / (*nq + 1));
		r__1 = *uround, r__2 = pow_dd(&d__1, &d__2);
		hn = dabs(*h__) / dmax(r__1,r__2);
/* Computing MIN */
		r__1 = hn, r__2 = 1.f / (el[*nq * 13 + 1] * 2.f * bnd);
		hn = dmin(r__1,r__2);
		if (hn >= hs) {
		    *mint = 1;
		    *mntold = *mint;
		    *miter = 0;
		    *mtrold = *miter;
		    *maxord = min(*mxrdsv,12);
		    *rmax = 10.f;
		    *trend = 1.f;
		    *convrg = FALSE_;
		    cdcst_(maxord, mint, iswflg, &el[14], &tq[4]);
		    *nwait = *nq + 2;
		}
	    }
	}
    }
    if (switch__) {
	*mint = 2;
	*mntold = *mint;
	*miter = *mtrsv;
	*mtrold = *miter;
	*maxord = min(*mxrdsv,5);
	*nq = min(*nq,*maxord);
	*rc = 0.f;
	*rmax = 10.f;
	*trend = 1.f;
	cdcst_(maxord, mint, iswflg, &el[14], &tq[4]);
	*nwait = *nq + 2;
    }
/*                           Consider changing H if NWAIT = 1.  Otherwise */
/*                           decrease NWAIT by 1.  If NWAIT is then 1 and */
/*                           NQ.LT.MAXORD, then SAVE1 is saved for use in */
/*                           a possible order increase on the next step. */

    if (*jtask == 0 || *jtask == 2) {
/* Computing MAX */
	d__1 = (doublereal) (etest / *eps);
	d__2 = (doublereal) (1.f / (*nq + 1));
	r__1 = *uround, r__2 = pow_dd(&d__1, &d__2) * 1.2f;
	rh = 1.f / dmax(r__1,r__2);
	if (rh > 1.f) {
	    cdscl_(hmax, n, nq, rmax, h__, rc, &rh, &yh[yh_offset]);
	}
    } else if (*nwait > 1) {
	--(*nwait);
	if (*nwait == 1 && *nq < *maxord) {
	    i__5 = *nde;
	    for (i__ = 1; i__ <= i__5; ++i__) {
/* L250: */
		i__3 = i__ + (*maxord + 1) * yh_dim1;
		i__4 = i__;
		yh[i__3].r = save1[i__4].r, yh[i__3].i = save1[i__4].i;
	    }
	}
/*             If a change in H is considered, an increase or decrease in */
/*             order by one is considered also.  A change in H is made */
/*             only if it is by a factor of at least TRSHLD.  Factors */
/*             RH1, RH2, and RH3 are computed, by which H could be */
/*             multiplied at order NQ - 1, order NQ, or order NQ + 1, */
/*             respectively.  The largest of these is determined and the */
/*             new order chosen accordingly.  If the order is to be */
/*             increased, we compute one additional scaled derivative. */
/*             If there is a change of order, reset NQ and the */
/*             coefficients.  In any case H is reset according to RH and */
/*             the YH array is rescaled. */
    } else {
	if (*nq == 1) {
	    rh1 = 0.f;
	} else {
	    if (*ierror == 1 || *ierror == 5) {
		i__3 = *nde;
		for (i__ = 1; i__ <= i__3; ++i__) {
/* L270: */
		    i__4 = i__;
		    c_div(&q__1, &yh[i__ + (*nq + 1) * yh_dim1], &ywt[i__]);
		    save2[i__4].r = q__1.r, save2[i__4].i = q__1.i;
		}
	    } else {
		i__4 = *nde;
		for (i__ = 1; i__ <= i__4; ++i__) {
/* L275: */
		    i__3 = i__;
		    i__5 = i__ + (*nq + 1) * yh_dim1;
/* Computing MAX */
		    r__2 = c_abs(&y[i__]), r__3 = c_abs(&ywt[i__]);
		    r__1 = dmax(r__2,r__3);
		    q__1.r = yh[i__5].r / r__1, q__1.i = yh[i__5].i / r__1;
		    save2[i__3].r = q__1.r, save2[i__3].i = q__1.i;
		}
	    }
	    erdn = scnrm2_(nde, &save2[1], &c__1) / (tq[*nq * 3 + 1] * sqrt((
		    real) (*nde)));
/* Computing MAX */
	    d__1 = (doublereal) (erdn / *eps);
	    d__2 = (doublereal) (1.f / *nq);
	    r__1 = *uround, r__2 = pow_dd(&d__1, &d__2) * 1.3f;
	    rh1 = 1.f / dmax(r__1,r__2);
	}
/* Computing MAX */
	d__1 = (doublereal) (etest / *eps);
	d__2 = (doublereal) (1.f / (*nq + 1));
	r__1 = *uround, r__2 = pow_dd(&d__1, &d__2) * 1.2f;
	rh2 = 1.f / dmax(r__1,r__2);
	if (*nq == *maxord) {
	    rh3 = 0.f;
	} else {
	    if (*ierror == 1 || *ierror == 5) {
		i__3 = *nde;
		for (i__ = 1; i__ <= i__3; ++i__) {
/* L290: */
		    i__5 = i__;
		    i__4 = i__;
		    i__6 = i__ + (*maxord + 1) * yh_dim1;
		    q__2.r = save1[i__4].r - yh[i__6].r, q__2.i = save1[i__4]
			    .i - yh[i__6].i;
		    c_div(&q__1, &q__2, &ywt[i__]);
		    save2[i__5].r = q__1.r, save2[i__5].i = q__1.i;
		}
	    } else {
		i__5 = *nde;
		for (i__ = 1; i__ <= i__5; ++i__) {
		    i__4 = i__;
		    i__6 = i__;
		    i__3 = i__ + (*maxord + 1) * yh_dim1;
		    q__2.r = save1[i__6].r - yh[i__3].r, q__2.i = save1[i__6]
			    .i - yh[i__3].i;
/* Computing MAX */
		    r__2 = c_abs(&y[i__]), r__3 = c_abs(&ywt[i__]);
		    r__1 = dmax(r__2,r__3);
		    q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
		    save2[i__4].r = q__1.r, save2[i__4].i = q__1.i;
/* L295: */
		}
	    }
	    erup = scnrm2_(nde, &save2[1], &c__1) / (tq[*nq * 3 + 3] * sqrt((
		    real) (*nde)));
/* Computing MAX */
	    d__1 = (doublereal) (erup / *eps);
	    d__2 = (doublereal) (1.f / (*nq + 2));
	    r__1 = *uround, r__2 = pow_dd(&d__1, &d__2) * 1.4f;
	    rh3 = 1.f / dmax(r__1,r__2);
	}
	if (rh1 > rh2 && rh1 >= rh3) {
	    rh = rh1;
	    if (rh <= 1.f) {
		goto L380;
	    }
	    --(*nq);
	    *rc = *rc * el[*nq * 13 + 1] / el[(*nq + 1) * 13 + 1];
	} else if (rh2 >= rh1 && rh2 >= rh3) {
	    rh = rh2;
	    if (rh <= 1.f) {
		goto L380;
	    }
	} else {
	    rh = rh3;
	    if (rh <= 1.f) {
		goto L380;
	    }
	    i__5 = *n;
	    for (i__ = 1; i__ <= i__5; ++i__) {
/* L360: */
		i__4 = i__ + (*nq + 2) * yh_dim1;
		i__6 = i__;
		i__3 = *nq + 1 + *nq * 13;
		q__2.r = el[i__3] * save1[i__6].r, q__2.i = el[i__3] * save1[
			i__6].i;
		i__2 = *nq + 1;
		d__1 = (doublereal) i__2;
		q__1.r = q__2.r / d__1, q__1.i = q__2.i / d__1;
		yh[i__4].r = q__1.r, yh[i__4].i = q__1.i;
	    }
	    ++(*nq);
	    *rc = *rc * el[*nq * 13 + 1] / el[(*nq - 1) * 13 + 1];
	}
	if (*iswflg == 3 && *mint == 1) {
	    if (bnd != 0.f) {
/* Computing MIN */
		r__1 = rh, r__2 = 1.f / (el[*nq * 13 + 1] * 2.f * bnd * dabs(*
			h__));
		rh = dmin(r__1,r__2);
	    }
	}
	cdscl_(hmax, n, nq, rmax, h__, rc, &rh, &yh[yh_offset]);
	*rmax = 10.f;
L380:
	*nwait = *nq + 2;
    }
/*               All returns are made through this section.  H is saved */
/*               in HOLD to allow the caller to change H on the next step */
    *jstate = 1;
    *hold = *h__;
    return 0;

L400:
    *jstate = 2;
    *hold = *h__;
    i__4 = *n;
    for (i__ = 1; i__ <= i__4; ++i__) {
/* L405: */
	i__6 = i__;
	i__3 = i__ + yh_dim1;
	y[i__6].r = yh[i__3].r, y[i__6].i = yh[i__3].i;
    }
    return 0;

L410:
    *jstate = 3;
    *hold = *h__;
    return 0;

L420:
    *jstate = 4;
    *hold = *h__;
    return 0;

L430:
    *t = told;
    cdpsc_(&c_n1, &nsv, nq, &yh[yh_offset]);
    i__6 = nsv;
    for (i__ = 1; i__ <= i__6; ++i__) {
/* L435: */
	i__3 = i__;
	i__4 = i__ + yh_dim1;
	y[i__3].r = yh[i__4].r, y[i__3].i = yh[i__4].i;
    }
L440:
    *hold = *h__;
    return 0;
} /* cdstp_ */
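/* Editorial illustration, not from SLATEC: the scaled-derivative (Nordsieck)
   storage that the YH description in CDSTP refers to.  Column j+1 holds
   H**j * y_i^(j) / j!, so changing the step from H to RH*H multiplies
   column j+1 by RH**j -- the rescaling that CDSCL applies.  Real-valued
   arrays and a fixed column count are used here purely for brevity. */
#define MAXCOL 6   /* illustrative bound: order <= 5 plus the solution column */

static void rescale_yh(int n, int nq, double rh, double yh[][MAXCOL])
{
    double r = 1.0;
    for (int j = 1; j <= nq; ++j) {   /* 0-based column j stores H**j y^(j)/j! */
        r *= rh;                      /* r = rh**j */
        for (int i = 0; i < n; ++i)
            yh[i][j] *= r;
    }
}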
Exemplo n.º 26
0
/* Subroutine */ int cdrgev_(integer *nsizes, integer *nn, integer *ntypes, 
	logical *dotype, integer *iseed, real *thresh, integer *nounit, 
	complex *a, integer *lda, complex *b, complex *s, complex *t, complex 
	*q, integer *ldq, complex *z__, complex *qe, integer *ldqe, complex *
	alpha, complex *beta, complex *alpha1, complex *beta1, complex *work, 
	integer *lwork, real *rwork, real *result, integer *info)
{
    /* Initialized data */

    static integer kclass[26] = { 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,
	    2,2,2,3 };
    static integer kbmagn[26] = { 1,1,1,1,1,1,1,1,3,2,3,2,2,3,1,1,1,1,1,1,1,3,
	    2,3,2,1 };
    static integer ktrian[26] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
	    1,1,1,1 };
    static logical lasign[26] = { FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,
	    TRUE_,FALSE_,TRUE_,TRUE_,FALSE_,FALSE_,TRUE_,TRUE_,TRUE_,FALSE_,
	    TRUE_,FALSE_,FALSE_,FALSE_,TRUE_,TRUE_,TRUE_,TRUE_,TRUE_,FALSE_ };
    static logical lbsign[26] = { FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,
	    FALSE_,TRUE_,FALSE_,FALSE_,TRUE_,TRUE_,FALSE_,FALSE_,TRUE_,FALSE_,
	    TRUE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,FALSE_,
	    FALSE_ };
    static integer kz1[6] = { 0,1,2,1,3,3 };
    static integer kz2[6] = { 0,0,1,2,1,1 };
    static integer kadd[6] = { 0,0,0,0,3,2 };
    static integer katype[26] = { 0,1,0,1,2,3,4,1,4,4,1,1,4,4,4,2,4,5,8,7,9,4,
	    4,4,4,0 };
    static integer kbtype[26] = { 0,0,1,1,2,-3,1,4,1,1,4,4,1,1,-4,2,-4,8,8,8,
	    8,8,8,8,8,0 };
    static integer kazero[26] = { 1,1,1,1,1,1,2,1,2,2,1,1,2,2,3,1,3,5,5,5,5,3,
	    3,3,3,1 };
    static integer kbzero[26] = { 1,1,1,1,1,1,1,2,1,1,2,2,1,1,4,1,4,6,6,6,6,4,
	    4,4,4,1 };
    static integer kamagn[26] = { 1,1,1,1,1,1,1,1,2,3,2,3,2,3,1,1,1,1,1,1,1,2,
	    3,3,2,1 };

    /* Format strings */
    static char fmt_9999[] = "(\002 CDRGEV: \002,a,\002 returned INFO=\002,i"
	    "6,\002.\002,/3x,\002N=\002,i6,\002, JTYPE=\002,i6,\002, ISEED="
	    "(\002,3(i5,\002,\002),i5,\002)\002)";
    static char fmt_9998[] = "(\002 CDRGEV: \002,a,\002 Eigenvectors from"
	    " \002,a,\002 incorrectly \002,\002normalized.\002,/\002 Bits of "
	    "error=\002,0p,g10.3,\002,\002,3x,\002N=\002,i4,\002, JTYPE=\002,"
	    "i3,\002, ISEED=(\002,3(i4,\002,\002),i5,\002)\002)";
    static char fmt_9997[] = "(/1x,a3,\002 -- Complex Generalized eigenvalue"
	    " problem \002,\002driver\002)";
    static char fmt_9996[] = "(\002 Matrix types (see CDRGEV for details):"
	    " \002)";
    static char fmt_9995[] = "(\002 Special Matrices:\002,23x,\002(J'=transp"
	    "osed Jordan block)\002,/\002   1=(0,0)  2=(I,0)  3=(0,I)  4=(I,I"
	    ")  5=(J',J')  \002,\0026=(diag(J',I), diag(I,J'))\002,/\002 Diag"
	    "onal Matrices:  ( \002,\002D=diag(0,1,2,...) )\002,/\002   7=(D,"
	    "I)   9=(large*D, small*I\002,\002)  11=(large*I, small*D)  13=(l"
	    "arge*D, large*I)\002,/\002   8=(I,D)  10=(small*D, large*I)  12="
	    "(small*I, large*D) \002,\002 14=(small*D, small*I)\002,/\002  15"
	    "=(D, reversed D)\002)";
    static char fmt_9994[] = "(\002 Matrices Rotated by Random \002,a,\002 M"
	    "atrices U, V:\002,/\002  16=Transposed Jordan Blocks            "
	    " 19=geometric \002,\002alpha, beta=0,1\002,/\002  17=arithm. alp"
	    "ha&beta             \002,\002      20=arithmetic alpha, beta=0,"
	    "1\002,/\002  18=clustered \002,\002alpha, beta=0,1            21"
	    "=random alpha, beta=0,1\002,/\002 Large & Small Matrices:\002,"
	    "/\002  22=(large, small)   \002,\00223=(small,large)    24=(smal"
	    "l,small)    25=(large,large)\002,/\002  26=random O(1) matrices"
	    ".\002)";
    static char fmt_9993[] = "(/\002 Tests performed:    \002,/\002 1 = max "
	    "| ( b A - a B )'*l | / const.,\002,/\002 2 = | |VR(i)| - 1 | / u"
	    "lp,\002,/\002 3 = max | ( b A - a B )*r | / const.\002,/\002 4 ="
	    " | |VL(i)| - 1 | / ulp,\002,/\002 5 = 0 if W same no matter if r"
	    " or l computed,\002,/\002 6 = 0 if l same no matter if l compute"
	    "d,\002,/\002 7 = 0 if r same no matter if r computed,\002,/1x)";
    static char fmt_9992[] = "(\002 Matrix order=\002,i5,\002, type=\002,i2"
	    ",\002, seed=\002,4(i4,\002,\002),\002 result \002,i2,\002 is\002"
	    ",0p,f8.2)";
    static char fmt_9991[] = "(\002 Matrix order=\002,i5,\002, type=\002,i2"
	    ",\002, seed=\002,4(i4,\002,\002),\002 result \002,i2,\002 is\002"
	    ",1p,e10.3)";

    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, qe_dim1, 
	    qe_offset, s_dim1, s_offset, t_dim1, t_offset, z_dim1, z_offset, 
	    i__1, i__2, i__3, i__4, i__5, i__6, i__7;
    real r__1, r__2;
    complex q__1, q__2, q__3;

    /* Builtin functions */
    double r_sign(real *, real *), c_abs(complex *);
    void r_cnjg(complex *, complex *);
    integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void);

    /* Local variables */
    static integer iadd, ierr, nmax, i__, j, n;
    static logical badnn;
    extern /* Subroutine */ int cget52_(logical *, integer *, complex *, 
	    integer *, complex *, integer *, complex *, integer *, complex *, 
	    complex *, complex *, real *, real *), cggev_(char *, char *, 
	    integer *, complex *, integer *, complex *, integer *, complex *, 
	    complex *, complex *, integer *, complex *, integer *, complex *, 
	    integer *, real *, integer *);
    static real rmagn[4];
    static complex ctemp;
    static integer nmats, jsize, nerrs, jtype, n1;
    extern /* Subroutine */ int clatm4_(integer *, integer *, integer *, 
	    integer *, logical *, real *, real *, real *, integer *, integer *
	    , complex *, integer *), cunm2r_(char *, char *, integer *, 
	    integer *, integer *, complex *, integer *, complex *, complex *, 
	    integer *, complex *, integer *);
    static integer jc, nb, in;
    extern /* Subroutine */ int slabad_(real *, real *);
    static integer jr;
    extern /* Subroutine */ int clarfg_(integer *, complex *, complex *, 
	    integer *, complex *);
    extern /* Complex */ VOID clarnd_(complex *, integer *, integer *);
    extern doublereal slamch_(char *);
    extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex 
	    *, integer *, complex *, integer *), claset_(char *, 
	    integer *, integer *, complex *, complex *, complex *, integer *);
    static real safmin, safmax;
    static integer ioldsd[4];
    extern integer ilaenv_(integer *, char *, char *, integer *, integer *, 
	    integer *, integer *, ftnlen, ftnlen);
    extern /* Subroutine */ int alasvm_(char *, integer *, integer *, integer 
	    *, integer *), xerbla_(char *, integer *);
    static integer minwrk, maxwrk;
    static real ulpinv;
    static integer mtypes, ntestt;
    static real ulp;

    /* Fortran I/O blocks */
    static cilist io___40 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___42 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___43 = { 0, 0, 0, fmt_9998, 0 };
    static cilist io___44 = { 0, 0, 0, fmt_9998, 0 };
    static cilist io___45 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___46 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___47 = { 0, 0, 0, fmt_9999, 0 };
    static cilist io___48 = { 0, 0, 0, fmt_9997, 0 };
    static cilist io___49 = { 0, 0, 0, fmt_9996, 0 };
    static cilist io___50 = { 0, 0, 0, fmt_9995, 0 };
    static cilist io___51 = { 0, 0, 0, fmt_9994, 0 };
    static cilist io___52 = { 0, 0, 0, fmt_9993, 0 };
    static cilist io___53 = { 0, 0, 0, fmt_9992, 0 };
    static cilist io___54 = { 0, 0, 0, fmt_9991, 0 };



#define a_subscr(a_1,a_2) (a_2)*a_dim1 + a_1
#define a_ref(a_1,a_2) a[a_subscr(a_1,a_2)]
#define b_subscr(a_1,a_2) (a_2)*b_dim1 + a_1
#define b_ref(a_1,a_2) b[b_subscr(a_1,a_2)]
#define q_subscr(a_1,a_2) (a_2)*q_dim1 + a_1
#define q_ref(a_1,a_2) q[q_subscr(a_1,a_2)]
#define z___subscr(a_1,a_2) (a_2)*z_dim1 + a_1
#define z___ref(a_1,a_2) z__[z___subscr(a_1,a_2)]
#define qe_subscr(a_1,a_2) (a_2)*qe_dim1 + a_1
#define qe_ref(a_1,a_2) qe[qe_subscr(a_1,a_2)]


/*  -- LAPACK test routine (version 3.0) --   
       Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,   
       Courant Institute, Argonne National Lab, and Rice University   
       June 30, 1999   


    Purpose   
    =======   

    CDRGEV checks the nonsymmetric generalized eigenvalue problem driver   
    routine CGGEV.   

    CGGEV computes for a pair of n-by-n nonsymmetric matrices (A,B) the   
    generalized eigenvalues and, optionally, the left and right   
    eigenvectors.   

    A generalized eigenvalue for a pair of matrices (A,B) is a scalar w   
    or a ratio  alpha/beta = w, such that A - w*B is singular.  It is   
    usually represented as the pair (alpha,beta), as there is reasonable   
    interpretation for beta=0, and even for both being zero.   

    A right generalized eigenvector corresponding to a generalized   
    eigenvalue  w  for a pair of matrices (A,B) is a vector r  such that   
    (A - wB) * r = 0.  A left generalized eigenvector is a vector l such   
    that l**H * (A - wB) = 0, where l**H is the conjugate-transpose of l.   

    When CDRGEV is called, a number of matrix "sizes" ("n's") and a   
    number of matrix "types" are specified.  For each size ("n")   
    and each type of matrix, a pair of matrices (A, B) will be generated   
    and used for testing.  For each matrix pair, the following tests   
    will be performed and compared with the threshold THRESH.   

    Results from CGGEV:   

    (1)  max over all left eigenvalue/-vector pairs (alpha/beta,l) of   

         | VL**H * (beta A - alpha B) |/( ulp max(|beta A|, |alpha B|) )   

         where VL**H is the conjugate-transpose of VL.   

    (2)  | |VL(i)| - 1 | / ulp and whether largest component real   

         VL(i) denotes the i-th column of VL.   

    (3)  max over all left eigenvalue/-vector pairs (alpha/beta,r) of   

         | (beta A - alpha B) * VR | / ( ulp max(|beta A|, |alpha B|) )   

    (4)  | |VR(i)| - 1 | / ulp and whether largest component real   

         VR(i) denotes the i-th column of VR.   

    (5)  W(full) = W(partial)   
         W(full) denotes the eigenvalues computed when both l and r   
         are also computed, and W(partial) denotes the eigenvalues   
         computed when only W, only W and r, or only W and l are   
         computed.   

    (6)  VL(full) = VL(partial)   
         VL(full) denotes the left eigenvectors computed when both l   
         and r are computed, and VL(partial) denotes the result   
         when only l is computed.   

    (7)  VR(full) = VR(partial)   
         VR(full) denotes the right eigenvectors computed when both l   
         and r are also computed, and VR(partial) denotes the result   
         when only l is computed.   


    Test Matrices   
    ---- --------   

    The sizes of the test matrices are specified by an array   
    NN(1:NSIZES); the value of each element NN(j) specifies one size.   
    The "types" are specified by a logical array DOTYPE( 1:NTYPES ); if   
    DOTYPE(j) is .TRUE., then matrix type "j" will be generated.   
    Currently, the list of possible types is:   

    (1)  ( 0, 0 )         (a pair of zero matrices)   

    (2)  ( I, 0 )         (an identity and a zero matrix)   

    (3)  ( 0, I )         (an identity and a zero matrix)   

    (4)  ( I, I )         (a pair of identity matrices)   

            t   t   
    (5)  ( J , J  )       (a pair of transposed Jordan blocks)   

                                        t                ( I   0  )   
    (6)  ( X, Y )         where  X = ( J   0  )  and Y = (      t )   
                                     ( 0   I  )          ( 0   J  )   
                          and I is a k x k identity and J a (k+1)x(k+1)   
                          Jordan block; k=(N-1)/2   

    (7)  ( D, I )         where D is diag( 0, 1,..., N-1 ) (a diagonal   
                          matrix with those diagonal entries.)   
    (8)  ( I, D )   

    (9)  ( big*D, small*I ) where "big" is near overflow and small=1/big   

    (10) ( small*D, big*I )   

    (11) ( big*I, small*D )   

    (12) ( small*I, big*D )   

    (13) ( big*D, big*I )   

    (14) ( small*D, small*I )   

    (15) ( D1, D2 )        where D1 is diag( 0, 0, 1, ..., N-3, 0 ) and   
                           D2 is diag( 0, N-3, N-4,..., 1, 0, 0 )   
              t   t   
    (16) Q ( J , J ) Z     where Q and Z are random orthogonal matrices.   

    (17) Q ( T1, T2 ) Z    where T1 and T2 are upper triangular matrices   
                           with random O(1) entries above the diagonal   
                           and diagonal entries diag(T1) =   
                           ( 0, 0, 1, ..., N-3, 0 ) and diag(T2) =   
                           ( 0, N-3, N-4,..., 1, 0, 0 )   

    (18) Q ( T1, T2 ) Z    diag(T1) = ( 0, 0, 1, 1, s, ..., s, 0 )   
                           diag(T2) = ( 0, 1, 0, 1,..., 1, 0 )   
                           s = machine precision.   

    (19) Q ( T1, T2 ) Z    diag(T1)=( 0,0,1,1, 1-d, ..., 1-(N-5)*d=s, 0 )   
                           diag(T2) = ( 0, 1, 0, 1, ..., 1, 0 )   

                                                           N-5   
    (20) Q ( T1, T2 ) Z    diag(T1)=( 0, 0, 1, 1, a, ..., a   =s, 0 )   
                           diag(T2) = ( 0, 1, 0, 1, ..., 1, 0, 0 )   

    (21) Q ( T1, T2 ) Z    diag(T1)=( 0, 0, 1, r1, r2, ..., r(N-4), 0 )   
                           diag(T2) = ( 0, 1, 0, 1, ..., 1, 0, 0 )   
                           where r1,..., r(N-4) are random.   

    (22) Q ( big*T1, small*T2 ) Z    diag(T1) = ( 0, 0, 1, ..., N-3, 0 )   
                                     diag(T2) = ( 0, 1, ..., 1, 0, 0 )   

    (23) Q ( small*T1, big*T2 ) Z    diag(T1) = ( 0, 0, 1, ..., N-3, 0 )   
                                     diag(T2) = ( 0, 1, ..., 1, 0, 0 )   

    (24) Q ( small*T1, small*T2 ) Z  diag(T1) = ( 0, 0, 1, ..., N-3, 0 )   
                                     diag(T2) = ( 0, 1, ..., 1, 0, 0 )   

    (25) Q ( big*T1, big*T2 ) Z      diag(T1) = ( 0, 0, 1, ..., N-3, 0 )   
                                     diag(T2) = ( 0, 1, ..., 1, 0, 0 )   

    (26) Q ( T1, T2 ) Z     where T1 and T2 are random upper-triangular   
                            matrices.   


    Arguments   
    =========   

    NSIZES  (input) INTEGER   
            The number of sizes of matrices to use.  If it is zero,   
            CDRGEV does nothing.  NSIZES >= 0.   

    NN      (input) INTEGER array, dimension (NSIZES)   
            An array containing the sizes to be used for the matrices.   
            Zero values will be skipped.  NN >= 0.   

    NTYPES  (input) INTEGER   
            The number of elements in DOTYPE.   If it is zero, CDRGEV   
            does nothing.  It must be at least zero.  If it is MAXTYP+1   
            and NSIZES is 1, then an additional type, MAXTYP+1 is   
            defined, which is to use whatever matrix is in A.  This   
            is only useful if DOTYPE(1:MAXTYP) is .FALSE. and   
            DOTYPE(MAXTYP+1) is .TRUE. .   

    DOTYPE  (input) LOGICAL array, dimension (NTYPES)   
            If DOTYPE(j) is .TRUE., then for each size in NN a   
            matrix of that size and of type j will be generated.   
            If NTYPES is smaller than the maximum number of types   
            defined (PARAMETER MAXTYP), then types NTYPES+1 through   
            MAXTYP will not be generated. If NTYPES is larger   
            than MAXTYP, DOTYPE(MAXTYP+1) through DOTYPE(NTYPES)   
            will be ignored.   

    ISEED   (input/output) INTEGER array, dimension (4)   
            On entry ISEED specifies the seed of the random number   
            generator. The array elements should be between 0 and 4095;   
            if not they will be reduced mod 4096. Also, ISEED(4) must   
            be odd.  The random number generator uses a linear   
            congruential sequence limited to small integers, and so   
            should produce machine independent random numbers. The   
            values of ISEED are changed on exit, and can be used in the   
            next call to CDRGEV to continue the same random number   
            sequence.   

    THRESH  (input) REAL   
            A test will count as "failed" if the "error", computed as   
            described above, exceeds THRESH.  Note that the error is   
            scaled to be O(1), so THRESH should be a reasonably small   
            multiple of 1, e.g., 10 or 100.  In particular, it should   
            not depend on the precision (single vs. double) or the size   
            of the matrix.  It must be at least zero.   

    NOUNIT  (input) INTEGER   
            The FORTRAN unit number for printing out error messages   
            (e.g., if a routine returns IERR not equal to 0.)   

    A       (input/workspace) COMPLEX array, dimension(LDA, max(NN))   
            Used to hold the original A matrix.  Used as input only   
            if NTYPES=MAXTYP+1, DOTYPE(1:MAXTYP)=.FALSE., and   
            DOTYPE(MAXTYP+1)=.TRUE.   

    LDA     (input) INTEGER   
            The leading dimension of A, B, S, and T.   
            It must be at least 1 and at least max( NN ).   

    B       (input/workspace) COMPLEX array, dimension(LDA, max(NN))   
            Used to hold the original B matrix.  Used as input only   
            if NTYPES=MAXTYP+1, DOTYPE(1:MAXTYP)=.FALSE., and   
            DOTYPE(MAXTYP+1)=.TRUE.   

    S       (workspace) COMPLEX array, dimension (LDA, max(NN))   
            Working copy of A that is passed to CGGEV (and overwritten   
            by it).   

    T       (workspace) COMPLEX array, dimension (LDA, max(NN))   
            Working copy of B that is passed to CGGEV (and overwritten   
            by it).   

    Q      (workspace) COMPLEX array, dimension (LDQ, max(NN))   
            The (left) eigenvectors matrix computed by CGGEV.   

    LDQ     (input) INTEGER   
            The leading dimension of Q and Z. It must   
            be at least 1 and at least max( NN ).   

    Z       (workspace) COMPLEX array, dimension( LDQ, max(NN) )   
            The (right) eigenvectors matrix computed by CGGEV.   

    QE      (workspace) COMPLEX array, dimension( LDQ, max(NN) )   
            QE holds the computed right or left eigenvectors.   

    LDQE    (input) INTEGER   
            The leading dimension of QE. LDQE >= max(1,max(NN)).   

    ALPHA   (workspace) COMPLEX array, dimension (max(NN))   
    BETA    (workspace) COMPLEX array, dimension (max(NN))   
            The generalized eigenvalues of (A,B) computed by CGGEV.   
            ALPHA(k) / BETA(k) is the k-th generalized eigenvalue   
            of A and B.   

    ALPHA1  (workspace) COMPLEX array, dimension (max(NN))   
    BETA1   (workspace) COMPLEX array, dimension (max(NN))   
            Like ALPHA and BETA, these arrays contain the generalized   
            eigenvalues of A and B, but computed when CGGEV returns only   
            a partial eigendecomposition, i.e. not the eigenvalues   
            together with both the left and right eigenvectors.   

    WORK    (workspace) COMPLEX array, dimension (LWORK)   

    LWORK   (input) INTEGER   
            The number of entries in WORK.  LWORK >= N*(N+1)   

    RWORK   (workspace) REAL array, dimension (8*N)   
            Real workspace.   

    RESULT  (output) REAL array, dimension (7)   
            The values computed by the tests described above.   
            The values are currently limited to 1/ulp, to avoid overflow.   

    INFO    (output) INTEGER   
            = 0:  successful exit   
            < 0:  if INFO = -i, the i-th argument had an illegal value.   
            > 0:  A routine returned an error code.  INFO is the   
                  absolute value of the INFO value returned.   

    =====================================================================   

       Parameter adjustments */
    --nn;
    --dotype;
    --iseed;
    t_dim1 = *lda;
    t_offset = 1 + t_dim1 * 1;
    t -= t_offset;
    s_dim1 = *lda;
    s_offset = 1 + s_dim1 * 1;
    s -= s_offset;
    b_dim1 = *lda;
    b_offset = 1 + b_dim1 * 1;
    b -= b_offset;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1 * 1;
    a -= a_offset;
    z_dim1 = *ldq;
    z_offset = 1 + z_dim1 * 1;
    z__ -= z_offset;
    q_dim1 = *ldq;
    q_offset = 1 + q_dim1 * 1;
    q -= q_offset;
    qe_dim1 = *ldqe;
    qe_offset = 1 + qe_dim1 * 1;
    qe -= qe_offset;
    --alpha;
    --beta;
    --alpha1;
    --beta1;
    --work;
    --rwork;
    --result;

    /* Function Body   

       Check for errors */

    *info = 0;

    badnn = FALSE_;
    nmax = 1;
    i__1 = *nsizes;
    for (j = 1; j <= i__1; ++j) {
/* Computing MAX */
	i__2 = nmax, i__3 = nn[j];
	nmax = max(i__2,i__3);
	if (nn[j] < 0) {
	    badnn = TRUE_;
	}
/* L10: */
    }

    if (*nsizes < 0) {
	*info = -1;
    } else if (badnn) {
	*info = -2;
    } else if (*ntypes < 0) {
	*info = -3;
    } else if (*thresh < 0.f) {
	*info = -6;
    } else if (*lda <= 1 || *lda < nmax) {
	*info = -9;
    } else if (*ldq <= 1 || *ldq < nmax) {
	*info = -14;
    } else if (*ldqe <= 1 || *ldqe < nmax) {
	*info = -17;
    }

/*     Compute workspace   
        (Note: Comments in the code beginning "Workspace:" describe the   
         minimal amount of workspace needed at that point in the code,   
         as well as the preferred amount for good performance.   
         NB refers to the optimal block size for the immediately   
         following subroutine, as returned by ILAENV.) */

    minwrk = 1;
    if (*info == 0 && *lwork >= 1) {
	minwrk = nmax * (nmax + 1);
/* Computing MAX */
	i__1 = 1, i__2 = ilaenv_(&c__1, "CGEQRF", " ", &nmax, &nmax, &c_n1, &
		c_n1, (ftnlen)6, (ftnlen)1), i__1 = max(i__1,i__2), i__2 = 
		ilaenv_(&c__1, "CUNMQR", "LC", &nmax, &nmax, &nmax, &c_n1, (
		ftnlen)6, (ftnlen)2), i__1 = max(i__1,i__2), i__2 = ilaenv_(&
		c__1, "CUNGQR", " ", &nmax, &nmax, &nmax, &c_n1, (ftnlen)6, (
		ftnlen)1);
	nb = max(i__1,i__2);
/* Computing MAX */
	i__1 = nmax << 1, i__2 = nmax * (nb + 1), i__1 = max(i__1,i__2), i__2 
		= nmax * (nmax + 1);
	maxwrk = max(i__1,i__2);
	work[1].r = (real) maxwrk, work[1].i = 0.f;
    }

    if (*lwork < minwrk) {
	*info = -23;
    }

    if (*info != 0) {
	i__1 = -(*info);
	xerbla_("CDRGEV", &i__1);
	return 0;
    }

/*     Quick return if possible */

    if (*nsizes == 0 || *ntypes == 0) {
	return 0;
    }

    ulp = slamch_("Precision");
    safmin = slamch_("Safe minimum");
    safmin /= ulp;
    safmax = 1.f / safmin;
    slabad_(&safmin, &safmax);
    ulpinv = 1.f / ulp;

/*     The values RMAGN(2:3) depend on N, see below. */

    rmagn[0] = 0.f;
    rmagn[1] = 1.f;

/*     Loop over sizes, types */

    ntestt = 0;
    nerrs = 0;
    nmats = 0;

    i__1 = *nsizes;
    for (jsize = 1; jsize <= i__1; ++jsize) {
	n = nn[jsize];
	n1 = max(1,n);
	rmagn[2] = safmax * ulp / (real) n1;
	rmagn[3] = safmin * ulpinv * n1;

	if (*nsizes != 1) {
	    mtypes = min(26,*ntypes);
	} else {
	    mtypes = min(27,*ntypes);
	}

	i__2 = mtypes;
	for (jtype = 1; jtype <= i__2; ++jtype) {
	    if (! dotype[jtype]) {
		goto L210;
	    }
	    ++nmats;

/*           Save ISEED in case of an error. */

	    for (j = 1; j <= 4; ++j) {
		ioldsd[j - 1] = iseed[j];
/* L20: */
	    }

/*           Generate test matrices A and B   

             Description of control parameters:   

             KCLASS: =1 means w/o rotation, =2 means w/ rotation,   
                     =3 means random.   
             KATYPE: the "type" to be passed to CLATM4 for computing A.   
             KAZERO: the pattern of zeros on the diagonal for A:   
                     =1: ( xxx ), =2: (0, xxx ) =3: ( 0, 0, xxx, 0 ),   
                     =4: ( 0, xxx, 0, 0 ), =5: ( 0, 0, 1, xxx, 0 ),   
                     =6: ( 0, 1, 0, xxx, 0 ).  (xxx means a string of   
                     non-zero entries.)   
             KAMAGN: the magnitude of the matrix: =0: zero, =1: O(1),   
                     =2: large, =3: small.   
             LASIGN: .TRUE. if the diagonal elements of A are to be   
                     multiplied by a random magnitude 1 number.   
             KBTYPE, KBZERO, KBMAGN, LBSIGN: the same, but for B.   
             KTRIAN: =0: don't fill in the upper triangle, =1: do.   
             KZ1, KZ2, KADD: used to implement KAZERO and KBZERO.   
             RMAGN: used to implement KAMAGN and KBMAGN. */

	    if (mtypes > 26) {
		goto L100;
	    }
	    ierr = 0;
	    if (kclass[jtype - 1] < 3) {

/*              Generate A (w/o rotation) */

		if ((i__3 = katype[jtype - 1], abs(i__3)) == 3) {
		    in = ((n - 1) / 2 << 1) + 1;
		    if (in != n) {
			claset_("Full", &n, &n, &c_b1, &c_b1, &a[a_offset], 
				lda);
		    }
		} else {
		    in = n;
		}
		clatm4_(&katype[jtype - 1], &in, &kz1[kazero[jtype - 1] - 1], 
			&kz2[kazero[jtype - 1] - 1], &lasign[jtype - 1], &
			rmagn[kamagn[jtype - 1]], &ulp, &rmagn[ktrian[jtype - 
			1] * kamagn[jtype - 1]], &c__2, &iseed[1], &a[
			a_offset], lda);
		iadd = kadd[kazero[jtype - 1] - 1];
		if (iadd > 0 && iadd <= n) {
		    i__3 = a_subscr(iadd, iadd);
		    i__4 = kamagn[jtype - 1];
		    a[i__3].r = rmagn[i__4], a[i__3].i = 0.f;
		}

/*              Generate B (w/o rotation) */

		if ((i__3 = kbtype[jtype - 1], abs(i__3)) == 3) {
		    in = ((n - 1) / 2 << 1) + 1;
		    if (in != n) {
			claset_("Full", &n, &n, &c_b1, &c_b1, &b[b_offset], 
				lda);
		    }
		} else {
		    in = n;
		}
		clatm4_(&kbtype[jtype - 1], &in, &kz1[kbzero[jtype - 1] - 1], 
			&kz2[kbzero[jtype - 1] - 1], &lbsign[jtype - 1], &
			rmagn[kbmagn[jtype - 1]], &c_b28, &rmagn[ktrian[jtype 
			- 1] * kbmagn[jtype - 1]], &c__2, &iseed[1], &b[
			b_offset], lda);
		iadd = kadd[kbzero[jtype - 1] - 1];
		if (iadd != 0 && iadd <= n) {
		    i__3 = b_subscr(iadd, iadd);
		    i__4 = kbmagn[jtype - 1];
		    b[i__3].r = rmagn[i__4], b[i__3].i = 0.f;
		}

		if (kclass[jtype - 1] == 2 && n > 0) {

/*                 Include rotations   

                   Generate Q, Z as Householder transformations times   
                   a diagonal matrix. */

		    i__3 = n - 1;
		    for (jc = 1; jc <= i__3; ++jc) {
			i__4 = n;
			for (jr = jc; jr <= i__4; ++jr) {
			    i__5 = q_subscr(jr, jc);
			    clarnd_(&q__1, &c__3, &iseed[1]);
			    q[i__5].r = q__1.r, q[i__5].i = q__1.i;
			    i__5 = z___subscr(jr, jc);
			    clarnd_(&q__1, &c__3, &iseed[1]);
			    z__[i__5].r = q__1.r, z__[i__5].i = q__1.i;
/* L30: */
			}
			i__4 = n + 1 - jc;
			clarfg_(&i__4, &q_ref(jc, jc), &q_ref(jc + 1, jc), &
				c__1, &work[jc]);
			i__4 = (n << 1) + jc;
			i__5 = q_subscr(jc, jc);
			r__2 = q[i__5].r;
			r__1 = r_sign(&c_b28, &r__2);
			work[i__4].r = r__1, work[i__4].i = 0.f;
			i__4 = q_subscr(jc, jc);
			q[i__4].r = 1.f, q[i__4].i = 0.f;
			i__4 = n + 1 - jc;
			clarfg_(&i__4, &z___ref(jc, jc), &z___ref(jc + 1, jc),
				 &c__1, &work[n + jc]);
			i__4 = n * 3 + jc;
			i__5 = z___subscr(jc, jc);
			r__2 = z__[i__5].r;
			r__1 = r_sign(&c_b28, &r__2);
			work[i__4].r = r__1, work[i__4].i = 0.f;
			i__4 = z___subscr(jc, jc);
			z__[i__4].r = 1.f, z__[i__4].i = 0.f;
/* L40: */
		    }
		    clarnd_(&q__1, &c__3, &iseed[1]);
		    ctemp.r = q__1.r, ctemp.i = q__1.i;
		    i__3 = q_subscr(n, n);
		    q[i__3].r = 1.f, q[i__3].i = 0.f;
		    i__3 = n;
		    work[i__3].r = 0.f, work[i__3].i = 0.f;
		    i__3 = n * 3;
		    r__1 = c_abs(&ctemp);
		    q__1.r = ctemp.r / r__1, q__1.i = ctemp.i / r__1;
		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;
		    clarnd_(&q__1, &c__3, &iseed[1]);
		    ctemp.r = q__1.r, ctemp.i = q__1.i;
		    i__3 = z___subscr(n, n);
		    z__[i__3].r = 1.f, z__[i__3].i = 0.f;
		    i__3 = n << 1;
		    work[i__3].r = 0.f, work[i__3].i = 0.f;
		    i__3 = n << 2;
		    r__1 = c_abs(&ctemp);
		    q__1.r = ctemp.r / r__1, q__1.i = ctemp.i / r__1;
		    work[i__3].r = q__1.r, work[i__3].i = q__1.i;

/*                 Apply the diagonal matrices */

		    i__3 = n;
		    for (jc = 1; jc <= i__3; ++jc) {
			i__4 = n;
			for (jr = 1; jr <= i__4; ++jr) {
			    i__5 = a_subscr(jr, jc);
			    i__6 = (n << 1) + jr;
			    r_cnjg(&q__3, &work[n * 3 + jc]);
			    q__2.r = work[i__6].r * q__3.r - work[i__6].i * 
				    q__3.i, q__2.i = work[i__6].r * q__3.i + 
				    work[i__6].i * q__3.r;
			    i__7 = a_subscr(jr, jc);
			    q__1.r = q__2.r * a[i__7].r - q__2.i * a[i__7].i, 
				    q__1.i = q__2.r * a[i__7].i + q__2.i * a[
				    i__7].r;
			    a[i__5].r = q__1.r, a[i__5].i = q__1.i;
			    i__5 = b_subscr(jr, jc);
			    i__6 = (n << 1) + jr;
			    r_cnjg(&q__3, &work[n * 3 + jc]);
			    q__2.r = work[i__6].r * q__3.r - work[i__6].i * 
				    q__3.i, q__2.i = work[i__6].r * q__3.i + 
				    work[i__6].i * q__3.r;
			    i__7 = b_subscr(jr, jc);
			    q__1.r = q__2.r * b[i__7].r - q__2.i * b[i__7].i, 
				    q__1.i = q__2.r * b[i__7].i + q__2.i * b[
				    i__7].r;
			    b[i__5].r = q__1.r, b[i__5].i = q__1.i;
/* L50: */
			}
/* L60: */
		    }
		    i__3 = n - 1;
		    cunm2r_("L", "N", &n, &n, &i__3, &q[q_offset], ldq, &work[
			    1], &a[a_offset], lda, &work[(n << 1) + 1], &ierr);
		    if (ierr != 0) {
			goto L90;
		    }
		    i__3 = n - 1;
		    cunm2r_("R", "C", &n, &n, &i__3, &z__[z_offset], ldq, &
			    work[n + 1], &a[a_offset], lda, &work[(n << 1) + 
			    1], &ierr);
		    if (ierr != 0) {
			goto L90;
		    }
		    i__3 = n - 1;
		    cunm2r_("L", "N", &n, &n, &i__3, &q[q_offset], ldq, &work[
			    1], &b[b_offset], lda, &work[(n << 1) + 1], &ierr);
		    if (ierr != 0) {
			goto L90;
		    }
		    i__3 = n - 1;
		    cunm2r_("R", "C", &n, &n, &i__3, &z__[z_offset], ldq, &
			    work[n + 1], &b[b_offset], lda, &work[(n << 1) + 
			    1], &ierr);
		    if (ierr != 0) {
			goto L90;
		    }
		}
	    } else {

/*              Random matrices */

		i__3 = n;
		for (jc = 1; jc <= i__3; ++jc) {
		    i__4 = n;
		    for (jr = 1; jr <= i__4; ++jr) {
			i__5 = a_subscr(jr, jc);
			i__6 = kamagn[jtype - 1];
			clarnd_(&q__2, &c__4, &iseed[1]);
			q__1.r = rmagn[i__6] * q__2.r, q__1.i = rmagn[i__6] * 
				q__2.i;
			a[i__5].r = q__1.r, a[i__5].i = q__1.i;
			i__5 = b_subscr(jr, jc);
			i__6 = kbmagn[jtype - 1];
			clarnd_(&q__2, &c__4, &iseed[1]);
			q__1.r = rmagn[i__6] * q__2.r, q__1.i = rmagn[i__6] * 
				q__2.i;
			b[i__5].r = q__1.r, b[i__5].i = q__1.i;
/* L70: */
		    }
/* L80: */
		}
	    }

L90:

	    if (ierr != 0) {
		io___40.ciunit = *nounit;
		s_wsfe(&io___40);
		do_fio(&c__1, "Generator", (ftnlen)9);
		do_fio(&c__1, (char *)&ierr, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(ierr);
		return 0;
	    }

L100:

	    for (i__ = 1; i__ <= 7; ++i__) {
		result[i__] = -1.f;
/* L110: */
	    }

/*           Call CGGEV to compute eigenvalues and eigenvectors. */

	    clacpy_(" ", &n, &n, &a[a_offset], lda, &s[s_offset], lda);
	    clacpy_(" ", &n, &n, &b[b_offset], lda, &t[t_offset], lda);
	    cggev_("V", "V", &n, &s[s_offset], lda, &t[t_offset], lda, &alpha[
		    1], &beta[1], &q[q_offset], ldq, &z__[z_offset], ldq, &
		    work[1], lwork, &rwork[1], &ierr);
	    if (ierr != 0 && ierr != n + 1) {
		result[1] = ulpinv;
		io___42.ciunit = *nounit;
		s_wsfe(&io___42);
		do_fio(&c__1, "CGGEV1", (ftnlen)6);
		do_fio(&c__1, (char *)&ierr, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(ierr);
		goto L190;
	    }

/*           Do the tests (1) and (2) */

	    cget52_(&c_true, &n, &a[a_offset], lda, &b[b_offset], lda, &q[
		    q_offset], ldq, &alpha[1], &beta[1], &work[1], &rwork[1], 
		    &result[1]);
	    if (result[2] > *thresh) {
		io___43.ciunit = *nounit;
		s_wsfe(&io___43);
		do_fio(&c__1, "Left", (ftnlen)4);
		do_fio(&c__1, "CGGEV1", (ftnlen)6);
		do_fio(&c__1, (char *)&result[2], (ftnlen)sizeof(real));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
	    }

/*           Do the tests (3) and (4) */

	    cget52_(&c_false, &n, &a[a_offset], lda, &b[b_offset], lda, &z__[
		    z_offset], ldq, &alpha[1], &beta[1], &work[1], &rwork[1], 
		    &result[3]);
	    if (result[4] > *thresh) {
		io___44.ciunit = *nounit;
		s_wsfe(&io___44);
		do_fio(&c__1, "Right", (ftnlen)5);
		do_fio(&c__1, "CGGEV1", (ftnlen)6);
		do_fio(&c__1, (char *)&result[4], (ftnlen)sizeof(real));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
	    }

/*           Do test (5) */

	    clacpy_(" ", &n, &n, &a[a_offset], lda, &s[s_offset], lda);
	    clacpy_(" ", &n, &n, &b[b_offset], lda, &t[t_offset], lda);
	    cggev_("N", "N", &n, &s[s_offset], lda, &t[t_offset], lda, &
		    alpha1[1], &beta1[1], &q[q_offset], ldq, &z__[z_offset], 
		    ldq, &work[1], lwork, &rwork[1], &ierr);
	    if (ierr != 0 && ierr != n + 1) {
		result[1] = ulpinv;
		io___45.ciunit = *nounit;
		s_wsfe(&io___45);
		do_fio(&c__1, "CGGEV2", (ftnlen)6);
		do_fio(&c__1, (char *)&ierr, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(ierr);
		goto L190;
	    }

	    i__3 = n;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = j;
		i__5 = j;
		i__6 = j;
		i__7 = j;
		if (alpha[i__4].r != alpha1[i__5].r || alpha[i__4].i != 
			alpha1[i__5].i || (beta[i__6].r != beta1[i__7].r || 
			beta[i__6].i != beta1[i__7].i)) {
		    result[5] = ulpinv;
		}
/* L120: */
	    }

/*           Do test (6): Compute eigenvalues and left eigenvectors,   
             and test them */

	    clacpy_(" ", &n, &n, &a[a_offset], lda, &s[s_offset], lda);
	    clacpy_(" ", &n, &n, &b[b_offset], lda, &t[t_offset], lda);
	    cggev_("V", "N", &n, &s[s_offset], lda, &t[t_offset], lda, &
		    alpha1[1], &beta1[1], &qe[qe_offset], ldqe, &z__[z_offset]
		    , ldq, &work[1], lwork, &rwork[1], &ierr);
	    if (ierr != 0 && ierr != n + 1) {
		result[1] = ulpinv;
		io___46.ciunit = *nounit;
		s_wsfe(&io___46);
		do_fio(&c__1, "CGGEV3", (ftnlen)6);
		do_fio(&c__1, (char *)&ierr, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(ierr);
		goto L190;
	    }

	    i__3 = n;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = j;
		i__5 = j;
		i__6 = j;
		i__7 = j;
		if (alpha[i__4].r != alpha1[i__5].r || alpha[i__4].i != 
			alpha1[i__5].i || (beta[i__6].r != beta1[i__7].r || 
			beta[i__6].i != beta1[i__7].i)) {
		    result[6] = ulpinv;
		}
/* L130: */
	    }

	    i__3 = n;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = n;
		for (jc = 1; jc <= i__4; ++jc) {
		    i__5 = q_subscr(j, jc);
		    i__6 = qe_subscr(j, jc);
		    if (q[i__5].r != qe[i__6].r || q[i__5].i != qe[i__6].i) {
			result[6] = ulpinv;
		    }
/* L140: */
		}
/* L150: */
	    }

/*           Do test (7): Compute eigenvalues and right eigenvectors,   
             and test them */

	    clacpy_(" ", &n, &n, &a[a_offset], lda, &s[s_offset], lda);
	    clacpy_(" ", &n, &n, &b[b_offset], lda, &t[t_offset], lda);
	    cggev_("N", "V", &n, &s[s_offset], lda, &t[t_offset], lda, &
		    alpha1[1], &beta1[1], &q[q_offset], ldq, &qe[qe_offset], 
		    ldqe, &work[1], lwork, &rwork[1], &ierr);
	    if (ierr != 0 && ierr != n + 1) {
		result[1] = ulpinv;
		io___47.ciunit = *nounit;
		s_wsfe(&io___47);
		do_fio(&c__1, "CGGEV4", (ftnlen)6);
		do_fio(&c__1, (char *)&ierr, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
		do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer));
		do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(integer));
		e_wsfe();
		*info = abs(ierr);
		goto L190;
	    }

	    i__3 = n;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = j;
		i__5 = j;
		i__6 = j;
		i__7 = j;
		if (alpha[i__4].r != alpha1[i__5].r || alpha[i__4].i != 
			alpha1[i__5].i || (beta[i__6].r != beta1[i__7].r || 
			beta[i__6].i != beta1[i__7].i)) {
		    result[7] = ulpinv;
		}
/* L160: */
	    }

	    i__3 = n;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = n;
		for (jc = 1; jc <= i__4; ++jc) {
		    i__5 = z___subscr(j, jc);
		    i__6 = qe_subscr(j, jc);
		    if (z__[i__5].r != qe[i__6].r || z__[i__5].i != qe[i__6]
			    .i) {
			result[7] = ulpinv;
		    }
/* L170: */
		}
/* L180: */
	    }

/*           End of Loop -- Check for RESULT(j) > THRESH */

L190:

	    ntestt += 7;

/*           Print out tests which fail. */

	    for (jr = 1; jr <= 9; ++jr) {
		if (result[jr] >= *thresh) {

/*                 If this is the first test to fail,   
                   print a header to the data file. */

		    if (nerrs == 0) {
			io___48.ciunit = *nounit;
			s_wsfe(&io___48);
			do_fio(&c__1, "CGV", (ftnlen)3);
			e_wsfe();

/*                    Matrix types */

			io___49.ciunit = *nounit;
			s_wsfe(&io___49);
			e_wsfe();
			io___50.ciunit = *nounit;
			s_wsfe(&io___50);
			e_wsfe();
			io___51.ciunit = *nounit;
			s_wsfe(&io___51);
			do_fio(&c__1, "Orthogonal", (ftnlen)10);
			e_wsfe();

/*                    Tests performed */

			io___52.ciunit = *nounit;
			s_wsfe(&io___52);
			e_wsfe();

		    }
		    ++nerrs;
		    if (result[jr] < 1e4f) {
			io___53.ciunit = *nounit;
			s_wsfe(&io___53);
			do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer))
				;
			do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(
				integer));
			do_fio(&c__1, (char *)&jr, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&result[jr], (ftnlen)sizeof(
				real));
			e_wsfe();
		    } else {
			io___54.ciunit = *nounit;
			s_wsfe(&io___54);
			do_fio(&c__1, (char *)&n, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&jtype, (ftnlen)sizeof(integer))
				;
			do_fio(&c__4, (char *)&ioldsd[0], (ftnlen)sizeof(
				integer));
			do_fio(&c__1, (char *)&jr, (ftnlen)sizeof(integer));
			do_fio(&c__1, (char *)&result[jr], (ftnlen)sizeof(
				real));
			e_wsfe();
		    }
		}
/* L200: */
	    }

L210:
	    ;
	}
/* L220: */
    }

/*     Summary */

    alasvm_("CGV", nounit, &nerrs, &ntestt, &c__0);

    work[1].r = (real) maxwrk, work[1].i = 0.f;

    return 0;







/*     End of CDRGEV */

} /* cdrgev_ */
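
The scaled residual used in test (1) can be sketched in ordinary C99 complex arithmetic. The fragment below is an independent illustration, not part of CDRGEV or CGGEV: the helper names matnorm and check_left_pair and the 2-by-2 data are invented for the example, and the denominator uses a plain Frobenius norm rather than the norm computed by CGET52.

#include <complex.h>
#include <math.h>
#include <float.h>
#include <stdio.h>

/* Frobenius-style norm of an n x n complex matrix stored column-major. */
static float matnorm(int n, const float complex *m)
{
    float s = 0.f;
    for (int k = 0; k < n * n; ++k)
        s += crealf(m[k] * conjf(m[k]));
    return sqrtf(s);
}

/* Residual of one left eigenpair (alpha, beta, l) of the pencil (A, B):
   | l^H (beta*A - alpha*B) | / ( ulp * max(|beta*A|, |alpha*B|) ).      */
static float check_left_pair(int n, const float complex *a,
                             const float complex *b,
                             float complex alpha, float complex beta,
                             const float complex *l)
{
    float complex m[4], r[2];            /* enough for the n = 2 demo    */
    for (int k = 0; k < n * n; ++k)
        m[k] = beta * a[k] - alpha * b[k];
    for (int j = 0; j < n; ++j) {        /* r = l^H * M (row vector)     */
        r[j] = 0.f;
        for (int i = 0; i < n; ++i)
            r[j] += conjf(l[i]) * m[i + j * n];
    }
    float resid = sqrtf(crealf(r[0] * conjf(r[0]) + r[1] * conjf(r[1])));
    float denom = fmaxf(matnorm(n, a) * cabsf(beta),
                        matnorm(n, b) * cabsf(alpha));
    return resid / (FLT_EPSILON * fmaxf(denom, FLT_MIN));
}

int main(void)
{
    /* Diagonal pencil: eigenvalues are A(i,i)/B(i,i); left eigenvectors
       are the unit vectors, so the scaled residual should be tiny.      */
    float complex a[4] = { 2.f, 0.f, 0.f, 3.f };
    float complex b[4] = { 1.f, 0.f, 0.f, 1.f };
    float complex l[2] = { 1.f, 0.f };
    printf("scaled residual = %g\n",
           check_left_pair(2, a, b, 2.f, 1.f, l));
    return 0;
}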
Exemplo n.º 27
0
double clantr_(char *norm, char *uplo, char *diag, int *m, int *n,
               complex *a, int *lda, float *work)
{
    /* System generated locals */
    int a_dim1, a_offset, i__1, i__2, i__3, i__4;
    float ret_val, r__1, r__2;

    /* Builtin functions */
    double c_abs(complex *), sqrt(double);

    /* Local variables */
    int i__, j;
    float sum, scale;
    int udiag;
    extern int lsame_(char *, char *);
    float value;
    extern  int classq_(int *, complex *, int *, float
                        *, float *);


    /*  -- LAPACK auxiliary routine (version 3.2) -- */
    /*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
    /*     November 2006 */

    /*     .. Scalar Arguments .. */
    /*     .. */
    /*     .. Array Arguments .. */
    /*     .. */

    /*  Purpose */
    /*  ======= */

    /*  CLANTR  returns the value of the one norm,  or the Frobenius norm, or */
    /*  the  infinity norm,  or the  element of  largest absolute value  of a */
    /*  trapezoidal or triangular matrix A. */

    /*  Description */
    /*  =========== */

    /*  CLANTR returns the value */

    /*     CLANTR = ( MAX(ABS(A(i,j))), NORM = 'M' or 'm' */
    /*              ( */
    /*              ( norm1(A),         NORM = '1', 'O' or 'o' */
    /*              ( */
    /*              ( normI(A),         NORM = 'I' or 'i' */
    /*              ( */
    /*              ( normF(A),         NORM = 'F', 'f', 'E' or 'e' */

    /*  where  norm1  denotes the  one norm of a matrix (maximum column sum), */
    /*  normI  denotes the  infinity norm  of a matrix  (maximum row sum) and */
    /*  normF  denotes the  Frobenius norm of a matrix (square root of sum of */
    /*  squares).  Note that  MAX(ABS(A(i,j)))  is not a consistent matrix norm. */

    /*  Arguments */
    /*  ========= */

    /*  NORM    (input) CHARACTER*1 */
    /*          Specifies the value to be returned in CLANTR as described */
    /*          above. */

    /*  UPLO    (input) CHARACTER*1 */
    /*          Specifies whether the matrix A is upper or lower trapezoidal. */
    /*          = 'U':  Upper trapezoidal */
    /*          = 'L':  Lower trapezoidal */
    /*          Note that A is triangular instead of trapezoidal if M = N. */

    /*  DIAG    (input) CHARACTER*1 */
    /*          Specifies whether or not the matrix A has unit diagonal. */
    /*          = 'N':  Non-unit diagonal */
    /*          = 'U':  Unit diagonal */

    /*  M       (input) INTEGER */
    /*          The number of rows of the matrix A.  M >= 0, and if */
    /*          UPLO = 'U', M <= N.  When M = 0, CLANTR is set to zero. */

    /*  N       (input) INTEGER */
    /*          The number of columns of the matrix A.  N >= 0, and if */
    /*          UPLO = 'L', N <= M.  When N = 0, CLANTR is set to zero. */

    /*  A       (input) COMPLEX array, dimension (LDA,N) */
    /*          The trapezoidal matrix A (A is triangular if M = N). */
    /*          If UPLO = 'U', the leading m by n upper trapezoidal part of */
    /*          the array A contains the upper trapezoidal matrix, and the */
    /*          strictly lower triangular part of A is not referenced. */
    /*          If UPLO = 'L', the leading m by n lower trapezoidal part of */
    /*          the array A contains the lower trapezoidal matrix, and the */
    /*          strictly upper triangular part of A is not referenced.  Note */
    /*          that when DIAG = 'U', the diagonal elements of A are not */
    /*          referenced and are assumed to be one. */

    /*  LDA     (input) INTEGER */
    /*          The leading dimension of the array A.  LDA >= MAX(M,1). */

    /*  WORK    (workspace) REAL array, dimension (MAX(1,LWORK)), */
    /*          where LWORK >= M when NORM = 'I'; otherwise, WORK is not */
    /*          referenced. */

    /* ===================================================================== */

    /*     .. Parameters .. */
    /*     .. */
    /*     .. Local Scalars .. */
    /*     .. */
    /*     .. External Functions .. */
    /*     .. */
    /*     .. External Subroutines .. */
    /*     .. */
    /*     .. Intrinsic Functions .. */
    /*     .. */
    /*     .. Executable Statements .. */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --work;

    /* Function Body */
    if (MIN(*m,*n) == 0) {
        value = 0.f;
    } else if (lsame_(norm, "M")) {

        /*        Find MAX(ABS(A(i,j))). */

        if (lsame_(diag, "U")) {
            value = 1.f;
            if (lsame_(uplo, "U")) {
                i__1 = *n;
                for (j = 1; j <= i__1; ++j) {
                    /* Computing MIN */
                    i__3 = *m, i__4 = j - 1;
                    i__2 = MIN(i__3,i__4);
                    for (i__ = 1; i__ <= i__2; ++i__) {
                        /* Computing MAX */
                        r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
                        value = MAX(r__1,r__2);
                        /* L10: */
                    }
                    /* L20: */
                }
            } else {
                i__1 = *n;
                for (j = 1; j <= i__1; ++j) {
                    i__2 = *m;
                    for (i__ = j + 1; i__ <= i__2; ++i__) {
                        /* Computing MAX */
                        r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
                        value = MAX(r__1,r__2);
                        /* L30: */
                    }
                    /* L40: */
                }
            }
        } else {
            value = 0.f;
            if (lsame_(uplo, "U")) {
                i__1 = *n;
                for (j = 1; j <= i__1; ++j) {
                    i__2 = MIN(*m,j);
                    for (i__ = 1; i__ <= i__2; ++i__) {
                        /* Computing MAX */
                        r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
                        value = MAX(r__1,r__2);
                        /* L50: */
                    }
                    /* L60: */
                }
            } else {
                i__1 = *n;
                for (j = 1; j <= i__1; ++j) {
                    i__2 = *m;
                    for (i__ = j; i__ <= i__2; ++i__) {
                        /* Computing MAX */
                        r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
                        value = MAX(r__1,r__2);
                        /* L70: */
                    }
                    /* L80: */
                }
            }
        }
    } else if (lsame_(norm, "O") || *(unsigned char *)
               norm == '1') {

        /*        Find norm1(A). */

        value = 0.f;
        udiag = lsame_(diag, "U");
        if (lsame_(uplo, "U")) {
            i__1 = *n;
            for (j = 1; j <= i__1; ++j) {
                if (udiag && j <= *m) {
                    sum = 1.f;
                    i__2 = j - 1;
                    for (i__ = 1; i__ <= i__2; ++i__) {
                        sum += c_abs(&a[i__ + j * a_dim1]);
                        /* L90: */
                    }
                } else {
                    sum = 0.f;
                    i__2 = MIN(*m,j);
                    for (i__ = 1; i__ <= i__2; ++i__) {
                        sum += c_abs(&a[i__ + j * a_dim1]);
                        /* L100: */
                    }
                }
                value = MAX(value,sum);
                /* L110: */
            }
        } else {
            i__1 = *n;
            for (j = 1; j <= i__1; ++j) {
                if (udiag) {
                    sum = 1.f;
                    i__2 = *m;
                    for (i__ = j + 1; i__ <= i__2; ++i__) {
                        sum += c_abs(&a[i__ + j * a_dim1]);
                        /* L120: */
                    }
                } else {
                    sum = 0.f;
                    i__2 = *m;
                    for (i__ = j; i__ <= i__2; ++i__) {
                        sum += c_abs(&a[i__ + j * a_dim1]);
                        /* L130: */
                    }
                }
                value = MAX(value,sum);
                /* L140: */
            }
        }
    } else if (lsame_(norm, "I")) {

        /*        Find normI(A). */

        if (lsame_(uplo, "U")) {
            if (lsame_(diag, "U")) {
                i__1 = *m;
                for (i__ = 1; i__ <= i__1; ++i__) {
                    work[i__] = 1.f;
                    /* L150: */
                }
                i__1 = *n;
                for (j = 1; j <= i__1; ++j) {
                    /* Computing MIN */
                    i__3 = *m, i__4 = j - 1;
                    i__2 = MIN(i__3,i__4);
                    for (i__ = 1; i__ <= i__2; ++i__) {
                        work[i__] += c_abs(&a[i__ + j * a_dim1]);
                        /* L160: */
                    }
                    /* L170: */
                }
            } else {
                i__1 = *m;
                for (i__ = 1; i__ <= i__1; ++i__) {
                    work[i__] = 0.f;
                    /* L180: */
                }
                i__1 = *n;
                for (j = 1; j <= i__1; ++j) {
                    i__2 = MIN(*m,j);
                    for (i__ = 1; i__ <= i__2; ++i__) {
                        work[i__] += c_abs(&a[i__ + j * a_dim1]);
                        /* L190: */
                    }
                    /* L200: */
                }
            }
        } else {
            if (lsame_(diag, "U")) {
                i__1 = *n;
                for (i__ = 1; i__ <= i__1; ++i__) {
                    work[i__] = 1.f;
                    /* L210: */
                }
                i__1 = *m;
                for (i__ = *n + 1; i__ <= i__1; ++i__) {
                    work[i__] = 0.f;
                    /* L220: */
                }
                i__1 = *n;
                for (j = 1; j <= i__1; ++j) {
                    i__2 = *m;
                    for (i__ = j + 1; i__ <= i__2; ++i__) {
                        work[i__] += c_abs(&a[i__ + j * a_dim1]);
                        /* L230: */
                    }
                    /* L240: */
                }
            } else {
                i__1 = *m;
                for (i__ = 1; i__ <= i__1; ++i__) {
                    work[i__] = 0.f;
                    /* L250: */
                }
                i__1 = *n;
                for (j = 1; j <= i__1; ++j) {
                    i__2 = *m;
                    for (i__ = j; i__ <= i__2; ++i__) {
                        work[i__] += c_abs(&a[i__ + j * a_dim1]);
                        /* L260: */
                    }
                    /* L270: */
                }
            }
        }
        value = 0.f;
        i__1 = *m;
        for (i__ = 1; i__ <= i__1; ++i__) {
            /* Computing MAX */
            r__1 = value, r__2 = work[i__];
            value = MAX(r__1,r__2);
            /* L280: */
        }
    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {

        /*        Find normF(A). */

        if (lsame_(uplo, "U")) {
            if (lsame_(diag, "U")) {
                scale = 1.f;
                sum = (float) MIN(*m,*n);
                i__1 = *n;
                for (j = 2; j <= i__1; ++j) {
                    /* Computing MIN */
                    i__3 = *m, i__4 = j - 1;
                    i__2 = MIN(i__3,i__4);
                    classq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
                    /* L290: */
                }
            } else {
                scale = 0.f;
                sum = 1.f;
                i__1 = *n;
                for (j = 1; j <= i__1; ++j) {
                    i__2 = MIN(*m,j);
                    classq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
                    /* L300: */
                }
            }
        } else {
            if (lsame_(diag, "U")) {
                scale = 1.f;
                sum = (float) MIN(*m,*n);
                i__1 = *n;
                for (j = 1; j <= i__1; ++j) {
                    i__2 = *m - j;
                    /* Computing MIN */
                    i__3 = *m, i__4 = j + 1;
                    classq_(&i__2, &a[MIN(i__3, i__4)+ j * a_dim1], &c__1, &
                            scale, &sum);
                    /* L310: */
                }
            } else {
                scale = 0.f;
                sum = 1.f;
                i__1 = *n;
                for (j = 1; j <= i__1; ++j) {
                    i__2 = *m - j + 1;
                    classq_(&i__2, &a[j + j * a_dim1], &c__1, &scale, &sum);
                    /* L320: */
                }
            }
        }
        value = scale * sqrt(sum);
    }

    ret_val = value;
    return ret_val;

    /*     End of CLANTR */

} /* clantr_ */
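
As an independent cross-check of the one-norm branch above, the following C99 sketch computes norm1 of a small upper triangular complex matrix directly as the maximum absolute column sum. The helper name tri_one_norm and the 3-by-3 data are invented for the illustration; it does not call this f2c translation.

#include <complex.h>
#include <math.h>
#include <stdio.h>

/* One norm (maximum absolute column sum) of an n x n upper triangular
   complex matrix stored column-major with leading dimension lda.       */
static float tri_one_norm(int n, const float complex *a, int lda)
{
    float value = 0.f;
    for (int j = 0; j < n; ++j) {
        float sum = 0.f;
        for (int i = 0; i <= j; ++i)          /* only the upper triangle */
            sum += cabsf(a[i + j * lda]);
        if (sum > value)
            value = sum;
    }
    return value;
}

int main(void)
{
    /* Column-major 3x3 upper triangular matrix; entries below the
       diagonal are never referenced.                                    */
    float complex a[9] = {
        1.f, 0.f, 0.f,                 /* column 1 */
        2.f, 3.f * I, 0.f,             /* column 2 */
        0.f, 4.f, 1.f + 1.f * I        /* column 3 */
    };
    /* Column sums: 1, 2+3 = 5, 0+4+sqrt(2) ~ 5.41, so norm1 ~ 5.41.     */
    printf("norm1 = %f\n", tri_one_norm(3, a, 3));
    return 0;
}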
Exemplo n.º 28
0
/* Subroutine */ int clar1v_(integer *n, integer *b1, integer *bn, real *
	lambda, real *d__, real *l, real *ld, real *lld, real *pivmin, real *
	gaptol, complex *z__, logical *wantnc, integer *negcnt, real *ztz, 
	real *mingma, integer *r__, integer *isuppz, real *nrminv, real *
	resid, real *rqcorr, real *work)
{
    /* System generated locals */
    integer i__1, i__2, i__3, i__4;
    real r__1;
    complex q__1, q__2;

    /* Builtin functions */
    double c_abs(complex *), sqrt(doublereal);

    /* Local variables */
    integer i__;
    real s;
    integer r1, r2;
    real eps, tmp;
    integer neg1, neg2, indp, inds;
    real dplus;
    extern doublereal slamch_(char *);
    integer indlpl, indumn;
    extern logical sisnan_(real *);
    real dminus;
    logical sawnan1, sawnan2;


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CLAR1V computes the (scaled) r-th column of the inverse of */
/*  the submatrix in rows B1 through BN of the tridiagonal matrix */
/*  L D L^T - sigma I. When sigma is close to an eigenvalue, the */
/*  computed vector is an accurate eigenvector. Usually, r corresponds */
/*  to the index where the eigenvector is largest in magnitude. */
/*  The following steps accomplish this computation : */
/*  (a) Stationary qd transform,  L D L^T - sigma I = L(+) D(+) L(+)^T, */
/*  (b) Progressive qd transform, L D L^T - sigma I = U(-) D(-) U(-)^T, */
/*  (c) Computation of the diagonal elements of the inverse of */
/*      L D L^T - sigma I by combining the above transforms, and choosing */
/*      r as the index where the diagonal of the inverse is (one of the) */
/*      largest in magnitude. */
/*  (d) Computation of the (scaled) r-th column of the inverse using the */
/*      twisted factorization obtained by combining the top part of the */
/*      stationary and the bottom part of the progressive transform. */

/*  Arguments */
/*  ========= */

/*  N        (input) INTEGER */
/*           The order of the matrix L D L^T. */

/*  B1       (input) INTEGER */
/*           First index of the submatrix of L D L^T. */

/*  BN       (input) INTEGER */
/*           Last index of the submatrix of L D L^T. */

/*  LAMBDA    (input) REAL */
/*           The shift. In order to compute an accurate eigenvector, */
/*           LAMBDA should be a good approximation to an eigenvalue */
/*           of L D L^T. */

/*  L        (input) REAL             array, dimension (N-1) */
/*           The (n-1) subdiagonal elements of the unit bidiagonal matrix */
/*           L, in elements 1 to N-1. */

/*  D        (input) REAL             array, dimension (N) */
/*           The n diagonal elements of the diagonal matrix D. */

/*  LD       (input) REAL             array, dimension (N-1) */
/*           The n-1 elements L(i)*D(i). */

/*  LLD      (input) REAL             array, dimension (N-1) */
/*           The n-1 elements L(i)*L(i)*D(i). */

/*  PIVMIN   (input) REAL */
/*           The minimum pivot in the Sturm sequence. */

/*  GAPTOL   (input) REAL */
/*           Tolerance that indicates when eigenvector entries are negligible */
/*           w.r.t. their contribution to the residual. */

/*  Z        (input/output) COMPLEX          array, dimension (N) */
/*           On input, all entries of Z must be set to 0. */
/*           On output, Z contains the (scaled) r-th column of the */
/*           inverse. The scaling is such that Z(R) equals 1. */

/*  WANTNC   (input) LOGICAL */
/*           Specifies whether NEGCNT has to be computed. */

/*  NEGCNT   (output) INTEGER */
/*           If WANTNC is .TRUE. then NEGCNT = the number of pivots < pivmin */
/*           in the  matrix factorization L D L^T, and NEGCNT = -1 otherwise. */

/*  ZTZ      (output) REAL */
/*           The square of the 2-norm of Z. */

/*  MINGMA   (output) REAL */
/*           The reciprocal of the largest (in magnitude) diagonal */
/*           element of the inverse of L D L^T - sigma I. */

/*  R        (input/output) INTEGER */
/*           The twist index for the twisted factorization used to */
/*           compute Z. */
/*           On input, 0 <= R <= N. If R is input as 0, R is set to */
/*           the index where (L D L^T - sigma I)^{-1} is largest */
/*           in magnitude. If 1 <= R <= N, R is unchanged. */
/*           On output, R contains the twist index used to compute Z. */
/*           Ideally, R designates the position of the maximum entry in the */
/*           eigenvector. */

/*  ISUPPZ   (output) INTEGER array, dimension (2) */
/*           The support of the vector in Z, i.e., the vector Z is */
/*           nonzero only in elements ISUPPZ(1) through ISUPPZ( 2 ). */

/*  NRMINV   (output) REAL */
/*           NRMINV = 1/SQRT( ZTZ ) */

/*  RESID    (output) REAL */
/*           The residual of the FP vector. */
/*           RESID = ABS( MINGMA )/SQRT( ZTZ ) */

/*  RQCORR   (output) REAL */
/*           The Rayleigh Quotient correction to LAMBDA. */
/*           RQCORR = MINGMA*TMP */

/*  WORK     (workspace) REAL             array, dimension (4*N) */

/*  Further Details */
/*  =============== */

/*  Based on contributions by */
/*     Beresford Parlett, University of California, Berkeley, USA */
/*     Jim Demmel, University of California, Berkeley, USA */
/*     Inderjit Dhillon, University of Texas, Austin, USA */
/*     Osni Marques, LBNL/NERSC, USA */
/*     Christof Voemel, University of California, Berkeley, USA */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    --work;
    --isuppz;
    --z__;
    --lld;
    --ld;
    --l;
    --d__;

    /* Function Body */
    eps = slamch_("Precision");
    if (*r__ == 0) {
	r1 = *b1;
	r2 = *bn;
    } else {
	r1 = *r__;
	r2 = *r__;
    }
/*     Storage for LPLUS */
    indlpl = 0;
/*     Storage for UMINUS */
    indumn = *n;
    inds = (*n << 1) + 1;
    indp = *n * 3 + 1;
    if (*b1 == 1) {
	work[inds] = 0.f;
    } else {
	work[inds + *b1 - 1] = lld[*b1 - 1];
    }

/*     Compute the stationary transform (using the differential form) */
/*     until the index R2. */

    sawnan1 = FALSE_;
    neg1 = 0;
    s = work[inds + *b1 - 1] - *lambda;
    i__1 = r1 - 1;
    for (i__ = *b1; i__ <= i__1; ++i__) {
	dplus = d__[i__] + s;
	work[indlpl + i__] = ld[i__] / dplus;
	if (dplus < 0.f) {
	    ++neg1;
	}
	work[inds + i__] = s * work[indlpl + i__] * l[i__];
	s = work[inds + i__] - *lambda;
/* L50: */
    }
    sawnan1 = sisnan_(&s);
    if (sawnan1) {
	goto L60;
    }
    i__1 = r2 - 1;
    for (i__ = r1; i__ <= i__1; ++i__) {
	dplus = d__[i__] + s;
	work[indlpl + i__] = ld[i__] / dplus;
	work[inds + i__] = s * work[indlpl + i__] * l[i__];
	s = work[inds + i__] - *lambda;
/* L51: */
    }
    sawnan1 = sisnan_(&s);

L60:
    if (sawnan1) {
/*        Runs a slower version of the above loop if a NaN is detected */
	neg1 = 0;
	s = work[inds + *b1 - 1] - *lambda;
	i__1 = r1 - 1;
	for (i__ = *b1; i__ <= i__1; ++i__) {
	    dplus = d__[i__] + s;
	    if (dabs(dplus) < *pivmin) {
		dplus = -(*pivmin);
	    }
	    work[indlpl + i__] = ld[i__] / dplus;
	    if (dplus < 0.f) {
		++neg1;
	    }
	    work[inds + i__] = s * work[indlpl + i__] * l[i__];
	    if (work[indlpl + i__] == 0.f) {
		work[inds + i__] = lld[i__];
	    }
	    s = work[inds + i__] - *lambda;
/* L70: */
	}
	i__1 = r2 - 1;
	for (i__ = r1; i__ <= i__1; ++i__) {
	    dplus = d__[i__] + s;
	    if (dabs(dplus) < *pivmin) {
		dplus = -(*pivmin);
	    }
	    work[indlpl + i__] = ld[i__] / dplus;
	    work[inds + i__] = s * work[indlpl + i__] * l[i__];
	    if (work[indlpl + i__] == 0.f) {
		work[inds + i__] = lld[i__];
	    }
	    s = work[inds + i__] - *lambda;
/* L71: */
	}
    }

/*     Compute the progressive transform (using the differential form) */
/*     until the index R1 */

    sawnan2 = FALSE_;
    neg2 = 0;
    work[indp + *bn - 1] = d__[*bn] - *lambda;
    i__1 = r1;
    for (i__ = *bn - 1; i__ >= i__1; --i__) {
	dminus = lld[i__] + work[indp + i__];
	tmp = d__[i__] / dminus;
	if (dminus < 0.f) {
	    ++neg2;
	}
	work[indumn + i__] = l[i__] * tmp;
	work[indp + i__ - 1] = work[indp + i__] * tmp - *lambda;
/* L80: */
    }
    tmp = work[indp + r1 - 1];
    sawnan2 = sisnan_(&tmp);
    if (sawnan2) {
/*        Runs a slower version of the above loop if a NaN is detected */
	neg2 = 0;
	i__1 = r1;
	for (i__ = *bn - 1; i__ >= i__1; --i__) {
	    dminus = lld[i__] + work[indp + i__];
	    if (dabs(dminus) < *pivmin) {
		dminus = -(*pivmin);
	    }
	    tmp = d__[i__] / dminus;
	    if (dminus < 0.f) {
		++neg2;
	    }
	    work[indumn + i__] = l[i__] * tmp;
	    work[indp + i__ - 1] = work[indp + i__] * tmp - *lambda;
	    if (tmp == 0.f) {
		work[indp + i__ - 1] = d__[i__] - *lambda;
	    }
/* L100: */
	}
    }

/*     Find the index (from R1 to R2) of the largest (in magnitude) */
/*     diagonal element of the inverse */

    *mingma = work[inds + r1 - 1] + work[indp + r1 - 1];
    if (*mingma < 0.f) {
	++neg1;
    }
    if (*wantnc) {
	*negcnt = neg1 + neg2;
    } else {
	*negcnt = -1;
    }
    if (dabs(*mingma) == 0.f) {
	*mingma = eps * work[inds + r1 - 1];
    }
    *r__ = r1;
    i__1 = r2 - 1;
    for (i__ = r1; i__ <= i__1; ++i__) {
	tmp = work[inds + i__] + work[indp + i__];
	if (tmp == 0.f) {
	    tmp = eps * work[inds + i__];
	}
	if (dabs(tmp) <= dabs(*mingma)) {
	    *mingma = tmp;
	    *r__ = i__ + 1;
	}
/* L110: */
    }

/*     Compute the FP vector: solve N^T v = e_r */

    isuppz[1] = *b1;
    isuppz[2] = *bn;
    i__1 = *r__;
    z__[i__1].r = 1.f, z__[i__1].i = 0.f;
    *ztz = 1.f;

/*     Compute the FP vector upwards from R */

    if (! sawnan1 && ! sawnan2) {
	i__1 = *b1;
	for (i__ = *r__ - 1; i__ >= i__1; --i__) {
	    i__2 = i__;
	    i__3 = indlpl + i__;
	    i__4 = i__ + 1;
	    q__2.r = work[i__3] * z__[i__4].r, q__2.i = work[i__3] * z__[i__4]
		    .i;
	    q__1.r = -q__2.r, q__1.i = -q__2.i;
	    z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
	    if ((c_abs(&z__[i__]) + c_abs(&z__[i__ + 1])) * (r__1 = ld[i__], 
		    dabs(r__1)) < *gaptol) {
		i__2 = i__;
		z__[i__2].r = 0.f, z__[i__2].i = 0.f;
		isuppz[1] = i__ + 1;
		goto L220;
	    }
	    i__2 = i__;
	    i__3 = i__;
	    q__1.r = z__[i__2].r * z__[i__3].r - z__[i__2].i * z__[i__3].i, 
		    q__1.i = z__[i__2].r * z__[i__3].i + z__[i__2].i * z__[
		    i__3].r;
	    *ztz += q__1.r;
/* L210: */
	}
L220:
	;
    } else {
/*        Run slower loop if NaN occurred. */
	i__1 = *b1;
	for (i__ = *r__ - 1; i__ >= i__1; --i__) {
	    i__2 = i__ + 1;
	    if (z__[i__2].r == 0.f && z__[i__2].i == 0.f) {
		i__2 = i__;
		r__1 = -(ld[i__ + 1] / ld[i__]);
		i__3 = i__ + 2;
		q__1.r = r__1 * z__[i__3].r, q__1.i = r__1 * z__[i__3].i;
		z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
	    } else {
		i__2 = i__;
		i__3 = indlpl + i__;
		i__4 = i__ + 1;
		q__2.r = work[i__3] * z__[i__4].r, q__2.i = work[i__3] * z__[
			i__4].i;
		q__1.r = -q__2.r, q__1.i = -q__2.i;
		z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
	    }
	    if ((c_abs(&z__[i__]) + c_abs(&z__[i__ + 1])) * (r__1 = ld[i__], 
		    dabs(r__1)) < *gaptol) {
		i__2 = i__;
		z__[i__2].r = 0.f, z__[i__2].i = 0.f;
		isuppz[1] = i__ + 1;
		goto L240;
	    }
	    i__2 = i__;
	    i__3 = i__;
	    q__1.r = z__[i__2].r * z__[i__3].r - z__[i__2].i * z__[i__3].i, 
		    q__1.i = z__[i__2].r * z__[i__3].i + z__[i__2].i * z__[
		    i__3].r;
	    *ztz += q__1.r;
/* L230: */
	}
L240:
	;
    }
/*     Compute the FP vector downwards from R in blocks of size BLKSIZ */
    if (! sawnan1 && ! sawnan2) {
	i__1 = *bn - 1;
	for (i__ = *r__; i__ <= i__1; ++i__) {
	    i__2 = i__ + 1;
	    i__3 = indumn + i__;
	    i__4 = i__;
	    q__2.r = work[i__3] * z__[i__4].r, q__2.i = work[i__3] * z__[i__4]
		    .i;
	    q__1.r = -q__2.r, q__1.i = -q__2.i;
	    z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
	    if ((c_abs(&z__[i__]) + c_abs(&z__[i__ + 1])) * (r__1 = ld[i__], 
		    dabs(r__1)) < *gaptol) {
		i__2 = i__ + 1;
		z__[i__2].r = 0.f, z__[i__2].i = 0.f;
		isuppz[2] = i__;
		goto L260;
	    }
	    i__2 = i__ + 1;
	    i__3 = i__ + 1;
	    q__1.r = z__[i__2].r * z__[i__3].r - z__[i__2].i * z__[i__3].i, 
		    q__1.i = z__[i__2].r * z__[i__3].i + z__[i__2].i * z__[
		    i__3].r;
	    *ztz += q__1.r;
/* L250: */
	}
L260:
	;
    } else {
/*        Run slower loop if NaN occurred. */
	i__1 = *bn - 1;
	for (i__ = *r__; i__ <= i__1; ++i__) {
	    i__2 = i__;
	    if (z__[i__2].r == 0.f && z__[i__2].i == 0.f) {
		i__2 = i__ + 1;
		r__1 = -(ld[i__ - 1] / ld[i__]);
		i__3 = i__ - 1;
		q__1.r = r__1 * z__[i__3].r, q__1.i = r__1 * z__[i__3].i;
		z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
	    } else {
		i__2 = i__ + 1;
		i__3 = indumn + i__;
		i__4 = i__;
		q__2.r = work[i__3] * z__[i__4].r, q__2.i = work[i__3] * z__[
			i__4].i;
		q__1.r = -q__2.r, q__1.i = -q__2.i;
		z__[i__2].r = q__1.r, z__[i__2].i = q__1.i;
	    }
	    if ((c_abs(&z__[i__]) + c_abs(&z__[i__ + 1])) * (r__1 = ld[i__], 
		    dabs(r__1)) < *gaptol) {
		i__2 = i__ + 1;
		z__[i__2].r = 0.f, z__[i__2].i = 0.f;
		isuppz[2] = i__;
		goto L280;
	    }
	    i__2 = i__ + 1;
	    i__3 = i__ + 1;
	    q__1.r = z__[i__2].r * z__[i__3].r - z__[i__2].i * z__[i__3].i, 
		    q__1.i = z__[i__2].r * z__[i__3].i + z__[i__2].i * z__[
		    i__3].r;
	    *ztz += q__1.r;
/* L270: */
	}
L280:
	;
    }

/*     Compute quantities for convergence test */

    tmp = 1.f / *ztz;
    *nrminv = sqrt(tmp);
    *resid = dabs(*mingma) * *nrminv;
    *rqcorr = *mingma * tmp;


    return 0;

/*     End of CLAR1V */

} /* clar1v_ */
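
Step (a) above, the stationary qd transform in differential form, L D L^T - sigma I = L(+) D(+) L(+)^T, can be written on its own. The sketch below mirrors the fast loop of the routine for a small real example; the function name stationary_qd and the test data are invented, and the pivot and NaN safeguards of the actual routine are omitted.

#include <stdio.h>

/* Differential form of the stationary qd transform: given the unit lower
   bidiagonal L (subdiagonal l), diagonal D and a shift sigma, compute
   D(+) and the subdiagonal of L(+) with L D L^T - sigma I = L+ D+ L+^T. */
static void stationary_qd(int n, const double *d, const double *l,
                          double sigma, double *dplus, double *lplus)
{
    double s = -sigma;
    for (int i = 0; i < n - 1; ++i) {
        dplus[i] = d[i] + s;                    /* new pivot             */
        lplus[i] = (l[i] * d[i]) / dplus[i];    /* ld(i) / dplus(i)      */
        s = lplus[i] * l[i] * s - sigma;        /* differential update   */
    }
    dplus[n - 1] = d[n - 1] + s;
}

int main(void)
{
    /* 3x3 example: D = diag(2,3,4), L has subdiagonal (0.5, 0.25),
       shift sigma = 1.  Expected D(+) = (1, 1.5, 2.8125).               */
    double d[3] = { 2.0, 3.0, 4.0 }, l[2] = { 0.5, 0.25 };
    double dplus[3], lplus[2];
    stationary_qd(3, d, l, 1.0, dplus, lplus);
    for (int i = 0; i < 3; ++i)
        printf("dplus[%d] = %f\n", i, dplus[i]);
    return 0;
}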
Exemplo n.º 29
0
/* Subroutine */ int claev2_(complex *a, complex *b, complex *c__, real *rt1, 
	real *rt2, real *cs1, complex *sn1)
{
    /* System generated locals */
    real r__1, r__2, r__3;
    complex q__1, q__2;

    /* Local variables */
    real t;
    complex w;

/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     November 2006 */

/*  Purpose */
/*  ======= */

/*  CLAEV2 computes the eigendecomposition of a 2-by-2 Hermitian matrix */
/*     [  A         B  ] */
/*     [  CONJG(B)  C  ]. */
/*  On return, RT1 is the eigenvalue of larger absolute value, RT2 is the */
/*  eigenvalue of smaller absolute value, and (CS1,SN1) is the unit right */
/*  eigenvector for RT1, giving the decomposition */

/*  [ CS1  CONJG(SN1) ] [    A     B ] [ CS1 -CONJG(SN1) ] = [ RT1  0  ] */
/*  [-SN1     CS1     ] [ CONJG(B) C ] [ SN1     CS1     ]   [  0  RT2 ]. */

/*  Arguments */
/*  ========= */

/*  A      (input) COMPLEX */
/*         The (1,1) element of the 2-by-2 matrix. */

/*  B      (input) COMPLEX */
/*         The (1,2) element and the conjugate of the (2,1) element of */
/*         the 2-by-2 matrix. */

/*  C      (input) COMPLEX */
/*         The (2,2) element of the 2-by-2 matrix. */

/*  RT1    (output) REAL */
/*         The eigenvalue of larger absolute value. */

/*  RT2    (output) REAL */
/*         The eigenvalue of smaller absolute value. */

/*  CS1    (output) REAL */
/*  SN1    (output) COMPLEX */
/*         The vector (CS1, SN1) is a unit right eigenvector for RT1. */

/*  Further Details */
/*  =============== */

/*  RT1 is accurate to a few ulps barring over/underflow. */

/*  RT2 may be inaccurate if there is massive cancellation in the */
/*  determinant A*C-B*B; higher precision or correctly rounded or */
/*  correctly truncated arithmetic would be needed to compute RT2 */
/*  accurately in all cases. */

/*  CS1 and SN1 are accurate to a few ulps barring over/underflow. */

/*  Overflow is possible only if RT1 is within a factor of 5 of overflow. */
/*  Underflow is harmless if the input data is 0 or exceeds */
/*     underflow_threshold / macheps. */

/* ===================================================================== */

    if (c_abs(b) == 0.f) {
	w.r = 1.f, w.i = 0.f;
    } else {
	r_cnjg(&q__2, b);
	r__1 = c_abs(b);
	q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
	w.r = q__1.r, w.i = q__1.i;
    }
    r__1 = a->r;
    r__2 = c_abs(b);
    r__3 = c__->r;
    slaev2_(&r__1, &r__2, &r__3, rt1, rt2, cs1, &t);
    q__1.r = t * w.r, q__1.i = t * w.i;
    sn1->r = q__1.r, sn1->i = q__1.i;
    return 0;

/*     End of CLAEV2 */

} /* claev2_ */
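
The eigenvalues returned here follow the closed form for a 2-by-2 Hermitian matrix: with m = (A+C)/2 and r = sqrt( ((A-C)/2)**2 + ABS(B)**2 ), the eigenvalues are m+r and m-r (CLAEV2 additionally orders them by absolute value, which the sketch below does not). The short C99 program below checks that formula on one sample matrix; the name herm2_eigs is invented and the code does not call claev2_.

#include <complex.h>
#include <math.h>
#include <stdio.h>

/* Eigenvalues of the Hermitian matrix [ a        b ]
                                       [ conj(b)  c ]  with real a, c.   */
static void herm2_eigs(float a, float complex b, float c,
                       float *rt1, float *rt2)
{
    float mid = 0.5f * (a + c);
    float rad = hypotf(0.5f * (a - c), cabsf(b));
    *rt1 = mid + rad;                    /* larger eigenvalue  */
    *rt2 = mid - rad;                    /* smaller eigenvalue */
}

int main(void)
{
    float rt1, rt2;
    /* [ 2    1+i ]
       [ 1-i  3   ]  has trace 5, det 4, so eigenvalues 4 and 1.         */
    herm2_eigs(2.f, 1.f + 1.f * I, 3.f, &rt1, &rt2);
    printf("rt1 = %f, rt2 = %f\n", rt1, rt2);
    return 0;
}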
Exemplo n.º 30
0
double clantb_(char *norm, char *uplo, char *diag, int *n, int *k, 
	 complex *ab, int *ldab, float *work)
{
    /* System generated locals */
    int ab_dim1, ab_offset, i__1, i__2, i__3, i__4, i__5;
    float ret_val, r__1, r__2;

    /* Builtin functions */
    double c_abs(complex *), sqrt(double);

    /* Local variables */
    int i__, j, l;
    float sum, scale;
    int udiag;
    extern int lsame_(char *, char *);
    float value;
    extern  int classq_(int *, complex *, int *, float 
	    *, float *);


/*  -- LAPACK auxiliary routine (version 3.2) -- */
/*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/*     November 2006 */

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  CLANTB  returns the value of the one norm,  or the Frobenius norm, or */
/*  the  infinity norm,  or the element of  largest absolute value  of an */
/*  n by n triangular band matrix A,  with ( k + 1 ) diagonals. */

/*  Description */
/*  =========== */

/*  CLANTB returns the value */

/*     CLANTB = ( MAX(ABS(A(i,j))), NORM = 'M' or 'm' */
/*              ( */
/*              ( norm1(A),         NORM = '1', 'O' or 'o' */
/*              ( */
/*              ( normI(A),         NORM = 'I' or 'i' */
/*              ( */
/*              ( normF(A),         NORM = 'F', 'f', 'E' or 'e' */

/*  where  norm1  denotes the  one norm of a matrix (maximum column sum), */
/*  normI  denotes the  infinity norm  of a matrix  (maximum row sum) and */
/*  normF  denotes the  Frobenius norm of a matrix (square root of sum of */
/*  squares).  Note that  MAX(ABS(A(i,j)))  is not a consistent matrix norm. */

/*  Arguments */
/*  ========= */

/*  NORM    (input) CHARACTER*1 */
/*          Specifies the value to be returned in CLANTB as described */
/*          above. */

/*  UPLO    (input) CHARACTER*1 */
/*          Specifies whether the matrix A is upper or lower triangular. */
/*          = 'U':  Upper triangular */
/*          = 'L':  Lower triangular */

/*  DIAG    (input) CHARACTER*1 */
/*          Specifies whether or not the matrix A is unit triangular. */
/*          = 'N':  Non-unit triangular */
/*          = 'U':  Unit triangular */

/*  N       (input) INTEGER */
/*          The order of the matrix A.  N >= 0.  When N = 0, CLANTB is */
/*          set to zero. */

/*  K       (input) INTEGER */
/*          The number of super-diagonals of the matrix A if UPLO = 'U', */
/*          or the number of sub-diagonals of the matrix A if UPLO = 'L'. */
/*          K >= 0. */

/*  AB      (input) COMPLEX array, dimension (LDAB,N) */
/*          The upper or lower triangular band matrix A, stored in the */
/*          first k+1 rows of AB.  The j-th column of A is stored */
/*          in the j-th column of the array AB as follows: */
/*          if UPLO = 'U', AB(k+1+i-j,j) = A(i,j) for MAX(1,j-k)<=i<=j; */
/*          if UPLO = 'L', AB(1+i-j,j)   = A(i,j) for j<=i<=MIN(n,j+k). */
/*          Note that when DIAG = 'U', the elements of the array AB */
/*          corresponding to the diagonal elements of the matrix A are */
/*          not referenced, but are assumed to be one. */
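/*          For example, when N = 4 and K = 2 an upper triangular band */
/*          matrix is stored as */
/*             [  *    *   a13  a24 ] */
/*             [  *   a12  a23  a34 ] */
/*             [ a11  a22  a33  a44 ] */
/*          and a lower triangular band matrix as */
/*             [ a11  a22  a33  a44 ] */
/*             [ a21  a32  a43   *  ] */
/*             [ a31  a42   *    *  ] */
/*          where * denotes an element that is not referenced. */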

/*  LDAB    (input) INTEGER */
/*          The leading dimension of the array AB.  LDAB >= K+1. */

/*  WORK    (workspace) REAL array, dimension (MAX(1,LWORK)), */
/*          where LWORK >= N when NORM = 'I'; otherwise, WORK is not */
/*          referenced. */

/* ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */
/*     .. Executable Statements .. */

    /* Parameter adjustments */
    ab_dim1 = *ldab;
    ab_offset = 1 + ab_dim1;
    ab -= ab_offset;
    --work;

    /* Function Body */
    if (*n == 0) {
	value = 0.f;
    } else if (lsame_(norm, "M")) {

/*        Find MAX(ABS(A(i,j))). */
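/*        With DIAG = 'U' the diagonal entries are assumed to be 1, so */
/*        VALUE starts at 1 and only the off-diagonal band entries are */
/*        scanned. */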

	if (lsame_(diag, "U")) {
	    value = 1.f;
	    if (lsame_(uplo, "U")) {
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
/* Computing MAX */
		    i__2 = *k + 2 - j;
		    i__3 = *k;
		    for (i__ = MAX(i__2,1); i__ <= i__3; ++i__) {
/* Computing MAX */
			r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
			value = MAX(r__1,r__2);
/* L10: */
		    }
/* L20: */
		}
	    } else {
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
/* Computing MIN */
		    i__2 = *n + 1 - j, i__4 = *k + 1;
		    i__3 = MIN(i__2,i__4);
		    for (i__ = 2; i__ <= i__3; ++i__) {
/* Computing MAX */
			r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
			value = MAX(r__1,r__2);
/* L30: */
		    }
/* L40: */
		}
	    }
	} else {
	    value = 0.f;
	    if (lsame_(uplo, "U")) {
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
/* Computing MAX */
		    i__3 = *k + 2 - j;
		    i__2 = *k + 1;
		    for (i__ = MAX(i__3,1); i__ <= i__2; ++i__) {
/* Computing MAX */
			r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
			value = MAX(r__1,r__2);
/* L50: */
		    }
/* L60: */
		}
	    } else {
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
/* Computing MIN */
		    i__3 = *n + 1 - j, i__4 = *k + 1;
		    i__2 = MIN(i__3,i__4);
		    for (i__ = 1; i__ <= i__2; ++i__) {
/* Computing MAX */
			r__1 = value, r__2 = c_abs(&ab[i__ + j * ab_dim1]);
			value = MAX(r__1,r__2);
/* L70: */
		    }
/* L80: */
		}
	    }
	}
    } else if (lsame_(norm, "O") || *(unsigned char *)norm == '1') {

/*        Find norm1(A). */
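/*        norm1(A) is the largest absolute column sum; with DIAG = 'U' */
/*        each column sum starts at 1 for the assumed unit diagonal. */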

	value = 0.f;
	udiag = lsame_(diag, "U");
	if (lsame_(uplo, "U")) {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		if (udiag) {
		    sum = 1.f;
/* Computing MAX */
		    i__2 = *k + 2 - j;
		    i__3 = *k;
		    for (i__ = MAX(i__2,1); i__ <= i__3; ++i__) {
			sum += c_abs(&ab[i__ + j * ab_dim1]);
/* L90: */
		    }
		} else {
		    sum = 0.f;
/* Computing MAX */
		    i__3 = *k + 2 - j;
		    i__2 = *k + 1;
		    for (i__ = MAX(i__3,1); i__ <= i__2; ++i__) {
			sum += c_abs(&ab[i__ + j * ab_dim1]);
/* L100: */
		    }
		}
		value = MAX(value,sum);
/* L110: */
	    }
	} else {
	    i__1 = *n;
	    for (j = 1; j <= i__1; ++j) {
		if (udiag) {
		    sum = 1.f;
/* Computing MIN */
		    i__3 = *n + 1 - j, i__4 = *k + 1;
		    i__2 = MIN(i__3,i__4);
		    for (i__ = 2; i__ <= i__2; ++i__) {
			sum += c_abs(&ab[i__ + j * ab_dim1]);
/* L120: */
		    }
		} else {
		    sum = 0.f;
/* Computing MIN */
		    i__3 = *n + 1 - j, i__4 = *k + 1;
		    i__2 = MIN(i__3,i__4);
		    for (i__ = 1; i__ <= i__2; ++i__) {
			sum += c_abs(&ab[i__ + j * ab_dim1]);
/* L130: */
		    }
		}
		value = MAX(value,sum);
/* L140: */
	    }
	}
    } else if (lsame_(norm, "I")) {

/*        Find normI(A). */
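/*        The absolute row sums are accumulated in WORK(1:N) and the */
/*        largest one is returned; with DIAG = 'U' each row sum starts */
/*        at 1 for the assumed unit diagonal. */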

	value = 0.f;
	if (lsame_(uplo, "U")) {
	    if (lsame_(diag, "U")) {
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    work[i__] = 1.f;
/* L150: */
		}
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
		    l = *k + 1 - j;
/* Computing MAX */
		    i__2 = 1, i__3 = j - *k;
		    i__4 = j - 1;
		    for (i__ = MAX(i__2,i__3); i__ <= i__4; ++i__) {
			work[i__] += c_abs(&ab[l + i__ + j * ab_dim1]);
/* L160: */
		    }
/* L170: */
		}
	    } else {
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    work[i__] = 0.f;
/* L180: */
		}
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
		    l = *k + 1 - j;
/* Computing MAX */
		    i__4 = 1, i__2 = j - *k;
		    i__3 = j;
		    for (i__ = MAX(i__4,i__2); i__ <= i__3; ++i__) {
			work[i__] += c_abs(&ab[l + i__ + j * ab_dim1]);
/* L190: */
		    }
/* L200: */
		}
	    }
	} else {
	    if (lsame_(diag, "U")) {
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    work[i__] = 1.f;
/* L210: */
		}
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
		    l = 1 - j;
/* Computing MIN */
		    i__4 = *n, i__2 = j + *k;
		    i__3 = MIN(i__4,i__2);
		    for (i__ = j + 1; i__ <= i__3; ++i__) {
			work[i__] += c_abs(&ab[l + i__ + j * ab_dim1]);
/* L220: */
		    }
/* L230: */
		}
	    } else {
		i__1 = *n;
		for (i__ = 1; i__ <= i__1; ++i__) {
		    work[i__] = 0.f;
/* L240: */
		}
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
		    l = 1 - j;
/* Computing MIN */
		    i__4 = *n, i__2 = j + *k;
		    i__3 = MIN(i__4,i__2);
		    for (i__ = j; i__ <= i__3; ++i__) {
			work[i__] += c_abs(&ab[l + i__ + j * ab_dim1]);
/* L250: */
		    }
/* L260: */
		}
	    }
	}
	i__1 = *n;
	for (i__ = 1; i__ <= i__1; ++i__) {
/* Computing MAX */
	    r__1 = value, r__2 = work[i__];
	    value = MAX(r__1,r__2);
/* L270: */
	}
    } else if (lsame_(norm, "F") || lsame_(norm, "E")) {

/*        Find normF(A). */
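/*        CLASSQ keeps the sum of squares in the scaled form */
/*        SCALE**2 * SUM to avoid overflow; the Frobenius norm is then */
/*        SCALE*SQRT(SUM).  With DIAG = 'U' the unit diagonal */
/*        contributes N to SUM with SCALE = 1. */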

	if (lsame_(uplo, "U")) {
	    if (lsame_(diag, "U")) {
		scale = 1.f;
		sum = (float) (*n);
		if (*k > 0) {
		    i__1 = *n;
		    for (j = 2; j <= i__1; ++j) {
/* Computing MIN */
			i__4 = j - 1;
			i__3 = MIN(i__4,*k);
/* Computing MAX */
			i__2 = *k + 2 - j;
			classq_(&i__3, &ab[MAX(i__2, 1)+ j * ab_dim1], &c__1, 
				&scale, &sum);
/* L280: */
		    }
		}
	    } else {
		scale = 0.f;
		sum = 1.f;
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
/* Computing MIN */
		    i__4 = j, i__2 = *k + 1;
		    i__3 = MIN(i__4,i__2);
/* Computing MAX */
		    i__5 = *k + 2 - j;
		    classq_(&i__3, &ab[MAX(i__5, 1)+ j * ab_dim1], &c__1, &
			    scale, &sum);
/* L290: */
		}
	    }
	} else {
	    if (lsame_(diag, "U")) {
		scale = 1.f;
		sum = (float) (*n);
		if (*k > 0) {
		    i__1 = *n - 1;
		    for (j = 1; j <= i__1; ++j) {
/* Computing MIN */
			i__4 = *n - j;
			i__3 = MIN(i__4,*k);
			classq_(&i__3, &ab[j * ab_dim1 + 2], &c__1, &scale, &
				sum);
/* L300: */
		    }
		}
	    } else {
		scale = 0.f;
		sum = 1.f;
		i__1 = *n;
		for (j = 1; j <= i__1; ++j) {
/* Computing MIN */
		    i__4 = *n - j + 1, i__2 = *k + 1;
		    i__3 = MIN(i__4,i__2);
		    classq_(&i__3, &ab[j * ab_dim1 + 1], &c__1, &scale, &sum);
/* L310: */
		}
	    }
	}
	value = scale * sqrt(sum);
    }

    ret_val = value;
    return ret_val;

/*     End of CLANTB */

} /* clantb_ */
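
A minimal usage sketch for clantb_, again assuming the f2c typedefs for real
and complex; it evaluates the one norm and the infinity norm of a small upper
triangular band matrix stored as described for the AB argument above:

#include <stdio.h>

typedef float real;                    /* f2c convention (assumption) */
typedef struct { real r, i; } complex; /* f2c convention (assumption) */

extern double clantb_(char *norm, char *uplo, char *diag, int *n, int *k,
                      complex *ab, int *ldab, float *work);

int main(void)
{
    /* Upper triangular band matrix with one super-diagonal (K = 1):
           [ 1  2  0 ]
       A = [ 0  3  4 ]
           [ 0  0  5 ]
       stored column by column in the first K+1 = 2 rows of AB, so that
       AB(K+1+i-j, j) = A(i,j); AB(1,1) is not referenced. */
    complex ab[6] = {
        {0.f, 0.f}, {1.f, 0.f},   /* column 1:  *,   a11 */
        {2.f, 0.f}, {3.f, 0.f},   /* column 2:  a12, a22 */
        {4.f, 0.f}, {5.f, 0.f}    /* column 3:  a23, a33 */
    };
    int n = 3, k = 1, ldab = 2;
    float work[3];                /* referenced only when NORM = 'I' */

    double one_norm = clantb_("1", "U", "N", &n, &k, ab, &ldab, work);
    double inf_norm = clantb_("I", "U", "N", &n, &k, ab, &ldab, work);

    /* Column sums are 1, 5 and 9; row sums are 3, 7 and 5. */
    printf("norm1(A) = %g  normI(A) = %g\n", one_norm, inf_norm);
    return 0;
}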