extern "C" void magma_zlarfxsym(magma_int_t N, magmaDoubleComplex *A, magma_int_t LDA, magmaDoubleComplex *V, magmaDoubleComplex *TAU) { magma_int_t IONE=1; magmaDoubleComplex dtmp; magmaDoubleComplex Z_ZERO = MAGMA_Z_ZERO; //magmaDoubleComplex Z_ONE = MAGMA_Z_ONE; magmaDoubleComplex Z_MONE = MAGMA_Z_NEG_ONE; magmaDoubleComplex Z_HALF = MAGMA_Z_HALF; //magmaDoubleComplex WORK[N]; magmaDoubleComplex *WORK; magma_zmalloc_cpu( &WORK, N ); /* apply left and right on A(st:ed,st:ed)*/ //magma_zlarfxsym(len,A(st,st),LDX,V(st),TAU(st)); /* X = AVtau */ blasf77_zhemv("L",&N, TAU, A, &LDA, V, &IONE, &Z_ZERO, WORK, &IONE); /* je calcul dtmp= X'*V */ dtmp = magma_cblas_zdotc(N, WORK, IONE, V, IONE); /* je calcul 1/2 X'*V*t = 1/2*dtmp*tau */ dtmp = -dtmp * Z_HALF * (*TAU); /* je calcul W=X-1/2VX'Vt = X - dtmp*V */ /* for (j = 0; j < N; j++) WORK[j] = WORK[j] + (dtmp*V[j]); */ blasf77_zaxpy(&N, &dtmp, V, &IONE, WORK, &IONE); /* performs the symmetric rank 2 operation A := alpha*x*y' + alpha*y*x' + A */ blasf77_zher2("L",&N,&Z_MONE,WORK,&IONE,V,&IONE,A,&LDA); magma_free_cpu(WORK); }
// ---------------------------------------- int main( int argc, char** argv ) { TESTING_INIT(); //real_Double_t t_m, t_c, t_f; magma_int_t ione = 1; magmaDoubleComplex *A, *B; double diff, error; magma_int_t ISEED[4] = {0,0,0,1}; magma_int_t m, n, k, size, maxn, ld; magmaDoubleComplex x2_m, x2_c; // complex x for magma, cblas/fortran blas respectively double x_m, x_c; // x for magma, cblas/fortran blas respectively magma_opts opts; parse_opts( argc, argv, &opts ); opts.tolerance = max( 100., opts.tolerance ); double tol = opts.tolerance * lapackf77_dlamch("E"); gTol = tol; printf( "!! Calling these CBLAS and Fortran BLAS sometimes crashes (segfault), which !!\n" "!! is why we use wrappers. It does not necesarily indicate a bug in MAGMA. !!\n" "\n" "Diff compares MAGMA wrapper to CBLAS and BLAS function; should be exactly 0.\n" "Error compares MAGMA implementation to CBLAS and BLAS function; should be ~ machine epsilon.\n" "\n" ); double total_diff = 0.; double total_error = 0.; int inc[] = { 1 }; //{ -2, -1, 1, 2 }; //{ 1 }; //{ -1, 1 }; int ninc = sizeof(inc)/sizeof(*inc); for( int itest = 0; itest < opts.ntest; ++itest ) { m = opts.msize[itest]; n = opts.nsize[itest]; k = opts.ksize[itest]; for( int iincx = 0; iincx < ninc; ++iincx ) { magma_int_t incx = inc[iincx]; for( int iincy = 0; iincy < ninc; ++iincy ) { magma_int_t incy = inc[iincy]; printf("=========================================================================\n"); printf( "m=%d, n=%d, k=%d, incx = %d, incy = %d\n", (int) m, (int) n, (int) k, (int) incx, (int) incy ); printf( "Function MAGMA CBLAS BLAS Diff Error\n" " msec msec msec\n" ); // allocate matrices // over-allocate so they can be any combination of // {m,n,k} * {abs(incx), abs(incy)} by // {m,n,k} * {abs(incx), abs(incy)} maxn = max( max( m, n ), k ) * max( abs(incx), abs(incy) ); ld = max( 1, maxn ); size = ld*maxn; magma_zmalloc_pinned( &A, size ); assert( A != NULL ); magma_zmalloc_pinned( &B, size ); assert( B != NULL ); // initialize 
matrices lapackf77_zlarnv( &ione, ISEED, &size, A ); lapackf77_zlarnv( &ione, ISEED, &size, B ); printf( "Level 1 BLAS ----------------------------------------------------------\n" ); // ----- test DZASUM // get one-norm of column j of A if ( incx > 0 && incx == incy ) { // positive, no incy diff = 0; error = 0; for( int j = 0; j < k; ++j ) { x_m = magma_cblas_dzasum( m, A(0,j), incx ); x_c = cblas_dzasum( m, A(0,j), incx ); diff += fabs( x_m - x_c ); x_c = blasf77_dzasum( &m, A(0,j), &incx ); error += fabs( (x_m - x_c) / (m*x_c) ); } output( "dzasum", diff, error ); total_diff += diff; total_error += error; } // ----- test DZNRM2 // get two-norm of column j of A if ( incx > 0 && incx == incy ) { // positive, no incy diff = 0; error = 0; for( int j = 0; j < k; ++j ) { x_m = magma_cblas_dznrm2( m, A(0,j), incx ); x_c = cblas_dznrm2( m, A(0,j), incx ); diff += fabs( x_m - x_c ); x_c = blasf77_dznrm2( &m, A(0,j), &incx ); error += fabs( (x_m - x_c) / (m*x_c) ); } output( "dznrm2", diff, error ); total_diff += diff; total_error += error; } // ----- test ZDOTC // dot columns, Aj^H Bj diff = 0; error = 0; for( int j = 0; j < k; ++j ) { // MAGMA implementation, not just wrapper x2_m = magma_cblas_zdotc( m, A(0,j), incx, B(0,j), incy ); // crashes on MKL 11.1.2, ILP64 #if ! defined( MAGMA_WITH_MKL ) #ifdef COMPLEX cblas_zdotc_sub( m, A(0,j), incx, B(0,j), incy, &x2_c ); #else x2_c = cblas_zdotc( m, A(0,j), incx, B(0,j), incy ); #endif error += fabs( x2_m - x2_c ) / fabs( m*x2_c ); #endif // crashes on MacOS 10.9 #if ! 
defined( __APPLE__ ) x2_c = blasf77_zdotc( &m, A(0,j), &incx, B(0,j), &incy ); error += fabs( x2_m - x2_c ) / fabs( m*x2_c ); #endif } output( "zdotc", diff, error ); total_diff += diff; total_error += error; total_error += error; // ----- test ZDOTU // dot columns, Aj^T * Bj diff = 0; error = 0; for( int j = 0; j < k; ++j ) { // MAGMA implementation, not just wrapper x2_m = magma_cblas_zdotu( m, A(0,j), incx, B(0,j), incy ); // crashes on MKL 11.1.2, ILP64 #if ! defined( MAGMA_WITH_MKL ) #ifdef COMPLEX cblas_zdotu_sub( m, A(0,j), incx, B(0,j), incy, &x2_c ); #else x2_c = cblas_zdotu( m, A(0,j), incx, B(0,j), incy ); #endif error += fabs( x2_m - x2_c ) / fabs( m*x2_c ); #endif // crashes on MacOS 10.9 #if ! defined( __APPLE__ ) x2_c = blasf77_zdotu( &m, A(0,j), &incx, B(0,j), &incy ); error += fabs( x2_m - x2_c ) / fabs( m*x2_c ); #endif } output( "zdotu", diff, error ); total_diff += diff; total_error += error; // tell user about disabled functions #if defined( MAGMA_WITH_MKL ) printf( "cblas_zdotc and cblas_zdotu disabled with MKL (segfaults)\n" ); #endif #if defined( __APPLE__ ) printf( "blasf77_zdotc and blasf77_zdotu disabled on MacOS (segfaults)\n" ); #endif // cleanup magma_free_pinned( A ); magma_free_pinned( B ); fflush( stdout ); }}} // itest, incx, incy // TODO use average error? printf( "sum diffs = %8.2g, MAGMA wrapper compared to CBLAS and Fortran BLAS; should be exactly 0.\n" "sum errors = %8.2e, MAGMA implementation compared to CBLAS and Fortran BLAS; should be ~ machine epsilon.\n\n", total_diff, total_error ); if ( total_diff != 0. ) { printf( "some tests failed diff == 0.; see above.\n" ); } else { printf( "all tests passed diff == 0.\n" ); } TESTING_FINALIZE(); int status = (total_diff != 0.); return status; }
// ----------------------------------------
// Tester (v2): compares MAGMA's CBLAS-style Level-1 BLAS re-implementations
// (dzasum, dznrm2, zdotc, zdotu) against CBLAS, Fortran BLAS, and a naive
// inline implementation, over positive and negative increments.
int main( int argc, char** argv )
{
    TESTING_INIT();

    //real_Double_t t_m, t_c, t_f;
    magma_int_t ione = 1;

    magmaDoubleComplex *A, *B;
    double error_cblas, error_fblas, error_inline;
    magma_int_t ISEED[4] = {0,0,0,1};
    magma_int_t i, j, k, m, n, size, maxn, ld;

    // complex x for magma, cblas, fortran, inline blas respectively
    magmaDoubleComplex x2_m, x2_c, x2_f, x2_i;

    // real x for magma, cblas, fortran, inline blas respectively
    double x_m, x_c, x_f, x_i;

    // some comparisons are compiled out below, so silence unused warnings
    MAGMA_UNUSED( x_c );
    MAGMA_UNUSED( x_f );
    MAGMA_UNUSED( x2_c );
    MAGMA_UNUSED( x2_f );
    MAGMA_UNUSED( x2_m );

    magma_opts opts;
    opts.parse_opts( argc, argv );

    opts.tolerance = max( 100., opts.tolerance );
    double tol = opts.tolerance * lapackf77_dlamch("E");
    gTol = tol;

    // increments to test; maxinc sizes the over-allocation below
    magma_int_t inc[] = { -2, -1, 1, 2 }; //{ 1 }; //{ -1, 1 };
    magma_int_t ninc = sizeof(inc)/sizeof(*inc);
    magma_int_t maxinc = 0;
    for( i=0; i < ninc; ++i ) {
        maxinc = max( maxinc, abs(inc[i]) );
    }

    printf( "!! Calling these CBLAS and Fortran BLAS sometimes crashes (segfaults), which !!\n"
            "!! is why we use wrappers. It does not necesarily indicate a bug in MAGMA. !!\n"
            "!! If MAGMA_WITH_MKL or __APPLE__ are defined, known failures are skipped. !!\n"
            "\n" );

    // tell user about disabled functions
    #ifndef HAVE_CBLAS
        printf( "n/a: HAVE_CBLAS not defined, so no cblas functions tested.\n\n" );
    #endif

    #if defined(MAGMA_WITH_MKL)
        printf( "n/a: cblas_zdotc, cblas_zdotu, blasf77_zdotc, and blasf77_zdotu are disabled with MKL, due to segfaults.\n\n" );
    #endif

    #if defined(__APPLE__)
        printf( "n/a: blasf77_zdotc and blasf77_zdotu are disabled on MacOS, due to segfaults.\n\n" );
    #endif

    printf( "%% Error w.r.t. Error w.r.t. Error w.r.t.\n"
            "%% M N K incx incy Function CBLAS Fortran BLAS inline\n"
            "%%====================================================================================\n" );

    for( int itest = 0; itest < opts.ntest; ++itest ) {
        if ( itest > 0 ) {
            printf( "%%----------------------------------------------------------------------\n" );
        }

        m = opts.msize[itest];
        n = opts.nsize[itest];
        k = opts.ksize[itest];

        // allocate matrices
        // over-allocate so they can be any combination of
        // {m,n,k} * {abs(incx), abs(incy)} by
        // {m,n,k} * {abs(incx), abs(incy)}
        maxn = max( max( m, n ), k ) * maxinc;
        ld = max( 1, maxn );
        size = ld*maxn;
        TESTING_MALLOC_CPU( A, magmaDoubleComplex, size );
        TESTING_MALLOC_CPU( B, magmaDoubleComplex, size );

        // initialize matrices
        lapackf77_zlarnv( &ione, ISEED, &size, A );
        lapackf77_zlarnv( &ione, ISEED, &size, B );

        // ----- test DZASUM
        for( int iincx = 0; iincx < ninc; ++iincx ) {
            magma_int_t incx = inc[iincx];

            for( int iincy = 0; iincy < ninc; ++iincy ) {
                magma_int_t incy = inc[iincy];

                // get one-norm of column j of A
                if ( incx > 0 && incx == incy ) {  // positive, no incy
                    error_cblas  = 0;
                    error_fblas  = 0;
                    error_inline = 0;
                    for( j=0; j < k; ++j ) {
                        x_m = magma_cblas_dzasum( m, A(0,j), incx );

                        #ifdef HAVE_CBLAS
                            x_c = cblas_dzasum( m, A(0,j), incx );
                            error_cblas = max( error_cblas, fabs(x_m - x_c) / fabs(m*x_c) );
                        #else
                            x_c = 0;
                            error_cblas = SKIPPED_FLAG;
                        #endif

                        x_f = blasf77_dzasum( &m, A(0,j), &incx );
                        error_fblas = max( error_fblas, fabs(x_m - x_f) / fabs(m*x_f) );

                        // inline implementation
                        x_i = 0;
                        for( i=0; i < m; ++i ) {
                            x_i += MAGMA_Z_ABS1( *A(i*incx,j) );  // |real(Aij)| + |imag(Aij)|
                        }
                        error_inline = max( error_inline, fabs(x_m - x_i) / fabs(m*x_i) );

                        //printf( "dzasum xm %.8e, xc %.8e, xf %.8e, xi %.8e\n", x_m, x_c, x_f, x_i );
                    }
                    output( "dzasum", m, n, k, incx, incy, error_cblas, error_fblas, error_inline );
                }
            }
        }
        printf( "\n" );

        // ----- test DZNRM2
        // get two-norm of column j of A
        for( int iincx = 0; iincx < ninc; ++iincx ) {
            magma_int_t incx = inc[iincx];

            for( int iincy = 0; iincy < ninc; ++iincy ) {
                magma_int_t incy = inc[iincy];

                if ( incx > 0 && incx == incy ) {  // positive, no incy
                    error_cblas  = 0;
                    error_fblas  = 0;
                    error_inline = 0;
                    for( j=0; j < k; ++j ) {
                        x_m = magma_cblas_dznrm2( m, A(0,j), incx );

                        #ifdef HAVE_CBLAS
                            x_c = cblas_dznrm2( m, A(0,j), incx );
                            error_cblas = max( error_cblas, fabs(x_m - x_c) / fabs(m*x_c) );
                        #else
                            x_c = 0;
                            error_cblas = SKIPPED_FLAG;
                        #endif

                        x_f = blasf77_dznrm2( &m, A(0,j), &incx );
                        error_fblas = max( error_fblas, fabs(x_m - x_f) / fabs(m*x_f) );

                        // inline implementation (poor -- doesn't scale)
                        x_i = 0;
                        for( i=0; i < m; ++i ) {
                            x_i += real( *A(i*incx,j) ) * real( *A(i*incx,j) )
                                 + imag( *A(i*incx,j) ) * imag( *A(i*incx,j) );
                            // same: real( conj( *A(i*incx,j) ) * *A(i*incx,j) );
                        }
                        x_i = sqrt( x_i );
                        error_inline = max( error_inline, fabs(x_m - x_i) / fabs(m*x_i) );

                        //printf( "dznrm2 xm %.8e, xc %.8e, xf %.8e, xi %.8e\n", x_m, x_c, x_f, x_i );
                    }
                    output( "dznrm2", m, n, k, incx, incy, error_cblas, error_fblas, error_inline );
                }
            }
        }
        printf( "\n" );

        // ----- test ZDOTC
        // dot columns, Aj^H Bj
        for( int iincx = 0; iincx < ninc; ++iincx ) {
            magma_int_t incx = inc[iincx];

            for( int iincy = 0; iincy < ninc; ++iincy ) {
                magma_int_t incy = inc[iincy];

                error_cblas  = 0;
                error_fblas  = 0;
                error_inline = 0;
                for( j=0; j < k; ++j ) {
                    // MAGMA implementation, not just wrapper
                    x2_m = magma_cblas_zdotc( m, A(0,j), incx, B(0,j), incy );

                    // crashes with MKL 11.1.2, ILP64
                    #if defined(HAVE_CBLAS) && ! defined(MAGMA_WITH_MKL)
                        #ifdef COMPLEX
                        cblas_zdotc_sub( m, A(0,j), incx, B(0,j), incy, &x2_c );
                        #else
                        x2_c = cblas_zdotc( m, A(0,j), incx, B(0,j), incy );
                        #endif
                        error_cblas = max( error_cblas, fabs(x2_m - x2_c) / fabs(m*x2_c) );
                    #else
                        x2_c = MAGMA_Z_ZERO;
                        error_cblas = SKIPPED_FLAG;
                    #endif

                    // crashes with MKL 11.2.3 and MacOS 10.9
                    #if (! defined(COMPLEX) || ! defined(MAGMA_WITH_MKL)) && ! defined(__APPLE__)
                        x2_f = blasf77_zdotc( &m, A(0,j), &incx, B(0,j), &incy );
                        error_fblas = max( error_fblas, fabs(x2_m - x2_f) / fabs(m*x2_f) );
                    #else
                        x2_f = MAGMA_Z_ZERO;
                        error_fblas = SKIPPED_FLAG;
                    #endif

                    // inline implementation
                    // NOTE(review): these start offsets use n, but the dot product below
                    // runs over m elements; reference BLAS starts negative-increment
                    // vectors at element (len-1)*|inc| with len = m here. Confirm whether
                    // n vs m is intended when m != n.
                    x2_i = MAGMA_Z_ZERO;
                    magma_int_t A_offset = (incx > 0 ? 0 : (-n + 1)*incx);
                    magma_int_t B_offset = (incy > 0 ? 0 : (-n + 1)*incy);
                    for( i=0; i < m; ++i ) {
                        x2_i += conj( *A(A_offset + i*incx,j) ) * *B(B_offset + i*incy,j);
                    }
                    error_inline = max( error_inline, fabs(x2_m - x2_i) / fabs(m*x2_i) );

                    //printf( "zdotc xm %.8e + %.8ei, xc %.8e + %.8ei, xf %.8e + %.8ei, xi %.8e + %.8ei\n",
                    //        real(x2_m), imag(x2_m),
                    //        real(x2_c), imag(x2_c),
                    //        real(x2_f), imag(x2_f),
                    //        real(x2_i), imag(x2_i) );
                }
                output( "zdotc", m, n, k, incx, incy, error_cblas, error_fblas, error_inline );
            }
        }
        printf( "\n" );

        // ----- test ZDOTU
        // dot columns, Aj^T * Bj
        for( int iincx = 0; iincx < ninc; ++iincx ) {
            magma_int_t incx = inc[iincx];

            for( int iincy = 0; iincy < ninc; ++iincy ) {
                magma_int_t incy = inc[iincy];

                error_cblas  = 0;
                error_fblas  = 0;
                error_inline = 0;
                for( j=0; j < k; ++j ) {
                    // MAGMA implementation, not just wrapper
                    x2_m = magma_cblas_zdotu( m, A(0,j), incx, B(0,j), incy );

                    // crashes with MKL 11.1.2, ILP64
                    #if defined(HAVE_CBLAS) && ! defined(MAGMA_WITH_MKL)
                        #ifdef COMPLEX
                        cblas_zdotu_sub( m, A(0,j), incx, B(0,j), incy, &x2_c );
                        #else
                        x2_c = cblas_zdotu( m, A(0,j), incx, B(0,j), incy );
                        #endif
                        error_cblas = max( error_cblas, fabs(x2_m - x2_c) / fabs(m*x2_c) );
                    #else
                        x2_c = MAGMA_Z_ZERO;
                        error_cblas = SKIPPED_FLAG;
                    #endif

                    // crashes with MKL 11.2.3 and MacOS 10.9
                    #if (! defined(COMPLEX) || ! defined(MAGMA_WITH_MKL)) && ! defined(__APPLE__)
                        x2_f = blasf77_zdotu( &m, A(0,j), &incx, B(0,j), &incy );
                        error_fblas = max( error_fblas, fabs(x2_m - x2_f) / fabs(m*x2_f) );
                    #else
                        x2_f = MAGMA_Z_ZERO;
                        error_fblas = SKIPPED_FLAG;
                    #endif

                    // inline implementation
                    // NOTE(review): same n-vs-m offset question as in the zdotc test above.
                    x2_i = MAGMA_Z_ZERO;
                    magma_int_t A_offset = (incx > 0 ? 0 : (-n + 1)*incx);
                    magma_int_t B_offset = (incy > 0 ? 0 : (-n + 1)*incy);
                    for( i=0; i < m; ++i ) {
                        x2_i += *A(A_offset + i*incx,j) * *B(B_offset + i*incy,j);
                    }
                    error_inline = max( error_inline, fabs(x2_m - x2_i) / fabs(m*x2_i) );

                    //printf( "zdotu xm %.8e + %.8ei, xc %.8e + %.8ei, xf %.8e + %.8ei, xi %.8e + %.8ei\n",
                    //        real(x2_m), imag(x2_m),
                    //        real(x2_c), imag(x2_c),
                    //        real(x2_f), imag(x2_f),
                    //        real(x2_i), imag(x2_i) );
                }
                output( "zdotu", m, n, k, incx, incy, error_cblas, error_fblas, error_inline );
            }
        }

        // cleanup
        TESTING_FREE_CPU( A );
        TESTING_FREE_CPU( B );
        fflush( stdout );
    }  // itest, incx, incy

    opts.cleanup();
    TESTING_FINALIZE();
    return gStatus;
}
/**
    Purpose
    -------
    ZLATRD reduces NB rows and columns of a complex Hermitian matrix A to
    Hermitian tridiagonal form by an orthogonal similarity
    transformation Q' * A * Q, and returns the matrices V and W which are
    needed to apply the transformation to the unreduced part of A.

    If UPLO = MagmaUpper, ZLATRD reduces the last NB rows and columns of a
    matrix, of which the upper triangle is supplied;
    if UPLO = MagmaLower, ZLATRD reduces the first NB rows and columns of a
    matrix, of which the lower triangle is supplied.

    This is an auxiliary routine called by ZHETRD.

    Arguments
    ---------
    @param[in]
    uplo    magma_uplo_t
            Specifies whether the upper or lower triangular part of the
            Hermitian matrix A is stored:
      -     = MagmaUpper: Upper triangular
      -     = MagmaLower: Lower triangular

    @param[in]
    n       INTEGER
            The order of the matrix A.

    @param[in]
    nb      INTEGER
            The number of rows and columns to be reduced.

    @param[in,out]
    A       COMPLEX_16 array, dimension (LDA,N)
            On entry, the Hermitian matrix A.  If UPLO = MagmaUpper, the leading
            n-by-n upper triangular part of A contains the upper
            triangular part of the matrix A, and the strictly lower
            triangular part of A is not referenced.  If UPLO = MagmaLower, the
            leading n-by-n lower triangular part of A contains the lower
            triangular part of the matrix A, and the strictly upper
            triangular part of A is not referenced.
            On exit:
      -     if UPLO = MagmaUpper, the last NB columns have been reduced to
            tridiagonal form, with the diagonal elements overwriting
            the diagonal elements of A; the elements above the diagonal
            with the array TAU, represent the orthogonal matrix Q as a
            product of elementary reflectors;
      -     if UPLO = MagmaLower, the first NB columns have been reduced to
            tridiagonal form, with the diagonal elements overwriting
            the diagonal elements of A; the elements below the diagonal
            with the array TAU, represent the orthogonal matrix Q as a
            product of elementary reflectors.
            See Further Details.

    @param[in]
    lda     INTEGER
            The leading dimension of the array A.  LDA >= max(1,N).
@param[out] e COMPLEX_16 array, dimension (N-1) If UPLO = MagmaUpper, E(n-nb:n-1) contains the superdiagonal elements of the last NB columns of the reduced matrix; if UPLO = MagmaLower, E(1:nb) contains the subdiagonal elements of the first NB columns of the reduced matrix. @param[out] tau COMPLEX_16 array, dimension (N-1) The scalar factors of the elementary reflectors, stored in TAU(n-nb:n-1) if UPLO = MagmaUpper, and in TAU(1:nb) if UPLO = MagmaLower. See Further Details. @param[out] W COMPLEX_16 array, dimension (LDW,NB) The n-by-nb matrix W required to update the unreduced part of A. @param[in] ldw INTEGER The leading dimension of the array W. LDW >= max(1,N). @param dA TODO: dimension (ldda, n)? @param ldda TODO: ldda >= n? @param dW TODO: dimension (lddw, ??) @param lddw TODO: lddw >= n ?? @param[in] queue magma_queue_t Queue to execute in. Further Details --------------- If UPLO = MagmaUpper, the matrix Q is represented as a product of elementary reflectors Q = H(n) H(n-1) . . . H(n-nb+1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a complex scalar, and v is a complex vector with v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), and tau in TAU(i-1). If UPLO = MagmaLower, the matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(nb). Each H(i) has the form H(i) = I - tau * v * v' where tau is a complex scalar, and v is a complex vector with v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), and tau in TAU(i). The elements of the vectors v together form the n-by-nb matrix V which is needed, with W, to apply the transformation to the unreduced part of the matrix, using a Hermitian rank-2k update of the form: A := A - V*W' - W*V'. 
    The contents of A on exit are illustrated by the following examples
    with n = 5 and nb = 2:

    if UPLO = MagmaUpper:                       if UPLO = MagmaLower:

      (  a   a   a   v4  v5 )                   (  d                  )
      (      a   a   v4  v5 )                   (  1   d              )
      (          a   1   v5 )                   (  v1  1   a          )
      (              d   1  )                   (  v1  v2  a   a      )
      (                  d  )                   (  v1  v2  a   a   a  )

    where d denotes a diagonal element of the reduced matrix, a denotes
    an element of the original matrix that is unchanged, and vi denotes
    an element of the vector defining H(i).

    @ingroup magma_zheev_aux
    ********************************************************************/
extern "C" magma_int_t
magma_zlatrd(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nb,
    magmaDoubleComplex *A,  magma_int_t lda,
    double *e, magmaDoubleComplex *tau,
    magmaDoubleComplex *W,  magma_int_t ldw,
    magmaDoubleComplex *work, magma_int_t lwork,
    magmaDoubleComplex_ptr dA, magma_int_t ldda,
    magmaDoubleComplex_ptr dW, magma_int_t lddw,
    magma_queue_t queue )
{
    // column-major accessors for the CPU and GPU copies of A and W
#define A(i_, j_) (A + (i_) + (j_)*lda)
#define W(i_, j_) (W + (i_) + (j_)*ldw)

#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dW(i_, j_) (dW + (i_) + (j_)*lddw)

    /* Constants */
    const magmaDoubleComplex c_neg_one = MAGMA_Z_NEG_ONE;
    const magmaDoubleComplex c_one     = MAGMA_Z_ONE;
    const magmaDoubleComplex c_zero    = MAGMA_Z_ZERO;
    const magma_int_t ione = 1;

    /* Local variables */
    magmaDoubleComplex alpha, value;
    magma_int_t i, i_n, i_1, iw;

    /* Check arguments */
    magma_int_t info = 0;
    if ( uplo != MagmaLower && uplo != MagmaUpper ) {
        info = -1;
    } else if ( n < 0 ) {
        info = -2;
    } else if ( nb < 1 ) {
        info = -3;
    } else if ( lda < max(1,n) ) {
        info = -5;
    } else if ( ldw < max(1,n) ) {
        info = -9;
    } else if ( ldda < max(1,n) ) {
        info = -11;
    } else if ( lddw < max(1,n) ) {
        info = -13;
    }
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return info;
    }

    /* Quick return if possible */
    if (n == 0) {
        return info;
    }

    if (uplo == MagmaUpper) {
        /* Reduce last NB columns of upper triangle */
        for (i = n-1; i >= n - nb; --i) {
            i_1 = i + 1;
            i_n = n - i - 1;     // number of columns to the right of column i
            iw  = i - n + nb;    // column of W corresponding to column i of A
            if (i < n-1) {
                /* Update A(1:i,i) with the previously computed reflectors/W columns */
                #ifdef COMPLEX
                lapackf77_zlacgv( &i_n, W(i, iw+1), &ldw );
                #endif
                blasf77_zgemv( "No transpose", &i_1, &i_n, &c_neg_one, A(0, i+1), &lda,
                               W(i, iw+1), &ldw, &c_one, A(0, i), &ione );
                #ifdef COMPLEX
                lapackf77_zlacgv( &i_n, W(i, iw+1), &ldw );
                lapackf77_zlacgv( &i_n, A(i, i+1), &lda );
                #endif
                blasf77_zgemv( "No transpose", &i_1, &i_n, &c_neg_one, W(0, iw+1), &ldw,
                               A(i, i+1), &lda, &c_one, A(0, i), &ione );
                #ifdef COMPLEX
                lapackf77_zlacgv( &i_n, A(i, i+1), &lda );
                #endif
            }
            if (i > 0) {
                /* Generate elementary reflector H(i) to annihilate A(1:i-2,i) */
                alpha = *A(i-1, i);
                lapackf77_zlarfg( &i, &alpha, A(0, i), &ione, &tau[i - 1] );

                e[i-1] = MAGMA_Z_REAL( alpha );
                *A(i-1,i) = MAGMA_Z_ONE;

                /* Compute W(1:i-1,i) */
                // 1. Send the block reflector A(0:n-i-1,i) to the GPU
                magma_zsetvector( i, A(0, i), 1, dA(0, i), 1, queue );

                // GPU does the Hermitian matrix-vector product into dW
                magma_zhemv( MagmaUpper, i, c_one, dA(0, 0), ldda, dA(0, i), ione,
                             c_zero, dW(0, iw), ione, queue );

                // 2. Start putting the result back (asynchronously)
                magma_zgetmatrix_async( i, 1,
                                        dW(0, iw), lddw,
                                        W(0, iw),  ldw, queue );

                if (i < n-1) {
                    // overlapped with the async transfer above: CPU gemv on W
                    blasf77_zgemv( MagmaConjTransStr, &i, &i_n, &c_one, W(0, iw+1), &ldw,
                                   A(0, i), &ione, &c_zero, W(i+1, iw), &ione );
                }

                // 3. Here is where we need it
                // TODO find the right place
                magma_queue_sync( queue );

                if (i < n-1) {
                    blasf77_zgemv( "No transpose", &i, &i_n, &c_neg_one, A(0, i+1), &lda,
                                   W(i+1, iw), &ione, &c_one, W(0, iw), &ione );

                    blasf77_zgemv( MagmaConjTransStr, &i, &i_n, &c_one, A(0, i+1), &lda,
                                   A(0, i), &ione, &c_zero, W(i+1, iw), &ione );

                    blasf77_zgemv( "No transpose", &i, &i_n, &c_neg_one, W(0, iw+1), &ldw,
                                   W(i+1, iw), &ione, &c_one, W(0, iw), &ione );
                }

                blasf77_zscal( &i, &tau[i - 1], W(0, iw), &ione );

                // w := w - 1/2 * tau * (w'*v) * v
                value = magma_cblas_zdotc( i, W(0,iw), ione, A(0,i), ione );
                alpha = tau[i - 1] * -0.5f * value;
                blasf77_zaxpy( &i, &alpha, A(0, i), &ione, W(0, iw), &ione );
            }
        }
    }
    else {
        /* Reduce first NB columns of lower triangle */
        for (i = 0; i < nb; ++i) {
            /* Update A(i:n,i) */
            i_n = n - i;
            #ifdef COMPLEX
            lapackf77_zlacgv( &i, W(i, 0), &ldw );
            #endif
            blasf77_zgemv( "No transpose", &i_n, &i, &c_neg_one, A(i, 0), &lda,
                           W(i, 0), &ldw, &c_one, A(i, i), &ione );
            #ifdef COMPLEX
            lapackf77_zlacgv( &i, W(i, 0), &ldw );
            lapackf77_zlacgv( &i, A(i, 0), &lda );
            #endif
            blasf77_zgemv( "No transpose", &i_n, &i, &c_neg_one, W(i, 0), &ldw,
                           A(i, 0), &lda, &c_one, A(i, i), &ione );
            #ifdef COMPLEX
            lapackf77_zlacgv( &i, A(i, 0), &lda );
            #endif

            if (i < n-1) {
                /* Generate elementary reflector H(i) to annihilate A(i+2:n,i) */
                i_n = n - i - 1;
                alpha = *A(i+1, i);
                lapackf77_zlarfg( &i_n, &alpha, A(min(i+2,n-1), i), &ione, &tau[i] );
                e[i] = MAGMA_Z_REAL( alpha );
                *A(i+1,i) = MAGMA_Z_ONE;

                /* Compute W(i+1:n,i) */
                // 1. Send the block reflector A(i+1:n,i) to the GPU
                magma_zsetvector( i_n, A(i+1, i), 1, dA(i+1, i), 1, queue );

                magma_zhemv( MagmaLower, i_n, c_one, dA(i+1, i+1), ldda, dA(i+1, i), ione,
                             c_zero, dW(i+1, i), ione, queue );

                // 2. Start putting the result back (asynchronously)
                magma_zgetmatrix_async( i_n, 1,
                                        dW(i+1, i), lddw,
                                        W(i+1, i),  ldw, queue );

                // overlapped with the async transfer: CPU gemvs on already-held data
                blasf77_zgemv( MagmaConjTransStr, &i_n, &i, &c_one, W(i+1, 0), &ldw,
                               A(i+1, i), &ione, &c_zero, W(0, i), &ione );

                blasf77_zgemv( "No transpose", &i_n, &i, &c_neg_one, A(i+1, 0), &lda,
                               W(0, i), &ione, &c_zero, work, &ione );

                blasf77_zgemv( MagmaConjTransStr, &i_n, &i, &c_one, A(i+1, 0), &lda,
                               A(i+1, i), &ione, &c_zero, W(0, i), &ione );

                // 3. Here is where we need it
                magma_queue_sync( queue );

                if (i != 0)
                    blasf77_zaxpy( &i_n, &c_one, work, &ione, W(i+1, i), &ione );

                blasf77_zgemv( "No transpose", &i_n, &i, &c_neg_one, W(i+1, 0), &ldw,
                               W(0, i), &ione, &c_one, W(i+1, i), &ione );
                blasf77_zscal( &i_n, &tau[i], W(i+1,i), &ione );

                // w := w - 1/2 * tau * (w'*v) * v
                value = magma_cblas_zdotc( i_n, W(i+1,i), ione, A(i+1,i), ione );
                alpha = tau[i] * -0.5f * value;
                blasf77_zaxpy( &i_n, &alpha, A(i+1, i), &ione, W(i+1,i), &ione );
            }
        }
    }

    return info;
} /* magma_zlatrd */
/*
    Solves a triangular system with a shifted diagonal,
        (A - lambda*I)   * x = s*b,
        (A - lambda*I)^T * x = s*b,  or
        (A - lambda*I)^H * x = s*b,
    with scaling to prevent overflow, where A is upper or lower triangular
    (see the tjjs = A(j,j) - lambda computations below).  s = *scale is set by
    this routine; x holds b on entry and the solution on exit.  cnorm holds the
    off-diagonal column 1-norms of A: computed here when normin = MagmaFalse,
    assumed already set when normin = MagmaTrue.  Because the diagonal is
    modified, the regular BLAS ztrsv cannot be used; the solve is done with
    Level 1 BLAS, scaling intermediate results (cf. LAPACK zlatrs).
    Returns *info = 0 on success, or *info = -i if argument i was invalid.
*/
magma_int_t magma_zlatrsd(
    magma_uplo_t uplo, magma_trans_t trans,
    magma_diag_t diag, magma_bool_t normin,
    magma_int_t n, const magmaDoubleComplex *A, magma_int_t lda,
    magmaDoubleComplex lambda,
    magmaDoubleComplex *x,
    double *scale, double *cnorm,
    magma_int_t *info)
{
    // column-major accessor
#define A(i,j) (A + (i) + (j)*lda)

    /* constants */
    const magma_int_t ione = 1;
    const double d_half = 0.5;
    const magmaDoubleComplex c_zero = MAGMA_Z_ZERO;
    const magmaDoubleComplex c_one  = MAGMA_Z_ONE;

    /* System generated locals */
    magma_int_t len;
    magmaDoubleComplex ztmp;

    /* Local variables */
    magma_int_t i, j;
    double xj, rec, tjj;
    magma_int_t jinc;
    double xbnd;
    magma_int_t imax;
    double tmax;
    magmaDoubleComplex tjjs;
    double xmax, grow;
    double tscal;
    magmaDoubleComplex uscal;
    magma_int_t jlast;
    magmaDoubleComplex csumj;
    double bignum;
    magma_int_t jfirst;
    double smlnum;

    /* Function Body */
    *info = 0;
    magma_int_t upper  = (uplo  == MagmaUpper);
    magma_int_t notran = (trans == MagmaNoTrans);
    magma_int_t nounit = (diag  == MagmaNonUnit);

    /* Test the input parameters. */
    if ( ! upper && uplo != MagmaLower ) {
        *info = -1;
    }
    else if (! notran && trans != MagmaTrans && trans != MagmaConjTrans) {
        *info = -2;
    }
    else if ( ! nounit && diag != MagmaUnit ) {
        *info = -3;
    }
    else if ( ! (normin == MagmaTrue) && ! (normin == MagmaFalse) ) {
        *info = -4;
    }
    else if ( n < 0 ) {
        *info = -5;
    }
    else if ( lda < max(1,n) ) {
        *info = -7;
    }
    if ( *info != 0 ) {
        magma_xerbla( __func__, -(*info) );
        return *info;
    }

    /* Quick return if possible */
    if ( n == 0 ) {
        return *info;
    }

    /* Determine machine dependent parameters to control overflow. */
    smlnum = lapackf77_dlamch( "Safe minimum" );
    bignum = 1. / smlnum;
    lapackf77_dlabad( &smlnum, &bignum );
    smlnum /= lapackf77_dlamch( "Precision" );
    bignum = 1. / smlnum;
    *scale = 1.;

    if ( normin == MagmaFalse ) {
        /* Compute the 1-norm of each column, not including the diagonal. */
        if ( upper ) {
            /* A is upper triangular. */
            cnorm[0] = 0.;
            for( j = 1; j < n; ++j ) {
                cnorm[j] = magma_cblas_dzasum( j, A(0,j), ione );
            }
        }
        else {
            /* A is lower triangular. */
            for( j = 0; j < n-1; ++j ) {
                cnorm[j] = magma_cblas_dzasum( n-(j+1), A(j+1,j), ione );
            }
            cnorm[n-1] = 0.;
        }
    }

    /* Scale the column norms by TSCAL if the maximum element in CNORM is */
    /* greater than BIGNUM/2. */
    imax = blasf77_idamax( &n, &cnorm[0], &ione ) - 1;
    tmax = cnorm[imax];
    if ( tmax <= bignum * 0.5 ) {
        tscal = 1.;
    }
    else {
        tscal = 0.5 / (smlnum * tmax);
        blasf77_dscal( &n, &tscal, &cnorm[0], &ione );
    }

    /* ================================================================= */
    /* Compute a bound on the computed solution vector to see if the */
    /* Level 2 BLAS routine ZTRSV can be used. */
    xmax = 0.;
    for( j = 0; j < n; ++j ) {
        xmax = max( xmax, 0.5*MAGMA_Z_ABS1( x[j] ));
    }
    xbnd = xmax;

    if ( notran ) {
        /* ---------------------------------------- */
        /* Compute the growth in A * x = b. */
        if ( upper ) {
            jfirst = n-1;
            jlast  = 0;
            jinc   = -1;
        }
        else {
            jfirst = 0;
            jlast  = n;
            jinc   = 1;
        }

        if ( tscal != 1. ) {
            grow = 0.;
            goto L60;
        }

        /* A is non-unit triangular. */
        /* Compute GROW = 1/G(j) and XBND = 1/M(j). */
        /* Initially, G(0) = max{x(i), i=1,...,n}. */
        grow = 0.5 / max( xbnd, smlnum );
        xbnd = grow;
        for( j = jfirst; (jinc < 0 ? j >= jlast : j < jlast); j += jinc ) {
            /* Exit the loop if the growth factor is too small. */
            if ( grow <= smlnum ) {
                goto L60;
            }

            // diagonal entry of the shifted matrix A - lambda*I
            if ( nounit ) {
                tjjs = *A(j,j) - lambda;
            }
            else {
                tjjs = c_one - lambda;
            }
            tjj = MAGMA_Z_ABS1( tjjs );

            if ( tjj >= smlnum ) {
                /* M(j) = G(j-1) / abs(A(j,j)) */
                xbnd = min( xbnd, min(1.,tjj)*grow );
            }
            else {
                /* M(j) could overflow, set XBND to 0. */
                xbnd = 0.;
            }

            if ( tjj + cnorm[j] >= smlnum ) {
                /* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
                grow *= (tjj / (tjj + cnorm[j]));
            }
            else {
                /* G(j) could overflow, set GROW to 0. */
                grow = 0.;
            }
        }
        grow = xbnd;
L60:
        ;
    }
    else {
        /* ---------------------------------------- */
        /* Compute the growth in A**T * x = b  or  A**H * x = b. */
        if ( upper ) {
            jfirst = 0;
            jlast  = n;
            jinc   = 1;
        }
        else {
            jfirst = n-1;
            jlast  = 0;
            jinc   = -1;
        }

        if ( tscal != 1. ) {
            grow = 0.;
            goto L90;
        }

        /* A is non-unit triangular. */
        /* Compute GROW = 1/G(j) and XBND = 1/M(j). */
        /* Initially, M(0) = max{x(i), i=1,...,n}. */
        grow = 0.5 / max( xbnd, smlnum );
        xbnd = grow;
        for( j = jfirst; (jinc < 0 ? j >= jlast : j < jlast); j += jinc ) {
            /* Exit the loop if the growth factor is too small. */
            if ( grow <= smlnum ) {
                goto L90;
            }

            /* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
            xj = 1. + cnorm[j];
            grow = min( grow, xbnd / xj );

            if ( nounit ) {
                tjjs = *A(j,j) - lambda;
            }
            else {
                tjjs = c_one - lambda;
            }
            tjj = MAGMA_Z_ABS1( tjjs );

            if ( tjj >= smlnum ) {
                /* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
                if ( xj > tjj ) {
                    xbnd *= (tjj / xj);
                }
            }
            else {
                /* M(j) could overflow, set XBND to 0. */
                xbnd = 0.;
            }
        }
        grow = min( grow, xbnd );
L90:
        ;
    }

    /* ================================================================= */
    /* Due to modified diagonal, we can't use regular BLAS ztrsv. */
    /* Use a Level 1 BLAS solve, scaling intermediate results. */
    if ( xmax > bignum * 0.5 ) {
        /* Scale X so that its components are less than or equal to */
        /* BIGNUM in absolute value. */
        *scale = (bignum * 0.5) / xmax;
        blasf77_zdscal( &n, scale, &x[0], &ione );
        xmax = bignum;
    }
    else {
        xmax *= 2.;
    }

    if ( notran ) {
        /* ---------------------------------------- */
        /* Solve A * x = b */
        for( j = jfirst; (jinc < 0 ? j >= jlast : j < jlast); j += jinc ) {
            /* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
            xj = MAGMA_Z_ABS1( x[j] );
            if ( nounit ) {
                tjjs = (*A(j,j) - lambda ) * tscal;
            }
            else {
                tjjs = (c_one - lambda) * tscal;
                if ( tscal == 1. ) {
                    goto L110;  // unit diagonal, no shift scaling needed
                }
            }
            tjj = MAGMA_Z_ABS1( tjjs );
            if ( tjj > smlnum ) {
                /* abs(A(j,j)) > SMLNUM: */
                if ( tjj < 1. ) {
                    if ( xj > tjj * bignum ) {
                        /* Scale x by 1/b(j). */
                        rec = 1. / xj;
                        blasf77_zdscal( &n, &rec, &x[0], &ione );
                        *scale *= rec;
                        xmax *= rec;
                    }
                }
                x[j] = x[j] / tjjs;
                xj = MAGMA_Z_ABS1( x[j] );
            }
            else if ( tjj > 0. ) {
                /* 0 < abs(A(j,j)) <= SMLNUM: */
                if ( xj > tjj * bignum ) {
                    /* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM */
                    /* to avoid overflow when dividing by A(j,j). */
                    rec = (tjj * bignum) / xj;
                    if ( cnorm[j] > 1. ) {
                        /* Scale by 1/CNORM(j) to avoid overflow when */
                        /* multiplying x(j) times column j. */
                        rec /= cnorm[j];
                    }
                    blasf77_zdscal( &n, &rec, &x[0], &ione );
                    *scale *= rec;
                    xmax *= rec;
                }
                x[j] = x[j] / tjjs;
                xj = MAGMA_Z_ABS1( x[j] );
            }
            else {
                /* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
                /* scale = 0, and compute a solution to A*x = 0. */
                for( i = 0; i < n; ++i ) {
                    x[i] = c_zero;
                }
                x[j] = c_one;
                xj = 1.;
                *scale = 0.;
                xmax = 0.;
            }
L110:
            /* Scale x if necessary to avoid overflow when adding a */
            /* multiple of column j of A. */
            if ( xj > 1. ) {
                rec = 1. / xj;
                if ( cnorm[j] > (bignum - xmax) * rec ) {
                    /* Scale x by 1/(2*abs(x(j))). */
                    rec *= 0.5;
                    blasf77_zdscal( &n, &rec, &x[0], &ione );
                    *scale *= rec;
                }
            }
            else if ( xj * cnorm[j] > bignum - xmax ) {
                /* Scale x by 1/2. */
                blasf77_zdscal( &n, &d_half, &x[0], &ione );
                *scale *= 0.5;
            }

            if ( upper ) {
                if ( j > 0 ) {
                    /* Compute the update */
                    /* x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j) */
                    len = j;
                    ztmp = -tscal * x[j];
                    blasf77_zaxpy( &len, &ztmp, A(0,j), &ione, &x[0], &ione );
                    i = blasf77_izamax( &len, &x[0], &ione ) - 1;
                    xmax = MAGMA_Z_ABS1( x[i] );
                }
            }
            else {
                if ( j < n-1 ) {
                    /* Compute the update */
                    /* x(j+1:n) := x(j+1:n) - x(j) * A(j+1:n,j) */
                    len = n - (j+1);
                    ztmp = -tscal * x[j];
                    blasf77_zaxpy( &len, &ztmp, A(j+1,j), &ione, &x[j + 1], &ione );
                    i = j + blasf77_izamax( &len, &x[j + 1], &ione );
                    xmax = MAGMA_Z_ABS1( x[i] );
                }
            }
        }
    }
    else if ( trans == MagmaTrans ) {
        /* ---------------------------------------- */
        /* Solve A**T * x = b */
        for( j = jfirst; (jinc < 0 ? j >= jlast : j < jlast); j += jinc ) {
            /* Compute x(j) = b(j) - sum A(k,j)*x(k). */
            /*                       k<>j             */
            xj = MAGMA_Z_ABS1( x[j] );
            uscal = MAGMA_Z_MAKE( tscal, 0. );
            rec = 1. / max( xmax, 1. );
            if ( cnorm[j] > (bignum - xj) * rec ) {
                /* If x(j) could overflow, scale x by 1/(2*XMAX). */
                rec *= 0.5;
                if ( nounit ) {
                    tjjs = (*A(j,j) - lambda) * tscal;
                }
                else {
                    tjjs = (c_one - lambda) * tscal;
                }
                tjj = MAGMA_Z_ABS1( tjjs );
                if ( tjj > 1. ) {
                    /* Divide by A(j,j) when scaling x if A(j,j) > 1. */
                    rec = min( 1., rec * tjj );
                    uscal = uscal / tjjs;
                }
                if ( rec < 1. ) {
                    blasf77_zdscal( &n, &rec, &x[0], &ione );
                    *scale *= rec;
                    xmax *= rec;
                }
            }

            csumj = c_zero;
            if ( uscal == c_one ) {
                /* If the scaling needed for A in the dot product is 1, */
                /* call ZDOTU to perform the dot product. */
                if ( upper ) {
                    csumj = magma_cblas_zdotu( j, A(0,j), ione, &x[0], ione );
                }
                else if ( j < n-1 ) {
                    csumj = magma_cblas_zdotu( n-(j+1), A(j+1,j), ione, &x[j+1], ione );
                }
            }
            else {
                /* Otherwise, use in-line code for the dot product. */
                if ( upper ) {
                    for( i = 0; i < j; ++i ) {
                        csumj += (*A(i,j) * uscal) * x[i];
                    }
                }
                else if ( j < n-1 ) {
                    for( i = j+1; i < n; ++i ) {
                        csumj += (*A(i,j) * uscal) * x[i];
                    }
                }
            }

            if ( uscal == MAGMA_Z_MAKE( tscal, 0. )) {
                /* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
                /* was not used to scale the dotproduct. */
                x[j] -= csumj;
                xj = MAGMA_Z_ABS1( x[j] );
                if ( nounit ) {
                    tjjs = (*A(j,j) - lambda) * tscal;
                }
                else {
                    tjjs = (c_one - lambda) * tscal;
                    if ( tscal == 1. ) {
                        goto L160;
                    }
                }

                /* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
                tjj = MAGMA_Z_ABS1( tjjs );
                if ( tjj > smlnum ) {
                    /* abs(A(j,j)) > SMLNUM: */
                    if ( tjj < 1. ) {
                        if ( xj > tjj * bignum ) {
                            /* Scale X by 1/abs(x(j)). */
                            rec = 1. / xj;
                            blasf77_zdscal( &n, &rec, &x[0], &ione );
                            *scale *= rec;
                            xmax *= rec;
                        }
                    }
                    x[j] = x[j] / tjjs;
                }
                else if ( tjj > 0. ) {
                    /* 0 < abs(A(j,j)) <= SMLNUM: */
                    if ( xj > tjj * bignum ) {
                        /* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
                        rec = (tjj * bignum) / xj;
                        blasf77_zdscal( &n, &rec, &x[0], &ione );
                        *scale *= rec;
                        xmax *= rec;
                    }
                    x[j] = x[j] / tjjs;
                }
                else {
                    /* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
                    /* scale = 0 and compute a solution to A**T *x = 0. */
                    for( i = 0; i < n; ++i ) {
                        x[i] = c_zero;
                    }
                    x[j] = c_one;
                    *scale = 0.;
                    xmax = 0.;
                }
L160:
                ;
            }
            else {
                /* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
                /* product has already been divided by 1/A(j,j). */
                x[j] = (x[j] / tjjs) - csumj;
            }
            xmax = max( xmax, MAGMA_Z_ABS1( x[j] ));
        }
    }
    else {
        /* ---------------------------------------- */
        /* Solve A**H * x = b */
        for( j = jfirst; (jinc < 0 ? j >= jlast : j < jlast); j += jinc ) {
            /* Compute x(j) = b(j) - sum A(k,j)*x(k). */
            /*                       k<>j             */
            xj = MAGMA_Z_ABS1( x[j] );
            uscal = MAGMA_Z_MAKE( tscal, 0. );
            rec = 1. / max(xmax, 1.);
            if ( cnorm[j] > (bignum - xj) * rec ) {
                /* If x(j) could overflow, scale x by 1/(2*XMAX). */
                rec *= 0.5;
                if ( nounit ) {
                    tjjs = MAGMA_Z_CONJ( *A(j,j) - lambda ) * tscal;
                }
                else {
                    tjjs = (c_one - lambda) * tscal;
                }
                tjj = MAGMA_Z_ABS1( tjjs );
                if ( tjj > 1. ) {
                    /* Divide by A(j,j) when scaling x if A(j,j) > 1. */
                    rec = min( 1., rec * tjj );
                    uscal = uscal / tjjs;
                }
                if ( rec < 1. ) {
                    blasf77_zdscal( &n, &rec, &x[0], &ione );
                    *scale *= rec;
                    xmax *= rec;
                }
            }

            csumj = c_zero;
            if ( uscal == c_one ) {
                /* If the scaling needed for A in the dot product is 1, */
                /* call ZDOTC to perform the dot product. */
                if ( upper ) {
                    csumj = magma_cblas_zdotc( j, A(0,j), ione, &x[0], ione );
                }
                else if ( j < n-1 ) {
                    csumj = magma_cblas_zdotc( n-(j+1), A(j+1,j), ione, &x[j+1], ione );
                }
            }
            else {
                /* Otherwise, use in-line code for the dot product. */
                if ( upper ) {
                    for( i = 0; i < j; ++i ) {
                        csumj += (MAGMA_Z_CONJ( *A(i,j) ) * uscal) * x[i];
                    }
                }
                else if ( j < n-1 ) {
                    for( i = j + 1; i < n; ++i ) {
                        csumj += (MAGMA_Z_CONJ( *A(i,j) ) * uscal) * x[i];
                    }
                }
            }

            if ( uscal == tscal ) {
                /* Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j) */
                /* was not used to scale the dotproduct. */
                x[j] -= csumj;
                xj = MAGMA_Z_ABS1( x[j] );
                if ( nounit ) {
                    tjjs = MAGMA_Z_CONJ( *A(j,j) - lambda ) * tscal;
                }
                else {
                    tjjs = (c_one - lambda) * tscal;
                    if ( tscal == 1. ) {
                        goto L210;
                    }
                }

                /* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
                tjj = MAGMA_Z_ABS1( tjjs );
                if ( tjj > smlnum ) {
                    /* abs(A(j,j)) > SMLNUM: */
                    if ( tjj < 1. ) {
                        if ( xj > tjj * bignum ) {
                            /* Scale X by 1/abs(x(j)). */
                            rec = 1. / xj;
                            blasf77_zdscal( &n, &rec, &x[0], &ione );
                            *scale *= rec;
                            xmax *= rec;
                        }
                    }
                    x[j] = x[j] / tjjs;
                }
                else if ( tjj > 0. ) {
                    /* 0 < abs(A(j,j)) <= SMLNUM: */
                    if ( xj > tjj * bignum ) {
                        /* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
                        rec = (tjj * bignum) / xj;
                        blasf77_zdscal( &n, &rec, &x[0], &ione );
                        *scale *= rec;
                        xmax *= rec;
                    }
                    x[j] = x[j] / tjjs;
                }
                else {
                    /* A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and */
                    /* scale = 0 and compute a solution to A**H *x = 0. */
                    for( i = 0; i < n; ++i ) {
                        x[i] = c_zero;
                    }
                    x[j] = c_one;
                    *scale = 0.;
                    xmax = 0.;
                }
L210:
                ;
            }
            else {
                /* Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot */
                /* product has already been divided by 1/A(j,j). */
                x[j] = (x[j] / tjjs) - csumj;
            }
            xmax = max( xmax, MAGMA_Z_ABS1( x[j] ));
        }
    }
    *scale /= tscal;

    /* Scale the column norms by 1/TSCAL for return. */
    if ( tscal != 1. ) {
        double d = 1. / tscal;
        blasf77_dscal( &n, &d, &cnorm[0], &ione );
    }

    return *info;
} /* end zlatrsd */
/**
    Purpose
    -------
    ZLATRD reduces NB rows and columns of a complex Hermitian matrix A to
    Hermitian tridiagonal form by an orthogonal similarity
    transformation Q' * A * Q, and returns the matrices V and W which are
    needed to apply the transformation to the unreduced part of A.

    If UPLO = MagmaUpper, ZLATRD reduces the last NB rows and columns of a
    matrix, of which the upper triangle is supplied;
    if UPLO = MagmaLower, ZLATRD reduces the first NB rows and columns of a
    matrix, of which the lower triangle is supplied.

    This is an auxiliary routine called by ZHETRD.

    Arguments
    ---------
    @param[in]
    ngpu    INTEGER
            Number of GPUs to use. ngpu > 0.

    @param[in]
    uplo    magma_uplo_t
            Specifies whether the upper or lower triangular part of the
            Hermitian matrix A is stored:
      -     = MagmaUpper: Upper triangular
      -     = MagmaLower: Lower triangular

    @param[in]
    n       INTEGER
            The order of the matrix A.

    @param[in]
    nb      INTEGER
            The number of rows and columns to be reduced.

    @param[in]
    nb0     INTEGER
            The block size used for the matrix distribution.
            nb and nb0 can be different for the final step of zhetrd.

    @param[in,out]
    A       COMPLEX_16 array, dimension (LDA,N)
            On entry, the Hermitian matrix A.  If UPLO = MagmaUpper, the leading
            n-by-n upper triangular part of A contains the upper
            triangular part of the matrix A, and the strictly lower
            triangular part of A is not referenced.  If UPLO = MagmaLower, the
            leading n-by-n lower triangular part of A contains the lower
            triangular part of the matrix A, and the strictly upper
            triangular part of A is not referenced.
            On exit:
    -       if UPLO = MagmaUpper, the last NB columns have been reduced to
            tridiagonal form, with the diagonal elements overwriting
            the diagonal elements of A; the elements above the diagonal
            with the array TAU, represent the orthogonal matrix Q as a
            product of elementary reflectors;
    -       if UPLO = MagmaLower, the first NB columns have been reduced to
            tridiagonal form, with the diagonal elements overwriting
            the diagonal elements of A; the elements below the diagonal
            with the array TAU, represent the  orthogonal matrix Q as a
            product of elementary reflectors.
            See Further Details.

    @param[in]
    lda     INTEGER
            The leading dimension of the array A.  LDA >= max(1,N).

    @param[out]
    e       DOUBLE PRECISION array, dimension (N-1)
            (NOTE(review): declared as double* in the signature, so documented
            as DOUBLE PRECISION rather than COMPLEX_16.)
            If UPLO = MagmaUpper, E(n-nb:n-1) contains the superdiagonal
            elements of the last NB columns of the reduced matrix;
            if UPLO = MagmaLower, E(1:nb) contains the subdiagonal elements of
            the first NB columns of the reduced matrix.

    @param[out]
    tau     COMPLEX_16 array, dimension (N-1)
            The scalar factors of the elementary reflectors, stored in
            TAU(n-nb:n-1) if UPLO = MagmaUpper, and in TAU(1:nb) if UPLO = MagmaLower.
            See Further Details.

    @param[out]
    W       COMPLEX_16 array, dimension (LDW,NB)
            The n-by-nb matrix W required to update the unreduced part
            of A.

    @param[in]
    ldw     INTEGER
            The leading dimension of the array W. LDW >= max(1,N).

    @param
    dA      COMPLEX_16 array of pointers, dimension (ngpu).
            GPU-resident portions of A; the computed reflector vectors and
            W columns are broadcast into the dW1 region of each device.
            (assumes 1-D block-cyclic distribution managed by the caller
            zhetrd_mgpu — TODO confirm.)

    @param[in]
    ldda    INTEGER
            Leading dimension of each dA[dev].

    @param[in]
    offset  INTEGER
            Row/column offset of this panel within the distributed matrix;
            passed through to magmablas_zhemv_mgpu in the lower case.

    @param
    dW      COMPLEX_16 array of pointers, dimension (ngpu).
            Per-GPU workspace; columns [0,nb) receive W, columns [nb,2*nb)
            (the dW1 region) receive the reflector vectors v.

    @param[in]
    lddw    INTEGER
            Leading dimension of each dW[dev].

    @param
    hwork   COMPLEX_16 host workspace passed to magmablas_zhemv_mgpu.

    @param[in]
    lhwork  INTEGER
            Size of hwork.

    @param
    dwork   Per-GPU device workspace passed to magmablas_zhemv_mgpu.

    @param[in]
    ldwork  INTEGER
            Size of each dwork[dev].

    @param[in]
    queues  magma_queue_t array of dimension (ngpu).
            queues[dev] is an execution queue on GPU dev.

    Further Details
    ---------------
    If UPLO = MagmaUpper, the matrix Q is represented as a product of elementary
    reflectors

       Q = H(n) H(n-1) . . . H(n-nb+1).

    Each H(i) has the form

       H(i) = I - tau * v * v'

    where tau is a complex scalar, and v is a complex vector with
    v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i),
    and tau in TAU(i-1).

    If UPLO = MagmaLower, the matrix Q is represented as a product of elementary
    reflectors

       Q = H(1) H(2) . . . H(nb).

    Each H(i) has the form

       H(i) = I - tau * v * v'

    where tau is a complex scalar, and v is a complex vector with
    v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i),
    and tau in TAU(i).

    The elements of the vectors v together form the n-by-nb matrix V
    which is needed, with W, to apply the transformation to the unreduced
    part of the matrix, using a Hermitian rank-2k update of the form:
    A := A - V*W' - W*V'.

    The contents of A on exit are illustrated by the following examples
    with n = 5 and nb = 2:

    if UPLO = MagmaUpper:                       if UPLO = MagmaLower:

        (  a   a   a   v4  v5 )                    (  d                  )
        (      a   a   v4  v5 )                    (  1   d              )
        (          a   1   v5 )                    (  v1  1   a          )
        (              d   1  )                    (  v1  v2  a   a      )
        (                  d  )                    (  v1  v2  a   a   a  )

    where d denotes a diagonal element of the reduced matrix, a denotes
    an element of the original matrix that is unchanged, and vi denotes
    an element of the vector defining H(i).

    @ingroup magma_zheev_aux
    ********************************************************************/
extern "C" magma_int_t
magma_zlatrd_mgpu(
    magma_int_t ngpu,
    magma_uplo_t uplo,
    magma_int_t n, magma_int_t nb, magma_int_t nb0,
    magmaDoubleComplex *A,  magma_int_t lda,
    double *e, magmaDoubleComplex *tau,
    magmaDoubleComplex *W,          magma_int_t ldw,
    magmaDoubleComplex_ptr dA[],    magma_int_t ldda, magma_int_t offset,
    magmaDoubleComplex_ptr dW[],    magma_int_t lddw,
    magmaDoubleComplex    *hwork,   magma_int_t lhwork,
    magmaDoubleComplex_ptr dwork[], magma_int_t ldwork,
    magma_queue_t queues[] )
{
// Column-major indexing helpers for the host panels A and W.
#define A(i, j) (A + (j)*lda + (i))
#define W(i, j) (W + (j)*ldw + (i))

// Per-device indexing: dW holds W in columns [0,nb) and the reflector
// vectors v in columns [nb,2*nb) (the dW1 region).
// NOTE(review): dA() references `loffset`, which is not declared in this
// function; the macro is never invoked with arguments in the visible code
// (dA is always passed as a bare array), so it does not break the build —
// confirm whether the macro is stale.
#define dA(dev, i, j)  (dA[(dev)] + ((j)+loffset)*ldda + (i) + offset)
#define dW(dev, i, j)  (dW[(dev)] + (j)          *lddw + (i))
#define dW1(dev, i, j) (dW[(dev)] + ((j)+nb)     *lddw + (i))

    /* Constants */
    const magmaDoubleComplex c_neg_one = MAGMA_Z_NEG_ONE;
    const magmaDoubleComplex c_one     = MAGMA_Z_ONE;
    const magmaDoubleComplex c_zero    = MAGMA_Z_ZERO;
    const magma_int_t ione = 1;

    /* Local variables */
    magmaDoubleComplex alpha, value;
    magma_int_t dev;
    magma_int_t i, n_i, n_i_1, ip1, iw;

    // TODO check arguments
    magma_int_t info = 0;
    if (n <= 0) {
        // Quick return: nothing to reduce.
        return info;
    }

    // Host scratch vector used to stage the partial W update in the lower case.
    // TODO allocate f in zhetrd and pass into zlatrd. (e.g., expand hwork a bit)
    magmaDoubleComplex *f;
    magma_zmalloc_cpu( &f, n );
    if ( f == NULL ) {
        info = MAGMA_ERR_HOST_ALLOC;
        return info;
    }

    // Remember the caller's active device so it can be restored on exit.
    magma_device_t orig_dev;
    magma_getdevice( &orig_dev );

    if (uplo == MagmaUpper) {
        /* Reduce last NB columns of upper triangle */
        for (i = n-1; i >= n - nb; --i) {
            ip1   = i + 1;
            n_i_1 = n - i - 1;      // trailing dimension beyond column i

            iw = i - n + nb;        // column of W corresponding to column i of A
            if (i < n-1) {
                /* Update A(1:i,i) */
                magmaDoubleComplex wii = -conj( *W(i, iw+1) );
                blasf77_zaxpy( &ip1, &wii, A(0, i+1), &ione, A(0, i), &ione );

                wii = -conj( *A(i, i+1) );
                blasf77_zaxpy( &ip1, &wii, W(0, iw+1), &ione, A(0, i), &ione );
            }
            if (i > 0) {
                /* Generate elementary reflector H(i) to annihilate A(1:i-2,i) */
                alpha = *A(i-1, i);
                lapackf77_zlarfg( &i, &alpha, A(0, i), &ione, &tau[i - 1] );

                e[i-1] = MAGMA_Z_REAL( alpha );
                // Store an explicit 1 so the full vector v can be used in BLAS calls.
                *A(i-1,i) = MAGMA_Z_ONE;

                // Broadcast v to every GPU (into the dW1 region).
                // TODO Previously, this set dx2[dev] = dW1(dev, 0, iw); and used dx2 in zhemv.
                // TODO Now zhemv handles broadcasting x to the GPUs, but data in dW1 is
                // TODO apparently still used in zhetrd_mgpu / zher2k_mgpu.
                for( dev=0; dev < ngpu; dev++ ) {
                    magma_setdevice( dev );
                    magma_zsetvector_async( n, A(0,i), 1, dW1(dev, 0, iw), 1, queues[dev] );
                }
                // Launch W(0:i-1,iw) := A(0:i-1,0:i-1) * v asynchronously on the GPUs.
                magmablas_zhemv_mgpu(
                    MagmaUpper, i, c_one, dA, ldda, 0,
                    A(0,i), 1, c_zero, W(0, iw), 1,
                    hwork, lhwork, dwork, ldwork, ngpu, nb0, queues );

                if (i < n-1) {
                    blasf77_zgemv( MagmaConjTransStr, &i, &n_i_1, &c_one,
                                   W(0, iw+1), &ldw,
                                   A(0, i),    &ione, &c_zero,
                                   W(i+1, iw), &ione );
                }

                /* overlap update: CPU applies the previous reflector pair while
                   the GPU hemv above is in flight */
                if ( i < n-1 && i-1 >= n - nb ) {
                    /* Update A(1:i,i) */
                    #ifdef COMPLEX
                    lapackf77_zlacgv( &n_i_1, W(i-1, iw+1), &ldw );
                    #endif
                    blasf77_zgemv( "No transpose", &i, &n_i_1, &c_neg_one,
                                   A(0, i+1),    &lda,
                                   W(i-1, iw+1), &ldw, &c_one,
                                   A(0, i-1),    &ione );
                    #ifdef COMPLEX
                    lapackf77_zlacgv( &n_i_1, W(i-1, iw+1), &ldw );
                    lapackf77_zlacgv( &n_i_1, A(i-1, i +1), &lda );
                    #endif
                    blasf77_zgemv( "No transpose", &i, &n_i_1, &c_neg_one,
                                   W(0, iw+1), &ldw,
                                   A(i-1, i+1), &lda, &c_one,
                                   A(0, i-1),  &ione );
                    #ifdef COMPLEX
                    lapackf77_zlacgv( &n_i_1, A(i-1, i+1), &lda );
                    #endif
                }

                // synchronize to get zhemv result W(0, iw)
                magmablas_zhemv_mgpu_sync(
                    MagmaUpper, i, c_one, dA, ldda, 0,
                    A(0,i), 1, c_zero, W(0, iw), 1,
                    hwork, lhwork, dwork, ldwork, ngpu, nb0, queues );

                if (i < n-1) {
                    blasf77_zgemv( "No transpose", &i, &n_i_1, &c_neg_one,
                                   A(0, i+1),  &lda,
                                   W(i+1, iw), &ione, &c_one,
                                   W(0, iw),   &ione );

                    blasf77_zgemv( MagmaConjTransStr, &i, &n_i_1, &c_one,
                                   A(0, i+1),  &lda,
                                   A(0, i),    &ione, &c_zero,
                                   W(i+1, iw), &ione );

                    blasf77_zgemv( "No transpose", &i, &n_i_1, &c_neg_one,
                                   W(0, iw+1), &ldw,
                                   W(i+1, iw), &ione, &c_one,
                                   W(0, iw),   &ione );
                }

                blasf77_zscal( &i, &tau[i - 1], W(0, iw), &ione );

                // w := w - 1/2 * tau * (w' v) * v  (standard zlatrd correction)
                value = magma_cblas_zdotc( i, W(0,iw), ione, A(0,i), ione );
                alpha = tau[i - 1] * -0.5f * value;
                blasf77_zaxpy( &i, &alpha, A(0, i), &ione, W(0, iw), &ione );

                // Broadcast the finished column of W to every GPU.
                for( dev=0; dev < ngpu; dev++ ) {
                    magma_setdevice( dev );
                    magma_zsetvector_async( n, W(0,iw), 1, dW(dev, 0, iw), 1, queues[dev] );
                }
            }
        }
    } else {
        /* Reduce first NB columns of lower triangle */
        for (i = 0; i < nb; ++i) {
            /* Update A(i:n,i) */
            n_i = n - i;
            //idw = ((offset+i)/nb)%ngpu;
            if ( i > 0 ) {
                // NOTE(review): this trace_cpu_start has no visible matching
                // trace_cpu_end before the block closes — confirm intended.
                trace_cpu_start( 0, "gemv", "gemv" );
                magmaDoubleComplex wii = -conj( *W(i, i-1) );
                blasf77_zaxpy( &n_i, &wii, A(i, i-1), &ione, A(i, i), &ione );

                wii = -conj( *A(i, i-1) );
                blasf77_zaxpy( &n_i, &wii, W(i, i-1), &ione, A(i, i), &ione );
            }
            if (i < n-1) {
                /* Generate elementary reflector H(i) to annihilate A(i+2:n,i) */
                n_i_1 = n - i - 1;
                trace_cpu_start( 0, "larfg", "larfg" );
                alpha = *A(i+1, i);
                lapackf77_zlarfg( &n_i_1, &alpha, A(min(i+2,n-1), i), &ione, &tau[i] );

                e[i] = MAGMA_Z_REAL( alpha );
                // Store an explicit 1 so the full vector v can be used in BLAS calls.
                *A(i+1,i) = MAGMA_Z_ONE;
                trace_cpu_end( 0 );

                /* Compute W(i+1:n,i) */
                // Broadcast v to every GPU (into the dW1 region).
                // TODO Previously, this set dx2[id] = dW1(id, 0, i)-offset; and used dx2 in zhemv.
                // TODO Now zhemv handles broadcasting x to the GPUs, but data in dW1 is
                // TODO apparently still used in zhetrd_mgpu / zher2k_mgpu.
                for( dev=0; dev < ngpu; dev++ ) {
                    magma_setdevice( dev );
                    magma_zsetvector_async( n, A(0,i), 1, dW1(dev, 0, i), 1, queues[dev] );
                }

                // Launch W(i+1:n,i) := A(i+1:n,i+1:n) * v asynchronously on the GPUs.
                magmablas_zhemv_mgpu(
                    MagmaLower, n_i_1, c_one, dA, ldda, offset+i+1,
                    A(i+1, i), 1, c_zero, W(i+1, i), 1,
                    hwork, lhwork, dwork, ldwork, ngpu, nb0, queues );

                trace_cpu_start( 0, "gemv", "gemv" );
                blasf77_zgemv( MagmaConjTransStr, &n_i_1, &i, &c_one,
                               W(i+1, 0), &ldw,
                               A(i+1, i), &ione, &c_zero, W(0, i), &ione );

                // Stage -A(i+1:n,0:i-1) * W(0:i-1,i) into f; added to W after hemv sync.
                blasf77_zgemv( "No transpose", &n_i_1, &i, &c_neg_one,
                               A(i+1, 0), &lda,
                               W(0, i),   &ione, &c_zero, f, &ione );

                blasf77_zgemv( MagmaConjTransStr, &n_i_1, &i, &c_one,
                               A(i+1, 0), &lda,
                               A(i+1, i), &ione, &c_zero, W(0, i), &ione );
                trace_cpu_end( 0 );

                /* overlap update: CPU applies the previous reflector pair while
                   the GPU hemv above is in flight */
                if ( i > 0 && i+1 < n ) {
                    trace_cpu_start( 0, "gemv", "gemv" );
                    #ifdef COMPLEX
                    lapackf77_zlacgv( &i, W(i+1, 0), &ldw );
                    #endif
                    blasf77_zgemv( "No transpose", &n_i_1, &i, &c_neg_one,
                                   A(i+1, 0), &lda,
                                   W(i+1, 0), &ldw, &c_one,
                                   A(i+1, i+1), &ione );
                    #ifdef COMPLEX
                    lapackf77_zlacgv( &i, W(i+1, 0), &ldw );
                    lapackf77_zlacgv( &i, A(i+1, 0), &lda );
                    #endif
                    blasf77_zgemv( "No transpose", &n_i_1, &i, &c_neg_one,
                                   W(i+1, 0), &ldw,
                                   A(i+1, 0), &lda,
                                   &c_one, A(i+1, i+1), &ione );
                    #ifdef COMPLEX
                    lapackf77_zlacgv( &i, A(i+1, 0), &lda );
                    #endif
                    trace_cpu_end( 0 );
                }

                // synchronize to get zhemv result W(i+1, i)
                magmablas_zhemv_mgpu_sync(
                    MagmaLower, n_i_1, c_one, dA, ldda, offset+i+1,
                    A(i+1, i), 1, c_zero, W(i+1, i), 1,
                    hwork, lhwork, dwork, ldwork, ngpu, nb0, queues );

                trace_cpu_start( 0, "axpy", "axpy" );
                if (i != 0) {
                    // Fold the staged contribution f into the hemv result.
                    blasf77_zaxpy( &n_i_1, &c_one, f, &ione, W(i+1, i), &ione );
                }

                blasf77_zgemv( "No transpose", &n_i_1, &i, &c_neg_one,
                               W(i+1, 0), &ldw,
                               W(0, i),   &ione, &c_one, W(i+1, i), &ione );

                blasf77_zscal( &n_i_1, &tau[i], W(i+1,i), &ione );

                // w := w - 1/2 * tau * (w' v) * v  (standard zlatrd correction)
                value = magma_cblas_zdotc( n_i_1, W(i+1,i), ione, A(i+1,i), ione );
                alpha = tau[i] * -0.5f * value;
                blasf77_zaxpy( &n_i_1, &alpha, A(i+1, i), &ione, W(i+1,i), &ione );
                trace_cpu_end( 0 );

                // Broadcast the finished column of W to every GPU.
                for( dev=0; dev < ngpu; dev++ ) {
                    magma_setdevice( dev );
                    magma_zsetvector_async( n, W(0,i), 1, dW(dev, 0, i), 1, queues[dev] );
                }
            }
        }
    }

    magma_free_cpu( f );

    magma_setdevice( orig_dev );

    return info;
} /* magma_zlatrd_mgpu */