/**
    Passes back a matrix in CSR format: dimensions and the row/col/val arrays.

    If A already lives on the CPU in CSR format, the caller receives aliases
    into A's own arrays (no copy is made, A keeps ownership).
    Otherwise A is transferred/converted to CPU-CSR and the caller receives
    (and now owns) the freshly allocated arrays.

    @param[in]  A      input matrix (any location/storage)
    @param[out] m,n    matrix dimensions
    @param[out] row    CSR row pointer array
    @param[out] col    CSR column index array
    @param[out] val    CSR value array
    @param[in]  queue  magma queue
*/
magma_int_t
magma_zcsrget(
    magma_z_matrix A,
    magma_int_t *m, magma_int_t *n,
    magma_index_t **row, magma_index_t **col, magmaDoubleComplex **val,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    magma_z_matrix A_CPU={Magma_CSR}, A_CSR={Magma_CSR};

    if ( A.memory_location == Magma_CPU && A.storage_type == Magma_CSR ) {
        // alias A's internal arrays; A retains ownership
        *m = A.num_rows;
        *n = A.num_cols;
        *val = A.val;
        *col = A.col;
        *row = A.row;
    } else {
        CHECK( magma_zmtransfer( A, &A_CPU, A.memory_location, Magma_CPU, queue ));
        CHECK( magma_zmconvert( A_CPU, &A_CSR, A_CPU.storage_type, Magma_CSR, queue ));
        CHECK( magma_zcsrget( A_CSR, m, n, row, col, val, queue ));
        // BUG FIX: the recursive call aliased *row/*col/*val into A_CSR,
        // but the cleanup below freed A_CSR and handed the caller dangling
        // pointers. Detach the arrays so ownership passes to the caller.
        A_CSR.row = NULL;
        A_CSR.col = NULL;
        A_CSR.val = NULL;
    }

cleanup:
    magma_zmfree( &A_CSR, queue );
    magma_zmfree( &A_CPU, queue );
    return info;
}
/**
    Ensures the lower-triangular factor L carries a unit diagonal:
    - L->row[1]==1 : L already has a (unit) diagonal entry in row 0;
      re-extract it as Magma_CSRL with Magma_UNITY diagonal ordering.
    - L->row[1]==0 : L is strictly lower triangular; rebuild it with an
      explicit 1.0 appended as the last entry of every row.
    L is replaced in place. Assumes L is a CPU CSR matrix.

    @param[in,out] L      lower-triangular factor, modified in place
    @param[in]     queue  magma queue
*/
magma_int_t
magma_zmLdiagadd(
    magma_z_matrix *L,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    magma_z_matrix LL={Magma_CSR};

    if( L->row[1]==1 ){        // lower triangular with unit diagonal
        LL.diagorder_type = Magma_UNITY;
        CHECK( magma_zmconvert( *L, &LL, Magma_CSR, Magma_CSRL, queue ));
    }
    else if ( L->row[1]==0 ){  // strictly lower triangular
        CHECK( magma_zmtransfer( *L, &LL, Magma_CPU, Magma_CPU, queue ));
        // re-allocate col/val with room for one diagonal entry per row
        magma_free_cpu( LL.col );
        magma_free_cpu( LL.val );
        LL.nnz = L->nnz+L->num_rows;
        CHECK( magma_zmalloc_cpu( &LL.val, LL.nnz ));
        CHECK( magma_index_malloc_cpu( &LL.col, LL.nnz ));
        magma_int_t z=0;
        for( magma_int_t i=0; i<L->num_rows; i++){
            LL.row[i] = z;
            // copy the strictly-lower entries of row i
            for( magma_int_t j=L->row[i]; j<L->row[i+1]; j++){
                LL.val[z] = L->val[j];
                LL.col[z] = L->col[j];
                z++;
            }
            // add unit diagonal
            LL.val[z] = MAGMA_Z_MAKE(1.0, 0.0);
            LL.col[z] = i;
            z++;
        }
        LL.row[LL.num_rows] = z;
        LL.nnz = z;
    }
    else {
        // BUG FIX: this branch used to fall through, destroying L and
        // replacing it with an empty matrix while still returning info=0.
        // Report the unsupported structure instead.
        printf("error: L neither lower nor strictly lower triangular!\n");
        info = MAGMA_ERR_NOT_SUPPORTED;
        goto cleanup;
    }
    magma_zmfree( L, queue );
    CHECK( magma_zmtransfer(LL, L, Magma_CPU, Magma_CPU, queue ));

cleanup:
    if( info != 0 ){
        magma_zmfree( L, queue );
    }
    magma_zmfree( &LL, queue );
    return info;
}
/**
    Transposes a dense vector-block (multi-vector) x into y by flipping its
    major ordering: a ColMajor block becomes RowMajor and vice versa, using
    magmablas_ztranspose on the device. If x is not on the device, it is
    copied there, transposed recursively, and copied back.

    NOTE(review): this routine still uses the legacy
    magmablasGetKernelStream/SetKernelStream API and calls
    magmablas_ztranspose without a queue argument (old dense API), while
    other routines in this file use the queue-based API — confirm against
    the MAGMA version this file targets.

    @param[in]  x      input vector-block
    @param[out] y      transposed output (allocated here)
    @param[in]  queue  magma queue
*/
extern "C" magma_int_t
magma_zvtranspose(
    magma_z_matrix x,
    magma_z_matrix *y,
    magma_queue_t queue )
{
    magma_int_t info = 0;
    magma_int_t m = x.num_rows;
    magma_int_t n = x.num_cols;
    // set queue for old dense routines
    magma_queue_t orig_queue=NULL;
    magmablasGetKernelStream( &orig_queue );
    magma_z_matrix x_d={Magma_CSR}, y_d={Magma_CSR};
    if ( x.memory_location == Magma_DEV ) {
        CHECK( magma_zvinit( y, Magma_DEV, x.num_rows,x.num_cols, MAGMA_Z_ZERO, queue ));
        // dimensions stay the same; only the major flag flips below
        y->num_rows = x.num_rows;
        y->num_cols = x.num_cols;
        y->storage_type = x.storage_type;
        if ( x.major == MagmaColMajor) {
            y->major = MagmaRowMajor;
            magmablas_ztranspose( m, n, x.val, m, y->val, n );
        }
        else {
            y->major = MagmaColMajor;
            magmablas_ztranspose( n, m, x.val, n, y->val, m );
        }
    }
    else {
        // host path: round-trip through the device
        CHECK( magma_zmtransfer( x, &x_d, Magma_CPU, Magma_DEV, queue ));
        CHECK( magma_zvtranspose( x_d, &y_d, queue ));
        CHECK( magma_zmtransfer( y_d, y, Magma_DEV, Magma_CPU, queue ));
    }
cleanup:
    if( info != 0 ){
        magma_zmfree( y, queue );
    }
    magma_zmfree( &x_d, queue );
    magma_zmfree( &y_d, queue );
    magmablasSetKernelStream( orig_queue );
    return info;
}
/**
    Shrinks a rectangular matrix A (m <= n) to the square m x m matrix B by
    zeroing all entries with column index >= num_rows and compressing the
    result. Requires m <= n; otherwise an error is reported.

    @param[in]  A      input matrix
    @param[out] B      shrunk square matrix
    @param[in]  queue  magma queue
*/
extern "C" magma_int_t
magma_zmshrink(
    magma_z_matrix A,
    magma_z_matrix *B,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    magma_z_matrix h_work={Magma_CSR}, h_csr={Magma_CSR},
                   h_out={Magma_CSR}, h_outcsr={Magma_CSR};

    // guard: only matrices with num_rows <= num_cols can be shrunk
    if( A.num_rows > A.num_cols ){
        printf("%% error: A has too many rows: m > n.\n");
        info = MAGMA_ERR_NOT_SUPPORTED;
        goto cleanup;
    }

    if( A.memory_location == Magma_CPU && A.storage_type == Magma_CSR ){
        // direct path: copy, zero out-of-square entries, compress
        CHECK( magma_zmconvert( A, B, Magma_CSR, Magma_CSR, queue ));
        for( magma_int_t k=0; k<A.nnz; k++ ){
            if( B->col[k] >= A.num_rows ){
                B->val[k] = MAGMA_Z_ZERO;
            }
        }
        CHECK( magma_zmcsrcompressor( B, queue ) );
        B->num_cols = B->num_rows;
    }
    else {
        // round-trip: bring A to CPU-CSR, shrink, restore storage/location
        CHECK( magma_zmtransfer( A, &h_work, A.memory_location, Magma_CPU, queue ));
        CHECK( magma_zmconvert( h_work, &h_csr, A.storage_type, Magma_CSR, queue ));
        CHECK( magma_zmshrink( h_csr, &h_outcsr, queue ));
        CHECK( magma_zmconvert( h_outcsr, &h_out, Magma_CSR, A.storage_type, queue ));
        CHECK( magma_zmtransfer( h_out, B, Magma_CPU, A.memory_location, queue ));
    }

cleanup:
    magma_zmfree( &h_work, queue );
    magma_zmfree( &h_out, queue );
    magma_zmfree( &h_csr, queue );
    magma_zmfree( &h_outcsr, queue );
    return info;
}
/**
    Computes the residual norm ||A*x - b||_2 (one value per right-hand side
    when b contains multiple vectors stacked column-wise).

    CONSISTENCY FIX: switched to the queue-aware BLAS wrappers
    (magma_zaxpy/magma_dznrm2 with a queue argument), matching
    magma_zresidual_slice in this file, and removed the legacy
    magmablasGetKernelStream/SetKernelStream juggling which the new queue
    API replaces.

    @param[in]  A      system matrix (device)
    @param[in]  b      right-hand side(s)
    @param[in]  x      approximate solution
    @param[out] res    residual norm(s); array of length num_vecs in the
                       multi-vector case
    @param[in]  queue  magma queue
*/
extern "C" magma_int_t
magma_zresidual(
    magma_z_matrix A, magma_z_matrix b, magma_z_matrix x,
    double *res,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    // some useful variables
    magmaDoubleComplex zero = MAGMA_Z_ZERO, one = MAGMA_Z_ONE,
                                            mone = MAGMA_Z_NEG_ONE;
    magma_int_t dofs = A.num_rows;
    magma_int_t num_vecs = b.num_rows*b.num_cols/A.num_rows;

    magma_z_matrix r={Magma_CSR};

    if ( A.num_rows == b.num_rows ) {
        // single right-hand side
        CHECK( magma_zvinit( &r, Magma_DEV, A.num_rows, b.num_cols, zero, queue ));
        CHECK( magma_z_spmv( one, A, x, zero, r, queue ));          // r = A x
        magma_zaxpy( dofs, mone, b.dval, 1, r.dval, 1, queue );     // r = r - b
        *res = magma_dznrm2( dofs, r.dval, 1, queue );              // res = ||r||
    } else if ((b.num_rows*b.num_cols)%A.num_rows == 0 ) {
        // multiple right-hand sides stacked in b; b(i)/r(i) are the
        // per-vector offset macros defined elsewhere in this file
        CHECK( magma_zvinit( &r, Magma_DEV, b.num_rows,b.num_cols, zero, queue ));
        CHECK( magma_z_spmv( one, A, x, zero, r, queue ));          // r = A x
        for( magma_int_t i=0; i<num_vecs; i++) {
            magma_zaxpy( dofs, mone, b(i), 1, r(i), 1, queue );     // r = r - b
            res[i] = magma_dznrm2( dofs, r(i), 1, queue );          // res = ||r||
        }
    } else {
        printf("error: dimensions do not match.\n");
        info = MAGMA_ERR_NOT_SUPPORTED;
    }

cleanup:
    magma_zmfree(&r, queue );
    return info;
}
/**
    Computes the residual norm restricted to the index slice [start,end):
    res = ||(A*x - b)(start:end)||_2, one value per right-hand side in the
    multi-vector case.

    @param[in]  start,end  slice boundaries (0-based, end exclusive)
    @param[in]  A          system matrix (device)
    @param[in]  b          right-hand side(s)
    @param[in]  x          approximate solution
    @param[out] res        sliced residual norm(s)
    @param[in]  queue      magma queue
*/
extern "C" magma_int_t
magma_zresidual_slice(
    magma_int_t start,
    magma_int_t end,
    magma_z_matrix A,
    magma_z_matrix b,
    magma_z_matrix x,
    double *res,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    // constants
    const magmaDoubleComplex c_zero = MAGMA_Z_ZERO;
    const magmaDoubleComplex c_one = MAGMA_Z_ONE;
    const magmaDoubleComplex c_neg_one = MAGMA_Z_NEG_ONE;

    // some useful variables
    magma_int_t dofs = A.num_rows;
    magma_int_t num_vecs = b.num_rows*b.num_cols/A.num_rows;

    magma_z_matrix r = {Magma_CSR};

    if ( A.num_rows == b.num_rows ) {
        // single right-hand side
        CHECK( magma_zvinit( &r, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
        CHECK( magma_z_spmv( c_one, A, x, c_zero, r, queue ));           // r = A x
        magma_zaxpy( dofs, c_neg_one, b.dval, 1, r.dval, 1, queue );     // r = r - b
        *res = magma_dznrm2( end-start, r.dval+start, 1, queue );        // res = ||r(start:end)||
    } else if ((b.num_rows*b.num_cols)%A.num_rows == 0 ) {
        // multiple right-hand sides; b(i)/r(i) are per-vector offset
        // macros defined elsewhere in this file
        CHECK( magma_zvinit( &r, Magma_DEV, b.num_rows, b.num_cols, c_zero, queue ));
        CHECK( magma_z_spmv( c_one, A, x, c_zero, r, queue ));           // r = A x
        for( magma_int_t i=0; i < num_vecs; i++) {
            magma_zaxpy( dofs, c_neg_one, b(i), 1, r(i), 1, queue );     // r = r - b
            res[i] = magma_dznrm2( end-start, r(i)+start, 1, queue );    // res = ||r(start:end)||
        }
    } else {
        printf("error: dimensions do not match.\n");
        info = MAGMA_ERR_NOT_SUPPORTED;
    }

cleanup:
    magma_zmfree( &r, queue );
    return info;
}
/**
    Removes zero entries from a CSR matrix in place (CSR "compression").
    For a CPU-CSR matrix the work is done directly via
    magma_z_csr_compressor; any other storage/location is round-tripped
    through CPU-CSR and restored afterwards.

    @param[in,out] A      matrix, compressed in place
    @param[in]     queue  magma queue
*/
extern "C" magma_int_t
magma_zmcsrcompressor(
    magma_z_matrix *A,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    magma_z_matrix B={Magma_CSR};
    magma_z_matrix hA={Magma_CSR}, CSRA={Magma_CSR};

    if ( A->memory_location == Magma_CPU && A->storage_type == Magma_CSR ) {
        // work on a copy B; A's arrays are freed and re-allocated by the
        // compressor with the zero entries dropped
        CHECK( magma_zmconvert( *A, &B, Magma_CSR, Magma_CSR, queue ));

        magma_free_cpu( A->row );
        magma_free_cpu( A->col );
        magma_free_cpu( A->val );
        CHECK( magma_z_csr_compressor(&B.val, &B.row, &B.col,
                                      &A->val, &A->row, &A->col,
                                      &A->num_rows, queue ));
        // new nnz is the last entry of the rebuilt row pointer
        A->nnz = A->row[A->num_rows];
    }
    else {
        // remember original format so it can be restored afterwards
        magma_storage_t A_storage = A->storage_type;
        magma_location_t A_location = A->memory_location;
        CHECK( magma_zmtransfer( *A, &hA, A->memory_location, Magma_CPU, queue ));
        CHECK( magma_zmconvert( hA, &CSRA, hA.storage_type, Magma_CSR, queue ));

        // recurse on the CPU-CSR copy (hits the direct branch above)
        CHECK( magma_zmcsrcompressor( &CSRA, queue ));

        magma_zmfree( &hA, queue );
        magma_zmfree( A, queue );
        CHECK( magma_zmconvert( CSRA, &hA, Magma_CSR, A_storage, queue ));
        CHECK( magma_zmtransfer( hA, A, Magma_CPU, A_location, queue ));
        // NOTE(review): these are freed again at cleanup; presumably
        // magma_zmfree nulls the struct so the second free is a no-op —
        // verify against the magma_zmfree implementation.
        magma_zmfree( &hA, queue );
        magma_zmfree( &CSRA, queue );
    }

cleanup:
    magma_zmfree( &hA, queue );
    magma_zmfree( &CSRA, queue );
    magma_zmfree( &B, queue );
    return info;
}
/**
    Computes an initial guess for an incomplete factorization:
    extracts the lower triangle L of A, forms L*L^T, scales L by the
    inverse square roots of the diagonal of L*L^T, and returns the result
    in CSRCOO format in L.

    NOTE(review): the output U is never populated here (it is only freed
    on error) — confirm whether the caller derives U from L.

    @param[in]  A      input matrix (CPU, CSR)
    @param[out] L      scaled lower-triangular initial guess (CSRCOO)
    @param[out] U      unused output (freed on error only)
    @param[in]  queue  magma queue
*/
magma_int_t
magma_zinitguess(
    magma_z_matrix A,
    magma_z_matrix *L,
    magma_z_matrix *U,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    magmaDoubleComplex one = MAGMA_Z_MAKE( 1.0, 0.0 );

    magma_z_matrix hAL={Magma_CSR}, hAU={Magma_CSR}, dAL={Magma_CSR},
        dAU={Magma_CSR}, dALU={Magma_CSR}, hALU={Magma_CSR}, hD={Magma_CSR},
        dD={Magma_CSR}, dL={Magma_CSR}, hL={Magma_CSR};
    magma_int_t i,j;

    magma_int_t offdiags = 0;
    // BUG FIX: diag_offset was an uninitialized pointer; if an early CHECK
    // failed, cleanup would have freed garbage.
    magma_index_t *diag_offset=NULL;
    magmaDoubleComplex *diag_vals=NULL;

    // need only lower triangular
    hAL.diagorder_type = Magma_VALUE;
    CHECK( magma_zmconvert( A, &hAL, Magma_CSR, Magma_CSRL, queue ));

    // need only upper triangular
    CHECK( magma_z_cucsrtranspose( hAL, &hAU, queue ));

    CHECK( magma_zmtransfer( hAL, &dAL, Magma_CPU, Magma_DEV, queue ));
    // BUG FIX: dAU was passed to the spmm below without ever being
    // populated; transfer the upper factor to the device first.
    CHECK( magma_zmtransfer( hAU, &dAU, Magma_CPU, Magma_DEV, queue ));
    CHECK( magma_z_spmm( one, dAL, dAU, &dALU, queue ));           // dALU = L * L^T
    CHECK( magma_zmtransfer( dALU, &hALU, Magma_DEV, Magma_CPU, queue ));

    magma_zmfree( &dAU, queue);
    magma_zmfree( &dALU, queue);

    // generate a unit diagonal matrix hD of matching size
    CHECK( magma_zmalloc_cpu( &diag_vals, offdiags+1 ));
    CHECK( magma_index_malloc_cpu( &diag_offset, offdiags+1 ));
    diag_offset[0] = 0;
    diag_vals[0] = MAGMA_Z_MAKE( 1.0, 0.0 );
    CHECK( magma_zmgenerator( hALU.num_rows, offdiags, diag_offset, diag_vals, &hD, queue ));

    // BUG FIX: hALU was freed *before* this loop, zeroing num_rows and
    // turning the scaling into a silent no-op; free it after the loop.
    // hD(i,i) = 1 / sqrt(|diag(L*L^T)(i)|)
    for(i=0; i<hALU.num_rows; i++){
        for(j=hALU.row[i]; j<hALU.row[i+1]; j++){
            if( hALU.col[j] == i ){
                hD.val[i] = MAGMA_Z_MAKE(
                    1.0 / sqrt(fabs(MAGMA_Z_REAL(hALU.val[j]))) , 0.0 );
            }
        }
    }
    magma_zmfree( &hALU, queue );

    CHECK( magma_zmtransfer( hD, &dD, Magma_CPU, Magma_DEV, queue ));
    magma_zmfree( &hD, queue);

    // scale: dL = D * L
    CHECK( magma_z_spmm( one, dD, dAL, &dL, queue ));
    magma_zmfree( &dAL, queue );
    magma_zmfree( &dD, queue );

    CHECK( magma_zmtransfer( dL, &hL, Magma_DEV, Magma_CPU, queue ));
    CHECK( magma_zmconvert( hL, L, Magma_CSR, Magma_CSRCOO, queue ));

cleanup:
    if( info !=0 ){
        magma_zmfree( L, queue );
        magma_zmfree( U, queue );
    }
    magma_zmfree( &dAU, queue);
    magma_zmfree( &dALU, queue);
    magma_zmfree( &dL, queue );
    magma_zmfree( &hL, queue );
    magma_zmfree( &dAL, queue );
    magma_zmfree( &dD, queue );
    magma_zmfree( &hD, queue);
    magma_zmfree( &hALU, queue );
    // LEAK FIX: hAL, hAU and the generator scratch arrays were never freed
    magma_zmfree( &hAL, queue );
    magma_zmfree( &hAU, queue );
    magma_free_cpu( diag_vals );
    magma_free_cpu( diag_offset );
    return info;
}
/**
    Computes the IC residual: forms LU = C * CT on the device, then
    accumulates
      nonlinres = || (C*CT - A) restricted to the pattern of A ||_F
      res       = || C*CT - A ||_F over the full pattern of LU
    (entries of LU matching A's pattern are overwritten by the difference
    before the second norm is taken).

    @param[in]  C,CT       incomplete Cholesky factor and its transpose (CPU)
    @param[in]  A          reference matrix (CPU, CSR)
    @param[out] LU         product/difference matrix (CPU)
    @param[out] res        Frobenius norm over LU's pattern
    @param[out] nonlinres  Frobenius norm over A's pattern
    @param[in]  queue      magma queue
*/
magma_int_t
magma_zicres(
    magma_z_matrix A,
    magma_z_matrix C,
    magma_z_matrix CT,
    magma_z_matrix *LU,
    real_Double_t *res,
    real_Double_t *nonlinres,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    magmaDoubleComplex one = MAGMA_Z_MAKE( 1.0, 0.0 );

    magma_z_matrix L_d={Magma_CSR}, U_d={Magma_CSR}, LU_d={Magma_CSR};

    *res = 0.0;
    *nonlinres = 0.0;

    // form LU = C * CT on the device and bring the product back to host
    CHECK( magma_zmtransfer( C, &L_d, Magma_CPU, Magma_DEV, queue ));
    CHECK( magma_zmtransfer( CT, &U_d, Magma_CPU, Magma_DEV, queue ));
    CHECK( magma_z_spmm( one, L_d, U_d, &LU_d, queue ));
    CHECK( magma_zmtransfer(LU_d, LU, Magma_DEV, Magma_CPU, queue ));
    magma_zmfree( &LU_d, queue );

    // accumulate the squared mismatch over the sparsity pattern of A,
    // overwriting the matching LU entries with the difference
    for( magma_int_t row=0; row<A.num_rows; row++ ){
        for( magma_int_t ja=A.row[row]; ja<A.row[row+1]; ja++ ){
            magma_index_t acol = A.col[ja];
            for( magma_int_t jl=LU->row[row]; jl<LU->row[row+1]; jl++ ){
                if( LU->col[jl] == acol ){
                    magmaDoubleComplex diff = MAGMA_Z_MAKE(
                        MAGMA_Z_REAL( LU->val[jl] ) - MAGMA_Z_REAL( A.val[ja] ), 0.0 );
                    LU->val[jl] = diff;
                    real_Double_t mag = (real_Double_t) fabs( MAGMA_Z_REAL(diff) );
                    (*nonlinres) += mag*mag;
                }
            }
        }
    }

    // full Frobenius norm over the (now updated) LU pattern
    for( magma_int_t row=0; row<LU->num_rows; row++ ){
        for( magma_int_t jl=LU->row[row]; jl<LU->row[row+1]; jl++ ){
            real_Double_t mag = (real_Double_t) fabs( MAGMA_Z_REAL(LU->val[jl]) );
            (*res) += mag*mag;
        }
    }

    (*res) = sqrt((*res));
    (*nonlinres) = sqrt((*nonlinres));

cleanup:
    if( info !=0 ){
        magma_zmfree( LU, queue );
    }
    magma_zmfree( &L_d, queue );
    magma_zmfree( &U_d, queue );
    magma_zmfree( &LU_d, queue );
    return info;
}
/**
    Computes the ILU residual: builds an L with explicit unit diagonal if
    needed, forms LU = L * U on the device, then accumulates
      nonlinres = || (L*U - A) restricted to the pattern of A ||_F
      res       = || L*U - A ||_F over the full pattern of LU
    (entries of LU matching A's pattern are overwritten by the difference
    before the second norm is taken).

    @param[in]  A          reference matrix (CPU, CSR)
    @param[in]  L,U        incomplete factors (CPU)
    @param[out] LU         product/difference matrix (CPU)
    @param[out] res        Frobenius norm over LU's pattern
    @param[out] nonlinres  Frobenius norm over A's pattern
    @param[in]  queue      magma queue
*/
magma_int_t
magma_zilures(
    magma_z_matrix A,
    magma_z_matrix L,
    magma_z_matrix U,
    magma_z_matrix *LU,
    real_Double_t *res,
    real_Double_t *nonlinres,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    magmaDoubleComplex tmp;
    real_Double_t tmp2;
    magma_int_t i, j, k;

    magmaDoubleComplex one = MAGMA_Z_MAKE( 1.0, 0.0 );

    magma_z_matrix LL={Magma_CSR}, L_d={Magma_CSR}, U_d={Magma_CSR}, LU_d={Magma_CSR};

    // BUG FIX: the accumulators were read before ever being written;
    // initialize them (consistent with magma_zicres).
    *res = 0.0;
    *nonlinres = 0.0;

    if( L.row[1]==1 ){        // lower triangular with unit diagonal
        LL.diagorder_type = Magma_UNITY;
        CHECK( magma_zmconvert( L, &LL, Magma_CSR, Magma_CSRL, queue ));
    }
    else if ( L.row[1]==0 ){  // strictly lower triangular
        CHECK( magma_zmtransfer( L, &LL, Magma_CPU, Magma_CPU, queue ));
        magma_free_cpu( LL.col );
        magma_free_cpu( LL.val );
        LL.nnz = L.nnz+L.num_rows;
        CHECK( magma_zmalloc_cpu( &LL.val, LL.nnz ));
        CHECK( magma_index_malloc_cpu( &LL.col, LL.nnz ));
        magma_int_t z=0;
        for (i=0; i < L.num_rows; i++) {
            LL.row[i] = z;
            for (j=L.row[i]; j < L.row[i+1]; j++) {
                LL.val[z] = L.val[j];
                LL.col[z] = L.col[j];
                z++;
            }
            // add unit diagonal
            LL.val[z] = MAGMA_Z_MAKE(1.0, 0.0);
            LL.col[z] = i;
            z++;
        }
        LL.row[LL.num_rows] = z;
    }
    else {
        // BUG FIX: previously only printed and continued with an empty LL;
        // now fail explicitly.
        printf("error: L neither lower nor strictly lower triangular!\n");
        info = MAGMA_ERR_NOT_SUPPORTED;
        goto cleanup;
    }

    // form LU = LL * U on the device, bring the product back to host
    CHECK( magma_zmtransfer( LL, &L_d, Magma_CPU, Magma_DEV, queue ));
    CHECK( magma_zmtransfer( U, &U_d, Magma_CPU, Magma_DEV, queue ));
    magma_zmfree( &LL, queue );
    CHECK( magma_z_spmm( one, L_d, U_d, &LU_d, queue ));
    CHECK( magma_zmtransfer(LU_d, LU, Magma_DEV, Magma_CPU, queue ));
    magma_zmfree( &L_d, queue );
    magma_zmfree( &U_d, queue );
    magma_zmfree( &LU_d, queue );

    // compute Frobenius norm of A-LU over the pattern of A,
    // overwriting the matching LU entries with the difference
    for(i=0; i<A.num_rows; i++){
        for(j=A.row[i]; j<A.row[i+1]; j++){
            magma_index_t lcol = A.col[j];
            for(k=LU->row[i]; k<LU->row[i+1]; k++){
                if( LU->col[k] == lcol ){
                    tmp = MAGMA_Z_MAKE(
                        MAGMA_Z_REAL( LU->val[k] )- MAGMA_Z_REAL( A.val[j] ), 0.0 );
                    LU->val[k] = tmp;
                    tmp2 = (real_Double_t) fabs( MAGMA_Z_REAL(tmp) );
                    (*nonlinres) = (*nonlinres) + tmp2*tmp2;
                }
            }
        }
    }

    // full Frobenius norm over the (now updated) LU pattern
    for(i=0; i<LU->num_rows; i++){
        for(j=LU->row[i]; j<LU->row[i+1]; j++){
            tmp2 = (real_Double_t) fabs( MAGMA_Z_REAL(LU->val[j]) );
            (*res) = (*res) + tmp2* tmp2;
        }
    }

    (*res) = sqrt((*res));
    (*nonlinres) = sqrt((*nonlinres));

cleanup:
    if( info !=0 ){
        magma_zmfree( LU, queue );
    }
    magma_zmfree( &LL, queue );
    magma_zmfree( &L_d, queue );
    magma_zmfree( &U_d, queue );
    magma_zmfree( &LU_d, queue );
    return info;
}
/**
    Computes res = || (L*U - A) restricted to the pattern of A ||_F.
    The product LU is formed on the device; the per-entry differences are
    written into a host copy of A (A_t) and accumulated. LU is freed before
    returning (only the norm survives).

    @param[in]  A      reference matrix (CPU, CSR)
    @param[in]  L,U    incomplete factors (CPU)
    @param[out] LU     scratch for the product (freed before return)
    @param[out] res    Frobenius norm of the pattern-restricted mismatch
    @param[in]  queue  magma queue
*/
magma_int_t
magma_znonlinres(
    magma_z_matrix A,
    magma_z_matrix L,
    magma_z_matrix U,
    magma_z_matrix *LU,
    real_Double_t *res,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    real_Double_t tmp2;
    magma_int_t i,j,k;

    magmaDoubleComplex one = MAGMA_Z_MAKE( 1.0, 0.0 );

    magma_z_matrix L_d={Magma_CSR}, U_d={Magma_CSR}, LU_d={Magma_CSR}, A_t={Magma_CSR};

    // BUG FIX: *res was accumulated below without ever being initialized.
    *res = 0.0;

    // form LU = L * U on the device, keep a host copy A_t of A's pattern
    CHECK( magma_zmtransfer( L, &L_d, Magma_CPU, Magma_DEV, queue ));
    CHECK( magma_zmtransfer( U, &U_d, Magma_CPU, Magma_DEV, queue ));
    CHECK( magma_zmtransfer( A, &A_t, Magma_CPU, Magma_CPU, queue ));
    CHECK( magma_z_spmm( one, L_d, U_d, &LU_d, queue ));

    CHECK( magma_zmtransfer(LU_d, LU, Magma_DEV, Magma_CPU, queue ));
    magma_zmfree( &L_d, queue );
    magma_zmfree( &U_d, queue );
    magma_zmfree( &LU_d, queue );

    // compute Frobenius norm of A-LU over the pattern of A
    for(i=0; i<A.num_rows; i++){
        for(j=A.row[i]; j<A.row[i+1]; j++){
            magma_index_t lcol = A.col[j];
            // entries of A absent from LU contribute |0 - 0| here; the
            // A-value itself only enters where LU has a matching entry
            magmaDoubleComplex newval = MAGMA_Z_MAKE(0.0, 0.0);
            for(k=LU->row[i]; k<LU->row[i+1]; k++){
                if( LU->col[k] == lcol ){
                    newval = MAGMA_Z_MAKE(
                        MAGMA_Z_REAL( LU->val[k] )- MAGMA_Z_REAL( A.val[j] ), 0.0 );
                }
            }
            A_t.val[j] = newval;
        }
    }

    for(i=0; i<A.num_rows; i++){
        for(j=A.row[i]; j<A.row[i+1]; j++){
            tmp2 = (real_Double_t) fabs( MAGMA_Z_REAL(A_t.val[j]) );
            (*res) = (*res) + tmp2* tmp2;
        }
    }

    magma_zmfree( LU, queue );
    magma_zmfree( &A_t, queue );

    (*res) = sqrt((*res));

cleanup:
    if( info !=0 ){
        magma_zmfree( LU, queue );
    }
    magma_zmfree( &A_t, queue );
    magma_zmfree( &L_d, queue );
    magma_zmfree( &U_d, queue );
    magma_zmfree( &LU_d, queue );
    return info;
}
/**
    Solves A x = b with the merged-kernel BiCGSTAB variant: all solver
    vectors (rr, r, p, v, s, t) are slices of one contiguous device buffer q
    and the scalar parameters live in the 8-element device array skp, so the
    per-iteration vector updates collapse into a few fused kernels
    (magma_zbicgmerge*).

    @param[in]     A           system matrix (device)
    @param[in]     b           right-hand side
    @param[in,out] x           initial guess / solution
    @param[in,out] solver_par  solver parameters and convergence feedback
    @param[in]     queue       magma queue
*/
extern "C" magma_int_t
magma_zbicgstab_merge3(
    magma_z_matrix A, magma_z_matrix b,
    magma_z_matrix *x, magma_z_solver_par *solver_par,
    magma_queue_t queue )
{
    magma_int_t info = MAGMA_NOTCONVERGED;

    // prepare solver feedback
    solver_par->solver = Magma_BICGSTABMERGE;
    solver_par->numiter = 0;
    solver_par->spmv_count = 0;

    // solver variables
    magmaDoubleComplex alpha, beta, omega, rho_old, rho_new, *skp_h={0};
    double nom, nom0, betanom, nomb;

    // some useful variables
    magmaDoubleComplex c_zero = MAGMA_Z_ZERO, c_one = MAGMA_Z_ONE;
    magma_int_t dofs = A.num_rows;

    // workspace
    magma_z_matrix q={Magma_CSR}, r={Magma_CSR}, rr={Magma_CSR}, p={Magma_CSR}, v={Magma_CSR}, s={Magma_CSR}, t={Magma_CSR};
    magmaDoubleComplex *d1=NULL, *d2=NULL, *skp=NULL;
    d1 = NULL;
    d2 = NULL;
    skp = NULL;
    CHECK( magma_zmalloc( &d1, dofs*(2) ));
    CHECK( magma_zmalloc( &d2, dofs*(2) ));
    // array for the parameters
    CHECK( magma_zmalloc( &skp, 8 ));
    // skp = [alpha|beta|omega|rho_old|rho|nom|tmp1|tmp2]
    CHECK( magma_zvinit( &q, Magma_DEV, dofs*6, 1, c_zero, queue ));

    // q = rr|r|p|v|s|t : set up header-only matrices that will alias
    // slices of q's device buffer (dval assigned below via the q(i) macro)
    rr.memory_location = Magma_DEV; rr.dval = NULL; rr.num_rows = rr.nnz = dofs; rr.num_cols = 1; rr.storage_type = Magma_DENSE;
    r.memory_location = Magma_DEV; r.dval = NULL; r.num_rows = r.nnz = dofs; r.num_cols = 1; r.storage_type = Magma_DENSE;
    p.memory_location = Magma_DEV; p.dval = NULL; p.num_rows = p.nnz = dofs; p.num_cols = 1; p.storage_type = Magma_DENSE;
    v.memory_location = Magma_DEV; v.dval = NULL; v.num_rows = v.nnz = dofs; v.num_cols = 1; v.storage_type = Magma_DENSE;
    s.memory_location = Magma_DEV; s.dval = NULL; s.num_rows = s.nnz = dofs; s.num_cols = 1; s.storage_type = Magma_DENSE;
    t.memory_location = Magma_DEV; t.dval = NULL; t.num_rows = t.nnz = dofs; t.num_cols = 1; t.storage_type = Magma_DENSE;

    // alias the six dofs-sized slices of q (q(i) is a file-level macro)
    rr.dval = q(0);
    r.dval = q(1);
    p.dval = q(2);
    v.dval = q(3);
    s.dval = q(4);
    t.dval = q(5);

    // solver setup
    CHECK( magma_zresidualvec( A, b, *x, &r, &nom0, queue));
    magma_zcopy( dofs, r.dval, 1, q(0), 1, queue );                   // rr = r
    magma_zcopy( dofs, r.dval, 1, q(1), 1, queue );                   // q = r
    betanom = nom0;
    nom = nom0*nom0;
    rho_new = magma_zdotc( dofs, r.dval, 1, r.dval, 1, queue );       // rho=<rr,r>
    rho_old = omega = alpha = MAGMA_Z_MAKE( 1.0, 0. );
    beta = rho_new;
    solver_par->init_res = nom0;
    // array on host for the parameters
    CHECK( magma_zmalloc_cpu( &skp_h, 8 ));
    nomb = magma_dznrm2( dofs, b.dval, 1, queue );
    if ( nomb == 0.0 ){
        nomb=1.0;
    }
    solver_par->final_res = solver_par->init_res;
    solver_par->iter_res = solver_par->init_res;
    if ( solver_par->verbose > 0 ) {
        solver_par->res_vec[0] = nom0;
        solver_par->timing[0] = 0.0;
    }
    // seed the device-side parameter array
    skp_h[0]=alpha;
    skp_h[1]=beta;
    skp_h[2]=omega;
    skp_h[3]=rho_old;
    skp_h[4]=rho_new;
    skp_h[5]=MAGMA_Z_MAKE(nom, 0.0);
    magma_zsetvector( 8, skp_h, 1, skp, 1, queue );
    CHECK( magma_z_spmv( c_one, A, r, c_zero, v, queue ));            // z = A r
    nomb = magma_dznrm2( dofs, b.dval, 1, queue );
    // early exit if the initial guess already satisfies the tolerances
    if( nom0 < solver_par->atol ||
        nom0/nomb < solver_par->rtol ){
        info = MAGMA_SUCCESS;
        goto cleanup;
    }
    //Chronometry
    real_Double_t tempo1, tempo2;
    tempo1 = magma_sync_wtime( queue );
    solver_par->numiter = 0;
    solver_par->spmv_count = 0;
    // start iteration
    do
    {
        solver_par->numiter++;
        // computes p=r+beta*(p-omega*v)
        CHECK( magma_zbicgmerge1( dofs, skp, v.dval, r.dval, p.dval, queue ));
        CHECK( magma_z_spmv( c_one, A, p, c_zero, v, queue ));        // v = Ap
        solver_par->spmv_count++;
        // alpha = rho / <rr,v>  (computed device-side into skp)
        CHECK( magma_zmdotc( dofs, 1, q.dval, v.dval, d1, d2, skp, queue ));
        CHECK( magma_zbicgmerge4( 1, skp, queue ));
        CHECK( magma_zbicgmerge2( dofs, skp, r.dval, v.dval, s.dval, queue )); // s=r-alpha*v
        CHECK( magma_z_spmv( c_one, A, s, c_zero, t, queue ));        // t=As
        solver_par->spmv_count++;
        // omega = <t,s> / <t,t>  (computed device-side into skp)
        CHECK( magma_zmdotc( dofs, 2, q.dval+4*dofs, t.dval, d1, d2, skp+6, queue ));
        CHECK( magma_zbicgmerge4( 2, skp, queue ));
        // fused update of x, r, beta
        CHECK( magma_zbicgmerge_xrbeta( dofs, d1, d2, q.dval, r.dval, p.dval, s.dval, t.dval, x->dval, skp, queue ));
        // check stopping criterion (async copy of the residual norm)
        magma_zgetvector_async( 1 , skp+5, 1, skp_h+5, 1, queue );
        betanom = sqrt(MAGMA_Z_REAL(skp_h[5]));
        if ( solver_par->verbose > 0 ) {
            tempo2 = magma_sync_wtime( queue );
            if ( (solver_par->numiter)%solver_par->verbose==0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) betanom;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        if ( betanom < solver_par->atol ||
             betanom/nomb < solver_par->rtol ) {
            break;
        }
    }
    while ( solver_par->numiter+1 <= solver_par->maxiter );

    // final bookkeeping: true residual and convergence classification
    tempo2 = magma_sync_wtime( queue );
    solver_par->runtime = (real_Double_t) tempo2-tempo1;
    double residual;
    CHECK( magma_zresidualvec( A, b, *x, &r, &residual, queue));
    solver_par->iter_res = betanom;
    solver_par->final_res = residual;

    if ( solver_par->numiter < solver_par->maxiter ) {
        info = MAGMA_SUCCESS;
    } else if ( solver_par->init_res > solver_par->final_res ) {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose==0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) betanom;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        info = MAGMA_SLOW_CONVERGENCE;
        if( solver_par->iter_res < solver_par->atol ||
            solver_par->iter_res/solver_par->init_res < solver_par->rtol ){
            info = MAGMA_SUCCESS;
        }
    }
    else {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose==0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) betanom;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        info = MAGMA_DIVERGENCE;
    }

cleanup:
    magma_zmfree(&q, queue );  // frees all vectors (rr..t alias q's buffer)
    magma_free(d1);
    magma_free(d2);
    magma_free( skp );
    magma_free_cpu( skp_h );

    solver_par->info = info;
    return info;
}   /* zbicgstab_merge */
/**
    Solves A x = b with a preconditioned TFQMR variant using merged kernels
    (magma_ztfqmr_1..5). Each pass through the do-loop performs the two
    half-steps of a TFQMR iteration unrolled, with left/right preconditioner
    applications before each SpMV.

    @param[in]     A            system matrix (device)
    @param[in]     b            right-hand side
    @param[in,out] x            initial guess / solution
    @param[in,out] solver_par   solver parameters and convergence feedback
    @param[in]     precond_par  preconditioner
    @param[in]     queue        magma queue
*/
extern "C" magma_int_t
magma_zptfqmr_merge(
    magma_z_matrix A, magma_z_matrix b, magma_z_matrix *x,
    magma_z_solver_par *solver_par,
    magma_z_preconditioner *precond_par,
    magma_queue_t queue )
{
    magma_int_t info = MAGMA_NOTCONVERGED;

    // prepare solver feedback
    solver_par->solver = Magma_TFQMRMERGE;
    solver_par->numiter = 0;
    solver_par->spmv_count = 0;

    // local variables
    magmaDoubleComplex c_zero = MAGMA_Z_ZERO, c_one = MAGMA_Z_ONE;
    // solver variables
    double nom0, r0, res=0, nomb;
    magmaDoubleComplex rho = c_one, rho_l = c_one, eta = c_zero , c = c_zero ,
                        theta = c_zero , tau = c_zero, alpha = c_one, beta = c_zero,
                        sigma = c_zero;

    magma_int_t dofs = A.num_rows* b.num_cols;

    // GPU workspace
    magma_z_matrix r={Magma_CSR}, r_tld={Magma_CSR},
                    pu_m={Magma_CSR}, d={Magma_CSR}, w={Magma_CSR}, v={Magma_CSR},
                    t={Magma_CSR}, u_mp1={Magma_CSR}, u_m={Magma_CSR}, Au={Magma_CSR},
                    Ad={Magma_CSR}, Au_new={Magma_CSR};
    CHECK( magma_zvinit( &t, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &r, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &u_mp1,Magma_DEV, A.num_rows, b.num_cols, c_one, queue ));
    CHECK( magma_zvinit( &r_tld,Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &u_m, Magma_DEV, A.num_rows, b.num_cols, c_one, queue ));
    CHECK( magma_zvinit( &pu_m, Magma_DEV, A.num_rows, b.num_cols, c_one, queue ));
    CHECK( magma_zvinit( &v, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &d, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &w, Magma_DEV, A.num_rows, b.num_cols, c_one, queue ));
    CHECK( magma_zvinit( &Ad, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &Au_new, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &Au, Magma_DEV, A.num_rows, b.num_cols, c_one, queue ));

    // solver setup: r = b - A x, and seed the shadow/work vectors with r
    CHECK( magma_zresidualvec( A, b, *x, &r, &nom0, queue));
    solver_par->init_res = nom0;
    magma_zcopy( dofs, r.dval, 1, r_tld.dval, 1, queue );
    magma_zcopy( dofs, r.dval, 1, w.dval, 1, queue );
    magma_zcopy( dofs, r.dval, 1, u_m.dval, 1, queue );

    // preconditioner
    CHECK( magma_z_applyprecond_left( MagmaNoTrans, A, u_m, &t, precond_par, queue ));
    CHECK( magma_z_applyprecond_right( MagmaNoTrans, A, t, &pu_m, precond_par, queue ));

    CHECK( magma_z_spmv( c_one, A, pu_m, c_zero, v, queue ));   // v = A u
    magma_zcopy( dofs, v.dval, 1, Au.dval, 1, queue );
    nomb = magma_dznrm2( dofs, b.dval, 1, queue );
    if ( nomb == 0.0 ){
        nomb=1.0;
    }
    if ( (r0 = nomb * solver_par->rtol) < ATOLERANCE ){
        r0 = ATOLERANCE;
    }
    solver_par->final_res = solver_par->init_res;
    solver_par->iter_res = solver_par->init_res;
    if ( solver_par->verbose > 0 ) {
        solver_par->res_vec[0] = (real_Double_t)nom0;
        solver_par->timing[0] = 0.0;
    }
    // early exit if the initial guess already satisfies the tolerance
    if ( nom0 < r0 ) {
        info = MAGMA_SUCCESS;
        goto cleanup;
    }

    tau = magma_zsqrt( magma_zdotc( dofs, r.dval, 1, r_tld.dval, 1, queue) );
    rho = magma_zdotc( dofs, r.dval, 1, r_tld.dval, 1, queue );
    rho_l = rho;

    //Chronometry
    real_Double_t tempo1, tempo2;
    tempo1 = magma_sync_wtime( queue );

    solver_par->numiter = 0;
    solver_par->spmv_count = 0;
    // start iteration
    do
    {
        solver_par->numiter++;

        // do this every iteration as unrolled
        alpha = rho / magma_zdotc( dofs, v.dval, 1, r_tld.dval, 1, queue );
        sigma = theta * theta / alpha * eta;

        // first half-step: fused update of u, w, d, Ad
        magma_ztfqmr_1( r.num_rows, r.num_cols, alpha, sigma, v.dval, Au.dval,
                        u_m.dval, pu_m.dval, u_mp1.dval, w.dval, d.dval, Ad.dval,
                        queue );

        theta = magma_zsqrt( magma_zdotc(dofs, w.dval, 1, w.dval, 1, queue) ) / tau;
        c = c_one / magma_zsqrt( c_one + theta*theta );
        tau = tau * theta *c;
        eta = c * c * alpha;
        sigma = theta * theta / alpha * eta;

        // breakdown guard: any NaN/Inf in the scalars means divergence
        if ( magma_z_isnan_inf( theta ) || magma_z_isnan_inf( c )
            || magma_z_isnan_inf( tau ) || magma_z_isnan_inf( eta )
            || magma_z_isnan_inf( sigma ) ) {
            info = MAGMA_DIVERGENCE;
            break;
        }

        // x = x + eta * d ;  r = r - eta * Ad (fused)
        magma_ztfqmr_2( r.num_rows, r.num_cols, eta, d.dval, Ad.dval, x->dval,
                        r.dval, queue );

        res = magma_dznrm2( dofs, r.dval, 1, queue );
        if ( solver_par->verbose > 0 ) {
            tempo2 = magma_sync_wtime( queue );
            if ( (solver_par->numiter)%solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        if ( res/nomb <= solver_par->rtol || res <= solver_par->atol ){
            info = MAGMA_SUCCESS;
            break;
        }

        // preconditioner
        CHECK( magma_z_applyprecond_left( MagmaNoTrans, A, u_mp1, &t, precond_par, queue ));
        CHECK( magma_z_applyprecond_right( MagmaNoTrans, A, t, &pu_m, precond_par, queue ));

        CHECK( magma_z_spmv( c_one, A, pu_m, c_zero, Au_new, queue )); // Au_new = A u_mp1
        solver_par->spmv_count++;
        magma_zcopy( dofs, Au_new.dval, 1, Au.dval, 1, queue );
        magma_zcopy( dofs, u_mp1.dval, 1, u_m.dval, 1, queue );

        // here starts the second part of the loop #################################

        magma_ztfqmr_5( r.num_rows, r.num_cols, alpha, sigma, v.dval, Au.dval,
                        pu_m.dval, w.dval, d.dval, Ad.dval, queue );

        sigma = theta * theta / alpha * eta;

        theta = magma_zsqrt( magma_zdotc(dofs, w.dval, 1, w.dval, 1, queue) ) / tau;
        c = c_one / magma_zsqrt( c_one + theta*theta );
        tau = tau * theta *c;
        eta = c * c * alpha;

        // x = x + eta * d ;  r = r - eta * Ad (fused)
        magma_ztfqmr_2( r.num_rows, r.num_cols, eta, d.dval, Ad.dval, x->dval,
                        r.dval, queue );

        res = magma_dznrm2( dofs, r.dval, 1, queue );
        if ( solver_par->verbose > 0 ) {
            tempo2 = magma_sync_wtime( queue );
            if ( (solver_par->numiter)%solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        if ( res/nomb <= solver_par->rtol || res <= solver_par->atol ){
            info = MAGMA_SUCCESS;
            break;
        }

        // rho and beta for the next iteration
        rho = magma_zdotc( dofs, w.dval, 1, r_tld.dval, 1, queue );
        beta = rho / rho_l;
        rho_l = rho;

        // u_mp1 = w + beta * u_m (fused)
        magma_ztfqmr_3( r.num_rows, r.num_cols, beta, w.dval, u_m.dval,
                        u_mp1.dval, queue );

        // preconditioner
        CHECK( magma_z_applyprecond_left( MagmaNoTrans, A, u_mp1, &t, precond_par, queue ));
        CHECK( magma_z_applyprecond_right( MagmaNoTrans, A, t, &pu_m, precond_par, queue ));

        CHECK( magma_z_spmv( c_one, A, pu_m, c_zero, Au_new, queue )); // Au_new = A pu_m
        solver_par->spmv_count++;
        // v = Au_new + beta*(Au + beta*v) (fused)
        magma_ztfqmr_4( r.num_rows, r.num_cols, beta, Au_new.dval, v.dval,
                        Au.dval, queue );
        magma_zcopy( dofs, u_mp1.dval, 1, u_m.dval, 1, queue );
    }
    while ( solver_par->numiter+1 <= solver_par->maxiter );

    // final bookkeeping: true residual and convergence classification
    tempo2 = magma_sync_wtime( queue );
    solver_par->runtime = (real_Double_t) tempo2-tempo1;
    double residual;
    CHECK( magma_zresidualvec( A, b, *x, &r, &residual, queue));
    solver_par->iter_res = res;
    solver_par->final_res = residual;

    if ( solver_par->numiter < solver_par->maxiter ) {
        info = MAGMA_SUCCESS;
    } else if ( solver_par->init_res > solver_par->final_res ) {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        info = MAGMA_SLOW_CONVERGENCE;
        if( solver_par->iter_res < solver_par->rtol*solver_par->init_res ||
            solver_par->iter_res < solver_par->atol ) {
            info = MAGMA_SUCCESS;
        }
    }
    else {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        info = MAGMA_DIVERGENCE;
    }

cleanup:
    magma_zmfree(&r, queue );
    magma_zmfree(&r_tld, queue );
    magma_zmfree(&d, queue );
    magma_zmfree(&w, queue );
    magma_zmfree(&v, queue );
    magma_zmfree(&u_m, queue );
    magma_zmfree(&u_mp1, queue );
    magma_zmfree(&pu_m, queue );
    // NOTE(review): d is freed twice (also above); presumably magma_zmfree
    // nulls the struct so this is a harmless no-op — verify.
    magma_zmfree(&d, queue );
    magma_zmfree(&t, queue );
    magma_zmfree(&Au, queue );
    magma_zmfree(&Au_new, queue );
    magma_zmfree(&Ad, queue );

    solver_par->info = info;
    return info;
}   /* magma_zptfqmr_merge */
/**
    Block-asynchronous Jacobi iteration with overlapping diagonal blocks.
    A is split into 'matrices' shifted diagonal/off-diagonal pairs
    (D_d[i], R_d[i]) of blocksize 256 with overlap blocksize/matrices;
    magma_zbajac_csr_overlap sweeps them for 'localiter' local iterations.

    @param[in]     A            system matrix
    @param[in]     b            right-hand side
    @param[in,out] x            initial guess / solution
    @param[in,out] solver_par   solver parameters and convergence feedback
    @param[in]     precond_par  levels = number of overlapping splittings,
                                maxiter = local iteration count
    @param[in]     queue        magma queue
*/
extern "C" magma_int_t
magma_zbaiter_overlap(
    magma_z_matrix A,
    magma_z_matrix b,
    magma_z_matrix *x,
    magma_z_solver_par *solver_par,
    magma_z_preconditioner *precond_par,
    magma_queue_t queue )
{
    magma_int_t info = MAGMA_NOTCONVERGED;

    // prepare solver feedback
    solver_par->solver = Magma_BAITERO;

    // some useful variables
    magmaDoubleComplex c_zero = MAGMA_Z_ZERO;

    // initial residual
    real_Double_t tempo1, tempo2, runtime=0;
    double residual;

    magma_int_t localiter = precond_par->maxiter;

    magma_z_matrix Ah={Magma_CSR}, ACSR={Magma_CSR}, A_d={Magma_CSR},
                   r={Magma_CSR}, D={Magma_CSR}, R={Magma_CSR};

    // setup
    magma_int_t matrices;
    matrices = precond_par->levels;
    // BUG FIX: track how many D_d/R_d slots were actually populated so the
    // cleanup path never frees uninitialized stack structs (previously an
    // early goto freed all 'matrices' entries of the raw arrays).
    magma_int_t valid_matrices = 0;
    struct magma_z_matrix D_d[ 256 ];
    struct magma_z_matrix R_d[ 256 ];
    magma_int_t overlap;
    magma_int_t blocksize = 256;
    if( matrices==2 || matrices==4 || matrices==8 || matrices==16 ||
        matrices==32 || matrices==64 || matrices==128 ){
        overlap = blocksize/matrices;
    }else if( matrices == 1){
        overlap = 0;
    }else{
        printf("error: overlap ratio not supported.\n");
        // BUG FIX: report a proper error code instead of leaving
        // MAGMA_NOTCONVERGED on an unsupported configuration.
        info = MAGMA_ERR_NOT_SUPPORTED;
        goto cleanup;
    }

    CHECK( magma_zmtransfer( A, &Ah, A.memory_location, Magma_CPU, queue ));
    CHECK( magma_zmconvert( Ah, &ACSR, Ah.storage_type, Magma_CSR, queue ));

    CHECK( magma_zmtransfer( ACSR, &A_d, Magma_CPU, Magma_DEV, queue ));

    CHECK( magma_zvinit( &r, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zresidualvec( A_d, b, *x, &r, &residual, queue));
    solver_par->init_res = residual;
    if ( solver_par->verbose > 0 ) {
        solver_par->res_vec[0] = (real_Double_t) residual;
    }

    // setup: split ACSR into overlapping (D,R) pairs on the device
    for( int i=0; i<matrices; i++ ){
        CHECK( magma_zcsrsplit( i*overlap, 256, ACSR, &D, &R, queue ));
        CHECK( magma_zmtransfer( D, &D_d[i], Magma_CPU, Magma_DEV, queue ));
        CHECK( magma_zmtransfer( R, &R_d[i], Magma_CPU, Magma_DEV, queue ));
        valid_matrices = i+1;
        magma_zmfree(&D, queue );
        magma_zmfree(&R, queue );
    }

    magma_int_t iterinc;
    if( solver_par->verbose == 0 ){
        iterinc = solver_par->maxiter;
    }
    else{
        iterinc = solver_par->verbose;
    }
    solver_par->numiter = 0;
    solver_par->spmv_count = 0;
    // block-asynchronous iteration iterator
    do
    {
        tempo1 = magma_sync_wtime( queue );
        solver_par->numiter+= iterinc;
        for( int z=0; z<iterinc; z++){
            CHECK( magma_zbajac_csr_overlap( localiter, matrices, overlap,
                                             D_d, R_d, b, x, queue ));
        }
        tempo2 = magma_sync_wtime( queue );
        runtime += tempo2-tempo1;
        if ( solver_par->verbose > 0 ) {
            CHECK( magma_zresidualvec( A_d, b, *x, &r, &residual, queue));
            solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                = (real_Double_t) residual;
            solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                = (real_Double_t) runtime;
        }
    }
    while ( solver_par->numiter+1 <= solver_par->maxiter );

    solver_par->runtime = runtime;
    CHECK( magma_zresidual( A_d, b, *x, &residual, queue));
    solver_par->final_res = residual;
    solver_par->numiter = solver_par->maxiter;

    if ( solver_par->init_res > solver_par->final_res ){
        info = MAGMA_SUCCESS;
    }
    else {
        info = MAGMA_DIVERGENCE;
    }

cleanup:
    magma_zmfree(&r, queue );
    magma_zmfree(&D, queue );
    magma_zmfree(&R, queue );
    // only free the splittings that were actually created
    for( int i=0; i<valid_matrices; i++ ){
        magma_zmfree(&D_d[i], queue );
        magma_zmfree(&R_d[i], queue );
    }
    magma_zmfree(&A_d, queue );
    magma_zmfree(&ACSR, queue );
    magma_zmfree(&Ah, queue );

    solver_par->info = info;
    return info;
}   /* magma_zbaiter_overlap */
extern "C" magma_int_t magma_zcumilusetup_transpose( magma_z_matrix A, magma_z_preconditioner *precond, magma_queue_t queue ) { magma_int_t info = 0; magma_z_matrix Ah1={Magma_CSR}, Ah2={Magma_CSR}; cusparseHandle_t cusparseHandle=NULL; cusparseMatDescr_t descrLT=NULL; cusparseMatDescr_t descrUT=NULL; // CUSPARSE context // CHECK_CUSPARSE( cusparseCreate( &cusparseHandle )); CHECK_CUSPARSE( cusparseSetStream( cusparseHandle, queue->cuda_stream() )); // transpose the matrix magma_zmtransfer( precond->L, &Ah1, Magma_DEV, Magma_CPU, queue ); magma_zmconvert( Ah1, &Ah2, A.storage_type, Magma_CSR, queue ); magma_zmfree(&Ah1, queue ); magma_zmtransposeconjugate( Ah2, &Ah1, queue ); magma_zmfree(&Ah2, queue ); Ah2.blocksize = A.blocksize; Ah2.alignment = A.alignment; magma_zmconvert( Ah1, &Ah2, Magma_CSR, A.storage_type, queue ); magma_zmfree(&Ah1, queue ); magma_zmtransfer( Ah2, &(precond->LT), Magma_CPU, Magma_DEV, queue ); magma_zmfree(&Ah2, queue ); magma_zmtransfer( precond->U, &Ah1, Magma_DEV, Magma_CPU, queue ); magma_zmconvert( Ah1, &Ah2, A.storage_type, Magma_CSR, queue ); magma_zmfree(&Ah1, queue ); magma_zmtransposeconjugate( Ah2, &Ah1, queue ); magma_zmfree(&Ah2, queue ); Ah2.blocksize = A.blocksize; Ah2.alignment = A.alignment; magma_zmconvert( Ah1, &Ah2, Magma_CSR, A.storage_type, queue ); magma_zmfree(&Ah1, queue ); magma_zmtransfer( Ah2, &(precond->UT), Magma_CPU, Magma_DEV, queue ); magma_zmfree(&Ah2, queue ); CHECK_CUSPARSE( cusparseCreateMatDescr( &descrLT )); CHECK_CUSPARSE( cusparseSetMatType( descrLT, CUSPARSE_MATRIX_TYPE_TRIANGULAR )); CHECK_CUSPARSE( cusparseSetMatDiagType( descrLT, CUSPARSE_DIAG_TYPE_UNIT )); CHECK_CUSPARSE( cusparseSetMatIndexBase( descrLT, CUSPARSE_INDEX_BASE_ZERO )); CHECK_CUSPARSE( cusparseSetMatFillMode( descrLT, CUSPARSE_FILL_MODE_UPPER )); CHECK_CUSPARSE( cusparseCreateSolveAnalysisInfo( &precond->cuinfoLT )); CHECK_CUSPARSE( cusparseZcsrsm_analysis( cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, precond->LT.num_rows, 
precond->LT.nnz, descrLT, precond->LT.dval, precond->LT.drow, precond->LT.dcol, precond->cuinfoLT )); CHECK_CUSPARSE( cusparseCreateMatDescr( &descrUT )); CHECK_CUSPARSE( cusparseSetMatType( descrUT, CUSPARSE_MATRIX_TYPE_TRIANGULAR )); CHECK_CUSPARSE( cusparseSetMatDiagType( descrUT, CUSPARSE_DIAG_TYPE_NON_UNIT )); CHECK_CUSPARSE( cusparseSetMatIndexBase( descrUT, CUSPARSE_INDEX_BASE_ZERO )); CHECK_CUSPARSE( cusparseSetMatFillMode( descrUT, CUSPARSE_FILL_MODE_LOWER )); CHECK_CUSPARSE( cusparseCreateSolveAnalysisInfo( &precond->cuinfoUT )); CHECK_CUSPARSE( cusparseZcsrsm_analysis( cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, precond->UT.num_rows, precond->UT.nnz, descrUT, precond->UT.dval, precond->UT.drow, precond->UT.dcol, precond->cuinfoUT )); cleanup: cusparseDestroyMatDescr( descrLT ); cusparseDestroyMatDescr( descrUT ); cusparseDestroy( cusparseHandle ); magma_zmfree(&Ah1, queue ); magma_zmfree(&Ah2, queue ); return info; }
extern "C" magma_int_t magma_zcuspmm( magma_z_matrix A, magma_z_matrix B, magma_z_matrix *AB, magma_queue_t queue ) { magma_int_t info = 0; magma_z_matrix C={Magma_CSR}; C.num_rows = A.num_rows; C.num_cols = B.num_cols; C.storage_type = A.storage_type; C.memory_location = A.memory_location; C.fill_mode = MagmaFull; C.val = NULL; C.col = NULL; C.row = NULL; C.rowidx = NULL; C.blockinfo = NULL; C.diag = NULL; C.dval = NULL; C.dcol = NULL; C.drow = NULL; C.drowidx = NULL; C.ddiag = NULL; magma_index_t base_t, nnz_t, baseC; cusparseHandle_t handle=NULL; cusparseMatDescr_t descrA=NULL; cusparseMatDescr_t descrB=NULL; cusparseMatDescr_t descrC=NULL; if ( A.memory_location == Magma_DEV && B.memory_location == Magma_DEV && ( A.storage_type == Magma_CSR || A.storage_type == Magma_CSRCOO ) && ( B.storage_type == Magma_CSR || B.storage_type == Magma_CSRCOO ) ) { // CUSPARSE context / CHECK_CUSPARSE( cusparseCreate( &handle )); CHECK_CUSPARSE( cusparseSetStream( handle, queue->cuda_stream() )); CHECK_CUSPARSE( cusparseCreateMatDescr( &descrA )); CHECK_CUSPARSE( cusparseCreateMatDescr( &descrB )); CHECK_CUSPARSE( cusparseCreateMatDescr( &descrC )); CHECK_CUSPARSE( cusparseSetMatType( descrA, CUSPARSE_MATRIX_TYPE_GENERAL )); CHECK_CUSPARSE( cusparseSetMatType( descrB, CUSPARSE_MATRIX_TYPE_GENERAL )); CHECK_CUSPARSE( cusparseSetMatType( descrC, CUSPARSE_MATRIX_TYPE_GENERAL )); CHECK_CUSPARSE( cusparseSetMatIndexBase( descrA, CUSPARSE_INDEX_BASE_ZERO )); CHECK_CUSPARSE( cusparseSetMatIndexBase( descrB, CUSPARSE_INDEX_BASE_ZERO )); CHECK_CUSPARSE( cusparseSetMatIndexBase( descrC, CUSPARSE_INDEX_BASE_ZERO )); // nnzTotalDevHostPtr points to host memory magma_index_t *nnzTotalDevHostPtr = (magma_index_t*) &C.nnz; CHECK_CUSPARSE( cusparseSetPointerMode( handle, CUSPARSE_POINTER_MODE_HOST )); CHECK( magma_index_malloc( &C.drow, (A.num_rows + 1) )); CHECK_CUSPARSE( cusparseXcsrgemmNnz( handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, A.num_rows, B.num_cols, 
A.num_cols, descrA, A.nnz, A.drow, A.dcol, descrB, B.nnz, B.drow, B.dcol, descrC, C.drow, nnzTotalDevHostPtr )); if (NULL != nnzTotalDevHostPtr) { C.nnz = *nnzTotalDevHostPtr; } else { // workaround as nnz and base C are magma_int_t magma_index_getvector( 1, C.drow+C.num_rows, 1, &nnz_t, 1, queue ); magma_index_getvector( 1, C.drow, 1, &base_t, 1, queue ); C.nnz = (magma_int_t) nnz_t; baseC = (magma_int_t) base_t; C.nnz -= baseC; } CHECK( magma_index_malloc( &C.dcol, C.nnz )); CHECK( magma_zmalloc( &C.dval, C.nnz )); CHECK_CUSPARSE( cusparseZcsrgemm( handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, A.num_rows, B.num_cols, A.num_cols, descrA, A.nnz, A.dval, A.drow, A.dcol, descrB, B.nnz, B.dval, B.drow, B.dcol, descrC, C.dval, C.drow, C.dcol )); // end CUSPARSE context // //magma_device_sync(); magma_queue_sync( queue ); CHECK( magma_zmtransfer( C, AB, Magma_DEV, Magma_DEV, queue )); } else { info = MAGMA_ERR_NOT_SUPPORTED; } cleanup: cusparseDestroyMatDescr( descrA ); cusparseDestroyMatDescr( descrB ); cusparseDestroyMatDescr( descrC ); cusparseDestroy( handle ); magma_zmfree( &C, queue ); return info; }
extern "C" magma_int_t magma_zqr( magma_int_t m, magma_int_t n, magma_z_matrix A, magma_int_t lda, magma_z_matrix *Q, magma_z_matrix *R, magma_queue_t queue ) { magma_int_t info = 0; // local constants const magmaDoubleComplex c_zero = MAGMA_Z_ZERO; // local variables magma_int_t inc = 1; magma_int_t k = min(m,n); magma_int_t ldt; magma_int_t nb; magmaDoubleComplex *tau = NULL; magmaDoubleComplex *dT = NULL; magmaDoubleComplex *dA = NULL; magma_z_matrix dR1 = {Magma_CSR}; // allocate CPU resources CHECK( magma_zmalloc_pinned( &tau, k ) ); // query number of blocks required for QR factorization nb = magma_get_zgeqrf_nb( m, n ); ldt = (2 * k + magma_roundup(n, 32)) * nb; CHECK( magma_zmalloc( &dT, ldt ) ); // get copy of matrix array if ( A.memory_location == Magma_DEV ) { dA = A.dval; } else { CHECK( magma_zmalloc( &dA, lda * n ) ); magma_zsetvector( lda * n, A.val, inc, dA, inc, queue ); } // QR factorization magma_zgeqrf_gpu( m, n, dA, lda, tau, dT, &info ); // construct R matrix if ( R != NULL ) { if ( A.memory_location == Magma_DEV ) { CHECK( magma_zvinit( R, Magma_DEV, lda, n, c_zero, queue ) ); magmablas_zlacpy( MagmaUpper, k, n, dA, lda, R->dval, lda, queue ); } else { CHECK( magma_zvinit( &dR1, Magma_DEV, lda, n, c_zero, queue ) ); magmablas_zlacpy( MagmaUpper, k, n, dA, lda, dR1.dval, lda, queue ); CHECK( magma_zvinit( R, Magma_CPU, lda, n, c_zero, queue ) ); magma_zgetvector( lda * n, dR1.dval, inc, R->val, inc, queue ); } } // construct Q matrix if ( Q != NULL ) { magma_zungqr_gpu( m, n, k, dA, lda, tau, dT, nb, &info ); if ( A.memory_location == Magma_DEV ) { CHECK( magma_zvinit( Q, Magma_DEV, lda, n, c_zero, queue ) ); magma_zcopyvector( lda * n, dA, inc, Q->dval, inc, queue ); } else { CHECK( magma_zvinit( Q, Magma_CPU, lda, n, c_zero, queue ) ); magma_zgetvector( lda * n, dA, inc, Q->val, inc, queue ); } } cleanup: if( info != 0 ){ magma_zmfree( Q, queue ); magma_zmfree( R, queue ); magma_zmfree( &dR1, queue ); } // free resources magma_free_pinned( 
tau ); magma_free( dT ); if ( A.memory_location == Magma_CPU ) { magma_free( dA ); } return info; }
/* //////////////////////////////////////////////////////////////////////////// -- testing any solver */ int main( int argc, char** argv ) { magma_int_t info = 0; TESTING_INIT(); magma_queue_t queue=NULL; magma_queue_create( 0, &queue ); magmaDoubleComplex one = MAGMA_Z_MAKE(1.0, 0.0); magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magma_z_matrix A={Magma_CSR}, B_d={Magma_CSR}; magma_z_matrix x={Magma_CSR}, b={Magma_CSR}; int i=1; while( i < argc ) { if ( strcmp("LAPLACE2D", argv[i]) == 0 && i+1 < argc ) { // Laplace test i++; magma_int_t laplace_size = atoi( argv[i] ); CHECK( magma_zm_5stencil( laplace_size, &A, queue )); } else { // file-matrix test CHECK( magma_z_csr_mtx( &A, argv[i], queue )); } printf( "\n# matrix info: %d-by-%d with %d nonzeros\n\n", int(A.num_rows), int(A.num_cols), int(A.nnz) ); magma_int_t n = A.num_rows; CHECK( magma_zmtransfer( A, &B_d, Magma_CPU, Magma_DEV, queue )); // vectors and initial guess CHECK( magma_zvinit( &b, Magma_DEV, A.num_cols, 1, zero, queue )); CHECK( magma_zvinit( &x, Magma_DEV, A.num_cols, 1, one, queue )); CHECK( magma_zprint_vector( b, 90, 10, queue )); CHECK( magma_zprint_matrix( A, queue )); printf("\n\n\n"); CHECK( magma_zprint_matrix( B_d, queue )); double res; res = magma_dznrm2(n, b.dval, 1, queue ); printf("norm0: %f\n", res); CHECK( magma_z_spmv( one, B_d, x, zero, b, queue )); // b = A x CHECK( magma_zprint_vector( b, 0, 100, queue )); CHECK( magma_zprint_vector( b, b.num_rows-10, 10, queue )); res = magma_dznrm2( n, b.dval, 1, queue ); printf("norm: %f\n", res); CHECK( magma_zresidual( B_d, x, b, &res, queue)); printf("res: %f\n", res); magma_zmfree(&B_d, queue ); magma_zmfree(&A, queue ); magma_zmfree(&x, queue ); magma_zmfree(&b, queue ); i++; } cleanup: magma_zmfree(&A, queue ); magma_zmfree(&B_d, queue ); magma_zmfree(&x, queue ); magma_zmfree(&b, queue ); magma_queue_destroy( queue ); magma_finalize(); return info; }
/*
    ////////////////////////////////////////////////////////////////////////////
    -- testing any solver
*/
// Format-conversion tester: for each input matrix, extracts the lower
// triangle, transposes it, round-trips it through every supported sparse
// storage format (ELL, ELLPACKT, ELLRT, SELLP, ELLD, CSRCOO, CSRLIST,
// CSRD), transposes back, and checks ||A - A2||_F ~ 0. Also tests
// magma_zmlumerge by recombining the L and U parts into Z2 and comparing
// against the original Z.
int main(  int argc, char** argv )
{
    magma_int_t info = 0;
    TESTING_INIT();

    magma_zopts zopts;
    magma_queue_t queue=NULL;
    magma_queue_create( &queue );

    real_Double_t res;
    magma_z_matrix Z={Magma_CSR}, Z2={Magma_CSR}, A={Magma_CSR}, A2={Magma_CSR},
    AT={Magma_CSR}, AT2={Magma_CSR}, B={Magma_CSR};

    int i=1;
    CHECK( magma_zparse_opts( argc, argv, &zopts, &i, queue ));
    B.blocksize = zopts.blocksize;
    B.alignment = zopts.alignment;

    while(  i < argc ) {
        if ( strcmp("LAPLACE2D", argv[i]) == 0 && i+1 < argc ) {   // Laplace test
            i++;
            magma_int_t laplace_size = atoi( argv[i] );
            CHECK( magma_zm_5stencil(  laplace_size, &Z, queue ));
        } else {                        // file-matrix test
            CHECK( magma_z_csr_mtx( &Z,  argv[i], queue ));
        }

        printf("%% matrix info: %d-by-%d with %d nonzeros\n",
                            int(Z.num_rows), int(Z.num_cols), int(Z.nnz) );

        // convert to be non-symmetric: A = lower triangle, B = upper triangle
        CHECK( magma_zmconvert( Z, &A, Magma_CSR, Magma_CSRL, queue ));
        CHECK( magma_zmconvert( Z, &B, Magma_CSR, Magma_CSRU, queue ));

        // transpose
        CHECK( magma_zmtranspose( A, &AT, queue ));

        // quite some conversions
        //ELL
        CHECK( magma_zmconvert( AT, &AT2, Magma_CSR, Magma_ELL, queue ));
        magma_zmfree(&AT, queue );
        CHECK( magma_zmconvert( AT2, &AT, Magma_ELL, Magma_CSR, queue ));
        magma_zmfree(&AT2, queue );
        //ELLPACKT
        CHECK( magma_zmconvert( AT, &AT2, Magma_CSR, Magma_ELLPACKT, queue ));
        magma_zmfree(&AT, queue );
        CHECK( magma_zmconvert( AT2, &AT, Magma_ELLPACKT, Magma_CSR, queue ));
        magma_zmfree(&AT2, queue );
        //ELLRT (needs blocksize/alignment set on the target)
        AT2.blocksize = 8;
        AT2.alignment = 8;
        CHECK( magma_zmconvert( AT, &AT2, Magma_CSR, Magma_ELLRT, queue ));
        magma_zmfree(&AT, queue );
        CHECK( magma_zmconvert( AT2, &AT, Magma_ELLRT, Magma_CSR, queue ));
        magma_zmfree(&AT2, queue );
        //SELLP
        AT2.blocksize = 8;
        AT2.alignment = 8;
        CHECK( magma_zmconvert( AT, &AT2, Magma_CSR, Magma_SELLP, queue ));
        magma_zmfree(&AT, queue );
        CHECK( magma_zmconvert( AT2, &AT, Magma_SELLP, Magma_CSR, queue ));
        magma_zmfree(&AT2, queue );
        //ELLD
        CHECK( magma_zmconvert( AT, &AT2, Magma_CSR, Magma_ELLD, queue ));
        magma_zmfree(&AT, queue );
        CHECK( magma_zmconvert( AT2, &AT, Magma_ELLD, Magma_CSR, queue ));
        magma_zmfree(&AT2, queue );
        //CSRCOO
        CHECK( magma_zmconvert( AT, &AT2, Magma_CSR, Magma_CSRCOO, queue ));
        magma_zmfree(&AT, queue );
        CHECK( magma_zmconvert( AT2, &AT, Magma_CSRCOO, Magma_CSR, queue ));
        magma_zmfree(&AT2, queue );
        //CSRLIST
        CHECK( magma_zmconvert( AT, &AT2, Magma_CSR, Magma_CSRLIST, queue ));
        magma_zmfree(&AT, queue );
        CHECK( magma_zmconvert( AT2, &AT, Magma_CSRLIST, Magma_CSR, queue ));
        magma_zmfree(&AT2, queue );
        //CSRD
        CHECK( magma_zmconvert( AT, &AT2, Magma_CSR, Magma_CSRD, queue ));
        magma_zmfree(&AT, queue );
        CHECK( magma_zmconvert( AT2, &AT, Magma_CSRD, Magma_CSR, queue ));
        magma_zmfree(&AT2, queue );

        // transpose back and compare against the original lower triangle
        CHECK( magma_zmtranspose( AT, &A2, queue ));
        CHECK( magma_zmdiff( A, A2, &res, queue));
        printf("%% ||A-A2||_F = %8.2e\n", res);
        if ( res < .000001 )
            printf("%% conversion tester:  ok\n");
        else
            printf("%% conversion tester:  failed\n");

        // merge L and U back together and compare against the original Z
        CHECK( magma_zmlumerge( A2, B, &Z2, queue ));
        CHECK( magma_zmdiff( Z, Z2, &res, queue));
        printf("%% ||Z-Z2||_F = %8.2e\n", res);
        if ( res < .000001 )
            printf("%% LUmerge tester:  ok\n");
        else
            printf("%% LUmerge tester:  failed\n");

        magma_zmfree(&A, queue );
        magma_zmfree(&A2, queue );
        magma_zmfree(&AT, queue );
        magma_zmfree(&AT2, queue );
        magma_zmfree(&B, queue );
        magma_zmfree(&Z2, queue );
        magma_zmfree(&Z, queue );

        i++;
    }

cleanup:
    magma_zmfree(&A, queue );
    magma_zmfree(&A2, queue );
    magma_zmfree(&AT, queue );
    magma_zmfree(&AT2, queue );
    magma_zmfree(&B, queue );
    magma_zmfree(&Z2, queue );
    magma_zmfree(&Z, queue );
    magma_queue_destroy( queue );
    TESTING_FINALIZE();
    return info;
}
extern "C" magma_int_t magma_zmlumerge( magma_z_matrix L, magma_z_matrix U, magma_z_matrix *A, magma_queue_t queue ) { magma_int_t info = 0; if( L.storage_type == Magma_CSR && U.storage_type == Magma_CSR ){ if( L.memory_location == Magma_CPU && U.memory_location == Magma_CPU ){ CHECK( magma_zmtransfer( L, A, Magma_CPU, Magma_CPU, queue )); magma_free_cpu( A->col ); magma_free_cpu( A->val ); // make sure it is strictly lower triangular magma_int_t z = 0; for(magma_int_t i=0; i<A->num_rows; i++){ for(magma_int_t j=L.row[i]; j<L.row[i+1]; j++){ if( L.col[j] < i ){// make sure it is strictly lower triangular z++; } } for(magma_int_t j=U.row[i]; j<U.row[i+1]; j++){ z++; } } A->nnz = z; // fill A with the new structure; CHECK( magma_index_malloc_cpu( &A->col, A->nnz )); CHECK( magma_zmalloc_cpu( &A->val, A->nnz )); z = 0; for(magma_int_t i=0; i<A->num_rows; i++){ A->row[i] = z; for(magma_int_t j=L.row[i]; j<L.row[i+1]; j++){ if( L.col[j] < i ){// make sure it is strictly lower triangular A->col[z] = L.col[j]; A->val[z] = L.val[j]; z++; } } for(magma_int_t j=U.row[i]; j<U.row[i+1]; j++){ A->col[z] = U.col[j]; A->val[z] = U.val[j]; z++; } } A->row[A->num_rows] = z; A->nnz = z; } else{ printf("error: matrix not on CPU.\n"); info = MAGMA_ERR_NOT_SUPPORTED; } } else{ printf("error: matrix in wrong format.\n"); info = MAGMA_ERR_NOT_SUPPORTED; } cleanup: if( info != 0 ){ magma_zmfree( A, queue ); } return info; }
/**
    Performs a symbolic ILU(levels) factorization: computes the sparsity
    patterns of L and U via magma_zsymbolic_ilu, initializes their values
    with the corresponding entries of A, and rebuilds A itself with the
    merged L+U pattern (original values restored where the pattern
    overlaps, fill-in positions left zero). Non-CSR / non-CPU inputs are
    converted, processed recursively, and converted back. On error, L and
    U are freed.
*/
magma_int_t
magma_zsymbilu(
    magma_z_matrix *A,
    magma_int_t levels,
    magma_z_matrix *L,
    magma_z_matrix *U,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    magma_z_matrix A_copy={Magma_CSR}, B={Magma_CSR};
    magma_z_matrix hA={Magma_CSR}, CSRCOOA={Magma_CSR};

    if( A->memory_location == Magma_CPU && A->storage_type == Magma_CSR ){
        // keep an untouched copy of A to restore the original values later
        CHECK( magma_zmtransfer( *A, &A_copy, Magma_CPU, Magma_CPU, queue ));
        CHECK( magma_zmtransfer( *A, &B, Magma_CPU, Magma_CPU, queue ));

        // possibility to scale to unit diagonal
        //magma_zmscale( &B, Magma_UNITDIAG );

        CHECK( magma_zmconvert( B, L, Magma_CSR, Magma_CSR , queue));
        CHECK( magma_zmconvert( B, U, Magma_CSR, Magma_CSR, queue ));

        // heuristic upper bound on the fill-in for levels > 0;
        // magma_zsymbolic_ilu reports the actual counts back
        magma_int_t num_lnnz = (levels > 0 ) ? B.nnz/2*(2*levels+50) : B.nnz;
        magma_int_t num_unnz = (levels > 0 ) ? B.nnz/2*(2*levels+50) : B.nnz;

        magma_free_cpu( L->col );
        magma_free_cpu( U->col );
        CHECK( magma_index_malloc_cpu( &L->col, num_lnnz ));
        CHECK( magma_index_malloc_cpu( &U->col, num_unnz ));

        // compute the symbolic L / U patterns (row pointers and columns)
        magma_zsymbolic_ilu( levels, A->num_rows, &num_lnnz, &num_unnz,
                             B.row, B.col, L->row, L->col, U->row, U->col );
        L->nnz = num_lnnz;
        U->nnz = num_unnz;

        // allocate values for the new patterns, zero-filled
        magma_free_cpu( L->val );
        magma_free_cpu( U->val );
        CHECK( magma_zmalloc_cpu( &L->val, L->nnz ));
        CHECK( magma_zmalloc_cpu( &U->val, U->nnz ));
        for( magma_int_t i=0; i<L->nnz; i++ )
            L->val[i] = MAGMA_Z_MAKE( 0.0, 0.0 );
        for( magma_int_t i=0; i<U->nnz; i++ )
            U->val[i] = MAGMA_Z_MAKE( 0.0, 0.0 );

        // take the original values (scaled) as initial guess for L
        for(magma_int_t i=0; i<L->num_rows; i++){
            for(magma_int_t j=B.row[i]; j<B.row[i+1]; j++){
                magma_index_t lcol = B.col[j];
                for(magma_int_t k=L->row[i]; k<L->row[i+1]; k++){
                    if( L->col[k] == lcol ){
                        L->val[k] = B.val[j];
                    }
                }
            }
        }

        // take the original values (scaled) as initial guess for U
        for(magma_int_t i=0; i<U->num_rows; i++){
            for(magma_int_t j=B.row[i]; j<B.row[i+1]; j++){
                magma_index_t lcol = B.col[j];
                for(magma_int_t k=U->row[i]; k<U->row[i+1]; k++){
                    if( U->col[k] == lcol ){
                        U->val[k] = B.val[j];
                    }
                }
            }
        }
        magma_zmfree( &B, queue );

        // fill A with the new structure (merged L + U pattern, row by row);
        CHECK( magma_index_malloc_cpu( &A->col, L->nnz+U->nnz ));
        CHECK( magma_zmalloc_cpu( &A->val, L->nnz+U->nnz ));
        A->nnz = L->nnz+U->nnz;
        magma_int_t z = 0;
        for(magma_int_t i=0; i<A->num_rows; i++){
            A->row[i] = z;
            for(magma_int_t j=L->row[i]; j<L->row[i+1]; j++){
                A->col[z] = L->col[j];
                A->val[z] = L->val[j];
                z++;
            }
            for(magma_int_t j=U->row[i]; j<U->row[i+1]; j++){
                A->col[z] = U->col[j];
                A->val[z] = U->val[j];
                z++;
            }
        }
        A->row[A->num_rows] = z;

        // reset the values of A to the original entries (fill-in stays zero)
        for(magma_int_t i=0; i<A->num_rows; i++){
            for(magma_int_t j=A_copy.row[i]; j<A_copy.row[i+1]; j++){
                magma_index_t lcol = A_copy.col[j];
                for(magma_int_t k=A->row[i]; k<A->row[i+1]; k++){
                    if( A->col[k] == lcol ){
                        A->val[k] = A_copy.val[j];
                    }
                }
            }
        }
    }
    else {
        // non-CSR / non-CPU input: convert, recurse, convert back
        magma_storage_t A_storage = A->storage_type;
        magma_location_t A_location = A->memory_location;
        CHECK( magma_zmtransfer( *A, &hA, A->memory_location, Magma_CPU, queue ));
        CHECK( magma_zmconvert( hA, &CSRCOOA, hA.storage_type, Magma_CSR, queue ));

        CHECK( magma_zsymbilu( &CSRCOOA, levels, L, U, queue ));

        magma_zmfree( &hA, queue );
        magma_zmfree( A, queue );
        CHECK( magma_zmconvert( CSRCOOA, &hA, Magma_CSR, A_storage, queue ));
        CHECK( magma_zmtransfer( hA, A, Magma_CPU, A_location, queue ));
    }

cleanup:
    if( info != 0 ){
        magma_zmfree( L, queue );
        magma_zmfree( U, queue );
    }
    magma_zmfree( &A_copy, queue );
    magma_zmfree( &B, queue );
    magma_zmfree( &hA, queue );
    magma_zmfree( &CSRCOOA, queue );
    return info;
}
extern "C" magma_int_t magma_zpbicg( magma_z_matrix A, magma_z_matrix b, magma_z_matrix *x, magma_z_solver_par *solver_par, magma_z_preconditioner *precond_par, magma_queue_t queue ) { magma_int_t info = MAGMA_NOTCONVERGED; // prepare solver feedback solver_par->solver = Magma_PBICG; solver_par->numiter = 0; solver_par->spmv_count = 0; // some useful variables magmaDoubleComplex c_zero = MAGMA_Z_ZERO; magmaDoubleComplex c_one = MAGMA_Z_ONE; magmaDoubleComplex c_neg_one = MAGMA_Z_NEG_ONE; magma_int_t dofs = A.num_rows * b.num_cols; // workspace magma_z_matrix r={Magma_CSR}, rt={Magma_CSR}, p={Magma_CSR}, pt={Magma_CSR}, z={Magma_CSR}, zt={Magma_CSR}, q={Magma_CSR}, y={Magma_CSR}, yt={Magma_CSR}, qt={Magma_CSR}; // need to transpose the matrix magma_z_matrix AT={Magma_CSR}, Ah1={Magma_CSR}, Ah2={Magma_CSR}; CHECK( magma_zvinit( &r, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &rt,Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &p, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &pt,Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &q, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &qt,Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &y, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &yt,Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &z, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &zt,Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); // solver variables magmaDoubleComplex alpha, rho, beta, rho_new, ptq; double res, nomb, nom0, r0; // transpose the matrix magma_zmtransfer( A, &Ah1, Magma_DEV, Magma_CPU, queue ); magma_zmconvert( Ah1, &Ah2, A.storage_type, Magma_CSR, queue ); magma_zmfree(&Ah1, queue ); magma_zmtransposeconjugate( Ah2, &Ah1, queue ); magma_zmfree(&Ah2, queue ); Ah2.blocksize = A.blocksize; Ah2.alignment = 
A.alignment; magma_zmconvert( Ah1, &Ah2, Magma_CSR, A.storage_type, queue ); magma_zmfree(&Ah1, queue ); magma_zmtransfer( Ah2, &AT, Magma_CPU, Magma_DEV, queue ); magma_zmfree(&Ah2, queue ); // solver setup CHECK( magma_zresidualvec( A, b, *x, &r, &nom0, queue)); res = nom0; solver_par->init_res = nom0; magma_zcopy( dofs, r.dval, 1, rt.dval, 1, queue ); // rr = r rho_new = magma_zdotc( dofs, rt.dval, 1, r.dval, 1, queue ); // rho=<rr,r> rho = alpha = MAGMA_Z_MAKE( 1.0, 0. ); nomb = magma_dznrm2( dofs, b.dval, 1, queue ); if ( nomb == 0.0 ){ nomb=1.0; } if ( (r0 = nomb * solver_par->rtol) < ATOLERANCE ){ r0 = ATOLERANCE; } solver_par->final_res = solver_par->init_res; solver_par->iter_res = solver_par->init_res; if ( solver_par->verbose > 0 ) { solver_par->res_vec[0] = nom0; solver_par->timing[0] = 0.0; } if ( nom0 < r0 ) { info = MAGMA_SUCCESS; goto cleanup; } //Chronometry real_Double_t tempo1, tempo2; tempo1 = magma_sync_wtime( queue ); solver_par->numiter = 0; solver_par->spmv_count = 0; // start iteration do { solver_par->numiter++; CHECK( magma_z_applyprecond_left( MagmaNoTrans, A, r, &y, precond_par, queue )); CHECK( magma_z_applyprecond_right( MagmaNoTrans, A, y, &z, precond_par, queue )); CHECK( magma_z_applyprecond_right( MagmaTrans, A, rt, &yt, precond_par, queue )); CHECK( magma_z_applyprecond_left( MagmaTrans, A, yt, &zt, precond_par, queue )); //magma_zcopy( dofs, r.dval, 1 , y.dval, 1, queue ); // y=r //magma_zcopy( dofs, y.dval, 1 , z.dval, 1, queue ); // z=y //magma_zcopy( dofs, rt.dval, 1 , yt.dval, 1, queue ); // yt=rt //magma_zcopy( dofs, yt.dval, 1 , zt.dval, 1, queue ); // yt=rt rho= rho_new; rho_new = magma_zdotc( dofs, rt.dval, 1, z.dval, 1, queue ); // rho=<rt,z> if( magma_z_isnan_inf( rho_new ) ){ info = MAGMA_DIVERGENCE; break; } if( solver_par->numiter==1 ){ magma_zcopy( dofs, z.dval, 1 , p.dval, 1, queue ); // yt=rt magma_zcopy( dofs, zt.dval, 1 , pt.dval, 1, queue ); // zt=yt } else { beta = rho_new/rho; magma_zscal( dofs, beta, 
p.dval, 1, queue ); // p = beta*p magma_zaxpy( dofs, c_one , z.dval, 1 , p.dval, 1, queue ); // p = z+beta*p magma_zscal( dofs, MAGMA_Z_CONJ(beta), pt.dval, 1, queue ); // pt = beta*pt magma_zaxpy( dofs, c_one , zt.dval, 1 , pt.dval, 1, queue ); // pt = zt+beta*pt } CHECK( magma_z_spmv( c_one, A, p, c_zero, q, queue )); // v = Ap CHECK( magma_z_spmv( c_one, AT, pt, c_zero, qt, queue )); // v = Ap solver_par->spmv_count++; solver_par->spmv_count++; ptq = magma_zdotc( dofs, pt.dval, 1, q.dval, 1, queue ); alpha = rho_new /ptq; magma_zaxpy( dofs, alpha, p.dval, 1 , x->dval, 1, queue ); // x=x+alpha*p magma_zaxpy( dofs, c_neg_one * alpha, q.dval, 1 , r.dval, 1, queue ); // r=r+alpha*q magma_zaxpy( dofs, c_neg_one * MAGMA_Z_CONJ(alpha), qt.dval, 1 , rt.dval, 1, queue ); // r=r+alpha*q res = magma_dznrm2( dofs, r.dval, 1, queue ); if ( solver_par->verbose > 0 ) { tempo2 = magma_sync_wtime( queue ); if ( (solver_par->numiter)%solver_par->verbose==0 ) { solver_par->res_vec[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) res; solver_par->timing[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) tempo2-tempo1; } } if ( res/nomb <= solver_par->rtol || res <= solver_par->atol ){ break; } } while ( solver_par->numiter+1 <= solver_par->maxiter ); tempo2 = magma_sync_wtime( queue ); solver_par->runtime = (real_Double_t) tempo2-tempo1; double residual; CHECK( magma_zresidualvec( A, b, *x, &r, &residual, queue)); solver_par->iter_res = res; solver_par->final_res = residual; if ( solver_par->numiter < solver_par->maxiter ) { info = MAGMA_SUCCESS; } else if ( solver_par->init_res > solver_par->final_res ) { if ( solver_par->verbose > 0 ) { if ( (solver_par->numiter)%solver_par->verbose==0 ) { solver_par->res_vec[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) res; solver_par->timing[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) tempo2-tempo1; } } info = MAGMA_SLOW_CONVERGENCE; if( solver_par->iter_res < 
solver_par->rtol*solver_par->init_res || solver_par->iter_res < solver_par->atol ) { info = MAGMA_SUCCESS; } } else { if ( solver_par->verbose > 0 ) { if ( (solver_par->numiter)%solver_par->verbose==0 ) { solver_par->res_vec[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) res; solver_par->timing[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) tempo2-tempo1; } } info = MAGMA_DIVERGENCE; } cleanup: magma_zmfree(&r, queue ); magma_zmfree(&rt, queue ); magma_zmfree(&p, queue ); magma_zmfree(&pt, queue ); magma_zmfree(&q, queue ); magma_zmfree(&qt, queue ); magma_zmfree(&y, queue ); magma_zmfree(&yt, queue ); magma_zmfree(&z, queue ); magma_zmfree(&zt, queue ); magma_zmfree(&AT, queue ); magma_zmfree(&Ah1, queue ); magma_zmfree(&Ah2, queue ); solver_par->info = info; return info; } /* magma_zpbicg */
extern "C" magma_int_t magma_zcumilugeneratesolverinfo( magma_z_preconditioner *precond, magma_queue_t queue ) { magma_int_t info = 0; cusparseHandle_t cusparseHandle=NULL; cusparseMatDescr_t descrL=NULL; cusparseMatDescr_t descrU=NULL; magma_z_matrix hA={Magma_CSR}, hL={Magma_CSR}, hU={Magma_CSR}; if (precond->L.memory_location != Magma_DEV ){ CHECK( magma_zmtransfer( precond->M, &hA, precond->M.memory_location, Magma_CPU, queue )); hL.diagorder_type = Magma_UNITY; CHECK( magma_zmconvert( hA, &hL , Magma_CSR, Magma_CSRL, queue )); hU.diagorder_type = Magma_VALUE; CHECK( magma_zmconvert( hA, &hU , Magma_CSR, Magma_CSRU, queue )); CHECK( magma_zmtransfer( hL, &(precond->L), Magma_CPU, Magma_DEV, queue )); CHECK( magma_zmtransfer( hU, &(precond->U), Magma_CPU, Magma_DEV, queue )); magma_zmfree(&hA, queue ); magma_zmfree(&hL, queue ); magma_zmfree(&hU, queue ); } // CUSPARSE context // CHECK_CUSPARSE( cusparseCreate( &cusparseHandle )); CHECK_CUSPARSE( cusparseSetStream( cusparseHandle, queue->cuda_stream() )); CHECK_CUSPARSE( cusparseCreateMatDescr( &descrL )); CHECK_CUSPARSE( cusparseSetMatType( descrL, CUSPARSE_MATRIX_TYPE_TRIANGULAR )); CHECK_CUSPARSE( cusparseSetMatDiagType( descrL, CUSPARSE_DIAG_TYPE_UNIT )); CHECK_CUSPARSE( cusparseSetMatIndexBase( descrL, CUSPARSE_INDEX_BASE_ZERO )); CHECK_CUSPARSE( cusparseSetMatFillMode( descrL, CUSPARSE_FILL_MODE_LOWER )); CHECK_CUSPARSE( cusparseCreateSolveAnalysisInfo( &precond->cuinfoL )); CHECK_CUSPARSE( cusparseZcsrsm_analysis( cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, precond->L.num_rows, precond->L.nnz, descrL, precond->L.dval, precond->L.drow, precond->L.dcol, precond->cuinfoL )); CHECK_CUSPARSE( cusparseCreateMatDescr( &descrU )); CHECK_CUSPARSE( cusparseSetMatType( descrU, CUSPARSE_MATRIX_TYPE_TRIANGULAR )); CHECK_CUSPARSE( cusparseSetMatDiagType( descrU, CUSPARSE_DIAG_TYPE_NON_UNIT )); CHECK_CUSPARSE( cusparseSetMatIndexBase( descrU, CUSPARSE_INDEX_BASE_ZERO )); CHECK_CUSPARSE( cusparseSetMatFillMode( 
descrU, CUSPARSE_FILL_MODE_UPPER )); CHECK_CUSPARSE( cusparseCreateSolveAnalysisInfo( &precond->cuinfoU )); CHECK_CUSPARSE( cusparseZcsrsm_analysis( cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, precond->U.num_rows, precond->U.nnz, descrU, precond->U.dval, precond->U.drow, precond->U.dcol, precond->cuinfoU )); if( precond->maxiter < 50 ){ //prepare for iterative solves // extract the diagonal of L into precond->d CHECK( magma_zjacobisetup_diagscal( precond->L, &precond->d, queue )); CHECK( magma_zvinit( &precond->work1, Magma_DEV, precond->U.num_rows, 1, MAGMA_Z_ZERO, queue )); // extract the diagonal of U into precond->d2 CHECK( magma_zjacobisetup_diagscal( precond->U, &precond->d2, queue )); CHECK( magma_zvinit( &precond->work2, Magma_DEV, precond->U.num_rows, 1, MAGMA_Z_ZERO, queue )); } cleanup: cusparseDestroyMatDescr( descrL ); cusparseDestroyMatDescr( descrU ); cusparseDestroy( cusparseHandle ); return info; }
extern "C" magma_int_t magma_ztfqmr_unrolled( magma_z_matrix A, magma_z_matrix b, magma_z_matrix *x, magma_z_solver_par *solver_par, magma_queue_t queue ) { magma_int_t info = MAGMA_NOTCONVERGED; // prepare solver feedback solver_par->solver = Magma_TFQMR; solver_par->numiter = 0; solver_par->spmv_count = 0; solver_par->spmv_count = 0; // local variables magmaDoubleComplex c_zero = MAGMA_Z_ZERO, c_one = MAGMA_Z_ONE; // solver variables double nom0, r0, res, nomb; magmaDoubleComplex rho = c_one, rho_l = c_one, eta = c_zero , c = c_zero , theta = c_zero , tau = c_zero, alpha = c_one, beta = c_zero, sigma = c_zero; magma_int_t dofs = A.num_rows* b.num_cols; // GPU workspace magma_z_matrix r={Magma_CSR}, r_tld={Magma_CSR}, d={Magma_CSR}, w={Magma_CSR}, v={Magma_CSR}, u_mp1={Magma_CSR}, u_m={Magma_CSR}, Au={Magma_CSR}, Ad={Magma_CSR}, Au_new={Magma_CSR}; CHECK( magma_zvinit( &r, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &u_mp1,Magma_DEV, A.num_rows, b.num_cols, c_one, queue )); CHECK( magma_zvinit( &r_tld,Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &u_m, Magma_DEV, A.num_rows, b.num_cols, c_one, queue )); CHECK( magma_zvinit( &v, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &d, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &w, Magma_DEV, A.num_rows, b.num_cols, c_one, queue )); CHECK( magma_zvinit( &Ad, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &Au_new, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue )); CHECK( magma_zvinit( &Au, Magma_DEV, A.num_rows, b.num_cols, c_one, queue )); // solver setup CHECK( magma_zresidualvec( A, b, *x, &r, &nom0, queue)); solver_par->init_res = nom0; magma_zcopy( dofs, r.dval, 1, r_tld.dval, 1, queue ); magma_zcopy( dofs, r.dval, 1, w.dval, 1, queue ); magma_zcopy( dofs, r.dval, 1, u_mp1.dval, 1, queue ); CHECK( magma_z_spmv( c_one, A, u_mp1, c_zero, v, queue )); // v = A u magma_zcopy( 
dofs, v.dval, 1, Au.dval, 1, queue ); nomb = magma_dznrm2( dofs, b.dval, 1, queue ); if ( nomb == 0.0 ){ nomb=1.0; } if ( (r0 = nomb * solver_par->rtol) < ATOLERANCE ){ r0 = ATOLERANCE; } solver_par->final_res = solver_par->init_res; solver_par->iter_res = solver_par->init_res; if ( solver_par->verbose > 0 ) { solver_par->res_vec[0] = (real_Double_t)nom0; solver_par->timing[0] = 0.0; } if ( nom0 < r0 ) { info = MAGMA_SUCCESS; goto cleanup; } tau = magma_zsqrt( magma_zdotc( dofs, r.dval, 1, r_tld.dval, 1, queue ) ); rho = magma_zdotc( dofs, r.dval, 1, r_tld.dval, 1, queue ); rho_l = rho; //Chronometry real_Double_t tempo1, tempo2; tempo1 = magma_sync_wtime( queue ); solver_par->numiter = 0; solver_par->spmv_count = 0; // start iteration do { solver_par->numiter++; // do this every iteration as unrolled alpha = rho / magma_zdotc( dofs, v.dval, 1, r_tld.dval, 1, queue ); sigma = theta * theta / alpha * eta; magma_zaxpy( dofs, -alpha, v.dval, 1, u_mp1.dval, 1, queue ); // u_mp1 = u_mp_1 - alpha*v; magma_zaxpy( dofs, -alpha, Au.dval, 1, w.dval, 1, queue ); // w = w - alpha*Au; magma_zscal( dofs, sigma, d.dval, 1, queue ); magma_zaxpy( dofs, c_one, u_mp1.dval, 1, d.dval, 1, queue ); // d = u_mp1 + sigma*d; //magma_zscal( dofs, sigma, Ad.dval, 1, queue ); //magma_zaxpy( dofs, c_one, Au.dval, 1, Ad.dval, 1, queue ); // Ad = Au + sigma*Ad; theta = magma_zsqrt( magma_zdotc(dofs, w.dval, 1, w.dval, 1, queue ) ) / tau; c = c_one / magma_zsqrt( c_one + theta*theta ); tau = tau * theta *c; eta = c * c * alpha; sigma = theta * theta / alpha * eta; printf("sigma: %f+%fi\n", MAGMA_Z_REAL(sigma), MAGMA_Z_IMAG(sigma) ); CHECK( magma_z_spmv( c_one, A, d, c_zero, Ad, queue )); // Au_new = A u_mp1 solver_par->spmv_count++; magma_zaxpy( dofs, eta, d.dval, 1, x->dval, 1, queue ); // x = x + eta * d magma_zaxpy( dofs, -eta, Ad.dval, 1, r.dval, 1, queue ); // r = r - eta * Ad // here starts the second part of the loop ################################# magma_zaxpy( dofs, -alpha, Au.dval, 1, 
w.dval, 1, queue ); // w = w - alpha*Au; magma_zscal( dofs, sigma, d.dval, 1, queue ); magma_zaxpy( dofs, c_one, u_mp1.dval, 1, d.dval, 1, queue ); // d = u_mp1 + sigma*d; magma_zscal( dofs, sigma, Ad.dval, 1, queue ); magma_zaxpy( dofs, c_one, Au.dval, 1, Ad.dval, 1, queue ); // Ad = Au + sigma*Ad; theta = magma_zsqrt( magma_zdotc(dofs, w.dval, 1, w.dval, 1, queue ) ) / tau; c = c_one / magma_zsqrt( c_one + theta*theta ); tau = tau * theta *c; eta = c * c * alpha; magma_zaxpy( dofs, eta, d.dval, 1, x->dval, 1, queue ); // x = x + eta * d magma_zaxpy( dofs, -eta, Ad.dval, 1, r.dval, 1, queue ); // r = r - eta * Ad res = magma_dznrm2( dofs, r.dval, 1, queue ); if ( solver_par->verbose > 0 ) { tempo2 = magma_sync_wtime( queue ); if ( (solver_par->numiter)%solver_par->verbose == 0 ) { solver_par->res_vec[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) res; solver_par->timing[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) tempo2-tempo1; } } if ( res/nomb <= solver_par->rtol || res <= solver_par->atol ){ break; } // do this every loop as unrolled rho_l = rho; rho = magma_zdotc( dofs, w.dval, 1, r_tld.dval, 1, queue ); beta = rho / rho_l; magma_zscal( dofs, beta, u_mp1.dval, 1, queue ); magma_zaxpy( dofs, c_one, w.dval, 1, u_mp1.dval, 1, queue ); // u_mp1 = w + beta*u_mp1; CHECK( magma_z_spmv( c_one, A, u_mp1, c_zero, Au_new, queue )); // Au_new = A u_mp1 solver_par->spmv_count++; // do this every loop as unrolled magma_zscal( dofs, beta*beta, v.dval, 1, queue ); magma_zaxpy( dofs, beta, Au.dval, 1, v.dval, 1, queue ); magma_zaxpy( dofs, c_one, Au_new.dval, 1, v.dval, 1, queue ); // v = Au_new + beta*(Au+beta*v); magma_zcopy( dofs, Au_new.dval, 1, Au.dval, 1, queue ); } while ( solver_par->numiter+1 <= solver_par->maxiter ); tempo2 = magma_sync_wtime( queue ); solver_par->runtime = (real_Double_t) tempo2-tempo1; double residual; CHECK( magma_zresidualvec( A, b, *x, &r, &residual, queue)); solver_par->iter_res = res; solver_par->final_res = 
residual; if ( solver_par->numiter < solver_par->maxiter ) { info = MAGMA_SUCCESS; } else if ( solver_par->init_res > solver_par->final_res ) { if ( solver_par->verbose > 0 ) { if ( (solver_par->numiter)%solver_par->verbose == 0 ) { solver_par->res_vec[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) res; solver_par->timing[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) tempo2-tempo1; } } info = MAGMA_SLOW_CONVERGENCE; if( solver_par->iter_res < solver_par->rtol*solver_par->init_res || solver_par->iter_res < solver_par->atol ) { info = MAGMA_SUCCESS; } } else { if ( solver_par->verbose > 0 ) { if ( (solver_par->numiter)%solver_par->verbose == 0 ) { solver_par->res_vec[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) res; solver_par->timing[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) tempo2-tempo1; } } info = MAGMA_DIVERGENCE; } cleanup: magma_zmfree(&r, queue ); magma_zmfree(&r_tld, queue ); magma_zmfree(&d, queue ); magma_zmfree(&w, queue ); magma_zmfree(&v, queue ); magma_zmfree(&u_m, queue ); magma_zmfree(&u_mp1, queue ); magma_zmfree(&d, queue ); magma_zmfree(&Au, queue ); magma_zmfree(&Au_new, queue ); magma_zmfree(&Ad, queue ); solver_par->info = info; return info; } /* magma_zfqmr_unrolled */
/**
    Purpose
    -------
    Solves a general sparse system A x = b with the preconditioned
    BiCGSTAB method, using the merged (fused-kernel) vector updates
    magma_zbicgstab_1/2/4.  Left/right preconditioner applications are
    timed separately into precond_par->runtime.

    Arguments
    ---------
    @param[in]      A           system matrix (device)
    @param[in]      b           right-hand side
    @param[in,out]  x           solution approximation; updated in place
    @param[in,out]  solver_par  solver parameters; receives residual history,
                                timings, iteration count and final status
    @param[in,out]  precond_par preconditioner; runtime is accumulated
    @param[in]      queue       queue to execute in

    @return magma_int_t: MAGMA_SUCCESS, MAGMA_SLOW_CONVERGENCE,
            MAGMA_DIVERGENCE, or an error code from a failed CHECK.
*/
extern "C" magma_int_t
magma_zpbicgstab_merge(
    magma_z_matrix A, magma_z_matrix b, magma_z_matrix *x,
    magma_z_solver_par *solver_par,
    magma_z_preconditioner *precond_par,
    magma_queue_t queue )
{
    magma_int_t info = MAGMA_NOTCONVERGED;

    // prepare solver feedback
    solver_par->solver = Magma_BICGSTAB;
    solver_par->numiter = 0;
    solver_par->spmv_count = 0;

    // some useful variables
    magmaDoubleComplex c_zero = MAGMA_Z_ZERO;
    magmaDoubleComplex c_one = MAGMA_Z_ONE;
    magma_int_t dofs = A.num_rows * b.num_cols;

    // workspace vectors (rr is the shadow residual r~ of BiCGSTAB)
    magma_z_matrix r={Magma_CSR}, rr={Magma_CSR}, p={Magma_CSR}, v={Magma_CSR},
        z={Magma_CSR}, y={Magma_CSR}, ms={Magma_CSR}, mt={Magma_CSR},
        s={Magma_CSR}, t={Magma_CSR}, d1={Magma_CSR}, d2={Magma_CSR};
    CHECK( magma_zvinit( &r, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &rr,Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &p, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &v, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &s, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &t, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &ms,Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &mt,Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &y, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &z, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &d1, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &d2, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));

    // solver variables
    magmaDoubleComplex alpha, beta, omega, rho_old, rho_new;
    double nom, betanom, nom0, r0, res, nomb;
    res=0;
    //double den;

    // solver setup
    CHECK(  magma_zresidualvec( A, b, *x, &r, &nom0, queue));
    magma_zcopy( dofs, r.dval, 1, rr.dval, 1, queue );                  // rr = r
    betanom = nom0;
    nom = nom0*nom0;    // squared residual norm
    rho_new = magma_zdotc( dofs, r.dval, 1, r.dval, 1, queue );             // rho = <rr,r>
    rho_old = omega = alpha = MAGMA_Z_MAKE( 1.0, 0. );
    solver_par->init_res = nom0;

    CHECK( magma_z_spmv( c_one, A, r, c_zero, v, queue ));              // v = A r

    nomb = magma_dznrm2( dofs, b.dval, 1, queue );
    if ( nomb == 0.0 ){
        nomb=1.0;
    }
    if ( (r0 = nomb * solver_par->rtol) < ATOLERANCE ){
        r0 = ATOLERANCE;
    }
    solver_par->final_res = solver_par->init_res;
    solver_par->iter_res = solver_par->init_res;
    if ( solver_par->verbose > 0 ) {
        solver_par->res_vec[0] = nom0;
        solver_par->timing[0] = 0.0;
    }
    // NOTE(review): this compares the SQUARED norm nom=nom0^2 against the
    // linear threshold r0, unlike sibling solvers which compare nom0 — confirm intended.
    if ( nom < r0 ) {
        info = MAGMA_SUCCESS;
        goto cleanup;
    }

    //Chronometry
    real_Double_t tempo1, tempo2, tempop1, tempop2;
    tempo1 = magma_sync_wtime( queue );

    solver_par->numiter = 0;
    solver_par->spmv_count = 0;
    // start iteration
    do
    {
        solver_par->numiter++;

        rho_old = rho_new;                                    // rho_old=rho
        rho_new = magma_zdotc( dofs, rr.dval, 1, r.dval, 1, queue );  // rho=<rr,r>
        beta = rho_new/rho_old * alpha/omega;   // beta=rho/rho_old *alpha/omega
        if( magma_z_isnan_inf( beta ) ){
            info = MAGMA_DIVERGENCE;
            break;
        }

        // p = r + beta * ( p - omega * v )
        magma_zbicgstab_1(
        r.num_rows, r.num_cols, beta, omega,
        r.dval, v.dval, p.dval, queue );

        // preconditioner: y = M^{-1} p
        tempop1 = magma_sync_wtime( queue );
        CHECK( magma_z_applyprecond_left( MagmaNoTrans, A, p, &mt, precond_par, queue ));
        CHECK( magma_z_applyprecond_right( MagmaNoTrans, A, mt, &y, precond_par, queue ));
        tempop2 = magma_sync_wtime( queue );
        precond_par->runtime += tempop2-tempop1;

        CHECK( magma_z_spmv( c_one, A, y, c_zero, v, queue ));      // v = Ap
        solver_par->spmv_count++;
        //alpha = rho_new / tmpval;
        alpha = rho_new /magma_zdotc( dofs, rr.dval, 1, v.dval, 1, queue );
        if( magma_z_isnan_inf( alpha ) ){
            info = MAGMA_DIVERGENCE;
            break;
        }

        // s = r - alpha v
        magma_zbicgstab_2(
        r.num_rows, r.num_cols, alpha,
        r.dval, v.dval, s.dval, queue );

        // preconditioner: z = M^{-1} s
        tempop1 = magma_sync_wtime( queue );
        CHECK( magma_z_applyprecond_left( MagmaNoTrans, A, s, &ms, precond_par, queue ));
        CHECK( magma_z_applyprecond_right( MagmaNoTrans, A, ms, &z, precond_par, queue ));
        tempop2 = magma_sync_wtime( queue );
        precond_par->runtime += tempop2-tempop1;

        CHECK( magma_z_spmv( c_one, A, z, c_zero, t, queue ));       // t=As
        solver_par->spmv_count++;
        omega = magma_zdotc( dofs, t.dval, 1, s.dval, 1, queue )   // omega = <s,t>/<t,t>
                    / magma_zdotc( dofs, t.dval, 1, t.dval, 1, queue );

        // x = x + alpha * y + omega * z
        // r = s - omega * t
        magma_zbicgstab_4(
        r.num_rows, r.num_cols,
        alpha, omega,
        y.dval, z.dval, s.dval, t.dval,
        x->dval, r.dval, queue );

        res = betanom = magma_dznrm2( dofs, r.dval, 1, queue );
        nom = betanom*betanom;

        if ( solver_par->verbose > 0 ) {
            tempo2 = magma_sync_wtime( queue );
            if ( (solver_par->numiter)%solver_par->verbose==0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }

        if ( res/nomb <= solver_par->rtol || res <= solver_par->atol ){
            break;
        }
    }
    while ( solver_par->numiter+1 <= solver_par->maxiter );

    tempo2 = magma_sync_wtime( queue );
    solver_par->runtime = (real_Double_t) tempo2-tempo1;
    double residual;
    CHECK(  magma_zresidualvec( A, b, *x, &r, &residual, queue));
    solver_par->final_res = residual;
    solver_par->iter_res = res;

    // classify the run
    // NOTE(review): info is never set to MAGMA_SUCCESS inside the loop, so a
    // run that converged by the tolerance break is classified through the
    // init_res > final_res branch below — confirm this is intended.
    if ( solver_par->numiter < solver_par->maxiter && info == MAGMA_SUCCESS ) {
        info = MAGMA_SUCCESS;
    } else if ( solver_par->init_res > solver_par->final_res ) {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose==0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) betanom;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        info = MAGMA_SLOW_CONVERGENCE;
        if( solver_par->iter_res < solver_par->rtol*solver_par->init_res ||
            solver_par->iter_res < solver_par->atol ) {
            info = MAGMA_SUCCESS;
        }
    }
    else {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose==0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) betanom;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        info = MAGMA_DIVERGENCE;
    }

cleanup:
    magma_zmfree(&r, queue );
    magma_zmfree(&rr, queue );
    magma_zmfree(&p, queue );
    magma_zmfree(&v, queue );
    magma_zmfree(&s, queue );
    magma_zmfree(&y, queue );
    magma_zmfree(&z, queue );
    magma_zmfree(&t, queue );
    magma_zmfree(&ms, queue );
    magma_zmfree(&mt, queue );
    magma_zmfree(&d1, queue );
    magma_zmfree(&d2, queue );

    solver_par->info = info;
    return info;
}   /* magma_zbicgstab_merge */
/**
    Purpose
    -------
    Prepares an incomplete Cholesky (IC) preconditioner for a Hermitian
    matrix A using cuSPARSE.  The lower-triangular factor is computed on
    the device and stored in precond->M; triangular-solve analysis info
    is generated for both the forward (L) and backward (L^H, via the
    transpose operation) sweeps.  For few preconditioner iterations
    (maxiter < 50) additional data for iterative triangular solves is set up.

    @param[in]      A        input Hermitian matrix
    @param[in,out]  precond  preconditioner structure to fill
    @param[in]      queue    queue to execute in

    @return magma_int_t: 0 on success, error code otherwise.
*/
extern "C" magma_int_t
magma_zcumiccsetup(
    magma_z_matrix A,
    magma_z_preconditioner *precond,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    cusparseHandle_t cusparseHandle=NULL;
    cusparseMatDescr_t descrA=NULL;
    cusparseMatDescr_t descrL=NULL;
    cusparseMatDescr_t descrU=NULL;
#if CUDA_VERSION >= 7000
    csric02Info_t info_M=NULL;
    void *pBuffer = NULL;
#endif

    magma_z_matrix hA={Magma_CSR}, hACSR={Magma_CSR}, U={Magma_CSR};
    CHECK( magma_zmtransfer( A, &hA, A.memory_location, Magma_CPU, queue ));
    U.diagorder_type = Magma_VALUE;
    CHECK( magma_zmconvert( hA, &hACSR, hA.storage_type, Magma_CSR, queue ));

    // in case using fill-in: symbolic ILU(levels) enlarges the pattern of hACSR
    if( precond->levels > 0 ){
        magma_z_matrix hAL={Magma_CSR}, hAUt={Magma_CSR};
        CHECK( magma_zsymbilu( &hACSR, precond->levels, &hAL, &hAUt,  queue ));
        magma_zmfree(&hAL, queue);
        magma_zmfree(&hAUt, queue);
    }

    // keep only the lower triangle (Magma_CSRL) and move it to the device
    CHECK( magma_zmconvert( hACSR, &U, Magma_CSR, Magma_CSRL, queue ));
    magma_zmfree( &hACSR, queue );
    CHECK( magma_zmtransfer(U, &(precond->M), Magma_CPU, Magma_DEV, queue ));

    // CUSPARSE context //
    CHECK_CUSPARSE( cusparseCreate( &cusparseHandle ));
    CHECK_CUSPARSE( cusparseSetStream( cusparseHandle, queue->cuda_stream() ));
    CHECK_CUSPARSE( cusparseCreateMatDescr( &descrA ));
    CHECK_CUSPARSE( cusparseCreateSolveAnalysisInfo( &(precond->cuinfo) ));
    // use kernel to manually check for zeros on the diagonal
    CHECK( magma_zdiagcheck( precond->M, queue ) );

#if CUDA_VERSION >= 7000
    // this version has the bug fixed where a zero on the diagonal causes a crash
    CHECK_CUSPARSE( cusparseCreateCsric02Info(&info_M) );
    CHECK_CUSPARSE( cusparseSetMatType( descrA, CUSPARSE_MATRIX_TYPE_GENERAL ));
    CHECK_CUSPARSE( cusparseSetMatIndexBase( descrA, CUSPARSE_INDEX_BASE_ZERO ));
    int buffersize;
    int structural_zero;
    int numerical_zero;

    CHECK_CUSPARSE(
    cusparseZcsric02_bufferSize( cusparseHandle,
                         precond->M.num_rows, precond->M.nnz, descrA,
                         precond->M.dval, precond->M.drow, precond->M.dcol,
                         info_M,
                         &buffersize ) );

    CHECK( magma_malloc((void**)&pBuffer, buffersize) );

    CHECK_CUSPARSE( cusparseZcsric02_analysis( cusparseHandle,
            precond->M.num_rows, precond->M.nnz, descrA,
            precond->M.dval, precond->M.drow, precond->M.dcol,
            info_M, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer ));
    CHECK_CUSPARSE( cusparseXcsric02_zeroPivot( cusparseHandle, info_M, &numerical_zero ) );
    CHECK_CUSPARSE( cusparseXcsric02_zeroPivot( cusparseHandle, info_M, &structural_zero ) );

    // numeric IC(0) factorization in place in precond->M
    CHECK_CUSPARSE(
    cusparseZcsric02( cusparseHandle,
                         precond->M.num_rows, precond->M.nnz, descrA,
                         precond->M.dval, precond->M.drow, precond->M.dcol,
                         info_M, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer) );
#else
    // this version contains the bug but is needed for backward compability
    CHECK_CUSPARSE( cusparseSetMatType( descrA, CUSPARSE_MATRIX_TYPE_SYMMETRIC ));
    CHECK_CUSPARSE( cusparseSetMatDiagType( descrA, CUSPARSE_DIAG_TYPE_NON_UNIT ));
    CHECK_CUSPARSE( cusparseSetMatIndexBase( descrA, CUSPARSE_INDEX_BASE_ZERO ));
    CHECK_CUSPARSE( cusparseSetMatFillMode( descrA, CUSPARSE_FILL_MODE_LOWER ));
    CHECK_CUSPARSE( cusparseZcsrsm_analysis( cusparseHandle,
                CUSPARSE_OPERATION_NON_TRANSPOSE,
                precond->M.num_rows, precond->M.nnz, descrA,
                precond->M.dval, precond->M.drow, precond->M.dcol,
                precond->cuinfo ));
    CHECK_CUSPARSE( cusparseZcsric0( cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                      precond->M.num_rows, descrA,
                      precond->M.dval,
                      precond->M.drow,
                      precond->M.dcol,
                      precond->cuinfo ));
#endif

    // analysis for the forward sweep L y = b
    CHECK_CUSPARSE( cusparseCreateMatDescr( &descrL ));
    CHECK_CUSPARSE( cusparseSetMatType( descrL, CUSPARSE_MATRIX_TYPE_TRIANGULAR ));
    CHECK_CUSPARSE( cusparseSetMatDiagType( descrL, CUSPARSE_DIAG_TYPE_NON_UNIT ));
    CHECK_CUSPARSE( cusparseSetMatIndexBase( descrL, CUSPARSE_INDEX_BASE_ZERO ));
    CHECK_CUSPARSE( cusparseSetMatFillMode( descrL, CUSPARSE_FILL_MODE_LOWER ));
    CHECK_CUSPARSE( cusparseCreateSolveAnalysisInfo( &precond->cuinfoL ));
    CHECK_CUSPARSE( cusparseZcsrsm_analysis( cusparseHandle,
        CUSPARSE_OPERATION_NON_TRANSPOSE, precond->M.num_rows,
        precond->M.nnz, descrL,
        precond->M.dval, precond->M.drow, precond->M.dcol, precond->cuinfoL ));

    // analysis for the backward sweep: the SAME lower factor is used with
    // CUSPARSE_OPERATION_TRANSPOSE, hence descrU also carries FILL_MODE_LOWER
    CHECK_CUSPARSE( cusparseCreateMatDescr( &descrU ));
    CHECK_CUSPARSE( cusparseSetMatType( descrU, CUSPARSE_MATRIX_TYPE_TRIANGULAR ));
    CHECK_CUSPARSE( cusparseSetMatDiagType( descrU, CUSPARSE_DIAG_TYPE_NON_UNIT ));
    CHECK_CUSPARSE( cusparseSetMatIndexBase( descrU, CUSPARSE_INDEX_BASE_ZERO ));
    CHECK_CUSPARSE( cusparseSetMatFillMode( descrU, CUSPARSE_FILL_MODE_LOWER ));
    CHECK_CUSPARSE( cusparseCreateSolveAnalysisInfo( &precond->cuinfoU ));
    CHECK_CUSPARSE( cusparseZcsrsm_analysis( cusparseHandle,
        CUSPARSE_OPERATION_TRANSPOSE, precond->M.num_rows,
        precond->M.nnz, descrU,
        precond->M.dval, precond->M.drow, precond->M.dcol, precond->cuinfoU ));

    if( precond->maxiter < 50 ){
        //prepare for iterative solves

        // copy the matrix to precond->L and (transposed) to precond->U
        CHECK( magma_zmtransfer(precond->M, &(precond->L), Magma_DEV, Magma_DEV, queue ));
        CHECK( magma_zmtranspose( precond->L, &(precond->U), queue ));

        // extract the diagonal of L into precond->d
        CHECK( magma_zjacobisetup_diagscal( precond->L, &precond->d, queue ));
        CHECK( magma_zvinit( &precond->work1, Magma_DEV, hA.num_rows, 1, MAGMA_Z_ZERO, queue ));

        // extract the diagonal of U into precond->d2
        CHECK( magma_zjacobisetup_diagscal( precond->U, &precond->d2, queue ));
        CHECK( magma_zvinit( &precond->work2, Magma_DEV, hA.num_rows, 1, MAGMA_Z_ZERO, queue ));
    }

    /*
    // to enable also the block-asynchronous iteration for the triangular solves
    CHECK( magma_zmtransfer( precond->M, &hA, Magma_DEV, Magma_CPU, queue ));
    hA.storage_type = Magma_CSR;

    magma_z_matrix hD, hR, hAt

    CHECK( magma_zcsrsplit( 256, hA, &hD, &hR, queue ));

    CHECK( magma_zmtransfer( hD, &precond->LD, Magma_CPU, Magma_DEV, queue ));
    CHECK( magma_zmtransfer( hR, &precond->L, Magma_CPU, Magma_DEV, queue ));

    magma_zmfree(&hD, queue );
    magma_zmfree(&hR, queue );

    CHECK( magma_z_cucsrtranspose(   hA, &hAt, queue ));

    CHECK( magma_zcsrsplit( 256, hAt, &hD, &hR, queue ));

    CHECK( magma_zmtransfer( hD, &precond->UD, Magma_CPU, Magma_DEV, queue ));
    CHECK( magma_zmtransfer( hR, &precond->U, Magma_CPU, Magma_DEV, queue ));

    magma_zmfree(&hD, queue );
    magma_zmfree(&hR, queue );
    magma_zmfree(&hA, queue );
    magma_zmfree(&hAt, queue );
    */

cleanup:
#if CUDA_VERSION >= 7000
    magma_free( pBuffer );
    cusparseDestroyCsric02Info( info_M );
#endif
    cusparseDestroySolveAnalysisInfo( precond->cuinfo );
    cusparseDestroyMatDescr( descrL );
    cusparseDestroyMatDescr( descrU );
    cusparseDestroyMatDescr( descrA );
    cusparseDestroy( cusparseHandle );
    magma_zmfree(&U, queue );
    magma_zmfree(&hA, queue );

    return info;
}
/**
    Purpose
    -------
    Solves a general sparse system A x = b with the quasi-minimal-residual
    (QMR) method, with the vector updates merged into fused kernels
    (magma_zqmr_1 .. magma_zqmr_5).  Requires A^H, which is built once
    on the host and transferred back to the device.

    Fixes relative to the previous revision:
    - the verbose-logging condition compared an integer modulus against the
      complex constant c_zero; it now compares against the integer 0, matching
      every other solver in this file;
    - the host-side transpose setup sequence now checks its error codes
      (CHECK), consistent with the rest of the function.

    @param[in]      A           system matrix (device)
    @param[in]      b           right-hand side
    @param[in,out]  x           solution approximation; updated in place
    @param[in,out]  solver_par  solver parameters; receives residual history,
                                timings, iteration count and final status
    @param[in]      queue       queue to execute in

    @return magma_int_t: MAGMA_SUCCESS, MAGMA_SLOW_CONVERGENCE,
            MAGMA_DIVERGENCE, or an error code from a failed CHECK.
*/
extern "C" magma_int_t
magma_zqmr_merge(
    magma_z_matrix A, magma_z_matrix b, magma_z_matrix *x,
    magma_z_solver_par *solver_par,
    magma_queue_t queue )
{
    magma_int_t info = MAGMA_NOTCONVERGED;

    // prepare solver feedback
    solver_par->solver = Magma_QMRMERGE;
    solver_par->numiter = 0;
    solver_par->spmv_count = 0;

    // local variables
    magmaDoubleComplex c_zero = MAGMA_Z_ZERO, c_one = MAGMA_Z_ONE;
    // solver variables
    double nom0, r0, res=0, nomb;
    magmaDoubleComplex rho = c_one, rho1 = c_one, eta = -c_one , pds = c_one,
                        thet = c_one, thet1 = c_one, epsilon = c_one,
                        beta = c_one, delta = c_one, pde = c_one, rde = c_one,
                        gamm = c_one, gamm1 = c_one, psi = c_one;

    magma_int_t dofs = A.num_rows* b.num_cols;

    // need to transpose the matrix
    magma_z_matrix AT={Magma_CSR}, Ah1={Magma_CSR}, Ah2={Magma_CSR};

    // GPU workspace
    magma_z_matrix r={Magma_CSR}, r_tld={Magma_CSR}, v={Magma_CSR},
        w={Magma_CSR}, wt={Magma_CSR}, d={Magma_CSR}, s={Magma_CSR},
        z={Magma_CSR}, q={Magma_CSR}, p={Magma_CSR}, pt={Magma_CSR},
        y={Magma_CSR};
    CHECK( magma_zvinit( &r, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &r_tld, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &v, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &w, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &wt,Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &d, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &s, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &z, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &q, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &p, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &pt,Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &y, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));

    // solver setup
    CHECK(  magma_zresidualvec( A, b, *x, &r, &nom0, queue));
    solver_par->init_res = nom0;
    magma_zcopy( dofs, r.dval, 1, r_tld.dval, 1, queue );
    magma_zcopy( dofs, r.dval, 1, y.dval, 1, queue );
    magma_zcopy( dofs, r.dval, 1, v.dval, 1, queue );
    magma_zcopy( dofs, r.dval, 1, wt.dval, 1, queue );
    magma_zcopy( dofs, r.dval, 1, z.dval, 1, queue );

    // build A^H: device -> host -> CSR -> conj-transpose -> original
    // storage format -> device; errors now propagate via CHECK
    CHECK( magma_zmtransfer( A, &Ah1, Magma_DEV, Magma_CPU, queue ));
    CHECK( magma_zmconvert( Ah1, &Ah2, A.storage_type, Magma_CSR, queue ));
    magma_zmfree(&Ah1, queue );
    CHECK( magma_zmtransposeconjugate( Ah2, &Ah1, queue ));
    magma_zmfree(&Ah2, queue );
    Ah2.blocksize = A.blocksize;
    Ah2.alignment = A.alignment;
    CHECK( magma_zmconvert( Ah1, &Ah2, Magma_CSR, A.storage_type, queue ));
    magma_zmfree(&Ah1, queue );
    CHECK( magma_zmtransfer( Ah2, &AT, Magma_CPU, Magma_DEV, queue ));
    magma_zmfree(&Ah2, queue );

    nomb = magma_dznrm2( dofs, b.dval, 1, queue );
    if ( nomb == 0.0 ){
        nomb=1.0;
    }
    if ( (r0 = nomb * solver_par->rtol) < ATOLERANCE ){
        r0 = ATOLERANCE;
    }
    solver_par->final_res = solver_par->init_res;
    solver_par->iter_res = solver_par->init_res;
    if ( solver_par->verbose > 0 ) {
        solver_par->res_vec[0] = (real_Double_t)nom0;
        solver_par->timing[0] = 0.0;
    }
    if ( nom0 < r0 ) {
        info = MAGMA_SUCCESS;
        goto cleanup;
    }

    psi = magma_zsqrt( magma_zdotc( dofs, z.dval, 1, z.dval, 1, queue ));
    rho = magma_zsqrt( magma_zdotc( dofs, y.dval, 1, y.dval, 1, queue ));

    // v = y / rho
    // y = y / rho
    // w = wt / psi
    // z = z / psi
    magma_zqmr_1(
    r.num_rows, r.num_cols, rho, psi,
    y.dval, z.dval, v.dval, w.dval, queue );

    //Chronometry
    real_Double_t tempo1, tempo2;
    tempo1 = magma_sync_wtime( queue );

    solver_par->numiter = 0;
    solver_par->spmv_count = 0;
    // start iteration
    do
    {
        solver_par->numiter++;
        if( magma_z_isnan_inf( rho ) || magma_z_isnan_inf( psi ) ){
            info = MAGMA_DIVERGENCE;
            break;
        }
            // delta = z' * y;
        delta = magma_zdotc( dofs, z.dval, 1, y.dval, 1, queue );
        if( magma_z_isnan_inf( delta ) ){
            info = MAGMA_DIVERGENCE;
            break;
        }

        // no precond: yt = y, zt = z
        //magma_zcopy( dofs, y.dval, 1, yt.dval, 1 );
        //magma_zcopy( dofs, z.dval, 1, zt.dval, 1 );

        if( solver_par->numiter == 1 ){
                // p = y;
                // q = z;
            magma_zcopy( dofs, y.dval, 1, p.dval, 1, queue );
            magma_zcopy( dofs, z.dval, 1, q.dval, 1, queue );
        }
        else{
            pde = psi * delta / epsilon;
            rde = rho * MAGMA_Z_CONJ(delta/epsilon);
                // p = y - pde * p
                // q = z - rde * q
            magma_zqmr_2(
            r.num_rows, r.num_cols, pde, rde,
            y.dval, z.dval, p.dval, q.dval, queue );
        }
        if( magma_z_isnan_inf( rho ) || magma_z_isnan_inf( psi ) ){
            info = MAGMA_DIVERGENCE;
            break;
        }

        CHECK( magma_z_spmv( c_one, A, p, c_zero, pt, queue ));
        solver_par->spmv_count++;
            // epsilon = q' * pt;
        epsilon = magma_zdotc( dofs, q.dval, 1, pt.dval, 1, queue );
        beta = epsilon / delta;
        if( magma_z_isnan_inf( epsilon ) || magma_z_isnan_inf( beta ) ){
            info = MAGMA_DIVERGENCE;
            break;
        }

        // v = pt - beta * v
        // y = v
        magma_zqmr_3(
        r.num_rows, r.num_cols, beta,
        pt.dval, v.dval, y.dval, queue );

        rho1 = rho;
            // rho = norm(y);
        rho = magma_zsqrt( magma_zdotc( dofs, y.dval, 1, y.dval, 1, queue ));

            // wt = A' * q - beta' * w;
        CHECK( magma_z_spmv( c_one, AT, q, c_zero, wt, queue ));
        solver_par->spmv_count++;
        magma_zaxpy( dofs, - MAGMA_Z_CONJ( beta ), w.dval, 1, wt.dval, 1, queue );

                    // no precond: z = wt
        magma_zcopy( dofs, wt.dval, 1, z.dval, 1, queue );

        thet1 = thet;
        thet = rho / (gamm * MAGMA_Z_MAKE( MAGMA_Z_ABS(beta), 0.0 ));
        gamm1 = gamm;

        gamm = c_one / magma_zsqrt(c_one + thet*thet);
        eta = - eta * rho1 * gamm * gamm / (beta * gamm1 * gamm1);
        if( magma_z_isnan_inf( thet ) || magma_z_isnan_inf( gamm ) || magma_z_isnan_inf( eta ) ){
            info = MAGMA_DIVERGENCE;
            break;
        }

        if( solver_par->numiter == 1 ){
                // d = eta * p + pds * d;
                // s = eta * pt + pds * d;
                // x = x + d;
                // r = r - s;
            magma_zqmr_4(
            r.num_rows, r.num_cols, eta,
            p.dval, pt.dval, d.dval, s.dval, x->dval, r.dval, queue );
        }
        else{
            pds = (thet1 * gamm) * (thet1 * gamm);
                // d = eta * p + pds * d;
                // s = eta * pt + pds * d;
                // x = x + d;
                // r = r - s;
            magma_zqmr_5(
            r.num_rows, r.num_cols, eta, pds,
            p.dval, pt.dval, d.dval, s.dval, x->dval, r.dval, queue );
        }
            // psi = norm(z);
        psi = magma_zsqrt( magma_zdotc( dofs, z.dval, 1, z.dval, 1, queue ) );

        res = magma_dznrm2( dofs, r.dval, 1, queue );

        if ( solver_par->verbose > 0 ) {
            tempo2 = magma_sync_wtime( queue );
            if ( (solver_par->numiter)%solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }

        // v = y / rho
        // y = y / rho
        // w = wt / psi
        // z = z / psi
        magma_zqmr_1(
        r.num_rows, r.num_cols, rho, psi,
        y.dval, z.dval, v.dval, w.dval, queue );

        if ( res/nomb <= solver_par->rtol || res <= solver_par->atol ){
            break;
        }
    }
    while ( solver_par->numiter+1 <= solver_par->maxiter );

    tempo2 = magma_sync_wtime( queue );
    solver_par->runtime = (real_Double_t) tempo2-tempo1;
    double residual;
    CHECK(  magma_zresidualvec( A, b, *x, &r, &residual, queue));
    solver_par->iter_res = res;
    solver_par->final_res = residual;

    if ( solver_par->numiter < solver_par->maxiter && info == MAGMA_SUCCESS ) {
        info = MAGMA_SUCCESS;
    } else if ( solver_par->init_res > solver_par->final_res ) {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        info = MAGMA_SLOW_CONVERGENCE;
        if( solver_par->iter_res < solver_par->rtol*solver_par->init_res ||
            solver_par->iter_res < solver_par->atol ) {
            info = MAGMA_SUCCESS;
        }
    }
    else {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        info = MAGMA_DIVERGENCE;
    }

cleanup:
    magma_zmfree(&r, queue );
    magma_zmfree(&r_tld, queue );
    magma_zmfree(&v,  queue );
    magma_zmfree(&w,  queue );
    magma_zmfree(&wt, queue );
    magma_zmfree(&d,  queue );
    magma_zmfree(&s,  queue );
    magma_zmfree(&z,  queue );
    magma_zmfree(&q,  queue );
    magma_zmfree(&p,  queue );
    magma_zmfree(&pt, queue );
    magma_zmfree(&y,  queue );
    magma_zmfree(&AT, queue );
    magma_zmfree(&Ah1, queue );
    magma_zmfree(&Ah2, queue );

    solver_par->info = info;
    return info;
}   /* magma_zqmr_merge */
/**
    Purpose
    -------
    Solves a Hermitian positive-definite sparse system A x = b with the
    (unpreconditioned) Conjugate Gradient method, tracking the true
    residual norm for the stopping criterion.

    @param[in]      A           system matrix (device), expected HPD
    @param[in]      b           right-hand side
    @param[in,out]  x           solution approximation; updated in place
    @param[in,out]  solver_par  solver parameters; receives residual history,
                                timings, iteration count and final status
    @param[in]      queue       queue to execute in

    @return magma_int_t: MAGMA_SUCCESS, MAGMA_SLOW_CONVERGENCE,
            MAGMA_DIVERGENCE, MAGMA_NONSPD, or an error code.
*/
extern "C" magma_int_t
magma_zcg_res(
    magma_z_matrix A, magma_z_matrix b, magma_z_matrix *x,
    magma_z_solver_par *solver_par,
    magma_queue_t queue )
{
    magma_int_t info = MAGMA_NOTCONVERGED;

    // prepare solver feedback
    solver_par->solver = Magma_CG;
    solver_par->numiter = 0;
    solver_par->spmv_count = 0;

    // solver variables
    magmaDoubleComplex alpha, beta;
    double nom, nom0, r0, res, nomb;
    magmaDoubleComplex den, gammanew, gammaold = MAGMA_Z_MAKE(1.0,0.0);

    // local variables
    magmaDoubleComplex c_zero = MAGMA_Z_ZERO, c_one = MAGMA_Z_ONE;

    magma_int_t dofs = A.num_rows* b.num_cols;

    // GPU workspace: residual, search direction, A*p
    magma_z_matrix r={Magma_CSR}, p={Magma_CSR}, q={Magma_CSR};
    CHECK( magma_zvinit( &r, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &p, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &q, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));

    // solver setup
    CHECK(  magma_zresidualvec( A, b, *x, &r, &nom0, queue));
    magma_zcopy( dofs, r.dval, 1, p.dval, 1, queue );                    // p = r
    nom  = MAGMA_Z_ABS( magma_zdotc( dofs, r.dval, 1, r.dval, 1, queue) );  // nom = <r,r>
    CHECK( magma_z_spmv( c_one, A, p, c_zero, q, queue ));           // q = A p
    solver_par->spmv_count++;
    den = magma_zdotc( dofs, p.dval, 1, q.dval, 1, queue ); // den = p dot q
    solver_par->init_res = nom0;

    nomb = magma_dznrm2( dofs, b.dval, 1, queue );
    if ( nomb == 0.0 ){
        nomb=1.0;
    }
    if ( (r0 = nomb * solver_par->rtol) < ATOLERANCE ){
        r0 = ATOLERANCE;
    }
    solver_par->final_res = solver_par->init_res;
    solver_par->iter_res = solver_par->init_res;
    if ( solver_par->verbose > 0 ) {
        solver_par->res_vec[0] = (real_Double_t)nom0;
        solver_par->timing[0] = 0.0;
    }
    // NOTE(review): nom is the squared norm here while r0 scales linearly
    // with rtol — confirm the early-exit test is intended as written.
    if ( nom < r0 ) {
        info = MAGMA_SUCCESS;
        goto cleanup;
    }
    // check positive definite: <p, Ap> must be positive for HPD A
    if ( MAGMA_Z_ABS(den) <= 0.0 ) {
        info = MAGMA_NONSPD;
        goto cleanup;
    }

    //Chronometry
    real_Double_t tempo1, tempo2;
    tempo1 = magma_sync_wtime( queue );

    solver_par->numiter = 0;
    solver_par->spmv_count = 0;
    // start iteration
    do
    {
        solver_par->numiter++;

        gammanew = magma_zdotc( dofs, r.dval, 1, r.dval, 1, queue );
                                                            // gn = < r,r>

        if ( solver_par->numiter == 1 ) {
            magma_zcopy( dofs, r.dval, 1, p.dval, 1, queue );     // p = r
        } else {
            beta = (gammanew/gammaold);       // beta = gn/go
            magma_zscal( dofs, beta, p.dval, 1, queue );            // p = beta*p
            magma_zaxpy( dofs, c_one, r.dval, 1, p.dval, 1, queue );     // p = p + r
        }

        CHECK( magma_z_spmv( c_one, A, p, c_zero, q, queue ));   // q = A p
        solver_par->spmv_count++;
        den = magma_zdotc( dofs, p.dval, 1, q.dval, 1, queue );
                // den = p dot q
        alpha = gammanew / den;
        magma_zaxpy( dofs, alpha, p.dval, 1, x->dval, 1, queue ); // x = x + alpha p
        magma_zaxpy( dofs, -alpha, q.dval, 1, r.dval, 1, queue );  // r = r - alpha q
        gammaold = gammanew;

        res = magma_dznrm2( dofs, r.dval, 1, queue );

        if ( solver_par->verbose > 0 ) {
            tempo2 = magma_sync_wtime( queue );
            if ( (solver_par->numiter)%solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }

        if ( res/nomb <= solver_par->rtol || res <= solver_par->atol ){
            break;
        }
    }
    while ( solver_par->numiter+1 <= solver_par->maxiter );

    tempo2 = magma_sync_wtime( queue );
    solver_par->runtime = (real_Double_t) tempo2-tempo1;
    double residual;
    CHECK(  magma_zresidualvec( A, b, *x, &r, &residual, queue));
    solver_par->iter_res = res;
    solver_par->final_res = residual;

    if ( solver_par->numiter < solver_par->maxiter ) {
        info = MAGMA_SUCCESS;
    } else if ( solver_par->init_res > solver_par->final_res ) {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        info = MAGMA_SLOW_CONVERGENCE;
        if( solver_par->iter_res < solver_par->rtol*solver_par->init_res ||
            solver_par->iter_res < solver_par->atol ) {
            info = MAGMA_SUCCESS;
        }
    }
    else {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        info = MAGMA_DIVERGENCE;
    }

cleanup:
    magma_zmfree(&r, queue );
    magma_zmfree(&p, queue );
    magma_zmfree(&q, queue );

    solver_par->info = info;
    return info;
}   /* magma_zcg */
/**
    Purpose
    -------
    Solves a Hermitian positive-definite sparse system A x = b with the
    preconditioned Conjugate Gradient method in merged-kernel form:
    scalar parameters live in the device array skp and the vector updates
    are fused (magma_zcgmerge_spmv1, magma_zpcgmerge_xrbeta1/2,
    magma_zjcgmerge_xrbeta).  Jacobi and "no preconditioner" take
    specialized fused paths; any other preconditioner is applied between
    the two update kernels.

    Fix relative to the previous revision: the divergence branch assigned
    MAGMA_DIVERGENCE to solver_par->info instead of the local info; the
    cleanup epilogue (solver_par->info = info) then overwrote it with
    MAGMA_NOTCONVERGED, so divergence was never reported.  It now sets the
    local info, matching every other solver in this file.

    @param[in]      A           system matrix (device), expected HPD
    @param[in]      b           right-hand side
    @param[in,out]  x           solution approximation; updated in place
    @param[in,out]  solver_par  solver parameters; receives residual history,
                                timings, iteration count and final status
    @param[in,out]  precond_par preconditioner; runtime is accumulated
    @param[in]      queue       queue to execute in

    @return magma_int_t: MAGMA_SUCCESS, MAGMA_SLOW_CONVERGENCE,
            MAGMA_DIVERGENCE, MAGMA_NONSPD, or an error code.
*/
extern "C" magma_int_t
magma_zpcg_merge(
    magma_z_matrix A, magma_z_matrix b, magma_z_matrix *x,
    magma_z_solver_par *solver_par,
    magma_z_preconditioner *precond_par,
    magma_queue_t queue )
{
    magma_int_t info = MAGMA_NOTCONVERGED;

    // prepare solver feedback
    solver_par->solver = Magma_PCGMERGE;
    solver_par->numiter = 0;
    solver_par->spmv_count = 0;

    // solver variables
    magmaDoubleComplex alpha, beta, gamma, rho, tmp1, *skp_h={0};
    double nom, nom0, r0, res, nomb;
    magmaDoubleComplex den;

    // some useful variables
    magmaDoubleComplex c_zero = MAGMA_Z_ZERO, c_one = MAGMA_Z_ONE;
    magma_int_t dofs = A.num_rows*b.num_cols;

    magma_z_matrix r={Magma_CSR}, d={Magma_CSR}, z={Magma_CSR}, h={Magma_CSR},
                    rt={Magma_CSR};
    magmaDoubleComplex *d1=NULL, *d2=NULL, *skp=NULL;

    // GPU workspace
    CHECK( magma_zvinit( &r, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &d, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &z, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &rt, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));
    CHECK( magma_zvinit( &h, Magma_DEV, A.num_rows, b.num_cols, c_zero, queue ));

    CHECK( magma_zmalloc( &d1, dofs*(2) ));
    CHECK( magma_zmalloc( &d2, dofs*(2) ));
    // array for the parameters
    CHECK( magma_zmalloc( &skp, 7 ));
    // skp = [alpha|beta|gamma|rho|tmp1|tmp2|res]

    // solver setup
    CHECK(  magma_zresidualvec( A, b, *x, &r, &nom0, queue));

    // preconditioner: h = M^{-1} r
    CHECK( magma_z_applyprecond_left( MagmaNoTrans, A, r, &rt, precond_par, queue ));
    CHECK( magma_z_applyprecond_right( MagmaNoTrans, A, rt, &h, precond_par, queue ));

    magma_zcopy( dofs, h.dval, 1, d.dval, 1, queue );
    nom = MAGMA_Z_ABS( magma_zdotc( dofs, r.dval, 1, h.dval, 1, queue ));
    CHECK( magma_z_spmv( c_one, A, d, c_zero, z, queue ));              // z = A d
    den = magma_zdotc( dofs, d.dval, 1, z.dval, 1, queue ); // den = d'* z
    solver_par->init_res = nom0;

    nomb = magma_dznrm2( dofs, b.dval, 1, queue );
    if ( nomb == 0.0 ){
        nomb=1.0;
    }
    if ( (r0 = nomb * solver_par->rtol) < ATOLERANCE ){
        r0 = ATOLERANCE;
    }
    solver_par->final_res = solver_par->init_res;
    solver_par->iter_res = solver_par->init_res;
    if ( solver_par->verbose > 0 ) {
        solver_par->res_vec[0] = (real_Double_t)nom0;
        solver_par->timing[0] = 0.0;
    }
    if ( nom < r0 ) {
        info = MAGMA_SUCCESS;
        goto cleanup;
    }
    // check positive definite: <d, Ad> must be positive for HPD A
    if ( MAGMA_Z_ABS(den) <= 0.0 ) {
        info = MAGMA_NONSPD;
        goto cleanup;
    }

    // array on host for the parameters
    CHECK( magma_zmalloc_cpu( &skp_h, 7 ));

    alpha = rho = gamma = tmp1 = c_one;
    beta =  magma_zdotc( dofs, h.dval, 1, r.dval, 1, queue );
    skp_h[0]=alpha;
    skp_h[1]=beta;
    skp_h[2]=gamma;
    skp_h[3]=rho;
    skp_h[4]=tmp1;
    skp_h[5]=MAGMA_Z_MAKE(nom, 0.0);
    skp_h[6]=MAGMA_Z_MAKE(nom, 0.0);

    magma_zsetvector( 7, skp_h, 1, skp, 1, queue );

    //Chronometry
    real_Double_t tempo1, tempo2, tempop1, tempop2;
    tempo1 = magma_sync_wtime( queue );

    solver_par->numiter = 0;
    solver_par->spmv_count = 0;
    // start iteration
    do
    {
        solver_par->numiter++;

        // computes SpMV and dot product
        CHECK( magma_zcgmerge_spmv1(  A, d1, d2, d.dval, z.dval, skp, queue ));
        solver_par->spmv_count++;

        if( precond_par->solver == Magma_JACOBI ){
            // fused Jacobi path: updates x, r, d and the scalars in one go
            CHECK( magma_zjcgmerge_xrbeta( dofs, d1, d2, precond_par->d.dval,
                x->dval, r.dval, d.dval, z.dval, h.dval, skp, queue ));
        }
        else if( precond_par->solver == Magma_NONE ){
            // updates x, r
            CHECK( magma_zpcgmerge_xrbeta1( dofs, x->dval, r.dval,
                d.dval, z.dval, skp, queue ));
            // computes scalars and updates d
            CHECK( magma_zpcgmerge_xrbeta2( dofs, d1, d2, r.dval, r.dval,
                d.dval, skp, queue ));
        }
        else {
            // updates x, r
            CHECK( magma_zpcgmerge_xrbeta1( dofs, x->dval, r.dval,
                d.dval, z.dval, skp, queue ));

            // preconditioner in between: h = M^{-1} r
            tempop1 = magma_sync_wtime( queue );
            CHECK( magma_z_applyprecond_left( MagmaNoTrans, A, r, &rt, precond_par, queue ));
            CHECK( magma_z_applyprecond_right( MagmaNoTrans, A, rt, &h, precond_par, queue ));
            // magma_zcopy( dofs, r.dval, 1, h.dval, 1 );
            tempop2 = magma_sync_wtime( queue );
            precond_par->runtime += tempop2-tempop1;

            // computes scalars and updates d
            CHECK( magma_zpcgmerge_xrbeta2( dofs, d1, d2, h.dval, r.dval,
                d.dval, skp, queue ));
        }

        //if( solver_par->numiter==1){
        //    magma_zcopy( dofs, h.dval, 1, d.dval, 1 );
        //}

        // updates x, r, computes scalars and updates d
        //CHECK( magma_zcgmerge_xrbeta( dofs, d1, d2, x->dval, r.dval, d.dval, z.dval, skp, queue ));

        // check stopping criterion (asynchronous copy)
        magma_zgetvector( 1 , skp+6, 1, skp_h+6, 1, queue );
        res = sqrt(MAGMA_Z_ABS(skp_h[6]));

        if ( solver_par->verbose > 0 ) {
            tempo2 = magma_sync_wtime( queue );
            if ( (solver_par->numiter)%solver_par->verbose==0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }

        if ( res/nomb <= solver_par->rtol || res <= solver_par->atol ){
            break;
        }
    }
    while ( solver_par->numiter+1 <= solver_par->maxiter );

    tempo2 = magma_sync_wtime( queue );
    solver_par->runtime = (real_Double_t) tempo2-tempo1;
    double residual;
    CHECK(  magma_zresidualvec( A, b, *x, &r, &residual, queue));
    solver_par->iter_res = res;
    solver_par->final_res = residual;

    if ( solver_par->numiter < solver_par->maxiter ) {
        info = MAGMA_SUCCESS;
    } else if ( solver_par->init_res > solver_par->final_res ) {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose==0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        info = MAGMA_SLOW_CONVERGENCE;
        if( solver_par->iter_res < solver_par->atol ||
            solver_par->iter_res/solver_par->init_res < solver_par->rtol ){
            info = MAGMA_SUCCESS;
        }
    }
    else {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose==0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) res;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        // fix: set the local info so the cleanup epilogue does not
        // overwrite the divergence status with MAGMA_NOTCONVERGED
        info = MAGMA_DIVERGENCE;
    }

cleanup:
    magma_zmfree(&r, queue );
    magma_zmfree(&z, queue );
    magma_zmfree(&d, queue );
    magma_zmfree(&rt, queue );
    magma_zmfree(&h, queue );
    magma_free( d1 );
    magma_free( d2 );
    magma_free( skp );
    magma_free_cpu( skp_h );

    solver_par->info = info;
    return info;
}   /* magma_zpcg_merge */
/**
    Solves a system of linear equations A x = b using the preconditioned
    IDR(s) method with merged kernel operations. A is required to be square
    (checked below). The shadow-space dimension s is taken from the
    '--restart' option (see hack note below). Several work vectors alias
    into larger allocations (dr into dt, drs into dtt, dGcol into dG);
    the cleanup section NULLs those aliases before freeing.

    @param[in]      A            square input matrix (device)
    @param[in]      b            RHS vector (device)
    @param[in,out]  x            solution approximation, updated in place
    @param[in,out]  solver_par   solver parameters; receives residual history,
                                 timings, iteration count and final status
    @param[in]      precond_par  preconditioner, applied as left then right
    @param[in]      queue        queue to execute in

    @return MAGMA_SUCCESS, MAGMA_SLOW_CONVERGENCE, MAGMA_DIVERGENCE,
            MAGMA_ERR_NOT_SUPPORTED (non-square A), or a CHECK'd error code.
*/
extern "C" magma_int_t
magma_zpidr_merge(
    magma_z_matrix A, magma_z_matrix b, magma_z_matrix *x,
    magma_z_solver_par *solver_par,
    magma_z_preconditioner *precond_par,
    magma_queue_t queue )
{
    magma_int_t info = MAGMA_NOTCONVERGED;

    // prepare solver feedback
    solver_par->solver = Magma_PIDRMERGE;
    solver_par->numiter = 0;
    solver_par->spmv_count = 0;
    solver_par->init_res = 0.0;
    solver_par->final_res = 0.0;
    solver_par->iter_res = 0.0;
    solver_par->runtime = 0.0;

    // constants
    const magmaDoubleComplex c_zero = MAGMA_Z_ZERO;
    const magmaDoubleComplex c_one = MAGMA_Z_ONE;
    const magmaDoubleComplex c_n_one = MAGMA_Z_NEG_ONE;

    // internal user parameters
    const magma_int_t smoothing = 1;   // residual smoothing: 0 = disable, 1 = enable
    const double angle = 0.7;          // omega-stabilization threshold, range [0-1]

    // local variables
    magma_int_t iseed[4] = {0, 0, 0, 1];   // NOTE(review): see body below — kept verbatim
    magma_int_t dof;
    magma_int_t s;
    magma_int_t distr;
    magma_int_t k, i, sk;
    magma_int_t innerflag;   // 0 = keep going, 1 = breakdown, 2 = converged in inner loop
    magma_int_t ldd;
    double residual;
    double nrm;
    double nrmb;
    double nrmr;
    double nrmt;
    double rho;
    magmaDoubleComplex om;
    magmaDoubleComplex gamma;
    magmaDoubleComplex fk;

    // matrices and vectors (d* = device, h* = host)
    magma_z_matrix dxs = {Magma_CSR};                       // smoothed solution
    magma_z_matrix dr = {Magma_CSR}, drs = {Magma_CSR};     // residual / smoothed residual
    magma_z_matrix dP = {Magma_CSR}, dP1 = {Magma_CSR};     // shadow space P
    magma_z_matrix dG = {Magma_CSR}, dGcol = {Magma_CSR};   // G = A*U; dGcol aliases a column
    magma_z_matrix dU = {Magma_CSR};
    magma_z_matrix dM = {Magma_CSR}, hMdiag = {Magma_CSR};  // small s x s system M = P'G
    magma_z_matrix df = {Magma_CSR};
    magma_z_matrix dt = {Magma_CSR}, dtt = {Magma_CSR};
    magma_z_matrix dc = {Magma_CSR};
    magma_z_matrix dv = {Magma_CSR};
    magma_z_matrix dlu = {Magma_CSR};                       // preconditioner temporary
    magma_z_matrix dskp = {Magma_CSR}, hskp = {Magma_CSR};  // scalar products
    magma_z_matrix dalpha = {Magma_CSR}, halpha = {Magma_CSR};
    magma_z_matrix dbeta = {Magma_CSR}, hbeta = {Magma_CSR};
    magmaDoubleComplex *d1 = NULL, *d2 = NULL;              // merged-dot workspace

    // chronometry
    real_Double_t tempo1, tempo2;

    // initial s space
    // TODO: add option for 's' (shadow space number)
    // Hack: uses '--restart' option as the shadow space number.
    // This is not a good idea because the default value of restart option is used to detect
    // if the user provided a custom restart. This means that if the default restart value
    // is changed then the code will think it was the user (unless the default value is
    // also updated in the 'if' statement below).
    s = 1;
    if ( solver_par->restart != 50 ) {
        if ( solver_par->restart > A.num_cols ) {
            s = A.num_cols;
        } else {
            s = solver_par->restart;
        }
    }
    solver_par->restart = s;

    // set max iterations
    solver_par->maxiter = min( 2 * A.num_cols, solver_par->maxiter );

    // check if matrix A is square
    if ( A.num_rows != A.num_cols ) {
        //printf("Matrix A is not square.\n");
        info = MAGMA_ERR_NOT_SUPPORTED;
        goto cleanup;
    }

    // |b|
    nrmb = magma_dznrm2( b.num_rows, b.dval, 1, queue );
    if ( nrmb == 0.0 ) {
        // zero RHS -> exact solution is x = 0
        magma_zscal( x->num_rows, MAGMA_Z_ZERO, x->dval, 1, queue );
        info = MAGMA_SUCCESS;
        goto cleanup;
    }

    // t = 0
    // make t twice as large to contain both, dt and dr
    ldd = magma_roundup( b.num_rows, 32 );
    CHECK( magma_zvinit( &dt, Magma_DEV, ldd, 2, c_zero, queue ));
    dt.num_rows = b.num_rows;
    dt.num_cols = 1;
    dt.nnz = dt.num_rows;

    // redirect the dr.dval to the second part of dt
    // (dr must NOT be freed normally; cleanup NULLs dr.dval first)
    CHECK( magma_zvinit( &dr, Magma_DEV, b.num_rows, 1, c_zero, queue ));
    magma_free( dr.dval );
    dr.dval = dt.dval + ldd;

    // r = b - A x
    CHECK( magma_zresidualvec( A, b, *x, &dr, &nrmr, queue ));

    // |r|
    solver_par->init_res = nrmr;
    solver_par->final_res = solver_par->init_res;
    solver_par->iter_res = solver_par->init_res;
    if ( solver_par->verbose > 0 ) {
        solver_par->res_vec[0] = (real_Double_t)nrmr;
    }

    // check if initial guess is good enough
    if ( nrmr <= solver_par->atol ||
        nrmr/nrmb <= solver_par->rtol ) {
        info = MAGMA_SUCCESS;
        goto cleanup;
    }

    // P = randn(n, s)
    // P = ortho(P)
    //---------------------------------------
    // P = 0.0
    CHECK( magma_zvinit( &dP, Magma_CPU, A.num_cols, s, c_zero, queue ));

    // P = randn(n, s)
    distr = 3;        // 1 = unif (0,1), 2 = unif (-1,1), 3 = normal (0,1)
    dof = dP.num_rows * dP.num_cols;
    lapackf77_zlarnv( &distr, iseed, &dof, dP.val );

    // transfer P to device
    CHECK( magma_zmtransfer( dP, &dP1, Magma_CPU, Magma_DEV, queue ));
    magma_zmfree( &dP, queue );

    // P = ortho(P1)
    if ( dP1.num_cols > 1 ) {
        // P = magma_zqr(P1), QR factorization
        CHECK( magma_zqr( dP1.num_rows, dP1.num_cols, dP1, dP1.ld, &dP, NULL, queue ));
    } else {
        // single column: just normalize, P = P1 / |P1|
        nrm = magma_dznrm2( dof, dP1.dval, 1, queue );
        nrm = 1.0 / nrm;
        magma_zdscal( dof, nrm, dP1.dval, 1, queue );
        CHECK( magma_zmtransfer( dP1, &dP, Magma_DEV, Magma_DEV, queue ));
    }
    magma_zmfree( &dP1, queue );
    //---------------------------------------

    // allocate memory for the scalar products
    CHECK( magma_zvinit( &hskp, Magma_CPU, 4, 1, c_zero, queue ));
    CHECK( magma_zvinit( &dskp, Magma_DEV, 4, 1, c_zero, queue ));
    CHECK( magma_zvinit( &halpha, Magma_CPU, s, 1, c_zero, queue ));
    CHECK( magma_zvinit( &dalpha, Magma_DEV, s, 1, c_zero, queue ));
    CHECK( magma_zvinit( &hbeta, Magma_CPU, s, 1, c_zero, queue ));
    CHECK( magma_zvinit( &dbeta, Magma_DEV, s, 1, c_zero, queue ));

    // workspace for merged dot product
    CHECK( magma_zmalloc( &d1, max(2, s) * b.num_rows ));
    CHECK( magma_zmalloc( &d2, max(2, s) * b.num_rows ));

    // smoothing enabled
    if ( smoothing > 0 ) {
        // set smoothing solution vector
        CHECK( magma_zmtransfer( *x, &dxs, Magma_DEV, Magma_DEV, queue ));

        // tt = 0
        // make tt twice as large to contain both, dtt and drs
        ldd = magma_roundup( b.num_rows, 32 );
        CHECK( magma_zvinit( &dtt, Magma_DEV, ldd, 2, c_zero, queue ));
        dtt.num_rows = dr.num_rows;
        dtt.num_cols = 1;
        dtt.nnz = dtt.num_rows;

        // redirect the drs.dval to the second part of dtt
        // (same aliasing pattern as dr/dt above)
        CHECK( magma_zvinit( &drs, Magma_DEV, dr.num_rows, 1, c_zero, queue ));
        magma_free( drs.dval );
        drs.dval = dtt.dval + ldd;

        // set smoothing residual vector
        magma_zcopyvector( dr.num_rows, dr.dval, 1, drs.dval, 1, queue );
    }

    // G(n,s) = 0  (padded leading dimension only when s > 1)
    if ( s > 1 ) {
        ldd = magma_roundup( A.num_rows, 32 );
        CHECK( magma_zvinit( &dG, Magma_DEV, ldd, s, c_zero, queue ));
        dG.num_rows = A.num_rows;
    } else {
        CHECK( magma_zvinit( &dG, Magma_DEV, A.num_rows, s, c_zero, queue ));
    }

    // dGcol represents a single column of dG, array pointer is set inside loop
    CHECK( magma_zvinit( &dGcol, Magma_DEV, dG.num_rows, 1, c_zero, queue ));
    magma_free( dGcol.dval );

    // U(n,s) = 0
    if ( s > 1 ) {
        ldd = magma_roundup( A.num_cols, 32 );
        CHECK( magma_zvinit( &dU, Magma_DEV, ldd, s, c_zero, queue ));
        dU.num_rows = A.num_cols;
    } else {
        CHECK( magma_zvinit( &dU, Magma_DEV, A.num_cols, s, c_zero, queue ));
    }

    // M(s,s) = I
    CHECK( magma_zvinit( &dM, Magma_DEV, s, s, c_zero, queue ));
    CHECK( magma_zvinit( &hMdiag, Magma_CPU, s, 1, c_zero, queue ));
    magmablas_zlaset( MagmaFull, dM.num_rows, dM.num_cols, c_zero, c_one, dM.dval, dM.ld, queue );

    // f = 0
    CHECK( magma_zvinit( &df, Magma_DEV, dP.num_cols, 1, c_zero, queue ));

    // c = 0
    CHECK( magma_zvinit( &dc, Magma_DEV, dM.num_cols, 1, c_zero, queue ));

    // v = 0
    CHECK( magma_zvinit( &dv, Magma_DEV, dr.num_rows, 1, c_zero, queue ));

    // lu = 0
    CHECK( magma_zvinit( &dlu, Magma_DEV, dr.num_rows, 1, c_zero, queue ));

    //--------------START TIME---------------
    // chronometry
    tempo1 = magma_sync_wtime( queue );
    if ( solver_par->verbose > 0 ) {
        solver_par->timing[0] = 0.0;
    }

    om = MAGMA_Z_ONE;
    innerflag = 0;

    // start iteration
    do
    {
        solver_par->numiter++;

        // new RHS for small systems
        // f = P' r
        magma_zgemvmdot_shfl( dP.num_rows, dP.num_cols, dP.dval, dr.dval, d1, d2, df.dval, queue );

        // shadow space loop
        for ( k = 0; k < s; ++k ) {
            sk = s - k;

            // c(k:s) = M(k:s,k:s) \ f(k:s)
            magma_zcopyvector( sk, &df.dval[k], 1, &dc.dval[k], 1, queue );
            magma_ztrsv( MagmaLower, MagmaNoTrans, MagmaNonUnit, sk, &dM.dval[k*dM.ld+k], dM.ld, &dc.dval[k], 1, queue );

            // v = r - G(:,k:s) c(k:s)
            magma_zcopyvector( dr.num_rows, dr.dval, 1, dv.dval, 1, queue );
            magmablas_zgemv( MagmaNoTrans, dG.num_rows, sk, c_n_one, &dG.dval[k*dG.ld], dG.ld, &dc.dval[k], 1, c_one, dv.dval, 1, queue );

            // preconditioning operation
            // v = L \ v;
            // v = U \ v;
            CHECK( magma_z_applyprecond_left( MagmaNoTrans, A, dv, &dlu, precond_par, queue ));
            CHECK( magma_z_applyprecond_right( MagmaNoTrans, A, dlu, &dv, precond_par, queue ));

            // U(:,k) = om * v + U(:,k:s) c(k:s)
            magmablas_zgemv( MagmaNoTrans, dU.num_rows, sk, c_one, &dU.dval[k*dU.ld], dU.ld, &dc.dval[k], 1, om, dv.dval, 1, queue );
            magma_zcopyvector( dU.num_rows, dv.dval, 1, &dU.dval[k*dU.ld], 1, queue );

            // G(:,k) = A U(:,k)   (dGcol aliases column k of dG)
            dGcol.dval = dG.dval + k * dG.ld;
            CHECK( magma_z_spmv( c_one, A, dv, c_zero, dGcol, queue ));
            solver_par->spmv_count++;

            // bi-orthogonalize the new basis vectors
            for ( i = 0; i < k; ++i ) {
                // alpha = P(:,i)' G(:,k)
                halpha.val[i] = magma_zdotc( dP.num_rows, &dP.dval[i*dP.ld], 1, &dG.dval[k*dG.ld], 1, queue );
                // alpha = alpha / M(i,i)
                halpha.val[i] = halpha.val[i] / hMdiag.val[i];
                // G(:,k) = G(:,k) - alpha * G(:,i)
                magma_zaxpy( dG.num_rows, -halpha.val[i], &dG.dval[i*dG.ld], 1, &dG.dval[k*dG.ld], 1, queue );
            }

            // non-first s iteration
            if ( k > 0 ) {
                // U update outside of loop using GEMV
                // U(:,k) = U(:,k) - U(:,1:k) * alpha(1:k)
                magma_zsetvector( k, halpha.val, 1, dalpha.dval, 1, queue );
                magmablas_zgemv( MagmaNoTrans, dU.num_rows, k, c_n_one, dU.dval, dU.ld, dalpha.dval, 1, c_one, &dU.dval[k*dU.ld], 1, queue );
            }

            // new column of M = P'G, first k-1 entries are zero
            // M(k:s,k) = P(:,k:s)' G(:,k)
            magma_zgemvmdot_shfl( dP.num_rows, sk, &dP.dval[k*dP.ld], &dG.dval[k*dG.ld], d1, d2, &dM.dval[k*dM.ld+k], queue );
            magma_zgetvector( 1, &dM.dval[k*dM.ld+k], 1, &hMdiag.val[k], 1, queue );

            // check M(k,k) == 0  (breakdown)
            if ( MAGMA_Z_EQUAL(hMdiag.val[k], MAGMA_Z_ZERO) ) {
                innerflag = 1;
                info = MAGMA_DIVERGENCE;
                break;
            }

            // beta = f(k) / M(k,k)
            magma_zgetvector( 1, &df.dval[k], 1, &fk, 1, queue );
            hbeta.val[k] = fk / hMdiag.val[k];

            // check for nan
            if ( magma_z_isnan( hbeta.val[k] ) || magma_z_isinf( hbeta.val[k] )) {
                innerflag = 1;
                info = MAGMA_DIVERGENCE;
                break;
            }

            // r = r - beta * G(:,k)
            magma_zaxpy( dr.num_rows, -hbeta.val[k], &dG.dval[k*dG.ld], 1, dr.dval, 1, queue );

            // smoothing disabled
            if ( smoothing <= 0 ) {
                // |r|
                nrmr = magma_dznrm2( dr.num_rows, dr.dval, 1, queue );

            // smoothing enabled
            } else {
                // x = x + beta * U(:,k)
                // (with smoothing, x is updated per-k; without, once after the loop)
                magma_zaxpy( x->num_rows, hbeta.val[k], &dU.dval[k*dU.ld], 1, x->dval, 1, queue );

                // smoothing operation
                //---------------------------------------
                // t = rs - r
                magma_zidr_smoothing_1( drs.num_rows, drs.num_cols, drs.dval, dr.dval, dtt.dval, queue );

                // t't
                // t'rs
                CHECK( magma_zgemvmdot_shfl( dt.ld, 2, dtt.dval, dtt.dval, d1, d2, &dskp.dval[2], queue ));
                magma_zgetvector( 2, &dskp.dval[2], 1, &hskp.val[2], 1, queue );

                // gamma = (t' * rs) / (t' * t)
                gamma = hskp.val[3] / hskp.val[2];

                // rs = rs - gamma * (rs - r)
                magma_zaxpy( drs.num_rows, -gamma, dtt.dval, 1, drs.dval, 1, queue );

                // xs = xs - gamma * (xs - x)
                magma_zidr_smoothing_2( dxs.num_rows, dxs.num_cols, -gamma, x->dval, dxs.dval, queue );

                // |rs|
                nrmr = magma_dznrm2( drs.num_rows, drs.dval, 1, queue );
                //---------------------------------------
            }

            // store current timing and residual
            if ( solver_par->verbose > 0 ) {
                tempo2 = magma_sync_wtime( queue );
                if ( (solver_par->numiter) % solver_par->verbose == 0 ) {
                    solver_par->res_vec[(solver_par->numiter) / solver_par->verbose]
                            = (real_Double_t)nrmr;
                    solver_par->timing[(solver_par->numiter) / solver_par->verbose]
                            = (real_Double_t)tempo2 - tempo1;
                }
            }

            // check convergence or iteration limit
            if ( nrmr <= solver_par->atol ||
                nrmr/nrmb <= solver_par->rtol ) {
                s = k + 1; // for the x-update outside the loop
                innerflag = 2;
                info = MAGMA_SUCCESS;
                break;
            }

            // non-last s iteration
            if ( (k + 1) < s ) {
                // f(k+1:s) = f(k+1:s) - beta * M(k+1:s,k)
                magma_zaxpy( sk-1, -hbeta.val[k], &dM.dval[k*dM.ld+(k+1)], 1, &df.dval[k+1], 1, queue );
            }
        }

        // smoothing disabled: deferred x-update (skipped on breakdown)
        if ( smoothing <= 0 && innerflag != 1 ) {
            // update solution approximation x
            // x = x + U(:,1:s) * beta(1:s)
            magma_zsetvector( s, hbeta.val, 1, dbeta.dval, 1, queue );
            magmablas_zgemv( MagmaNoTrans, dU.num_rows, s, c_one, dU.dval, dU.ld, dbeta.dval, 1, c_one, x->dval, 1, queue );
        }

        // check convergence or iteration limit or invalid result of inner loop
        if ( innerflag > 0 ) {
            break;
        }

        // v = r
        magma_zcopy( dr.num_rows, dr.dval, 1, dv.dval, 1, queue );

        // preconditioning operation
        // v = L \ v;
        // v = U \ v;
        CHECK( magma_z_applyprecond_left( MagmaNoTrans, A, dv, &dlu, precond_par, queue ));
        CHECK( magma_z_applyprecond_right( MagmaNoTrans, A, dlu, &dv, precond_par, queue ));

        // t = A v
        CHECK( magma_z_spmv( c_one, A, dv, c_zero, dt, queue ));
        solver_par->spmv_count++;

        // computation of a new omega
        //---------------------------------------
        // t't
        // t'r
        CHECK( magma_zgemvmdot_shfl( dt.ld, 2, dt.dval, dt.dval, d1, d2, dskp.dval, queue ));
        magma_zgetvector( 2, dskp.dval, 1, hskp.val, 1, queue );

        // |t|
        nrmt = magma_dsqrt( MAGMA_Z_REAL(hskp.val[0]) );

        // rho = abs((t' * r) / (|t| * |r|))
        rho = MAGMA_D_ABS( MAGMA_Z_REAL(hskp.val[1]) / (nrmt * nrmr) );

        // om = (t' * r) / (|t| * |t|)
        om = hskp.val[1] / hskp.val[0];
        if ( rho < angle ) {
            // maintaining-convergence strategy: enlarge om when the angle
            // between t and r is too small
            om = (om * angle) / rho;
        }
        //---------------------------------------
        if ( MAGMA_Z_EQUAL(om, MAGMA_Z_ZERO) ) {
            info = MAGMA_DIVERGENCE;
            break;
        }

        // update approximation vector
        // x = x + om * v
        magma_zaxpy( x->num_rows, om, dv.dval, 1, x->dval, 1, queue );

        // update residual vector
        // r = r - om * t
        magma_zaxpy( dr.num_rows, -om, dt.dval, 1, dr.dval, 1, queue );

        // smoothing disabled
        if ( smoothing <= 0 ) {
            // residual norm
            nrmr = magma_dznrm2( dr.num_rows, dr.dval, 1, queue );

        // smoothing enabled
        } else {
            // smoothing operation
            //---------------------------------------
            // t = rs - r
            magma_zidr_smoothing_1( drs.num_rows, drs.num_cols, drs.dval, dr.dval, dtt.dval, queue );

            // t't
            // t'rs
            CHECK( magma_zgemvmdot_shfl( dt.ld, 2, dtt.dval, dtt.dval, d1, d2, &dskp.dval[2], queue ));
            magma_zgetvector( 2, &dskp.dval[2], 1, &hskp.val[2], 1, queue );

            // gamma = (t' * rs) / (t' * t)
            gamma = hskp.val[3] / hskp.val[2];

            // rs = rs - gamma * (rs - r)
            magma_zaxpy( drs.num_rows, -gamma, dtt.dval, 1, drs.dval, 1, queue );

            // xs = xs - gamma * (xs - x)
            magma_zidr_smoothing_2( dxs.num_rows, dxs.num_cols, -gamma, x->dval, dxs.dval, queue );

            // |rs|
            nrmr = magma_dznrm2( drs.num_rows, drs.dval, 1, queue );
            //---------------------------------------
        }

        // store current timing and residual
        if ( solver_par->verbose > 0 ) {
            tempo2 = magma_sync_wtime( queue );
            if ( (solver_par->numiter) % solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter) / solver_par->verbose]
                        = (real_Double_t)nrmr;
                solver_par->timing[(solver_par->numiter) / solver_par->verbose]
                        = (real_Double_t)tempo2 - tempo1;
            }
        }

        // check convergence
        if ( nrmr <= solver_par->atol ||
            nrmr/nrmb <= solver_par->rtol ) {
            info = MAGMA_SUCCESS;
            break;
        }
    }
    while ( solver_par->numiter + 1 <= solver_par->maxiter );

    // smoothing enabled: the smoothed iterates are the final answer
    if ( smoothing > 0 ) {
        // x = xs
        magma_zcopyvector( x->num_rows, dxs.dval, 1, x->dval, 1, queue );
        // r = rs
        magma_zcopyvector( dr.num_rows, drs.dval, 1, dr.dval, 1, queue );
    }

    // get last iteration timing
    tempo2 = magma_sync_wtime( queue );
    solver_par->runtime = (real_Double_t)tempo2 - tempo1;
    //--------------STOP TIME----------------

    // get final stats
    solver_par->iter_res = nrmr;
    CHECK( magma_zresidualvec( A, b, *x, &dr, &residual, queue ));
    solver_par->final_res = residual;

    // set solver conclusion
    if ( info != MAGMA_SUCCESS && info != MAGMA_DIVERGENCE ) {
        if ( solver_par->init_res > solver_par->final_res ) {
            info = MAGMA_SLOW_CONVERGENCE;
        }
    }

cleanup:
    // free resources
    // smoothing enabled
    if ( smoothing > 0 ) {
        drs.dval = NULL;  // needed because its pointer is redirected to dtt
        magma_zmfree( &dxs, queue );
        magma_zmfree( &drs, queue );
        magma_zmfree( &dtt, queue );
    }
    dr.dval = NULL;       // needed because its pointer is redirected to dt
    dGcol.dval = NULL;    // needed because its pointer is redirected to dG
    magma_zmfree( &dr, queue );
    magma_zmfree( &dP, queue );
    magma_zmfree( &dP1, queue );
    magma_zmfree( &dG, queue );
    magma_zmfree( &dGcol, queue );
    magma_zmfree( &dU, queue );
    magma_zmfree( &dM, queue );
    magma_zmfree( &hMdiag, queue );
    magma_zmfree( &df, queue );
    magma_zmfree( &dt, queue );
    magma_zmfree( &dc, queue );
    magma_zmfree( &dv, queue );
    magma_zmfree( &dlu, queue );
    magma_zmfree( &dskp, queue );
    magma_zmfree( &dalpha, queue );
    magma_zmfree( &dbeta, queue );
    magma_zmfree( &hskp, queue );
    magma_zmfree( &halpha, queue );
    magma_zmfree( &hbeta, queue );
    magma_free( d1 );
    magma_free( d2 );

    solver_par->info = info;
    return info;
    /* magma_zpidr_merge */
}