extern "C" magma_int_t magma_cbicgstab_merge( magma_c_sparse_matrix A, magma_c_vector b, magma_c_vector *x, magma_c_solver_par *solver_par, magma_queue_t queue ) { // set queue for old dense routines magma_queue_t orig_queue; magmablasGetKernelStream( &orig_queue ); // prepare solver feedback solver_par->solver = Magma_BICGSTABMERGE; solver_par->numiter = 0; solver_par->info = MAGMA_SUCCESS; // some useful variables magmaFloatComplex c_zero = MAGMA_C_ZERO, c_one = MAGMA_C_ONE; magma_int_t dofs = A.num_rows; // GPU stream magma_queue_t stream[2]; magma_event_t event[1]; magma_queue_create( &stream[0] ); magma_queue_create( &stream[1] ); magma_event_create( &event[0] ); // workspace magma_c_vector q, r,rr,p,v,s,t; magmaFloatComplex *d1, *d2, *skp; d1 = NULL; d2 = NULL; skp = NULL; magma_int_t stat_dev = 0, stat_cpu = 0; stat_dev += magma_cmalloc( &d1, dofs*(2) ); stat_dev += magma_cmalloc( &d2, dofs*(2) ); // array for the parameters stat_dev += magma_cmalloc( &skp, 8 ); if( stat_dev != 0 ){ magma_free( d1 ); magma_free( d2 ); magma_free( skp ); printf("error: memory allocation.\n"); return MAGMA_ERR_DEVICE_ALLOC; } // skp = [alpha|beta|omega|rho_old|rho|nom|tmp1|tmp2] magma_c_vinit( &q, Magma_DEV, dofs*6, c_zero, queue ); // q = rr|r|p|v|s|t rr.memory_location = Magma_DEV; rr.dval = NULL; rr.num_rows = rr.nnz = dofs; rr.num_cols = 1; r.memory_location = Magma_DEV; r.dval = NULL; r.num_rows = r.nnz = dofs; r.num_cols = 1; p.memory_location = Magma_DEV; p.dval = NULL; p.num_rows = p.nnz = dofs; p.num_cols = 1; v.memory_location = Magma_DEV; v.dval = NULL; v.num_rows = v.nnz = dofs; v.num_cols = 1; s.memory_location = Magma_DEV; s.dval = NULL; s.num_rows = s.nnz = dofs; s.num_cols = 1; t.memory_location = Magma_DEV; t.dval = NULL; t.num_rows = t.nnz = dofs; t.num_cols = 1; rr.dval = q(0); r.dval = q(1); p.dval = q(2); v.dval = q(3); s.dval = q(4); t.dval = q(5); // solver variables magmaFloatComplex alpha, beta, omega, rho_old, rho_new, *skp_h; float nom, nom0, betanom, r0, den; // solver setup magma_cscal( dofs, c_zero, x->dval, 1) ; // x = 0 magma_ccopy( dofs, b.dval, 1, q(0), 1 ); // rr = b magma_ccopy( dofs, b.dval, 1, q(1), 1 ); // r = b rho_new = magma_cdotc( dofs, r.dval, 1, r.dval, 1 ); // rho=<rr,r> nom = MAGMA_C_REAL(magma_cdotc( dofs, r.dval, 1, r.dval, 1 )); nom0 = betanom = sqrt(nom); // nom = || r || rho_old = omega = alpha = MAGMA_C_MAKE( 1.0, 0. ); beta = rho_new; solver_par->init_res = nom0; // array on host for the parameters stat_cpu = magma_cmalloc_cpu( &skp_h, 8 ); if( stat_cpu != 0 ){ magma_free( d1 ); magma_free( d2 ); magma_free( skp ); magma_free_cpu( skp_h ); printf("error: memory allocation.\n"); return MAGMA_ERR_HOST_ALLOC; } skp_h[0]=alpha; skp_h[1]=beta; skp_h[2]=omega; skp_h[3]=rho_old; skp_h[4]=rho_new; skp_h[5]=MAGMA_C_MAKE(nom, 0.0); magma_csetvector( 8, skp_h, 1, skp, 1 ); magma_c_spmv( c_one, A, r, c_zero, v, queue ); // z = A r den = MAGMA_C_REAL( magma_cdotc(dofs, v.dval, 1, r.dval, 1) );// den = z dot r if ( (r0 = nom * solver_par->epsilon) < ATOLERANCE ) r0 = ATOLERANCE; if ( nom < r0 ) { magmablasSetKernelStream( orig_queue ); return MAGMA_SUCCESS; } // check positive definite if (den <= 0.0) { printf("Operator A is not postive definite. 
(Ar,r) = %f\n", den);
        solver_par->info = MAGMA_NONSPD;
        magmablasSetKernelStream( orig_queue );
        return MAGMA_NONSPD;
    }
    //Chronometry
    real_Double_t tempo1, tempo2;
    tempo1 = magma_sync_wtime( queue );
    if ( solver_par->verbose > 0 ) {
        solver_par->res_vec[0] = nom0;
        solver_par->timing[0] = 0.0;
    }
    // start iteration
    for( solver_par->numiter= 1; solver_par->numiter<solver_par->maxiter;
                                                    solver_par->numiter++ ) {
        magmablasSetKernelStream(stream[0]);
        // computes p=r+beta*(p-omega*v)
        magma_cbicgmerge1( dofs, skp, v.dval, r.dval, p.dval, queue );
        magma_c_spmv( c_one, A, p, c_zero, v, queue );                  // v = Ap
        magma_cmdotc( dofs, 1, q.dval, v.dval, d1, d2, skp, queue );
        magma_cbicgmerge4( 1, skp, queue );
        magma_cbicgmerge2( dofs, skp, r.dval, v.dval, s.dval, queue );  // s=r-alpha*v
        magma_c_spmv( c_one, A, s, c_zero, t, queue );                  // t=As
        magma_cmdotc( dofs, 2, q.dval+4*dofs, t.dval, d1, d2, skp+6, queue );
        magma_cbicgmerge4( 2, skp, queue );
        magma_cbicgmerge_xrbeta( dofs, d1, d2, q.dval, r.dval, p.dval,
                                 s.dval, t.dval, x->dval, skp, queue );
        // check stopping criterion (asynchronous copy)
        magma_cgetvector_async( 1, skp+5, 1, skp_h+5, 1, stream[1] );
        betanom = sqrt(MAGMA_C_REAL(skp_h[5]));
        if ( solver_par->verbose > 0 ) {
            tempo2 = magma_sync_wtime( queue );
            if ( (solver_par->numiter)%solver_par->verbose==0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) betanom;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        if ( betanom < r0 ) {
            break;
        }
    }
    tempo2 = magma_sync_wtime( queue );
    solver_par->runtime = (real_Double_t) tempo2-tempo1;
    float residual;
    magma_cresidual( A, b, *x, &residual, queue );
    solver_par->iter_res = betanom;
    solver_par->final_res = residual;

    if ( solver_par->numiter < solver_par->maxiter) {
        solver_par->info = MAGMA_SUCCESS;
    } else if ( solver_par->init_res > solver_par->final_res ) {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose==0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) betanom;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        solver_par->info = MAGMA_SLOW_CONVERGENCE;
    } else {
        if ( solver_par->verbose > 0 ) {
            if ( (solver_par->numiter)%solver_par->verbose==0 ) {
                solver_par->res_vec[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) betanom;
                solver_par->timing[(solver_par->numiter)/solver_par->verbose]
                        = (real_Double_t) tempo2-tempo1;
            }
        }
        solver_par->info = MAGMA_DIVERGENCE;
    }
    magma_c_vfree(&q, queue );  // frees all vectors
    magma_free(d1);
    magma_free(d2);
    magma_free( skp );
    magma_free_cpu( skp_h );

    magmablasSetKernelStream( orig_queue );
    return MAGMA_SUCCESS;
}   /* cbicgstab_merge */
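/*
    Reading aid for the merged BiCGSTAB iteration above.  The formulas are the
    standard BiCGSTAB recurrences; the kernel-to-update mapping below follows
    the inline comments and is a sketch, not a specification of the fused kernels:

        skp = [ alpha | beta | omega | rho_old | rho | nom | tmp1 | tmp2 ]   (device-side scalars)

        p     = r + beta * ( p - omega * v )        // cbicgmerge1
        v     = A p                                 // spmv
        alpha = rho / <rr, v>                       // cmdotc + cbicgmerge4(1)
        s     = r - alpha * v                       // cbicgmerge2
        t     = A s                                 // spmv
        omega = <t, s> / <t, t>                     // cmdotc + cbicgmerge4(2)
        x     = x + alpha * p + omega * s
        r     = s - omega * t                       // cbicgmerge_xrbeta
        beta  = ( rho_new / rho_old ) * ( alpha / omega )

    Only two SpMVs and a handful of fused kernels are launched per iteration;
    the scalar parameters stay on the device and are copied back asynchronously
    only for the convergence check.
*/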
extern "C" magma_int_t magma_cpidr_strms( magma_c_matrix A, magma_c_matrix b, magma_c_matrix *x, magma_c_solver_par *solver_par, magma_c_preconditioner *precond_par, magma_queue_t queue ) { magma_int_t info = MAGMA_NOTCONVERGED; // prepare solver feedback solver_par->solver = Magma_PIDRMERGE; solver_par->numiter = 0; solver_par->spmv_count = 0; solver_par->init_res = 0.0; solver_par->final_res = 0.0; solver_par->iter_res = 0.0; solver_par->runtime = 0.0; // constants const magmaFloatComplex c_zero = MAGMA_C_ZERO; const magmaFloatComplex c_one = MAGMA_C_ONE; const magmaFloatComplex c_n_one = MAGMA_C_NEG_ONE; // internal user options const magma_int_t smoothing = 1; // 0 = disable, 1 = enable const float angle = 0.7; // [0-1] // local variables magma_int_t iseed[4] = {0, 0, 0, 1}; magma_int_t dof; magma_int_t s; magma_int_t distr; magma_int_t k, i, sk; magma_int_t innerflag; magma_int_t ldd; magma_int_t q; float residual; float nrm; float nrmb; float nrmr; float nrmt; float rho; magmaFloatComplex om; magmaFloatComplex gamma; // matrices and vectors magma_c_matrix dxs = {Magma_CSR}; magma_c_matrix dr = {Magma_CSR}, drs = {Magma_CSR}; magma_c_matrix dP = {Magma_CSR}, dP1 = {Magma_CSR}; magma_c_matrix dG = {Magma_CSR}, dGcol = {Magma_CSR}; magma_c_matrix dU = {Magma_CSR}; magma_c_matrix dM = {Magma_CSR}; magma_c_matrix df = {Magma_CSR}; magma_c_matrix dt = {Magma_CSR}, dtt = {Magma_CSR}; magma_c_matrix dc = {Magma_CSR}; magma_c_matrix dv = {Magma_CSR}; magma_c_matrix dlu = {Magma_CSR}; magma_c_matrix dskp = {Magma_CSR}; magma_c_matrix dalpha = {Magma_CSR}; magma_c_matrix dbeta = {Magma_CSR}; magmaFloatComplex *hMdiag = NULL; magmaFloatComplex *hskp = NULL; magmaFloatComplex *halpha = NULL; magmaFloatComplex *hbeta = NULL; magmaFloatComplex *d1 = NULL, *d2 = NULL; // queue variables const magma_int_t nqueues = 3; // number of queues magma_queue_t queues[nqueues]; // chronometry real_Double_t tempo1, tempo2; // create additional queues queues[0] = queue; for ( q = 1; q < nqueues; q++ ) { magma_queue_create( queue->device(), &(queues[q]) ); } // initial s space // TODO: add option for 's' (shadow space number) // Hack: uses '--restart' option as the shadow space number. // This is not a good idea because the default value of restart option is used to detect // if the user provided a custom restart. This means that if the default restart value // is changed then the code will think it was the user (unless the default value is // also updated in the 'if' statement below. 
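    // Illustration of the hack described above (assuming the default value of
    // the --restart option is 50, as the check below implies):
    //     --restart left at its default (50)    -> s = 1            (IDR(1))
    //     --restart 4                           -> s = 4            (IDR(4))
    //     --restart larger than A.num_cols      -> s = A.num_cols   (capped)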
s = 1; if ( solver_par->restart != 50 ) { if ( solver_par->restart > A.num_cols ) { s = A.num_cols; } else { s = solver_par->restart; } } solver_par->restart = s; // set max iterations solver_par->maxiter = min( 2 * A.num_cols, solver_par->maxiter ); // check if matrix A is square if ( A.num_rows != A.num_cols ) { //printf("Matrix A is not square.\n"); info = MAGMA_ERR_NOT_SUPPORTED; goto cleanup; } // |b| nrmb = magma_scnrm2( b.num_rows, b.dval, 1, queue ); if ( nrmb == 0.0 ) { magma_cscal( x->num_rows, MAGMA_C_ZERO, x->dval, 1, queue ); info = MAGMA_SUCCESS; goto cleanup; } // t = 0 // make t twice as large to contain both, dt and dr ldd = magma_roundup( b.num_rows, 32 ); CHECK( magma_cvinit( &dt, Magma_DEV, ldd, 2, c_zero, queue )); dt.num_rows = b.num_rows; dt.num_cols = 1; dt.nnz = dt.num_rows; // redirect the dr.dval to the second part of dt CHECK( magma_cvinit( &dr, Magma_DEV, b.num_rows, 1, c_zero, queue )); magma_free( dr.dval ); dr.dval = dt.dval + ldd; // r = b - A x CHECK( magma_cresidualvec( A, b, *x, &dr, &nrmr, queue )); // |r| solver_par->init_res = nrmr; solver_par->final_res = solver_par->init_res; solver_par->iter_res = solver_par->init_res; if ( solver_par->verbose > 0 ) { solver_par->res_vec[0] = (real_Double_t)nrmr; } // check if initial is guess good enough if ( nrmr <= solver_par->atol || nrmr/nrmb <= solver_par->rtol ) { info = MAGMA_SUCCESS; goto cleanup; } // P = randn(n, s) // P = ortho(P) //--------------------------------------- // P = 0.0 CHECK( magma_cvinit( &dP, Magma_CPU, A.num_cols, s, c_zero, queue )); // P = randn(n, s) distr = 3; // 1 = unif (0,1), 2 = unif (-1,1), 3 = normal (0,1) dof = dP.num_rows * dP.num_cols; lapackf77_clarnv( &distr, iseed, &dof, dP.val ); // transfer P to device CHECK( magma_cmtransfer( dP, &dP1, Magma_CPU, Magma_DEV, queue )); magma_cmfree( &dP, queue ); // P = ortho(P1) if ( dP1.num_cols > 1 ) { // P = magma_cqr(P1), QR factorization CHECK( magma_cqr( dP1.num_rows, dP1.num_cols, dP1, dP1.ld, &dP, NULL, queue )); } else { // P = P1 / |P1| nrm = magma_scnrm2( dof, dP1.dval, 1, queue ); nrm = 1.0 / nrm; magma_csscal( dof, nrm, dP1.dval, 1, queue ); CHECK( magma_cmtransfer( dP1, &dP, Magma_DEV, Magma_DEV, queue )); } magma_cmfree( &dP1, queue ); //--------------------------------------- // allocate memory for the scalar products CHECK( magma_cmalloc_pinned( &hskp, 5 )); CHECK( magma_cvinit( &dskp, Magma_DEV, 4, 1, c_zero, queue )); CHECK( magma_cmalloc_pinned( &halpha, s )); CHECK( magma_cvinit( &dalpha, Magma_DEV, s, 1, c_zero, queue )); CHECK( magma_cmalloc_pinned( &hbeta, s )); CHECK( magma_cvinit( &dbeta, Magma_DEV, s, 1, c_zero, queue )); // workspace for merged dot product CHECK( magma_cmalloc( &d1, max(2, s) * b.num_rows )); CHECK( magma_cmalloc( &d2, max(2, s) * b.num_rows )); // smoothing enabled if ( smoothing > 0 ) { // set smoothing solution vector CHECK( magma_cmtransfer( *x, &dxs, Magma_DEV, Magma_DEV, queue )); // tt = 0 // make tt twice as large to contain both, dtt and drs ldd = magma_roundup( b.num_rows, 32 ); CHECK( magma_cvinit( &dtt, Magma_DEV, ldd, 2, c_zero, queue )); dtt.num_rows = dr.num_rows; dtt.num_cols = 1; dtt.nnz = dtt.num_rows; // redirect the drs.dval to the second part of dtt CHECK( magma_cvinit( &drs, Magma_DEV, dr.num_rows, 1, c_zero, queue )); magma_free( drs.dval ); drs.dval = dtt.dval + ldd; // set smoothing residual vector magma_ccopyvector( dr.num_rows, dr.dval, 1, drs.dval, 1, queue ); } // G(n,s) = 0 if ( s > 1 ) { ldd = magma_roundup( A.num_rows, 32 ); CHECK( magma_cvinit( &dG, 
Magma_DEV, ldd, s, c_zero, queue )); dG.num_rows = A.num_rows; } else { CHECK( magma_cvinit( &dG, Magma_DEV, A.num_rows, s, c_zero, queue )); } // dGcol represents a single column of dG, array pointer is set inside loop CHECK( magma_cvinit( &dGcol, Magma_DEV, dG.num_rows, 1, c_zero, queue )); magma_free( dGcol.dval ); // U(n,s) = 0 if ( s > 1 ) { ldd = magma_roundup( A.num_cols, 32 ); CHECK( magma_cvinit( &dU, Magma_DEV, ldd, s, c_zero, queue )); dU.num_rows = A.num_cols; } else { CHECK( magma_cvinit( &dU, Magma_DEV, A.num_cols, s, c_zero, queue )); } // M(s,s) = I CHECK( magma_cvinit( &dM, Magma_DEV, s, s, c_zero, queue )); CHECK( magma_cmalloc_pinned( &hMdiag, s )); magmablas_claset( MagmaFull, dM.num_rows, dM.num_cols, c_zero, c_one, dM.dval, dM.ld, queue ); // f = 0 CHECK( magma_cvinit( &df, Magma_DEV, dP.num_cols, 1, c_zero, queue )); // c = 0 CHECK( magma_cvinit( &dc, Magma_DEV, dM.num_cols, 1, c_zero, queue )); // v = r CHECK( magma_cmtransfer( dr, &dv, Magma_DEV, Magma_DEV, queue )); // lu = 0 CHECK( magma_cvinit( &dlu, Magma_DEV, dr.num_rows, 1, c_zero, queue )); //--------------START TIME--------------- // chronometry tempo1 = magma_sync_wtime( queue ); if ( solver_par->verbose > 0 ) { solver_par->timing[0] = 0.0; } om = MAGMA_C_ONE; gamma = MAGMA_C_ZERO; innerflag = 0; // start iteration do { solver_par->numiter++; // new RHS for small systems // f = P' r // Q1 magma_cgemvmdot_shfl( dP.num_rows, dP.num_cols, dP.dval, dr.dval, d1, d2, df.dval, queues[1] ); // skp[4] = f(k) // Q1 magma_cgetvector_async( 1, df.dval, 1, &hskp[4], 1, queues[1] ); // c(k:s) = f(k:s) // Q1 magma_ccopyvector_async( s, df.dval, 1, dc.dval, 1, queues[1] ); // c(k:s) = M(k:s,k:s) \ f(k:s) // Q1 magma_ctrsv( MagmaLower, MagmaNoTrans, MagmaNonUnit, s, dM.dval, dM.ld, dc.dval, 1, queues[1] ); // shadow space loop for ( k = 0; k < s; ++k ) { sk = s - k; dGcol.dval = dG.dval + k * dG.ld; // v = r - G(:,k:s) c(k:s) // Q1 magmablas_cgemv( MagmaNoTrans, dG.num_rows, sk, c_n_one, dGcol.dval, dG.ld, &dc.dval[k], 1, c_one, dv.dval, 1, queues[1] ); // preconditioning operation // v = L \ v; // v = U \ v; // Q1 CHECK( magma_c_applyprecond_left( MagmaNoTrans, A, dv, &dlu, precond_par, queues[1] )); CHECK( magma_c_applyprecond_right( MagmaNoTrans, A, dlu, &dv, precond_par, queues[1] )); // sync Q0 --> U(:,k) = U(:,k) - U(:,1:k) * alpha(1:k) magma_queue_sync( queues[0] ); // U(:,k) = om * v + U(:,k:s) c(k:s) // Q1 magmablas_cgemv( MagmaNoTrans, dU.num_rows, sk, c_one, &dU.dval[k*dU.ld], dU.ld, &dc.dval[k], 1, om, dv.dval, 1, queues[1] ); // G(:,k) = A U(:,k) // Q1 CHECK( magma_c_spmv( c_one, A, dv, c_zero, dGcol, queues[1] )); solver_par->spmv_count++; // bi-orthogonalize the new basis vectors for ( i = 0; i < k; ++i ) { // alpha = P(:,i)' G(:,k) // Q1 halpha[i] = magma_cdotc( dP.num_rows, &dP.dval[i*dP.ld], 1, dGcol.dval, 1, queues[1] ); // implicit sync Q1 --> alpha = P(:,i)' G(:,k) // alpha = alpha / M(i,i) halpha[i] = halpha[i] / hMdiag[i]; // G(:,k) = G(:,k) - alpha * G(:,i) // Q1 magma_caxpy( dG.num_rows, -halpha[i], &dG.dval[i*dG.ld], 1, dGcol.dval, 1, queues[1] ); } // sync Q1 --> compute new G, skp[4] = f(k magma_queue_sync( queues[1] ); // new column of M = P'G, first k-1 entries are zero // M(k:s,k) = P(:,k:s)' G(:,k) // Q2 magma_cgemvmdot_shfl( dP.num_rows, sk, &dP.dval[k*dP.ld], dGcol.dval, d1, d2, &dM.dval[k*dM.ld+k], queues[2] ); // U(:,k) = v // Q0 magma_ccopyvector_async( dU.num_rows, dv.dval, 1, &dU.dval[k*dU.ld], 1, queues[0] ); // non-first s iteration if ( k > 0 ) { // alpha = dalpha // Q0 
magma_csetvector_async( k, halpha, 1, dalpha.dval, 1, queues[0] ); // U update outside of loop using GEMV // U(:,k) = U(:,k) - U(:,1:k) * alpha(1:k) // Q0 magmablas_cgemv( MagmaNoTrans, dU.num_rows, k, c_n_one, dU.dval, dU.ld, dalpha.dval, 1, c_one, &dU.dval[k*dU.ld], 1, queues[0] ); } // Mdiag(k) = M(k,k) // Q2 magma_cgetvector( 1, &dM.dval[k*dM.ld+k], 1, &hMdiag[k], 1, queues[2] ); // implicit sync Q2 --> Mdiag(k) = M(k,k) // check M(k,k) == 0 if ( MAGMA_C_EQUAL(hMdiag[k], MAGMA_C_ZERO) ) { innerflag = 1; info = MAGMA_DIVERGENCE; break; } // beta = f(k) / M(k,k) hbeta[k] = hskp[4] / hMdiag[k]; // check for nan if ( magma_c_isnan( hbeta[k] ) || magma_c_isinf( hbeta[k] )) { innerflag = 1; info = MAGMA_DIVERGENCE; break; } // non-last s iteration if ( (k + 1) < s ) { // f(k+1:s) = f(k+1:s) - beta * M(k+1:s,k) // Q1 magma_caxpy( sk-1, -hbeta[k], &dM.dval[k*dM.ld+(k+1)], 1, &df.dval[k+1], 1, queues[1] ); // c(k+1:s) = f(k+1:s) // Q1 magma_ccopyvector_async( sk-1, &df.dval[k+1], 1, &dc.dval[k+1], 1, queues[1] ); // c(k+1:s) = M(k+1:s,k+1:s) \ f(k+1:s) // Q1 magma_ctrsv( MagmaLower, MagmaNoTrans, MagmaNonUnit, sk-1, &dM.dval[(k+1)*dM.ld+(k+1)], dM.ld, &dc.dval[k+1], 1, queues[1] ); // skp[4] = f(k+1) // Q1 magma_cgetvector_async( 1, &df.dval[k+1], 1, &hskp[4], 1, queues[1] ); } // r = r - beta * G(:,k) // Q2 magma_caxpy( dr.num_rows, -hbeta[k], dGcol.dval, 1, dr.dval, 1, queues[2] ); // smoothing disabled if ( smoothing <= 0 ) { // |r| // Q2 nrmr = magma_scnrm2( dr.num_rows, dr.dval, 1, queues[2] ); // implicit sync Q2 --> |r| // v = r // Q1 magma_ccopyvector_async( dr.num_rows, dr.dval, 1, dv.dval, 1, queues[1] ); // smoothing enabled } else { // x = x + beta * U(:,k) // Q0 magma_caxpy( x->num_rows, hbeta[k], &dU.dval[k*dU.ld], 1, x->dval, 1, queues[0] ); // smoothing operation //--------------------------------------- // t = rs - r // Q2 magma_cidr_smoothing_1( drs.num_rows, drs.num_cols, drs.dval, dr.dval, dtt.dval, queues[2] ); // t't // t'rs // Q2 CHECK( magma_cgemvmdot_shfl( dt.ld, 2, dtt.dval, dtt.dval, d1, d2, &dskp.dval[2], queues[2] )); // skp[2-3] = dskp[2-3] // Q2 magma_cgetvector( 2, &dskp.dval[2], 1, &hskp[2], 1, queues[2] ); // implicit sync Q2 --> skp = dskp // gamma = (t' * rs) / (t' * t) gamma = hskp[3] / hskp[2]; // xs = xs - gamma * (xs - x) // Q0 magma_cidr_smoothing_2( dxs.num_rows, dxs.num_cols, -gamma, x->dval, dxs.dval, queues[0] ); // v = r // Q1 magma_ccopyvector_async( dr.num_rows, dr.dval, 1, dv.dval, 1, queues[1] ); // rs = rs - gamma * t // Q2 magma_caxpy( drs.num_rows, -gamma, dtt.dval, 1, drs.dval, 1, queues[2] ); // |rs| // Q2 nrmr = magma_scnrm2( drs.num_rows, drs.dval, 1, queues[2] ); // implicit sync Q2 --> |r| //--------------------------------------- } // store current timing and residual if ( solver_par->verbose > 0 ) { tempo2 = magma_sync_wtime( queue ); if ( (solver_par->numiter) % solver_par->verbose == 0 ) { solver_par->res_vec[(solver_par->numiter) / solver_par->verbose] = (real_Double_t)nrmr; solver_par->timing[(solver_par->numiter) / solver_par->verbose] = (real_Double_t)tempo2 - tempo1; } } // check convergence or iteration limit if ( nrmr <= solver_par->atol || nrmr/nrmb <= solver_par->rtol ) { s = k + 1; // for the x-update outside the loop innerflag = 2; info = MAGMA_SUCCESS; break; } } // smoothing disabled if ( smoothing <= 0 && innerflag != 1 ) { // dbeta(1:s) = beta(1:s) // Q0 magma_csetvector_async( s, hbeta, 1, dbeta.dval, 1, queues[0] ); // x = x + U(:,1:s) * beta(1:s) // Q0 magmablas_cgemv( MagmaNoTrans, dU.num_rows, s, c_one, dU.dval, 
dU.ld, dbeta.dval, 1, c_one, x->dval, 1, queues[0] ); } // check convergence or iteration limit or invalid result of inner loop if ( innerflag > 0 ) { break; } // preconditioning operation // v = L \ v; // v = U \ v; // Q2 CHECK( magma_c_applyprecond_left( MagmaNoTrans, A, dv, &dlu, precond_par, queues[2] )); CHECK( magma_c_applyprecond_right( MagmaNoTrans, A, dlu, &dv, precond_par, queues[2] )); // t = A v // Q2 CHECK( magma_c_spmv( c_one, A, dv, c_zero, dt, queues[2] )); solver_par->spmv_count++; // computation of a new omega //--------------------------------------- // t't // t'r // Q2 CHECK( magma_cgemvmdot_shfl( dt.ld, 2, dt.dval, dt.dval, d1, d2, dskp.dval, queues[2] )); // skp[0-2] = dskp[0-2] // Q2 magma_cgetvector( 2, dskp.dval, 1, hskp, 1, queues[2] ); // implicit sync Q2 --> skp = dskp // |t| nrmt = magma_ssqrt( MAGMA_C_REAL(hskp[0]) ); // rho = abs((t' * r) / (|t| * |r|)) rho = MAGMA_D_ABS( MAGMA_C_REAL(hskp[1]) / (nrmt * nrmr) ); // om = (t' * r) / (|t| * |t|) om = hskp[1] / hskp[0]; if ( rho < angle ) { om = (om * angle) / rho; } //--------------------------------------- if ( MAGMA_C_EQUAL(om, MAGMA_C_ZERO) ) { info = MAGMA_DIVERGENCE; break; } // sync Q1 --> v = r magma_queue_sync( queues[1] ); // r = r - om * t // Q2 magma_caxpy( dr.num_rows, -om, dt.dval, 1, dr.dval, 1, queues[2] ); // x = x + om * v // Q0 magma_caxpy( x->num_rows, om, dv.dval, 1, x->dval, 1, queues[0] ); // smoothing disabled if ( smoothing <= 0 ) { // |r| // Q2 nrmr = magma_scnrm2( dr.num_rows, dr.dval, 1, queues[2] ); // implicit sync Q2 --> |r| // v = r // Q1 magma_ccopyvector_async( dr.num_rows, dr.dval, 1, dv.dval, 1, queues[1] ); // smoothing enabled } else { // smoothing operation //--------------------------------------- // t = rs - r // Q2 magma_cidr_smoothing_1( drs.num_rows, drs.num_cols, drs.dval, dr.dval, dtt.dval, queues[2] ); // t't // t'rs // Q2 CHECK( magma_cgemvmdot_shfl( dt.ld, 2, dtt.dval, dtt.dval, d1, d2, &dskp.dval[2], queues[2] )); // skp[2-3] = dskp[2-3] // Q2 magma_cgetvector( 2, &dskp.dval[2], 1, &hskp[2], 1, queues[2] ); // implicit sync Q2 --> skp = dskp // gamma = (t' * rs) / (t' * t) gamma = hskp[3] / hskp[2]; // xs = xs - gamma * (xs - x) // Q0 magma_cidr_smoothing_2( dxs.num_rows, dxs.num_cols, -gamma, x->dval, dxs.dval, queues[0] ); // v = r // Q1 magma_ccopyvector_async( dr.num_rows, dr.dval, 1, dv.dval, 1, queues[1] ); // rs = rs - gamma * (rs - r) // Q2 magma_caxpy( drs.num_rows, -gamma, dtt.dval, 1, drs.dval, 1, queues[2] ); // |rs| // Q2 nrmr = magma_scnrm2( drs.num_rows, drs.dval, 1, queues[2] ); // implicit sync Q2 --> |r| //--------------------------------------- } // store current timing and residual if ( solver_par->verbose > 0 ) { tempo2 = magma_sync_wtime( queue ); magma_queue_sync( queue ); if ( (solver_par->numiter) % solver_par->verbose == 0 ) { solver_par->res_vec[(solver_par->numiter) / solver_par->verbose] = (real_Double_t)nrmr; solver_par->timing[(solver_par->numiter) / solver_par->verbose] = (real_Double_t)tempo2 - tempo1; } } // check convergence or iteration limit if ( nrmr <= solver_par->atol || nrmr/nrmb <= solver_par->rtol ) { info = MAGMA_SUCCESS; break; } } while ( solver_par->numiter + 1 <= solver_par->maxiter ); // sync all queues for ( q = 0; q < nqueues; q++ ) { magma_queue_sync( queues[q] ); } // smoothing enabled if ( smoothing > 0 ) { // x = xs magma_ccopyvector_async( x->num_rows, dxs.dval, 1, x->dval, 1, queue ); // r = rs magma_ccopyvector_async( dr.num_rows, drs.dval, 1, dr.dval, 1, queue ); } // get last iteration timing tempo2 = 
magma_sync_wtime( queue );
    magma_queue_sync( queue );
    solver_par->runtime = (real_Double_t)tempo2 - tempo1;
    //--------------STOP TIME----------------

    // get final stats
    solver_par->iter_res = nrmr;
    CHECK( magma_cresidualvec( A, b, *x, &dr, &residual, queue ));
    solver_par->final_res = residual;

    // set solver conclusion
    if ( info != MAGMA_SUCCESS && info != MAGMA_DIVERGENCE ) {
        if ( solver_par->init_res > solver_par->final_res ) {
            info = MAGMA_SLOW_CONVERGENCE;
        }
    }

cleanup:
    // free resources
    // sync all queues, destroy additional queues
    magma_queue_sync( queues[0] );
    for ( q = 1; q < nqueues; q++ ) {
        magma_queue_sync( queues[q] );
        magma_queue_destroy( queues[q] );
    }

    // smoothing enabled
    if ( smoothing > 0 ) {
        drs.dval = NULL;    // needed because its pointer is redirected to dtt
        magma_cmfree( &dxs, queue );
        magma_cmfree( &drs, queue );
        magma_cmfree( &dtt, queue );
    }
    dr.dval = NULL;         // needed because its pointer is redirected to dt
    dGcol.dval = NULL;      // needed because its pointer is redirected to dG
    magma_cmfree( &dr, queue );
    magma_cmfree( &dP, queue );
    magma_cmfree( &dP1, queue );
    magma_cmfree( &dG, queue );
    magma_cmfree( &dGcol, queue );
    magma_cmfree( &dU, queue );
    magma_cmfree( &dM, queue );
    magma_cmfree( &df, queue );
    magma_cmfree( &dt, queue );
    magma_cmfree( &dc, queue );
    magma_cmfree( &dv, queue );
    magma_cmfree( &dlu, queue );
    magma_cmfree( &dskp, queue );
    magma_cmfree( &dalpha, queue );
    magma_cmfree( &dbeta, queue );
    magma_free_pinned( hMdiag );
    magma_free_pinned( hskp );
    magma_free_pinned( halpha );
    magma_free_pinned( hbeta );
    magma_free( d1 );
    magma_free( d2 );

    solver_par->info = info;
    return info;
}   /* magma_cpidr_strms */
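/*
    Reading aid for magma_cpidr_strms: the work is split over three queues, as
    inferred from the // Q0 / Q1 / Q2 tags in the loop above (a rough summary,
    not a contract):
      Q0 - solution-side updates: the U(:,k) column copies and corrections,
           the x updates, and the smoothed solution xs;
      Q1 - the small s-by-s system (f, c, the triangular solve), the
           preconditioned direction v, and the SpMV producing G(:,k);
      Q2 - the scalar products (new column of M, omega), the second SpMV
           t = A v, the residual updates r / rs, and their norms.
    Explicit magma_queue_sync calls and the implicit syncs of the blocking
    getvector / dot calls order the cross-queue dependencies.
*/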
magma_int_t magma_cgmres( magma_c_sparse_matrix A, magma_c_vector b, magma_c_vector *x, magma_c_solver_par *solver_par ){ // prepare solver feedback solver_par->solver = Magma_GMRES; solver_par->numiter = 0; solver_par->info = 0; // local variables magmaFloatComplex c_zero = MAGMA_C_ZERO, c_one = MAGMA_C_ONE, c_mone = MAGMA_C_NEG_ONE; magma_int_t dofs = A.num_rows; magma_int_t i, j, k, m = 0; magma_int_t restart = min( dofs-1, solver_par->restart ); magma_int_t ldh = restart+1; float nom, rNorm, RNorm, nom0, betanom, r0 = 0.; // CPU workspace magma_setdevice(0); magmaFloatComplex *H, *HH, *y, *h1; magma_cmalloc_pinned( &H, (ldh+1)*ldh ); magma_cmalloc_pinned( &y, ldh ); magma_cmalloc_pinned( &HH, ldh*ldh ); magma_cmalloc_pinned( &h1, ldh ); // GPU workspace magma_c_vector r, q, q_t; magma_c_vinit( &r, Magma_DEV, dofs, c_zero ); magma_c_vinit( &q, Magma_DEV, dofs*(ldh+1), c_zero ); q_t.memory_location = Magma_DEV; q_t.val = NULL; q_t.num_rows = q_t.nnz = dofs; magmaFloatComplex *dy, *dH = NULL; if (MAGMA_SUCCESS != magma_cmalloc( &dy, ldh )) return MAGMA_ERR_DEVICE_ALLOC; if (MAGMA_SUCCESS != magma_cmalloc( &dH, (ldh+1)*ldh )) return MAGMA_ERR_DEVICE_ALLOC; // GPU stream magma_queue_t stream[2]; magma_event_t event[1]; magma_queue_create( &stream[0] ); magma_queue_create( &stream[1] ); magma_event_create( &event[0] ); magmablasSetKernelStream(stream[0]); magma_cscal( dofs, c_zero, x->val, 1 ); // x = 0 magma_ccopy( dofs, b.val, 1, r.val, 1 ); // r = b nom0 = betanom = magma_scnrm2( dofs, r.val, 1 ); // nom0= || r|| nom = nom0 * nom0; solver_par->init_res = nom0; H(1,0) = MAGMA_C_MAKE( nom0, 0. ); magma_csetvector(1, &H(1,0), 1, &dH(1,0), 1); if ( (r0 = nom * solver_par->epsilon) < ATOLERANCE ) r0 = ATOLERANCE; if ( nom < r0 ) return MAGMA_SUCCESS; //Chronometry real_Double_t tempo1, tempo2; magma_device_sync(); tempo1=magma_wtime(); if( solver_par->verbose > 0 ){ solver_par->res_vec[0] = nom0; solver_par->timing[0] = 0.0; } // start iteration for( solver_par->numiter= 1; solver_par->numiter<solver_par->maxiter; solver_par->numiter++ ){ magma_ccopy(dofs, r.val, 1, q(0), 1); // q[0] = 1.0/||r|| magma_cscal(dofs, 1./H(1,0), q(0), 1); // (to be fused) for(k=1; k<=restart; k++) { q_t.val = q(k-1); magmablasSetKernelStream(stream[0]); magma_c_spmv( c_one, A, q_t, c_zero, r ); // r = A q[k] if (solver_par->ortho == Magma_MGS ) { // modified Gram-Schmidt magmablasSetKernelStream(stream[0]); for (i=1; i<=k; i++) { H(i,k) =magma_cdotc(dofs, q(i-1), 1, r.val, 1); // H(i,k) = q[i] . r magma_caxpy(dofs,-H(i,k), q(i-1), 1, r.val, 1); // r = r - H(i,k) q[i] } H(k+1,k) = MAGMA_C_MAKE( magma_scnrm2(dofs, r.val, 1), 0. ); // H(k+1,k) = sqrt(r . r) if (k < restart) { magma_ccopy(dofs, r.val, 1, q(k), 1); // q[k] = 1.0/H[k][k-1] r magma_cscal(dofs, 1./H(k+1,k), q(k), 1); // (to be fused) } } else if (solver_par->ortho == Magma_FUSED_CGS ) { // fusing cgemv with scnrm2 in classical Gram-Schmidt magmablasSetKernelStream(stream[0]); magma_ccopy(dofs, r.val, 1, q(k), 1); // dH(1:k+1,k) = q[0:k] . 
r magmablas_cgemv(MagmaTrans, dofs, k+1, c_one, q(0), dofs, r.val, 1, c_zero, &dH(1,k), 1); // r = r - q[0:k-1] dH(1:k,k) magmablas_cgemv(MagmaNoTrans, dofs, k, c_mone, q(0), dofs, &dH(1,k), 1, c_one, r.val, 1); // 1) dH(k+1,k) = sqrt( dH(k+1,k) - dH(1:k,k) ) magma_ccopyscale( dofs, k, r.val, q(k), &dH(1,k) ); // 2) q[k] = q[k] / dH(k+1,k) magma_event_record( event[0], stream[0] ); magma_queue_wait_event( stream[1], event[0] ); magma_cgetvector_async(k+1, &dH(1,k), 1, &H(1,k), 1, stream[1]); // asynch copy dH(1:(k+1),k) to H(1:(k+1),k) } else { // classical Gram-Schmidt (default) // > explicitly calling magmabls magmablasSetKernelStream(stream[0]); magmablas_cgemv(MagmaTrans, dofs, k, c_one, q(0), dofs, r.val, 1, c_zero, &dH(1,k), 1); // dH(1:k,k) = q[0:k-1] . r #ifndef SCNRM2SCALE // start copying dH(1:k,k) to H(1:k,k) magma_event_record( event[0], stream[0] ); magma_queue_wait_event( stream[1], event[0] ); magma_cgetvector_async(k, &dH(1,k), 1, &H(1,k), 1, stream[1]); #endif // r = r - q[0:k-1] dH(1:k,k) magmablas_cgemv(MagmaNoTrans, dofs, k, c_mone, q(0), dofs, &dH(1,k), 1, c_one, r.val, 1); #ifdef SCNRM2SCALE magma_ccopy(dofs, r.val, 1, q(k), 1); // q[k] = r / H(k,k-1) magma_scnrm2scale(dofs, q(k), dofs, &dH(k+1,k) ); // dH(k+1,k) = sqrt(r . r) and r = r / dH(k+1,k) magma_event_record( event[0], stream[0] ); // start sending dH(1:k,k) to H(1:k,k) magma_queue_wait_event( stream[1], event[0] ); // can we keep H(k+1,k) on GPU and combine? magma_cgetvector_async(k+1, &dH(1,k), 1, &H(1,k), 1, stream[1]); #else H(k+1,k) = MAGMA_C_MAKE( magma_scnrm2(dofs, r.val, 1), 0. ); // H(k+1,k) = sqrt(r . r) if( k<solver_par->restart ){ magmablasSetKernelStream(stream[0]); magma_ccopy(dofs, r.val, 1, q(k), 1); // q[k] = 1.0/H[k][k-1] r magma_cscal(dofs, 1./H(k+1,k), q(k), 1); // (to be fused) } #endif } } magma_queue_sync( stream[1] ); for( k=1; k<=restart; k++ ){ /* Minimization of || b-Ax || in H_k */ for (i=1; i<=k; i++) { #if defined(PRECISION_z) || defined(PRECISION_c) cblas_cdotc_sub( i+1, &H(1,k), 1, &H(1,i), 1, &HH(k,i) ); #else HH(k,i) = cblas_cdotc(i+1, &H(1,k), 1, &H(1,i), 1); #endif } h1[k] = H(1,k)*H(1,0); if (k != 1) for (i=1; i<k; i++) { for (m=i+1; m<k; m++){ HH(k,m) -= HH(k,i) * HH(m,i); } HH(k,k) -= HH(k,i) * HH(k,i) / HH(i,i); HH(k,i) = HH(k,i)/HH(i,i); h1[k] -= h1[i] * HH(k,i); } y[k] = h1[k]/HH(k,k); if (k != 1) for (i=k-1; i>=1; i--) { y[i] = h1[i]/HH(i,i); for (j=i+1; j<=k; j++) y[i] -= y[j] * HH(j,i); } m = k; rNorm = fabs(MAGMA_C_REAL(H(k+1,k))); } magma_csetmatrix_async(m, 1, y+1, m, dy, m, stream[0]); magmablasSetKernelStream(stream[0]); magma_cgemv(MagmaNoTrans, dofs, m, c_one, q(0), dofs, dy, 1, c_one, x->val, 1); magma_c_spmv( c_mone, A, *x, c_zero, r ); // r = - A * x magma_caxpy(dofs, c_one, b.val, 1, r.val, 1); // r = r + b H(1,0) = MAGMA_C_MAKE( magma_scnrm2(dofs, r.val, 1), 0. 
); // RNorm = H[1][0] = || r || RNorm = MAGMA_C_REAL( H(1,0) ); betanom = fabs(RNorm); if( solver_par->verbose > 0 ){ magma_device_sync(); tempo2=magma_wtime(); if( (solver_par->numiter)%solver_par->verbose==0 ) { solver_par->res_vec[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) betanom; solver_par->timing[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) tempo2-tempo1; } } if ( betanom < r0 ) { break; } } magma_device_sync(); tempo2=magma_wtime(); solver_par->runtime = (real_Double_t) tempo2-tempo1; float residual; magma_cresidual( A, b, *x, &residual ); solver_par->iter_res = betanom; solver_par->final_res = residual; if( solver_par->numiter < solver_par->maxiter){ solver_par->info = 0; }else if( solver_par->init_res > solver_par->final_res ){ if( solver_par->verbose > 0 ){ if( (solver_par->numiter)%solver_par->verbose==0 ) { solver_par->res_vec[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) betanom; solver_par->timing[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) tempo2-tempo1; } } solver_par->info = -2; } else{ if( solver_par->verbose > 0 ){ if( (solver_par->numiter)%solver_par->verbose==0 ) { solver_par->res_vec[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) betanom; solver_par->timing[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) tempo2-tempo1; } } solver_par->info = -1; } // free pinned memory magma_free_pinned( H ); magma_free_pinned( y ); magma_free_pinned( HH ); magma_free_pinned( h1 ); // free GPU memory magma_free(dy); if (dH != NULL ) magma_free(dH); magma_c_vfree(&r); magma_c_vfree(&q); // free GPU streams and events //magma_queue_destroy( stream[0] ); //magma_queue_destroy( stream[1] ); magma_event_destroy( event[0] ); magmablasSetKernelStream(NULL); return MAGMA_SUCCESS; } /* magma_cgmres */
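/*
    Reading aid for magma_cgmres: each restart cycle builds the Krylov basis
    q(0..restart) with one of three orthogonalization variants (modified
    Gram-Schmidt, fused classical Gram-Schmidt, or classical Gram-Schmidt with
    the optional SCNRM2SCALE fusion), accumulates the Hessenberg matrix H on
    the host, solves the small projected least-squares problem there via the
    elimination in HH, and applies the correction x = x + Q*y before
    recomputing r = b - A x.  This routine still uses the older solver
    interface; the numeric info codes 0 / -2 / -1 appear to play the role of
    MAGMA_SUCCESS / MAGMA_SLOW_CONVERGENCE / MAGMA_DIVERGENCE used by the
    newer solvers above.
*/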
/** Purpose ------- CLAHR2 reduces the first NB columns of a complex general n-BY-(n-k+1) matrix A so that elements below the k-th subdiagonal are zero. The reduction is performed by an orthogonal similarity transformation Q' * A * Q. The routine returns the matrices V and T which determine Q as a block reflector I - V*T*V', and also the matrix Y = A * V. (Note this is different than LAPACK, which computes Y = A * V * T.) This is an auxiliary routine called by CGEHRD. Arguments --------- @param[in] n INTEGER The order of the matrix A. @param[in] k INTEGER The offset for the reduction. Elements below the k-th subdiagonal in the first NB columns are reduced to zero. K < N. @param[in] nb INTEGER The number of columns to be reduced. @param[in,out] A COMPLEX array, dimension (LDA,N-K+1) On entry, the n-by-(n-k+1) general matrix A. On exit, the elements on and above the k-th subdiagonal in the first NB columns are overwritten with the corresponding elements of the reduced matrix; the elements below the k-th subdiagonal, with the array TAU, represent the matrix Q as a product of elementary reflectors. The other columns of A are unchanged. See Further Details. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,N). @param[out] tau COMPLEX array, dimension (NB) The scalar factors of the elementary reflectors. See Further Details. @param[out] T COMPLEX array, dimension (LDT,NB) The upper triangular matrix T. @param[in] ldt INTEGER The leading dimension of the array T. LDT >= NB. @param[out] Y COMPLEX array, dimension (LDY,NB) The n-by-nb matrix Y. @param[in] ldy INTEGER The leading dimension of the array Y. LDY >= N. @param[in,out] data Structure with pointers to dA, dT, dV, dW, dY which are distributed across multiple GPUs. Further Details --------------- The matrix Q is represented as a product of nb elementary reflectors Q = H(1) H(2) . . . H(nb). Each H(i) has the form H(i) = I - tau * v * v' where tau is a complex scalar, and v is a complex vector with v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in A(i+k+1:n,i), and tau in TAU(i). The elements of the vectors v together form the (n-k+1)-by-nb matrix V which is needed, with T and Y, to apply the transformation to the unreduced part of the matrix, using an update of the form: A := (I - V*T*V') * (A - Y*T*V'). The contents of A on exit are illustrated by the following example with n = 7, k = 3 and nb = 2: @verbatim ( a a a a a ) ( a a a a a ) ( a a a a a ) ( h h a a a ) ( v1 h a a a ) ( v1 v2 a a a ) ( v1 v2 a a a ) @endverbatim where "a" denotes an element of the original matrix A, h denotes a modified element of the upper Hessenberg matrix H, and vi denotes an element of the vector defining H(i). This implementation follows the hybrid algorithm and notations described in S. Tomov and J. Dongarra, "Accelerating the reduction to upper Hessenberg form through hybrid GPU-based computing," University of Tennessee Computer Science Technical Report, UT-CS-09-642 (also LAPACK Working Note 219), May 24, 2009. 
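    In other words, (V, T) is the usual compact WY representation of the block
    reflector: Q = H(1) H(2) . . . H(nb) = I - V*T*V' with T upper triangular.
    Because this routine returns Y = A*V (rather than Y = A*V*T as LAPACK's
    CLAHR2 does), the trailing update keeps the extra factor T, i.e.
    A := (I - V*T*V') * (A - Y*T*V') as stated above.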
@ingroup magma_cgeev_aux ********************************************************************/ extern "C" magma_int_t magma_clahr2_m( magma_int_t n, magma_int_t k, magma_int_t nb, magmaFloatComplex *A, magma_int_t lda, magmaFloatComplex *tau, magmaFloatComplex *T, magma_int_t ldt, magmaFloatComplex *Y, magma_int_t ldy, struct cgehrd_data *data ) { #define A( i, j ) ( A + (i) + (j)*lda) #define Y( i, j ) ( Y + (i) + (j)*ldy) #define T( i, j ) ( T + (i) + (j)*ldt) #define dA( d, i, j ) (data->A [d] + (i) + (j)*ldda) #define dTi( d ) (data->Ti[d]) #define dV( d, i, j ) (data->V [d] + (i) + (j)*ldv ) #define dVd( d, i, j ) (data->Vd[d] + (i) + (j)*ldvd) #define dY( d, i, j ) (data->Y [d] + (i) + (j)*ldda) magmaFloatComplex c_zero = MAGMA_C_ZERO; magmaFloatComplex c_one = MAGMA_C_ONE; magmaFloatComplex c_neg_one = MAGMA_C_NEG_ONE; magmaFloatComplex tmp; magma_int_t ngpu = data->ngpu; magma_int_t ldda = data->ldda; magma_int_t ldv = data->ldv; magma_int_t ldvd = data->ldvd; magma_int_t ione = 1; magma_int_t d, dki1, dn, nblocks, gblock, lblock, lgid; magma_int_t n_k_i_1, n_k; magmaFloatComplex scale; magma_int_t i; magmaFloatComplex ei = MAGMA_C_ZERO; magma_int_t info_data = 0; magma_int_t *info = &info_data; if (n < 0) { *info = -1; } else if (k < 0 || k >= n) { *info = -2; } else if (nb < 1 || nb > n) { *info = -3; } else if (lda < max(1,n)) { *info = -5; } else if (ldt < nb) { *info = -8; } else if (ldy < max(1,n)) { *info = -10; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // adjust from 1-based indexing k -= 1; // Function Body if (n <= 1) return *info; magma_device_t orig_dev; magma_getdevice( &orig_dev ); magma_queue_t orig_stream; magmablasGetKernelStream( &orig_stream ); // zero out current top block of V on all GPUs for( d = 0; d < ngpu; ++d ) { magma_setdevice( d ); magmablasSetKernelStream( data->streams[d] ); magmablas_claset( MagmaFull, nb, nb, c_zero, c_zero, dV(d,k,0), ldv ); } // set all Y=0 lapackf77_claset( "Full", &n, &nb, &c_zero, &c_zero, Y, &ldy ); for (i = 0; i < nb; ++i) { n_k_i_1 = n - k - i - 1; n_k = n - k; if (i > 0) { // Finish applying I - V * T * V' on right tmp = MAGMA_C_NEGATE( tau[i-1] ); blasf77_caxpy( &n_k, &tmp, Y(k,i-1), &ione, A(k,i), &ione ); // Apply I - V * T' * V' to this column (call it b) from the // left, using the last column of T as workspace, w. 
// // Let V = ( V1 ) and b = ( b1 ) (first i-1 rows) // ( V2 ) ( b2 ) // where V1 is unit lower triangular // w := b1 = A(k+1:k+i, i) blasf77_ccopy( &i, A(k+1,i), &ione, T(0,nb-1), &ione ); // w := V1' * b1 = VA(k+1:k+i, 0:i-1)' * w blasf77_ctrmv( "Lower", "Conj", "Unit", &i, A(k+1,0), &lda, T(0,nb-1), &ione ); // w := w + V2'*b2 = w + VA(k+i+1:n-1, 0:i-1)' * A(k+i+1:n-1, i) blasf77_cgemv( "Conj", &n_k_i_1, &i, &c_one, A(k+i+1,0), &lda, A(k+i+1,i), &ione, &c_one, T(0,nb-1), &ione ); // w := T'*w = T(0:i-1, 0:i-1)' * w blasf77_ctrmv( "Upper", "Conj", "Non-unit", &i, T(0,0), &ldt, T(0,nb-1), &ione ); // b2 := b2 - V2*w = A(k+i+1:n-1, i) - VA(k+i+1:n-1, 0:i-1) * w blasf77_cgemv( "No trans", &n_k_i_1, &i, &c_neg_one, A(k+i+1,0), &lda, T(0,nb-1), &ione, &c_one, A(k+i+1,i), &ione ); // w := V1*w = VA(k+1:k+i, 0:i-1) * w blasf77_ctrmv( "Lower", "No trans", "Unit", &i, A(k+1,0), &lda, T(0,nb-1), &ione ); // b1 := b1 - w = A(k+1:k+i-1, i) - w blasf77_caxpy( &i, &c_neg_one, T(0,nb-1), &ione, A(k+1,i), &ione ); // Restore diagonal element, saved below during previous iteration *A(k+i,i-1) = ei; } // Generate the elementary reflector H(i) to annihilate A(k+i+1:n-1,i) lapackf77_clarfg( &n_k_i_1, A(k+i+1,i), A(k+i+2,i), &ione, &tau[i] ); // Save diagonal element and set to one, to simplify multiplying by V ei = *A(k+i+1,i); *A(k+i+1,i) = c_one; // compute yi = A vi = sum_g A{d} vi{d} nblocks = (n-1) / nb / ngpu + 1; for( d = 0; d < ngpu; ++d ) { magma_setdevice( d ); magmablasSetKernelStream( data->streams[d] ); // dV(k+i+1:n-1, i) = VA(k+i:n, i) magma_csetvector_async( n_k_i_1, A(k+i+1,i), 1, dV(d, k+i+1, i), 1, data->streams[d] ); // copy column of dV -> dVd, using block cyclic distribution. // This assumes V and Vd have been padded so that // a 2D matrix copy doesn't access them out-of-bounds gblock = k / nb; lblock = gblock / ngpu; lgid = gblock % ngpu; if ( d < lgid ) { lblock += 1; } // treat V as (nb*ngpu) x nblock matrix, and Vd as nb x nblock matrix magmablas_clacpy( MagmaFull, nb, nblocks-lblock, dV (d, d*nb + lblock*nb*ngpu, i), nb*ngpu, dVd(d, 0 + lblock*nb, i), nb ); // convert global indices (k) to local indices (dk) magma_indices_1D_bcyclic( nb, ngpu, d, k+i+1, n, &dki1, &dn ); // dY(k:n, i) = dA(k:n, k+i+1:n) * dV(k+i+1:n, i) // skip if matrix is empty // each GPU copies to different temporary vector in Y, // which are summed in separate loop below if ( dn-dki1 > 0 ) { magma_cgemv( MagmaNoTrans, n-k, dn-dki1, c_one, dA (d, k, dki1), ldda, dVd(d, dki1, i), 1, c_zero, dY (d, k, i), 1 ); // copy vector to host, storing in column nb+d of Y // as temporary space (Y has >= nb+ngpu columns) magma_cgetvector_async( n-k, dY(d, k, i), 1, Y(k, nb+d), 1, data->streams[d] ); } } // while GPU is doing above Ag*v... // Compute T(0:i,i) = [ -tau T V' vi ] // [ tau ] // T(0:i-1, i) = -tau VA(k+i+1:n-1, 0:i-1)' VA(k+i+1:n-1, i) scale = MAGMA_C_NEGATE( tau[i] ); blasf77_cgemv( "Conj", &n_k_i_1, &i, &scale, A(k+i+1,0), &lda, A(k+i+1,i), &ione, &c_zero, T(0,i), &ione ); // T(0:i-1, i) = T(0:i-1, 0:i-1) * T(0:i-1, i) blasf77_ctrmv( "Upper", "No trans", "Non-unit", &i, T(0,0), &ldt, T(0,i), &ione ); *T(i,i) = tau[i]; // apply reflectors to next column, A(i+1), on right only. // one axpy will be required to finish this, in the next iteration above if ( i > 0 && i+1 < nb ) { // Update next column, A(k:n,i+1), applying Q on right. // One axpy will be required to finish this, in the next iteration // above, after yi is computed. 
// This updates one more row than LAPACK does (row k), // making block above panel an even multiple of nb. // Use last column of T as workspace, w. magma_int_t i1 = i+1; // If complex, conjugate row of V, and undo afterwards #if defined(PRECISION_z) || defined(PRECISION_c) lapackf77_clacgv( &i1, A(k+i1,0), &lda ); #endif // w = T(0:i, 0:i+1) * VA(k+i+1, 0:i+1)' // T is now rectangular, so we use gemv instead of trmv as in lapack. blasf77_cgemv( "No trans", &i, &i1, &c_one, T(0,0), &ldt, A(k+i1,0), &lda, &c_zero, T(0,nb-1), &ione ); #if defined(PRECISION_z) || defined(PRECISION_c) lapackf77_clacgv( &i1, A(k+i1,0), &lda ); #endif // A(k:n, i+1) -= Y(k:n, 0:i) * w blasf77_cgemv( "No trans", &n_k, &i, &c_neg_one, Y(k,0), &ldy, T(0,nb-1), &ione, &c_one, A(k,i1), &ione ); } // yi = sum_g yi{d} for( d = 0; d < ngpu; ++d ) { magma_setdevice( d ); magma_queue_sync( data->streams[d] ); magma_indices_1D_bcyclic( nb, ngpu, d, k+i+1, n, &dki1, &dn ); if ( dn-dki1 > 0 ) { // yi = yi + yi{d} blasf77_caxpy( &n_k, &c_one, Y(k,nb+d), &ione, Y(k,i), &ione ); } } } // Restore diagonal element *A(k+nb,nb-1) = ei; // compute Y = Am V = sum_g Am{d} V{d} --- top part, Y(0:k-1,:) for( d = 0; d < ngpu; ++d ) { magma_setdevice( d ); magmablasSetKernelStream( data->streams[d] ); // convert global indices (k) to local indices (dk) magma_indices_1D_bcyclic( nb, ngpu, d, k+1, n, &dki1, &dn ); // dY(0:k, :) = dA(0:k, k+i+1:n-1) * dV(k+i+1:n-1, :) // skip if matrix is empty // each GPU copies to different temporary block in Y, // which are summed in separate loop below if ( dn-dki1 > 0 ) { magma_cgemm( MagmaNoTrans, MagmaNoTrans, k, nb, dn-dki1, c_one, dA (d, 0, dki1), ldda, dVd(d, dki1, 0), ldvd, c_zero, dY (d, 0, 0), ldda ); // copy result to host, storing in columns [nb + nb*d : nb + nb*(d+1)] of Y // as temporary space (Y has nb + nb*ngpu columns) magma_cgetmatrix_async( k, nb, dY(d, 0, 0), ldda, Y(0,nb+nb*d), ldy, data->streams[d] ); } } // Y = sum_g Y{d} for( d = 0; d < ngpu; ++d ) { magma_setdevice( d ); magma_queue_sync( 0 ); magma_indices_1D_bcyclic( nb, ngpu, d, k+1, n, &dki1, &dn ); if ( dn-dki1 > 0 ) { // Y = Y + Am V for( i = 0; i < nb; ++i ) { blasf77_caxpy( &k, &c_one, Y(0,nb+nb*d+i), &ione, Y(0,i), &ione ); } } } // copy Y and T matrices to GPUs for( d = 0; d < ngpu; ++d ) { magma_setdevice( d ); magma_csetmatrix_async( n, nb, Y, ldy, dY(d, 0, 0), ldda, data->streams[d] ); magma_csetmatrix_async( nb, nb, T, nb, dTi(d), nb, data->streams[d] ); } magma_setdevice( orig_dev ); magmablasSetKernelStream( orig_stream ); return *info; } /* magma_clahr2 */
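/*
    Reading aid for magma_clahr2_m: V is laid out block-cyclically over the
    ngpu devices with block size nb (see magma_indices_1D_bcyclic and the
    dV -> dVd copies above).  Each GPU computes only its partial product
    yi{d} = A{d} * vi{d} into its own temporary column of Y on the host
    (columns nb .. nb+ngpu-1), and the partial results are summed with axpy
    once the corresponding stream has been synchronized.  The same pattern is
    used for the top block Y(0:k-1,:) after the panel loop, with each device
    writing an nb-wide temporary block of Y.
*/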
extern "C" magma_int_t magma_cbicgstab_merge3( magma_c_matrix A, magma_c_matrix b, magma_c_matrix *x, magma_c_solver_par *solver_par, magma_queue_t queue ) { magma_int_t info = MAGMA_NOTCONVERGED; // prepare solver feedback solver_par->solver = Magma_BICGSTABMERGE; solver_par->numiter = 0; solver_par->spmv_count = 0; // solver variables magmaFloatComplex alpha, beta, omega, rho_old, rho_new, *skp_h={0}; float nom, nom0, betanom, nomb; // some useful variables magmaFloatComplex c_zero = MAGMA_C_ZERO, c_one = MAGMA_C_ONE; magma_int_t dofs = A.num_rows; // workspace magma_c_matrix q={Magma_CSR}, r={Magma_CSR}, rr={Magma_CSR}, p={Magma_CSR}, v={Magma_CSR}, s={Magma_CSR}, t={Magma_CSR}; magmaFloatComplex *d1=NULL, *d2=NULL, *skp=NULL; d1 = NULL; d2 = NULL; skp = NULL; CHECK( magma_cmalloc( &d1, dofs*(2) )); CHECK( magma_cmalloc( &d2, dofs*(2) )); // array for the parameters CHECK( magma_cmalloc( &skp, 8 )); // skp = [alpha|beta|omega|rho_old|rho|nom|tmp1|tmp2] CHECK( magma_cvinit( &q, Magma_DEV, dofs*6, 1, c_zero, queue )); // q = rr|r|p|v|s|t rr.memory_location = Magma_DEV; rr.dval = NULL; rr.num_rows = rr.nnz = dofs; rr.num_cols = 1; rr.storage_type = Magma_DENSE; r.memory_location = Magma_DEV; r.dval = NULL; r.num_rows = r.nnz = dofs; r.num_cols = 1; r.storage_type = Magma_DENSE; p.memory_location = Magma_DEV; p.dval = NULL; p.num_rows = p.nnz = dofs; p.num_cols = 1; p.storage_type = Magma_DENSE; v.memory_location = Magma_DEV; v.dval = NULL; v.num_rows = v.nnz = dofs; v.num_cols = 1; v.storage_type = Magma_DENSE; s.memory_location = Magma_DEV; s.dval = NULL; s.num_rows = s.nnz = dofs; s.num_cols = 1; s.storage_type = Magma_DENSE; t.memory_location = Magma_DEV; t.dval = NULL; t.num_rows = t.nnz = dofs; t.num_cols = 1; t.storage_type = Magma_DENSE; rr.dval = q(0); r.dval = q(1); p.dval = q(2); v.dval = q(3); s.dval = q(4); t.dval = q(5); // solver setup CHECK( magma_cresidualvec( A, b, *x, &r, &nom0, queue)); magma_ccopy( dofs, r.dval, 1, q(0), 1, queue ); // rr = r magma_ccopy( dofs, r.dval, 1, q(1), 1, queue ); // q = r betanom = nom0; nom = nom0*nom0; rho_new = magma_cdotc( dofs, r.dval, 1, r.dval, 1, queue ); // rho=<rr,r> rho_old = omega = alpha = MAGMA_C_MAKE( 1.0, 0. 
); beta = rho_new; solver_par->init_res = nom0; // array on host for the parameters CHECK( magma_cmalloc_cpu( &skp_h, 8 )); nomb = magma_scnrm2( dofs, b.dval, 1, queue ); if ( nomb == 0.0 ){ nomb=1.0; } solver_par->final_res = solver_par->init_res; solver_par->iter_res = solver_par->init_res; if ( solver_par->verbose > 0 ) { solver_par->res_vec[0] = nom0; solver_par->timing[0] = 0.0; } skp_h[0]=alpha; skp_h[1]=beta; skp_h[2]=omega; skp_h[3]=rho_old; skp_h[4]=rho_new; skp_h[5]=MAGMA_C_MAKE(nom, 0.0); magma_csetvector( 8, skp_h, 1, skp, 1, queue ); CHECK( magma_c_spmv( c_one, A, r, c_zero, v, queue )); // z = A r nomb = magma_scnrm2( dofs, b.dval, 1, queue ); if( nom0 < solver_par->atol || nom0/nomb < solver_par->rtol ){ info = MAGMA_SUCCESS; goto cleanup; } //Chronometry real_Double_t tempo1, tempo2; tempo1 = magma_sync_wtime( queue ); solver_par->numiter = 0; solver_par->spmv_count = 0; // start iteration do { solver_par->numiter++; // computes p=r+beta*(p-omega*v) CHECK( magma_cbicgmerge1( dofs, skp, v.dval, r.dval, p.dval, queue )); CHECK( magma_c_spmv( c_one, A, p, c_zero, v, queue )); // v = Ap solver_par->spmv_count++; CHECK( magma_cmdotc( dofs, 1, q.dval, v.dval, d1, d2, skp, queue )); CHECK( magma_cbicgmerge4( 1, skp, queue )); CHECK( magma_cbicgmerge2( dofs, skp, r.dval, v.dval, s.dval, queue )); // s=r-alpha*v CHECK( magma_c_spmv( c_one, A, s, c_zero, t, queue )); // t=As solver_par->spmv_count++; CHECK( magma_cmdotc( dofs, 2, q.dval+4*dofs, t.dval, d1, d2, skp+6, queue )); CHECK( magma_cbicgmerge4( 2, skp, queue )); CHECK( magma_cbicgmerge_xrbeta( dofs, d1, d2, q.dval, r.dval, p.dval, s.dval, t.dval, x->dval, skp, queue )); // check stopping criterion magma_cgetvector_async( 1 , skp+5, 1, skp_h+5, 1, queue ); betanom = sqrt(MAGMA_C_REAL(skp_h[5])); if ( solver_par->verbose > 0 ) { tempo2 = magma_sync_wtime( queue ); if ( (solver_par->numiter)%solver_par->verbose==0 ) { solver_par->res_vec[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) betanom; solver_par->timing[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) tempo2-tempo1; } } if ( betanom < solver_par->atol || betanom/nomb < solver_par->rtol ) { break; } } while ( solver_par->numiter+1 <= solver_par->maxiter ); tempo2 = magma_sync_wtime( queue ); solver_par->runtime = (real_Double_t) tempo2-tempo1; float residual; CHECK( magma_cresidualvec( A, b, *x, &r, &residual, queue)); solver_par->iter_res = betanom; solver_par->final_res = residual; if ( solver_par->numiter < solver_par->maxiter ) { info = MAGMA_SUCCESS; } else if ( solver_par->init_res > solver_par->final_res ) { if ( solver_par->verbose > 0 ) { if ( (solver_par->numiter)%solver_par->verbose==0 ) { solver_par->res_vec[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) betanom; solver_par->timing[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) tempo2-tempo1; } } info = MAGMA_SLOW_CONVERGENCE; if( solver_par->iter_res < solver_par->atol || solver_par->iter_res/solver_par->init_res < solver_par->rtol ){ info = MAGMA_SUCCESS; } } else { if ( solver_par->verbose > 0 ) { if ( (solver_par->numiter)%solver_par->verbose==0 ) { solver_par->res_vec[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) betanom; solver_par->timing[(solver_par->numiter)/solver_par->verbose] = (real_Double_t) tempo2-tempo1; } } info = MAGMA_DIVERGENCE; } cleanup: magma_cmfree(&q, queue ); // frees all vectors magma_free(d1); magma_free(d2); magma_free( skp ); magma_free_cpu( skp_h ); solver_par->info = info; return info; } /* cbicgstab_merge */
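/*
    Note on the stopping test used above: the loop exits once
    betanom = ||r||_2 (read back from the device-side scalar skp[5]) satisfies
    either the absolute tolerance   betanom < solver_par->atol
    or the relative tolerance       betanom / ||b||_2 < solver_par->rtol;
    MAGMA_SLOW_CONVERGENCE or MAGMA_DIVERGENCE is reported when the iteration
    limit is reached without meeting either bound.
*/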
/** Purpose ======= CLAHEF computes a partial factorization of a complex Hermitian matrix A using the Bunch-Kaufman diagonal pivoting method. The partial factorization has the form: A = ( I U12 ) ( A11 0 ) ( I 0 ) if UPLO = 'U', or: ( 0 U22 ) ( 0 D ) ( U12' U22' ) A = ( L11 0 ) ( D 0 ) ( L11' L21' ) if UPLO = 'L' ( L21 I ) ( 0 A22 ) ( 0 I ) where the order of D is at most NB. The actual order is returned in the argument KB, and is either NB or NB-1, or N if N <= NB. Note that U' denotes the conjugate transpose of U. CLAHEF is an auxiliary routine called by CHETRF. It uses blocked code (calling Level 3 BLAS) to update the submatrix A11 (if UPLO = 'U') or A22 (if UPLO = 'L'). Arguments --------- @param[in] UPLO CHARACTER Specifies whether the upper or lower triangular part of the Hermitian matrix A is stored: - = 'U': Upper triangular - = 'L': Lower triangular @param[in] N INTEGER The order of the matrix A. N >= 0. @param[in] NB INTEGER The maximum number of columns of the matrix A that should be factored. NB should be at least 2 to allow for 2-by-2 pivot blocks. @param[out] KB INTEGER The number of columns of A that were actually factored. KB is either NB-1 or NB, or N if N <= NB. @param[in,out] A COMPLEX array, dimension (LDA,N) On entry, the Hermitian matrix A. If UPLO = 'U', the leading n-by-n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading n-by-n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. On exit, A contains details of the partial factorization. @param[in] LDA INTEGER The leading dimension of the array A. LDA >= max(1,N). @param[out] ipiv INTEGER array, dimension (N) Details of the interchanges and the block structure of D. If UPLO = 'U', only the last KB elements of ipiv are set; if UPLO = 'L', only the first KB elements are set. \n If ipiv(k) > 0, then rows and columns k and ipiv(k) were interchanged and D(k,k) is a 1-by-1 diagonal block. If UPLO = 'U' and ipiv(k) = ipiv(k-1) < 0, then rows and columns k-1 and -ipiv(k) were interchanged and D(k-1:k,k-1:k) is a 2-by-2 diagonal block. If UPLO = 'L' and ipiv(k) = ipiv(k+1) < 0, then rows and columns k+1 and -ipiv(k) were interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block. @param[out] W (workspace) COMPLEX array, dimension (LDW,NB) @param[in] LDW INTEGER The leading dimension of the array W. LDW >= max(1,N). @param[out] INFO INTEGER - = 0: successful exit - > 0: if INFO = k, D(k,k) is exactly zero. The factorization has been completed, but the block diagonal matrix D is exactly singular. @ingroup magma_chetrf_comp ********************************************************************/ extern "C" magma_int_t magma_clahef_gpu( magma_uplo_t uplo, magma_int_t n, magma_int_t nb, magma_int_t *kb, magmaFloatComplex *hA, magma_int_t lda, magmaFloatComplex_ptr dA, size_t dA_offset, magma_int_t ldda, magma_int_t *ipiv, magmaFloatComplex_ptr dW, size_t dW_offset, magma_int_t lddw, magma_queue_t queue, magma_int_t *info) { /* .. Parameters .. */ float d_one = 1.0; float d_zero = 0.0; float d_eight = 8.0; float d_seven = 7.0; #if defined(PRECISION_c) float f_zero = 0.0; #endif magmaFloatComplex c_one = MAGMA_C_ONE; magmaFloatComplex c_mone = -MAGMA_C_ONE; magma_int_t upper = (uplo == MagmaUpper); magma_int_t ione = 1; /* .. Local Scalars .. 
*/ magma_int_t imax = 0, jmax = 0, kk, kkW, kp, kstep, iinfo; float abs_akk, alpha, colmax, R1, rowmax; magmaFloatComplex Zimax, Z; #define dA(i, j) dA, dA_offset + (j)*ldda + (i) #define dW(i, j) dW, dW_offset + (j)*lddw + (i) #define A(i, j) (hA + (j)*lda + (i)) /* .. Executable Statements .. */ *info = 0; /* Initialize alpha for use in choosing pivot block size. */ alpha = ( d_one+sqrt( d_seven ) ) / d_eight; magma_event_t event = NULL; if( upper ) { /* Factorize the trailing columns of A using the upper triangle of A and working backwards, and compute the matrix W = U12*D for use in updating A11 (note that conjg(W) is actually stored) K is the main loop index, decreasing from N in steps of 1 or 2 KW is the column of W which corresponds to column K of A */ int k, kw = 0; for (k = n-1; k+1 > max(n-nb+1, nb); k -= kstep) { kw = nb - (n-k); /* Copy column K of A to column KW of W and update it */ magma_ccopy( k+1, dA( 0, k ), 1, dW( 0, kw ), 1, queue ); // set imaginary part of diagonal to be zero #if defined(PRECISION_z) magma_dsetvector_async( 1, &d_zero, 1, dW, 2*(k+ kw*lddw+dW_offset)+1, 1, queue, &event); magma_queue_sync( queue ); #elif defined(PRECISION_c) magma_ssetvector_async( 1, &f_zero, 1, dW, 2*(k+ kw*lddw+dW_offset)+1, 1, queue, &event); magma_queue_sync( queue ); #endif if (k+1 < n) { magma_cgemv( MagmaNoTrans, k+1, n-(k+1), c_mone, dA( 0, k+1 ), ldda, dW( k, kw+1 ), lddw, c_one, dW( 0, kw ), ione, queue ); // set imaginary part of diagonal to be zero #if defined(PRECISION_z) magma_dsetvector_async( 1, &d_zero, 1, dW, 2*(k+ kw*lddw+dW_offset)+1, 1, queue, &event ); magma_queue_sync( queue ); #elif defined(PRECISION_c) magma_ssetvector_async( 1, &f_zero, 1, dW, 2*(k+ kw*lddw+dW_offset)+1, 1, queue, &event ); magma_queue_sync( queue ); #endif } kstep = 1; /* Determine rows and columns to be interchanged and whether a 1-by-1 or 2-by-2 pivot block will be used */ magma_cgetvector_async( 1, dW( k, kw ), 1, &Z, 1, queue, &event ); magma_queue_sync( queue ); abs_akk = fabs( MAGMA_C_REAL( Z ) ); /* imax is the row-index of the largest off-diagonal element in column K, and colmax is its absolute value */ if( k > 0 ) { // magma is one-base imax = magma_icamax( k, dW( 0, kw ), 1, queue ) - 1; magma_cgetvector( 1, dW( imax, kw ), 1, &Z, 1, queue ); colmax = MAGMA_C_ABS1( Z ); } else { colmax = d_zero; } if( max( abs_akk, colmax ) == 0.0 ) { /* Column K is zero: set INFO and continue */ if ( *info == 0 ) *info = k; kp = k; #if defined(PRECISION_z) magma_dsetvector_async( 1, &d_zero, 1, dA, 2*(k+ k*ldda+dA_offset)+1, 1, queue, &event ); magma_queue_sync( queue ); #elif defined(PRECISION_c) magma_ssetvector_async( 1, &f_zero, 1, dA, 2*(k+ k*ldda+dA_offset)+1, 1, queue, &event ); magma_queue_sync( queue ); #endif } else { if( abs_akk >= alpha*colmax ) { /* no interchange, use 1-by-1 pivot block */ kp = k; } else { /* Copy column imax to column KW-1 of W and update it */ magma_ccopy( imax+1, dA( 0, imax ), 1, dW( 0, kw-1 ), 1, queue ); #if defined(PRECISION_z) magma_dsetvector_async( 1, &d_zero, 1, dW, 2*(imax+ (kw-1)*lddw+dW_offset)+1, 1, queue, &event ); #elif defined(PRECISION_c) magma_ssetvector_async( 1, &f_zero, 1, dW, 2*(imax+ (kw-1)*lddw+dW_offset)+1, 1, queue, &event ); #endif #if defined(PRECISION_z) || defined(PRECISION_c) magmablas_clacpy_cnjg( k-imax, dA(imax,imax+1), ldda, dW(imax+1,kw-1), 1, queue ); #else magma_ccopy( k-imax, dA(imax,imax+1), ldda, dW(imax+1,kw-1), 1, queue ); #endif if( k+1 < n ) { magma_cgemv( MagmaNoTrans, k+1, n-(k+1), c_mone, dA( 0, k+1 ), ldda, dW( 
imax, kw+1 ), lddw, c_one, dW( 0, kw-1 ), ione, queue ); #if defined(PRECISION_z) magma_dsetvector_async( 1, &d_zero, 1, dW, 2*(imax+ (kw-1)*lddw+dW_offset)+1, 1, queue, &event ); #elif defined(PRECISION_c) magma_ssetvector_async( 1, &f_zero, 1, dW, 2*(imax+ (kw-1)*lddw+dW_offset)+1, 1, queue, &event ); #endif } magma_cgetvector_async( 1, dW( imax, kw-1 ), 1, &Zimax, 1, queue, &event ); magma_queue_sync( queue ); /* jmax is the column-index of the largest off-diagonal element in row imax, and rowmax is its absolute value */ jmax = imax + magma_icamax( k-imax, dW( imax+1, kw-1 ), 1, queue ); magma_cgetvector( 1, dW( jmax, kw-1 ), 1, &Z, 1, queue ); rowmax = MAGMA_C_ABS1( Z ); if ( imax > 0 ) { // magma is one-base jmax = magma_icamax( imax, dW( 0, kw-1 ), 1, queue ) - 1; magma_cgetvector( 1, dW( jmax, kw-1 ), 1, &Z, 1, queue ); rowmax = max( rowmax, MAGMA_C_ABS1( Z ) ); } if( abs_akk >= alpha*colmax*( colmax / rowmax ) ) { /* no interchange, use 1-by-1 pivot block */ kp = k; } else if ( fabs( MAGMA_C_REAL( Zimax ) ) >= alpha*rowmax ) { /* interchange rows and columns K and imax, use 1-by-1 pivot block */ kp = imax; /* copy column KW-1 of W to column KW */ magma_ccopy( k+1, dW( 0, kw-1 ), 1, dW( 0, kw ), 1, queue ); } else { /* interchange rows and columns K-1 and imax, use 2-by-2 pivot block */ kp = imax; kstep = 2; } } kk = k - kstep + 1; kkW = nb - (n - kk); /* Updated column kp is already stored in column kkW of W */ if( kp != kk ) { /* Interchange rows kk and kp in last kk columns of A and W */ // note: row-swap A(:,kk) magmablas_cswap( n-kk, dA( kk, kk ), ldda, dA( kp, kk ), ldda, queue ); magmablas_cswap( n-kk, dW( kk, kkW), lddw, dW( kp, kkW), lddw, queue ); /* Copy non-updated column kk to column kp */ #if defined(PRECISION_z) || defined(PRECISION_c) magmablas_clacpy_cnjg( kk-kp-1, dA( kp+1, kk ), 1, dA( kp, kp+1 ), ldda, queue ); #else magma_ccopy( kk-kp-1, dA( kp+1, kk ), 1, dA( kp, kp+1 ), ldda, queue ); #endif // now A(kp,kk) should be A(kk,kk), and copy to A(kp,kp) magma_ccopy( kp+1, dA( 0, kk ), 1, dA( 0, kp ), 1, queue ); #if defined(PRECISION_z) magma_dsetvector_async( 1, &d_zero, 1, dA, 2*(kp+ kp*ldda+dA_offset)+1, 1, queue, &event ); magma_queue_sync( queue ); #elif defined(PRECISION_c) magma_ssetvector_async( 1, &f_zero, 1, dA, 2*(kp+ kp*ldda+dA_offset)+1, 1, queue, &event ); #endif } if( kstep == 1 ) { /* 1-by-1 pivot block D(k): column KW of W now holds W(k) = U(k)*D(k) where U(k) is the k-th column of U Store U(k) in column k of A */ magma_ccopy( k+1, dW( 0, kw ), 1, dA( 0, k ), 1, queue ); if ( k > 0 ) { magma_cgetvector_async( 1, dA( k, k ), 1, &Z, 1, queue, &event ); magma_queue_sync( queue ); R1 = d_one / MAGMA_C_REAL( Z ); magma_csscal( k, R1, dA( 0, k ), 1, queue ); /* Conjugate W(k) */ #if defined(PRECISION_z) || defined(PRECISION_c) magmablas_clacpy_cnjg( k, dW( 0, kw ), 1, dW( 0, kw ), 1, queue ); #endif } } else { /* 2-by-2 pivot block D(k): columns KW and KW-1 of W now hold ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) where U(k) and U(k-1) are the k-th and (k-1)-th columns of U */ if( k > 1 ) { /* Store U(k) and U(k-1) in columns k and k-1 of A */ magmablas_clascl_2x2( MagmaUpper, k-1, dW(0, kw-1), lddw, dA(0,k-1), ldda, &iinfo, queue ); } /* Copy D(k) to A */ magma_ccopymatrix( 2, 2, dW( k-1, kw-1 ), lddw, dA( k-1, k-1 ), ldda, queue ); /* Conjugate W(k) and W(k-1) */ #if defined(PRECISION_z) || defined(PRECISION_c) magmablas_clacpy_cnjg( k, dW( 0, kw ), 1, dW( 0, kw ), 1, queue ); magmablas_clacpy_cnjg( k-1, dW( 0, kw-1 ), 1, dW( 0, kw-1 ), 1, queue ); #endif } 
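                /* Summary of the Bunch-Kaufman pivot choice made above:
                     abs_akk >= alpha*colmax                      -> 1-by-1 pivot, kp = k
                     abs_akk >= alpha*colmax*(colmax/rowmax)      -> 1-by-1 pivot, kp = k
                     |Re(W(imax,kw-1))| >= alpha*rowmax           -> 1-by-1 pivot, kp = imax
                     otherwise                                    -> 2-by-2 pivot, kp = imax, kstep = 2
                   The selected column(s) of W hold W(k) = U(k)*D(k) (and W(k-1)
                   for a 2-by-2 pivot) and have been copied back into A; W stores
                   the conjugated columns used by the trailing update below. */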
        }

        /* Store details of the interchanges in ipiv */
        if ( kstep == 1 ) {
            ipiv[ k ] = 1+kp;
        }
        else {
            ipiv[ k ]   = -(1+kp);
            ipiv[ k-1 ] = -(1+kp);
        }
    }

    /* Update the upper triangle of A11 (= A(1:k,1:k)) as
           A11 := A11 - U12*D*U12' = A11 - U12*W',
       computing blocks of NB columns at a time
       (note that conjg(W) is actually stored) */
    kw = nb - (n-k);
    for (int j = ( k / nb )*nb; j >= 0; j -= nb ) {
        int jb = min( nb, k-j+1 );

        #ifdef SYMMETRIC_UPDATE
        /* Update the upper triangle of the diagonal block */
        for (int jj = j; jj < j + jb; jj++) {
            #if defined(PRECISION_z)
            magma_dsetvector_async( 1, &d_zero, 1,
                                    dA, 2*(jj+ jj*ldda+dA_offset)+1, 1, queue, &event );
            #elif defined(PRECISION_c)
            magma_ssetvector_async( 1, &f_zero, 1,
                                    dA, 2*(jj+ jj*ldda+dA_offset)+1, 1, queue, &event );
            #endif
            magma_cgemv( MagmaNoTrans, jj-j+1, n-(k+1),
                         c_mone, dA( j, k+1 ),   ldda,
                                 dW( jj, kw+1 ), lddw,
                         c_one,  dA( j, jj ),    1, queue );
            #if defined(PRECISION_z)
            magma_dsetvector_async( 1, &d_zero, 1,
                                    dA, 2*(jj+ jj*ldda+dA_offset)+1, 1, queue, &event );
            #elif defined(PRECISION_c)
            magma_ssetvector_async( 1, &f_zero, 1,
                                    dA, 2*(jj+ jj*ldda+dA_offset)+1, 1, queue, &event );
            #endif
        }

        /* Update the rectangular superdiagonal block */
        magma_cgemm( MagmaNoTrans, MagmaTrans, j, jb, n-(k+1),
                     c_mone, dA( 0, k+1 ),  ldda,
                             dW( j, kw+1 ), lddw,
                     c_one,  dA( 0, j ),    ldda, queue );
        #else
        #if defined(PRECISION_z)
        magmablas_dlaset(MagmaUpperLower, 1, jb, 0, 0, dA, 2*(j+ j*ldda+dA_offset)+1, 2*(1+ldda), queue );
        #elif defined(PRECISION_c)
        magmablas_slaset(MagmaUpperLower, 1, jb, 0, 0, dA, 2*(j+ j*ldda+dA_offset)+1, 2*(1+ldda), queue );
        #endif
        magma_cgemm( MagmaNoTrans, MagmaTrans, j+jb, jb, n-(k+1),
                     c_mone, dA( 0, k+1 ),  ldda,
                             dW( j, kw+1 ), lddw,
                     c_one,  dA( 0, j ),    ldda, queue );
        #if defined(PRECISION_z)
        magmablas_dlaset(MagmaUpperLower, 1, jb, 0, 0, dA, 2*(j+ j*ldda+dA_offset)+1, 2*(1+ldda), queue );
        #elif defined(PRECISION_c)
        magmablas_slaset(MagmaUpperLower, 1, jb, 0, 0, dA, 2*(j+ j*ldda+dA_offset)+1, 2*(1+ldda), queue );
        #endif
        #endif
    }

    /* Put U12 in standard form by partially undoing
       the interchanges in columns k+1:n */
    for (int j = k+1; j < n;) {
        int jj = j;
        int jp = ipiv[ j ];
        if ( jp < 0 ) {
            jp = -jp;
            j = j + 1;
        }
        j  = j + 1;
        jp = jp - 1;
        if ( jp != jj && j < n )
            magmablas_cswap( n-j, dA( jp, j ), ldda, dA( jj, j ), ldda, queue );
    }

    // copying the panel back to CPU
    magma_cgetmatrix_async( n, n-(k+1), dA(0,k+1), ldda, A(0,k+1), lda, queue, &event );
    magma_queue_sync( queue );

    /* Set KB to the number of columns factorized */
    *kb = n - (k+1);
}
else {
    /* Factorize the leading columns of A using the lower triangle of A and
       working forwards, and compute the matrix W = L21*D for use in updating
       A22 (note that conjg(W) is actually stored).
       K is the main loop index, increasing from 1 in steps of 1 or 2 */
    int k;
    for (k = 0; k < min(nb-1,n); k += kstep) {
        /* Copy column K of A to column K of W and update it */
        /* -------------------------------------------------------------- */
        magma_ccopy( n-k, dA( k, k ), 1, dW( k, k ), 1, queue );
        // set imaginary part of diagonal to be zero
        #if defined(PRECISION_z)
        magma_dsetvector_async( 1, &d_zero, 1,
                                dW, 2*(k*lddw+k+dW_offset)+1, 1, queue, &event);
        magma_queue_sync( queue );
        #elif defined(PRECISION_c)
        magma_ssetvector_async( 1, &f_zero, 1,
                                dW, 2*(k*lddw+k+dW_offset)+1, 1, queue, &event);
        magma_queue_sync( queue );
        #endif
        /* -------------------------------------------------------------- */

        magma_cgemv( MagmaNoTrans, n-k, k,
                     c_mone, dA( k, 0 ), ldda,
                             dW( k, 0 ), lddw,
                     c_one,  dW( k, k ), ione, queue );
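        /* Note (added comment): this gemv subtracts the contribution of the k
           already-factorized columns, A(k:n-1,0:k-1) * W(k,0:k-1)^T, from the
           freshly copied column; since conjg(W) is stored (see the block
           comments above), the row of W used here is already conjugated. */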
        // re-set imaginary part of diagonal to be zero
        #if defined(PRECISION_z)
        magma_dsetvector_async( 1, &d_zero, 1,
                                dW, 2*(k*lddw+k+dW_offset)+1, 1, queue, &event );
        magma_queue_sync( queue );
        #elif defined(PRECISION_c)
        magma_ssetvector_async( 1, &f_zero, 1,
                                dW, 2*(k*lddw+k+dW_offset)+1, 1, queue, &event );
        magma_queue_sync( queue );
        #endif

        kstep = 1;

        /* Determine rows and columns to be interchanged and whether
           a 1-by-1 or 2-by-2 pivot block will be used */
        magma_cgetvector_async( 1, dW( k, k ), 1, &Z, 1, queue, &event );
        magma_queue_sync( queue );
        abs_akk = fabs( MAGMA_C_REAL( Z ) );

        /* imax is the row-index of the largest off-diagonal element in
           column K, and colmax is its absolute value */
        if ( k < n-1 ) {
            // magmablas is one-based
            imax = k + magma_icamax( n-k-1, dW(k+1,k), 1, queue );
            magma_cgetvector( 1, dW( imax,k ), 1, &Z, 1, queue );
            colmax = MAGMA_C_ABS1( Z );
        }
        else {
            colmax = d_zero;
        }

        if ( max( abs_akk, colmax ) == 0.0 ) {
            /* Column K is zero: set INFO and continue */
            if ( *info == 0 )
                *info = k;
            kp = k;

            // make sure the imaginary part of diagonal is zero
            #if defined(PRECISION_z)
            magma_dsetvector_async( 1, &d_zero, 1,
                                    dA, 2*(k*ldda+k+dA_offset)+1, 1, queue, &event );
            magma_queue_sync( queue );
            #elif defined(PRECISION_c)
            magma_ssetvector_async( 1, &f_zero, 1,
                                    dA, 2*(k*ldda+k+dA_offset)+1, 1, queue, &event );
            magma_queue_sync( queue );
            #endif
        }
        else {
            if ( abs_akk >= alpha*colmax ) {
                /* no interchange, use 1-by-1 pivot block */
                kp = k;
            }
            else {
                /* Copy column imax to column K+1 of W and update it */
                #if defined(PRECISION_z) || defined(PRECISION_c)
                magmablas_clacpy_cnjg( imax-k, dA(imax,k), ldda, dW(k,k+1), 1, queue );
                #else
                magma_ccopy( imax-k, dA( imax, k ), ldda, dW( k, k+1 ), 1, queue );
                #endif

                magma_ccopy( n-imax, dA( imax, imax ), 1, dW( imax, k+1 ), 1, queue );
                #if defined(PRECISION_z)
                magma_dsetvector_async( 1, &d_zero, 1,
                                        dW, 2*((k+1)*lddw+imax+dW_offset)+1, 1, queue, &event);
                magma_queue_sync( queue );
                #elif defined(PRECISION_c)
                magma_ssetvector_async( 1, &f_zero, 1,
                                        dW, 2*((k+1)*lddw+imax+dW_offset)+1, 1, queue, &event);
                magma_queue_sync( queue );
                #endif

                magma_cgemv( MagmaNoTrans, n-k, k,
                             c_mone, dA( k, 0 ),    ldda,
                                     dW( imax, 0 ), lddw,
                             c_one,  dW( k, k+1 ),  ione, queue );
                #if defined(PRECISION_z)
                magma_dsetvector_async( 1, &d_zero, 1,
                                        dW, 2*((k+1)*lddw+imax+dW_offset)+1, 1, queue, &event);
                magma_queue_sync( queue );
                #elif defined(PRECISION_c)
                magma_ssetvector_async( 1, &f_zero, 1,
                                        dW, 2*((k+1)*lddw+imax+dW_offset)+1, 1, queue, &event);
                magma_queue_sync( queue );
                #endif

                magma_cgetvector_async( 1, dW(imax,k+1), 1, &Zimax, 1, queue, &event);
                magma_queue_sync( queue );

                /* jmax is the column-index of the largest off-diagonal element
                   in row imax, and rowmax is its absolute value */
                // magmablas is one-based
                jmax = k-1 + magma_icamax( imax-k, dW(k, k+1), 1, queue );
                magma_cgetvector( 1, dW(jmax,k+1), 1, &Z, 1, queue );
                rowmax = MAGMA_C_ABS1( Z );
                if ( imax < n-1 ) {
                    // magmablas is one-based
                    jmax = imax + magma_icamax( (n-1)-imax, dW(imax+1,k+1), 1, queue);
                    magma_cgetvector( 1, dW(jmax,k+1), 1, &Z, 1, queue );
                    rowmax = max( rowmax, MAGMA_C_ABS1( Z ) );
                }

                if ( abs_akk >= alpha*colmax*( colmax / rowmax ) ) {
                    /* no interchange, use 1-by-1 pivot block */
                    kp = k;
                }
                else if ( fabs( MAGMA_C_REAL( Zimax ) ) >= alpha*rowmax ) {
                    /* interchange rows and columns K and imax, use 1-by-1 pivot block */
                    kp = imax;
                    /* copy column K+1 of W to column K */
                    magma_ccopy( n-k, dW( k, k+1 ), 1, dW( k, k ), 1, queue );
                }
                else {
                    /* interchange rows and columns K+1 and imax, use 2-by-2 pivot block */
                    kp = imax;
                    kstep = 2;
                }
            }

            kk = k + kstep - 1;
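            // added comment: kk is the trailing column of the pivot block just
            // selected: kk == k for a 1-by-1 pivot, kk == k+1 for a 2-by-2 pivot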
            /* Updated column kp is already stored in column kk of W */
            if ( kp != kk ) {
                /* Copy non-updated column kk to column kp */
                /* ------------------------------------------------------------------ */
                #if defined(PRECISION_z) || defined(PRECISION_c)
                magmablas_clacpy_cnjg( kp-kk, dA( kk, kk ), 1, dA( kp, kk ), ldda, queue );
                #else
                magma_ccopy( kp-kk, dA( kk, kk ), 1, dA( kp, kk ), ldda, queue );
                #endif
                if ( kp < n ) {
                    magma_ccopy( n-kp, dA( kp, kk), 1, dA( kp, kp ), 1, queue );
                }
                /* ------------------------------------------------------------------ */

                /* Interchange rows kk and kp in first kk columns of A and W */
                magmablas_cswap( kk+1, dA( kk, 0 ), ldda, dA( kp, 0 ), ldda, queue );
                magmablas_cswap( kk+1, dW( kk, 0 ), lddw, dW( kp, 0 ), lddw, queue );
            }

            if ( kstep == 1 ) {
                /* 1-by-1 pivot block D(k): column k of W now holds
                       W(k) = L(k)*D(k)
                   where L(k) is the k-th column of L.
                   Store L(k) in column k of A */
                magma_ccopy( n-k, dW( k, k ), 1, dA( k, k ), 1, queue );
                if ( k < n-1 ) {
                    magma_cgetvector_async( 1, dA(k,k), 1, &Z, 1, queue, &event );
                    magma_queue_sync( queue );
                    R1 = d_one / MAGMA_C_REAL( Z );
                    magma_csscal((n-1)-k, R1, dA( k+1,k ), 1, queue);

                    /* Conjugate W(k) */
                    #if defined(PRECISION_z) || defined(PRECISION_c)
                    magmablas_clacpy_cnjg( (n-1)-k, dW( k+1,k ), 1, dW( k+1,k ), 1, queue );
                    #endif
                }
            }
            else {
                /* 2-by-2 pivot block D(k): columns k and k+1 of W now hold
                       ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k)
                   where L(k) and L(k+1) are the k-th and (k+1)-th columns of L */
                magmablas_clascl_2x2( MagmaLower, n-(k+2), dW(k,k), lddw, dA(k+2,k), ldda, &iinfo, queue );

                /* Copy D(k) to A */
                magma_ccopymatrix( 2, 2, dW( k, k ), lddw, dA( k, k ), ldda, queue );

                /* Conjugate W(k) and W(k+1) */
                #if defined(PRECISION_z) || defined(PRECISION_c)
                magmablas_clacpy_cnjg( (n-1)-k,   dW( k+1,k ),  1, dW( k+1,k ),   1, queue );
                magmablas_clacpy_cnjg( (n-1)-k-1, dW( k+2,k+1), 1, dW( k+2,k+1 ), 1, queue );
                #endif
            }
        }

        /* Store details of the interchanges in ipiv */
        if ( kstep == 1 ) {
            ipiv[k] = kp+1;
        }
        else {
            ipiv[k]   = -kp-1;
            ipiv[k+1] = -kp-1;
        }
    }

    /* Update the lower triangle of A22 (= A(k:n,k:n)) as
           A22 := A22 - L21*D*L21' = A22 - L21*W',
       computing blocks of NB columns at a time
       (note that conjg(W) is actually stored) */
    for ( int j = k; j < n; j += nb ) {
        int jb = min( nb, n-j );

        /* Update the lower triangle of the diagonal block */
        #ifdef SYMMETRIC_UPDATE
        for (int jj = j; jj < j + jb; jj++) {
            int jnb = j + jb - jj;
            /* -------------------------------------------------------- */
            magma_cgemv( MagmaNoTrans, jnb, k,
                         c_mone, dA( jj, 0 ), ldda,
                                 dW( jj, 0 ), lddw,
                         c_one,  dA( jj, jj ), ione, queue );
            /* -------------------------------------------------------- */
        }

        /* Update the rectangular subdiagonal block */
        if ( j+jb < n ) {
            int nk = n - (j+jb);
            /* -------------------------------------------- */
            magma_cgemm( MagmaNoTrans, MagmaTrans, nk, jb, k,
                         c_mone, dA( j+jb, 0 ), ldda,
                                 dW( j, 0 ),    lddw,
                         c_one,  dA( j+jb, j ), ldda, queue );
            /* ------------------------------------------- */
        }
        #else
        #if defined(PRECISION_z)
        magmablas_dlaset(MagmaUpperLower, 1, jb, 0, 0, dA, 2*(j*ldda+j+dA_offset)+1, 2*(1+ldda), queue );
        #elif defined(PRECISION_c)
        magmablas_slaset(MagmaUpperLower, 1, jb, 0, 0, dA, 2*(j*ldda+j+dA_offset)+1, 2*(1+ldda), queue );
        #endif
        magma_cgemm( MagmaNoTrans, MagmaTrans, n-j, jb, k,
                     c_mone, dA( j, 0 ), ldda,
                             dW( j, 0 ), lddw,
                     c_one,  dA( j, j ), ldda, queue );
        #if defined(PRECISION_z)
        magmablas_dlaset(MagmaUpperLower, 1, jb, 0, 0, dA, 2*(j*ldda+j+dA_offset)+1, 2*(1+ldda), queue );
        #elif defined(PRECISION_c)
        magmablas_slaset(MagmaUpperLower, 1, jb, 0, 0, dA, 2*(j*ldda+j+dA_offset)+1, 2*(1+ldda), queue );
        #endif
        #endif
    }

    /* Put L21 in standard form by partially undoing
       the interchanges in columns 1:k-1 */
    for (int j = k; j > 0;) {
        int jj = j;
        int jp = ipiv[j-1];
        if ( jp < 0 ) {
            jp = -jp;
            j--;
        }
        j--;
        if ( jp != jj && j >= 1 ) {
            magmablas_cswap( j, dA( jp-1,0 ), ldda, dA( jj-1,0 ), ldda, queue );
        }
    }

    // copying the panel back to CPU
    magma_cgetmatrix_async( n, k, dA(0,0), ldda, A(0,0), lda, queue, &event );
    magma_queue_sync( queue );

    /* Set KB to the number of columns factorized */
    *kb = k;
}

    return *info;
}   /* End of CLAHEF */
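/*
   Illustrative note (not part of the MAGMA routine above): the pivot tests in
   the panel factorization are the standard Bunch-Kaufman criteria, with
   alpha = (1 + sqrt(17))/8 bounding element growth.  The sketch below is a
   minimal, host-side, real-valued restatement of that decision logic for a
   single column, assuming a plain column-major array; the names
   choose_bk_pivot, A, n, k, and kp are hypothetical and exist only for this
   example, and the zero-column case handled separately in the GPU code is
   folded into the first test here.
*/
#include <math.h>
#include <stdio.h>

/* Decide between a 1-by-1 and a 2-by-2 pivot for column k of a symmetric
   n-by-n matrix A (column-major, both triangles filled for simplicity).
   Returns kstep (1 or 2) and sets *kp to the selected pivot row. */
static int choose_bk_pivot( const double *A, int n, int k, int *kp )
{
    const double alpha = (1.0 + sqrt(17.0)) / 8.0;   /* Bunch-Kaufman constant */
    double abs_akk = fabs( A[k + k*n] );

    /* colmax = largest off-diagonal magnitude in column k below the diagonal */
    int    imax   = k;
    double colmax = 0.0;
    for (int i = k+1; i < n; ++i) {
        if ( fabs(A[i + k*n]) > colmax ) { colmax = fabs(A[i + k*n]); imax = i; }
    }

    if ( abs_akk >= alpha*colmax ) {                 /* diagonal large enough: 1-by-1, no swap */
        *kp = k;
        return 1;
    }

    /* rowmax = largest off-diagonal magnitude in row imax of the trailing block */
    double rowmax = 0.0;
    for (int j = k; j < n; ++j) {
        if ( j != imax && fabs(A[imax + j*n]) > rowmax ) rowmax = fabs(A[imax + j*n]);
    }

    if ( abs_akk >= alpha*colmax*(colmax/rowmax) ) { /* 1-by-1, no swap */
        *kp = k;
        return 1;
    }
    else if ( fabs(A[imax + imax*n]) >= alpha*rowmax ) {  /* 1-by-1, swap k <-> imax */
        *kp = imax;
        return 1;
    }
    else {                                           /* 2-by-2 pivot with row imax */
        *kp = imax;
        return 2;
    }
}

int main( void )
{
    /* tiny symmetric example: a nearly-zero diagonal forces a 2-by-2 pivot */
    double A[9] = { 1e-8, 2.0,  0.5,
                    2.0,  1e-8, 0.3,
                    0.5,  0.3,  4.0 };
    int kp, kstep = choose_bk_pivot( A, 3, 0, &kp );
    printf( "kstep = %d, kp = %d\n", kstep, kp );     /* prints: kstep = 2, kp = 1 */
    return 0;
}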