Code example #1
File: cpidr_strms.cpp  Project: xulunfan/magma
extern "C" magma_int_t
magma_cpidr_strms(
    magma_c_matrix A, magma_c_matrix b, magma_c_matrix *x,
    magma_c_solver_par *solver_par,
    magma_c_preconditioner *precond_par,
    magma_queue_t queue )
{
    magma_int_t info = MAGMA_NOTCONVERGED;

    // prepare solver feedback
    solver_par->solver = Magma_PIDRMERGE;
    solver_par->numiter = 0;
    solver_par->spmv_count = 0;
    solver_par->init_res = 0.0;
    solver_par->final_res = 0.0;
    solver_par->iter_res = 0.0;
    solver_par->runtime = 0.0;

    // constants
    const magmaFloatComplex c_zero = MAGMA_C_ZERO;
    const magmaFloatComplex c_one = MAGMA_C_ONE;
    const magmaFloatComplex c_n_one = MAGMA_C_NEG_ONE;

    // internal user options
    const magma_int_t smoothing = 1;   // 0 = disable, 1 = enable
    const float angle = 0.7;          // [0-1]

    // local variables
    magma_int_t iseed[4] = {0, 0, 0, 1};
    magma_int_t dof;
    magma_int_t s;
    magma_int_t distr;
    magma_int_t k, i, sk;
    magma_int_t innerflag;
    magma_int_t ldd;
    magma_int_t q;
    float residual;
    float nrm;
    float nrmb;
    float nrmr;
    float nrmt;
    float rho;
    magmaFloatComplex om;
    magmaFloatComplex gamma;

    // matrices and vectors
    magma_c_matrix dxs = {Magma_CSR};
    magma_c_matrix dr = {Magma_CSR}, drs = {Magma_CSR};
    magma_c_matrix dP = {Magma_CSR}, dP1 = {Magma_CSR};
    magma_c_matrix dG = {Magma_CSR}, dGcol = {Magma_CSR};
    magma_c_matrix dU = {Magma_CSR};
    magma_c_matrix dM = {Magma_CSR};
    magma_c_matrix df = {Magma_CSR};
    magma_c_matrix dt = {Magma_CSR}, dtt = {Magma_CSR};
    magma_c_matrix dc = {Magma_CSR};
    magma_c_matrix dv = {Magma_CSR};
    magma_c_matrix dlu = {Magma_CSR};
    magma_c_matrix dskp = {Magma_CSR};
    magma_c_matrix dalpha = {Magma_CSR};
    magma_c_matrix dbeta = {Magma_CSR};
    magmaFloatComplex *hMdiag = NULL;
    magmaFloatComplex *hskp = NULL;
    magmaFloatComplex *halpha = NULL;
    magmaFloatComplex *hbeta = NULL;
    magmaFloatComplex *d1 = NULL, *d2 = NULL;
    
    // queue variables
    const magma_int_t nqueues = 3;     // number of queues
    magma_queue_t queues[nqueues];    

    // chronometry
    real_Double_t tempo1, tempo2;

    // create additional queues
    queues[0] = queue;
    for ( q = 1; q < nqueues; q++ ) {
        magma_queue_create( queue->device(), &(queues[q]) );
    }

    // initial s space
    // TODO: add option for 's' (shadow space number)
    // Hack: uses the '--restart' option as the shadow space number.
    //       This is fragile because the default value of the restart option is used to detect
    //       whether the user provided a custom restart. If the default restart value is ever
    //       changed, the code will assume the user set it (unless the default value is also
    //       updated in the 'if' statement below).
    s = 1;
    if ( solver_par->restart != 50 ) {
        if ( solver_par->restart > A.num_cols ) {
            s = A.num_cols;
        } else {
            s = solver_par->restart;
        }
    }
    solver_par->restart = s;

    // set max iterations
    solver_par->maxiter = min( 2 * A.num_cols, solver_par->maxiter );

    // check if matrix A is square
    if ( A.num_rows != A.num_cols ) {
        //printf("Matrix A is not square.\n");
        info = MAGMA_ERR_NOT_SUPPORTED;
        goto cleanup;
    }

    // |b|
    nrmb = magma_scnrm2( b.num_rows, b.dval, 1, queue );
    if ( nrmb == 0.0 ) {
        magma_cscal( x->num_rows, MAGMA_C_ZERO, x->dval, 1, queue );
        info = MAGMA_SUCCESS;
        goto cleanup;
    }

    // t = 0
    // make t twice as large to contain both dt and dr
    ldd = magma_roundup( b.num_rows, 32 );
    CHECK( magma_cvinit( &dt, Magma_DEV, ldd, 2, c_zero, queue ));
    dt.num_rows = b.num_rows;
    dt.num_cols = 1;
    dt.nnz = dt.num_rows;

    // redirect the dr.dval to the second part of dt
    CHECK( magma_cvinit( &dr, Magma_DEV, b.num_rows, 1, c_zero, queue ));
    magma_free( dr.dval );
    dr.dval = dt.dval + ldd;

    // r = b - A x
    CHECK( magma_cresidualvec( A, b, *x, &dr, &nrmr, queue ));
    
    // |r|
    solver_par->init_res = nrmr;
    solver_par->final_res = solver_par->init_res;
    solver_par->iter_res = solver_par->init_res;
    if ( solver_par->verbose > 0 ) {
        solver_par->res_vec[0] = (real_Double_t)nrmr;
    }

    // check if the initial guess is good enough
    if ( nrmr <= solver_par->atol ||
        nrmr/nrmb <= solver_par->rtol ) {
        info = MAGMA_SUCCESS;
        goto cleanup;
    }

    // P = randn(n, s)
    // P = ortho(P)
//---------------------------------------
    // P = 0.0
    CHECK( magma_cvinit( &dP, Magma_CPU, A.num_cols, s, c_zero, queue ));

    // P = randn(n, s)
    distr = 3;        // 1 = unif (0,1), 2 = unif (-1,1), 3 = normal (0,1) 
    dof = dP.num_rows * dP.num_cols;
    lapackf77_clarnv( &distr, iseed, &dof, dP.val );

    // transfer P to device
    CHECK( magma_cmtransfer( dP, &dP1, Magma_CPU, Magma_DEV, queue ));
    magma_cmfree( &dP, queue );

    // P = ortho(P1)
    if ( dP1.num_cols > 1 ) {
        // P = magma_cqr(P1), QR factorization
        CHECK( magma_cqr( dP1.num_rows, dP1.num_cols, dP1, dP1.ld, &dP, NULL, queue ));
    } else {
        // P = P1 / |P1|
        nrm = magma_scnrm2( dof, dP1.dval, 1, queue );
        nrm = 1.0 / nrm;
        magma_csscal( dof, nrm, dP1.dval, 1, queue );
        CHECK( magma_cmtransfer( dP1, &dP, Magma_DEV, Magma_DEV, queue ));
    }
    magma_cmfree( &dP1, queue );
//---------------------------------------

    // allocate memory for the scalar products
    CHECK( magma_cmalloc_pinned( &hskp, 5 ));
    CHECK( magma_cvinit( &dskp, Magma_DEV, 4, 1, c_zero, queue ));

    CHECK( magma_cmalloc_pinned( &halpha, s ));
    CHECK( magma_cvinit( &dalpha, Magma_DEV, s, 1, c_zero, queue ));

    CHECK( magma_cmalloc_pinned( &hbeta, s ));
    CHECK( magma_cvinit( &dbeta, Magma_DEV, s, 1, c_zero, queue ));
    
    // workspace for merged dot product
    CHECK( magma_cmalloc( &d1, max(2, s) * b.num_rows ));
    CHECK( magma_cmalloc( &d2, max(2, s) * b.num_rows ));

    // smoothing enabled
    if ( smoothing > 0 ) {
        // set smoothing solution vector
        CHECK( magma_cmtransfer( *x, &dxs, Magma_DEV, Magma_DEV, queue ));

        // tt = 0
        // make tt twice as large to contain both dtt and drs
        ldd = magma_roundup( b.num_rows, 32 );
        CHECK( magma_cvinit( &dtt, Magma_DEV, ldd, 2, c_zero, queue ));
        dtt.num_rows = dr.num_rows;
        dtt.num_cols = 1;
        dtt.nnz = dtt.num_rows;

        // redirect the drs.dval to the second part of dtt
        CHECK( magma_cvinit( &drs, Magma_DEV, dr.num_rows, 1, c_zero, queue ));
        magma_free( drs.dval );
        drs.dval = dtt.dval + ldd;

        // set smoothing residual vector
        magma_ccopyvector( dr.num_rows, dr.dval, 1, drs.dval, 1, queue );
    }

    // G(n,s) = 0
    if ( s > 1 ) {
        ldd = magma_roundup( A.num_rows, 32 );
        CHECK( magma_cvinit( &dG, Magma_DEV, ldd, s, c_zero, queue ));
        dG.num_rows = A.num_rows;
    } else {
        CHECK( magma_cvinit( &dG, Magma_DEV, A.num_rows, s, c_zero, queue ));
    }

    // dGcol represents a single column of dG; its data pointer is set inside the loop
    CHECK( magma_cvinit( &dGcol, Magma_DEV, dG.num_rows, 1, c_zero, queue ));
    magma_free( dGcol.dval );

    // U(n,s) = 0
    if ( s > 1 ) {
        ldd = magma_roundup( A.num_cols, 32 );
        CHECK( magma_cvinit( &dU, Magma_DEV, ldd, s, c_zero, queue ));
        dU.num_rows = A.num_cols;
    } else {
        CHECK( magma_cvinit( &dU, Magma_DEV, A.num_cols, s, c_zero, queue ));
    }

    // M(s,s) = I
    CHECK( magma_cvinit( &dM, Magma_DEV, s, s, c_zero, queue ));
    CHECK( magma_cmalloc_pinned( &hMdiag, s ));
    magmablas_claset( MagmaFull, dM.num_rows, dM.num_cols, c_zero, c_one, dM.dval, dM.ld, queue );

    // f = 0
    CHECK( magma_cvinit( &df, Magma_DEV, dP.num_cols, 1, c_zero, queue ));

    // c = 0
    CHECK( magma_cvinit( &dc, Magma_DEV, dM.num_cols, 1, c_zero, queue ));

    // v = r
    CHECK( magma_cmtransfer( dr, &dv, Magma_DEV, Magma_DEV, queue ));

    // lu = 0
    CHECK( magma_cvinit( &dlu, Magma_DEV, dr.num_rows, 1, c_zero, queue ));

    //--------------START TIME---------------
    // chronometry
    tempo1 = magma_sync_wtime( queue );
    if ( solver_par->verbose > 0 ) {
        solver_par->timing[0] = 0.0;
    }

    om = MAGMA_C_ONE;
    gamma = MAGMA_C_ZERO;
    innerflag = 0;

    // start iteration
    do
    {
        solver_par->numiter++;

        // new RHS for small systems
        // f = P' r
        // Q1
        magma_cgemvmdot_shfl( dP.num_rows, dP.num_cols, dP.dval, dr.dval, d1, d2, df.dval, queues[1] );

        // skp[4] = f(k)
        // Q1
        magma_cgetvector_async( 1, df.dval, 1, &hskp[4], 1, queues[1] );

        // c(k:s) = f(k:s)
        // Q1
        magma_ccopyvector_async( s, df.dval, 1, dc.dval, 1, queues[1] );

        // c(k:s) = M(k:s,k:s) \ f(k:s)
        // Q1
        magma_ctrsv( MagmaLower, MagmaNoTrans, MagmaNonUnit, s, dM.dval, dM.ld, dc.dval, 1, queues[1] );

        // shadow space loop
        for ( k = 0; k < s; ++k ) {
            sk = s - k;
            dGcol.dval = dG.dval + k * dG.ld;

            // v = r - G(:,k:s) c(k:s)
            // Q1
            magmablas_cgemv( MagmaNoTrans, dG.num_rows, sk, c_n_one, dGcol.dval, dG.ld, &dc.dval[k], 1, c_one, dv.dval, 1, queues[1] );

            // preconditioning operation 
            // v = L \ v;
            // v = U \ v;
            // Q1
            CHECK( magma_c_applyprecond_left( MagmaNoTrans, A, dv, &dlu, precond_par, queues[1] )); 
            CHECK( magma_c_applyprecond_right( MagmaNoTrans, A, dlu, &dv, precond_par, queues[1] )); 

            // sync Q0 --> U(:,k) = U(:,k) - U(:,1:k) * alpha(1:k)
            magma_queue_sync( queues[0] );

            // U(:,k) = om * v + U(:,k:s) c(k:s)
            // Q1
            magmablas_cgemv( MagmaNoTrans, dU.num_rows, sk, c_one, &dU.dval[k*dU.ld], dU.ld, &dc.dval[k], 1, om, dv.dval, 1, queues[1] );

            // G(:,k) = A U(:,k)
            // Q1
            CHECK( magma_c_spmv( c_one, A, dv, c_zero, dGcol, queues[1] ));
            solver_par->spmv_count++;

            // bi-orthogonalize the new basis vectors
            for ( i = 0; i < k; ++i ) {
                // alpha = P(:,i)' G(:,k)
                // Q1
                halpha[i] = magma_cdotc( dP.num_rows, &dP.dval[i*dP.ld], 1, dGcol.dval, 1, queues[1] );
                // implicit sync Q1 --> alpha = P(:,i)' G(:,k) 

                // alpha = alpha / M(i,i)
                halpha[i] = halpha[i] / hMdiag[i];
                    
                // G(:,k) = G(:,k) - alpha * G(:,i)
                // Q1
                magma_caxpy( dG.num_rows, -halpha[i], &dG.dval[i*dG.ld], 1, dGcol.dval, 1, queues[1] );
            }

            // sync Q1 --> compute new G, skp[4] = f(k)
            magma_queue_sync( queues[1] );

            // new column of M = P'G, first k-1 entries are zero
            // M(k:s,k) = P(:,k:s)' G(:,k)
            // Q2
            magma_cgemvmdot_shfl( dP.num_rows, sk, &dP.dval[k*dP.ld], dGcol.dval, d1, d2, &dM.dval[k*dM.ld+k], queues[2] );

            // U(:,k) = v
            // Q0
            magma_ccopyvector_async( dU.num_rows, dv.dval, 1, &dU.dval[k*dU.ld], 1, queues[0] );

            // non-first s iteration
            if ( k > 0 ) {
                // alpha = dalpha
                // Q0
                magma_csetvector_async( k, halpha, 1, dalpha.dval, 1, queues[0] );

                // U update outside of loop using GEMV
                // U(:,k) = U(:,k) - U(:,1:k) * alpha(1:k)
                // Q0
                magmablas_cgemv( MagmaNoTrans, dU.num_rows, k, c_n_one, dU.dval, dU.ld, dalpha.dval, 1, c_one, &dU.dval[k*dU.ld], 1, queues[0] );
            }

            // Mdiag(k) = M(k,k)
            // Q2
            magma_cgetvector( 1, &dM.dval[k*dM.ld+k], 1, &hMdiag[k], 1, queues[2] );
            // implicit sync Q2 --> Mdiag(k) = M(k,k)

            // check M(k,k) == 0
            if ( MAGMA_C_EQUAL(hMdiag[k], MAGMA_C_ZERO) ) {
                innerflag = 1;
                info = MAGMA_DIVERGENCE;
                break;
            }

            // beta = f(k) / M(k,k)
            hbeta[k] = hskp[4] / hMdiag[k];

            // check for nan
            if ( magma_c_isnan( hbeta[k] ) || magma_c_isinf( hbeta[k] )) {
                innerflag = 1;
                info = MAGMA_DIVERGENCE;
                break;
            }

            // non-last s iteration 
            if ( (k + 1) < s ) {
                // f(k+1:s) = f(k+1:s) - beta * M(k+1:s,k)
                // Q1
                magma_caxpy( sk-1, -hbeta[k], &dM.dval[k*dM.ld+(k+1)], 1, &df.dval[k+1], 1, queues[1] );

                // c(k+1:s) = f(k+1:s)
                // Q1
                magma_ccopyvector_async( sk-1, &df.dval[k+1], 1, &dc.dval[k+1], 1, queues[1] );

                // c(k+1:s) = M(k+1:s,k+1:s) \ f(k+1:s)
                // Q1
                magma_ctrsv( MagmaLower, MagmaNoTrans, MagmaNonUnit, sk-1, &dM.dval[(k+1)*dM.ld+(k+1)], dM.ld, &dc.dval[k+1], 1, queues[1] );

                // skp[4] = f(k+1)
                // Q1
                magma_cgetvector_async( 1, &df.dval[k+1], 1, &hskp[4], 1, queues[1] );
            }

            // r = r - beta * G(:,k)
            // Q2
            magma_caxpy( dr.num_rows, -hbeta[k], dGcol.dval, 1, dr.dval, 1, queues[2] );

            // smoothing disabled
            if ( smoothing <= 0 ) {
                // |r|
                // Q2
                nrmr = magma_scnrm2( dr.num_rows, dr.dval, 1, queues[2] );           
                // implicit sync Q2 --> |r|

                // v = r
                // Q1
                magma_ccopyvector_async( dr.num_rows, dr.dval, 1, dv.dval, 1, queues[1] );

            // smoothing enabled
            } else {
                // x = x + beta * U(:,k)
                // Q0
                magma_caxpy( x->num_rows, hbeta[k], &dU.dval[k*dU.ld], 1, x->dval, 1, queues[0] );

                // smoothing operation
//---------------------------------------
                // t = rs - r
                // Q2
                magma_cidr_smoothing_1( drs.num_rows, drs.num_cols, drs.dval, dr.dval, dtt.dval, queues[2] );

                // t't
                // t'rs
                // Q2
                CHECK( magma_cgemvmdot_shfl( dt.ld, 2, dtt.dval, dtt.dval, d1, d2, &dskp.dval[2], queues[2] ));

                // skp[2-3] = dskp[2-3]
                // Q2
                magma_cgetvector( 2, &dskp.dval[2], 1, &hskp[2], 1, queues[2] );
                // implicit sync Q2 --> skp = dskp

                // gamma = (t' * rs) / (t' * t)
                gamma = hskp[3] / hskp[2];
                
                // xs = xs - gamma * (xs - x) 
                // Q0
                magma_cidr_smoothing_2( dxs.num_rows, dxs.num_cols, -gamma, x->dval, dxs.dval, queues[0] );

                // v = r
                // Q1
                magma_ccopyvector_async( dr.num_rows, dr.dval, 1, dv.dval, 1, queues[1] );

                // rs = rs - gamma * t 
                // Q2
                magma_caxpy( drs.num_rows, -gamma, dtt.dval, 1, drs.dval, 1, queues[2] );

                // |rs|
                // Q2
                nrmr = magma_scnrm2( drs.num_rows, drs.dval, 1, queues[2] );       
                // implicit sync Q2 --> |r|
//---------------------------------------
            }

            // store current timing and residual
            if ( solver_par->verbose > 0 ) {
                tempo2 = magma_sync_wtime( queue );
                if ( (solver_par->numiter) % solver_par->verbose == 0 ) {
                    solver_par->res_vec[(solver_par->numiter) / solver_par->verbose]
                            = (real_Double_t)nrmr;
                    solver_par->timing[(solver_par->numiter) / solver_par->verbose]
                            = (real_Double_t)tempo2 - tempo1;
                }
            }

            // check convergence or iteration limit
            if ( nrmr <= solver_par->atol ||
                nrmr/nrmb <= solver_par->rtol ) { 
                s = k + 1; // for the x-update outside the loop
                innerflag = 2;
                info = MAGMA_SUCCESS;
                break;
            }

        }

        // smoothing disabled
        if ( smoothing <= 0 && innerflag != 1 ) {
            // dbeta(1:s) = beta(1:s)
            // Q0
            magma_csetvector_async( s, hbeta, 1, dbeta.dval, 1, queues[0] );

            // x = x + U(:,1:s) * beta(1:s)
            // Q0
            magmablas_cgemv( MagmaNoTrans, dU.num_rows, s, c_one, dU.dval, dU.ld, dbeta.dval, 1, c_one, x->dval, 1, queues[0] );
        }

        // check convergence or iteration limit or invalid result of inner loop
        if ( innerflag > 0 ) {
            break;
        }

        // preconditioning operation 
        // v = L \ v;
        // v = U \ v;
        // Q2
        CHECK( magma_c_applyprecond_left( MagmaNoTrans, A, dv, &dlu, precond_par, queues[2] )); 
        CHECK( magma_c_applyprecond_right( MagmaNoTrans, A, dlu, &dv, precond_par, queues[2] )); 

        // t = A v
        // Q2
        CHECK( magma_c_spmv( c_one, A, dv, c_zero, dt, queues[2] ));
        solver_par->spmv_count++;

        // computation of a new omega
//---------------------------------------
        // t't
        // t'r
        // Q2
        CHECK( magma_cgemvmdot_shfl( dt.ld, 2, dt.dval, dt.dval, d1, d2, dskp.dval, queues[2] ));

        // skp[0-1] = dskp[0-1]
        // Q2
        magma_cgetvector( 2, dskp.dval, 1, hskp, 1, queues[2] );
        // implicit sync Q2 --> skp = dskp

        // |t|
        nrmt = magma_ssqrt( MAGMA_C_REAL(hskp[0]) );

        // rho = abs((t' * r) / (|t| * |r|))
        rho = MAGMA_D_ABS( MAGMA_C_REAL(hskp[1]) / (nrmt * nrmr) );

        // om = (t' * r) / (|t| * |t|)
        om = hskp[1] / hskp[0]; 
        if ( rho < angle ) {
            om = (om * angle) / rho;
        }
//---------------------------------------
        if ( MAGMA_C_EQUAL(om, MAGMA_C_ZERO) ) {
            info = MAGMA_DIVERGENCE;
            break;
        }

        // sync Q1 --> v = r
        magma_queue_sync( queues[1] );

        // r = r - om * t
        // Q2
        magma_caxpy( dr.num_rows, -om, dt.dval, 1, dr.dval, 1, queues[2] );

        // x = x + om * v
        // Q0
        magma_caxpy( x->num_rows, om, dv.dval, 1, x->dval, 1, queues[0] );

        // smoothing disabled
        if ( smoothing <= 0 ) {
            // |r|
            // Q2
            nrmr = magma_scnrm2( dr.num_rows, dr.dval, 1, queues[2] );           
            // implicit sync Q2 --> |r|

            // v = r
            // Q1
            magma_ccopyvector_async( dr.num_rows, dr.dval, 1, dv.dval, 1, queues[1] );

        // smoothing enabled
        } else {
            // smoothing operation
//---------------------------------------
            // t = rs - r
            // Q2
            magma_cidr_smoothing_1( drs.num_rows, drs.num_cols, drs.dval, dr.dval, dtt.dval, queues[2] );

            // t't
            // t'rs
            // Q2
            CHECK( magma_cgemvmdot_shfl( dt.ld, 2, dtt.dval, dtt.dval, d1, d2, &dskp.dval[2], queues[2] ));

            // skp[2-3] = dskp[2-3]
            // Q2
            magma_cgetvector( 2, &dskp.dval[2], 1, &hskp[2], 1, queues[2] );
            // implicit sync Q2 --> skp = dskp

            // gamma = (t' * rs) / (t' * t)
            gamma = hskp[3] / hskp[2];

            // xs = xs - gamma * (xs - x) 
            // Q0
            magma_cidr_smoothing_2( dxs.num_rows, dxs.num_cols, -gamma, x->dval, dxs.dval, queues[0] );

            // v = r
            // Q1
            magma_ccopyvector_async( dr.num_rows, dr.dval, 1, dv.dval, 1, queues[1] );

            // rs = rs - gamma * (rs - r) 
            // Q2
            magma_caxpy( drs.num_rows, -gamma, dtt.dval, 1, drs.dval, 1, queues[2] );

            // |rs|
            // Q2
            nrmr = magma_scnrm2( drs.num_rows, drs.dval, 1, queues[2] );           
            // implicit sync Q2 --> |r|
//---------------------------------------
        }

        // store current timing and residual
        if ( solver_par->verbose > 0 ) {
            tempo2 = magma_sync_wtime( queue );
            magma_queue_sync( queue );
            if ( (solver_par->numiter) % solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter) / solver_par->verbose]
                        = (real_Double_t)nrmr;
                solver_par->timing[(solver_par->numiter) / solver_par->verbose]
                        = (real_Double_t)tempo2 - tempo1;
            }
        }

        // check convergence or iteration limit
        if ( nrmr <= solver_par->atol ||
            nrmr/nrmb <= solver_par->rtol ) { 
            info = MAGMA_SUCCESS;
            break;
        }
    }
    while ( solver_par->numiter + 1 <= solver_par->maxiter );

    // sync all queues
    for ( q = 0; q < nqueues; q++ ) {
        magma_queue_sync( queues[q] );
    }

    // smoothing enabled
    if ( smoothing > 0 ) {
        // x = xs
        magma_ccopyvector_async( x->num_rows, dxs.dval, 1, x->dval, 1, queue );

        // r = rs
        magma_ccopyvector_async( dr.num_rows, drs.dval, 1, dr.dval, 1, queue );
    }

    // get last iteration timing
    tempo2 = magma_sync_wtime( queue );
    magma_queue_sync( queue );
    solver_par->runtime = (real_Double_t)tempo2 - tempo1;
//--------------STOP TIME----------------

    // get final stats
    solver_par->iter_res = nrmr;
    CHECK( magma_cresidualvec( A, b, *x, &dr, &residual, queue ));
    solver_par->final_res = residual;

    // set solver conclusion
    if ( info != MAGMA_SUCCESS && info != MAGMA_DIVERGENCE ) {
        if ( solver_par->init_res > solver_par->final_res ) {
            info = MAGMA_SLOW_CONVERGENCE;
        }
    }


cleanup:
    // free resources
    // sync all queues, destroy additional queues
    magma_queue_sync( queues[0] );
    for ( q = 1; q < nqueues; q++ ) {
        magma_queue_sync( queues[q] );
        magma_queue_destroy( queues[q] );
    }

    // smoothing enabled
    if ( smoothing > 0 ) {
        drs.dval = NULL;  // needed because its pointer is redirected to dtt
        magma_cmfree( &dxs, queue );
        magma_cmfree( &drs, queue ); 
        magma_cmfree( &dtt, queue );
    }
    dr.dval = NULL;       // needed because its pointer is redirected to dt
    dGcol.dval = NULL;    // needed because its pointer is redirected to dG
    magma_cmfree( &dr, queue );
    magma_cmfree( &dP, queue );
    magma_cmfree( &dP1, queue );
    magma_cmfree( &dG, queue );
    magma_cmfree( &dGcol, queue );
    magma_cmfree( &dU, queue );
    magma_cmfree( &dM, queue );
    magma_cmfree( &df, queue );
    magma_cmfree( &dt, queue );
    magma_cmfree( &dc, queue );
    magma_cmfree( &dv, queue );
    magma_cmfree( &dlu, queue );
    magma_cmfree( &dskp, queue );
    magma_cmfree( &dalpha, queue );
    magma_cmfree( &dbeta, queue );
    magma_free_pinned( hMdiag );
    magma_free_pinned( hskp );
    magma_free_pinned( halpha );
    magma_free_pinned( hbeta );
    magma_free( d1 );
    magma_free( d2 );

    solver_par->info = info;
    return info;
    /* magma_cpidr_strms */
}
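Usage note (added for illustration): magma_cpidr_strms implements preconditioned IDR(s) with merged kernels and multiple queues, and, as the hack noted in the code, it reuses solver_par->restart as the shadow-space dimension s. The driver below is a minimal hypothetical sketch, not part of the xulunfan/magma project: it assumes the stock MAGMA-sparse helpers magma_csolverinfo_init() and magma_c_csr_mtx() with their usual signatures, disables preconditioning via Magma_NONE, and uses a vector of ones as the right-hand side; adapt names and settings to the actual build.

#include <cstdio>
#include "magma_v2.h"
#include "magmasparse.h"

// Hypothetical driver: read a matrix and solve A x = b with magma_cpidr_strms.
int main( int argc, char **argv )
{
    if ( argc < 2 ) { fprintf( stderr, "usage: %s <matrix.mtx>\n", argv[0] ); return 1; }

    magma_init();
    magma_queue_t queue = NULL;
    magma_queue_create( 0, &queue );

    magma_c_matrix A = {Magma_CSR}, dA = {Magma_CSR};
    magma_c_matrix b = {Magma_CSR}, x = {Magma_CSR};
    magma_c_solver_par solver_par;
    magma_c_preconditioner precond_par;

    // default solver/preconditioner settings (assumed stock helper)
    magma_csolverinfo_init( &solver_par, &precond_par, queue );
    precond_par.solver = Magma_NONE;   // no preconditioning in this sketch
    solver_par.restart = 4;            // reused as the shadow-space dimension s
    solver_par.maxiter = 1000;
    solver_par.rtol    = 1e-6;

    // host matrix from a Matrix Market file (assumed reader), simple RHS and zero initial guess
    magma_c_csr_mtx( &A, argv[1], queue );
    magma_cmtransfer( A, &dA, Magma_CPU, Magma_DEV, queue );
    magma_cvinit( &b, Magma_DEV, A.num_rows, 1, MAGMA_C_ONE,  queue );
    magma_cvinit( &x, Magma_DEV, A.num_cols, 1, MAGMA_C_ZERO, queue );

    magma_int_t info = magma_cpidr_strms( dA, b, &x, &solver_par, &precond_par, queue );
    printf( "info = %lld  iterations = %lld  final residual = %e\n",
            (long long) info, (long long) solver_par.numiter, (double) solver_par.final_res );

    magma_cmfree( &dA, queue );  magma_cmfree( &A, queue );
    magma_cmfree( &b, queue );   magma_cmfree( &x, queue );
    magma_queue_destroy( queue );
    magma_finalize();
    return (info == MAGMA_SUCCESS) ? 0 : 1;
}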
Code example #2
extern "C" magma_int_t
magma_cidr(
    magma_c_matrix A, magma_c_matrix b, magma_c_matrix *x,
    magma_c_solver_par *solver_par,
    magma_queue_t queue )
{
    magma_int_t info = MAGMA_NOTCONVERGED;

    // prepare solver feedback
    solver_par->solver = Magma_IDR;
    solver_par->numiter = 0;
    solver_par->spmv_count = 0;
    solver_par->init_res = 0.0;
    solver_par->final_res = 0.0;
    solver_par->iter_res = 0.0;
    solver_par->runtime = 0.0;

    // constants
    const magmaFloatComplex c_zero = MAGMA_C_ZERO;
    const magmaFloatComplex c_one = MAGMA_C_ONE;
    const magmaFloatComplex c_n_one = MAGMA_C_NEG_ONE;

    // internal user parameters
    const magma_int_t smoothing = 1;   // 0 = disable, 1 = enable
    const float angle = 0.7;          // [0-1]

    // local variables
    magma_int_t iseed[4] = {0, 0, 0, 1};
    magma_int_t dof;
    magma_int_t s;
    magma_int_t distr;
    magma_int_t k, i, sk;
    magma_int_t innerflag;
    float residual;
    float nrm;
    float nrmb;
    float nrmr;
    float nrmt;
    float rho;
    magmaFloatComplex om;
    magmaFloatComplex tt;
    magmaFloatComplex tr;
    magmaFloatComplex gamma;
    magmaFloatComplex alpha;
    magmaFloatComplex mkk;
    magmaFloatComplex fk;

    // matrices and vectors
    magma_c_matrix dxs = {Magma_CSR};
    magma_c_matrix dr = {Magma_CSR}, drs = {Magma_CSR};
    magma_c_matrix dP = {Magma_CSR}, dP1 = {Magma_CSR};
    magma_c_matrix dG = {Magma_CSR};
    magma_c_matrix dU = {Magma_CSR};
    magma_c_matrix dM = {Magma_CSR};
    magma_c_matrix df = {Magma_CSR};
    magma_c_matrix dt = {Magma_CSR};
    magma_c_matrix dc = {Magma_CSR};
    magma_c_matrix dv = {Magma_CSR};
    magma_c_matrix dbeta = {Magma_CSR}, hbeta = {Magma_CSR};

    // chronometry
    real_Double_t tempo1, tempo2;

    // initial s space
    // TODO: add option for 's' (shadow space number)
    // Hack: uses the '--restart' option as the shadow space number.
    //       This is fragile because the default value of the restart option is used to detect
    //       whether the user provided a custom restart. If the default restart value is ever
    //       changed, the code will assume the user set it (unless the default value is also
    //       updated in the 'if' statement below).
    s = 1;
    if ( solver_par->restart != 50 ) {
        if ( solver_par->restart > A.num_cols ) {
            s = A.num_cols;
        } else {
            s = solver_par->restart;
        }
    }
    solver_par->restart = s;

    // set max iterations
    solver_par->maxiter = min( 2 * A.num_cols, solver_par->maxiter );

    // check if matrix A is square
    if ( A.num_rows != A.num_cols ) {
        //printf("Matrix A is not square.\n");
        info = MAGMA_ERR_NOT_SUPPORTED;
        goto cleanup;
    }

    // |b|
    nrmb = magma_scnrm2( b.num_rows, b.dval, 1, queue );
    if ( nrmb == 0.0 ) {
        magma_cscal( x->num_rows, MAGMA_C_ZERO, x->dval, 1, queue );
        info = MAGMA_SUCCESS;
        goto cleanup;
    }

    // r = b - A x
    CHECK( magma_cvinit( &dr, Magma_DEV, b.num_rows, 1, c_zero, queue ));
    CHECK( magma_cresidualvec( A, b, *x, &dr, &nrmr, queue ));
    
    // |r|
    solver_par->init_res = nrmr;
    solver_par->final_res = solver_par->init_res;
    solver_par->iter_res = solver_par->init_res;
    if ( solver_par->verbose > 0 ) {
        solver_par->res_vec[0] = (real_Double_t)nrmr;
    }

    // check if the initial guess is good enough
    if ( nrmr <= solver_par->atol ||
        nrmr/nrmb <= solver_par->rtol ) {
        info = MAGMA_SUCCESS;
        goto cleanup;
    }

    // P = randn(n, s)
    // P = ortho(P)
//---------------------------------------
    // P = 0.0
    CHECK( magma_cvinit( &dP, Magma_CPU, A.num_cols, s, c_zero, queue ));

    // P = randn(n, s)
    distr = 3;        // 1 = unif (0,1), 2 = unif (-1,1), 3 = normal (0,1) 
    dof = dP.num_rows * dP.num_cols;
    lapackf77_clarnv( &distr, iseed, &dof, dP.val );

    // transfer P to device
    CHECK( magma_cmtransfer( dP, &dP1, Magma_CPU, Magma_DEV, queue ));
    magma_cmfree( &dP, queue );

    // P = ortho(P1)
    if ( dP1.num_cols > 1 ) {
        // P = magma_cqr(P1), QR factorization
        CHECK( magma_cqr( dP1.num_rows, dP1.num_cols, dP1, dP1.ld, &dP, NULL, queue ));
    } else {
        // P = P1 / |P1|
        nrm = magma_scnrm2( dof, dP1.dval, 1, queue );
        nrm = 1.0 / nrm;
        magma_csscal( dof, nrm, dP1.dval, 1, queue );
        CHECK( magma_cmtransfer( dP1, &dP, Magma_DEV, Magma_DEV, queue ));
    }
    magma_cmfree( &dP1, queue );
//---------------------------------------

    // allocate memory for the scalar products
    CHECK( magma_cvinit( &hbeta, Magma_CPU, s, 1, c_zero, queue ));
    CHECK( magma_cvinit( &dbeta, Magma_DEV, s, 1, c_zero, queue ));

    // smoothing enabled
    if ( smoothing > 0 ) {
        // set smoothing solution vector
        CHECK( magma_cmtransfer( *x, &dxs, Magma_DEV, Magma_DEV, queue ));

        // set smoothing residual vector
        CHECK( magma_cmtransfer( dr, &drs, Magma_DEV, Magma_DEV, queue ));
    }

    // G(n,s) = 0
    CHECK( magma_cvinit( &dG, Magma_DEV, A.num_cols, s, c_zero, queue ));

    // U(n,s) = 0
    CHECK( magma_cvinit( &dU, Magma_DEV, A.num_cols, s, c_zero, queue ));

    // M(s,s) = I
    CHECK( magma_cvinit( &dM, Magma_DEV, s, s, c_zero, queue ));
    magmablas_claset( MagmaFull, s, s, c_zero, c_one, dM.dval, s, queue );

    // f = 0
    CHECK( magma_cvinit( &df, Magma_DEV, dP.num_cols, 1, c_zero, queue ));

    // t = 0
    CHECK( magma_cvinit( &dt, Magma_DEV, dr.num_rows, 1, c_zero, queue ));

    // c = 0
    CHECK( magma_cvinit( &dc, Magma_DEV, dM.num_cols, 1, c_zero, queue ));

    // v = 0
    CHECK( magma_cvinit( &dv, Magma_DEV, dr.num_rows, 1, c_zero, queue ));

    //--------------START TIME---------------
    // chronometry
    tempo1 = magma_sync_wtime( queue );
    if ( solver_par->verbose > 0 ) {
        solver_par->timing[0] = 0.0;
    }

    om = MAGMA_C_ONE;
    innerflag = 0;

    // start iteration
    do
    {
        solver_par->numiter++;
    
        // new RHS for small systems
        // f = P' r
        magmablas_cgemv( MagmaConjTrans, dP.num_rows, dP.num_cols, c_one, dP.dval, dP.ld, dr.dval, 1, c_zero, df.dval, 1, queue );

        // shadow space loop
        for ( k = 0; k < s; ++k ) {
            sk = s - k;
    
            // c(k:s) = M(k:s,k:s) \ f(k:s)
            magma_ccopyvector( sk, &df.dval[k], 1, &dc.dval[k], 1, queue );
            magma_ctrsv( MagmaLower, MagmaNoTrans, MagmaNonUnit, sk, &dM.dval[k*dM.ld+k], dM.ld, &dc.dval[k], 1, queue );

            // v = r - G(:,k:s) c(k:s)
            magma_ccopyvector( dr.num_rows, dr.dval, 1, dv.dval, 1, queue );
            magmablas_cgemv( MagmaNoTrans, dG.num_rows, sk, c_n_one, &dG.dval[k*dG.ld], dG.ld, &dc.dval[k], 1, c_one, dv.dval, 1, queue );

            // U(:,k) = om * v + U(:,k:s) c(k:s)
            magmablas_cgemv( MagmaNoTrans, dU.num_rows, sk, c_one, &dU.dval[k*dU.ld], dU.ld, &dc.dval[k], 1, om, dv.dval, 1, queue );
            magma_ccopyvector( dU.num_rows, dv.dval, 1, &dU.dval[k*dU.ld], 1, queue );

            // G(:,k) = A U(:,k)
            // route the SpMV through dt so the input vector dv is not aliased with the output
            CHECK( magma_c_spmv( c_one, A, dv, c_zero, dt, queue ));
            solver_par->spmv_count++;
            magma_ccopyvector( dG.num_rows, dt.dval, 1, &dG.dval[k*dG.ld], 1, queue );

            // bi-orthogonalize the new basis vectors
            for ( i = 0; i < k; ++i ) {
                // alpha = P(:,i)' G(:,k)
                alpha = magma_cdotc( dP.num_rows, &dP.dval[i*dP.ld], 1, &dG.dval[k*dG.ld], 1, queue );

                // alpha = alpha / M(i,i)
                magma_cgetvector( 1, &dM.dval[i*dM.ld+i], 1, &mkk, 1, queue );
                alpha = alpha / mkk;

                // G(:,k) = G(:,k) - alpha * G(:,i)
                magma_caxpy( dG.num_rows, -alpha, &dG.dval[i*dG.ld], 1, &dG.dval[k*dG.ld], 1, queue );

                // U(:,k) = U(:,k) - alpha * U(:,i)
                magma_caxpy( dU.num_rows, -alpha, &dU.dval[i*dU.ld], 1, &dU.dval[k*dU.ld], 1, queue );
            }

            // new column of M = P'G, first k-1 entries are zero
            // M(k:s,k) = P(:,k:s)' G(:,k)
            magmablas_cgemv( MagmaConjTrans, dP.num_rows, sk, c_one, &dP.dval[k*dP.ld], dP.ld, &dG.dval[k*dG.ld], 1, c_zero, &dM.dval[k*dM.ld+k], 1, queue );

            // check M(k,k) == 0
            magma_cgetvector( 1, &dM.dval[k*dM.ld+k], 1, &mkk, 1, queue );
            if ( MAGMA_C_EQUAL(mkk, MAGMA_C_ZERO) ) {
                innerflag = 1;
                info = MAGMA_DIVERGENCE;
                break;
            }

            // beta = f(k) / M(k,k)
            magma_cgetvector( 1, &df.dval[k], 1, &fk, 1, queue );
            hbeta.val[k] = fk / mkk;

            // check for nan
            if ( magma_c_isnan( hbeta.val[k] ) || magma_c_isinf( hbeta.val[k] )) {
                innerflag = 1;
                info = MAGMA_DIVERGENCE;
                break;
            }

            // r = r - beta * G(:,k)
            magma_caxpy( dr.num_rows, -hbeta.val[k], &dG.dval[k*dG.ld], 1, dr.dval, 1, queue );

            // smoothing disabled
            if ( smoothing <= 0 ) {
                // |r|
                nrmr = magma_scnrm2( dr.num_rows, dr.dval, 1, queue );

            // smoothing enabled
            } else {
                // x = x + beta * U(:,k)
                magma_caxpy( x->num_rows, hbeta.val[k], &dU.dval[k*dU.ld], 1, x->dval, 1, queue );

                // smoothing operation
//---------------------------------------
                // t = rs - r
                magma_ccopyvector( drs.num_rows, drs.dval, 1, dt.dval, 1, queue );
                magma_caxpy( dt.num_rows, c_n_one, dr.dval, 1, dt.dval, 1, queue );

                // t't
                // t'rs 
                tt = magma_cdotc( dt.num_rows, dt.dval, 1, dt.dval, 1, queue );
                tr = magma_cdotc( dt.num_rows, dt.dval, 1, drs.dval, 1, queue );

                // gamma = (t' * rs) / (t' * t)
                gamma = tr / tt;

                // rs = rs - gamma * (rs - r) 
                magma_caxpy( drs.num_rows, -gamma, dt.dval, 1, drs.dval, 1, queue );

                // xs = xs - gamma * (xs - x) 
                magma_ccopyvector( dxs.num_rows, dxs.dval, 1, dt.dval, 1, queue );
                magma_caxpy( dt.num_rows, c_n_one, x->dval, 1, dt.dval, 1, queue );
                magma_caxpy( dxs.num_rows, -gamma, dt.dval, 1, dxs.dval, 1, queue );

                // |rs|
                nrmr = magma_scnrm2( drs.num_rows, drs.dval, 1, queue );           
//---------------------------------------
            }

            // store current timing and residual
            if ( solver_par->verbose > 0 ) {
                tempo2 = magma_sync_wtime( queue );
                if ( (solver_par->numiter) % solver_par->verbose == 0 ) {
                    solver_par->res_vec[(solver_par->numiter) / solver_par->verbose]
                            = (real_Double_t)nrmr;
                    solver_par->timing[(solver_par->numiter) / solver_par->verbose]
                            = (real_Double_t)tempo2 - tempo1;
                }
            }

            // check convergence
            if ( nrmr <= solver_par->atol ||
                nrmr/nrmb <= solver_par->rtol ) {
                s = k + 1; // for the x-update outside the loop
                innerflag = 2;
                info = MAGMA_SUCCESS;
                break;
            }

            // non-last s iteration
            if ( (k + 1) < s ) {
                // f(k+1:s) = f(k+1:s) - beta * M(k+1:s,k)
                magma_caxpy( sk-1, -hbeta.val[k], &dM.dval[k*dM.ld+(k+1)], 1, &df.dval[k+1], 1, queue );
            }

        }

        // smoothing disabled
        if ( smoothing <= 0 && innerflag != 1 ) {
            // update solution approximation x
            // x = x + U(:,1:s) * beta(1:s)
            magma_csetvector( s, hbeta.val, 1, dbeta.dval, 1, queue );
            magmablas_cgemv( MagmaNoTrans, dU.num_rows, s, c_one, dU.dval, dU.ld, dbeta.dval, 1, c_one, x->dval, 1, queue );
        }

        // check convergence or iteration limit or invalid result of inner loop
        if ( innerflag > 0 ) {
            break;
        }

        // t = A v
        // t = A r
        CHECK( magma_c_spmv( c_one, A, dr, c_zero, dt, queue ));
        solver_par->spmv_count++;

        // computation of a new omega
//---------------------------------------
        // |t|
        nrmt = magma_scnrm2( dt.num_rows, dt.dval, 1, queue );

        // t'r 
        tr = magma_cdotc( dt.num_rows, dt.dval, 1, dr.dval, 1, queue );

        // rho = abs((t' * r) / (|t| * |r|))
        rho = MAGMA_D_ABS( MAGMA_C_REAL(tr) / (nrmt * nrmr) );

        // om = (t' * r) / (|t| * |t|)
        om = tr / (nrmt * nrmt);
        if ( rho < angle ) {
            om = (om * angle) / rho;
        }
//---------------------------------------
        if ( MAGMA_C_EQUAL(om, MAGMA_C_ZERO) ) {
            info = MAGMA_DIVERGENCE;
            break;
        }

        // update approximation vector
        // x = x + om * v
        // x = x + om * r
        magma_caxpy( x->num_rows, om, dr.dval, 1, x->dval, 1, queue );

        // update residual vector
        // r = r - om * t
        magma_caxpy( dr.num_rows, -om, dt.dval, 1, dr.dval, 1, queue );

        // smoothing disabled
        if ( smoothing <= 0 ) {
            // residual norm
            nrmr = magma_scnrm2( b.num_rows, dr.dval, 1, queue );

        // smoothing enabled
        } else {
            // smoothing operation
//---------------------------------------
            // t = rs - r
            magma_ccopyvector( drs.num_rows, drs.dval, 1, dt.dval, 1, queue );
            magma_caxpy( dt.num_rows, c_n_one, dr.dval, 1, dt.dval, 1, queue );

            // t't
            // t'rs
            tt = magma_cdotc( dt.num_rows, dt.dval, 1, dt.dval, 1, queue );
            tr = magma_cdotc( dt.num_rows, dt.dval, 1, drs.dval, 1, queue );

            // gamma = (t' * rs) / (|t| * |t|)
            gamma = tr / tt;

            // rs = rs - gamma * (rs - r) 
            magma_caxpy( drs.num_rows, -gamma, dt.dval, 1, drs.dval, 1, queue );

            // xs = xs - gamma * (xs - x) 
            magma_ccopyvector( dxs.num_rows, dxs.dval, 1, dt.dval, 1, queue );
            magma_caxpy( dt.num_rows, c_n_one, x->dval, 1, dt.dval, 1, queue );
            magma_caxpy( dxs.num_rows, -gamma, dt.dval, 1, dxs.dval, 1, queue );

            // |rs|
            nrmr = magma_scnrm2( b.num_rows, drs.dval, 1, queue );           
//---------------------------------------
        }

        // store current timing and residual
        if ( solver_par->verbose > 0 ) {
            tempo2 = magma_sync_wtime( queue );
            if ( (solver_par->numiter) % solver_par->verbose == 0 ) {
                solver_par->res_vec[(solver_par->numiter) / solver_par->verbose]
                        = (real_Double_t)nrmr;
                solver_par->timing[(solver_par->numiter) / solver_par->verbose]
                        = (real_Double_t)tempo2 - tempo1;
            }
        }

        // check convergence
        if ( nrmr <= solver_par->atol ||
            nrmr/nrmb <= solver_par->rtol ) { 
            info = MAGMA_SUCCESS;
            break;
        }
    }
    while ( solver_par->numiter + 1 <= solver_par->maxiter );

    // smoothing enabled
    if ( smoothing > 0 ) {
        // x = xs
        magma_ccopyvector( x->num_rows, dxs.dval, 1, x->dval, 1, queue );

        // r = rs
        magma_ccopyvector( dr.num_rows, drs.dval, 1, dr.dval, 1, queue );
    }

    // get last iteration timing
    tempo2 = magma_sync_wtime( queue );
    solver_par->runtime = (real_Double_t)tempo2 - tempo1;
//--------------STOP TIME----------------

    // get final stats
    solver_par->iter_res = nrmr;
    CHECK( magma_cresidualvec( A, b, *x, &dr, &residual, queue ));
    solver_par->final_res = residual;

    // set solver conclusion
    if ( info != MAGMA_SUCCESS && info != MAGMA_DIVERGENCE ) {
        if ( solver_par->init_res > solver_par->final_res ) {
            info = MAGMA_SLOW_CONVERGENCE;
        }
    }


cleanup:
    // free resources
    // smoothing enabled
    if ( smoothing > 0 ) {
        magma_cmfree( &dxs, queue );
        magma_cmfree( &drs, queue );
    }
    magma_cmfree( &dr, queue );
    magma_cmfree( &dP, queue );
    magma_cmfree( &dP1, queue );
    magma_cmfree( &dG, queue );
    magma_cmfree( &dU, queue );
    magma_cmfree( &dM, queue );
    magma_cmfree( &df, queue );
    magma_cmfree( &dt, queue );
    magma_cmfree( &dc, queue );
    magma_cmfree( &dv, queue );
    magma_cmfree( &dbeta, queue );
    magma_cmfree( &hbeta, queue );

    solver_par->info = info;
    return info;
    /* magma_cidr */
}
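For comparison, magma_cidr is the single-queue, unpreconditioned variant of the same IDR(s) algorithm: the shadow-space loop, residual smoothing, and the omega damping with the angle threshold are the same, but no work is overlapped across streams and no preconditioner is applied. Assuming the same hypothetical setup as in the sketch after example #1 (device matrix dA, vectors b and x, an initialized solver_par and queue), the call reduces to the snippet below; everything other than the names taken from the listing is an assumption.

// Minimal call sketch (assumes dA, b, x, solver_par, queue from the previous sketch).
solver_par.restart = 4;   // again reused as the shadow-space dimension s
magma_int_t info = magma_cidr( dA, b, &x, &solver_par, queue );
printf( "iterations = %lld  final residual = %e\n",
        (long long) solver_par.numiter, (double) solver_par.final_res );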
Code example #3
File: clahef_gpu.cpp  Project: kjbartel/clmagma
/**
    Purpose
    =======

    CLAHEF computes a partial factorization of a complex Hermitian
    matrix A using the Bunch-Kaufman diagonal pivoting method. The
    partial factorization has the form:

    A  =  ( I  U12 ) ( A11  0  ) (  I    0   )  if UPLO = 'U', or:
          ( 0  U22 ) (  0   D  ) ( U12' U22' )

    A  =  ( L11  0 ) (  D   0  ) ( L11' L21' )  if UPLO = 'L'
          ( L21  I ) (  0  A22 ) (  0    I   )

    where the order of D is at most NB. The actual order is returned in
    the argument KB, and is either NB or NB-1, or N if N <= NB.
    Note that U' denotes the conjugate transpose of U.

    CLAHEF is an auxiliary routine called by CHETRF. It uses blocked code
    (calling Level 3 BLAS) to update the submatrix A11 (if UPLO = 'U') or
    A22 (if UPLO = 'L').

    Arguments
    ---------
    @param[in]
    UPLO    CHARACTER
            Specifies whether the upper or lower triangular part of the
            Hermitian matrix A is stored:
      -     = 'U':  Upper triangular
      -     = 'L':  Lower triangular

    @param[in]
    N       INTEGER
            The order of the matrix A.  N >= 0.

    @param[in]
    NB      INTEGER
            The maximum number of columns of the matrix A that should be
            factored.  NB should be at least 2 to allow for 2-by-2 pivot
            blocks.

    @param[out]
    KB      INTEGER
            The number of columns of A that were actually factored.
            KB is either NB-1 or NB, or N if N <= NB.

    @param[in,out]
    A       COMPLEX array, dimension (LDA,N)
            On entry, the Hermitian matrix A.  If UPLO = 'U', the leading
            n-by-n upper triangular part of A contains the upper
            triangular part of the matrix A, and the strictly lower
            triangular part of A is not referenced.  If UPLO = 'L', the
            leading n-by-n lower triangular part of A contains the lower
            triangular part of the matrix A, and the strictly upper
            triangular part of A is not referenced.
            On exit, A contains details of the partial factorization.

    @param[in]
    LDA     INTEGER
            The leading dimension of the array A.  LDA >= max(1,N).

    @param[out]
    ipiv    INTEGER array, dimension (N)
            Details of the interchanges and the block structure of D.
            If UPLO = 'U', only the last KB elements of ipiv are set;
            if UPLO = 'L', only the first KB elements are set.
    \n
            If ipiv(k) > 0, then rows and columns k and ipiv(k) were
            interchanged and D(k,k) is a 1-by-1 diagonal block.
            If UPLO = 'U' and ipiv(k) = ipiv(k-1) < 0, then rows and
            columns k-1 and -ipiv(k) were interchanged and D(k-1:k,k-1:k)
            is a 2-by-2 diagonal block.  If UPLO = 'L' and ipiv(k) =
            ipiv(k+1) < 0, then rows and columns k+1 and -ipiv(k) were
            interchanged and D(k:k+1,k:k+1) is a 2-by-2 diagonal block.

    @param[out]
    W       (workspace) COMPLEX array, dimension (LDW,NB)

    @param[in]
    LDW     INTEGER
            The leading dimension of the array W.  LDW >= max(1,N).

    @param[out]
    INFO    INTEGER
      -     = 0: successful exit
      -     > 0: if INFO = k, D(k,k) is exactly zero.  The factorization
                 has been completed, but the block diagonal matrix D is
                 exactly singular.

    @ingroup magma_chetrf_comp
    ********************************************************************/
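/*
    Illustrative addition (not part of the original source): a hedged sketch of
    how a caller might walk the pivot encoding described above for UPLO = 'L',
    using the 1-based values this routine stores (ipiv[k] = 1+kp, negated for
    2-by-2 blocks). Treat it as a reading of the documentation, not verified code.

        for (magma_int_t k = 0; k < *kb; ) {
            if ( ipiv[k] > 0 ) {
                // 1-by-1 block D(k,k); rows/columns k and ipiv[k]-1 were interchanged
                k += 1;
            } else {
                // 2-by-2 block D(k:k+1,k:k+1); ipiv[k] == ipiv[k+1] < 0 and
                // rows/columns k+1 and -ipiv[k]-1 were interchanged
                k += 2;
            }
        }
*/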
extern "C" magma_int_t
magma_clahef_gpu(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nb, magma_int_t *kb,
    magmaFloatComplex *hA, magma_int_t lda,
    magmaFloatComplex_ptr dA, size_t dA_offset, magma_int_t ldda,
    magma_int_t *ipiv,
    magmaFloatComplex_ptr dW, size_t dW_offset, magma_int_t lddw,
    magma_queue_t queue,
    magma_int_t *info)
{
    /* .. Parameters .. */
    float d_one   = 1.0;
    float d_zero  = 0.0;
    float d_eight = 8.0;
    float d_seven = 7.0;
#if defined(PRECISION_c)
    float  f_zero =  0.0;
#endif
    magmaFloatComplex c_one  =  MAGMA_C_ONE;
    magmaFloatComplex c_mone = -MAGMA_C_ONE;
    magma_int_t upper = (uplo == MagmaUpper);
    magma_int_t ione = 1;

    /* .. Local Scalars .. */
    magma_int_t imax = 0, jmax = 0, kk, kkW, kp, kstep, iinfo;
    float   abs_akk, alpha, colmax, R1, rowmax;
    magmaFloatComplex Zimax, Z;

#define dA(i, j)  dA, dA_offset + (j)*ldda  + (i)
#define dW(i, j)  dW, dW_offset + (j)*lddw  + (i)
#define  A(i, j) (hA + (j)*lda   + (i))

    /* .. Executable Statements .. */
    *info = 0;

    /* Initialize alpha for use in choosing pivot block size. */
    alpha = ( d_one+sqrt( d_seven ) ) / d_eight;

    magma_event_t event = NULL;
    if( upper ) {
        /* Factorize the trailing columns of A using the upper triangle
           of A and working backwards, and compute the matrix W = U12*D
           for use in updating A11 (note that conjg(W) is actually stored)

           K is the main loop index, decreasing from N in steps of 1 or 2

           KW is the column of W which corresponds to column K of A   */
        int k, kw = 0;
        for (k = n-1; k+1 > max(n-nb+1, nb); k -= kstep) {
            kw = nb - (n-k);
            /* Copy column K of A to column KW of W and update it */

            magma_ccopy( k+1, dA( 0, k ), 1, dW( 0, kw ), 1, queue );

            // set imaginary part of diagonal to be zero
#if defined(PRECISION_z)
            magma_dsetvector_async( 1, &d_zero, 1,
                                    dW, 2*(k+ kw*lddw+dW_offset)+1, 1, queue, &event);
            magma_queue_sync( queue );
#elif defined(PRECISION_c)
            magma_ssetvector_async( 1, &f_zero, 1,
                                    dW, 2*(k+ kw*lddw+dW_offset)+1, 1, queue, &event);
            magma_queue_sync( queue );
#endif

            if (k+1 < n) {
                magma_cgemv( MagmaNoTrans, k+1, n-(k+1), c_mone, dA( 0, k+1 ), ldda,
                             dW( k, kw+1 ), lddw, c_one, dW( 0, kw ), ione, queue );

                // set imaginary part of diagonal to be zero
#if defined(PRECISION_z)
                magma_dsetvector_async( 1, &d_zero, 1,
                                        dW, 2*(k+ kw*lddw+dW_offset)+1, 1, queue, &event );
                magma_queue_sync( queue );
#elif defined(PRECISION_c)
                magma_ssetvector_async( 1, &f_zero, 1,
                                        dW, 2*(k+ kw*lddw+dW_offset)+1, 1, queue, &event );
                magma_queue_sync( queue );
#endif
            }

            kstep = 1;

            /* Determine rows and columns to be interchanged and whether
               a 1-by-1 or 2-by-2 pivot block will be used */
            magma_cgetvector_async( 1, dW( k, kw ), 1, &Z, 1, queue, &event );
            magma_queue_sync( queue );
            abs_akk = fabs( MAGMA_C_REAL( Z ) );

            /* imax is the row-index of the largest off-diagonal element in
               column K, and colmax is its absolute value */
            if( k > 0 ) {
                // magma is one-based
                imax = magma_icamax( k, dW( 0, kw ), 1, queue ) - 1;
                magma_cgetvector( 1, dW( imax, kw ), 1, &Z, 1, queue );
                colmax = MAGMA_C_ABS1( Z );
            } else {
                colmax = d_zero;
            }
            if( max( abs_akk, colmax ) == 0.0 ) {

                /* Column K is zero: set INFO and continue */
                if ( *info == 0 ) *info = k;

                kp = k;

#if defined(PRECISION_z)
                magma_dsetvector_async( 1, &d_zero, 1,
                                        dA, 2*(k+ k*ldda+dA_offset)+1, 1, queue, &event );
                magma_queue_sync( queue );
#elif defined(PRECISION_c)
                magma_ssetvector_async( 1, &f_zero, 1,
                                        dA, 2*(k+ k*ldda+dA_offset)+1, 1, queue, &event );
                magma_queue_sync( queue );
#endif
            } else {
                if( abs_akk >= alpha*colmax ) {

                    /* no interchange, use 1-by-1 pivot block */
                    kp = k;
                } else {

                    /* Copy column imax to column KW-1 of W and update it */
                    magma_ccopy( imax+1, dA( 0, imax ), 1, dW( 0, kw-1 ), 1, queue );
#if defined(PRECISION_z)
                    magma_dsetvector_async( 1, &d_zero, 1,
                                            dW, 2*(imax+ (kw-1)*lddw+dW_offset)+1, 1, queue, &event );
#elif defined(PRECISION_c)
                    magma_ssetvector_async( 1, &f_zero, 1,
                                            dW, 2*(imax+ (kw-1)*lddw+dW_offset)+1, 1, queue, &event );
#endif

#if defined(PRECISION_z) || defined(PRECISION_c)
                    magmablas_clacpy_cnjg( k-imax, dA(imax,imax+1), ldda, dW(imax+1,kw-1), 1, queue );
#else
                    magma_ccopy( k-imax, dA(imax,imax+1), ldda, dW(imax+1,kw-1), 1, queue );
#endif
                    if( k+1 < n ) {
                        magma_cgemv( MagmaNoTrans, k+1, n-(k+1), c_mone,
                                     dA( 0, k+1 ), ldda, dW( imax, kw+1 ), lddw,
                                     c_one, dW( 0, kw-1 ), ione, queue );

#if defined(PRECISION_z)
                        magma_dsetvector_async( 1, &d_zero, 1,
                                                dW, 2*(imax+ (kw-1)*lddw+dW_offset)+1, 1, queue, &event );
#elif defined(PRECISION_c)
                        magma_ssetvector_async( 1, &f_zero, 1,
                                                dW, 2*(imax+ (kw-1)*lddw+dW_offset)+1, 1, queue, &event );
#endif
                    }
                    magma_cgetvector_async( 1, dW( imax, kw-1 ), 1, &Zimax, 1, queue, &event );
                    magma_queue_sync( queue );

                    /* jmax is the column-index of the largest off-diagonal
                      element in row imax, and rowmax is its absolute value */
                    jmax = imax + magma_icamax( k-imax, dW( imax+1, kw-1 ), 1, queue );
                    magma_cgetvector( 1, dW( jmax, kw-1 ), 1, &Z, 1, queue );
                    rowmax = MAGMA_C_ABS1( Z );
                    if ( imax > 0 ) {
                        // magma is one-based
                        jmax = magma_icamax( imax, dW( 0, kw-1 ), 1, queue ) - 1;
                        magma_cgetvector( 1, dW( jmax, kw-1 ), 1, &Z, 1, queue );
                        rowmax = max( rowmax, MAGMA_C_ABS1( Z  ) );
                    }

                    if( abs_akk >= alpha*colmax*( colmax / rowmax ) ) {

                        /* no interchange, use 1-by-1 pivot block */
                        kp = k;
                    } else if ( fabs( MAGMA_C_REAL( Zimax ) ) >= alpha*rowmax ) {

                        /* interchange rows and columns K and imax, use 1-by-1
                           pivot block */
                        kp = imax;

                        /* copy column KW-1 of W to column KW */
                        magma_ccopy( k+1, dW( 0, kw-1 ), 1, dW( 0, kw ), 1, queue );
                    } else {

                        /* interchange rows and columns K-1 and imax, use 2-by-2
                           pivot block */
                        kp = imax;
                        kstep = 2;
                    }
                }
                kk = k - kstep + 1;
                kkW = nb - (n - kk);

                /* Updated column kp is already stored in column kkW of W */
                if( kp != kk ) {

                    /* Interchange rows kk and kp in last kk columns of A and W */
                    // note: row-swap A(:,kk)
                    magmablas_cswap( n-kk, dA( kk, kk ), ldda, dA( kp, kk ), ldda, queue );
                    magmablas_cswap( n-kk, dW( kk, kkW), lddw, dW( kp, kkW), lddw, queue );

                    /* Copy non-updated column kk to column kp */
#if defined(PRECISION_z) || defined(PRECISION_c)
                    magmablas_clacpy_cnjg( kk-kp-1, dA( kp+1, kk ), 1, dA( kp, kp+1 ), ldda, queue );
#else
                    magma_ccopy( kk-kp-1, dA( kp+1, kk ), 1, dA( kp, kp+1 ), ldda, queue );
#endif

                    // now A(kp,kk) should be A(kk,kk), and copy to A(kp,kp)
                    magma_ccopy( kp+1, dA( 0, kk ), 1, dA( 0, kp ), 1, queue );
#if defined(PRECISION_z)
                    magma_dsetvector_async( 1, &d_zero, 1,
                                            dA, 2*(kp+ kp*ldda+dA_offset)+1, 1, queue, &event );
                    magma_queue_sync( queue );
#elif defined(PRECISION_c)
                    magma_ssetvector_async( 1, &f_zero, 1,
                                            dA, 2*(kp+ kp*ldda+dA_offset)+1, 1, queue, &event );
#endif
                }
                if( kstep == 1 ) {

                    /* 1-by-1 pivot block D(k): column KW of W now holds
                          W(k) = U(k)*D(k)
                          where U(k) is the k-th column of U
                          Store U(k) in column k of A */
                    magma_ccopy( k+1, dW( 0, kw ), 1, dA( 0, k ), 1, queue );
                    if ( k > 0 ) {
                        magma_cgetvector_async( 1, dA( k, k ), 1, &Z, 1, queue, &event );
                        magma_queue_sync( queue );
                        R1 = d_one / MAGMA_C_REAL( Z );
                        magma_csscal( k, R1, dA( 0, k ), 1, queue );

                        /* Conjugate W(k) */
#if defined(PRECISION_z) || defined(PRECISION_c)
                        magmablas_clacpy_cnjg( k, dW( 0, kw ), 1, dW( 0, kw ), 1, queue );
#endif
                    }
                } else {

                    /* 2-by-2 pivot block D(k): columns KW and KW-1 of W now hold
                      ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k)
                      where U(k) and U(k-1) are the k-th and (k-1)-th columns of U */
                    if( k > 1 ) {
                        /* Store U(k) and U(k-1) in columns k and k-1 of A */
                        magmablas_clascl_2x2( MagmaUpper,
                                              k-1, dW(0, kw-1), lddw, dA(0,k-1), ldda, &iinfo, queue );
                    }

                    /* Copy D(k) to A */
                    magma_ccopymatrix( 2, 2, dW( k-1, kw-1 ), lddw, dA( k-1, k-1 ), ldda, queue );

                    /* Conjugate W(k) and W(k-1) */
#if defined(PRECISION_z) || defined(PRECISION_c)
                    magmablas_clacpy_cnjg( k,   dW( 0, kw ),   1, dW( 0, kw ),   1, queue );
                    magmablas_clacpy_cnjg( k-1, dW( 0, kw-1 ), 1, dW( 0, kw-1 ), 1, queue );
#endif
                }
            }

            /* Store details of the interchanges in ipiv */
            if( kstep == 1 ) {
                ipiv[ k ] = 1+kp;
            } else {
                ipiv[ k ] = -(1+kp);
                ipiv[ k-1 ] = -(1+kp);
            }
        }
        /* Update the upper triangle of A11 (= A(1:k,1:k)) as
            A11 := A11 - U12*D*U12' = A11 - U12*W'
           computing blocks of NB columns at a time (note that conjg(W) is
           actually stored) */
        kw = nb - (n-k);
        for (int j = ( k / nb )*nb; j >= 0; j -= nb ) {
            int jb = min( nb, k-j+1 );

#ifdef SYMMETRIC_UPDATE
            /* Update the upper triangle of the diagonal block */
            for (int jj = j; jj < j + jb; jj++) {
#if defined(PRECISION_z)
                magma_dsetvector_async( 1, &d_zero, 1,
                                        dA, 2*(jj+ jj*ldda+dA_offset)+1, 1, queue, &event );
#elif defined(PRECISION_c)
                magma_ssetvector_async( 1, &f_zero, 1,
                                        dA, 2*(jj+ jj*ldda+dA_offset)+1, 1, queue, &event );
#endif
                magma_cgemv( MagmaNoTrans, jj-j+1, n-(k+1), c_mone,
                             dA( j, k+1 ), ldda, dW( jj, kw+1 ), lddw, c_one,
                             dA( j, jj ), 1, queue );
#if defined(PRECISION_z)
                magma_dsetvector_async( 1, &d_zero, 1,
                                        dA, 2*(jj+ jj*ldda+dA_offset)+1, 1, queue, &event );
#elif defined(PRECISION_c)
                magma_ssetvector_async( 1, &f_zero, 1,
                                        dA, 2*(jj+ jj*ldda+dA_offset)+1, 1, queue, &event );
#endif
            }
            /* Update the rectangular superdiagonal block */
            magma_cgemm( MagmaNoTrans, MagmaTrans, j, jb, n-(k+1),
                         c_mone, dA( 0, k+1 ), ldda, dW( j, kw+1 ), lddw,
                         c_one, dA( 0, j ), ldda, queue );
#else
#if defined(PRECISION_z)
            magmablas_dlaset(MagmaUpperLower, 1, jb,
                             0, 0, dA, 2*(j+ j*ldda+dA_offset)+1, 2*(1+ldda), queue );
#elif defined(PRECISION_c)
            magmablas_slaset(MagmaUpperLower, 1, jb,
                             0, 0, dA, 2*(j+ j*ldda+dA_offset)+1, 2*(1+ldda), queue );
#endif
            magma_cgemm( MagmaNoTrans, MagmaTrans, j+jb, jb, n-(k+1),
                         c_mone, dA( 0, k+1 ),  ldda,
                         dW( j, kw+1 ), lddw,
                         c_one,  dA( 0, j ),    ldda, queue );
#if defined(PRECISION_z)
            magmablas_dlaset(MagmaUpperLower, 1, jb,
                             0, 0, dA, 2*(j+ j*ldda+dA_offset)+1, 2*(1+ldda), queue );
#elif defined(PRECISION_c)
            magmablas_slaset(MagmaUpperLower, 1, jb,
                             0, 0, dA, 2*(j+ j*ldda+dA_offset)+1, 2*(1+ldda), queue );
#endif
#endif
        }

        /* Put U12 in standard form by partially undoing the interchanges in columns k+1:n */
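        /* A negative ipiv entry marks a 2-by-2 pivot: the loop below then consumes two columns
           per step and undoes a single row interchange for the pair. */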
        for (int j = k+1; j < n;)
        {
            int jj = j;
            int jp = ipiv[ j ];
            if( jp < 0 ) {
                jp = -jp;
                j = j + 1;
            }
            j = j + 1;
            jp = jp - 1;
            if( jp != jj && j < n )
                magmablas_cswap( n-j, dA( jp, j ), ldda, dA( jj, j ), ldda, queue );
        }

        // copy the factorized panel back to the CPU
        magma_cgetmatrix_async( n, n-(k+1), dA(0,k+1), ldda, A(0,k+1), lda, queue, &event );
        magma_queue_sync( queue );

        /* Set KB to the number of columns factorized */
        *kb = n - (k+1);

    } else {
        /* Factorize the leading columns of A using the lower triangle
           of A and working forwards, and compute the matrix W = L21*D
           for use in updating A22 (note that conjg(W) is actually stored)

           K is the main loop index, increasing from 1 in steps of 1 or 2 */

        int k;
        for (k = 0; k < min(nb-1,n); k += kstep) {

            /* Copy column K of A to column K of W and update it */
            /* -------------------------------------------------------------- */
            magma_ccopy( n-k, dA( k, k ), 1, dW( k, k ), 1, queue );

            // set the imaginary part of the diagonal entry to zero
#if defined(PRECISION_z)
            magma_dsetvector_async( 1, &d_zero, 1,
                                    dW, 2*(k*lddw+k+dW_offset)+1, 1, queue, &event);
            magma_queue_sync( queue );
#elif defined(PRECISION_c)
            magma_ssetvector_async( 1, &f_zero, 1,
                                    dW, 2*(k*lddw+k+dW_offset)+1, 1, queue, &event);
            magma_queue_sync( queue );
#endif
            /* -------------------------------------------------------------- */

            magma_cgemv( MagmaNoTrans, n-k, k, c_mone, dA( k, 0 ), ldda,
                         dW( k, 0 ), lddw, c_one, dW( k, k ), ione, queue );
            // reset the imaginary part of the diagonal entry to zero
#if defined(PRECISION_z)
            magma_dsetvector_async( 1, &d_zero, 1,
                                    dW, 2*(k*lddw+k+dW_offset)+1, 1, queue, &event );
            magma_queue_sync( queue );
#elif defined(PRECISION_c)
            magma_ssetvector_async( 1, &f_zero, 1,
                                    dW, 2*(k*lddw+k+dW_offset)+1, 1, queue, &event );
            magma_queue_sync( queue );
#endif
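            /* The diagonal of a Hermitian matrix is real, so its imaginary part is forced to zero
               both before and after the gemv to discard rounding error from the update. */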

            kstep = 1;

            /* Determine rows and columns to be interchanged and whether
               a 1-by-1 or 2-by-2 pivot block will be used */
            magma_cgetvector_async( 1, dW( k, k ), 1, &Z, 1, queue, &event );
            magma_queue_sync( queue );
            abs_akk = fabs( MAGMA_C_REAL( Z ) );

            /* imax is the row-index of the largest off-diagonal element in
               column K, and colmax is its absolute value */
            if( k < n-1 ) {
                // magma_icamax returns a one-based index
                imax = k + magma_icamax( n-k-1, dW(k+1,k), 1, queue );

                magma_cgetvector( 1, dW( imax,k ), 1, &Z, 1, queue );
                colmax = MAGMA_C_ABS1( Z );

            } else {
                colmax = d_zero;
            }
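            /* Bunch-Kaufman pivot selection: a zero column leaves D singular; otherwise a 1-by-1 pivot
               at k is accepted when |A(k,k)| >= alpha*colmax (alpha is the threshold set earlier,
               (1+sqrt(17))/8 in LAPACK); failing that, row/column imax is examined to choose between
               an interchanged 1-by-1 pivot and a 2-by-2 pivot. */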

            if ( max( abs_akk, colmax ) == 0.0 ) {

                /* Column K is zero: set INFO and continue */
                if( *info == 0 ) *info = k;
                kp = k;

                // make sure the imaginary part of the diagonal entry is zero
#if defined(PRECISION_z)
                magma_dsetvector_async( 1, &d_zero, 1,
                                        dA, 2*(k*ldda+k+dA_offset)+1, 1, queue, &event );
                magma_queue_sync( queue );
#elif defined(PRECISION_c)
                magma_ssetvector_async( 1, &f_zero, 1,
                                        dA, 2*(k*ldda+k+dA_offset)+1, 1, queue, &event );
                magma_queue_sync( queue );
#endif
            } else {
                if ( abs_akk >= alpha*colmax ) {

                    /* no interchange, use 1-by-1 pivot block */

                    kp = k;
                } else {
                    /* Copy column imax to column K+1 of W and update it */
#if defined(PRECISION_z) || defined(PRECISION_c)
                    magmablas_clacpy_cnjg( imax-k, dA(imax,k), ldda, dW(k,k+1), 1, queue );
#else
                    magma_ccopy( imax-k, dA( imax, k ), ldda, dW( k, k+1 ), 1, queue );
#endif

                    magma_ccopy( n-imax, dA( imax, imax ), 1, dW( imax, k+1 ), 1, queue );
#if defined(PRECISION_z)
                    magma_dsetvector_async( 1, &d_zero, 1,
                                            dW, 2*((k+1)*lddw+imax+dW_offset)+1, 1, queue, &event);
                    magma_queue_sync( queue );
#elif defined(PRECISION_c)
                    magma_ssetvector_async( 1, &f_zero, 1,
                                            dW, 2*((k+1)*lddw+imax+dW_offset)+1, 1, queue, &event);
                    magma_queue_sync( queue );
#endif

                    magma_cgemv( MagmaNoTrans, n-k, k, c_mone, dA( k, 0 ), ldda,
                                 dW( imax, 0 ), lddw, c_one, dW( k, k+1 ), ione, queue );
#if defined(PRECISION_z)
                    magma_dsetvector_async( 1, &d_zero, 1,
                                            dW, 2*((k+1)*lddw+imax+dW_offset)+1, 1, queue, &event);
                    magma_queue_sync( queue );
#elif defined(PRECISION_c)
                    magma_ssetvector_async( 1, &f_zero, 1,
                                            dW, 2*((k+1)*lddw+imax+dW_offset)+1, 1, queue, &event);
                    magma_queue_sync( queue );
#endif

                    magma_cgetvector_async( 1, dW(imax,k+1), 1, &Zimax, 1, queue, &event);
                    magma_queue_sync( queue );

                    /* jmax is the column-index of the largest off-diagonal
                       element in row imax, and rowmax is its absolute value */

                    // magma_icamax returns a one-based index
                    jmax = k-1 + magma_icamax( imax-k, dW(k, k+1), 1, queue );

                    magma_cgetvector( 1, dW(jmax,k+1), 1, &Z, 1, queue );
                    rowmax = MAGMA_C_ABS1( Z );
                    if( imax < n-1 ) {
                        // magma_icamax returns a one-based index
                        jmax = imax + magma_icamax( (n-1)-imax, dW(imax+1,k+1), 1, queue);
                        magma_cgetvector( 1, dW(jmax,k+1), 1, &Z, 1, queue );
                        rowmax = max( rowmax, MAGMA_C_ABS1( Z ) );
                    }
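                    /* rowmax is the largest off-diagonal magnitude in row/column imax of the updated
                       panel: the first icamax scanned the conjugated row segment A(imax,k:imax-1) held
                       in W, the second the sub-column A(imax+1:n-1,imax). */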

                    if( abs_akk >= alpha*colmax*( colmax / rowmax ) ) {

                        /* no interchange, use 1-by-1 pivot block */
                        kp = k;
                    } else if( fabs( MAGMA_C_REAL( Zimax ) ) >= alpha*rowmax ) {

                        /* interchange rows and columns K and imax, use 1-by-1
                           pivot block */
                        kp = imax;

                        /* copy column K+1 of W to column K */
                        magma_ccopy( n-k, dW( k, k+1 ), 1, dW( k, k ), 1, queue );
                    } else {

                        /* interchange rows and columns K+1 and imax, use 2-by-2
                           pivot block */
                        kp = imax;
                        kstep = 2;
                    }
                }
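                /* kk is the column where pivot row kp must be placed:
                   kk = k for a 1-by-1 pivot, kk = k+1 for the second column of a 2-by-2 pivot. */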

                kk = k + kstep - 1;

                /* Updated column kp is already stored in column kk of W */
                if( kp != kk ) {

                    /* Copy non-updated column kk to column kp */
                    /* ------------------------------------------------------------------ */
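                    /* Hermitian interchange in lower storage: the conjugated segment of column kk
                       (rows kk:kp-1) is written into row kp, the part of column kk from row kp down
                       becomes column kp, then rows kk and kp of the first kk+1 columns of A and W are swapped. */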
#if defined(PRECISION_z) || defined(PRECISION_c)
                    magmablas_clacpy_cnjg( kp-kk, dA( kk, kk ), 1, dA( kp, kk ), ldda, queue );
#else
                    magma_ccopy( kp-kk, dA( kk, kk ), 1, dA( kp, kk ), ldda, queue );
#endif
                    if ( kp < n ) {
                        magma_ccopy( n-kp, dA( kp, kk), 1, dA( kp, kp ), 1, queue );
                    }
                    /* ------------------------------------------------------------------ */

                    /* Interchange rows kk and kp in first kk columns of A and W */
                    magmablas_cswap( kk+1, dA( kk, 0 ), ldda, dA( kp, 0 ), ldda, queue );
                    magmablas_cswap( kk+1, dW( kk, 0 ), lddw, dW( kp, 0 ), lddw, queue );
                }

                if ( kstep == 1 ) {

                    /* 1-by-1 pivot block D(k): column k of W now holds

                       W(k) = L(k)*D(k)

                       where L(k) is the k-th column of L

                       Store L(k) in column k of A */
                    magma_ccopy( n-k, dW( k, k ), 1, dA( k, k ), 1, queue );

                    if ( k < n-1 ) {
                        magma_cgetvector_async( 1, dA(k,k), 1, &Z, 1, queue, &event );
                        magma_queue_sync( queue );
                        R1 = d_one / MAGMA_C_REAL( Z );
                        magma_csscal((n-1)-k, R1, dA( k+1,k ), 1, queue);

                        /* Conjugate W(k) */
#if defined(PRECISION_z) || defined(PRECISION_c)
                        magmablas_clacpy_cnjg( (n-1)-k, dW( k+1,k ), 1, dW( k+1,k ), 1, queue );
#endif
                    }
                } else {

                    /* 2-by-2 pivot block D(k): columns k and k+1 of W now hold

                    ( W(k) W(k+1) ) = ( L(k) L(k+1) )*D(k)

                    where L(k) and L(k+1) are the k-th and (k+1)-th columns
                    of L */
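                    /* ( L(k) L(k+1) ) = ( W(k) W(k+1) )*inv(D(k)) is formed for the rows below the
                       2-by-2 block (clascl_2x2 below); D(k) itself is copied into A unchanged. */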
                    magmablas_clascl_2x2( MagmaLower,
                                          n-(k+2), dW(k,k), lddw, dA(k+2,k), ldda, &iinfo,
                                          queue );

                    /* Copy D(k) to A */
                    magma_ccopymatrix( 2, 2, dW( k, k ), lddw, dA( k, k ), ldda, queue );

                    /* Conjugate W(k) and W(k+1) */
#if defined(PRECISION_z) || defined(PRECISION_c)
                    magmablas_clacpy_cnjg( (n-1)-k,   dW( k+1,k ),  1, dW( k+1,k ),   1, queue );
                    magmablas_clacpy_cnjg( (n-1)-k-1, dW( k+2,k+1), 1, dW( k+2,k+1 ), 1, queue );
#endif
                }
            }

            /* Store details of the interchanges in ipiv */
            if ( kstep == 1 ) {
                ipiv[k] = kp+1;
            } else {
                ipiv[k] = -kp-1;
                ipiv[k+1] = -kp-1;
            }
        }

        /* Update the lower triangle of A22 (= A(k:n,k:n)) as

           A22 := A22 - L21*D*L21' = A22 - L21*W'

           computing blocks of NB columns at a time (note that conjg(W) is
           actually stored) */
        for( int j = k; j < n; j += nb ) {
            int jb = min( nb, n-j );

            /* Update the lower triangle of the diagonal block */

#ifdef SYMMETRIC_UPDATE
            for (int jj = j; jj < j + jb; jj++) {
                int jnb = j + jb - jj;

                /* -------------------------------------------------------- */
                magma_cgemv( MagmaNoTrans, jnb, k, c_mone, dA( jj, 0 ), ldda,
                             dW( jj, 0 ), lddw, c_one, dA( jj, jj ), ione, queue );
                /* -------------------------------------------------------- */
            }

            /* Update the rectangular subdiagonal block */

            if( j+jb < n ) {
                int nk = n - (j+jb);

                /* -------------------------------------------- */
                magma_cgemm( MagmaNoTrans, MagmaTrans, nk, jb, k,
                             c_mone, dA( j+jb, 0 ), ldda,
                             dW( j, 0 ),    lddw,
                             c_one,  dA( j+jb, j ), ldda, queue );
                /* ------------------------------------------- */
            }
#else

#if defined(PRECISION_z)
            magmablas_dlaset(MagmaUpperLower, 1, jb,
                             0, 0, dA, 2*(j*ldda+j+dA_offset)+1, 2*(1+ldda), queue );
#elif defined(PRECISION_c)
            magmablas_slaset(MagmaUpperLower, 1, jb,
                             0, 0, dA, 2*(j*ldda+j+dA_offset)+1, 2*(1+ldda), queue );
#endif
            magma_cgemm( MagmaNoTrans, MagmaTrans, n-j, jb, k,
                         c_mone, dA( j, 0 ), ldda,
                         dW( j, 0 ), lddw,
                         c_one,  dA( j, j ), ldda, queue );
#if defined(PRECISION_z)
            magmablas_dlaset(MagmaUpperLower, 1, jb,
                             0, 0, dA, 2*(j*ldda+j+dA_offset)+1, 2*(1+ldda), queue );
#elif defined(PRECISION_c)
            magmablas_slaset(MagmaUpperLower, 1, jb,
                             0, 0, dA, 2*(j*ldda+j+dA_offset)+1, 2*(1+ldda), queue );
#endif
#endif
        }

        /* Put L21 in standard form by partially undoing the interchanges
           in columns 1:k-1 */
        for (int j = k; j > 0;) {
            int jj = j;
            int jp = ipiv[j-1];
            if( jp < 0 ) {
                jp = -jp;
                j--;
            }
            j--;
            if ( jp != jj && j >= 1 ) {
                magmablas_cswap( j, dA( jp-1,0 ), ldda, dA( jj-1,0 ), ldda, queue );
            }
        }
        // copy the factorized panel back to the CPU
        magma_cgetmatrix_async( n, k, dA(0,0), ldda, A(0,0), lda, queue, &event );
        magma_queue_sync( queue );

        /* Set KB to the number of columns factorized */
        *kb = k;
    }

    return *info;
    /* End of CLAHEF */
}