FLA_Error FLA_Check_equal_vector_dims( FLA_Obj x, FLA_Obj y )
{
  FLA_Error e_val = FLA_SUCCESS;

  if ( FLA_Obj_vector_dim( x ) != FLA_Obj_vector_dim( y ) )
    e_val = FLA_UNEQUAL_VECTOR_DIMS;

  return e_val;
}
int FLAME_invert_ztau( FLA_Obj t )
{
  dim_t     m    = FLA_Obj_vector_dim( t );
  dim_t     inc  = FLA_Obj_vector_inc( t );
  dcomplex* buff = FLA_Obj_buffer_at_view( t );
  double    one  = 1.0;
  double    conjsign = one; // Use -one to invert with conjugation.
  double    zero = 0.0;
  double    temp, s, xr_s, xi_s;
  dcomplex* chi;
  int       i;

  for ( i = 0; i < m; ++i )
  {
    chi = buff + i*inc;

    // Scale by s = max(|Re|,|Im|) to avoid overflow/underflow when
    // forming the squared magnitude.
    s = bl1_fmaxabs( chi->real, chi->imag );

    if ( s != zero )
    {
      xr_s = chi->real / s;
      xi_s = chi->imag / s;
      temp = xr_s * chi->real + xi_s * chi->imag;

      chi->real = xr_s / temp;
      chi->imag = conjsign * xi_s / temp;
    }
  }

  return 0;
}
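/*
  For reference, the same overflow-safe reciprocal in isolation (a minimal
  standalone sketch, not part of the library; invert_dcomplex is a
  hypothetical name). Dividing by s = max(|Re z|,|Im z|) first keeps the
  intermediate |z|^2 representable. Note that with conjsign = +one the
  routine above leaves the imaginary part's sign unflipped; the standard
  reciprocal 1/z = conj(z)/|z|^2 negates it, as done here.
*/
#include <math.h>

static void invert_dcomplex( double* re, double* im )
{
  double s = fmax( fabs( *re ), fabs( *im ) );

  if ( s != 0.0 )
  {
    double xr_s = *re / s;
    double xi_s = *im / s;
    double temp = xr_s * (*re) + xi_s * (*im); // = |z|^2 / s

    *re =  xr_s / temp;                        // Re(1/z) =  Re(z) / |z|^2
    *im = -xi_s / temp;                        // Im(1/z) = -Im(z) / |z|^2
  }
}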
FLA_Error FLA_Tevd_eigval_v_opt_var1( FLA_Obj G, FLA_Obj d, FLA_Obj e, FLA_Obj k )
{
  FLA_Datatype datatype;
  int          m_A, n_G;
  int          rs_G, cs_G;
  int          inc_d;
  int          inc_e;

  datatype = FLA_Obj_datatype( d );

  m_A   = FLA_Obj_vector_dim( d );
  n_G   = FLA_Obj_width( G );

  rs_G  = FLA_Obj_row_stride( G );
  cs_G  = FLA_Obj_col_stride( G );

  inc_d = FLA_Obj_vector_inc( d );
  inc_e = FLA_Obj_vector_inc( e );

  switch ( datatype )
  {
    case FLA_FLOAT:
    {
      scomplex* buff_G = FLA_COMPLEX_PTR( G );
      float*    buff_d = FLA_FLOAT_PTR( d );
      float*    buff_e = FLA_FLOAT_PTR( e );
      int*      buff_k = FLA_INT_PTR( k );

      FLA_Tevd_eigval_v_ops_var1( m_A,
                                  n_G,
                                  buff_G, rs_G, cs_G,
                                  buff_d, inc_d,
                                  buff_e, inc_e,
                                  buff_k );
      break;
    }

    case FLA_DOUBLE:
    {
      dcomplex* buff_G = FLA_DOUBLE_COMPLEX_PTR( G );
      double*   buff_d = FLA_DOUBLE_PTR( d );
      double*   buff_e = FLA_DOUBLE_PTR( e );
      int*      buff_k = FLA_INT_PTR( k );

      FLA_Tevd_eigval_v_opd_var1( m_A,
                                  n_G,
                                  buff_G, rs_G, cs_G,
                                  buff_d, inc_d,
                                  buff_e, inc_e,
                                  buff_k );
      break;
    }
  }

  return FLA_SUCCESS;
}
FLA_Error FLA_Check_vector_dim( FLA_Obj x, dim_t expected_length )
{
  FLA_Error e_val = FLA_SUCCESS;

  if ( FLA_Obj_vector_dim( x ) != expected_length )
    e_val = FLA_INVALID_VECTOR_DIM;

  return e_val;
}
FLA_Error FLA_Check_vector_dim_min( FLA_Obj x, dim_t min_dim )
{
  FLA_Error e_val = FLA_SUCCESS;

  if ( FLA_Obj_vector_dim( x ) < min_dim )
    e_val = FLA_VECTOR_DIM_BELOW_MIN;

  return e_val;
}
FLA_Error FLA_Check_matrix_vector_dims( FLA_Trans trans, FLA_Obj A, FLA_Obj x, FLA_Obj y )
{
  FLA_Error e_val = FLA_SUCCESS;

  if ( trans == FLA_NO_TRANSPOSE || trans == FLA_CONJ_NO_TRANSPOSE )
  {
    if ( FLA_Obj_width( A ) != FLA_Obj_vector_dim( x ) )
      e_val = FLA_NONCONFORMAL_DIMENSIONS;

    if ( FLA_Obj_length( A ) != FLA_Obj_vector_dim( y ) )
      e_val = FLA_NONCONFORMAL_DIMENSIONS;
  }
  else
  {
    if ( FLA_Obj_length( A ) != FLA_Obj_vector_dim( x ) )
      e_val = FLA_NONCONFORMAL_DIMENSIONS;

    if ( FLA_Obj_width( A ) != FLA_Obj_vector_dim( y ) )
      e_val = FLA_NONCONFORMAL_DIMENSIONS;
  }

  return e_val;
}
// Transform tau: overwrite each nonzero entry of t with its reciprocal.
int FLAME_invert_stau( FLA_Obj t )
{
  dim_t  m    = FLA_Obj_vector_dim( t );
  dim_t  inc  = FLA_Obj_vector_inc( t );
  float* buff = FLA_Obj_buffer_at_view( t );
  float  one  = 1.0F;
  float  zero = 0.0F;
  float* chi;
  int    i;

  for ( i = 0; i < m; ++i )
  {
    chi = buff + i*inc;

    if ( *chi != zero )
      *chi = ( one / *chi );
  }

  return 0;
}
FLA_Error FLA_Obj_extract_imag_part_check( FLA_Obj a, FLA_Obj b )
{
  FLA_Error e_val;

  e_val = FLA_Check_floating_object( a );
  FLA_Check_error_code( e_val );

  e_val = FLA_Check_real_object( b );
  FLA_Check_error_code( e_val );

  e_val = FLA_Check_nonconstant_object( b );
  FLA_Check_error_code( e_val );

  e_val = FLA_Check_identical_object_precision( a, b );
  FLA_Check_error_code( e_val );

  e_val = FLA_Check_vector_dim( a, FLA_Obj_vector_dim( b ) );
  FLA_Check_error_code( e_val );

  return FLA_SUCCESS;
}
FLA_Error FLA_Sort( FLA_Direct direct, FLA_Obj x )
{
  FLA_Datatype datatype;
  FLA_Obj      x_use;
  dim_t        m_x;
  dim_t        inc_x;
  dim_t        inc_x_use;

  if ( FLA_Check_error_level() >= FLA_MIN_ERROR_CHECKING )
    FLA_Sort_check( direct, x );

  datatype = FLA_Obj_datatype( x );

  m_x   = FLA_Obj_vector_dim( x );
  inc_x = FLA_Obj_vector_inc( x );

  // If the vector does not have unit stride, copy it to a temporary vector
  // that does have unit stride. Note that we track the temporary vector's
  // stride in a separate variable; overwriting inc_x here would defeat the
  // copy-back test at the end of the function.
  if ( inc_x != 1 )
  {
    FLA_Obj_create_copy_of( FLA_NO_TRANSPOSE, x, &x_use );
    inc_x_use = FLA_Obj_vector_inc( x_use );
  }
  else
  {
    x_use     = x;
    inc_x_use = inc_x;
  }

  switch ( datatype )
  {
    case FLA_FLOAT:
    {
      float* x_p = ( float* ) FLA_FLOAT_PTR( x_use );

      if ( direct == FLA_FORWARD )
        FLA_Sort_f_ops( m_x, x_p, inc_x_use );
      else // if ( direct == FLA_BACKWARD )
        FLA_Sort_b_ops( m_x, x_p, inc_x_use );

      break;
    }

    case FLA_DOUBLE:
    {
      double* x_p = ( double* ) FLA_DOUBLE_PTR( x_use );

      if ( direct == FLA_FORWARD )
        FLA_Sort_f_opd( m_x, x_p, inc_x_use );
      else // if ( direct == FLA_BACKWARD )
        FLA_Sort_b_opd( m_x, x_p, inc_x_use );

      break;
    }
  }

  // If we sorted a temporary copy, copy the result back and free the copy.
  if ( inc_x != 1 )
  {
    FLA_Copy( x_use, x );
    FLA_Obj_free( &x_use );
  }

  return FLA_SUCCESS;
}
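/*
  A minimal usage sketch for FLA_Sort (assuming a standard libflame build
  where FLA_Init/FLA_Finalize, FLA_Random_matrix, and FLA_Obj_show are
  available with their usual signatures):
*/
#include "FLAME.h"

int main( void )
{
  FLA_Obj x;

  FLA_Init();

  FLA_Obj_create( FLA_DOUBLE, 8, 1, 0, 0, &x ); // 8x1 vector
  FLA_Random_matrix( x );                       // fill with random values
  FLA_Sort( FLA_FORWARD, x );                   // sort in ascending order
  FLA_Obj_show( "x (sorted) =", x, "%g", "" );

  FLA_Obj_free( &x );
  FLA_Finalize();

  return 0;
}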
FLA_Error FLA_Tevd_v_opt_var2( dim_t n_iter_max, FLA_Obj d, FLA_Obj e, FLA_Obj G, FLA_Obj R, FLA_Obj W, FLA_Obj U, dim_t b_alg )
{
  FLA_Error    r_val = FLA_SUCCESS;
  FLA_Datatype datatype;
  int          m_A, m_U, n_G;
  int          inc_d;
  int          inc_e;
  int          rs_G, cs_G;
  int          rs_R, cs_R;
  int          rs_U, cs_U;
  int          rs_W, cs_W;

  datatype = FLA_Obj_datatype( U );

  m_A   = FLA_Obj_vector_dim( d );
  m_U   = FLA_Obj_length( U );
  n_G   = FLA_Obj_width( G );

  inc_d = FLA_Obj_vector_inc( d );
  inc_e = FLA_Obj_vector_inc( e );

  rs_G  = FLA_Obj_row_stride( G );
  cs_G  = FLA_Obj_col_stride( G );

  rs_R  = FLA_Obj_row_stride( R );
  cs_R  = FLA_Obj_col_stride( R );

  rs_W  = FLA_Obj_row_stride( W );
  cs_W  = FLA_Obj_col_stride( W );

  rs_U  = FLA_Obj_row_stride( U );
  cs_U  = FLA_Obj_col_stride( U );

  switch ( datatype )
  {
    case FLA_FLOAT:
    {
      float*    buff_d = FLA_FLOAT_PTR( d );
      float*    buff_e = FLA_FLOAT_PTR( e );
      scomplex* buff_G = FLA_COMPLEX_PTR( G );
      float*    buff_R = FLA_FLOAT_PTR( R );
      float*    buff_W = FLA_FLOAT_PTR( W );
      float*    buff_U = FLA_FLOAT_PTR( U );

      r_val = FLA_Tevd_v_ops_var2( m_A, m_U, n_G, n_iter_max,
                                   buff_d, inc_d,
                                   buff_e, inc_e,
                                   buff_G, rs_G, cs_G,
                                   buff_R, rs_R, cs_R,
                                   buff_W, rs_W, cs_W,
                                   buff_U, rs_U, cs_U,
                                   b_alg );
      break;
    }

    case FLA_DOUBLE:
    {
      double*   buff_d = FLA_DOUBLE_PTR( d );
      double*   buff_e = FLA_DOUBLE_PTR( e );
      dcomplex* buff_G = FLA_DOUBLE_COMPLEX_PTR( G );
      double*   buff_R = FLA_DOUBLE_PTR( R );
      double*   buff_W = FLA_DOUBLE_PTR( W );
      double*   buff_U = FLA_DOUBLE_PTR( U );

      r_val = FLA_Tevd_v_opd_var2( m_A, m_U, n_G, n_iter_max,
                                   buff_d, inc_d,
                                   buff_e, inc_e,
                                   buff_G, rs_G, cs_G,
                                   buff_R, rs_R, cs_R,
                                   buff_W, rs_W, cs_W,
                                   buff_U, rs_U, cs_U,
                                   b_alg );
      break;
    }

    case FLA_COMPLEX:
    {
      float*    buff_d = FLA_FLOAT_PTR( d );
      float*    buff_e = FLA_FLOAT_PTR( e );
      scomplex* buff_G = FLA_COMPLEX_PTR( G );
      float*    buff_R = FLA_FLOAT_PTR( R );
      scomplex* buff_W = FLA_COMPLEX_PTR( W );
      scomplex* buff_U = FLA_COMPLEX_PTR( U );

      r_val = FLA_Tevd_v_opc_var2( m_A, m_U, n_G, n_iter_max,
                                   buff_d, inc_d,
                                   buff_e, inc_e,
                                   buff_G, rs_G, cs_G,
                                   buff_R, rs_R, cs_R,
                                   buff_W, rs_W, cs_W,
                                   buff_U, rs_U, cs_U,
                                   b_alg );
      break;
    }

    case FLA_DOUBLE_COMPLEX:
    {
      double*   buff_d = FLA_DOUBLE_PTR( d );
      double*   buff_e = FLA_DOUBLE_PTR( e );
      dcomplex* buff_G = FLA_DOUBLE_COMPLEX_PTR( G );
      double*   buff_R = FLA_DOUBLE_PTR( R );
      dcomplex* buff_W = FLA_DOUBLE_COMPLEX_PTR( W );
      dcomplex* buff_U = FLA_DOUBLE_COMPLEX_PTR( U );

      r_val = FLA_Tevd_v_opz_var2( m_A, m_U, n_G, n_iter_max,
                                   buff_d, inc_d,
                                   buff_e, inc_e,
                                   buff_G, rs_G, cs_G,
                                   buff_R, rs_R, cs_R,
                                   buff_W, rs_W, cs_W,
                                   buff_U, rs_U, cs_U,
                                   b_alg );
      break;
    }
  }

  return r_val;
}
// Reorder the columns of U and V, and the rows of C, to match the sorted
// order of the vector s, applying each update only when requested.
FLA_Error FLA_Sort_bsvd_ext( FLA_Direct direct, FLA_Obj s,
                             FLA_Bool apply_U, FLA_Obj U,
                             FLA_Bool apply_V, FLA_Obj V,
                             FLA_Bool apply_C, FLA_Obj C )
{
  FLA_Datatype datatype;
  dim_t        m_U, rs_U, cs_U;
  dim_t        m_V, rs_V, cs_V;
  dim_t        n_C, rs_C, cs_C;
  dim_t        m_s, inc_s;

  //if ( FLA_Check_error_level() >= FLA_MIN_ERROR_CHECKING )
  //  FLA_Sort_bsvd_check( direct, s,
  //                       apply_U, U,
  //                       apply_V, V,
  //                       apply_C, C );

  // If neither U nor V is applied, only the singular values need sorting;
  // take the quick path through FLA_Sort().
  if ( apply_U == FALSE && apply_V == FALSE )
    return FLA_Sort( direct, s );

  // The dimensions of s must be provided.
  m_s   = FLA_Obj_vector_dim( s );
  inc_s = FLA_Obj_vector_inc( s );

  // The datatype of U, V, and C must be consistent, and is taken from
  // whichever of them is applied.
  FLA_SORT_BSVD_EXT_DEFINE_OBJ_VARIABLES( U, apply_U, datatype, m_U, FLA_Obj_length, rs_U, cs_U );
  FLA_SORT_BSVD_EXT_DEFINE_OBJ_VARIABLES( V, apply_V, datatype, m_V, FLA_Obj_length, rs_V, cs_V );
  FLA_SORT_BSVD_EXT_DEFINE_OBJ_VARIABLES( C, apply_C, datatype, n_C, FLA_Obj_width,  rs_C, cs_C );

  switch ( datatype )
  {
    case FLA_FLOAT:
    {
      float* s_p = ( float* ) FLA_FLOAT_PTR( s );
      float* U_p = ( apply_U == TRUE ? ( float* ) FLA_FLOAT_PTR( U ) : NULL );
      float* V_p = ( apply_V == TRUE ? ( float* ) FLA_FLOAT_PTR( V ) : NULL );
      float* C_p = ( apply_C == TRUE ? ( float* ) FLA_FLOAT_PTR( C ) : NULL );

      if ( direct == FLA_FORWARD )
        FLA_Sort_bsvd_ext_f_ops( m_s, s_p, inc_s,
                                 m_U, U_p, rs_U, cs_U,
                                 m_V, V_p, rs_V, cs_V,
                                 n_C, C_p, rs_C, cs_C );
      else // if ( direct == FLA_BACKWARD )
        FLA_Sort_bsvd_ext_b_ops( m_s, s_p, inc_s,
                                 m_U, U_p, rs_U, cs_U,
                                 m_V, V_p, rs_V, cs_V,
                                 n_C, C_p, rs_C, cs_C );
      break;
    }

    case FLA_DOUBLE:
    {
      double* s_p = ( double* ) FLA_DOUBLE_PTR( s );
      double* U_p = ( apply_U == TRUE ? ( double* ) FLA_DOUBLE_PTR( U ) : NULL );
      double* V_p = ( apply_V == TRUE ? ( double* ) FLA_DOUBLE_PTR( V ) : NULL );
      double* C_p = ( apply_C == TRUE ? ( double* ) FLA_DOUBLE_PTR( C ) : NULL );

      if ( direct == FLA_FORWARD )
        FLA_Sort_bsvd_ext_f_opd( m_s, s_p, inc_s,
                                 m_U, U_p, rs_U, cs_U,
                                 m_V, V_p, rs_V, cs_V,
                                 n_C, C_p, rs_C, cs_C );
      else // if ( direct == FLA_BACKWARD )
        FLA_Sort_bsvd_ext_b_opd( m_s, s_p, inc_s,
                                 m_U, U_p, rs_U, cs_U,
                                 m_V, V_p, rs_V, cs_V,
                                 n_C, C_p, rs_C, cs_C );
      break;
    }

    case FLA_COMPLEX:
    {
      float*    s_p = ( float* ) FLA_FLOAT_PTR( s );
      scomplex* U_p = ( apply_U == TRUE ? ( scomplex* ) FLA_COMPLEX_PTR( U ) : NULL );
      scomplex* V_p = ( apply_V == TRUE ? ( scomplex* ) FLA_COMPLEX_PTR( V ) : NULL );
      scomplex* C_p = ( apply_C == TRUE ? ( scomplex* ) FLA_COMPLEX_PTR( C ) : NULL );

      if ( direct == FLA_FORWARD )
        FLA_Sort_bsvd_ext_f_opc( m_s, s_p, inc_s,
                                 m_U, U_p, rs_U, cs_U,
                                 m_V, V_p, rs_V, cs_V,
                                 n_C, C_p, rs_C, cs_C );
      else // if ( direct == FLA_BACKWARD )
        FLA_Sort_bsvd_ext_b_opc( m_s, s_p, inc_s,
                                 m_U, U_p, rs_U, cs_U,
                                 m_V, V_p, rs_V, cs_V,
                                 n_C, C_p, rs_C, cs_C );
      break;
    }

    case FLA_DOUBLE_COMPLEX:
    {
      double*   s_p = ( double* ) FLA_DOUBLE_PTR( s );
      dcomplex* U_p = ( apply_U == TRUE ? ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( U ) : NULL );
      dcomplex* V_p = ( apply_V == TRUE ? ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( V ) : NULL );
      dcomplex* C_p = ( apply_C == TRUE ? ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( C ) : NULL );

      if ( direct == FLA_FORWARD )
        FLA_Sort_bsvd_ext_f_opz( m_s, s_p, inc_s,
                                 m_U, U_p, rs_U, cs_U,
                                 m_V, V_p, rs_V, cs_V,
                                 n_C, C_p, rs_C, cs_C );
      else // if ( direct == FLA_BACKWARD )
        FLA_Sort_bsvd_ext_b_opz( m_s, s_p, inc_s,
                                 m_U, U_p, rs_U, cs_U,
                                 m_V, V_p, rs_V, cs_V,
                                 n_C, C_p, rs_C, cs_C );
      break;
    }
  }

  return FLA_SUCCESS;
}
FLA_Error FLA_Bidiag_UT_realify_diagonals_opt( FLA_Obj a, FLA_Obj b, FLA_Obj d, FLA_Obj e )
{
  FLA_Datatype datatype;
  int          i, m, inc_a, inc_b, inc_d, inc_e;

  datatype = FLA_Obj_datatype( a );

  m     = FLA_Obj_vector_dim( a );
  inc_a = FLA_Obj_vector_inc( a );
  inc_b = ( m > 1 ? FLA_Obj_vector_inc( b ) : 0 );
  inc_d = FLA_Obj_vector_inc( d );
  inc_e = FLA_Obj_vector_inc( e );

  switch ( datatype )
  {
    case FLA_FLOAT:
    {
      float* buff_d = FLA_FLOAT_PTR( d );
      float* buff_e = FLA_FLOAT_PTR( e );
      float* buff_1 = FLA_FLOAT_PTR( FLA_ONE );

      // A real bidiagonal matrix needs no realification; the scaling
      // vectors d and e are simply set to one.
      bl1_ssetv( m, buff_1, buff_d, inc_d );
      bl1_ssetv( m, buff_1, buff_e, inc_e );

      break;
    }

    case FLA_DOUBLE:
    {
      double* buff_d = FLA_DOUBLE_PTR( d );
      double* buff_e = FLA_DOUBLE_PTR( e );
      double* buff_1 = FLA_DOUBLE_PTR( FLA_ONE );

      bl1_dsetv( m, buff_1, buff_d, inc_d );
      bl1_dsetv( m, buff_1, buff_e, inc_e );

      break;
    }

    case FLA_COMPLEX:
    {
      scomplex* buff_a = FLA_COMPLEX_PTR( a );
      scomplex* buff_b = ( m > 1 ? FLA_COMPLEX_PTR( b ) : NULL );
      scomplex* buff_d = FLA_COMPLEX_PTR( d );
      scomplex* buff_e = FLA_COMPLEX_PTR( e );
      scomplex* buff_1 = FLA_COMPLEX_PTR( FLA_ONE );
      float*    buff_0 = FLA_FLOAT_PTR( FLA_ZERO );

      for ( i = 0; i < m; ++i )
      {
        scomplex* alpha1   = buff_a + (i  )*inc_a;
        scomplex* delta1   = buff_d + (i  )*inc_d;
        scomplex* epsilon1 = buff_e + (i  )*inc_e;
        scomplex  absv;

        if ( i == 0 )
        {
          *delta1 = *buff_1;
        }
        else
        {
          scomplex* beta1 = buff_b + (i-1)*inc_b;

          if ( beta1->imag == 0.0F )
            *delta1 = *buff_1;
          else
          {
            // delta1 = conj(beta1) / |beta1|, so that delta1 * beta1 is real.
            bl1_ccopys( BLIS1_CONJUGATE, beta1, delta1 );
            bl1_cabsval2( beta1, &absv );
            bl1_cinvscals( &absv, delta1 );

            bl1_cscals( delta1, beta1 );
            beta1->imag = *buff_0;

            bl1_cscals( delta1, alpha1 );
          }
        }

        if ( alpha1->imag == 0.0F )
          *epsilon1 = *buff_1;
        else
        {
          // epsilon1 = conj(alpha1) / |alpha1|, so that epsilon1 * alpha1 is real.
          bl1_ccopys( BLIS1_CONJUGATE, alpha1, epsilon1 );
          bl1_cabsval2( alpha1, &absv );
          bl1_cinvscals( &absv, epsilon1 );

          bl1_cscals( epsilon1, alpha1 );
          alpha1->imag = *buff_0;
        }

        if ( i < ( m - 1 ) )
        {
          scomplex* beta2 = buff_b + (i  )*inc_b;
          bl1_cscals( epsilon1, beta2 );
        }
      }

      break;
    }

    case FLA_DOUBLE_COMPLEX:
    {
      dcomplex* buff_a = FLA_DOUBLE_COMPLEX_PTR( a );
      dcomplex* buff_b = ( m > 1 ? FLA_DOUBLE_COMPLEX_PTR( b ) : NULL );
      dcomplex* buff_d = FLA_DOUBLE_COMPLEX_PTR( d );
      dcomplex* buff_e = FLA_DOUBLE_COMPLEX_PTR( e );
      dcomplex* buff_1 = FLA_DOUBLE_COMPLEX_PTR( FLA_ONE );
      double*   buff_0 = FLA_DOUBLE_PTR( FLA_ZERO );

      for ( i = 0; i < m; ++i )
      {
        dcomplex* alpha1   = buff_a + (i  )*inc_a;
        dcomplex* delta1   = buff_d + (i  )*inc_d;
        dcomplex* epsilon1 = buff_e + (i  )*inc_e;
        dcomplex  absv;

        if ( i == 0 )
        {
          *delta1 = *buff_1;
        }
        else
        {
          dcomplex* beta1 = buff_b + (i-1)*inc_b;

          bl1_zcopys( BLIS1_CONJUGATE, beta1, delta1 );
          bl1_zabsval2( beta1, &absv );
          bl1_zinvscals( &absv, delta1 );

          bl1_zscals( delta1, beta1 );
          beta1->imag = *buff_0;

          bl1_zscals( delta1, alpha1 );
        }

        bl1_zcopys( BLIS1_CONJUGATE, alpha1, epsilon1 );
        bl1_zabsval2( alpha1, &absv );
        bl1_zinvscals( &absv, epsilon1 );

        bl1_zscals( epsilon1, alpha1 );
        alpha1->imag = *buff_0;

        if ( i < ( m - 1 ) )
        {
          dcomplex* beta2 = buff_b + (i  )*inc_b;
          bl1_zscals( epsilon1, beta2 );
        }
      }

      break;
    }
  }

  return FLA_SUCCESS;
}
FLA_Error FLA_Nrm2_external( FLA_Obj x, FLA_Obj norm_x )
{
  FLA_Datatype datatype;
  int          num_elem;
  int          inc_x;

  if ( FLA_Check_error_level() == FLA_FULL_ERROR_CHECKING )
    FLA_Nrm2_check( x, norm_x );

  if ( FLA_Obj_has_zero_dim( x ) )
  {
    FLA_Set( FLA_ZERO, norm_x );
    return FLA_SUCCESS;
  }

  datatype = FLA_Obj_datatype( x );

  inc_x    = FLA_Obj_vector_inc( x );
  num_elem = FLA_Obj_vector_dim( x );

  switch ( datatype )
  {
    case FLA_FLOAT:
    {
      float* buff_x      = ( float* ) FLA_FLOAT_PTR( x );
      float* buff_norm_x = ( float* ) FLA_FLOAT_PTR( norm_x );

      bli_snrm2( num_elem, buff_x, inc_x, buff_norm_x );
      break;
    }

    case FLA_DOUBLE:
    {
      double* buff_x      = ( double* ) FLA_DOUBLE_PTR( x );
      double* buff_norm_x = ( double* ) FLA_DOUBLE_PTR( norm_x );

      bli_dnrm2( num_elem, buff_x, inc_x, buff_norm_x );
      break;
    }

    case FLA_COMPLEX:
    {
      scomplex* buff_x      = ( scomplex* ) FLA_COMPLEX_PTR( x );
      float*    buff_norm_x = ( float* ) FLA_COMPLEX_PTR( norm_x );

      bli_cnrm2( num_elem, buff_x, inc_x, buff_norm_x );
      break;
    }

    case FLA_DOUBLE_COMPLEX:
    {
      dcomplex* buff_x      = ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( x );
      double*   buff_norm_x = ( double* ) FLA_DOUBLE_COMPLEX_PTR( norm_x );

      bli_znrm2( num_elem, buff_x, inc_x, buff_norm_x );
      break;
    }
  }

  return FLA_SUCCESS;
}
FLA_Error FLA_Househ3UD_UT( FLA_Obj chi_0, FLA_Obj x1, FLA_Obj y2, FLA_Obj tau )
/*
  Compute an up-and-downdating UT Householder transformation

        / 1 0 0 \              / 1 0  0 \ /  1 \
    H = | 0 I 0 |  -  inv(tau) | 0 I  0 | | u1 | ( 1  u1'  v2' )
        \ 0 0 I /              \ 0 0 -I / \ v2 /

  by computing tau, u1, and v2 such that the following is satisfied:

        / chi_0 \   / alpha \
    H * |  x1   | = |   0   |
        \  y2   /   \   0   /

  where

    alpha  = - lambda * chi_0 / | chi_0 |
    lambda = sqrt( conj(chi_0) chi_0 + x1' x1 - y2' y2 )

        / chi_0 \
    x = |  x1   |
        \  y2   /

    tau = ( 1 + u1' u1 - v2' v2 ) / 2
    u1  =  x1 / ( chi_0 - alpha )
    v2  = -y2 / ( chi_0 - alpha )

  Upon completion, alpha, u1, and v2 have overwritten objects chi_0, x1,
  and y2, respectively.

  -FGVZ
*/
{
  FLA_Datatype datatype;
  int          m_x1;
  int          m_y2;
  int          inc_x1;
  int          inc_y2;

  datatype = FLA_Obj_datatype( x1 );

  m_x1   = FLA_Obj_vector_dim( x1 );
  m_y2   = FLA_Obj_vector_dim( y2 );

  inc_x1 = FLA_Obj_vector_inc( x1 );
  inc_y2 = FLA_Obj_vector_inc( y2 );

  if ( FLA_Check_error_level() >= FLA_MIN_ERROR_CHECKING )
    FLA_Househ3UD_UT_check( chi_0, x1, y2, tau );

  switch ( datatype )
  {
    case FLA_FLOAT:
    {
      float* chi_0_p = ( float* ) FLA_FLOAT_PTR( chi_0 );
      float* x1_p    = ( float* ) FLA_FLOAT_PTR( x1 );
      float* y2_p    = ( float* ) FLA_FLOAT_PTR( y2 );
      float* tau_p   = ( float* ) FLA_FLOAT_PTR( tau );

      FLA_Househ3UD_UT_ops( m_x1,
                            m_y2,
                            chi_0_p,
                            x1_p, inc_x1,
                            y2_p, inc_y2,
                            tau_p );
      break;
    }

    case FLA_DOUBLE:
    {
      double* chi_0_p = ( double* ) FLA_DOUBLE_PTR( chi_0 );
      double* x1_p    = ( double* ) FLA_DOUBLE_PTR( x1 );
      double* y2_p    = ( double* ) FLA_DOUBLE_PTR( y2 );
      double* tau_p   = ( double* ) FLA_DOUBLE_PTR( tau );

      FLA_Househ3UD_UT_opd( m_x1,
                            m_y2,
                            chi_0_p,
                            x1_p, inc_x1,
                            y2_p, inc_y2,
                            tau_p );
      break;
    }

    case FLA_COMPLEX:
    {
      scomplex* chi_0_p = ( scomplex* ) FLA_COMPLEX_PTR( chi_0 );
      scomplex* x1_p    = ( scomplex* ) FLA_COMPLEX_PTR( x1 );
      scomplex* y2_p    = ( scomplex* ) FLA_COMPLEX_PTR( y2 );
      scomplex* tau_p   = ( scomplex* ) FLA_COMPLEX_PTR( tau );

      FLA_Househ3UD_UT_opc( m_x1,
                            m_y2,
                            chi_0_p,
                            x1_p, inc_x1,
                            y2_p, inc_y2,
                            tau_p );
      break;
    }

    case FLA_DOUBLE_COMPLEX:
    {
      dcomplex* chi_0_p = ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( chi_0 );
      dcomplex* x1_p    = ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( x1 );
      dcomplex* y2_p    = ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( y2 );
      dcomplex* tau_p   = ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( tau );

      FLA_Househ3UD_UT_opz( m_x1,
                            m_y2,
                            chi_0_p,
                            x1_p, inc_x1,
                            y2_p, inc_y2,
                            tau_p );
      break;
    }
  }

  return FLA_SUCCESS;
}
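/*
  A quick numerical sanity check of the formulas above for the real scalar
  case m_x1 = m_y2 = 1 (a standalone sketch, not part of the library).
  With x = ( chi_0, x1, y2 ), applying H should produce ( alpha, 0, 0 );
  note the sign flip on the y2 component coming from the diag( 1, I, -I )
  block.
*/
#include <math.h>
#include <stdio.h>

int main( void )
{
  double chi0 = 3.0, x1 = 4.0, y2 = 2.0;

  double lambda = sqrt( chi0*chi0 + x1*x1 - y2*y2 );
  double alpha  = -lambda * chi0 / fabs( chi0 );
  double u1     =  x1 / ( chi0 - alpha );
  double v2     = -y2 / ( chi0 - alpha );
  double tau    = ( 1.0 + u1*u1 - v2*v2 ) / 2.0;

  // H*x = x - inv(tau) * diag(1,1,-1) * (1,u1,v2)^T * ( chi0 + u1*x1 + v2*y2 )
  double inner = chi0 + u1*x1 + v2*y2;
  double h0 = chi0 - inner / tau;      // expect alpha
  double h1 = x1   - u1 * inner / tau; // expect 0
  double h2 = y2   + v2 * inner / tau; // expect 0 (sign from the -I block)

  printf( "alpha = % f, H*x = ( % f, % f, % f )\n", alpha, h0, h1, h2 );

  return 0;
}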
FLA_Error FLA_Apply_pivots_rt_opt_var1( FLA_Obj p, FLA_Obj A )
{
  FLA_Datatype datatype;
  int          m_A;
  int          rs_A, cs_A;
  int          inc_p;
  int          k1_0, k2_0;

  datatype = FLA_Obj_datatype( A );

  m_A  = FLA_Obj_length( A );

  // Swap the strides; FLA_Apply_pivots_ln_ops_var1 already accounts for
  // the memory access pattern, so the swapped view lets the ln kernel
  // pivot the columns of A.
  cs_A = FLA_Obj_row_stride( A );
  rs_A = FLA_Obj_col_stride( A );

  // Use minus increment of the ln version.
  inc_p = FLA_Obj_vector_inc( p );

  // Use zero-based indices.
  k1_0 = 0;
  k2_0 = ( int ) FLA_Obj_vector_dim( p ) - 1;

  switch ( datatype )
  {
    case FLA_INT:
    {
      int* buff_A = FLA_INT_PTR( A );
      int* buff_p = FLA_INT_PTR( p );

      FLA_Apply_pivots_ln_opi_var1( m_A, buff_A, rs_A, cs_A,
                                    k1_0, k2_0, buff_p, inc_p );
      break;
    }

    case FLA_FLOAT:
    {
      float* buff_A = FLA_FLOAT_PTR( A );
      int*   buff_p = FLA_INT_PTR( p );

      FLA_Apply_pivots_ln_ops_var1( m_A, buff_A, rs_A, cs_A,
                                    k1_0, k2_0, buff_p, inc_p );
      break;
    }

    case FLA_DOUBLE:
    {
      double* buff_A = FLA_DOUBLE_PTR( A );
      int*    buff_p = FLA_INT_PTR( p );

      FLA_Apply_pivots_ln_opd_var1( m_A, buff_A, rs_A, cs_A,
                                    k1_0, k2_0, buff_p, inc_p );
      break;
    }

    case FLA_COMPLEX:
    {
      scomplex* buff_A = FLA_COMPLEX_PTR( A );
      int*      buff_p = FLA_INT_PTR( p );

      FLA_Apply_pivots_ln_opc_var1( m_A, buff_A, rs_A, cs_A,
                                    k1_0, k2_0, buff_p, inc_p );
      break;
    }

    case FLA_DOUBLE_COMPLEX:
    {
      dcomplex* buff_A = FLA_DOUBLE_COMPLEX_PTR( A );
      int*      buff_p = FLA_INT_PTR( p );

      FLA_Apply_pivots_ln_opz_var1( m_A, buff_A, rs_A, cs_A,
                                    k1_0, k2_0, buff_p, inc_p );
      break;
    }
  }

  return FLA_SUCCESS;
}
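/*
  Why swapping the strides works (an illustrative standalone sketch, not
  libflame API): element (i,j) of a matrix stored with row stride rs and
  column stride cs lives at buff[ i*rs + j*cs ], so exchanging rs and cs
  presents the same buffer as its transpose. A row-pivoting kernel applied
  to the swapped view therefore pivots the columns of the original matrix.
*/
#include <stdio.h>

int main( void )
{
  // 2x2 column-major matrix: rs = 1, cs = 2.
  double A[4] = { 1.0, 2.0, 3.0, 4.0 };
  int    rs = 1, cs = 2;

  printf( "A(0,1)   = %g\n", A[ 0*rs + 1*cs ] ); // 3.0
  printf( "A^T(0,1) = %g\n", A[ 0*cs + 1*rs ] ); // 2.0, i.e. A(1,0)

  return 0;
}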
FLA_Error FLA_Bidiag_apply_V_external( FLA_Side side, FLA_Trans trans, FLA_Obj A, FLA_Obj t, FLA_Obj B )
{
  int info = 0;
#ifdef FLA_ENABLE_EXTERNAL_LAPACK_INTERFACES
  FLA_Datatype datatype;
  //int          m_A, n_A;
  int          m_B, n_B;
  int          cs_A;
  int          cs_B;
  int          k_t;
  int          lwork;
  FLA_Obj      work;
  char         blas_side;
  char         blas_vect = 'P'; // We apply V (the right-hand factor), not Q.
  char         blas_trans;
  int          i;

  //if ( FLA_Check_error_level() == FLA_FULL_ERROR_CHECKING )
  //  FLA_Apply_Q_check( side, trans, storev, A, t, B );

  if ( FLA_Obj_has_zero_dim( A ) ) return FLA_SUCCESS;

  datatype = FLA_Obj_datatype( A );

  //m_A  = FLA_Obj_length( A );
  //n_A  = FLA_Obj_width( A );
  cs_A = FLA_Obj_col_stride( A );

  m_B  = FLA_Obj_length( B );
  n_B  = FLA_Obj_width( B );
  cs_B = FLA_Obj_col_stride( B );

  if ( blas_vect == 'Q' ) k_t = FLA_Obj_vector_dim( t );
  else                    k_t = FLA_Obj_vector_dim( t ) + 1;

  if ( FLA_Obj_is_real( A ) && trans == FLA_CONJ_TRANSPOSE )
    trans = FLA_TRANSPOSE;

  FLA_Param_map_flame_to_netlib_side( side, &blas_side );
  FLA_Param_map_flame_to_netlib_trans( trans, &blas_trans );

  // Make a workspace query the first time through. This will provide us
  // with an ideal workspace size based on an internal block size.
  lwork = -1;
  FLA_Obj_create( datatype, 1, 1, 0, 0, &work );

  for ( i = 0; i < 2; ++i )
  {
    if ( i == 1 )
    {
      // Grab the queried ideal workspace size from the work array, free the
      // work object, and then re-allocate the workspace with the ideal size.
      if      ( datatype == FLA_FLOAT || datatype == FLA_COMPLEX )
        lwork = ( int ) *FLA_FLOAT_PTR( work );
      else if ( datatype == FLA_DOUBLE || datatype == FLA_DOUBLE_COMPLEX )
        lwork = ( int ) *FLA_DOUBLE_PTR( work );

      FLA_Obj_free( &work );
      FLA_Obj_create( datatype, lwork, 1, 0, 0, &work );
    }

    switch ( datatype )
    {
      case FLA_FLOAT:
      {
        float* buff_A    = ( float* ) FLA_FLOAT_PTR( A );
        float* buff_t    = ( float* ) FLA_FLOAT_PTR( t );
        float* buff_B    = ( float* ) FLA_FLOAT_PTR( B );
        float* buff_work = ( float* ) FLA_FLOAT_PTR( work );

        F77_sormbr( &blas_vect, &blas_side, &blas_trans,
                    &m_B, &n_B, &k_t,
                    buff_A, &cs_A,
                    buff_t,
                    buff_B, &cs_B,
                    buff_work, &lwork,
                    &info );
        break;
      }

      case FLA_DOUBLE:
      {
        double* buff_A    = ( double* ) FLA_DOUBLE_PTR( A );
        double* buff_t    = ( double* ) FLA_DOUBLE_PTR( t );
        double* buff_B    = ( double* ) FLA_DOUBLE_PTR( B );
        double* buff_work = ( double* ) FLA_DOUBLE_PTR( work );

        F77_dormbr( &blas_vect, &blas_side, &blas_trans,
                    &m_B, &n_B, &k_t,
                    buff_A, &cs_A,
                    buff_t,
                    buff_B, &cs_B,
                    buff_work, &lwork,
                    &info );
        break;
      }

      case FLA_COMPLEX:
      {
        scomplex* buff_A    = ( scomplex* ) FLA_COMPLEX_PTR( A );
        scomplex* buff_t    = ( scomplex* ) FLA_COMPLEX_PTR( t );
        scomplex* buff_B    = ( scomplex* ) FLA_COMPLEX_PTR( B );
        scomplex* buff_work = ( scomplex* ) FLA_COMPLEX_PTR( work );

        F77_cunmbr( &blas_vect, &blas_side, &blas_trans,
                    &m_B, &n_B, &k_t,
                    buff_A, &cs_A,
                    buff_t,
                    buff_B, &cs_B,
                    buff_work, &lwork,
                    &info );
        break;
      }

      case FLA_DOUBLE_COMPLEX:
      {
        dcomplex* buff_A    = ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( A );
        dcomplex* buff_t    = ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( t );
        dcomplex* buff_B    = ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( B );
        dcomplex* buff_work = ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( work );

        F77_zunmbr( &blas_vect, &blas_side, &blas_trans,
                    &m_B, &n_B, &k_t,
                    buff_A, &cs_A,
                    buff_t,
                    buff_B, &cs_B,
                    buff_work, &lwork,
                    &info );
        break;
      }
    }
  }

  FLA_Obj_free( &work );
#else
  FLA_Check_error_code( FLA_EXTERNAL_LAPACK_NOT_IMPLEMENTED );
#endif

  return info;
}
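/*
  The two-pass workspace-query idiom used above, in isolation (a sketch;
  query_or_compute is a hypothetical stand-in for an LAPACK-style routine
  such as F77_dormbr): calling with lwork = -1 makes the routine write its
  optimal workspace size into work[0] instead of computing anything, after
  which the caller allocates that much workspace and calls again.
*/
#include <stdio.h>
#include <stdlib.h>

// Hypothetical stand-in for an LAPACK-style routine.
static void query_or_compute( double* work, int lwork, int* info )
{
  *info = 0;
  if ( lwork == -1 ) { work[0] = 64.0; return; } // report optimal size only
  /* ... perform the real computation using work[0..lwork-1] ... */
}

int main( void )
{
  int     info;
  int     lwork = -1;
  double  query;
  double* work;

  query_or_compute( &query, lwork, &info ); // pass 1: workspace query
  lwork = ( int ) query;
  work  = ( double* ) malloc( lwork * sizeof( double ) );
  query_or_compute( work, lwork, &info );   // pass 2: real computation

  printf( "optimal lwork = %d\n", lwork );
  free( work );

  return 0;
}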
FLA_Error FLA_Bsvd_sinval_v_opt_var1( FLA_Obj tol, FLA_Obj thresh, FLA_Obj G, FLA_Obj H, FLA_Obj d, FLA_Obj e, FLA_Obj k )
{
  FLA_Datatype datatype;
  int          m_A, n_GH;
  int          rs_G, cs_G;
  int          rs_H, cs_H;
  int          inc_d;
  int          inc_e;

  datatype = FLA_Obj_datatype( d );

  m_A   = FLA_Obj_vector_dim( d );
  n_GH  = FLA_Obj_width( G );

  rs_G  = FLA_Obj_row_stride( G );
  cs_G  = FLA_Obj_col_stride( G );

  rs_H  = FLA_Obj_row_stride( H );
  cs_H  = FLA_Obj_col_stride( H );

  inc_d = FLA_Obj_vector_inc( d );
  inc_e = FLA_Obj_vector_inc( e );

  switch ( datatype )
  {
    case FLA_FLOAT:
    {
      float*    buff_tol    = FLA_FLOAT_PTR( tol );
      float*    buff_thresh = FLA_FLOAT_PTR( thresh );
      scomplex* buff_G      = FLA_COMPLEX_PTR( G );
      scomplex* buff_H      = FLA_COMPLEX_PTR( H );
      float*    buff_d      = FLA_FLOAT_PTR( d );
      float*    buff_e      = FLA_FLOAT_PTR( e );
      int*      buff_k      = FLA_INT_PTR( k );

      FLA_Bsvd_sinval_v_ops_var1( m_A,
                                  n_GH,
                                  9,
                                  *buff_tol,
                                  *buff_thresh,
                                  buff_G, rs_G, cs_G,
                                  buff_H, rs_H, cs_H,
                                  buff_d, inc_d,
                                  buff_e, inc_e,
                                  buff_k );
      break;
    }

    case FLA_DOUBLE:
    {
      double*   buff_tol    = FLA_DOUBLE_PTR( tol );
      double*   buff_thresh = FLA_DOUBLE_PTR( thresh );
      dcomplex* buff_G      = FLA_DOUBLE_COMPLEX_PTR( G );
      dcomplex* buff_H      = FLA_DOUBLE_COMPLEX_PTR( H );
      double*   buff_d      = FLA_DOUBLE_PTR( d );
      double*   buff_e      = FLA_DOUBLE_PTR( e );
      int*      buff_k      = FLA_INT_PTR( k );

      FLA_Bsvd_sinval_v_opd_var1( m_A,
                                  n_GH,
                                  9,
                                  *buff_tol,
                                  *buff_thresh,
                                  buff_G, rs_G, cs_G,
                                  buff_H, rs_H, cs_H,
                                  buff_d, inc_d,
                                  buff_e, inc_e,
                                  buff_k );
      break;
    }
  }

  return FLA_SUCCESS;
}
FLA_Error FLA_Fill_with_logarithmic_dist( FLA_Obj alpha, FLA_Obj x )
{
  FLA_Obj      lT, l0,
               lB, lambda1,
                   l2;
  FLA_Obj      l, k, alpha2;
  FLA_Datatype dt_real;
  dim_t        n_x;

  if ( FLA_Check_error_level() >= FLA_MIN_ERROR_CHECKING )
    FLA_Fill_with_logarithmic_dist_check( alpha, x );

  dt_real = FLA_Obj_datatype_proj_to_real( x );
  n_x     = FLA_Obj_vector_dim( x );

  // Create a local counter to increment as we create the distribution.
  FLA_Obj_create( dt_real, 1,   1, 0, 0, &k );

  // Create a local vector l. We will work with this vector, which is
  // the same length as x, so that we can use vertical partitioning.
  FLA_Obj_create( dt_real, n_x, 1, 0, 0, &l );

  // Create a local real scalar alpha2 of the same precision as alpha.
  // Then copy alpha to alpha2, which will convert the complex value to
  // real, if necessary (i.e., if alpha is complex).
  FLA_Obj_create( dt_real, 1,   1, 0, 0, &alpha2 );
  FLA_Copy( alpha, alpha2 );

  // Initialize k to 0.
  FLA_Set( FLA_ZERO, k );

  FLA_Part_2x1( l,    &lT,
                      &lB,            0, FLA_TOP );

  while ( FLA_Obj_length( lB ) > 0 )
  {
    FLA_Repart_2x1_to_3x1( lT,                &l0,
                        /* ** */            /* ******* */
                                              &lambda1,
                           lB,                &l2,        1, FLA_BOTTOM );

    /*------------------------------------------------------------*/

    // lambda1 = alpha^k;
    FLA_Pow( alpha2, k, lambda1 );

    // k = k + 1;
    FLA_Mult_add( FLA_ONE, FLA_ONE, k );

    /*------------------------------------------------------------*/

    FLA_Cont_with_3x1_to_2x1( &lT,                l0,
                                                  lambda1,
                            /* ** */           /* ******* */
                              &lB,                l2,     FLA_TOP );
  }

  // Normalize by the last element so the largest entry is one.
  FLA_Part_2x1( l,    &lT,
                      &lB,            1, FLA_BOTTOM );

  FLA_Inv_scal( lB, l );

  // Overwrite x with the distribution we created in l.
  FLA_Copy( l, x );

  FLA_Obj_free( &l );
  FLA_Obj_free( &k );
  FLA_Obj_free( &alpha2 );

  return FLA_SUCCESS;
}
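/*
  The same distribution computed with plain scalars (a sketch, not
  libflame API; fill_logarithmic is a hypothetical helper): entry i is
  alpha^i, and normalizing by the last entry yields values log-spaced
  from alpha^-(n-1) up to 1.
*/
#include <math.h>

static void fill_logarithmic( double alpha, int n, double* x )
{
  double last;
  int    i;

  for ( i = 0; i < n; ++i )
    x[i] = pow( alpha, ( double ) i ); // lambda_i = alpha^i

  last = x[n-1];
  for ( i = 0; i < n; ++i )
    x[i] /= last;                      // normalize by the last element
}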