void gaussian_elimination< M, N, float_t >::compute( matrix< N, N, float_t >& A, vector< N, float_t >& b )
{
    // Solve the linear system A * x = b in place via LAPACK's xGESV
    // (LU factorization with partial pivoting).  On success the solution
    // overwrites b's storage; on failure an error is raised.
    p.a = A.array;
    p.b = b.array;
    lapack::xgesv_call( p );

    // info == 0 means the factorization and solve succeeded.
    if ( p.info == 0 )
        return;

    // Negative info: the |info|-th argument had an illegal value.
    // Positive info: U(info,info) is exactly zero, so no solution exists.
    if ( p.info < 0 )
        VMMLIB_ERROR( "invalid value in input matrix", VMMLIB_HERE );
    else
        VMMLIB_ERROR( "factor U is exactly singular, solution could not be computed.", VMMLIB_HERE );
}
void operator++()
{
    // Advance the iterator one element, spilling over into the next
    // frontal slice of the tensor3 when the current slice is exhausted.
    if ( _tensor3 == 0 )
    {
        VMMLIB_ERROR( "attempt to increase singular iterator", VMMLIB_HERE );
    }

    // Step within the current slice (no-op if already at its end).
    if ( _matrix_it != _matrix_it_end )
        ++_matrix_it;

    // If that exhausted the slice and more slices remain, move to the
    // start of the next frontal slice.
    const bool slice_done = ( _matrix_it == _matrix_it_end );
    if ( slice_done && _matrix_index + 1 < T::SLICES )
    {
        ++_matrix_index;
        //slice_type& slice_ = _tensor3->get_frontal_slice( _matrix_index );
        _matrix_it     = _tensor3->get_frontal_slice_fwd( _matrix_index ).begin();
        _matrix_it_end = _tensor3->get_frontal_slice_fwd( _matrix_index ).end();
    }
}
void compute_inv( const T& input, T& pseudoinverse_transposed, typename T::value_type tolerance = std::numeric_limits< typename T::value_type >::epsilon() ) { if ( _work_inv == 0 ) { _work_inv = new tmp_matrices_inv(); } // perform an SVD on the matrix to get the singular values svd_type_inv svd; matrix_nm_type& U = _work_inv->U; vec_m_type& sigmas = _work_inv->sigmas; matrix_mm_type& Vt = _work_inv->Vt; matrix_nm_type& in_data = _work_inv->input; in_data.cast_from( transpose(input) ); bool svd_ok = svd.compute( in_data, U, sigmas, Vt ); if ( ! svd_ok ) { VMMLIB_ERROR( "matrix compute_pseudoinverse - problem with lapack svd.", VMMLIB_HERE ); } /*std::cout << "U: " << std::endl << U << std::endl << " sigmas: " << std::endl << sigmas << std::endl << " Vt: " << std::endl << Vt << std::endl;*/ // get the number of significant singular, i.e., values which are above the tolerance value typename vector< T::ROWS, Tinternal >::const_iterator it = sigmas.begin() , it_end = sigmas.end(); size_t num_sigmas = 0; for( ; it != it_end; ++it ) { if ( *it >= tolerance ) ++num_sigmas; else return; } //compute inverse with all the significant inverse singular values matrix_mn_type& result = _work_inv->result; result.zero(); matrix_mn_type& tmp = _work_inv->tmp; sigmas.reciprocal_safe(); vec_m_type vt_i; vec_n_type u_i; blas_type_inv blas_dgemm1; if ( num_sigmas >= 1 ) { it = sigmas.begin(); for( size_t i = 0 ; i < num_sigmas && it != it_end; ++it, ++i ) { Vt.get_row( i, vt_i); U.get_column( i, u_i ); blas_dgemm1.compute_vv_outer( vt_i, u_i, tmp ); tmp *= *it ; result += tmp; } pseudoinverse_transposed.cast_from( result ); } else { pseudoinverse_transposed.zero(); //return matrix with zeros } }
// Fallback overload: selected when no LAPACK specialization of the SVD
// driver exists for float_t.  Always raises an error; the parameter is
// intentionally unnamed since it is never touched.
inline void svd_call( svd_params< float_t >& ) { VMMLIB_ERROR( "not implemented for this type.", VMMLIB_HERE ); }
inline void xgesv_call( xgesv_params< float_t >& p ) { VMMLIB_ERROR( "not implemented for this type.", VMMLIB_HERE ); }
VMML_TEMPLATE_STRING
// template< typename T_init>
tensor_stats
VMML_TEMPLATE_CLASSNAME::incremental_als(const t3_type& data_, u1_type& u1_, u2_type& u2_, u3_type& u3_, lambda_type& lambdas_, const size_t max_iterations_, const float tolerance_) {
    // Incremental CP (CANDECOMP/PARAFAC) decomposition: the R rank-one terms
    // are computed in NBLOCKS batches of R/NBLOCKS components each.  After
    // each batch, its reconstruction is subtracted from the data and the next
    // batch is fitted to the remaining residual.
    //
    // Outputs: factor matrices u1_, u2_, u3_ and per-component weights
    // lambdas_.  Returns the accumulated solver statistics over all batches.
    tensor_stats result;

    // R must split evenly into NBLOCKS batches.
    if (R % NBLOCKS != 0) {
        std::ostringstream convert1, convert2;
        convert1 << R;
        convert2 << NBLOCKS;
        VMMLIB_ERROR("In incremental CP, R = " + convert1.str() + ", NBLOCKS = " + convert2.str() + " (must be divisible)", VMMLIB_HERE);
    }

    // Heap-allocated scratch tensors (presumably too large for the stack).
    // NOTE(review): not exception-safe — if als() below throws, these leak.
    t3_coeff_type* approx_data = new t3_coeff_type;
    approx_data->zero();
    t3_coeff_type* residual_data = new t3_coeff_type;
    residual_data->cast_from(data_);

    // Per-batch factors (R/NBLOCKS columns each) ...
    lambda_tmp_type* lambdas_tmp = new lambda_tmp_type;
    lambdas_tmp->set(0);
    u1_tmp_type* u1_tmp = new u1_tmp_type;
    u2_tmp_type* u2_tmp = new u2_tmp_type;
    u3_tmp_type* u3_tmp = new u3_tmp_type;

    // ... and the full-R accumulated factors the batches are copied into.
    lambda_incr_type* lambdas_incr = new lambda_incr_type;
    lambdas_incr->set(0);
    u1_incr_type* u1_incr = new u1_incr_type;
    u1_incr->zero();
    u2_incr_type* u2_incr = new u2_incr_type;
    u2_incr->zero();
    u3_incr_type* u3_incr = new u3_incr_type;
    u3_incr->zero();

    // Single-column buffers used to move batch columns into the accumulators.
    u1_1col_type* u1_1col = new u1_1col_type;
    u2_1col_type* u2_1col = new u2_1col_type;
    u3_1col_type* u3_1col = new u3_1col_type;

    // HOPM/ALS solver for one batch of rank R/NBLOCKS.
    typedef t3_hopm < R / NBLOCKS, I1, I2, I3, T_coeff > hopm_type;

    for (size_t i = 0; i < NBLOCKS; ++i) {
#ifdef CP_LOG
        std::cout << "Incremental CP: block number '" << i << "'" << std::endl;
#endif
        //init all values to zero
        u1_tmp->zero();
        u2_tmp->zero();
        u3_tmp->zero();
        *lambdas_tmp = 0.0;
        approx_data->zero();

        // Fit a rank-(R/NBLOCKS) CP model to the current residual.
        result += hopm_type::als(*residual_data, *u1_tmp, *u2_tmp, *u3_tmp, *lambdas_tmp, typename hopm_type::init_hosvd(), max_iterations_, tolerance_);

        //set lambdas und us to appropriate position
        size_t r_incr = 0;
        T_coeff lambda_r = 0;
        for (size_t r = 0; r < R / NBLOCKS; ++r) {
            // Destination column in the full-R accumulated factors.
            r_incr = i * R / NBLOCKS + r;
            u1_tmp->get_column(r, *u1_1col);
            u1_incr->set_column(r_incr, *u1_1col);
            u2_tmp->get_column(r, *u2_1col);
            u2_incr->set_column(r_incr, *u2_1col);
            u3_tmp->get_column(r, *u3_1col);
            u3_incr->set_column(r_incr, *u3_1col);
            lambda_r = lambdas_tmp->at(r);
            lambdas_incr->at(r_incr) = lambda_r; //set lambda
        }

        // Subtract this batch's reconstruction so the next batch fits only
        // what is left unexplained.
        t3_hopm < R / NBLOCKS, I1, I2, I3, T_coeff >::reconstruct(*approx_data, *u1_tmp, *u2_tmp, *u3_tmp, *lambdas_tmp);
        *residual_data = *residual_data - *approx_data;
    }

    // Copy the accumulated factors into the caller's output types.
    u1_.cast_from(*u1_incr);
    u2_.cast_from(*u2_incr);
    u3_.cast_from(*u3_incr);
    lambdas_.cast_from(*lambdas_incr);

    delete u1_1col;
    delete u2_1col;
    delete u3_1col;
    delete u1_tmp;
    delete u2_tmp;
    delete u3_tmp;
    delete lambdas_tmp;
    delete u1_incr;
    delete u2_incr;
    delete u3_incr;
    delete lambdas_incr;
    delete residual_data;
    delete approx_data;
    return result;
}
// Fallback overload: selected when no BLAS specialization of the dot product
// exists for float_t.  Always raises an error; the parameter is intentionally
// unnamed since it is never touched.
inline float_t dot_call( dot_params< float_t >& )
{
    VMMLIB_ERROR( "not implemented for this type.", VMMLIB_HERE );
    // BUGFIX: the function is declared to return float_t but had no return
    // statement — flowing off the end of a value-returning function is
    // undefined behavior if VMMLIB_ERROR does not throw/abort, and it draws
    // a -Wreturn-type warning either way.  This return is unreachable when
    // VMMLIB_ERROR throws.
    return float_t( 0 );
}