np::ndarray vcl_structured_matrix_to_ndarray(MATRIXTYPE& m) {
  // TODO: THIS IS VERY CRUDE!
  // Convert a structured (e.g. Toeplitz/Hankel) matrix to a NumPy ndarray by
  // round-tripping it through a host-side dense matrix into a device-side
  // dense matrix, then delegating to the dense conversion routine.
  typedef typename MATRIXTYPE::value_type::value_type SCALARTYPE;
  const std::size_t rows = m.size1();
  const std::size_t cols = m.size2();
  ublas::matrix<SCALARTYPE> host_dense(rows, cols);
  vcl::matrix<SCALARTYPE> device_dense(rows, cols);
  vcl::copy(m, host_dense);              // structured -> host dense
  vcl::copy(host_dense, device_dense);   // host dense -> device dense
  return vcl_matrix_to_ndarray<vcl::matrix<SCALARTYPE>, SCALARTYPE>(device_dense);
}
void copy(toeplitz_matrix<SCALARTYPE, ALIGNMENT> const & tep_src, MATRIXTYPE & com_dst) {
  // Expand a Toeplitz matrix into a full (common) matrix of the same size.
  const std::size_t size = tep_src.size1();
  assert(size == com_dst.size1() && bool("Size mismatch"));
  assert(size == com_dst.size2() && bool("Size mismatch"));
  // The 2*size-1 distinct diagonal values, ordered from the bottom-left
  // diagonal (index 0) up to the top-right diagonal (index 2*size-2).
  std::vector<SCALARTYPE> diagonals(tep_src.size1() * 2 - 1);
  copy(tep_src, diagonals);
  for (std::size_t row = 0; row < size; ++row) {
    for (std::size_t col = 0; col < size; ++col) {
      // Entry (row, col) lies on diagonal (col - row); shift by size-1 so
      // the index into the diagonal vector is non-negative.
      const int diag_index =
          static_cast<int>(col) - static_cast<int>(row) + static_cast<int>(size) - 1;
      com_dst(row, col) = diagonals[diag_index];
    }
  }
}
void copy(MATRIXTYPE const & com_src, toeplitz_matrix<SCALARTYPE, ALIGNMENT>& tep_dst) {
  // Compress a full (common) matrix into a Toeplitz matrix by sampling its
  // first column and first row, which together hold all 2*size-1 diagonals.
  const std::size_t size = tep_dst.size1();
  assert(size == com_src.size1() && bool("Size mismatch"));
  assert(size == com_src.size2() && bool("Size mismatch"));
  std::vector<SCALARTYPE> diagonals(2 * size - 1);
  // First column, bottom to top, fills diagonal slots [0, size-1]:
  // slot k holds entry (size-1-k, 0).
  for (std::size_t k = 0; k < size; ++k)
    diagonals[k] = com_src(size - 1 - k, 0);
  // First row (excluding the corner already stored) fills slots [size, 2*size-2].
  for (std::size_t j = 1; j < size; ++j)
    diagonals[size + j - 1] = com_src(0, j);
  copy(diagonals, tep_dst);
}
np::ndarray vcl_matrix_to_ndarray(const MATRIXTYPE& m) {
  // Copy a ViennaCL dense matrix from device memory into a NumPy ndarray.
  //
  // Fix for the original TODO ("How to have Python keep track of this
  // memory, to avoid a leak?"): instead of malloc()ing a buffer that nothing
  // ever frees, allocate the backing storage as a NumPy-owned flat array and
  // pass it as the owner of the strided view returned by from_data. Python's
  // reference counting then keeps the buffer alive exactly as long as the
  // view exists, and frees it afterwards.
  const std::size_t count = m.internal_size1() * m.internal_size2();
  const std::size_t size = count * sizeof(SCALARTYPE);
  np::dtype dt = np::dtype::get_builtin<SCALARTYPE>();
  // Flat NumPy-owned buffer covering the whole internal (padded) storage.
  np::ndarray buffer = np::empty(bp::make_tuple(count), dt);
  SCALARTYPE* data = reinterpret_cast<SCALARTYPE*>(buffer.get_data());
  // Read the whole matrix (including any padding) from the device.
  vcl::backend::memory_read(m.handle(), 0, size, data);
  bp::tuple shape = bp::make_tuple(m.size1(), m.size2());
  // Delegate determination of strides and start offset to function templates
  // (these account for row/column-major layout and internal padding).
  bp::tuple strides = get_strides<SCALARTYPE>(m);
  np::ndarray array = np::from_data(data + get_offset<SCALARTYPE>(m), dt,
                                    shape, strides, buffer);
  return array;
}