void BasicColumn<T>::move_last_over(std::size_t target_row_ndx, std::size_t last_row_ndx)
{
    TIGHTDB_ASSERT(target_row_ndx < last_row_ndx);
    TIGHTDB_ASSERT(last_row_ndx + 1 == size());

    // Overwrite the target row with the value of the last row, then
    // drop the now-duplicated last row.
    T last_value = get(last_row_ndx);
    set(target_row_ndx, last_value); // Throws

    bool is_last = true;
    erase(last_row_ndx, is_last); // Throws
}
template<class T> void BasicArray<T>::truncate(std::size_t size)
{
    TIGHTDB_ASSERT(is_attached());
    TIGHTDB_ASSERT(size <= m_size);

    // Make sure we own a writable copy before mutating
    copy_on_write(); // Throws

    // Shrink the element count in both the on-disk header and the
    // accessor. The allocated capacity is deliberately left as-is.
    set_header_size(size);
    m_size = size;
}
void BasicColumn<T>::erase(std::size_t ndx, bool is_last)
{
    TIGHTDB_ASSERT(ndx < size());
    TIGHTDB_ASSERT(is_last == (ndx == size()-1));

    // When the root of the B+-tree is a leaf, erase directly in it.
    bool root_is_leaf = !m_array->is_inner_bptree_node();
    if (root_is_leaf) {
        static_cast<BasicArray<T>*>(m_array)->erase(ndx); // Throws
        return;
    }

    // General tree case. Passing `npos` tells the tree code that the
    // element being erased is the last one.
    size_t ndx_2 = is_last ? npos : ndx;
    EraseLeafElem erase_leaf_elem(*this);
    Array::erase_bptree_elem(m_array, ndx_2, erase_leaf_elem); // Throws
}
// Insert `value` at index `ndx` of this B+-tree leaf. If the leaf has
// room, the element is inserted in place and zero is returned. If the
// leaf is full, it is split: a new sibling leaf is created, elements
// are distributed between the two, and the new leaf's ref is returned
// so the caller can patch the parent node. `state` receives the split
// bookkeeping (`m_split_offset`, `m_split_size`) consumed by the
// caller.
ref_type BasicArray<T>::bptree_leaf_insert(size_t ndx, T value, TreeInsertBase& state)
{
    size_t leaf_size = size();
    TIGHTDB_ASSERT(leaf_size <= TIGHTDB_MAX_BPNODE_SIZE);
    // An out-of-range `ndx` (e.g. npos) means "append"
    if (leaf_size < ndx)
        ndx = leaf_size;
    if (TIGHTDB_LIKELY(leaf_size < TIGHTDB_MAX_BPNODE_SIZE)) {
        insert(ndx, value);
        return 0; // Leaf was not split
    }

    // Split leaf node
    BasicArray<T> new_leaf(get_alloc());
    new_leaf.create(); // Throws
    if (ndx == leaf_size) {
        // Appending: the new element becomes the sole occupant of the
        // new (right) sibling; this leaf is left untouched.
        new_leaf.add(value);
        state.m_split_offset = ndx;
    }
    else {
        // FIXME: Could be optimized by first resizing the target
        // array, then copy elements with std::copy().
        // Move the tail [ndx, leaf_size) into the sibling, then place
        // the new element at the end of this (left) leaf.
        for (size_t i = ndx; i != leaf_size; ++i)
            new_leaf.add(get(i));
        truncate(ndx);
        add(value);
        state.m_split_offset = ndx + 1;
    }
    // Total number of elements across both leaves after the insert
    state.m_split_size = leaf_size + 1;
    return new_leaf.get_ref();
}
void BasicArray<T>::insert(std::size_t ndx, T value)
{
    TIGHTDB_ASSERT(ndx <= m_size);

    // Ensure we hold a private, writable copy of the memory
    copy_on_write(); // Throws

    // Grow the allocation to hold one more element
    alloc(m_size+1, m_width); // Throws

    // Slide the tail one slot towards the end to open a gap at `ndx`
    if (ndx != m_size) {
        char* base = reinterpret_cast<char*>(m_data);
        char* first = base + ndx*m_width;
        char* last = base + m_size*m_width;
        char* new_last = last + m_width;
        std::copy_backward(first, last, new_last);
    }

    // Store the new element in the gap
    T* slot = reinterpret_cast<T*>(m_data) + ndx;
    *slot = value;

    ++m_size;
}
template<class T> inline void BasicColumn<T>::insert(std::size_t row_ndx, T value)
{
    std::size_t column_size = this->size(); // Slow
    TIGHTDB_ASSERT(row_ndx <= column_size);

    // The tree insertion code takes `npos` to mean "append"
    bool is_append = row_ndx == column_size;
    std::size_t row_ndx_2 = is_append ? tightdb::npos : row_ndx;
    std::size_t num_rows = 1;
    do_insert(row_ndx_2, value, num_rows); // Throws
}
double BasicArray<T>::sum(std::size_t begin, std::size_t end) const
{
    if (end == npos)
        end = m_size;
    TIGHTDB_ASSERT(begin <= m_size && end <= m_size && begin <= end);

    // Left-to-right summation into a double accumulator (same fold
    // order and conversion as std::accumulate with double(0)).
    const T* data = reinterpret_cast<const T*>(m_data);
    double total = 0;
    for (std::size_t i = begin; i != end; ++i)
        total += data[i];
    return total;
}
std::size_t BasicArray<T>::count(T value, std::size_t begin, std::size_t end) const
{
    if (end == npos)
        end = m_size;
    TIGHTDB_ASSERT(begin <= m_size && end <= m_size && begin <= end);

    // Count matching elements in [begin, end)
    const T* data = reinterpret_cast<const T*>(m_data);
    std::size_t n = 0;
    for (const T* i = data + begin; i != data + end; ++i) {
        if (*i == value)
            ++n;
    }
    return n;
}
std::size_t BasicArray<T>::find(T value, std::size_t begin, std::size_t end) const
{
    if (end == npos)
        end = m_size;
    TIGHTDB_ASSERT(begin <= m_size && end <= m_size && begin <= end);

    // Linear scan for the first element equal to `value`; the returned
    // index is relative to the start of the array.
    const T* data = reinterpret_cast<const T*>(m_data);
    for (std::size_t i = begin; i != end; ++i) {
        if (data[i] == value)
            return i;
    }
    return not_found;
}
inline std::size_t BasicArray<T>::calc_aligned_byte_size(std::size_t size)
{
    std::size_t size_t_max = std::numeric_limits<std::size_t>::max();
    // Leave headroom so rounding up to 8-byte alignment cannot wrap
    std::size_t limit = size_t_max & ~size_t(7);
    if (size > (limit - header_size) / sizeof (T))
        throw std::runtime_error("Byte size overflow");
    size_t byte_size = header_size + size * sizeof (T);
    TIGHTDB_ASSERT(byte_size > 0);
    // Round up to the next multiple of 8
    return ((byte_size-1) | 7) + 1;
}
inline void BasicArray<T>::set(std::size_t ndx, T value)
{
    TIGHTDB_ASSERT(ndx < m_size);

    // Ensure we hold a private, writable copy of the memory
    copy_on_write(); // Throws

    // Overwrite the element in place
    reinterpret_cast<T*>(m_data)[ndx] = value;
}
// Copy the elements in [offset, offset+size) into a newly allocated
// array owned by `target_alloc`, and return its MemRef. The source
// array is left untouched.
MemRef BasicArray<T>::slice(std::size_t offset, std::size_t size, Allocator& target_alloc) const
{
    TIGHTDB_ASSERT(is_attached());
    // Guard against a caller-specified range that extends beyond the
    // end of this array; get(i) below would otherwise read out of
    // bounds. Written this way to avoid overflow in `offset + size`.
    TIGHTDB_ASSERT(offset <= m_size && size <= m_size - offset);

    // FIXME: This can be optimized as a single contiguous copy
    // operation.
    BasicArray slice(target_alloc);
    // Guard frees the new array if an exception escapes before we are
    // ready to hand over ownership to the caller.
    _impl::ShallowArrayDestroyGuard dg(&slice);
    slice.create(); // Throws
    size_t begin = offset;
    size_t end = offset + size;
    for (size_t i = begin; i != end; ++i) {
        T value = get(i);
        slice.add(value); // Throws
    }
    dg.release();
    return slice.get_mem();
}
bool BasicArray<T>::minmax(T& result, std::size_t begin, std::size_t end) const
{
    if (end == npos)
        end = m_size;
    if (m_size == 0)
        return false;
    TIGHTDB_ASSERT(begin < m_size && end <= m_size && begin < end);

    // Seed with the first element of the range, then fold in the rest
    T best = get(begin);
    for (std::size_t i = begin + 1; i < end; ++i) {
        T val = get(i);
        if (find_max) {
            if (val > best)
                best = val;
        }
        else {
            if (val < best)
                best = val;
        }
    }
    result = best;
    return true;
}
void BasicArray<T>::erase(std::size_t ndx)
{
    TIGHTDB_ASSERT(ndx < m_size);

    // Ensure we hold a private, writable copy of the memory
    copy_on_write(); // Throws

    // Close the gap by sliding the tail one slot towards the front
    bool is_last = ndx == m_size-1;
    if (!is_last) {
        char* base = reinterpret_cast<char*>(m_data);
        char* dst = base + ndx*m_width;
        const char* src = dst + m_width;
        const char* src_end = base + m_size*m_width;
        std::copy(src, src_end, dst);
    }

    // Update size (also in header)
    --m_size;
    set_header_size(m_size);
}