inline ref_type to_ref(int_fast64_t v) noexcept
{
    REALM_ASSERT_DEBUG(!util::int_cast_has_overflow<ref_type>(v));
    // Check that v is divisible by 8 (64-bit aligned).
    REALM_ASSERT_DEBUG(v % 8 == 0);
    return ref_type(v);
}
static T get_null_float()
{
    typename std::conditional<std::is_same<T, float>::value, uint32_t, uint64_t>::type i;
    int64_t double_nan = 0x7ff80000000000aa;
    i = std::is_same<T, float>::value ? 0x7fc000aa : static_cast<decltype(i)>(double_nan);
    T d = type_punning<T, decltype(i)>(i);
    REALM_ASSERT_DEBUG(std::isnan(static_cast<double>(d)));
    REALM_ASSERT_DEBUG(!is_signaling(d));
    return d;
}
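// Hedged sketch (not the implementation referenced above): type_punning<To, From>
// is assumed to reinterpret a value's bit pattern, which a standalone memcpy-based
// helper can illustrate. The name type_punning_sketch is hypothetical.
#include <cstdint>
#include <cstring>

template <class To, class From>
To type_punning_sketch(From from) noexcept
{
    static_assert(sizeof(To) == sizeof(From), "bit reinterpretation requires matching sizes");
    To to;
    std::memcpy(&to, &from, sizeof(To)); // copy the raw bits; well-defined, unlike a reinterpret_cast read
    return to;
}

// Example: 0x7fc000aa is a quiet-NaN bit pattern carrying payload 0xaa, which the
// code above uses as the in-memory null marker for float.
// float float_null = type_punning_sketch<float, uint32_t>(UINT32_C(0x7fc000aa));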
inline BinaryData BinaryColumn::get(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(ndx < size());
    if (root_is_leaf()) {
        bool is_big = m_array->get_context_flag();
        BinaryData ret;
        if (!is_big) {
            // Small blobs root leaf
            ArrayBinary* leaf = static_cast<ArrayBinary*>(m_array.get());
            ret = leaf->get(ndx);
        }
        else {
            // Big blobs root leaf
            ArrayBigBlobs* leaf = static_cast<ArrayBigBlobs*>(m_array.get());
            ret = leaf->get(ndx);
        }
        if (!m_nullable && ret.is_null())
            return BinaryData("", 0); // return empty string (non-null)
        return ret;
    }

    // Non-leaf root
    std::pair<MemRef, size_t> p = m_array->get_bptree_leaf(ndx);
    const char* leaf_header = p.first.m_addr;
    size_t ndx_in_leaf = p.second;
    Allocator& alloc = m_array->get_alloc();
    bool is_big = Array::get_context_flag_from_header(leaf_header);
    if (!is_big) {
        // Small blobs
        return ArrayBinary::get(leaf_header, ndx_in_leaf, alloc);
    }
    // Big blobs
    return ArrayBigBlobs::get(leaf_header, ndx_in_leaf, alloc);
}
inline void StringEnumColumn::add(StringData value)
{
    REALM_ASSERT_DEBUG(!(!m_nullable && value.is_null()));
    size_t row_ndx = realm::npos;
    size_t num_rows = 1;
    do_insert(row_ndx, value, num_rows); // Throws
}
/// If \a data is 'null', \a size must be zero.
OwnedData(const char* data, size_t size)
    : m_size(size)
{
    REALM_ASSERT_DEBUG(data || size == 0);
    if (data) {
        m_data = std::unique_ptr<char[]>(new char[size]);
        memcpy(m_data.get(), data, size);
    }
}
// Implementing pure virtual method of ColumnBase.
inline void BinaryColumn::move_last_row_over(size_t row_ndx, size_t prior_num_rows, bool)
{
    REALM_ASSERT_DEBUG(prior_num_rows == size());
    REALM_ASSERT(row_ndx < prior_num_rows);

    size_t last_row_ndx = prior_num_rows - 1;
    do_move_last_over(row_ndx, last_row_ndx); // Throws
}
inline void StringEnumColumn::insert(size_t row_ndx, StringData value)
{
    REALM_ASSERT_DEBUG(!(!m_nullable && value.is_null()));
    size_t column_size = this->size();
    REALM_ASSERT_3(row_ndx, <=, column_size);
    size_t num_rows = 1;
    bool is_append = row_ndx == column_size;
    do_insert(row_ndx, value, num_rows, is_append); // Throws
}
// Implementing pure virtual method of ColumnBase.
inline void StringColumn::insert_rows(size_t row_ndx, size_t num_rows_to_insert, size_t prior_num_rows)
{
    REALM_ASSERT_DEBUG(prior_num_rows == size());
    REALM_ASSERT(row_ndx <= prior_num_rows);

    StringData value = m_nullable ? realm::null() : StringData("");
    bool is_append = (row_ndx == prior_num_rows);
    do_insert(row_ndx, value, num_rows_to_insert, is_append); // Throws
}
void SyncUserMetadata::set_is_admin(bool is_admin)
{
    if (m_invalid) {
        return;
    }
    REALM_ASSERT_DEBUG(m_realm);
    m_realm->verify_thread();
    m_realm->begin_transaction();
    m_row.set_bool(m_schema.idx_user_is_admin, is_admin);
    m_realm->commit_transaction();
}
inline int_fast64_t from_ref(ref_type v) noexcept
{
    // Check that v is divisible by 8 (64-bit aligned).
    REALM_ASSERT_DEBUG(v % 8 == 0);
    static_assert(std::is_same<ref_type, size_t>::value,
                  "If ref_type changes, from_ref and to_ref should probably be updated");
    // Make sure that we preserve the bit pattern of the ref_type (without sign extension).
    return util::from_twos_compl<int_fast64_t>(uint_fast64_t(v));
}
// Implementing pure virtual method of ColumnBase.
inline void BinaryColumn::erase_rows(size_t row_ndx, size_t num_rows_to_erase, size_t prior_num_rows, bool)
{
    REALM_ASSERT_DEBUG(prior_num_rows == size());
    REALM_ASSERT(num_rows_to_erase <= prior_num_rows);
    REALM_ASSERT(row_ndx <= prior_num_rows - num_rows_to_erase);

    bool is_last = (row_ndx + num_rows_to_erase == prior_num_rows);
    // Erase from the back of the range towards the front so that the indices of
    // the rows still to be erased remain valid after each removal.
    for (size_t i = num_rows_to_erase; i > 0; --i) {
        size_t row_ndx_2 = row_ndx + i - 1;
        erase(row_ndx_2, is_last); // Throws
    }
}
// Implementing pure virtual method of ColumnBase.
inline void BinaryColumn::insert_rows(size_t row_ndx, size_t num_rows_to_insert, size_t prior_num_rows,
                                      bool insert_nulls)
{
    REALM_ASSERT_DEBUG(prior_num_rows == size());
    REALM_ASSERT(row_ndx <= prior_num_rows);
    REALM_ASSERT(!insert_nulls || m_nullable);

    size_t row_ndx_2 = (row_ndx == prior_num_rows ? realm::npos : row_ndx);
    BinaryData value = m_nullable ? BinaryData() : BinaryData("", 0);
    bool add_zero_term = false;
    do_insert(row_ndx_2, value, add_zero_term, num_rows_to_insert); // Throws
}
void SyncUserMetadata::set_state(util::Optional<std::string> server_url, util::Optional<std::string> user_token)
{
    if (m_invalid) {
        return;
    }
    REALM_ASSERT_DEBUG(m_realm);
    m_realm->verify_thread();
    m_realm->begin_transaction();
    m_row.set_string(m_schema.idx_user_token, *user_token);
    m_row.set_string(m_schema.idx_auth_server_url, *server_url);
    m_realm->commit_transaction();
}
inline ref_type to_ref(int_fast64_t v) noexcept
{
    // Check that v is divisible by 8 (64-bit aligned).
    REALM_ASSERT_DEBUG(v % 8 == 0);

    // C++11 standard, paragraph 4.7.2 [conv.integral]:
    // If the destination type is unsigned, the resulting value is the least unsigned integer congruent to the
    // source integer (modulo 2^n where n is the number of bits used to represent the unsigned type). [ Note: In a
    // two's complement representation, this conversion is conceptual and there is no change in the bit pattern (if
    // there is no truncation). - end note ]
    static_assert(std::is_unsigned<ref_type>::value,
                  "If ref_type changes, from_ref and to_ref should probably be updated");
    return ref_type(v);
}
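// Hedged sketch: round-tripping a ref through from_ref() and to_ref(). Both
// functions only reinterpret the value (plus the alignment/overflow checks above
// in debug builds), so the original ref comes back unchanged. The function name
// is illustrative; the Realm declarations above are assumed to be in scope.
inline void ref_round_trip_sketch()
{
    ref_type ref = 4096;                 // any 8-byte-aligned ref
    int_fast64_t as_int = from_ref(ref); // bit pattern preserved, no sign extension
    REALM_ASSERT_DEBUG(to_ref(as_int) == ref);
}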
inline ConstTableRef SubqueryExpression::get_dest_table() const
{
    REALM_ASSERT_DEBUG(link_chain.size() > 0);
    return link_chain.back().table;
}

inline size_t SubqueryExpression::get_dest_ndx() const
{
    REALM_ASSERT_DEBUG(link_chain.size() > 0);
    return link_chain.back().col_ndx;
}

inline bool SubqueryExpression::dest_type_is_backlink() const
{
    REALM_ASSERT_DEBUG(link_chain.size() > 0);
    return link_chain.back().is_backlink;
}

inline DataType SubqueryExpression::get_dest_type() const
{
    REALM_ASSERT_DEBUG(link_chain.size() > 0);
    return link_chain.back().col_type;
}
inline size_t SlabAlloc::get_baseline() const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    return m_baseline;
}
inline bool Allocator::is_read_only(ref_type ref) const noexcept
{
    REALM_ASSERT_DEBUG(ref != 0);
    REALM_ASSERT_DEBUG(m_baseline != 0); // Attached SlabAlloc
    // Refs below the baseline point into the attached (read-only) file mapping;
    // slab allocations are handed out at refs at or above the baseline.
    return ref < m_baseline;
}
inline int_fast64_t from_ref(ref_type v) noexcept
{
    // Check that v is divisible by 8 (64-bit aligned).
    REALM_ASSERT_DEBUG(v % 8 == 0);
    return util::from_twos_compl<int_fast64_t>(v);
}
/// If \a data is 'null', \a size must be zero.
OwnedData(std::unique_ptr<char[]> data, size_t size) noexcept
    : m_data(std::move(data))
    , m_size(size)
{
    REALM_ASSERT_DEBUG(m_data || m_size == 0);
}
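// Hedged usage sketch of the two OwnedData constructors shown in this listing:
// the const char* overload deep-copies the bytes, while the std::unique_ptr<char[]>
// overload adopts an existing heap buffer without copying. The function name is
// illustrative, and the Realm header declaring OwnedData is assumed to be included.
#include <memory>
#include <utility>

inline void owned_data_usage_sketch()
{
    const char bytes[] = {'a', 'b', 'c'};
    OwnedData copied(bytes, sizeof bytes); // allocates and copies 3 bytes

    std::unique_ptr<char[]> storage(new char[3]{'x', 'y', 'z'});
    OwnedData adopted(std::move(storage), 3); // takes ownership; no copy

    static_cast<void>(copied);
    static_cast<void>(adopted);
}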
// Safe cast from 64 to 32 bits on 32 bit architecture. Differs from to_ref() by not testing alignment and
// REF-bitflag.
inline size_t to_size_t(int_fast64_t v) noexcept
{
    REALM_ASSERT_DEBUG(!util::int_cast_has_overflow<size_t>(v));
    return size_t(v);
}
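// Hedged usage sketch: to_size_t() is a debug-checked narrowing cast. On a 32-bit
// build a value exceeding the size_t range would trip the assertion; in release
// builds it is a plain truncating conversion. The function name below is illustrative.
inline void to_size_t_usage_sketch()
{
    int_fast64_t byte_count = 4096;   // a value known to fit in size_t
    size_t n = to_size_t(byte_count); // passes the overflow check
    static_cast<void>(n);
}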