/// Seek to the lower bound of `key` in this sequence-number index.
/// Valid keys are exactly [m_min, m_min + m_cnt - 1], with key == m_min + id
/// (see SeqNumIndex::insert). On success fills *id and *retKey.
/// @return  0 exact match, 1 clamped to the nearest stored key, -1 not found.
int seekLowerBound(fstring key, llong* id, valvec<byte>* retKey) override {
	assert(key.size() == sizeof(Int));
	if (key.size() != sizeof(Int)) {
		THROW_STD(invalid_argument, "key.size must be sizeof(Int)=%d", int(sizeof(Int)));
	}
	auto owner = static_cast<const SeqNumIndex*>(m_index.get());
	Int keyId = unaligned_load<Int>(key.udata());
	if (keyId < owner->m_min) {
		// FIX: was `keyId <= owner->m_min`, which reported "not found"
		// even for keyId == m_min although that key exists (id 0);
		// the exact match now falls through to the final branch
		m_curr = 0;
		return -1;
	}
	else if (keyId >= owner->m_min + owner->m_cnt) {
		// FIX: was `keyId > m_min + m_cnt`; keyId == m_min + m_cnt used to
		// reach the exact-match branch and yield the out-of-range id m_cnt.
		// Clamp to the largest stored key (m_min + m_cnt - 1).
		m_curr = owner->m_cnt;
		*id = owner->m_cnt - 1;
		Int forwardMax = owner->m_min + owner->m_cnt - 1;
		retKey->assign((const byte*)&forwardMax, sizeof(Int));
		return 1;
	}
	else {
		// exact match: the id is just the offset from m_min
		keyId -= owner->m_min;
		m_curr = keyId;
		*id = keyId;
		retKey->assign(key.udata(), key.size());
		return 0;
	}
}
// Append one row to the segment; rows get consecutive ids, so the new
// row's id is simply the row count before insertion.
llong MockWritableSegment::append(fstring row, DbContext*) {
	const llong newId = m_rows.size();
	m_rows.push_back();              // grow by one empty row
	m_rows.back().assign(row);       // then copy the payload into it
	m_dataSize += row.size();        // keep the byte accounting in sync
	return newId;
}
// Lower-bound search over the key permutation stored in m_index.
// Keys are kept as unsigned offsets from m_minKey inside m_keys.
// Returns {rank of lower bound, whether it is an exact match}.
std::pair<size_t, bool> ZipIntKeyIndex::IntVecLowerBound(fstring binkey) const {
	assert(binkey.size() == sizeof(Int));
	Int rawkey = unaligned_load<Int>(binkey.data());
	if (rawkey < Int(m_minKey)) {
		// everything stored is >= m_minKey: lower bound is rank 0, no match
		return std::make_pair(0, false);
	}
	const auto idxData = m_index.data();
	const auto idxBits = m_index.uintbits();
	const auto idxMask = m_index.uintmask();
	const auto keyData = m_keys.data();
	const auto keyBits = m_keys.uintbits();
	const auto keyMask = m_keys.uintmask();
	const size_t target = size_t(rawkey - Int(m_minKey));
	size_t lo = 0, hi = m_index.size();
	while (lo < hi) {
		const size_t mid = (lo + hi) / 2;
		const size_t pos = UintVecMin0::fast_get(idxData, idxBits, idxMask, mid);
		const size_t cur = UintVecMin0::fast_get(keyData, keyBits, keyMask, pos);
		if (cur < target)
			lo = mid + 1;
		else
			hi = mid;
	}
	if (lo < m_index.size()) {
		// re-read the key at the final rank to test for an exact hit
		const size_t pos = UintVecMin0::fast_get(idxData, idxBits, idxMask, lo);
		const size_t cur = UintVecMin0::fast_get(keyData, keyBits, keyMask, pos);
		return std::make_pair(lo, target == cur);
	}
	return std::make_pair(lo, false); // past the end: no match possible
}
bool indexInsert(size_t indexId, fstring key, llong recId) override { assert(started == m_status); assert(indexId < m_indices.size()); WT_ITEM item; WT_SESSION* ses = m_session.ses; const Schema& schema = m_sconf.getIndexSchema(indexId); WT_CURSOR* cur = m_indices[indexId].insert; WtWritableIndex::setKeyVal(schema, cur, key, recId, &item, &m_wrtBuf); int err = cur->insert(cur); m_sizeDiff += sizeof(llong) + key.size(); if (schema.m_isUnique) { if (WT_DUPLICATE_KEY == err) { return false; } if (err) { THROW_STD(invalid_argument , "ERROR: wiredtiger insert unique index: %s", ses->strerror(ses, err)); } } else { if (WT_DUPLICATE_KEY == err) { assert(0); // assert in debug return true; // ignore in release } if (err) { THROW_STD(invalid_argument , "ERROR: wiredtiger insert multi index: %s", ses->strerror(ses, err)); } } return true; }
// Append one row under the writer lock; the new row's id is the row
// count before insertion (ids are consecutive).
llong MockWritableStore::append(fstring row, DbContext*) {
	SpinRwLock lock(m_rwMutex, true); // exclusive: mutates rows and size
	const llong newId = m_rows.size();
	m_rows.push_back();
	m_rows.back().assign(row);
	m_dataSize += row.size();
	return newId;
}
// "Insert" into the sequence-number index. Nothing is physically stored:
// the key must equal m_min + id, so insertion only validates that invariant
// and grows the logical count to cover id.
bool SeqNumIndex<Int>::insert(fstring key, llong id, DbContext*) {
	assert(key.size() == sizeof(Int));
	assert(id >= 0);
	if (key.size() != sizeof(Int)) {
		THROW_STD(invalid_argument, "key.size must be sizeof(Int)=%d", int(sizeof(Int)));
	}
	const Int keyId = unaligned_load<Int>(key.udata());
	if (keyId != m_min + id) {
		THROW_STD(invalid_argument, "key must be consistent with id in SeqNumIndex");
	}
	if (id + 1 > llong(m_cnt)) {
		m_cnt = id + 1; // extend the count so that id is covered
	}
	return true;
}
// Replace the row at `id` in place and keep the byte accounting in sync:
// the old payload size is dropped, the new one added.
void MockWritableSegment::replace(llong id, fstring row, DbContext*) {
	assert(id >= 0);
	assert(id < llong(m_rows.size()));
	m_dataSize += row.size();
	m_dataSize -= m_rows[id].size(); // read the old size before overwriting
	m_rows[id].assign(row);
}
// "Replace" on the sequence-number index is validation only: the id may not
// change, and the key must stay consistent with m_min + newId. No state is
// mutated because the key is fully derived from the id.
bool SeqNumIndex<Int>::replace(fstring key, llong id, llong newId, DbContext*) {
	assert(key.size() == sizeof(Int));
	assert(id >= 0);
	assert(id == newId);
	if (key.size() != sizeof(Int)) {
		THROW_STD(invalid_argument, "key.size must be sizeof(Int)=%d", int(sizeof(Int)));
	}
	if (id != newId) {
		THROW_STD(invalid_argument, "replace with different id is not supported by SeqNumIndex");
	}
	const Int keyId = unaligned_load<Int>(key.udata());
	if (keyId != m_min + newId) {
		THROW_STD(invalid_argument, "key must be consistent with id in SeqNumIndex");
	}
	return true;
}
fstring FUFileManager::ExtractNetworkHostname(fstring& filename) { fstring hostname; #ifdef WIN32 // UNC network paths are only supported on WIN32, right now. if (filename.size() > 2 && (filename[0] == '/' || filename[0] == '\\') && filename[1] == filename[0]) { size_t nextSlash = min(filename.find('/', 2), filename.find('\\', 2)); FUAssert(nextSlash != fstring::npos, return hostname); // The UNC patch should always have at least one network path hostname = filename.substr(2, nextSlash - 2); filename.erase(0, nextSlash); // Keep the slash to indicate absolute path. }
/// Equal-range search over the key permutation in m_index: first a plain
/// binary search to find any hit, then two refining binary searches for the
/// lower and upper bound around it. Keys are stored as unsigned offsets from
/// m_minKey. Returns [first, last) as ranks into m_index; empty when absent.
std::pair<size_t, size_t> ZipIntKeyIndex::IntVecEqualRange(fstring binkey) const {
	assert(binkey.size() == sizeof(Int));
	Int rawkey = unaligned_load<Int>(binkey.data());
	if (rawkey < Int(m_minKey)) {
		// FIX: was `std::make_pair(0, false)` — a pair<int,bool> silently
		// converted to pair<size_t,size_t> (copy-paste from IntVecLowerBound);
		// spell the empty range [0, 0) explicitly
		return std::make_pair(size_t(0), size_t(0));
	}
	auto indexData = m_index.data();
	auto indexBits = m_index.uintbits();
	auto indexMask = m_index.uintmask();
	auto keysData = m_keys.data();
	auto keysBits = m_keys.uintbits();
	auto keysMask = m_keys.uintmask();
	size_t key = size_t(rawkey - Int(m_minKey)); // offset-encoded search key
	size_t i = 0, j = m_index.size();
	size_t mid = 0;
	// phase 1: find any position holding `key`
	while (i < j) {
		mid = (i + j) / 2;
		size_t hitPos = UintVecMin0::fast_get(indexData, indexBits, indexMask, mid);
		size_t hitKey = UintVecMin0::fast_get(keysData, keysBits, keysMask, hitPos);
		if (hitKey < key)
			i = mid + 1;
		else if (hitKey > key)
			j = mid;
		else
			goto Found;
	}
	return std::make_pair(i, i); // not present: empty range at insertion point
Found:
	// phase 2a: lower_bound within [i, mid]
	size_t lo = i, hi = mid;
	while (lo < hi) {
		size_t mid2 = (lo + hi) / 2;
		size_t hitPos = UintVecMin0::fast_get(indexData, indexBits, indexMask, mid2);
		size_t hitKey = UintVecMin0::fast_get(keysData, keysBits, keysMask, hitPos);
		if (hitKey < key) // for lower_bound
			lo = mid2 + 1;
		else
			hi = mid2;
	}
	i = lo;
	// phase 2b: upper_bound within (mid, j)
	lo = mid + 1, hi = j;
	while (lo < hi) {
		size_t mid2 = (lo + hi) / 2;
		size_t hitPos = UintVecMin0::fast_get(indexData, indexBits, indexMask, mid2);
		size_t hitKey = UintVecMin0::fast_get(keysData, keysBits, keysMask, hitPos);
		if (hitKey <= key) // for upper_bound
			lo = mid2 + 1;
		else
			hi = mid2;
	}
	return std::make_pair(i, hi);
}
// Update the row at `id`, or append when id equals the current row count.
void MockWritableStore::update(llong id, fstring row, DbContext* ctx) {
	assert(id >= 0);
	assert(id <= llong(m_rows.size()));
	// id == size means "append a new row".
	// NOTE(review): this size check runs before the lock is taken, so it can
	// race with concurrent appends — confirm callers serialize updates.
	if (id == llong(m_rows.size())) {
		append(row, ctx);
		return;
	}
	SpinRwLock lock(m_rwMutex, true); // exclusive for the in-place overwrite
	const size_t previous = m_rows[id].size();
	m_rows[id].assign(row);
	m_dataSize += row.size();
	m_dataSize -= previous;
}
// Print one (text, idx, value) record: values containing an embedded NUL
// are treated as binary and dumped as hex, otherwise printed verbatim.
// Remembers the last key length in `klen`.
void operator()(int len, int idx, fstring value) {
	// strnlen stopping short of value.size() means a NUL byte is embedded
	if (strnlen(value.p, value.n) < value.size()) {
		printf("%-20.*s idx=%08d bin=", len, text, idx);
		for (int k = 0; k < value.n; ++k) {
			printf("%02X", (byte_t)value.p[k]);
		}
		printf("\n");
	}
	else {
		printf("%-20.*s idx=%08d val=%.*s\n"
			, len, text, idx, value.ilen(), value.data());
	}
	klen = len;
}
// Destructively split the buffer on ANY single character of `delims`,
// appending (begin, end) fields to *F and NUL-terminating each field in
// place. At most max_fields fields are produced (the last one keeps the
// remaining tail). Returns the number of fields.
size_t split_by_any(fstring delims, Vec* F, size_t max_fields = ~size_t(0)) {
	const size_t dlen = delims.size();
	if (0 == dlen) // no delimiters given: fall back to blank splitting
		return split(' ', F);
	if (1 == dlen) // single delimiter: use the cheaper char overload
		return split(delims[0], F);
	F->resize(0);
	char* cur = p;
	char* const last = p + n;
	while (cur <= last && F->size() + 1 < max_fields) {
		char* stop = cur;
		// advance until any delimiter character is hit (or the end)
		while (stop < last && NULL == memchr(delims.data(), *stop, dlen))
			++stop;
		F->push_back(typename Vec::value_type(cur, stop));
		*stop = 0; // destructive: terminate the field in place
		cur = stop + 1;
	}
	if (cur <= last) // tail field when max_fields cut the loop short
		F->push_back(typename Vec::value_type(cur, last));
	return F->size();
}
// Destructively split the buffer on the multi-character separator `delims`,
// appending (begin, end) fields to *F and NUL-terminating each field's first
// separator byte in place. At most max_fields fields are produced (the last
// one keeps the remaining tail). Returns the number of fields.
size_t split(fstring delims, Vec* F, size_t max_fields = ~size_t(0)) {
	const size_t dlen = delims.size();
	if (0 == dlen) // no delimiters given: fall back to blank splitting
		return split(' ', F);
	if (1 == dlen) // single delimiter: use the cheaper char overload
		return split(delims[0], F);
	F->resize(0);
	char* cur = p;
	char* const last = p + n;
	while (cur <= last && F->size() + 1 < max_fields) {
		// locate the next whole-separator occurrence
		char* hit = (char*)memmem(cur, last - cur, delims.data(), dlen);
		if (NULL == hit)
			hit = last;
		F->push_back(typename Vec::value_type(cur, hit));
		*hit = 0; // destructive: terminate the field in place
		cur = hit + dlen;
	}
	if (cur <= last) // tail field when max_fields cut the loop short
		F->push_back(typename Vec::value_type(cur, last));
	return F->size();
}
//static int MockReadonlyIndex::forwardLowerBound(fstring key, size_t* pLower) const { const uint32_t* index = m_ids.data(); const size_t rows = m_ids.size(); const size_t fixlen = m_fixedLen; if (fixlen) { assert(m_keys.size() == 0); assert(key.size() == fixlen); FixedLenKeyCompare cmp; cmp.fixedLen = fixlen; cmp.strpool = m_keys.strpool.data(); cmp.schema = m_schema; size_t lo = nark::lower_bound_0(index, rows, key, cmp); *pLower = lo; if (lo < rows) { size_t jj = m_ids[lo]; if (key == fstring(cmp.strpool + fixlen*jj, fixlen)) return 0; else return 1; } } else { VarLenKeyCompare cmp; cmp.offsets = m_keys.offsets.data(); cmp.strpool = m_keys.strpool.data(); cmp.schema = m_schema; size_t lo = nark::lower_bound_0(index, rows, key, cmp); *pLower = lo; if (lo < rows) { size_t jj = m_ids[lo]; if (key == m_keys[jj]) return 0; else return 1; } } return -1; }
//static int MockReadonlyIndex::forwardLowerBound(fstring key, size_t* pLower) const { const uint32_t* index = m_ids.data(); const size_t rows = m_ids.size(); const size_t fixlen = m_fixedLen; const auto cmp = m_schema->compareData_less(); if (fixlen) { assert(m_keys.size() == 0); assert(key.size() == fixlen); FixedLenKeyExtractor keyEx; keyEx.fixedLen = fixlen; keyEx.strpool = m_keys.strpool.data(); size_t lo = lower_bound_ex_0(index, rows, key, keyEx, cmp); *pLower = lo; if (lo < rows) { size_t jj = m_ids[lo]; if (key == fstring(keyEx.strpool + fixlen*jj, fixlen)) return 0; else return 1; } } else { VarLenKeyExtractor keyEx; keyEx.offsets = m_keys.offsets.data(); keyEx.strpool = m_keys.strpool.data(); size_t lo = lower_bound_ex_0(index, rows, key, keyEx, cmp); *pLower = lo; if (lo < rows) { size_t jj = m_ids[lo]; if (key == m_keys[jj]) return 0; else return 1; } } return -1; }