// Build a mock column-group store from `iter`, skipping deleted rows
// (and, when a purge bitmap is given, mapping logical ids to physical ids),
// then persist it under segDir as "colgroup-<schema name>".
// Returns a heap-allocated store; ownership passes to the caller.
ReadableStore*
MockReadonlySegment::buildDictZipStore(const Schema& schema,
                                       PathRef segDir,
                                       StoreIterator& iter,
                                       const bm_uint_t* isDel,
                                       const febitvec* isPurged)
const {
	std::unique_ptr<MockReadonlyStore> store(new MockReadonlyStore(schema));
	valvec<byte> buf;
	if (isPurged != NULL && isPurged->size() != 0) {
		// Purge bitmap present: rows marked purged have no physical record,
		// so a separate physical id is advanced only for unpurged rows.
		assert(NULL != isDel);
		const bm_uint_t* purgeBits = isPurged->bldata();
		const size_t logicNum = isPurged->size();
		llong physicId = 0;
		for (size_t logicId = 0; logicId < logicNum; ++logicId) {
			if (terark_bit_test(purgeBits, logicId))
				continue; // purged: no physical counterpart, id not advanced
			if (!terark_bit_test(isDel, logicId)) {
				bool hasData = iter.seekExact(physicId, &buf);
				TERARK_RT_assert(hasData, std::logic_error);
				store->m_rows.push_back(buf);
			}
			physicId++;
		}
	}
	else {
		// No purge bitmap: logical and physical ids coincide, scan sequentially.
		llong recId;
		while (iter.increment(&recId, &buf)) {
			if (NULL == isDel || !terark_bit_test(isDel, recId))
				store->m_rows.push_back(buf);
		}
	}
	fs::path fpath = segDir / ("colgroup-" + schema.m_name);
	store->save(fpath);
	return store.release();
}
// Construct a DbContext bound to `tab`: one SegCtx per current segment,
// a transaction on the writable segment (if any), and a snapshot of the
// table's row-number vector and segment-array update sequence.
// NOTE: the caller must hold tab->m_rwMutex for the whole constructor;
// the final assert checks the segment array did not change underneath us.
DbContext::DbContext(const CompositeTable* tab)
	: m_tab(const_cast<CompositeTable*>(tab))
{
	const size_t seqAtEntry = tab->getSegArrayUpdateSeq();
//	tab->registerDbContext(this);
	regexMatchMemLimit = 16*1024*1024; // 16MB
	const size_t numIndexes = tab->getIndexNum();
	const size_t numSegs = tab->getSegNum();
	m_segCtx.resize(numSegs, NULL);
	SegCtx** ctxArr = m_segCtx.data();
	for (size_t segIdx = 0; segIdx < numSegs; ++segIdx) {
		ctxArr[segIdx] = SegCtx::create(tab->getSegmentPtr(segIdx), numIndexes);
	}
	m_wrSegPtr = tab->m_wrSeg.get();
	if (m_wrSegPtr) {
		m_transaction.reset(m_wrSegPtr->createTransaction());
	}
	m_rowNumVec.assign(tab->m_rowNumVec);
	segArrayUpdateSeq = tab->m_segArrayUpdateSeq;
	syncIndex = true;
	isUpsertOverwritten = 0;
	TERARK_RT_assert(tab->getSegArrayUpdateSeq() == seqAtEntry, std::logic_error);
}
// Re-synchronize this context's per-segment SegCtx array with the table's
// current segment array after the table has changed (segments added, merged,
// compressed, or purged). Code tokens are unchanged; only comments added.
// NOTE(review): despite "NoLock" in the name, this presumably runs under a
// lock held by the caller (see the sequence asserts) — confirm at call sites.
void DbContext::doSyncSegCtxNoLock(const CompositeTable* tab) {
	assert(tab == m_tab);
	// Only called when the table's segment array is newer than our snapshot.
	assert(this->segArrayUpdateSeq < tab->getSegArrayUpdateSeq());
	size_t indexNum = tab->getIndexNum();
	// Snapshot the sequence; re-checked at the end to detect concurrent change.
	size_t oldtab_segArrayUpdateSeq = tab->getSegArrayUpdateSeq();
	size_t oldSegNum = m_segCtx.size();
	size_t segNum = tab->getSegNum();
	if (m_segCtx.size() < segNum) {
		// Table grew: create fresh SegCtx for the newly appended segments.
		m_segCtx.resize(segNum, NULL);
		for (size_t i = oldSegNum; i < segNum; ++i)
			m_segCtx[i] = SegCtx::create(tab->getSegmentPtr(i), indexNum);
	}
	if (tab->m_wrSeg.get() != m_wrSegPtr) {
		// Writable segment was replaced (or dropped): rebind the transaction.
		m_wrSegPtr = tab->m_wrSeg.get();
		if (m_wrSegPtr)
			m_transaction.reset(m_wrSegPtr->createTransaction());
		else
			m_transaction.reset();
	}
	SegCtx** sctx = m_segCtx.data();
	for (size_t i = 0; i < segNum; ++i) {
		ReadableSegment* seg = tab->getSegmentPtr(i);
		if (NULL == sctx[i]) {
			sctx[i] = SegCtx::create(seg, indexNum);
			continue;
		}
		if (sctx[i]->seg == seg)
			continue; // slot i already tracks the current segment
		// Mismatch at slot i: search ahead for the same segment object.
		// Finding it at j means segments [i, j) were merged away.
		for (size_t j = i; j < oldSegNum; ++j) {
			assert(NULL != sctx[j]);
			if (sctx[j]->seg == seg) {
				for (size_t k = i; k < j; ++k) {
					// this should be a merged segments range
					assert(NULL != sctx[k]);
					SegCtx::destory(sctx[k], indexNum);
				}
				// Shift the surviving SegCtx pointers left to close the gap.
				for (size_t k = 0; k < oldSegNum - j; ++k) {
					sctx[i + k] = sctx[j + k];
					sctx[j + k] = NULL;
				}
				oldSegNum -= j - i;
				goto Done;
			}
		}
		// a WritableSegment was compressed into a ReadonlySegment, or
		// a ReadonlySegment was purged into a new ReadonlySegment
		SegCtx::reset(sctx[i], indexNum, seg);
	Done:;
	}
	// Destroy any leftover SegCtx beyond the table's current segment count.
	for (size_t i = segNum; i < m_segCtx.size(); ++i) {
		if (sctx[i])
			SegCtx::destory(sctx[i], indexNum);
	}
	// Post-condition: every slot maps 1:1 to the table's segment array.
	for (size_t i = 0; i < segNum; ++i) {
		TERARK_RT_assert(NULL != sctx[i], std::logic_error);
		TERARK_RT_assert(NULL != sctx[i]->seg, std::logic_error);
		TERARK_RT_assert(tab->getSegmentPtr(i) == sctx[i]->seg, std::logic_error);
	}
	// risk_set_size: trim without destroying (leftovers were freed above).
	m_segCtx.risk_set_size(segNum);
	m_rowNumVec.assign(tab->m_rowNumVec);
	TERARK_RT_assert(m_rowNumVec.size() == segNum + 1, std::logic_error);
	// Verify the table's segment array did not change while we synced.
	TERARK_RT_assert(tab->getSegArrayUpdateSeq() == oldtab_segArrayUpdateSeq, std::logic_error);
	segArrayUpdateSeq = tab->getSegArrayUpdateSeq();
}