void c4_HandlerSeq::Prepare(const t4_byte **ptr_, bool selfDesc_) {
  if (ptr_ != 0) {
    d4_dbgdef(t4_i32 sias =)c4_Column::PullValue(*ptr_);
    d4_assert(sias == 0); // not yet

    if (selfDesc_) {
      t4_i32 n = c4_Column::PullValue(*ptr_);
      if (n > 0) {
        c4_String s = "[" + c4_String((const char*)*ptr_, n) + "]";
        const char *desc = s;

        c4_Field *f = d4_new c4_Field(desc);
        d4_assert(! *desc);
        Restructure(*f, false);
        *ptr_ += n;
      }
    }

    int rows = (int)c4_Column::PullValue(*ptr_);
    if (rows > 0) {
      SetNumRows(rows);

      for (int i = 0; i < NumFields(); ++i)
        NthHandler(i).Define(rows, ptr_);
    }
  }
}
void c4_Row::Release(c4_Cursor row_) {
  d4_assert(row_._seq != 0);
  d4_assert(row_._index == 0);

  row_._seq->DecRef();
}
c4_GroupByViewer::c4_GroupByViewer(c4_Sequence &seq_, const c4_View &keys_,
  const c4_Property &result_): _parent(&seq_), _keys(keys_), _result(result_) {
  _sorted = _parent.SortOn(_keys);
  int n = _sorted.GetSize();

  c4_Bytes temp;
  t4_byte *buf = temp.SetBufferClear(n);

  int groups = 0;
  if (n > 0) {
    ++buf[0]; // the first entry is always a transition
    groups = 1 + ScanTransitions(1, n, buf, _sorted.Project(_keys));
  }

  // set up a map pointing to each transition
  _map.SetSize(groups + 1);
  int j = 0;

  for (int i = 0; i < n; ++i)
    if (buf[i])
      _map.SetAt(j++, i);

  // also append an entry to point just past the end
  _map.SetAt(j, n);

  d4_assert(_map.GetAt(0) == 0);
  d4_assert(j == groups);
}
int c4_GroupByViewer::ScanTransitions(int lo_, int hi_, t4_byte *flags_,
  const c4_View &match_)const {
  d4_assert(lo_ > 0);

  int m = hi_ - lo_;
  d4_assert(m >= 0);

  // done if nothing left or if entire range is identical
  if (m == 0 || match_[lo_ - 1] == match_[hi_ - 1])
    return 0;

  // range has a transition, done if it is exactly of size one
  if (m == 1) {
    ++(flags_[lo_]);
    return 1;
  }

  // use binary splitting if the range has enough entries
  if (m >= 5)
    return ScanTransitions(lo_, lo_ + m / 2, flags_, match_) +
      ScanTransitions(lo_ + m / 2, hi_, flags_, match_);

  // else use a normal linear scan
  int n = 0;

  for (int i = lo_; i < hi_; ++i)
    if (match_[i] != match_[i - 1]) {
      ++(flags_[i]);
      ++n;
    }

  return n;
}
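/* Worked example of the transition scan above: for a sorted key column
 * {A, A, B, B, C} (n = 5), the constructor marks flags_[0] and calls
 * ScanTransitions(1, 5, ...). With m = 4 the range falls below the
 * binary-splitting threshold and is scanned linearly, marking flags_[2]
 * (B != A) and flags_[4] (C != B) and returning 2, for 3 groups in total;
 * the map built by the constructor then becomes {0, 2, 4, 5}.
 */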
/** Move attached rows to somewhere else in same storage
 *
 * There is a lot of trickery going on here. The whole point of this
 * code is that moving rows between (compatible!) subviews should not
 * use copying when potentially large memos and subviews are involved.
 * In that case, the best solution is really to move pointers, not data.
 */
void c4_View::RelocateRows(int from_, int count_, c4_View &dest_, int pos_) {
  if (count_ < 0)
    count_ = GetSize() - from_;
  if (pos_ < 0)
    pos_ = dest_.GetSize();

  d4_assert(0 <= from_ && from_ <= GetSize());
  d4_assert(0 <= count_ && from_ + count_ <= GetSize());
  d4_assert(0 <= pos_ && pos_ <= dest_.GetSize());

  if (count_ > 0) {
    // the destination must not be inside the source rows
    d4_assert(&dest_ != this || from_ > pos_ || pos_ >= from_ + count_);

    // this test is slow, so do it only as a debug check
    d4_assert(IsCompatibleWith(dest_));

    // make space, swap rows, drop originals
    c4_Row empty;
    dest_.InsertAt(pos_, empty, count_);

    // careful if insert moves origin
    if (&dest_ == this && pos_ <= from_)
      from_ += count_;

    for (int i = 0; i < count_; ++i)
      ((c4_HandlerSeq*)_seq)->ExchangeEntries(from_ + i,
        *(c4_HandlerSeq*)dest_._seq, pos_ + i);

    RemoveAt(from_, count_);
  }
}
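/* A minimal usage sketch of RelocateRows(), assuming a storage laid out as
 * "items[list[name:S]]"; the file name and property names are illustrative
 * only, not taken from this source:
 *
 *   c4_Storage storage("demo.mk", true);
 *   c4_ViewProp pList("list");
 *
 *   c4_View items = storage.View("items");
 *   c4_View a = pList(items[0]);   // source subview
 *   c4_View b = pList(items[1]);   // compatible destination subview
 *
 *   // move rows 2..3 of "a" to the front of "b" by exchanging column
 *   // pointers instead of copying the (possibly large) row contents
 *   a.RelocateRows(2, 2, b, 0);
 */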
void c4_FormatV::SetupAllSubviews() {
  d4_assert(!_inited);
  _inited = true;

  if (_data.ColSize() > 0) {
    c4_Bytes temp;
    _data.FetchBytes(0, _data.ColSize(), temp, true);
    const t4_byte *ptr = temp.Contents();

    for (int r = 0; r < _subSeqs.GetSize(); ++r) {
      // don't materialize subview if it is empty
      // duplicates code which is in c4_HandlerSeq::Prepare
      const t4_byte *p2 = ptr;
      d4_dbgdef(t4_i32 sias =)c4_Column::PullValue(p2);
      d4_assert(sias == 0); // not yet

      if (c4_Column::PullValue(p2) > 0)
        At(r).Prepare(&ptr, false);
      else
        ptr = p2;
    }

    d4_assert(ptr == temp.Contents() + temp.Size());
  }
}
void c4_FormatB::Define(int, const t4_byte **ptr_) {
  d4_assert(_memos.GetSize() == 0);

  if (ptr_ != 0) {
    _data.PullLocation(*ptr_);
    if (_data.ColSize() > 0)
      _sizeCol.PullLocation(*ptr_);
    _memoCol.PullLocation(*ptr_);
  }

  // everything below this point could be delayed until use
  // in that case, watch out that column space use is properly tracked

  InitOffsets(_sizeCol);

  if (_memoCol.ColSize() > 0) {
    c4_Bytes walk;
    _memoCol.FetchBytes(0, _memoCol.ColSize(), walk, true);

    const t4_byte *p = walk.Contents();

    for (int row = 0; p < walk.Contents() + walk.Size(); ++row) {
      row += c4_Column::PullValue(p);
      d4_assert(row < _memos.GetSize());

      c4_Column *mc = d4_new c4_Column(_data.Persist());
      d4_assert(mc != 0);
      _memos.SetAt(row, mc);

      mc->PullLocation(p);
    }

    d4_assert(p == walk.Contents() + walk.Size());
  }
}
void c4_FormatB::Remove(int index_, int count_) {
  _recalc = true;

  t4_i32 off = Offset(index_);
  t4_i32 n = Offset(index_ + count_) - off;
  d4_assert(n >= 0);

  // remove the columns, if present
  for (int i = 0; i < count_; ++i)
    delete (c4_Column*)_memos.GetAt(index_ + i);
  _memos.RemoveAt(index_, count_);

  if (n > 0)
    _data.Shrink(off, n);

  _offsets.RemoveAt(index_, count_);

  d4_assert(index_ < _offsets.GetSize());

  // adjust all following entries
  while (index_ < _offsets.GetSize())
    _offsets.ElementAt(index_++) -= n;

  d4_assert((t4_i32)_offsets.GetAt(index_ - 1) == _data.ColSize());
  d4_assert(index_ <= _memos.GetSize() + 1);
}
void c4_Persist::LoadAll() {
  c4_Column walk(this);
  if (!LoadIt(walk))
    return ;

  if (_strategy._rootLen < 0) {
    _oldSeek = _strategy._rootPos;
    _oldBuf = d4_new t4_byte[512];
    _oldCurr = _oldLimit = _oldBuf;

    t4_i32 n = FetchOldValue();
    d4_assert(n == 0);
    n = FetchOldValue();
    d4_assert(n > 0);

    c4_Bytes temp;
    t4_byte *buf = temp.SetBuffer(n);
    d4_dbgdef(int n2 = )OldRead(buf, n);
    d4_assert(n2 == n);

    c4_String s = "[" + c4_String((const char*)buf, n) + "]";
    const char *desc = s;

    c4_Field *f = d4_new c4_Field(desc);
    d4_assert(! *desc);

    //?_root->DefineRoot();
    _root->Restructure(*f, false);

    _root->OldPrepare();

    // don't touch data inside while converting the file
    if (_strategy.FileSize() >= 0)
      OccupySpace(1, _strategy.FileSize());
  } else {
void c4_FormatB::InitOffsets(c4_ColOfInts &sizes_) {
  int rows = Owner().NumRows();

  if (sizes_.RowCount() != rows)
    sizes_.SetRowCount(rows);

  _memos.SetSize(rows);
  _offsets.SetSize(rows + 1);

  if (_data.ColSize() > 0) {
    t4_i32 total = 0;

    for (int r = 0; r < rows; ++r) {
      int n = sizes_.GetInt(r);
      d4_assert(n >= 0);
      total += n;
      _offsets.SetAt(r + 1, total);
    }

    d4_assert(total == _data.ColSize());
  }
}
/// Insert one or more rows into this sequence
void c4_Sequence::InsertAt(int index_, c4_Cursor newElem_, int count_) {
  d4_assert(newElem_._seq != 0);

  c4_Notifier change(this);
  if (GetDependencies())
    change.StartInsertAt(index_, newElem_, count_);

  SetNumRows(NumRows() + count_);

  c4_Bytes data;

  for (int i = 0; i < newElem_._seq->NumHandlers(); ++i) {
    c4_Handler &h = newElem_._seq->NthHandler(i);

    // added 06-12-1999 to do index remapping for derived seq's
    const c4_Sequence *hc = newElem_._seq->HandlerContext(i);
    int ri = newElem_._seq->RemapIndex(newElem_._index, hc);

    int colNum = PropIndex(h.Property());
    d4_assert(colNum >= 0);

    if (h.Property().Type() == 'V') {
      // if inserting from self: make sure we get a copy of the bytes,
      // so we don't get an invalid pointer if the memory gets realloc'ed
      h.GetBytes(ri, data, newElem_._seq == this);

      // special treatment for subviews, insert empty, then overwrite
      // changed 19990904 - probably fixes a long-standing limitation
      c4_Bytes temp;
      h.ClearBytes(temp);

      c4_Handler &h2 = NthHandler(colNum);
      h2.Insert(index_, temp, count_);

      for (int j = 0; j < count_; ++j)
        h2.Set(index_ + j, data);
    } else {
      h.GetBytes(ri, data);
      NthHandler(colNum).Insert(index_, data, count_);
    }
  }

  // if number of props in dest is larger after adding, clear the rest
  // this way, new props get copied and undefined props get cleared
  if (newElem_._seq->NumHandlers() < NumHandlers())
    for (int j = 0; j < NumHandlers(); ++j) {
      c4_Handler &h = NthHandler(j);

      // if the property does not appear in the source
      if (newElem_._seq->PropIndex(h.PropId()) < 0) {
        h.ClearBytes(data);
        h.Insert(index_, data, count_);
      }
    }
}
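/* A minimal sketch of how this code is reached from the public API; the
 * property names are illustrative only:
 *
 *   c4_StringProp pName("name");
 *   c4_IntProp pAge("age");
 *
 *   c4_Row r;
 *   pName(r) = "John";
 *   pAge(r) = 42;
 *
 *   c4_View v;
 *   v.InsertAt(0, r);      // ends up in c4_Sequence::InsertAt, count 1
 *   v.InsertAt(0, r, 3);   // inserts three copies of the same row
 */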
void c4_CustomSeq::DoSet(int row_, int col_, const c4_Bytes &buf_) {
  d4_assert(_inited);

  d4_dbgdef(const bool f =)_viewer->SetItem(row_, col_, buf_);
  d4_assert(f);
}
int c4_FormatL::DoCompare(const c4_Bytes &b1_, const c4_Bytes &b2_) {
  d4_assert(b1_.Size() == sizeof(t4_i64));
  d4_assert(b2_.Size() == sizeof(t4_i64));

  t4_i64 v1 = *(const t4_i64*)b1_.Contents();
  t4_i64 v2 = *(const t4_i64*)b2_.Contents();

  return v1 == v2 ? 0 : v1 < v2 ? -1 : +1;
}
int c4_FormatF::DoCompare(const c4_Bytes &b1_, const c4_Bytes &b2_) {
  d4_assert(b1_.Size() == sizeof(float));
  d4_assert(b2_.Size() == sizeof(float));

  float v1 = *(const float*)b1_.Contents();
  float v2 = *(const float*)b2_.Contents();

  return v1 == v2 ? 0 : v1 < v2 ? -1 : +1;
}
c4_Property::c4_Property(const c4_Property &prop_): _id(prop_.GetId()),
  _type(prop_.Type()) {
  c4_ThreadLock::Hold lock;

  d4_assert(sPropCounts != 0);
  d4_assert(sPropCounts->GetAt(_id) > 0);

  Refs(+1);
}
int c4_FormatD::DoCompare(const c4_Bytes &b1_, const c4_Bytes &b2_) {
  d4_assert(b1_.Size() == sizeof(double));
  d4_assert(b2_.Size() == sizeof(double));

  double v1 = *(const double*)b1_.Contents();
  double v2 = *(const double*)b2_.Contents();

  return v1 == v2 ? 0 : v1 < v2 ? -1 : +1;
}
c4_Sequence::~c4_Sequence() {
  d4_assert(_refCount == 0);
  d4_assert(!_dependencies); // there can be no dependencies left

  ClearCache();

  delete _tempBuf;
}
c4_FileMark::c4_FileMark(t4_i32 pos_, bool flipped_, bool extend_) {
  d4_assert(sizeof *this == 8);

  *(short*)_data = flipped_ ? kReverseFormat : kStorageFormat;
  _data[2] = extend_ ? 0x0A : 0x1A;
  _data[3] = 0;

  t4_byte *p = _data + 4;
  for (int i = 24; i >= 0; i -= 8)
    *p++ = (t4_byte)(pos_ >> i);

  d4_assert(p == _data + sizeof _data);
}
c4_FileMark::c4_FileMark(t4_i32 pos_, int len_) {
  d4_assert(sizeof *this == 8);

  t4_byte *p = _data;
  *p++ = 0x80;
  for (int j = 16; j >= 0; j -= 8)
    *p++ = (t4_byte)(len_ >> j);
  for (int i = 24; i >= 0; i -= 8)
    *p++ = (t4_byte)(pos_ >> i);

  d4_assert(p == _data + sizeof _data);
}
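/* Byte layout of the two 8-byte marks built above, as implied by the shift
 * loops (the multi-byte pos_ and len_ values are stored big-endian):
 *
 *   c4_FileMark(pos_, flipped_, extend_):
 *     [format tag, 2 bytes] [0x0A or 0x1A] [0x00] [pos_, 4 bytes]
 *   c4_FileMark(pos_, len_):
 *     [0x80] [len_, 3 bytes] [pos_, 4 bytes]
 */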
/// Assignment from a reference to a row.
c4_Row &c4_Row::operator = (const c4_RowRef &rowRef_) {
  d4_assert(_cursor._seq != 0);

  if (_cursor != &rowRef_) {
    d4_assert(_cursor._index == 0);
    _cursor._seq->SetAt(0, &rowRef_);
  }

  return *this;
}
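/* A minimal usage sketch of this assignment; the view and property names
 * are illustrative only:
 *
 *   c4_StringProp pName("name");
 *   c4_Row r;
 *   r = v[3];               // copies all properties of row 3 into r
 *   pName(r) = "changed";   // modifying r leaves the view untouched
 */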
void c4_HandlerSeq::DefineRoot() {
  d4_assert(_field == 0);
  d4_assert(_parent == 0);

  SetNumRows(1);

  const char *desc = "[]";
  _field = d4_new c4_Field(desc);
  d4_assert(! *desc);

  _parent = this;
}
void c4_FormatS::Insert(int index_, const c4_Bytes &buf_, int count_) {
  d4_assert(count_ > 0);

  int m = buf_.Size();
  if (--m >= 0) {
    d4_assert(buf_.Contents()[m] == 0);
    if (m == 0) {
      c4_FormatB::Insert(index_, c4_Bytes(), count_);
      return ;
    }
  }

  c4_FormatB::Insert(index_, buf_, count_);
}
/// Define the complete view structure of the storage
void c4_Storage::SetStructure(const char *description_) {
  d4_assert(description_ != 0);

  if (description_ != Description()) {
    c4_String s = "[" + c4_String(description_) + "]";
    description_ = s;

    c4_Field *field = d4_new c4_Field(description_);
    d4_assert(! *description_);

    d4_assert(field != 0);
    Persist()->Root().Restructure(*field, false);
  }
}
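/* A minimal usage sketch; the file name and structure string are
 * illustrative only:
 *
 *   c4_Storage storage("demo.mk", true);
 *   storage.SetStructure("people[name:S,age:I]");
 *
 *   c4_View people = storage.View("people");  // matches the new structure
 */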
d4_inline t4_i32 c4_FormatB::Offset(int index_)const {
  d4_assert((t4_i32)_offsets.GetAt(_offsets.GetSize() - 1) == _data.ColSize());
  d4_assert(_offsets.GetSize() == _memos.GetSize() + 1);
  d4_assert(index_ < _offsets.GetSize());

  // extend offset vectors for missing empty entries at end
  int n = _offsets.GetSize();
  d4_assert(n > 0);

  if (index_ >= n)
    index_ = n - 1;

  return _offsets.GetAt(index_);
}
bool c4_Dependencies::Remove(c4_Sequence *seq_) {
  int n = _refs.GetSize() - 1;
  d4_assert(n >= 0);

  for (int i = 0; i <= n; ++i)
    if (_refs.GetAt(i) == seq_) {
      _refs.SetAt(i, _refs.GetAt(n));
      _refs.SetSize(n);
      return n > 0;
    }

  d4_assert(0); // dependency not found
  return true;
}
/** Adjust the reference count
 *
 * This is part of the implementation and shouldn't normally be called.
 * This code is only called with the lock held, so it is always thread-safe.
 */
void c4_Property::Refs(int diff_)const {
  d4_assert(diff_ == -1 || diff_ == +1);

  d4_assert(sPropCounts != 0);
  sPropCounts->ElementAt(_id) += diff_;

#if q4_CHECK
  // get rid of the cache when the last property goes away
  static t4_i32 sPropTotals;

  sPropTotals += diff_;
  if (sPropTotals == 0)
    CleanupInternalData();
#endif
}
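/* A minimal sketch of how these counts are exercised, together with the
 * copy constructor above; the property name is illustrative only:
 *
 *   c4_IntProp age("age");   // registers the id, count becomes 1
 *   {
 *     c4_IntProp copy(age);  // copy ctor calls Refs(+1), count is 2
 *   }                        // dtor calls Refs(-1), count back to 1
 *
 * When the last live property is destroyed, q4_CHECK builds call
 * CleanupInternalData() to release the global property cache.
 */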
void c4_Column::PullLocation(const t4_byte *&ptr_) {
  d4_assert(_segments.GetSize() == 0);

  _size = PullValue(ptr_);
  _position = 0;
  if (_size > 0) {
    _position = PullValue(ptr_);
    if (_position > 0) {
      d4_assert(_persist != 0);
      _persist->OccupySpace(_position, _size);
    }
  }

  _dirty = false;
}
void c4_FormatB::SetOne(int index_, const c4_Bytes &xbuf_, bool ignoreMemos_) {
  // this fixes a bug in 2.4.0 when copying a string from a higher row
  // TODO: this fix is very conservative, figure out when to copy
  // (can probably look at the pointer to see whether it's from us)
  int sz = xbuf_.Size();
  c4_Bytes buf_(xbuf_.Contents(), sz, 0 < sz && sz <= c4_Column::kSegMax);

  c4_Column *cp = &_data;
  t4_i32 start = Offset(index_);
  int len = Offset(index_ + 1) - start;

  if (!ignoreMemos_ && _memos.GetAt(index_) != 0)
    len = ItemLenOffCol(index_, start, cp);

  int m = buf_.Size();
  int n = m - len;

  if (n > 0)
    cp->Grow(start, n);
  else if (n < 0)
    cp->Shrink(start, -n);
  else if (m == 0)
    return ; // no size change and no contents

  _recalc = true;

  cp->StoreBytes(start, buf_);

  if (n && cp == &_data) {
    // if size has changed
    int k = _offsets.GetSize() - 1;

    // if filling in an empty entry at end: extend offsets first
    if (m > 0 && index_ >= k) {
      _offsets.InsertAt(k, _offsets.GetAt(k), index_ - k + 1);
      k = index_ + 1;
      d4_assert(k == _offsets.GetSize() - 1);
    }

    // adjust following entry offsets
    while (++index_ <= k)
      _offsets.ElementAt(index_) += n;
  }

  d4_assert((t4_i32)_offsets.GetAt(_offsets.GetSize() - 1) == _data.ColSize());
}
void c4_Column::SetupSegments() {
  d4_assert(_segments.GetSize() == 0);
  d4_assert(_gap == 0);
  d4_assert(_slack == 0);

  // The last entry in the _segments array is either a partial block
  // or a null pointer, so calling "fSegIndex(_size)" is always allowed.
  int n = fSegIndex(_size) + 1;
  _segments.SetSize(n);

  // treat last block differently if it is a partial entry
  int last = n;
  if (fSegRest(_size))
    --last; // this block is partial, size is 1 .. kSegMax-1
  else
    --n; // the last block is left as a null pointer

  int id = -1;
  if (_position < 0) {
    // special aside id, figure out the real position
    d4_assert(_persist != 0);

    id = ~_position;
    _position = _persist->LookupAside(id);
    d4_assert(_position >= 0);
  }

  if (IsMapped()) {
    // setup for mapped files is quick, just fill in the pointers
    d4_assert(_position > 1);
    d4_assert(_position + (n - 1) * kSegMax <= Strategy()._dataSize);

    const t4_byte *map = Strategy()._mapStart + _position;

    for (int i = 0; i < n; ++i) {
      _segments.SetAt(i, (t4_byte*)map); // loses const
      map += kSegMax;
    }
  } else {
    int chunk = kSegMax;
    t4_i32 pos = _position;

    // allocate buffers, load them if necessary
    for (int i = 0; i < n; ++i) {
      if (i == last)
        chunk = fSegRest(_size);

      t4_byte *p = d4_new t4_byte[chunk];
      _segments.SetAt(i, p);

      if (_position > 0) {
        d4_dbgdef(int n = )Strategy().DataRead(pos, p, chunk);
        d4_assert(n == chunk);
        pos += chunk;
      }
    }
  }
}
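/* Worked example of the segment arithmetic above, assuming the usual 4 kB
 * segments (kSegBits = 12, so kSegMax = 4096; the exact constant lives
 * elsewhere in this source):
 *
 *   _size = 10000 bytes
 *   n     = fSegIndex(10000) + 1 = (10000 >> 12) + 1 = 3 slots
 *   fSegRest(10000) = 10000 & 4095 = 1808, so the last slot holds a
 *   partial block and the segments cover 4096 + 4096 + 1808 bytes
 */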
c4_HandlerSeq::~c4_HandlerSeq() {
  const bool rootLevel = _parent == this;
  c4_Persist *pers = _persist;

  if (rootLevel && pers != 0)
    pers->DoAutoCommit();

  DetachFromParent();
  DetachFromStorage(true);

  for (int i = 0; i < NumHandlers(); ++i)
    delete &NthHandler(i);
  _handlers.SetSize(0);

  ClearCache();

  if (rootLevel) {
    delete _field;

    d4_assert(pers != 0);
    delete pers;
  }
}