bool GCache::discard_seqno (int64_t seqno)
{
    // seqno = std::min(seqno, seqno_released);

    for (seqno2ptr_t::iterator i = seqno2ptr.begin();
         i != seqno2ptr.end() && i->first <= seqno;)
    {
        seqno2ptr_t::iterator j(i); ++i;
        BufferHeader* bh(ptr2BH (j->second));

        if (gu_likely(BH_is_released(bh)))
        {
            assert (bh->seqno_g <= seqno);

            seqno2ptr.erase (j);
            bh->seqno_g = SEQNO_ILL; // will never be reused

            switch (bh->store)
            {
            case BUFFER_IN_MEM:
                mem.discard (bh);
                break;
            case BUFFER_IN_RB:
                rb.discard (bh);
                break;
            case BUFFER_IN_PAGE:
                ps.discard (bh);
                break;
            default:
                log_fatal << "Corrupt buffer header: " << bh;
                abort();
            }
        }
        else
        {
            return false;
        }
    }

    return true;
}
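/* Hedged illustration (not GCache code): discard_seqno() relies on the
 * classic map erase-while-iterating idiom -- copy the iterator, advance
 * the loop iterator first, then erase through the copy, so the erase
 * invalidates only the copy. A minimal self-contained sketch with
 * hypothetical names: */
#include <cstdint>
#include <map>

void drop_up_to(std::map<int64_t, const void*>& seqno2ptr, int64_t seqno)
{
    for (std::map<int64_t, const void*>::iterator i = seqno2ptr.begin();
         i != seqno2ptr.end() && i->first <= seqno;)
    {
        std::map<int64_t, const void*>::iterator j(i);
        ++i;                // advance before erasing
        seqno2ptr.erase(j); // only j is invalidated, i stays valid
    }
}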
void free (const void* ptr)
{
    assert (ptr >  mmap_.ptr);
    assert (ptr <= (static_cast<uint8_t*>(mmap_.ptr) + mmap_.size));
    assert (used_ > 0);

    used_--;
    BH_release (ptr2BH(ptr));
}
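/* Hedged sketch of the layout that ptr2BH() assumes (names below are
 * placeholders, not GCache's actual definitions): the allocator stores
 * a header immediately before the payload and hands out bh + 1, so
 * stepping one header back from a user pointer recovers the header. */
#include <cstdint>

struct HdrSketch { int64_t seqno_g; uint64_t size; };

static inline HdrSketch* ptr_to_hdr(void* ptr)
{
    return static_cast<HdrSketch*>(ptr) - 1; // header sits just below payload
}

static inline void* hdr_to_ptr(HdrSketch* bh)
{
    return bh + 1;                           // payload follows the header
}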
/* discard all seqnos preceding and including seqno */
bool RingBuffer::discard_seqno (int64_t seqno)
{
    for (seqno2ptr_t::iterator i = seqno2ptr_.begin();
         i != seqno2ptr_.end() && i->first <= seqno;)
    {
        seqno2ptr_t::iterator j(i); ++i;
        BufferHeader* const bh (ptr2BH (j->second));

        if (gu_likely (BH_is_released(bh)))
        {
            seqno2ptr_.erase (j);
            bh->seqno_g = SEQNO_ILL; // will never be accessed by seqno

            switch (bh->store)
            {
            case BUFFER_IN_RB:
                discard(bh);
                break;
            case BUFFER_IN_MEM:
            {
                MemStore* const ms(static_cast<MemStore*>(bh->ctx));
                ms->discard(bh);
                break;
            }
            case BUFFER_IN_PAGE:
            {
                Page*      const page (static_cast<Page*>(bh->ctx));
                PageStore* const ps   (PageStore::page_store(page));
                ps->discard(bh);
                break;
            }
            default:
                log_fatal << "Corrupt buffer header: " << bh;
                abort();
            }
        }
        else
        {
            return false;
        }
    }

    return true;
}
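/* Hedged sketch (placeholder types, not GCache's API): unlike
 * GCache::discard_seqno(), which owns all three stores, the ring buffer
 * only knows itself, so each header carries an enum tag naming its
 * owning store plus a ctx pointer back to it. The switch above is an
 * instance of this routing pattern: */
enum StoreKind { STORE_MEM, STORE_RB, STORE_PAGE };

struct StoreSketch
{
    virtual void discard(void* bh) = 0;
    virtual ~StoreSketch() {}
};

struct TaggedHdr { StoreKind store; StoreSketch* ctx; };

void route_discard(TaggedHdr* bh, StoreSketch& self)
{
    if (STORE_RB == bh->store) self.discard(bh);     // our own buffer
    else                       bh->ctx->discard(bh); // hand back to owner
}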
bool MemStore::have_free_space (size_type size)
{
    while ((size_ + size > max_size_) && !seqno2ptr_.empty())
    {
        /* try to free some released bufs */
        seqno2ptr_iter_t const i  (seqno2ptr_.begin());
        BufferHeader*    const bh (ptr2BH (i->second));

        if (BH_is_released(bh)) /* discard buffer */
        {
            seqno2ptr_.erase(i);
            bh->seqno_g = SEQNO_ILL;

            switch (bh->store)
            {
            case BUFFER_IN_MEM:
                discard(bh);
                break;
            case BUFFER_IN_RB:
                bh->ctx->discard(bh);
                break;
            case BUFFER_IN_PAGE:
            {
                Page*      const page (static_cast<Page*>(bh->ctx));
                PageStore* const ps   (PageStore::page_store(page));
                ps->discard(bh);
                break;
            }
            default:
                log_fatal << "Corrupt buffer header: " << bh;
                abort();
            }
        }
        else
        {
            break;
        }
    }

    return (size_ + size <= max_size_);
}
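/* Hedged sketch of the eviction policy above (hypothetical types): keep
 * freeing released buffers from the oldest-seqno end of the index until
 * the request fits, or until an unreleased buffer blocks progress. */
#include <cstdint>
#include <map>

struct BufSketch { bool released; size_t size; };

bool fits_after_eviction(std::map<int64_t, BufSketch*>& idx,
                         size_t& used, size_t max_size, size_t want)
{
    while (used + want > max_size && !idx.empty())
    {
        std::map<int64_t, BufSketch*>::iterator i(idx.begin());
        BufSketch* const b(i->second);

        if (!b->released) break; // oldest buffer still in use: give up

        idx.erase(i);
        used -= b->size;
        delete b;
    }

    return used + want <= max_size;
}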
void* realloc (void* ptr, ssize_t size)
{
    BufferHeader* bh(0);
    ssize_t old_size(0);

    if (ptr)
    {
        bh = ptr2BH(ptr);
        assert (SEQNO_NONE == bh->seqno_g);
        old_size = bh->size;
    }

    ssize_t const diff_size(size - old_size);

    if (size > max_size_ || have_free_space(diff_size) == false) return 0;

    assert (size_ + diff_size <= max_size_);

    void* tmp = ::realloc (bh, size);

    if (tmp)
    {
        allocd_.erase(bh);
        allocd_.insert(tmp);

        bh = BH_cast(tmp);
        assert (bh->size == old_size);
        bh->size  = size;
        size_    += diff_size;

        return (bh + 1);
    }

    return 0;
}
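/* Hedged sketch of why realloc() re-keys the allocd_ set (placeholder
 * types): ::realloc() may move the block, so a container keyed by the
 * old header address has to drop that key and insert the new one. On
 * failure ::realloc() leaves the old block intact, so nothing changes. */
#include <cstdlib>
#include <set>

struct HdrSketch2 { size_t size; };

void* grow(std::set<void*>& allocd, HdrSketch2* old_bh, size_t new_total)
{
    void* const tmp(::realloc(old_bh, new_total));
    if (!tmp) return 0;   // old_bh is still valid and still registered

    allocd.erase(old_bh); // compare-only use of the (possibly stale) address
    allocd.insert(tmp);

    HdrSketch2* const bh(static_cast<HdrSketch2*>(tmp));
    bh->size = new_total;
    return bh + 1;        // payload follows the header
}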
void RingBuffer::seqno_reset()
{
    if (size_cache_ == size_free_) return;

    /* Find the last seqno'd RB buffer. It is likely to be close to the
     * end of released buffers chain. */
    BufferHeader* bh(0);

    for (seqno2ptr_t::reverse_iterator r(seqno2ptr_.rbegin());
         r != seqno2ptr_.rend(); ++r)
    {
        BufferHeader* const b(ptr2BH(r->second));
        if (BUFFER_IN_RB == b->store)
        {
#ifndef NDEBUG
            if (!BH_is_released(b))
            {
                log_fatal << "Buffer "
                          << reinterpret_cast<const void*>(r->second)
                          << ", seqno_g " << b->seqno_g
                          << ", seqno_d " << b->seqno_d
                          << " is not released.";
                assert(0);
            }
#endif
            bh = b;
            break;
        }
    }

    if (!bh) return;

    assert(bh->size > 0);
    assert(BH_is_released(bh));

    /* Seek the first unreleased buffer.
     * This should be called in isolation, when all seqno'd buffers are
     * freed, and the only unreleased buffers should come only from new
     * configuration. There should be no seqno'd buffers after it. */

    ssize_t const old(size_free_);

    assert (0 == size_trail_ || first_ > next_);
    first_ = reinterpret_cast<uint8_t*>(bh);

    while (BH_is_released(bh)) // next_ is never released - no endless loop
    {
        first_ = reinterpret_cast<uint8_t*>(BH_next(bh));

        if (gu_unlikely (0 == bh->size && first_ != next_))
        {
            // rollover
            assert (first_ > next_);
            first_ = start_;
        }

        bh = BH_cast(first_);
    }

    BH_assert_clear(BH_cast(next_));

    if (first_ == next_)
    {
        log_info << "GCache DEBUG: RingBuffer::seqno_reset(): full reset";
        /* empty RB, reset it completely */
        reset();
        return;
    }

    assert ((BH_cast(first_))->size > 0);
    assert (first_ != next_);
    assert ((BH_cast(first_))->seqno_g == SEQNO_NONE);
    assert (!BH_is_released(BH_cast(first_)));

    /* Estimate how much space remains */
    if (first_ < next_)
    {
        /* start_      first_        next_       end_
         *   |           |###########|            |
         */
        size_used_  = next_ - first_;
        size_free_  = size_cache_ - size_used_;
        size_trail_ = 0;
    }
    else
    {
        /* start_     next_          first_       end_
         *   |#######|                 |#####|     |
         *                                    ^size_trail_
         */
        assert(size_trail_ > 0);
        size_free_ = first_ - next_ + size_trail_ - sizeof(BufferHeader);
        size_used_ = size_cache_ - size_free_;
    }

    assert_sizes();
    assert(size_free_ < size_cache_);

    log_info << "GCache DEBUG: RingBuffer::seqno_reset(): discarded "
             << (size_free_ - old) << " bytes";

    /* There is a small but non-0 probability that some released buffers
     * are locked within yet unreleased aborted local actions.
     * Seek all the way to next_, invalidate seqnos and update size_free_ */

    assert(first_ != next_);
    assert(bh == BH_cast(first_));

    long total(1);
    long locked(0);

    bh = BH_next(bh);

    while (bh != BH_cast(next_))
    {
        if (gu_likely (bh->size > 0))
        {
            total++;

            if (bh->seqno_g != SEQNO_NONE)
            {
                // either released or already discarded buffer
                assert (BH_is_released(bh));
                bh->seqno_g = SEQNO_ILL;
                discard (bh);
                locked++;
            }
            else
            {
                assert(!BH_is_released(bh));
            }

            bh = BH_next(bh);
        }
        else // rollover
        {
            assert (BH_cast(next_) < bh);
            bh = BH_cast(start_);
        }
    }

    log_info << "GCache DEBUG: RingBuffer::seqno_reset(): found "
             << locked << '/' << total << " locked buffers";

    assert_sizes();
}
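/* Hedged sketch of the ring traversal convention seqno_reset() leans on
 * (placeholder names, not GCache's definitions): headers are laid out
 * back to back, the "next" operation advances by the header's size, and
 * a zero-size header marks the wrap point, sending the scan back to the
 * start of the arena. */
#include <cstdint>

struct RBHdr { uint64_t size; };

static inline RBHdr* next_hdr(RBHdr* bh)
{
    return reinterpret_cast<RBHdr*>(
        reinterpret_cast<uint8_t*>(bh) + bh->size);
}

long count_buffers(uint8_t* start, uint8_t* next, RBHdr* bh)
{
    long total(0);
    while (bh != reinterpret_cast<RBHdr*>(next))
    {
        if (bh->size > 0) { total++; bh = next_hdr(bh); }          // advance
        else              { bh = reinterpret_cast<RBHdr*>(start); } // rollover
    }
    return total;
}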