bool GCache::discard_seqno (int64_t seqno) { // seqno = std::min(seqno, seqno_released); for (seqno2ptr_t::iterator i = seqno2ptr.begin(); i != seqno2ptr.end() && i->first <= seqno;) { seqno2ptr_t::iterator j(i); ++i; BufferHeader* bh(ptr2BH (j->second)); if (gu_likely(BH_is_released(bh))) { assert (bh->seqno_g <= seqno); seqno2ptr.erase (j); bh->seqno_g = SEQNO_ILL; // will never be reused switch (bh->store) { case BUFFER_IN_MEM: mem.discard (bh); break; case BUFFER_IN_RB: rb.discard (bh); break; case BUFFER_IN_PAGE: ps.discard (bh); break; default: log_fatal << "Corrupt buffer header: " << bh; abort(); } } else { return false; } } return true; }
/* Reclaim the space of a released, seqno-invalidated buffer: only the
 * free-space accounting changes here (presumably the bytes are reused
 * in place by the owning store - verify against caller). */
void discard (BufferHeader* const buf)
{
    assert (BH_is_released(buf));
    assert (SEQNO_ILL == buf->seqno_g);

    size_free_ += buf->size;
    assert (size_free_ <= size_cache_);
}
/* Return a released buffer's memory to the heap and drop it from the
 * allocation registry, shrinking the store's accounted size.
 *
 * FIX: the registry erase now happens BEFORE ::free() - the original
 * called allocd_.erase(bh) after freeing bh, which uses a deallocated
 * pointer value (undefined behavior, even when used only as a set key)
 * and contradicts the erase-then-free order in MemStore::seqno_reset().
 *
 * NOTE(review): seqno_reset() iterates allocd_ and applies ptr2BH() to
 * the stored values, which suggests allocd_ holds DATA pointers, while
 * this function erases the HEADER pointer. If so, this erase never
 * matches and leaves a dangling entry - verify which pointer the
 * allocation path inserts before changing the key here. */
void discard (BufferHeader* bh)
{
    assert (BH_is_released(bh));

    allocd_.erase(bh);  // unregister first, while bh is still a valid pointer
    size_ -= bh->size;  // account the released bytes
    ::free (bh);        // finally release the memory
}
/* Account a buffer as no longer in use. A buffer that never received a
 * global seqno (SEQNO_NONE) is unreachable by seqno lookups, so its
 * space can be reclaimed immediately; seqno'd buffers are left for
 * discard_seqno() to collect in order. */
void RingBuffer::free (BufferHeader* const bh)
{
    assert(BH_is_released(bh));

    size_used_ -= bh->size;
    assert(size_used_ >= 0);

    if (bh->seqno_g != SEQNO_NONE) return; // still reachable by seqno

    bh->seqno_g = SEQNO_ILL;
    discard (bh);
}
/* discard all seqnos preceeding and including seqno */ bool RingBuffer::discard_seqno (int64_t seqno) { for (seqno2ptr_t::iterator i = seqno2ptr_.begin(); i != seqno2ptr_.end() && i->first <= seqno;) { seqno2ptr_t::iterator j(i); ++i; BufferHeader* const bh (ptr2BH (j->second)); if (gu_likely (BH_is_released(bh))) { seqno2ptr_.erase (j); bh->seqno_g = SEQNO_ILL; // will never be accessed by seqno switch (bh->store) { case BUFFER_IN_RB: discard(bh); break; case BUFFER_IN_MEM: { MemStore* const ms(static_cast<MemStore*>(bh->ctx)); ms->discard(bh); break; } case BUFFER_IN_PAGE: { Page* const page (static_cast<Page*>(bh->ctx)); PageStore* const ps (PageStore::page_store(page)); ps->discard(bh); break; } default: log_fatal << "Corrupt buffer header: " << bh; abort(); } } else { return false; } } return true; }
/* Check whether the store can take 'size' more bytes, evicting released
 * seqno'd buffers (lowest seqno first) to make room if necessary.
 * Returns true iff size_ + size fits under max_size_ after eviction. */
bool MemStore::have_free_space (size_type size)
{
    while ((size_ + size > max_size_) && !seqno2ptr_.empty())
    {
        /* try to free some released bufs */
        seqno2ptr_iter_t const i  (seqno2ptr_.begin()); // lowest seqno first
        BufferHeader*    const bh (ptr2BH (i->second));

        if (BH_is_released(bh)) /* discard buffer */
        {
            seqno2ptr_.erase(i);
            bh->seqno_g = SEQNO_ILL; // no longer reachable by seqno

            /* route the buffer back to the store that owns it */
            switch (bh->store)
            {
            case BUFFER_IN_MEM:
                discard(bh);
                break;
            case BUFFER_IN_RB:
                // NOTE(review): unlike RingBuffer::discard_seqno(), ctx is
                // used here without a cast - verify the declared type of
                // BufferHeader::ctx supports a direct discard() call.
                bh->ctx->discard(bh);
                break;
            case BUFFER_IN_PAGE:
            {
                Page*      const page (static_cast<Page*>(bh->ctx));
                PageStore* const ps   (PageStore::page_store(page));
                ps->discard(bh);
                break;
            }
            default:
                log_fatal << "Corrupt buffer header: " << bh;
                abort();
            }
        }
        else
        {
            break; // oldest buffer still in use - cannot evict any further
        }
    }

    return (size_ + size <= max_size_);
}
/* Purge every registered buffer that was ever assigned a global seqno.
 * Buffers still at SEQNO_NONE are kept (they carry data not indexed by
 * seqno). Frees the memory and shrinks the accounted size. */
void MemStore::seqno_reset()
{
    std::set<void*>::iterator it(allocd_.begin());

    while (it != allocd_.end())
    {
        std::set<void*>::iterator const curr(it);
        ++it; // advance before a potential erase of 'curr'

        BufferHeader* const bh(ptr2BH(*curr));

        if (SEQNO_NONE == bh->seqno_g) continue; // keep unseqno'd buffers

        assert (BH_is_released(bh));

        allocd_.erase (curr); // unregister before freeing the memory
        size_ -= bh->size;
        ::free (bh);
    }
}
void discard (BufferHeader* bh) { assert(BH_is_released(bh)); assert(SEQNO_ILL == bh->seqno_g); free_page_ptr(static_cast<Page*>(bh->ctx), bh); }
/*
 * Reset the ring buffer's layout after all seqno'd buffers are expected
 * to have been released, keeping only the not-yet-released buffers that
 * belong to the new configuration.
 *
 * Steps (as implemented below):
 *  1. find the last seqno'd buffer stored in the RB;
 *  2. from there, advance first_ past every released buffer;
 *  3. recompute size_used_/size_free_/size_trail_ from the new layout;
 *  4. sweep first_..next_ and discard released-but-still-seqno'd
 *     buffers that were locked inside aborted local actions.
 */
void RingBuffer::seqno_reset()
{
    if (size_cache_ == size_free_) return; // cache empty - nothing to do

    /* Find the last seqno'd RB buffer. It is likely to be close to the
     * end of released buffers chain. */
    BufferHeader* bh(0);

    for (seqno2ptr_t::reverse_iterator r(seqno2ptr_.rbegin());
         r != seqno2ptr_.rend(); ++r)
    {
        BufferHeader* const b(ptr2BH(r->second));

        if (BUFFER_IN_RB == b->store)
        {
#ifndef NDEBUG
            /* at this point every seqno'd RB buffer must be released */
            if (!BH_is_released(b))
            {
                log_fatal << "Buffer "
                          << reinterpret_cast<const void*>(r->second)
                          << ", seqno_g " << b->seqno_g << ", seqno_d "
                          << b->seqno_d << " is not released.";
                assert(0);
            }
#endif
            bh = b;
            break;
        }
    }

    if (!bh) return; // no seqno'd buffers in the RB - layout unchanged

    assert(bh->size > 0);
    assert(BH_is_released(bh));

    /* Seek the first unreleased buffer.
     * This should be called in isolation, when all seqno'd buffers are
     * freed, and the only unreleased buffers should come only from new
     * configuration. There should be no seqno'd buffers after it. */

    ssize_t const old(size_free_); // remembered to report discarded bytes

    assert (0 == size_trail_ || first_ > next_);

    first_ = reinterpret_cast<uint8_t*>(bh);

    while (BH_is_released(bh)) // next_ is never released - no endless loop
    {
        first_ = reinterpret_cast<uint8_t*>(BH_next(bh));

        if (gu_unlikely (0 == bh->size && first_ != next_)) // rollover
        {
            assert (first_ > next_);
            first_ = start_;
        }

        bh = BH_cast(first_);
    }

    BH_assert_clear(BH_cast(next_));

    if (first_ == next_)
    {
        log_info << "GCache DEBUG: RingBuffer::seqno_reset(): full reset";
        /* empty RB, reset it completely */
        reset();
        return;
    }

    assert ((BH_cast(first_))->size > 0);
    assert (first_ != next_);
    assert ((BH_cast(first_))->seqno_g == SEQNO_NONE);
    assert (!BH_is_released(BH_cast(first_)));

    /* Estimate how much space remains */
    if (first_ < next_)
    {
        /* used area is one contiguous stretch, no trailing gap:
         * start_        first_      next_          end_
         *   |             |###########|              |        */
        size_used_ = next_ - first_;
        size_free_ = size_cache_ - size_used_;
        size_trail_ = 0;
    }
    else
    {
        /* used area wraps around the end of the cache:
         * start_      next_         first_          end_
         *   |#######|                 |#####|         |
         *                                   ^size_trail_     */
        assert(size_trail_ > 0);
        size_free_ = first_ - next_ + size_trail_ - sizeof(BufferHeader);
        size_used_ = size_cache_ - size_free_;
    }

    assert_sizes();
    assert(size_free_ < size_cache_);

    log_info << "GCache DEBUG: RingBuffer::seqno_reset(): discarded "
             << (size_free_ - old) << " bytes";

    /* There is a small but non-0 probability that some released buffers
     * are locked within yet unreleased aborted local actions.
     * Seek all the way to next_, invalidate seqnos and update size_free_ */

    assert(first_ != next_);
    assert(bh == BH_cast(first_));

    long total(1);  // buffers inspected (first_ itself counts as one)
    long locked(0); // released-but-seqno'd buffers found and discarded

    bh = BH_next(bh);

    while (bh != BH_cast(next_))
    {
        if (gu_likely (bh->size > 0))
        {
            total++;

            if (bh->seqno_g != SEQNO_NONE)
            {
                // either released or already discarded buffer
                assert (BH_is_released(bh));
                bh->seqno_g = SEQNO_ILL;
                discard (bh);
                locked++;
            }
            else
            {
                assert(!BH_is_released(bh));
            }

            bh = BH_next(bh);
        }
        else // rollover
        {
            assert (BH_cast(next_) < bh);
            bh = BH_cast(start_);
        }
    }

    log_info << "GCache DEBUG: RingBuffer::seqno_reset(): found "
             << locked << '/' << total << " locked buffers";

    assert_sizes();
}
// Allocate room for a buffer of 'size' bytes from the ring and return
// its initialized BufferHeader, or 0 if enough released space could not
// be reclaimed. 'size_next' below additionally reserves room for the
// trailing sentinel header written at next_.
// NOTE(review): the original comment said "pointer to buffer data area",
// but the function returns the header pointer - verify callers.
BufferHeader* RingBuffer::get_new_buffer (ssize_t const size)
{
    assert_size_free();
    assert (size > 0);

    BH_assert_clear(BH_cast(next_));

    uint8_t* ret(next_); // candidate position for the new buffer

    /* total room needed: the buffer plus a trailing sentinel header */
    ssize_t const size_next (size + sizeof(BufferHeader));

    if (ret >= first_)
    {
        assert (0 == size_trail_);
        // try to find space at the end
        ssize_t const end_size(end_ - ret);

        if (end_size >= size_next)
        {
            assert(size_free_ >= size);
            goto found_space;
        }
        else
        {
            // no space at the end, go from the start
            size_trail_ = end_size; // remember the unusable tail
            ret = start_;
        }
    }

    assert (ret <= first_);

    if ((first_ - ret) >= size_next) { assert(size_free_ >= size); }

    /* not enough contiguous room before first_: discard released
     * buffers at first_ one by one until the gap is big enough or we
     * hit a buffer that cannot be freed */
    while ((first_ - ret) < size_next)
    {
        // try to discard first buffer to get more space
        BufferHeader* bh = BH_cast(first_);

        if (!BH_is_released(bh) /* true also when first_ == next_ */ ||
            (bh->seqno_g > 0 && !discard_seqno (bh->seqno_g)))
        {
            // can't free any more space, so no buffer, next_ is unchanged
            // and revert size_trail_ if it was set above
            if (next_ >= first_) size_trail_ = 0;
            assert_sizes();
            return 0;
        }

        assert (first_ != next_);
        /* buffer is either discarded already, or it must have seqno */
        assert (SEQNO_ILL == bh->seqno_g);

        first_ += bh->size;
        assert_size_free();

        if (gu_unlikely(0 == (BH_cast(first_))->size))
        {
            // empty header: check if we fit at the end and roll over if not
            assert(first_ >= next_);
            assert(first_ >= ret);

            first_ = start_;
            // WRONG -- NOTE(review): this marker is present in the
            // original source; the rollover handling here was flagged as
            // suspect. Verify against upstream history before changing.
            if (first_ != ret) size_trail_ = 0;
            // we're now contiguous: first_ < next_
            assert_size_free();

            if ((end_ - ret) >= size_next)
            {
                assert(size_free_ >= size);
                size_trail_ = 0;
                goto found_space;
            }
            else
            {
                size_trail_ = end_ - ret;
                ret = start_;
            }
        }
    }

#ifndef NDEBUG
    if ((first_ - ret) < size_next)
    {
        log_fatal << "Assertion ((first - ret) >= size_next) failed: "
                  << std::endl
                  << "first offt = " << (first_ - start_) << std::endl
                  << "next offt = "  << (next_ - start_)  << std::endl
                  << "end offt = "   << (end_ - start_)   << std::endl
                  << "ret offt = "   << (ret - start_)    << std::endl
                  << "size_next = "  << size_next         << std::endl;
        abort();
    }
#endif

found_space:

    /* commit the allocation: update accounting, initialize the header,
     * and write the sentinel header that marks the end of data */
    size_used_ += size;
    assert (size_used_ <= size_cache_);
    size_free_ -= size;
    assert (size_free_ >= 0);

    BufferHeader* const bh(BH_cast(ret));
    bh->size    = size;
    bh->seqno_g = SEQNO_NONE; // no global seqno assigned yet
    bh->seqno_d = SEQNO_ILL;
    bh->flags   = 0;
    bh->store   = BUFFER_IN_RB;
    bh->ctx     = this;

    next_ = ret + size;
    assert (next_ + sizeof(BufferHeader) <= end_);
    BH_clear (BH_cast(next_)); // sentinel
    assert_sizes();

    return bh;
}