tagBlock& make_last_block_not_full() {
    // Total capacity acquired so far exceeds the bytes in use, so the
    // last block still has free space; reuse it. Otherwise acquire a
    // fresh block to become the new tail.
    if (block_size * blocks.size() > total_size)
        return blocks.back();
    return acquire_block();
}
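/* A minimal, self-contained sketch (not from the original source) of the
 * invariant behind make_last_block_not_full(): block_size * blocks.size()
 * is the total capacity acquired so far, while total_size counts the bytes
 * actually handed out, so capacity > usage means the tail block is not yet
 * full. The Arena type and append_byte() below are hypothetical, invented
 * only to illustrate; only the capacity-vs-usage test mirrors the code
 * above.
 */
#include <cstddef>
#include <vector>

struct Arena {
    static constexpr std::size_t block_size = 4096;
    std::vector<std::vector<char>> blocks; // each entry is one block
    std::size_t total_size = 0;            // bytes handed out so far

    std::vector<char>& make_last_block_not_full() {
        if (block_size * blocks.size() > total_size)
            return blocks.back();          // tail still has room
        blocks.emplace_back(block_size);   // stand-in for acquire_block()
        return blocks.back();
    }

    void append_byte(char c) {
        std::vector<char>& b = make_last_block_not_full();
        // Offset of the first free byte within the tail block; "not full"
        // guarantees at least one free byte, so this never overflows.
        std::size_t used = total_size - block_size * (blocks.size() - 1);
        b[used] = c;
        ++total_size;
    }
};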
/* Rearrange the list. This is called after we failed to
 * acquire a chip from the last block in the list (tail).
 * We don't know about other blocks in the list yet.
 */
void block_list::_change_blocks(size_t chip_size, size_t chip_count,
                                size_t block_size)
{
    (void) chip_size; // keep gcc happy

    // first time through?
    if(_tail == &_fake_block) {
        // remove the fake block from the list.
        _tail = acquire_block(block_size);
        _tail->_next = _tail;
        return;
    }

    /* Check whether we're chewing through our blocks too fast for the
       current ring size. If we consistently recycle blocks while they
       are still more than half full, then we need a bigger ring so old
       blocks have more time to cool off between reuses.

       To respond to the end of a spike gracefully, we're going to be
       pretty aggressive about returning blocks to the global pool:
       when recycling blocks we check for runs of blocks which do not
       meet some minimum utilization threshold, discarding all but one
       of them (keep the last for buffering purposes).
    */

    // decay_rate is used to compute the average hit rate;
    // consider (roughly) the last 5 blocks.
    static double const decay_rate = 1./5;
    _avg_hit_rate = _hit_count*(1-decay_rate) + _avg_hit_rate*decay_rate;

    // min_allocated is where we would like to see the hit counts
    // settle. If we find we are failing to acquire while the hit counts
    // are below this, we must increase the #blocks in the list (ring size).
    size_t const min_allocated = (chip_count+1)/2; // 50%

    // max_available is an integral number, less than but close to
    // chip_count.
    // TODO: better explanation of this:
    // Too many chips available in a block of chips suggests we should
    // unload some extra blocks.
    // Choose the smaller of chip_count-1 and .9*chip_count.
    size_t const max_available = chip_count - std::max((int)(.1*chip_count), 1);

    if(_hit_count < min_allocated && _avg_hit_rate < min_allocated) {
        // too fast... grow the ring
        block_of_chips* new_block = acquire_block(block_size);
        new_block->_next = _tail->_next;
        _tail = _tail->_next = new_block;
    }
    else {
        // compress the run, if any
        block_of_chips* prev = 0;
        block_of_chips* cur = _tail;
        block_of_chips* next;
        while(1) {
            next = cur->_next;

            /* make all zombies in the block usable */
            next->recycle();

            /* Now see how many of the chips are still in use. If too
             * few, move the tail, set the hit count to 0, and return so
             * that the client ends up calling us again and acquiring
             * another block to become the new tail.
             */
            if(next->_bits.usable_count() <= max_available) {
                // Cause the tail to move, to avoid certain perverse
                // behavior when the usable count is 0 but chips have
                // been freed at the front of the list. We don't want
                // to forever allocate new blocks just because there's
                // one fully-allocated block in the list. By moving the
                // tail, it slowly circulates through the list and our
                // first inspection will be of a *different* block
                // after the newly allocated block is consumed.
                cur = next;
                break;
            }

            // This block has plenty of usable chips. Enough, in fact,
            // that it's worth releasing it to the underlying pool.
            if(prev) {
                assert(prev != cur);
                // assert(cur->_bits.usable_count() > max_available);
                assert(next->_bits.usable_count() > max_available);
                prev->_next = next;
                _pool->release_block(cur);
                cur = prev; // reset
            }

            // avoid the endless loop
            if(next == _tail) break;

            prev = cur;
            cur = next;
        } // while

        // Recycle, repair the ring, and advance.
        // NB: if we broke out of the while loop on the first try,
        // we will not have moved the tail at all.
        _tail = cur;
    }

    // # of fast acquires (hits) since the last _change_blocks
    _hit_count = 0;
}
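/* A minimal, standalone sketch (not part of the original source) of the
 * ring-sizing policy implemented above: an exponential moving average of
 * the hit count decides between growing the ring and compressing it. The
 * decay_rate formula and the 50% min_allocated threshold are copied from
 * _change_blocks; simulate_policy(), chip_count, and the hits[] series
 * are hypothetical values chosen only to illustrate the decision.
 */
#include <cstddef>
#include <cstdio>

static void simulate_policy() {
    double const decay_rate = 1./5;                     // same constant as above
    std::size_t const chip_count = 64;                  // hypothetical chips/block
    std::size_t const min_allocated = (chip_count+1)/2; // 50% threshold

    double avg_hit_rate = 0;
    std::size_t const hits[] = { 10, 12, 8, 15, 40, 60 }; // hypothetical per-round hits
    for (std::size_t h : hits) {
        avg_hit_rate = h*(1-decay_rate) + avg_hit_rate*decay_rate;
        // Growth requires both the instantaneous and the averaged hit
        // counts to be low, which filters out one-off spikes.
        bool const grow = h < min_allocated && avg_hit_rate < min_allocated;
        std::printf("hits=%zu avg=%.1f -> %s\n", h, avg_hit_rate,
                    grow ? "grow ring" : "compress/recycle");
    }
}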