void*
    RingBuffer::realloc (void* ptr, ssize_t const size)
    {
        assert_sizes();
        assert (NULL != ptr);
        assert (size > 0);
        // The free space in a ring buffer may be split in two parts, so
        // only a contiguous buffer of at most half the total cache area
        // can be allocated reliably. So compare to half the space.
        if (size > (size_cache_ / 2)) return 0;

        BufferHeader* const bh(ptr2BH(ptr));

//        reallocs_++;

        // first check if we can grow this buffer by allocating
        // adjacent buffer
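        //
        // Growing in place is possible only when this buffer is the most
        // recent allocation, i.e. its end coincides with next_ (sketch):
        //
        //   ... [ bh | data ...... ][ cleared header | free tail ... ]
        //              ^ptr          ^BH_next(bh) == adj_ptr == next_
        //
        // A shrink request (adj_size <= 0) is treated as a no-op below.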
        {
            ssize_t  const adj_size(size - bh->size);
            if (adj_size <= 0) return ptr;

            uint8_t* const adj_ptr(reinterpret_cast<uint8_t*>(BH_next(bh)));
            if (adj_ptr == next_)
            {
                ssize_t const size_trail_saved(size_trail_);
                void* const adj_buf (get_new_buffer (adj_size));

                BH_assert_clear(BH_cast(next_));

                if (adj_ptr == adj_buf)
                {
                    bh->size = next_ - static_cast<uint8_t*>(ptr) +
                        sizeof(BufferHeader);
                    return ptr;
                }
                else // could not grow in place - undo the allocation
                {
                    next_ = adj_ptr;
                    BH_clear (BH_cast(next_));
                    size_used_ -= adj_size;
                    size_free_ += adj_size;
                    if (next_ < first_) size_trail_ = size_trail_saved;
                }
            }
        }

        BH_assert_clear(BH_cast(next_));
        assert_sizes();

        // could not grow in place: allocate a new buffer and copy the data
        void* ptr_new = malloc (size);
        if (ptr_new != 0) {
            memcpy (ptr_new, ptr, bh->size - sizeof(BufferHeader));
            free (bh);
        }

        BH_assert_clear(BH_cast(next_));
        assert_sizes();

        return ptr_new;
    }
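
    /* Usage sketch (illustrative only, not part of the original source).
     * As with std::realloc(), a successful grow may move the data; unlike
     * std::realloc(), a 0 return leaves the original buffer valid. Sizes
     * here include sizeof(BufferHeader), as the memcpy() above implies:
     *
     *   void* p = rb.malloc (128 + sizeof(BufferHeader));
     *   if (0 != p)
     *   {
     *       void* q = rb.realloc (p, 256 + sizeof(BufferHeader));
     *       if (0 != q) p = q; // q may equal p if the buffer grew in place
     *       // if q == 0, p is still valid at its old size
     *   }
     */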
    void*
    RingBuffer::malloc (ssize_t size)
    {
        void* ret(0);

        // Only a contiguous buffer of at most 1/2 of the total cache space
        // can be allocated reliably (the free space may be split in two).
        // So compare to half the space
        if (size <= (size_cache_ / 2) && size <= (size_cache_ - size_used_))
        {
            BufferHeader* const bh (get_new_buffer (size));

            BH_assert_clear(BH_cast(next_));
//            mallocs_++;

            if (gu_likely (0 != bh)) ret = bh + 1;
        }

        assert_sizes();

        return ret; // "out of memory"
    }
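
    /* Helper shapes assumed by the code above (a sketch; the actual
     * definitions live elsewhere in this module). The user pointer is the
     * first byte past the header ("bh + 1" in malloc() above), so ptr2BH()
     * is its inverse, and BH_next() steps over a buffer using the size
     * stored in its header:
     *
     *   static inline BufferHeader* ptr2BH (void* ptr)
     *   {
     *       return static_cast<BufferHeader*>(ptr) - 1;
     *   }
     *
     *   static inline BufferHeader* BH_cast (uint8_t* p)
     *   {
     *       return reinterpret_cast<BufferHeader*>(p);
     *   }
     *
     *   static inline BufferHeader* BH_next (BufferHeader* bh)
     *   {
     *       return BH_cast (reinterpret_cast<uint8_t*>(bh) + bh->size);
     *   }
     */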
    void
    RingBuffer::seqno_reset()
    {
        if (size_cache_ == size_free_) return;

        /* Find the last seqno'd RB buffer. It is likely to be close to the
         * end of the released-buffer chain. */
        BufferHeader* bh(0);

        for (seqno2ptr_t::reverse_iterator r(seqno2ptr_.rbegin());
             r != seqno2ptr_.rend(); ++r)
        {
            BufferHeader* const b(ptr2BH(r->second));
            if (BUFFER_IN_RB == b->store)
            {
#ifndef NDEBUG
                if (!BH_is_released(b))
                {
                    log_fatal << "Buffer "
                              << reinterpret_cast<const void*>(r->second)
                              << ", seqno_g " << b->seqno_g << ", seqno_d "
                              << b->seqno_d << " is not released.";
                    assert(0);
                }
#endif
                bh = b;
                break;
            }
        }

        if (!bh) return;

        assert(bh->size > 0);
        assert(BH_is_released(bh));

        /* Seek the first unreleased buffer.
         * This should be called in isolation, when all seqno'd buffers have
         * been freed and the only unreleased buffers can come from the new
         * configuration. There should be no seqno'd buffers after it. */

        ssize_t const old(size_free_);

        assert (0 == size_trail_ || first_ > next_);
        first_ = reinterpret_cast<uint8_t*>(bh);

        while (BH_is_released(bh)) // next_ is never released - no endless loop
        {
             first_ = reinterpret_cast<uint8_t*>(BH_next(bh));

             if (gu_unlikely (0 == bh->size && first_ != next_))
             {
                 // rollover
                 assert (first_ > next_);
                 first_ = start_;
             }

             bh = BH_cast(first_);
        }

        BH_assert_clear(BH_cast(next_));

        if (first_ == next_)
        {
            log_info << "GCache DEBUG: RingBuffer::seqno_reset(): full reset";
            /* empty RB, reset it completely */
            reset();
            return;
        }

        assert ((BH_cast(first_))->size > 0);
        assert (first_ != next_);
        assert ((BH_cast(first_))->seqno_g == SEQNO_NONE);
        assert (!BH_is_released(BH_cast(first_)));

        /* Estimate how much space remains */
        if (first_ < next_)
        {
            /* start_  first_      next_    end_
             *   |       |###########|       |
             */
            size_used_ = next_ - first_;
            size_free_ = size_cache_ - size_used_;
            size_trail_ = 0;
        }
        else
        {
            /* start_  next_       first_   end_
             *   |#######|           |#####| |
             *                              ^size_trail_ */
            assert(size_trail_ > 0);
            size_free_ = first_ - next_ + size_trail_ - sizeof(BufferHeader);
            size_used_ = size_cache_ - size_free_;
        }

        assert_sizes();
        assert(size_free_ < size_cache_);

        log_info << "GCache DEBUG: RingBuffer::seqno_reset(): discarded "
                 << (size_free_ - old) << " bytes";

        /* There is a small but non-zero probability that some released
         * buffers are locked within as yet unreleased aborted local actions.
         * Seek all the way to next_, invalidate seqnos and update
         * size_free_. */

        assert(first_ != next_);
        assert(bh == BH_cast(first_));

        long total(1);
        long locked(0);

        bh = BH_next(bh);

        while (bh != BH_cast(next_))
        {
            if (gu_likely (bh->size > 0))
            {
                total++;

                if (bh->seqno_g != SEQNO_NONE)
                {
                    // either released or already discarded buffer
                    assert (BH_is_released(bh));
                    bh->seqno_g = SEQNO_ILL;
                    discard (bh);
                    locked++;
                }
                else
                {
                    assert(!BH_is_released(bh));
                }

                bh = BH_next(bh);
            }
            else // rollover
            {
                assert (BH_cast(next_) < bh);
                bh = BH_cast(start_);
            }
        }

        log_info << "GCache DEBUG: RingBuffer::seqno_reset(): found "
                 << locked << '/' << total << " locked buffers";

        assert_sizes();
    }
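
    /* seqno2ptr_t above is assumed to be an ordered seqno -> pointer map,
     * along the lines of (a sketch; the actual typedef lives elsewhere):
     *
     *   typedef std::map<int64_t, void*> seqno2ptr_t;
     *
     * The ordering is what lets the reverse iteration at the top of
     * seqno_reset() start from the highest seqno. */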
    // returns pointer to the new buffer header or 0 if no space found
    BufferHeader*
    RingBuffer::get_new_buffer (ssize_t const size)
    {
        assert_size_free();
        assert (size > 0);

        BH_assert_clear(BH_cast(next_));

        uint8_t* ret(next_);

        ssize_t const size_next (size + sizeof(BufferHeader));
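        // size covers this buffer's header; size_next additionally reserves
        // room for the cleared sentinel header that is written at next_
        // once space is found (see BH_clear() at the end of this function)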

        if (ret >= first_) {
            assert (0 == size_trail_);
            // try to find space at the end
            ssize_t const end_size(end_ - ret);

            if (end_size >= size_next) {
                assert(size_free_ >= size);
                goto found_space;
            }
            else {
                // no space at the end, go from the start
                size_trail_ = end_size;
                ret = start_;
            }
        }

        assert (ret <= first_);
        if ((first_ - ret) >= size_next) { assert(size_free_ >= size); }

        while ((first_ - ret) < size_next) {
            // try to discard first buffer to get more space
            BufferHeader* bh = BH_cast(first_);

            if (!BH_is_released(bh) /* true also when first_ == next_ */ ||
                (bh->seqno_g > 0 && !discard_seqno (bh->seqno_g)))
            {
                // can't free any more space: no buffer. next_ is unchanged,
                // and size_trail_ is reverted if it was set above
                if (next_ >= first_) size_trail_ = 0;
                assert_sizes();
                return 0;
            }

            assert (first_ != next_);
            /* buffer is either discarded already, or it must have seqno */
            assert (SEQNO_ILL == bh->seqno_g);

            first_ += bh->size;
            assert_size_free();

            if (gu_unlikely(0 == (BH_cast(first_))->size))
            {
                // empty header: check if we fit at the end and roll over if not
                assert(first_ >= next_);
                assert(first_ >= ret);

                first_ = start_;
// WRONG               if (first_ != ret) size_trail_ = 0; // we're now contiguous: first_ < next_
                assert_size_free();

                if ((end_ - ret) >= size_next)
                {
                    assert(size_free_ >= size);
                    size_trail_ = 0;
                    goto found_space;
                }
                else
                {
                    size_trail_ = end_ - ret;
                    ret = start_;
                }
            }
        }

#ifndef NDEBUG
        if ((first_ - ret) < size_next) {
            log_fatal << "Assertion ((first - ret) >= size_next) failed: "
                      << std::endl
                      << "first offt = " << (first_ - start_) << std::endl
                      << "next  offt = " << (next_  - start_) << std::endl
                      << "end   offt = " << (end_   - start_) << std::endl
                      << "ret   offt = " << (ret    - start_) << std::endl
                      << "size_next  = " << size_next         << std::endl;
            abort();
        }
#endif

    found_space:
        size_used_ += size;
        assert (size_used_ <= size_cache_);
        size_free_ -= size;
        assert (size_free_ >= 0);

        BufferHeader* const bh(BH_cast(ret));
        bh->size    = size;
        bh->seqno_g = SEQNO_NONE;
        bh->seqno_d = SEQNO_ILL;
        bh->flags   = 0;
        bh->store   = BUFFER_IN_RB;
        bh->ctx     = this;

        next_ = ret + size;
        assert (next_ + sizeof(BufferHeader) <= end_);
        BH_clear (BH_cast(next_));
        assert_sizes();

        return bh;
    }
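
    /* Geometry handled above (sketch). With next_ ahead of first_ the new
     * buffer goes into the free tail when it fits:
     *
     *   start_  first_      next_                end_
     *     |       |###########|<------ free ----->|
     *
     * Otherwise ret wraps to start_, the skipped tail is recorded in
     * size_trail_, and buffers are discarded from first_ onwards until the
     * gap in front of first_ is large enough:
     *
     *   start_  next_          first_       end_
     *     |#new###|<-- free --->|#####|trail|
     *                                 ^size_trail_ */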