Exemplo n.º 1
0
/*
 * Append @size bytes from @u8 to the ring buffer, growing it on demand.
 * Returns 0 on success, or -ENOMEM if the buffer could not be resized.
 */
int shl_ring_push(struct shl_ring *r, const void *u8, size_t size)
{
	size_t write_pos, tail_room;
	int err;

	if (!size)
		return 0;

	/* Make sure there is room for @size additional bytes. */
	err = ring_grow(r, size);
	if (err < 0)
		return err;

	write_pos = RING_MASK(r, r->start + r->used);
	tail_room = r->size - write_pos;

	if (tail_room < size) {
		/* Wraps: split the copy at the physical end of the buffer. */
		memcpy(&r->buf[write_pos], u8, tail_room);
		memcpy(r->buf, (const uint8_t*)u8 + tail_room,
		       size - tail_room);
	} else {
		memcpy(&r->buf[write_pos], u8, size);
	}

	r->used += size;

	return 0;
}
Exemplo n.º 2
0
/*
 * Insert a copy of @entry at the tail of @ring.
 * The ring is grown first if it is full, so a free slot always exists.
 */
void ring_put(Ring *ring, RingEntry *entry) {
    if (ring_full(ring)) {
        ring_grow(ring);
    }
    /* Copy the entry into the tail slot, then advance the tail index. */
    RingEntry *slot = &ring->data[ring->end];
    memcpy(slot, entry, sizeof *slot);
    ring->end = (ring->end + 1) % ring->capacity;
}
Exemplo n.º 3
0
/*
 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 * for 'num_descs'.
 *
 * If mflags contains M_WAITOK, blocks until enough space is available.
 *
 * Returns zero on success, or an errno on error.  If num_descs is beyond the
 * maximum ring size, returns EINVAL; if allocation would block and mflags
 * contains M_NOWAIT, returns EAGAIN.
 *
 * Must be called with the submit_lock held; returns with the lock held.  The
 * lock may be dropped to allocate the ring.
 *
 * (The submit_lock is needed to add any entries to the ring, so callers are
 * assured enough room is available.)
 */
static int
ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
{
	struct ioat_descriptor **new_ring;
	uint32_t order;
	int error;

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	error = 0;

	/* Reject requests larger than the ring can ever grow to. */
	if (num_descs < 1 || num_descs > (1 << IOAT_MAX_ORDER)) {
		error = EINVAL;
		goto out;
	}
	/* Device is quiescing (being torn down); refuse new work. */
	if (ioat->quiescing) {
		error = ENXIO;
		goto out;
	}

	for (;;) {
		/* Done as soon as the ring has room for the request. */
		if (ioat_get_ring_space(ioat) >= num_descs)
			goto out;

		order = ioat->ring_size_order;
		if (ioat->is_resize_pending || order == IOAT_MAX_ORDER) {
			/*
			 * Another thread is resizing, or the ring is already
			 * at maximum size: either sleep until woken by the
			 * resizer (M_WAITOK) or fail with EAGAIN.
			 */
			if ((mflags & M_WAITOK) != 0) {
				msleep(&ioat->tail, &ioat->submit_lock, 0,
				    "ioat_rsz", 0);
				continue;
			}

			error = EAGAIN;
			break;
		}

		/* We own the resize: allocate a ring of twice the size. */
		ioat->is_resize_pending = TRUE;
		for (;;) {
			/* Drop the lock across the possibly-sleeping alloc. */
			mtx_unlock(&ioat->submit_lock);

			new_ring = ioat_prealloc_ring(ioat, 1 << (order + 1),
			    TRUE, mflags);

			mtx_lock(&ioat->submit_lock);
			KASSERT(ioat->ring_size_order == order,
			    ("is_resize_pending should protect order"));

			if (new_ring == NULL) {
				/* Only M_NOWAIT allocations may fail. */
				KASSERT((mflags & M_WAITOK) == 0,
				    ("allocation failed"));
				error = EAGAIN;
				break;
			}

			error = ring_grow(ioat, order, new_ring);
			if (error == 0)
				break;
			/* ring_grow failed; retry the allocation. */
		}
		ioat->is_resize_pending = FALSE;
		/* Wake threads sleeping above, waiting on the resize. */
		wakeup(&ioat->tail);
		if (error)
			break;
	}

out:
	mtx_assert(&ioat->submit_lock, MA_OWNED);
	return (error);
}