Example #1
/*
 * pick up tick count scaled to reference tick count
 */
u_int
cc_get_timecount(struct timecounter *tc)
{
	struct cpu_info *ci;
	int64_t rcc, cc, ncsw;
	u_int gen;

 retry:
 	ncsw = curlwp->l_ncsw;
 	__insn_barrier();
	ci = curcpu();
	if (ci->ci_cc.cc_denom == 0) {
		/*
		 * This is our first time here on this CPU.  Just
		 * start with reasonable initial values.
		 */
	        ci->ci_cc.cc_cc    = cpu_counter32();
		ci->ci_cc.cc_val   = 0;
		if (ci->ci_cc.cc_gen == 0)
			ci->ci_cc.cc_gen++;

		ci->ci_cc.cc_denom = cpu_frequency(ci);
		if (ci->ci_cc.cc_denom == 0)
			ci->ci_cc.cc_denom = cc_timecounter.tc_frequency;
		ci->ci_cc.cc_delta = ci->ci_cc.cc_denom;
	}

	/*
	 * read the counter, and re-read if a re-calibration
	 * strikes in between
	 */
	do {
		/* pick up current generation number */
		gen = ci->ci_cc.cc_gen;

		/* determine local delta ticks */
		cc = cpu_counter32() - ci->ci_cc.cc_cc;
		if (cc < 0)
			cc += 0x100000000LL;

		/* scale to primary */
		rcc = (cc * ci->ci_cc.cc_delta) / ci->ci_cc.cc_denom
		    + ci->ci_cc.cc_val;
	} while (gen == 0 || gen != ci->ci_cc.cc_gen);
 	__insn_barrier();
 	if (ncsw != curlwp->l_ncsw) {
 		/* Was preempted */ 
 		goto retry;
	}

	return rcc;
}
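
The loop above is a lock-free reader: it snapshots cc_gen, computes the scaled count, and retries if a recalibration bumped the generation (or if the lwp was preempted, per the l_ncsw check). The sketch below isolates that pattern with invented names (struct genval, read_consistent); it assumes NetBSD's __insn_barrier() from <sys/cdefs.h> and uint64_t from <sys/types.h>, and note that the barrier only constrains the compiler, not the hardware.

struct genval {
	volatile unsigned	gen;	/* writer bumps this around each update; 0 = not yet valid */
	volatile uint64_t	val;
};

static uint64_t
read_consistent(const struct genval *s)
{
	unsigned g;
	uint64_t v;

	do {
		g = s->gen;		/* snapshot the generation */
		__insn_barrier();	/* read the value only after the snapshot */
		v = s->val;
		__insn_barrier();	/* re-check the generation last */
	} while (g == 0 || g != s->gen);

	return v;
}
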
Example #2
File: clock.c  Project: MarginC/kame
/*
 * Reads a consistent set of time-base values from Xen, into a shadow data
 * area.  Must be called at splclock.
 */
static void
get_time_values_from_xen(void)
{
	do {
		shadow_time_version = HYPERVISOR_shared_info->time_version2;
		__insn_barrier();
		shadow_tv.tv_sec = HYPERVISOR_shared_info->wc_sec;
		shadow_tv.tv_usec = HYPERVISOR_shared_info->wc_usec;
		shadow_tsc_stamp = HYPERVISOR_shared_info->tsc_timestamp <<
		    HYPERVISOR_shared_info->rdtsc_bitshift;
		shadow_system_time = HYPERVISOR_shared_info->system_time;
		__insn_barrier();
	} while (shadow_time_version != HYPERVISOR_shared_info->time_version1);
}
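
As the comment says, callers must hold the clock interrupt level. A minimal usage sketch, assuming the standard splclock()/splx() calls; the surrounding function is hypothetical:

	int s;

	s = splclock();			/* block clock interrupts */
	get_time_values_from_xen();	/* shadow_* now hold a consistent snapshot */
	/* ... use shadow_tv, shadow_tsc_stamp, shadow_system_time ... */
	splx(s);			/* restore the previous level */
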
Example #3
File: xbd.c  Project: MarginC/kame
static void
init_interface(void)
{
	block_io_op_t op; 

	reset_interface();

	if (blk_ring == NULL) {
		op.cmd = BLOCK_IO_OP_RING_ADDRESS;
		(void)HYPERVISOR_block_io_op(&op);

		blk_ring = (blk_ring_t *)uvm_km_valloc_align(kernel_map,
		    PAGE_SIZE, PAGE_SIZE);
		pmap_kenter_ma((vaddr_t)blk_ring, op.u.ring_mfn << PAGE_SHIFT,
		    VM_PROT_READ|VM_PROT_WRITE);
		DPRINTF(XBDB_SETUP, ("init_interface: "
		    "ring va %p and wired to %p\n",
		    blk_ring, (void *)(op.u.ring_mfn << PAGE_SHIFT)));

		blk_ring->req_prod = blk_ring->resp_prod =
			resp_cons = req_prod = last_req_prod = 0;

		event_set_handler(_EVENT_BLKDEV, &xbd_response_handler,
		    NULL, IPL_BIO);
		hypervisor_enable_event(_EVENT_BLKDEV);
	}

	__insn_barrier();
	state = STATE_ACTIVE;
}
Example #4
File: smp.c  Project: Hooman3/minix
void smp_sched_handler(void)
{
	unsigned flgs;
	unsigned cpu = cpuid;

	flgs = sched_ipi_data[cpu].flags;

	if (flgs) {
		struct proc * p;
		p = (struct proc *)sched_ipi_data[cpu].data;

		if (flgs & SCHED_IPI_STOP_PROC) {
			RTS_SET(p, RTS_PROC_STOP);
		}
		if (flgs & SCHED_IPI_SAVE_CTX) {
			/* all context has been saved already, FPU remains */
			if (proc_used_fpu(p) &&
					get_cpulocal_var(fpu_owner) == p) {
				disable_fpu_exception();
				save_local_fpu(p, FALSE /*retain*/);
				/* we're preparing to migrate somewhere else */
				release_fpu(p);
			}
		}
		if (flgs & SCHED_IPI_VM_INHIBIT) {
			RTS_SET(p, RTS_VMINHIBIT);
		}
	}

	__insn_barrier();
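	/* Clearing the flags acknowledges the request; the compiler barrier
	 * above keeps this store from being reordered before the work on 'p'. */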
	sched_ipi_data[cpu].flags = 0;
}
Example #5
static void udelay(unsigned int usec)
{
    struct at91tctmr_softc *sc = at91tctmr_sc;
    u_int32_t prev_cvr, cvr, divi = READ_TC(sc, TC_RC), diff;
    int prev_ticks, ticks, ticks2;
    unsigned footick = (sc->sc_timerclock * 64ULL / 1000000UL);

    if (usec > 0) {
        prev_ticks = hardclock_ticks;
        __insn_barrier();
        prev_cvr = READ_TC(sc, TC_CV);
        ticks = hardclock_ticks;
        __insn_barrier();
        if (ticks != prev_ticks) {
            prev_cvr = READ_TC(sc, TC_CV);
            prev_ticks = ticks;
        }
        for (;;) {
            ticks = hardclock_ticks;
            __insn_barrier();
            cvr = READ_TC(sc, TC_CV);
            ticks2 = hardclock_ticks;
            __insn_barrier();
            if (ticks2 != ticks) {
                cvr = READ_TC(sc, TC_CV);
            }
            diff = (ticks2 - prev_ticks) * divi;
            if (cvr < prev_cvr) {
                if (!diff)
                    diff = divi;
                diff -= prev_cvr - cvr;
            } else
                diff += cvr - prev_cvr;
            diff = diff * 64 / footick;
            if (diff) {
                if (usec <= diff)
                    break;
                prev_ticks = ticks2;
                prev_cvr = (prev_cvr + footick * diff / 64) % divi;
                usec -= diff;
            }
        }
    }
}
Example #6
void
delay(int ms)
{
	/*
	 * XXX need *real* clock calibration.
	 */
	volatile register int N = ms * DELAY_CALIBRATE;
	for (; --N;)
		__insn_barrier();
}
Example #7
int
virtio_to_queue(struct virtio_device *dev, int qidx, struct vumap_phys *bufs,
	size_t num, void *data)
{
	u16_t free_first;
	int left;
	struct virtio_queue *q = &dev->queues[qidx];
	struct vring *vring = &q->vring;

	assert(0 <= qidx && qidx < dev->num_queues);

	if (!data)
		panic("%s: NULL data received queue %d", dev->name, qidx);

	free_first = q->free_head;

	left = (int)q->free_num - (int)num;

	if (left < dev->threads)
		set_indirect_descriptors(dev, q, bufs, num);
	else
		set_direct_descriptors(q, bufs, num);

	/* Next index for host is old free_head */
	vring->avail->ring[vring->avail->idx % q->num] = free_first;

	/* Provided by the caller to identify this slot */
	q->data[free_first] = data;

	/* Make sure the host sees the new descriptors */
	__insn_barrier();

	/* advance last idx */
	vring->avail->idx += 1;

	/* Make sure the host sees the avail->idx */
	__insn_barrier();

	/* kick it! */
	kick_queue(dev, qidx);
	return 0;
}
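
The two barriers enforce a publish order: write the ring entry, then advance avail->idx, then kick the device. Below is a minimal sketch of that ordering with invented names (struct my_ring, write_doorbell); note that __insn_barrier() only restrains the compiler, so a device or host observing memory on another CPU would normally also require a hardware store fence.

struct my_ring {
	volatile uint16_t	avail_idx;	/* next slot the host may consume */
	volatile uint16_t	slots[256];	/* published descriptor heads */
};

static void
ring_publish(struct my_ring *r, uint16_t head, void (*write_doorbell)(void))
{
	r->slots[r->avail_idx % 256] = head;	/* 1: fill the ring entry */
	__insn_barrier();			/* entry before index */
	r->avail_idx++;				/* 2: expose it to the host */
	__insn_barrier();			/* index before doorbell */
	write_doorbell();			/* 3: notify the device */
}
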
Example #8
/*
 * Add a mask to cpl, and return the old value of cpl.
 */
int
splraise(int nlevel)
{
	int olevel;
	struct cpu_info *ci = curcpu();

	olevel = ci->ci_ilevel;
	if (nlevel > olevel)
		ci->ci_ilevel = nlevel;
	__insn_barrier();
	return (olevel);
}
Example #9
void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	__asm volatile("sync; eieio");	/* reorder protect */
}
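
A usage sketch for the pair above: splraise() returns the old level, and splx() restores it after the critical section. IPL_HIGH and the shared data are placeholders.

	int s;

	s = splraise(IPL_HIGH);		/* block interrupts up to IPL_HIGH */
	/* ... touch data shared with an interrupt handler ... */
	splx(s);			/* drop back to the saved level */
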
Example #10
int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}
Example #11
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl) return ncpl;
	__asm volatile("sync; eieio");	/* don't reorder.... */
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = max(ncpl, ocpl);
	__asm volatile("sync; eieio");	/* reorder protect */
	__insn_barrier();
	return ocpl;
}
Example #12
static void
linux_work_unlock(struct work_struct *work)
{
	struct cpu_info *ci;
	int s;

	__cpu_simple_unlock(&work->w_lock);

	/* XXX Copypasta of MUTEX_SPIN_SPLRESTORE.  */
	ci = curcpu();
	s = ci->ci_mtx_oldspl;
	__insn_barrier();
	if (++ci->ci_mtx_count == 0)
		splx(s);
}
Example #13
static void
linux_work_lock(struct work_struct *work)
{
	struct cpu_info *ci;
	int cnt, s;

	/* XXX Copypasta of MUTEX_SPIN_SPLRAISE.  */
	s = splvm();
	ci = curcpu();
	cnt = ci->ci_mtx_count--;
	__insn_barrier();
	if (cnt == 0)
		ci->ci_mtx_oldspl = s;

	__cpu_simple_lock(&work->w_lock);
}
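
A hypothetical caller of the pair above, bracketing access to the work item exactly like a spin mutex:

	linux_work_lock(work);
	/* ... examine or update the work_struct's state ... */
	linux_work_unlock(work);
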
Example #14
File: smp.c  Project: Hooman3/minix
/*
 * Tell another cpu about a task to do and return only after that cpu acks
 * that the task is finished. Before sending, also wait until the target cpu
 * has finished any task it was sent by another cpu.
 */
static void smp_schedule_sync(struct proc * p, unsigned task)
{
	unsigned cpu = p->p_cpu;
	unsigned mycpu = cpuid;

	assert(cpu != mycpu);
	/*
	 * if some other cpu made a request to the same cpu, wait until it is
	 * done before proceeding
	 */
	if (sched_ipi_data[cpu].flags != 0) {
		BKL_UNLOCK();
		while (sched_ipi_data[cpu].flags != 0) {
			if (sched_ipi_data[mycpu].flags) {
				BKL_LOCK();
				smp_sched_handler();
				BKL_UNLOCK();
			}
		}
		BKL_LOCK();
	}

	sched_ipi_data[cpu].data = (u32_t) p;
	sched_ipi_data[cpu].flags |= task;
	__insn_barrier();
	arch_send_smp_schedule_ipi(cpu);

	/* wait until the destination cpu finishes its job */
	BKL_UNLOCK();
	while (sched_ipi_data[cpu].flags != 0) {
		if (sched_ipi_data[mycpu].flags) {
			BKL_LOCK();
			smp_sched_handler();
			BKL_UNLOCK();
		}
	}
	BKL_LOCK();
}
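
A hedged sketch of a wrapper that uses smp_schedule_sync() to stop a process that may be running on another cpu; this follows the shape of Minix's smp_schedule_stop_proc() from memory and is not a verified copy:

void smp_schedule_stop_proc(struct proc * p)
{
	/* A process that is not runnable cannot be executing elsewhere,
	 * so it can be stopped without the IPI round trip. */
	if (proc_is_runnable(p))
		smp_schedule_sync(p, SCHED_IPI_STOP_PROC);
	else
		RTS_SET(p, RTS_PROC_STOP);
	assert(RTS_ISSET(p, RTS_PROC_STOP));
}
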
Example #15
/*
 * Restore a value to cpl (unmasking interrupts).  If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
void
spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	uint32_t imask;
	u_long psl;

	if (ci->ci_ilevel <= nlevel)
		return;

	__insn_barrier();

	imask = IUNMASK(ci, nlevel);
	psl = x86_read_psl();
	x86_disable_intr();
	if (ci->ci_ipending & imask) {
		KASSERT(psl == 0);
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		x86_write_psl(psl);
	}
}
Example #16
File: atl2.c  Project: ssinghi/minix
/*===========================================================================*
 *				atl2_writev				     *
 *===========================================================================*/
static void atl2_writev(const message *m, int from_int)
{
	/* Write packet data.
	 */
	iovec_s_t *iovp;
	size_t off, count, left, pos, skip;
	vir_bytes size;
	u8_t *sizep;
	int i, j, r, batch, maxnum;

	/* We can deal with only one write request from Inet at a time. */
	assert(from_int || !(state.flags & ATL2_FLAG_WRITE_PEND));

	state.task_endpt = m->m_source;

	/* If we are already certain that the packet won't fit, bail out.
	 * Keep at least some space between TxD head and tail, as it is not
	 * clear whether the device deals well with the case that they collide.
	 */
	if (state.txs_num >= ATL2_TXS_COUNT)
		goto suspend;
	maxnum = ATL2_TXD_BUFSIZE - ETH_MIN_PACK_SIZE - sizeof(u32_t);
	if (state.txd_num >= maxnum)
		goto suspend;

	/* Optimistically try to copy in the data; suspend if it turns out
	 * that it does not fit.
	 */
	off = 0;
	count = 0;
	left = ATL2_TXD_BUFSIZE - state.txd_num - sizeof(u32_t);
	pos = (state.txd_tail + state.txd_num +
		sizeof(u32_t)) % ATL2_TXD_BUFSIZE;

	for (i = 0; i < m->DL_COUNT; i += batch) {
		/* Copy in the next batch. */
		batch = MIN(m->DL_COUNT - i, NR_IOREQS);

		r = sys_safecopyfrom(m->m_source, m->DL_GRANT, off, 
			(vir_bytes) iovec, batch * sizeof(iovec[0]));
		if (r != OK)
			panic("vector copy failed: %d", r);

		/* Copy in each element in the batch. */
		for (j = 0, iovp = iovec; j < batch; j++, iovp++) {
			size = iovp->iov_size;
			if (size > left)
				goto suspend;

			skip = 0;
			if (size > ATL2_TXD_BUFSIZE - pos) {
				skip = ATL2_TXD_BUFSIZE - pos;
				r = sys_safecopyfrom(m->m_source,
					iovp->iov_grant, 0,
					(vir_bytes) (state.txd_base + pos),
					skip);
				if (r != OK)
					panic("safe copy failed: %d", r);
				pos = 0;
			}

			r = sys_safecopyfrom(m->m_source, iovp->iov_grant,
				skip, (vir_bytes) (state.txd_base + pos),
				size - skip);
			if (r != OK)
				panic("safe copy failed: %d", r);

			pos = (pos + size - skip) % ATL2_TXD_BUFSIZE;
			left -= size;
			count += size;
		}

		off += batch * sizeof(iovec[0]);
	}

	assert(count <= ETH_MAX_PACK_SIZE_TAGGED);

	/* Write the length to the DWORD right before the packet. */
	sizep = state.txd_base +
		(state.txd_tail + state.txd_num) % ATL2_TXD_BUFSIZE;
	* (u32_t *) sizep = count;

	/* Update the TxD head. */
	state.txd_num += sizeof(u32_t) + ATL2_ALIGN_32(count);
	pos = ATL2_ALIGN_32(pos) % ATL2_TXD_BUFSIZE;
	assert((int) pos ==
		(state.txd_tail + state.txd_num) % ATL2_TXD_BUFSIZE);

	/* Initialize and update the TxS head. */
	state.txs_base[(state.txs_tail + state.txs_num) % ATL2_TXS_COUNT] = 0;
	state.txs_num++;

	/* Tell the device about our new position. */
	__insn_barrier();

	ATL2_WRITE_U32(ATL2_TXD_IDX_REG, pos / sizeof(u32_t));

	/* We have now successfully set up the transmission of a packet. */
	state.flags &= ~ATL2_FLAG_WRITE_PEND;
	state.flags |= ATL2_FLAG_PACK_SENT;

	/* If called from the interrupt handler, the caller will reply. */
	if (!from_int)
		atl2_reply();

	return;

suspend:
	/* We cannot transmit the packet at this time. If we were not already
	 * trying to resume transmission, save the write request for later,
	 * and tell Inet that the request has been suspended.
	 */
	if (from_int)
		return;

	state.flags |= ATL2_FLAG_WRITE_PEND;
	state.write_msg = *m;

	atl2_reply();
}
Example #17
int
virtio_from_queue(struct virtio_device *dev, int qidx, void **data,
	size_t *len)
{
	struct virtio_queue *q;
	struct vring *vring;
	struct vring_used_elem *uel;
	struct vring_desc *vd;
	int count = 0;
	u16_t idx;
	u16_t used_idx;

	assert(0 <= qidx && qidx < dev->num_queues);

	q = &dev->queues[qidx];
	vring = &q->vring;

	/* Make sure we see changes done by the host */
	__insn_barrier();

	/* The index from the host */
	used_idx = vring->used->idx % q->num;

	/* We already saw this one, nothing to do here */
	if (q->last_used == used_idx)
		return -1;

	/* Get the vring_used element */
	uel = &q->vring.used->ring[q->last_used];

	/* Update the last used element */
	q->last_used = (q->last_used + 1) % q->num;

	/* index of the used element */
	idx = uel->id % q->num;

	assert(q->data[idx] != NULL);

	/* Get the descriptor */
	vd = &vring->desc[idx];

	/* Unconditionally set the tail->next to the first used one */
	assert(vring->desc[q->free_tail].flags & VRING_DESC_F_NEXT);
	vring->desc[q->free_tail].next = idx;

	/* Find the last index; eventually there has to be one
	 * without the next flag.
	 *
	 * FIXME: Protect from endless loop
	 */
	while (vd->flags & VRING_DESC_F_NEXT) {

		if (vd->flags & VRING_DESC_F_INDIRECT)
			clear_indirect_table(dev, vd);

		idx = vd->next;
		vd = &vring->desc[idx];
		count++;
	}

	/* Didn't count the last one */
	count++;

	if (vd->flags & VRING_DESC_F_INDIRECT)
		clear_indirect_table(dev, vd);

	/* idx points to the tail now, update the queue */
	q->free_tail = idx;
	assert(!(vd->flags & VRING_DESC_F_NEXT));

	/* We can always connect the tail with the head */
	vring->desc[q->free_tail].next = q->free_head;
	vring->desc[q->free_tail].flags = VRING_DESC_F_NEXT;

	q->free_num += count;

	assert(q->free_num <= q->num);

	*data = q->data[uel->id];
	q->data[uel->id] = NULL;

	if (len != NULL)
		*len = uel->len;

	return 0;
}
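
A usage sketch pairing virtio_to_queue() (Example #7) with virtio_from_queue(); my_request and the polling loop are illustrative only, since a real driver would normally block on the device interrupt instead of spinning.

	struct vumap_phys phys[1];
	void *token;
	size_t len;

	/* phys[0] describes the buffer (address and size filled in elsewhere). */
	virtio_to_queue(dev, qidx, phys, 1, &my_request);

	/* Poll until the device hands the request back. */
	while (virtio_from_queue(dev, qidx, &token, &len) != 0)
		;

	assert(token == &my_request);
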
Example #18
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(u_long frompc, u_long selfpc)
{
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	int s;
#endif

#if defined(_REENTRANT) && !defined(_KERNEL)
	if (__isthreaded) {
		/* prevent re-entry via thr_getspecific */
		if (_gmonparam.state != GMON_PROF_ON)
			return;
		_gmonparam.state = GMON_PROF_BUSY;
		p = thr_getspecific(_gmonkey);
		if (p == NULL) {
			/* Prevent recursive calls while allocating */
			thr_setspecific(_gmonkey, &_gmondummy);
			p = _m_gmon_alloc();
		}
		_gmonparam.state = GMON_PROF_ON;
	} else
#endif
		p = &_gmonparam;
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
	MCOUNT_ENTER;
#ifdef MULTIPROCESSOR
	__cpu_simple_lock(&__mcount_lock);
	__insn_barrier();
#endif
#endif
	p->state = GMON_PROF_BUSY;
	/*
	 * check that frompcindex is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	frompc -= p->lowpc;
	if (frompc > p->textsize)
		goto done;

#if (HASHFRACTION & (HASHFRACTION - 1)) == 0
	if (p->hashfraction == HASHFRACTION)
		frompcindex =
		    &p->froms[
		    (size_t)(frompc / (HASHFRACTION * sizeof(*p->froms)))];
	else
#endif
		frompcindex =
		    &p->froms[
		    (size_t)(frompc / (p->hashfraction * sizeof(*p->froms)))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = (u_short)toindex;
		top = &p->tos[(size_t)toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[(size_t)toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[(size_t)toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = (u_short)toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = (u_short)toindex;
			goto done;
		}
	}
done:
	p->state = GMON_PROF_ON;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
#ifdef MULTIPROCESSOR
	__insn_barrier();
	__cpu_simple_unlock(&__mcount_lock);
#endif
	MCOUNT_EXIT;
#endif
	return;

overflow:
	p->state = GMON_PROF_ERROR;
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
#ifdef MULTIPROCESSOR
	__insn_barrier();
	__cpu_simple_unlock(&__mcount_lock);
#endif
	MCOUNT_EXIT;
#endif
	return;
}
Example #19
File: atl2.c  Project: ssinghi/minix
/*===========================================================================*
 *				atl2_rx_advance				     *
 *===========================================================================*/
static void atl2_rx_advance(int next)
{
	/* Advance the RxD tail by as many failed receipts as possible, and
	 * see if there is an actual packet left to receive. If 'next' is set,
	 * the packet at the current tail has been processed.
	 */
	int update_tail;
	rxd_t *rxd;
	u32_t hdr, size;

	update_tail = FALSE;

	if (next) {
		state.rxd_tail = (state.rxd_tail + 1) % ATL2_RXD_COUNT;
		update_tail = TRUE;

		ATL2_DEBUG(("ATL2: successfully received packet\n"));

		state.flags &= ~ATL2_FLAG_RX_AVAIL;
	}

	assert(!(state.flags & ATL2_FLAG_RX_AVAIL));

	for (;;) {
		/* Check the RxD tail for updates. */
		rxd = &state.rxd_base[state.rxd_tail];

		hdr = rxd->hdr;

		if (!(hdr & ATL2_RXD_UPDATE))
			break;

		rxd->hdr = hdr & ~(ATL2_RXD_UPDATE);

		/* Update statistics. */
		atl2_rx_stat(hdr);

		/* Stop at the first successful receipt. The packet will be
		 * picked up by Inet later.
		 */
		size = hdr & ATL2_RXD_SIZE_MASK;

		if ((hdr & ATL2_RXD_SUCCESS) && size >= ETH_MIN_PACK_SIZE) {
			ATL2_DEBUG(("ATL2: packet available, size %ld\n",
				size));

			state.flags |= ATL2_FLAG_RX_AVAIL;
			break;
		}

		ATL2_DEBUG(("ATL2: packet receipt failed\n"));

		/* Advance tail. */
		state.rxd_tail = (state.rxd_tail + 1) % ATL2_RXD_COUNT;
		update_tail = TRUE;
	}

	/* If new RxD descriptors are now up for reuse, tell the device. */
	if (update_tail) {
		__insn_barrier();

		ATL2_WRITE_U32(ATL2_RXD_IDX_REG, state.rxd_tail);
	}
}