Example #1
/*
 * if we are a Commodore Amiga A4091 or possibly an A4000T
 */
int
afscmatch(struct device *pdp, struct cfdata *cfp, void *auxp)
{
	struct zbus_args *zap;
	siop_regmap_p rp;
	u_long temp, scratch;

	zap = auxp;
	if (zap->manid == 514 && zap->prodid == 84)
		return(1);		/* It's an A4091 SCSI card */
	if (!is_a4000() || !matchname("afsc", auxp))
		return(0);		/* Not on an A4000 or not A4000T SCSI */
	rp = ztwomap(0xdd0040);
	if (badaddr((void *)__UNVOLATILE(&rp->siop_scratch)) || 
	    badaddr((void *)__UNVOLATILE(&rp->siop_temp))) {
		return(0);
	}
	scratch = rp->siop_scratch;
	temp = rp->siop_temp;
	rp->siop_scratch = 0xdeadbeef;
	rp->siop_temp = 0xaaaa5555;
	if (rp->siop_scratch != 0xdeadbeef || rp->siop_temp != 0xaaaa5555)
		return(0);
	rp->siop_scratch = scratch;
	rp->siop_temp = temp;
	if (rp->siop_scratch != scratch || rp->siop_temp != temp)
		return(0);
	return(1);
}
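/*
 * A note on the probe above: it uses a common pattern for detecting
 * memory-mapped registers, i.e. save the current contents, write two
 * distinctive test patterns, check that they read back, then restore
 * and re-check the saved values. A minimal generic sketch of that
 * pattern follows; probe_scratch_regs(), reg_a and reg_b are
 * illustrative names, not part of the driver above.
 */
static int
probe_scratch_regs(volatile unsigned long *reg_a,
    volatile unsigned long *reg_b)
{
	unsigned long save_a = *reg_a, save_b = *reg_b;

	*reg_a = 0xdeadbeef;		/* first test pattern */
	*reg_b = 0xaaaa5555;		/* second test pattern */
	if (*reg_a != 0xdeadbeef || *reg_b != 0xaaaa5555)
		return 0;		/* no device responding here */
	*reg_a = save_a;		/* restore the original contents */
	*reg_b = save_b;
	if (*reg_a != save_a || *reg_b != save_b)
		return 0;		/* readback after restore failed */
	return 1;			/* registers present and writable */
}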
Example #2
void
mfcsattach(device_t parent, device_t self, void *aux)
{
	int unit;
	struct mfcs_softc *sc;
	struct mfc_softc *scc;
	struct mfc_args *ma;
	struct mfc_regs *rp;

	sc = device_private(self);
	sc->sc_dev = self;
	scc = device_private(parent);
	ma = aux;

	printf (": input fifo %d output fifo %d\n", SERIBUF_SIZE,
	    SEROBUF_SIZE);

	unit = ma->unit;
	mfcs_active |= 1 << unit;
	sc->rptr = sc->wptr = sc->inbuf;
	sc->sc_mfc = scc;
	sc->sc_regs = rp = scc->sc_regs;
	sc->sc_duart = (struct duart_regs *) ((unit & 1) ? 
	    __UNVOLATILE(&rp->du_mr1b) : __UNVOLATILE(&rp->du_mr1a));
	sc->mfcs_si = softint_establish(SOFTINT_SERIAL, mfcs_intr_soft, sc);
	/*
	 * should have only one vbl routine to handle all ports?
	 */
	sc->vbl_node.function = (void (*) (void *)) mfcsmint;
	sc->vbl_node.data = (void *) unit;
	add_vbl_function(&sc->vbl_node, 1, (void *) unit);
}
Example #3
void
db_cpu_cmd(db_expr_t addr, bool have_addr, db_expr_t count, const char *modif)
{
	struct cpu_info *ci;
	if (!have_addr) {
		cpu_debug_dump();
		return;
	}

	if ((addr < 0) || (addr >= sparc_ncpus)) {
		db_printf("%ld: CPU out of range\n", addr);
		return;
	}
	ci = cpus[addr];
	if (ci == NULL) {
		db_printf("CPU %ld not configured\n", addr);
		return;
	}
	if (ci != curcpu()) {
		if (!(ci->flags & CPUFLG_PAUSED)) {
			db_printf("CPU %ld not paused\n", addr);
			return;
		}
	}
	if (ci->ci_ddb_regs == 0) {
		db_printf("CPU %ld has no saved regs\n", addr);
		return;
	}
	db_printf("using CPU %ld", addr);
	ddb_regp = __UNVOLATILE(ci->ci_ddb_regs);
	ddb_cpuinfo = ci;
}
Example #4
static void
nouveaufb_attach_task(struct nouveau_task *task)
{
	struct nouveaufb_softc *const sc = container_of(task,
	    struct nouveaufb_softc, sc_attach_task);
	const struct nouveaufb_attach_args *const nfa = &sc->sc_nfa;
	const struct drmfb_attach_args da = {
		.da_dev = sc->sc_dev,
		.da_fb_helper = nfa->nfa_fb_helper,
		.da_fb_sizes = &nfa->nfa_fb_sizes,
		.da_fb_vaddr = __UNVOLATILE(nfa->nfa_fb_ptr),
		.da_params = &nouveaufb_drmfb_params,
	};
	int error;

	error = drmfb_attach(&sc->sc_drmfb, &da);
	if (error) {
		aprint_error_dev(sc->sc_dev, "failed to attach drmfb: %d\n",
		    error);
		return;
	}

	if (!pmf_device_register1(sc->sc_dev, NULL, NULL, &nouveaufb_shutdown))
		aprint_error_dev(sc->sc_dev,
		    "failed to register shutdown handler\n");

	sc->sc_attached = true;
}

static bool
nouveaufb_shutdown(device_t self, int flags)
{
	struct nouveaufb_softc *const sc = device_private(self);

	return drmfb_shutdown(&sc->sc_drmfb, flags);
}

static paddr_t
nouveaufb_drmfb_mmapfb(struct drmfb_softc *drmfb, off_t offset, int prot)
{
	struct nouveaufb_softc *const sc = container_of(drmfb,
	    struct nouveaufb_softc, sc_drmfb);
	struct drm_fb_helper *const helper = sc->sc_nfa.nfa_fb_helper;
	struct nouveau_fbdev *const fbdev = container_of(helper,
	    struct nouveau_fbdev, helper);
	struct nouveau_bo *const nvbo = fbdev->nouveau_fb.nvbo;
	const unsigned num_pages __diagused = nvbo->bo.num_pages;
	int flags = 0;

	KASSERT(0 <= offset);
	KASSERT(offset < (num_pages << PAGE_SHIFT));

	if (ISSET(nvbo->bo.mem.placement, TTM_PL_FLAG_WC))
		flags |= BUS_SPACE_MAP_PREFETCHABLE;

	return bus_space_mmap(nvbo->bo.bdev->memt, nvbo->bo.mem.bus.base,
	    nvbo->bo.mem.bus.offset + offset, prot, flags);
}
Example #5
void
xf86DisableIO()
{

    if (ioBase != MAP_FAILED) {
        munmap(__UNVOLATILE(ioBase), 0x10000);
        ioBase = MAP_FAILED;
    }
}
Example #6
/* Get serial number of the card. */
static char * 
p5bus_cardsn(void)
{
	char *snr, *sn;

	sn = kmem_zalloc(P5_SN_LEN + 1, KM_SLEEP);
	snr = (char *)__UNVOLATILE(ztwomap(P5_ROM_OFF));

	memcpy(sn, snr, P5_SN_LEN);
	return sn;
}
Example #7
/*
 * Initialize
 */
void
tv_init(struct ite_softc *ip)
{
	short i;

	/*
	 * initialize private variables
	 */
	tv_top = 0;
	for (i = 0; i < PLANELINES; i++)
		tv_row[i] = (void *)__UNVOLATILE(&IODEVbase->tvram[ROWOFFSET(i)]);
	/* shadow ANK font */
	memcpy(kern_font, (void *)&IODEVbase->cgrom0_8x16, 256 * FONTHEIGHT);
	ite_set_glyph();
	/* set font address cache */
	for (i = 0; i < 256; i++)
		tv_font[i] = &kern_font[i * FONTHEIGHT];
	for (i = 0x21; i < 0x30; i++)
		tv_kfont[i] = &IODEVbase->cgrom0_16x16[KFONTBASE(i-0x21)];
	for (; i < 0x50; i++)
		tv_kfont[i] = &IODEVbase->cgrom1_16x16[KFONTBASE(i-0x30)];
	for (; i < 0x7f; i++)
		tv_kfont[i] = &IODEVbase->cgrom2_16x16[KFONTBASE(i-0x50)];

	/*
	 * initialize part of ip
	 */
	ip->cols = ip->grf->g_display.gd_dwidth  / FONTWIDTH;
	ip->rows = ip->grf->g_display.gd_dheight / FONTHEIGHT;
	/* set draw routine dynamically */
	ip->isw->ite_putc   = tv_putc;
	ip->isw->ite_cursor = tv_cursor;
	ip->isw->ite_clear  = tv_clear;
	ip->isw->ite_scroll = tv_scroll;

	/*
	 * Initialize colormap
	 */
#define RED   (0x1f << 6)
#define BLUE  (0x1f << 1)
#define GREEN (0x1f << 11)
	IODEVbase->tpalet[0] = 0;			/* black */
	IODEVbase->tpalet[1] = 1 | RED;			/* red */
	IODEVbase->tpalet[2] = 1 | GREEN;		/* green */
	IODEVbase->tpalet[3] = 1 | RED | GREEN;		/* yellow */
	IODEVbase->tpalet[4] = 1 | BLUE;		/* blue */
	IODEVbase->tpalet[5] = 1 | BLUE | RED;		/* magenta */
	IODEVbase->tpalet[6] = 1 | BLUE | GREEN;	/* cyan */
	IODEVbase->tpalet[7] = 1 | BLUE | RED | GREEN;	/* white */
}
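/*
 * The tv_font[] loop above is a small precomputation trick: it spends
 * 256 pointers of memory so the draw routines can fetch a glyph with a
 * single table lookup instead of a multiply per character. A hedged
 * sketch of the equivalent lookup (glyph_for() is an illustrative
 * name, not part of the driver):
 */
static inline unsigned char *
glyph_for(unsigned char c, unsigned char *font, int fontheight)
{
	/* what tv_font[c] caches: the first scanline of glyph c */
	return &font[(int)c * fontheight];
}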
Example #8
int
pthread_barrier_wait(pthread_barrier_t *barrier)
{
	pthread_mutex_t *interlock;
	pthread_t self;
	unsigned int gen;

	if (barrier->ptb_magic != _PT_BARRIER_MAGIC)
		return EINVAL;

	/*
	 * A single arbitrary thread is supposed to return
	 * PTHREAD_BARRIER_SERIAL_THREAD, and everyone else
	 * is supposed to return 0.  Since pthread_barrier_wait()
	 * is not a cancellation point, this is trivial; we
	 * simply elect that the thread that causes the barrier
	 * to be satisfied gets the special return value.  Note
	 * that this final thread does not actually need to block,
	 * but instead is responsible for waking everyone else up.
	 */
	self = pthread__self();
	interlock = pthread__hashlock(barrier);
	pthread_mutex_lock(interlock);
	if (barrier->ptb_curcount + 1 == barrier->ptb_initcount) {
		barrier->ptb_generation++;
		barrier->ptb_curcount = 0;
		pthread__unpark_all(&barrier->ptb_waiters, self,
		    interlock);
		pthread_mutex_unlock(interlock);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}
	barrier->ptb_curcount++;
	gen = barrier->ptb_generation;
	for (;;) {
		PTQ_INSERT_TAIL(&barrier->ptb_waiters, self, pt_sleep);
		self->pt_sleepobj = &barrier->ptb_waiters;
		(void)pthread__park(self, interlock, &barrier->ptb_waiters,
		    NULL, 0, __UNVOLATILE(&interlock->ptm_waiters));
		if (__predict_true(gen != barrier->ptb_generation)) {
			break;
		}
		pthread_mutex_lock(interlock);
		if (gen != barrier->ptb_generation) {
			pthread_mutex_unlock(interlock);
			break;
		}
	}

	return 0;
}
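/*
 * Typical use of the election described in the comment above: with N
 * threads meeting at a barrier, exactly one caller per round sees
 * PTHREAD_BARRIER_SERIAL_THREAD and can run a once-per-round action.
 * A minimal usage sketch (worker() and NTHREADS are illustrative):
 */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_barrier_t barrier;

static void *
worker(void *arg)
{
	(void)arg;
	/* ... per-thread work for this round ... */
	if (pthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
		printf("elected thread runs the once-per-round step\n");
	return NULL;
}

int
main(void)
{
	pthread_t t[NTHREADS];
	int i;

	pthread_barrier_init(&barrier, NULL, NTHREADS);
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	pthread_barrier_destroy(&barrier);
	return 0;
}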
Example #9
/**
 * radeon_ring_init - init driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ring_size: size of the ring
 * @rptr_offs: offset of the rptr writeback location in the WB buffer
 * @nop: nop packet for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     NULL, &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
					&ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				       (void **)__UNVOLATILE(&ring->ring));
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (rdev->wb.enabled) {
		u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
		ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
		ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
	}
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}
	radeon_ring_lockup_update(rdev, ring);
	return 0;
}
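/*
 * ring->ptr_mask above is (ring_size / 4) - 1: the ring holds a
 * power-of-two number of 32-bit dwords, so wrapping an index costs a
 * single AND instead of a modulo. A hedged sketch of that wrap
 * (ring_next() is an illustrative name, not a radeon function):
 */
static inline unsigned
ring_next(unsigned ptr, unsigned ptr_mask)
{
	return (ptr + 1) & ptr_mask;	/* wraps to 0 past the last dword */
}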
Example #10
/* Non-privileged console interrupt routine */
static int
xencons_handler(void *arg)
{
    struct xencons_softc *sc = arg;
    XENCONS_RING_IDX cons, prod, len;
    int s = spltty();

    if (sc->polling) {
        splx(s);
        return 1;
    }

#define XNC_IN (xencons_interface->in)

    cons = xencons_interface->in_cons;
    prod = xencons_interface->in_prod;
    xen_rmb();
    while (cons != prod) {
        if (MASK_XENCONS_IDX(cons, XNC_IN) <
                MASK_XENCONS_IDX(prod, XNC_IN))
            len = MASK_XENCONS_IDX(prod, XNC_IN) -
                  MASK_XENCONS_IDX(cons, XNC_IN);
        else
            len = sizeof(XNC_IN) - MASK_XENCONS_IDX(cons, XNC_IN);

        xencons_tty_input(sc, __UNVOLATILE(
                              &XNC_IN[MASK_XENCONS_IDX(cons, XNC_IN)]), len);
        if (__predict_false(xencons_interface->in_cons != cons)) {
            /* catch up with xenconscn_getc() */
            cons = xencons_interface->in_cons;
            prod = xencons_interface->in_prod;
            xen_rmb();
        } else {
            cons += len;
            xen_wmb();
            xencons_interface->in_cons = cons;
            xen_wmb();
        }
    }
    hypervisor_notify_via_evtchn(xen_start_info.console.domU.evtchn);
    splx(s);
    return 1;
#undef XNC_IN
}
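/*
 * The handler above drains the shared ring in at most two contiguous
 * chunks per pass: from the masked consumer index either up to the
 * masked producer index, or up to the end of the buffer when the data
 * wraps. A condensed sketch of that split, assuming a power-of-two
 * ring size (ring_chunk_len() is an illustrative name, not a Xen API):
 */
static inline unsigned
ring_chunk_len(unsigned cons, unsigned prod, unsigned size)
{
	unsigned mcons = cons & (size - 1);	/* masked consumer index */
	unsigned mprod = prod & (size - 1);	/* masked producer index */

	/* data wraps exactly when the masked producer is behind us */
	return (mcons < mprod) ? mprod - mcons : size - mcons;
}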
Example #11
static void
gencp_acafh_attach(device_t parent, device_t self, void *aux)
{
	struct gencp_softc *sc;
	struct bus_space_tag cpb_bst;
	struct clockportbus_attach_args cpb_aa;
	struct acafhbus_attach_args *aaa_aa;

	aaa_aa = aux;
	sc = device_private(self);
	sc->sc_dev = self;
	sc->cpb_aa = &cpb_aa;

	/* Set the address and bus access methods. */
	cpb_bst.base = (bus_addr_t) __UNVOLATILE(ztwomap(aaa_aa->aaa_pbase));
	cpb_bst.absm = &amiga_bus_stride_4;

	sc->cpb_aa->cp_iot = &cpb_bst;

	gencp_attach(sc);
}
Example #12
/*
 * RETINA_SPEED_HACK code seems to work on some boards and on others
 * it causes text to smear horizontally
 */
void
rh_scroll(struct ite_softc *ip, int sy, int sx, int count, int dir)
{
#ifndef	RETINA_SPEED_HACK
	u_long *fb = (u_long *)__UNVOLATILE(ip->grf->g_fbkva);
#endif

	rh_cursor(ip, ERASE_CURSOR);

	if (dir == SCROLL_UP) {
#ifdef	RETINA_SPEED_HACK
		screen_up(ip, sy - count, ip->bottom_margin, count);
#else
		bcopy(fb + sy * ip->cols, fb + (sy - count) * ip->cols,
		    4 * (ip->bottom_margin - sy + 1) * ip->cols);
		rh_clear(ip, ip->bottom_margin + 1 - count, 0, count, ip->cols);
#endif
	} else if (dir == SCROLL_DOWN) {
#ifdef	RETINA_SPEED_HACK
		screen_down(ip, sy, ip->bottom_margin, count);
#else
		bcopy(fb + sy * ip->cols, fb + (sy + count) * ip->cols,
		    4 * (ip->bottom_margin - sy - count + 1) * ip->cols);
		rh_clear(ip, sy, 0, count, ip->cols);
#endif
	} else if (dir == SCROLL_RIGHT) {
		RZ3AlphaCopy(ip->grf, sx, sy, sx + count, sy,
		    ip->cols - (sx + count), 1);
		RZ3AlphaErase(ip->grf, sx, sy, count, 1);
	} else {
		RZ3AlphaCopy(ip->grf, sx + count, sy, sx, sy,
		    ip->cols - (sx + count), 1);
		RZ3AlphaErase(ip->grf, ip->cols - count, sy, count, 1);
	}
#ifndef	RETINA_SPEED_HACK
	rh_cursor(ip, !ERASE_CURSOR);
#endif
}
Example #13
void
xencons_start(struct tty *tp)
{
    struct clist *cl;
    int s;

    s = spltty();
    if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP))
        goto out;
    tp->t_state |= TS_BUSY;
    splx(s);

    /*
     * We need to do this outside spl since it could be fairly
     * expensive and we don't want our serial ports to overflow.
     */
    cl = &tp->t_outq;
    if (xendomain_is_dom0()) {
        int len, r;
        u_char buf[XENCONS_BURST+1];

        len = q_to_b(cl, buf, XENCONS_BURST);
        while (len > 0) {
            r = HYPERVISOR_console_io(CONSOLEIO_write, len, buf);
            if (r <= 0)
                break;
            len -= r;
        }
    } else {
        XENCONS_RING_IDX cons, prod, len;

#define XNC_OUT (xencons_interface->out)
        cons = xencons_interface->out_cons;
        prod = xencons_interface->out_prod;
        xen_rmb();
        while (prod != cons + sizeof(xencons_interface->out)) {
            if (MASK_XENCONS_IDX(prod, XNC_OUT) <
                    MASK_XENCONS_IDX(cons, XNC_OUT)) {
                len = MASK_XENCONS_IDX(cons, XNC_OUT) -
                      MASK_XENCONS_IDX(prod, XNC_OUT);
            } else {
                len = sizeof(XNC_OUT) -
                      MASK_XENCONS_IDX(prod, XNC_OUT);
            }
            len = q_to_b(cl, __UNVOLATILE(
                             &XNC_OUT[MASK_XENCONS_IDX(prod, XNC_OUT)]), len);
            if (len == 0)
                break;
            prod = prod + len;
        }
        xen_wmb();
        xencons_interface->out_prod = prod;
        xen_wmb();
        hypervisor_notify_via_evtchn(xen_start_info.console.domU.evtchn);
#undef XNC_OUT
    }

    s = spltty();
    tp->t_state &= ~TS_BUSY;
    if (ttypull(tp)) {
        tp->t_state |= TS_TIMEOUT;
        callout_schedule(&tp->t_rstrt_ch, 1);
    }
out:
    splx(s);
}
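/*
 * The producer loop above relies on the indices being free-running:
 * prod and cons are never masked when stored, so the ring is full
 * exactly when prod has advanced one whole ring size past cons. A
 * one-line sketch of that test (ring_full() is an illustrative name):
 */
static inline int
ring_full(unsigned cons, unsigned prod, unsigned size)
{
	return prod == cons + size;	/* unmasked, free-running indices */
}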
Example #14
static enum segstat_t shm_query(volatile struct shmTime *shm_in, struct shm_stat_t *shm_stat)
/* try to grab a sample from the specified SHM segment */
{
    volatile struct shmTime shmcopy, *shm = shm_in;
    volatile int cnt;

    unsigned int cns_new, rns_new;

    /*
     * This is the main routine. It snatches the time from the shm
     * board and tacks on a local timestamp.
     */
    if (shm == NULL) {
	shm_stat->status = NO_SEGMENT;
	return NO_SEGMENT;
    }

    /*@-type@*//* splint is confused about struct timespec */
    shm_stat->tvc.tv_sec = shm_stat->tvc.tv_nsec = 0;
    {
	time_t now;

	time(&now);
	shm_stat->tvc.tv_sec = now;
    }

    /* relying on word access to be atomic here */
    if (shm->valid == 0) {
	shm_stat->status = NOT_READY;
	return NOT_READY;
    }

    cnt = shm->count;

    /*
     * This is proof against concurrency issues if either
     * (a) the memory_barrier() call works on this host, or
     * (b) memset compiles to an uninterruptible single-instruction bitblt.
     */
    memory_barrier();
    memcpy(__UNVOLATILE(&shmcopy), __UNVOLATILE(shm), sizeof(struct shmTime));
    shm->valid = 0;
    memory_barrier();

    /* 
     * Clash detection in case neither (a) nor (b) was true.
     * Not supported in mode 0, and word access to the count field 
     * must be atomic for this to work.
     */
    if (shmcopy.mode > 0 && cnt != shm->count) {
	shm_stat->status = CLASH;
	return shm_stat->status;
    }

    shm_stat->status = OK;
    shm_stat->mode = shmcopy.mode;

    switch (shmcopy.mode) {
    case 0:
	shm_stat->tvr.tv_sec	= shmcopy.receiveTimeStampSec;
	shm_stat->tvr.tv_nsec	= shmcopy.receiveTimeStampUSec * 1000;
	rns_new		= shmcopy.receiveTimeStampNSec;
	shm_stat->tvt.tv_sec	= shmcopy.clockTimeStampSec;
	shm_stat->tvt.tv_nsec	= shmcopy.clockTimeStampUSec * 1000;
	cns_new		= shmcopy.clockTimeStampNSec;

	/* Since the following comparisons are between unsigned
	** variables they are always well defined, and any
	** (signed) underflow will turn into very large unsigned
	** values, well above the 1000 cutoff.
	**
	** Note: The usecs *must* be a *truncated*
	** representation of the nsecs. This code will fail for
	** *rounded* usecs, and the logic to deal with
	** wrap-arounds in the presence of rounded values is
	** much more convoluted.
	*/
	if (   ((cns_new - (unsigned)shm_stat->tvt.tv_nsec) < 1000)
	       && ((rns_new - (unsigned)shm_stat->tvr.tv_nsec) < 1000)) {
	    shm_stat->tvt.tv_nsec = cns_new;
	    shm_stat->tvr.tv_nsec = rns_new;
	}
	/* At this point shm_stat->tvr and shm_stat->tvt contain valid ns-level
	** timestamps, possibly generated by extending the old
	** us-level timestamps
	*/
	break;

    case 1:

	shm_stat->tvr.tv_sec	= shmcopy.receiveTimeStampSec;
	shm_stat->tvr.tv_nsec	= shmcopy.receiveTimeStampUSec * 1000;
	rns_new		= shmcopy.receiveTimeStampNSec;
	shm_stat->tvt.tv_sec	= shmcopy.clockTimeStampSec;
	shm_stat->tvt.tv_nsec	= shmcopy.clockTimeStampUSec * 1000;
	cns_new		= shmcopy.clockTimeStampNSec;
		
	/* See the case above for an explanation of the
	** following test.
	*/
	if (   ((cns_new - (unsigned)shm_stat->tvt.tv_nsec) < 1000)
	       && ((rns_new - (unsigned)shm_stat->tvr.tv_nsec) < 1000)) {
	    shm_stat->tvt.tv_nsec = cns_new;
	    shm_stat->tvr.tv_nsec = rns_new;
	}
	/* At this point shm_stat->tvr and shm_stat->tvt contain valid ns-level
	** timestamps, possibly generated by extending the old
	** us-level timestamps
	*/
	break;

    default:
	shm_stat->status = BAD_MODE;
	break;
    }
    /*@+type@*/

    /*
     * leap field is not a leap offset but a leap notification code.
     * The values are magic numbers used by NTP and set by GPSD, if at all, in
     * the subframe code.
     */
    shm_stat->leap = shmcopy.leap;
    shm_stat->precision = shmcopy.precision;

    return shm_stat->status;
}
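/*
 * The snapshot protocol above is essentially a reader-side seqlock:
 * remember the writer's count, copy the whole segment between memory
 * barriers, then re-read the count and discard the copy if the writer
 * moved underneath us. A condensed sketch of just that protocol,
 * reusing memory_barrier() and __UNVOLATILE() from the example above
 * (struct seg and seg_read() are illustrative names):
 */
struct seg {
	volatile int valid;	/* writer sets when a sample is ready */
	volatile int count;	/* writer bumps on every update */
	/* ... payload fields ... */
};

static int
seg_read(volatile struct seg *shm, struct seg *out)
{
	int cnt;

	if (shm->valid == 0)
		return -1;	/* nothing published yet */
	cnt = shm->count;	/* remember the writer's generation */
	memory_barrier();	/* order reads around the bulk copy */
	memcpy(out, __UNVOLATILE(shm), sizeof(*out));
	shm->valid = 0;		/* consume the sample */
	memory_barrier();
	return (cnt == shm->count) ? 0 : -1;	/* -1: writer raced us */
}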
Example #15
/*
 * How Command Block List Processing is done.
 * 
 * A running CBL is never manipulated. If there is a CBL already running,
 * further CMDs are deferred until the current list is done. A new list is
 * setup when the old one has finished.
 * This eases programming. To manipulate a running CBL it is necessary to
 * suspend the Command Unit to avoid race conditions. After a suspend
 * is sent we have to wait for an interrupt that ACKs the suspend. Then
 * we can manipulate the CBL and resume operation. I am not sure that this
 * is more effective than the current, much simpler approach. => KISS
 * See i82596CA data sheet page 26.
 * 
 * A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).
 * 
 * A CBL may consist of TX CMDs, and _only_ TX CMDs.
 * A TX CBL is running or on the way to be set up when
 * ((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).
 * 
 * A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
 * non-TX CMDs.
 * 
 * This follows mostly from the way an Ethernet driver works and from the
 * rule that running CBLs are never manipulated while they are in progress.
 * If if_start() is called there will be TX CMDs enqueued, so we have a
 * running TX CBL, and other CMDs from e.g. if_ioctl() will be deferred,
 * and vice versa.
 * 
 * The Multicast Setup Command is special. A MCS needs more space than
 * a single CB has. Actual space requirement depends on the length of the
 * multicast list. So we always defer MCS until other CBLs are finished,
 * then we set up a CONF CMD in the first CB. The CONF CMD is needed to
 * turn ALLMULTI on or off in the hardware. The MCS is the 2nd CB and may
 * use all the remaining space in the CBL and the Transmit Buffer Descriptor
 * List. (Therefore the CBL and TBDL must be contiguous in physical and
 * virtual memory. This is guaranteed by the definitions of the list offsets
 * in i82596reg.h and because a single DMA segment is used for all
 * lists.) When ALLMULTI is enabled via the CONF CMD, the MCS is run with
 * a multicast list length of 0, thus disabling the multicast filter.
 * A deferred MCS is signaled via ((sc->sc_flags & IEE_WANT_MCAST) != 0)
 */
void
iee_cb_setup(struct iee_softc *sc, uint32_t cmd)
{
	struct iee_cb *cb = SC_CB(sc, sc->sc_next_cb);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;

	memset(cb, 0, sc->sc_cb_sz);
	cb->cb_cmd = cmd;
	switch (cmd & IEE_CB_CMD) {
	case IEE_CB_CMD_NOP:	/* NOP CMD */
		break;
	case IEE_CB_CMD_IAS:	/* Individual Address Setup */
		memcpy(__UNVOLATILE(cb->cb_ind_addr), CLLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN);
		break;
	case IEE_CB_CMD_CONF:	/* Configure */
		memcpy(__UNVOLATILE(cb->cb_cf), sc->sc_cf, sc->sc_cf[0]
		    & IEE_CF_0_CNT_M);
		break;
	case IEE_CB_CMD_MCS:	/* Multicast Setup */
		if (sc->sc_next_cb != 0) {
			sc->sc_flags |= IEE_WANT_MCAST;
			return;
		}
		sc->sc_flags &= ~IEE_WANT_MCAST;
		if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
			/* Need no multicast filter in promisc mode. */
			iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			return;
		}
		/* Leave room for a CONF CMD to en/dis-able ALLMULTI mode */
		cb = SC_CB(sc, sc->sc_next_cb + 1);
		cb->cb_cmd = cmd;
		cb->cb_mcast.mc_size = 0;
		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0 || cb->cb_mcast.mc_size
			    * ETHER_ADDR_LEN + 2 * sc->sc_cb_sz >
			    sc->sc_cb_sz * IEE_NCB +
			    sc->sc_tbd_sz * IEE_NTBD * IEE_NCB) {
				cb->cb_mcast.mc_size = 0;
				break;
			}
			memcpy(__UNVOLATILE(&cb->cb_mcast.mc_addrs[
			    cb->cb_mcast.mc_size * ETHER_ADDR_LEN]),
			    enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
			cb->cb_mcast.mc_size++;
		}
		if (cb->cb_mcast.mc_size == 0) {
			/* Can't do exact mcast filtering, do ALLMULTI mode. */
			ifp->if_flags |= IFF_ALLMULTI;
			sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
		} else {
			/* disable ALLMULTI and load mcast list */
			ifp->if_flags &= ~IFF_ALLMULTI;
			sc->sc_cf[11] |= IEE_CF_11_MCALL;
			/* Mcast setup may need more than sc->sc_cb_sz bytes. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
			    sc->sc_cb_off,
			    sc->sc_cb_sz * IEE_NCB +
			    sc->sc_tbd_sz * IEE_NTBD * IEE_NCB,
			    BUS_DMASYNC_PREWRITE);
		}
		iee_cb_setup(sc, IEE_CB_CMD_CONF);
		break;
	case IEE_CB_CMD_TR:	/* Transmit */
		cb->cb_transmit.tx_tbd_addr =
		    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_tbd_off
		    + sc->sc_tbd_sz * sc->sc_next_tbd));
		cb->cb_cmd |= IEE_CB_SF; /* Always use Flexible Mode. */
		break;
	case IEE_CB_CMD_TDR:	/* Time Domain Reflectometry */
		break;
	case IEE_CB_CMD_DUMP:	/* Dump */
		break;
	case IEE_CB_CMD_DIAG:	/* Diagnose */
		break;
	default:
		/* can't happen */
		break;
	}
	cb->cb_link_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_cb_off +
	    sc->sc_cb_sz * (sc->sc_next_cb + 1)));
	IEE_CBSYNC(sc, sc->sc_next_cb,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_next_cb++;
	ifp->if_timer = 5;
}
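/*
 * The big comment above boils down to a defer-while-busy pattern:
 * commands never touch a running list; they raise a "wanted" flag
 * instead, and the completion path re-issues the deferred command once
 * the list has finished. A minimal sketch of the pattern with
 * illustrative names (submit_work(), list_done() and WANT_WORK stand
 * in for iee_cb_setup(), the interrupt path and IEE_WANT_MCAST):
 */
#define WANT_WORK	0x01

static unsigned flags;		/* stand-in for sc->sc_flags */
static int list_running;	/* stand-in for sc->sc_next_cb != 0 */

static void
submit_work(void)
{
	if (list_running) {
		flags |= WANT_WORK;	/* defer: never touch a running list */
		return;
	}
	flags &= ~WANT_WORK;
	list_running = 1;
	/* ... safe to build and start a fresh command list here ... */
}

static void
list_done(void)
{
	list_running = 0;
	if (flags & WANT_WORK)		/* completion re-issues the deferral */
		submit_work();
}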