Example #1
static void
sq_attach(device_t parent, device_t self, void *aux)
{
	int i, err;
	const char *macaddr;
	struct sq_softc *sc = device_private(self);
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_dev = self;
	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;      /* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
	    &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
	    &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
	    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 1, &sc->sc_ncdseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
	    sizeof(struct sq_control), (void **)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sq_control), 1, sizeof(struct sq_control), PAGE_SIZE,
	    BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap,
	    sc->sc_control, sizeof(struct sq_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers.  */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d\n,"
			    " error = %d\n", i, err);
			goto fail_6;
		}
	}

	memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR],
	    ETHER_ADDR_LEN);

	/*
	 * If our mac address is bogus, obtain it from ARCBIOS. This will
	 * be true of the onboard HPC3 on IP22, since there is no eeprom,
	 * but rather the DS1386 RTC's battery-backed ram is used.
	 */
	if (sc->sc_enaddr[0] != SGI_OUI_0 ||
	    sc->sc_enaddr[1] != SGI_OUI_1 ||
	    sc->sc_enaddr[2] != SGI_OUI_2) {
		macaddr = arcbios_GetEnvironmentVariable("eaddr");
		if (macaddr == NULL) {
			printf(": unable to get MAC address!\n");
			goto fail_6;
		}
		ether_aton_r(sc->sc_enaddr, sizeof(sc->sc_enaddr), macaddr);
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	printf("%s: Ethernet address %s\n",
	    device_xname(self), ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
 fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->sc_control, sizeof(struct sq_control));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
 fail_0:
	return;
}
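
Every attach routine in this collection follows the same four-step bus_dma(9) discipline: bus_dmamem_alloc() reserves physical segments, bus_dmamem_map() gives them a kernel virtual address, bus_dmamap_create() builds the DMA map, and bus_dmamap_load() binds the mapped memory to it; teardown runs strictly in reverse. The sketch below condenses that pattern. Only the bus_dma(9) calls are real API; xx_dma_setup() and its parameter layout are illustrative.

/*
 * Minimal sketch of the alloc -> map -> create -> load sequence with
 * reverse-order unwinding, as used by the attach routines above.
 * The bus_dma(9) interface comes from <sys/bus.h>; the xx_ name and
 * argument layout are hypothetical.
 */
static int
xx_dma_setup(bus_dma_tag_t dmat, bus_size_t size, bus_dma_segment_t *seg,
    int *rseg, void **kvap, bus_dmamap_t *mapp)
{
	int error;

	if ((error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0,
	    seg, 1, rseg, BUS_DMA_NOWAIT)) != 0)
		goto fail_0;
	if ((error = bus_dmamem_map(dmat, seg, *rseg, size, kvap,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0)
		goto fail_1;
	if ((error = bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0)
		goto fail_2;
	if ((error = bus_dmamap_load(dmat, *mapp, *kvap, size, NULL,
	    BUS_DMA_NOWAIT)) != 0)
		goto fail_3;
	return 0;

	/* Release in the exact reverse of acquisition order. */
 fail_3:
	bus_dmamap_destroy(dmat, *mapp);
 fail_2:
	bus_dmamem_unmap(dmat, *kvap, size);
 fail_1:
	bus_dmamem_free(dmat, seg, *rseg);
 fail_0:
	return error;
}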
Example #2
int
oosiop_alloc_cb(struct oosiop_softc *sc, int ncb)
{
	struct oosiop_cb *cb;
	struct oosiop_xfer *xfer;
	bus_size_t xfersize;
	bus_dma_segment_t seg;
	int i, s, err, nseg;

	/*
	 * Allocate oosiop_cb.
	 */
	cb = malloc(sizeof(*cb) * ncb, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cb == NULL) {
		printf(": failed to allocate cb memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA-safe memory for the oosiop_xfer and map it.
	 */
	xfersize = sizeof(struct oosiop_xfer) * ncb;
	err = bus_dmamem_alloc(sc->sc_dmat, xfersize, PAGE_SIZE, 0, &seg, 1,
	    &nseg, BUS_DMA_NOWAIT);
	if (err) {
		printf(": failed to allocate xfer block memory, err=%d\n", err);
		return (err);
	}
	err = bus_dmamem_map(sc->sc_dmat, &seg, nseg, xfersize,
	    (caddr_t *)(void *)&xfer, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (err) {
		printf(": failed to map xfer block memory, err=%d\n", err);
		return (err);
	}

	/* Initialize each command block */
	for (i = 0; i < ncb; i++) {
		err = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
		    0, BUS_DMA_NOWAIT, &cb->cmddma);
		if (err) {
			printf(": failed to create cmddma map, err=%d\n", err);
			return (err);
		}

		err = bus_dmamap_create(sc->sc_dmat, OOSIOP_MAX_XFER,
		    OOSIOP_NSG, OOSIOP_DBC_MAX, 0, BUS_DMA_NOWAIT,
		    &cb->datadma);
		if (err) {
			printf(": failed to create datadma map, err=%d\n", err);
			return (err);
		}

		err = bus_dmamap_create(sc->sc_dmat,
		    sizeof(struct oosiop_xfer), 1, sizeof(struct oosiop_xfer),
		    0, BUS_DMA_NOWAIT, &cb->xferdma);
		if (err) {
			printf(": failed to create xfer block map, err=%d\n",
			    err);
			return (err);
		}
		err = bus_dmamap_load(sc->sc_dmat, cb->xferdma, xfer,
		    sizeof(struct oosiop_xfer), NULL, BUS_DMA_NOWAIT);
		if (err) {
			printf(": failed to load xfer block, err=%d\n", err);
			return (err);
		}

		cb->xfer = xfer;

		s = splbio();
		TAILQ_INSERT_TAIL(&sc->sc_free_cb, cb, chain);
		splx(s);

		cb++;
		xfer++;
	}

	return (0);
}
Example #3
/*
 * ae_attach:
 *
 *	Attach an ae interface to the system.
 */
void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, error;

	sc->sc_dev = self;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address.
	 */
	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-address");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    device_xname(sc->sc_dev));
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", device_xname(sc->sc_dev), error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ae_mii_readreg;
	sc->sc_mii.mii_writereg = ae_mii_writereg;
	sc->sc_mii.mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb);

	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(sc->sc_dev));

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(device_xname(sc->sc_dev),
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    device_xname(sc->sc_dev));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}
Example #4
/* ARGSUSED */
void
ie_pcctwo_attach(device_t parent, device_t self, void *aux)
{
	struct pcctwo_attach_args *pa;
	struct ie_pcctwo_softc *ps;
	struct ie_softc *sc;
	bus_dma_segment_t seg;
	int rseg;

	pa = aux;
	ps = device_private(self);
	sc = &ps->ps_ie;
	sc->sc_dev = self;

	/* Map the MPU controller registers in PCCTWO space */
	ps->ps_bust = pa->pa_bust;
	bus_space_map(pa->pa_bust, pa->pa_offset, IE_MPUREG_SIZE,
	    0, &ps->ps_bush);

	/* Get contiguous DMA-able memory for the IE chip */
	if (bus_dmamem_alloc(pa->pa_dmat, ether_data_buff_size, PAGE_SIZE, 0,
		&seg, 1, &rseg,
		BUS_DMA_NOWAIT | BUS_DMA_ONBOARD_RAM | BUS_DMA_24BIT) != 0) {
		aprint_error_dev(self, "Failed to allocate ether buffer\n");
		return;
	}
	if (bus_dmamem_map(pa->pa_dmat, &seg, rseg, ether_data_buff_size,
	    (void **)&sc->sc_maddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
		aprint_error_dev(self, "Failed to map ether buffer\n");
		bus_dmamem_free(pa->pa_dmat, &seg, rseg);
		return;
	}
	sc->bt = pa->pa_bust;
	sc->bh = (bus_space_handle_t) sc->sc_maddr;	/* XXXSCW Better way? */
	sc->sc_iobase = (void *) seg.ds_addr;
	sc->sc_msize = ether_data_buff_size;
	memset(sc->sc_maddr, 0, ether_data_buff_size);

	sc->hwreset = ie_reset;
	sc->hwinit = ie_hwinit;
	sc->chan_attn = ie_atten;
	sc->intrhook = ie_intrhook;
	sc->memcopyin = ie_copyin;
	sc->memcopyout = ie_copyout;
	sc->ie_bus_barrier = NULL;
	sc->ie_bus_read16 = ie_read_16;
	sc->ie_bus_write16 = ie_write_16;
	sc->ie_bus_write24 = ie_write_24;
	sc->sc_mediachange = NULL;
	sc->sc_mediastatus = NULL;

	sc->scp = 0;
	sc->iscp = sc->scp + ((IE_SCP_SZ + 15) & ~15);
	sc->scb = sc->iscp + IE_ISCP_SZ;
	sc->buf_area = sc->scb + IE_SCB_SZ;
	sc->buf_area_sz = sc->sc_msize - (sc->buf_area - sc->scp);

	/*
	 * BUS_USE -> Interrupt Active High (edge-triggered),
	 *            Lock function enabled,
	 *            Internal bus throttle timer triggering,
	 *            82586 operating mode.
	 */
	ie_write_16(sc, IE_SCP_BUS_USE(sc->scp), IE_BUS_USE);
	ie_write_24(sc, IE_SCP_ISCP(sc->scp), sc->iscp);
	ie_write_16(sc, IE_ISCP_SCB(sc->iscp), sc->scb);
	ie_write_24(sc, IE_ISCP_BASE(sc->iscp), sc->scp);

	/* This has the side-effect of resetting the chip */
	i82586_proberam(sc);

	/* Attach the MI back-end */
	i82586_attach(sc, "onboard", mvme_ea, NULL, 0, 0);

	/* Register the event counter */
	evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_INTR,
	    pcctwointr_evcnt(pa->pa_ipl), "ether", device_xname(self));

	/* Finally, hook the hardware interrupt */
	pcctwointr_establish(PCCTWOV_LANC_IRQ, i82586_intr, pa->pa_ipl, sc,
	    &ps->ps_evcnt);
}
Example #5
void
beattach(device_t parent, device_t self, void *aux)
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = device_private(parent);
	struct be_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int instance;
	int rseg, error;
	uint32_t v;

	sc->sc_dev = self;

	if (sa->sa_nreg < 3) {
		printf(": only %d register sets\n", sa->sa_nreg);
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(sa->sa_reg[0].oa_space, sa->sa_reg[0].oa_base),
	    (bus_size_t)sa->sa_reg[0].oa_size,
	    0, &sc->sc_cr) != 0) {
		printf(": cannot map registers\n");
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(sa->sa_reg[1].oa_space, sa->sa_reg[1].oa_base),
	    (bus_size_t)sa->sa_reg[1].oa_size,
	    0, &sc->sc_br) != 0) {
		printf(": cannot map registers\n");
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(sa->sa_reg[2].oa_space, sa->sa_reg[2].oa_base),
	    (bus_size_t)sa->sa_reg[2].oa_size,
	    0, &sc->sc_tr) != 0) {
		printf(": cannot map registers\n");
		return;
	}

	sc->sc_bustag = sa->sa_bustag;
	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_rev = prom_getpropint(node, "board-version", -1);
	printf(": rev %x,", sc->sc_rev);

	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_channel = prom_getpropint(node, "channel#", -1);
	if (sc->sc_channel == -1)
		sc->sc_channel = 0;

	sc->sc_burst = prom_getpropint(node, "burst-sizes", -1);
	if (sc->sc_burst == -1)
		sc->sc_burst = qec->sc_burst;

	/* Clamp at parent's burst sizes */
	sc->sc_burst &= qec->sc_burst;

	/* Establish interrupt handler */
	if (sa->sa_nintr)
		(void)bus_intr_establish(sa->sa_bustag, sa->sa_pri, IPL_NET,
		    beintr, sc);

	prom_getether(node, sc->sc_enaddr);
	printf(" address %s\n", ether_sprintf(sc->sc_enaddr));

	/*
	 * Allocate descriptor ring and buffers.
	 */

	/* for now, allocate as many bufs as there are ring descriptors */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

	size =
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    sc->sc_rb.rb_ntbuf * BE_PKT_BUF_SZ +
	    sc->sc_rb.rb_nrbuf * BE_PKT_BUF_SZ;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(self, "DMA map create error %d\n", error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(sa->sa_dmatag, size, 0, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer alloc error %d\n", error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(sa->sa_dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self, "DMA buffer map error %d\n", error);
		bus_dmamem_free(sa->sa_dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer map load error %d\n", error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/*
	 * Initialize our media structures and MII info.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = be_mii_readreg;
	mii->mii_writereg = be_mii_writereg;
	mii->mii_statchg = be_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, be_ifmedia_upd, be_ifmedia_sts);

	/*
	 * Initialize transceiver and determine which PHY connection to use.
	 */
	be_mii_sync(sc);
	v = bus_space_read_4(sc->sc_bustag, sc->sc_tr, BE_TRI_MGMTPAL);

	instance = 0;

	if ((v & MGMT_PAL_EXT_MDIO) != 0) {

		mii_attach(self, mii, 0xffffffff, BE_PHY_EXTERNAL,
		    MII_OFFSET_ANY, 0);

		child = LIST_FIRST(&mii->mii_phys);
		if (child == NULL) {
			/* No PHY attached */
			ifmedia_add(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, instance),
			    0, NULL);
			ifmedia_set(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, instance));
		} else {
			/*
			 * Note: we support just one PHY on the external
			 * MII connector.
			 */
#ifdef DIAGNOSTIC
			if (LIST_NEXT(child, mii_list) != NULL) {
				aprint_error_dev(self,
				    "spurious MII device %s attached\n",
				    device_xname(child->mii_dev));
			}
#endif
			if (child->mii_phy != BE_PHY_EXTERNAL ||
			    child->mii_inst > 0) {
				aprint_error_dev(self,
				    "cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				       device_xname(child->mii_dev),
				       child->mii_phy, child->mii_inst);
			} else {
				sc->sc_phys[instance] = child->mii_phy;
			}

			/*
			 * XXX - we can really do the following ONLY if the
			 * phy indeed has the auto negotiation capability!!
			 */
			ifmedia_set(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, instance));

			/* Mark our current media setting */
			be_pal_gate(sc, BE_PHY_EXTERNAL);
			instance++;
		}

	}

	if ((v & MGMT_PAL_INT_MDIO) != 0) {
		/*
		 * The be internal phy looks vaguely like MII hardware,
		 * but not enough to be able to use the MII device
		 * layer. Hence, we have to take care of media selection
		 * ourselves.
		 */

		sc->sc_mii_inst = instance;
		sc->sc_phys[instance] = BE_PHY_INTERNAL;

		/* Use `ifm_data' to store BMCR bits */
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, instance),
		    0, NULL);
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, instance),
		    BMCR_S100, NULL);
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, instance),
		    0, NULL);

		printf("on-board transceiver at %s: 10baseT, 100baseTX, auto\n",
		    device_xname(self));

		be_mii_reset(sc, BE_PHY_INTERNAL);
		/* Only set default medium here if there's no external PHY */
		if (instance == 0) {
			be_pal_gate(sc, BE_PHY_INTERNAL);
			ifmedia_set(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, instance));
		} else
			be_mii_writereg(self,
			    BE_PHY_INTERNAL, MII_BMCR, BMCR_ISO);
	}

	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = bestart;
	ifp->if_ioctl = beioctl;
	ifp->if_watchdog = bewatchdog;
	ifp->if_init = beinit;
	ifp->if_stop = bestop;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* claim 802.1q capability */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
}
Example #6
/*
 * Initialise our interface to the controller.
 */
int
cac_init(struct cac_softc *sc, const char *intrstr, int startfw)
{
	struct cac_controller_info cinfo;
	int error, rseg, size, i;
	bus_dma_segment_t seg;
	struct cac_ccb *ccb;
	char firm[8];

	if (intrstr != NULL)
		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_queue);
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_ccb_cv, "cacccb");

	size = sizeof(struct cac_ccb) * CAC_MAX_CCBS;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to allocate CCBs, error = %d\n",
		    error);
		return (-1);
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (void **)&sc->sc_ccbs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to map CCBs, error = %d\n",
		    error);
		return (-1);
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create CCB DMA map, error = %d\n",
		    error);
		return (-1);
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_ccbs,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to load CCB DMA map, error = %d\n",
		    error);
		return (-1);
	}

	sc->sc_ccbs_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
	memset(sc->sc_ccbs, 0, size);
	ccb = (struct cac_ccb *)sc->sc_ccbs;

	for (i = 0; i < CAC_MAX_CCBS; i++, ccb++) {
		/* Create the DMA map for this CCB's data */
		error = bus_dmamap_create(sc->sc_dmat, CAC_MAX_XFER,
		    CAC_SG_SIZE, CAC_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);

		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create ccb dmamap (%d)\n",
			    error);
			break;
		}

		ccb->ccb_flags = 0;
		ccb->ccb_paddr = sc->sc_ccbs_paddr + i * sizeof(struct cac_ccb);
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_chain);
	}

	/* Start firmware background tasks, if needed. */
	if (startfw) {
		if (cac_cmd(sc, CAC_CMD_START_FIRMWARE, &cinfo, sizeof(cinfo),
		    0, 0, CAC_CCB_DATA_IN, NULL)) {
			aprint_error_dev(sc->sc_dev, "CAC_CMD_START_FIRMWARE failed\n");
			return (-1);
		}
	}

	if (cac_cmd(sc, CAC_CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo), 0, 0,
	    CAC_CCB_DATA_IN, NULL)) {
		aprint_error_dev(sc->sc_dev, "CAC_CMD_GET_CTRL_INFO failed\n");
		return (-1);
	}

	strlcpy(firm, cinfo.firm_rev, 4+1);
	printf("%s: %d channels, firmware <%s>\n", device_xname(sc->sc_dev),
	    cinfo.scsi_chips, firm);

	/* Limit number of units to size of our sc_unitmask */
	sc->sc_nunits = cinfo.num_drvs;
	if (sc->sc_nunits > sizeof(sc->sc_unitmask) * NBBY)
		sc->sc_nunits = sizeof(sc->sc_unitmask) * NBBY;

	/* Attach our units */
	sc->sc_unitmask = 0;
	cac_rescan(sc->sc_dev, "cac", 0);

	/* Set our `shutdownhook' before we start any device activity. */
	if (cac_sdh == NULL)
		cac_sdh = shutdownhook_establish(cac_shutdown, NULL);

	mutex_enter(&sc->sc_mutex);
	(*sc->sc_cl.cl_intr_enable)(sc, CAC_INTR_ENABLE);
	mutex_exit(&sc->sc_mutex);

#if NBIO > 0
	if (bio_register(sc->sc_dev, cac_ioctl) != 0)
		aprint_error_dev(sc->sc_dev, "controller registration failed");
	else
		sc->sc_ioctl = cac_ioctl;
	if (cac_create_sensors(sc) != 0)
		aprint_error_dev(sc->sc_dev, "unable to create sensors\n");
#endif

	return (0);
}
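
cac_init() above loads one map for the whole CCB array and derives each CCB's device-visible address as the first segment's base plus the element offset. A minimal sketch of that arithmetic, valid only when the array was allocated as a single contiguous segment (nsegs = 1) and while the map stays loaded; the xx_ name is hypothetical.

/*
 * Bus address of element `idx' within one loaded, single-segment
 * DMA map, as cac_init() computes ccb_paddr for each CCB.
 */
static bus_addr_t
xx_elem_busaddr(bus_dmamap_t map, size_t elemsize, int idx)
{
	/* dm_segs[0].ds_addr is the bus address of element 0. */
	return map->dm_segs[0].ds_addr + idx * elemsize;
}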
Example #7
/*
 * Init UNIBUS for interface whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this once for each read and once for each write buffer.  We also
 * allocate page frames in the mbuffer pool for these pages.
 *
 * Recent changes:
 *	No special "header pages" anymore.
 *	Recv packets are always put in clusters.
 *	"size" is the maximum buffer size, may not be bigger than MCLBYTES.
 */
int
if_ubaminit(struct ifubinfo *ifu, struct uba_softc *uh, int size,
            struct ifrw *ifr, int nr, struct ifxmt *ifw, int nw)
{
    struct mbuf *m;
    int totsz, i, error, rseg, nm = nr;
    bus_dma_segment_t seg;
    caddr_t vaddr;

#ifdef DIAGNOSTIC
    if (size > MCLBYTES)
        panic("if_ubaminit: size > MCLBYTES");
#endif
    ifu->iff_softc = uh;
    /*
     * Get DMA memory for transmit buffers.
     * Buffer size are rounded up to a multiple of the uba page size,
     * then allocated contiguous.
     */
    size = (size + UBA_PGOFSET) & ~UBA_PGOFSET;
    totsz = size * nw;
    if ((error = bus_dmamem_alloc(uh->uh_dmat, totsz, NBPG, 0,
                                  &seg, 1, &rseg, BUS_DMA_NOWAIT)))
        return error;
    if ((error = bus_dmamem_map(uh->uh_dmat, &seg, rseg, totsz, &vaddr,
                                BUS_DMA_NOWAIT|BUS_DMA_COHERENT))) {
        bus_dmamem_free(uh->uh_dmat, &seg, rseg);
        return error;
    }

    /*
     * Create receive and transmit maps.
     * Alloc all resources now so we won't fail in the future.
     */

    for (i = 0; i < nr; i++) {
        if ((error = bus_dmamap_create(uh->uh_dmat, size, 1,
                                       size, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
                                       &ifr[i].ifrw_map))) {
            nr = i;
            nm = nw = 0;
            goto bad;
        }
    }
    for (i = 0; i < nw; i++) {
        if ((error = bus_dmamap_create(uh->uh_dmat, size, 1,
                                       size, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
                                       &ifw[i].ifw_map))) {
            nw = i;
            nm = 0;
            goto bad;
        }
    }
    /*
     * Preload the rx maps with mbuf clusters.
     */
    for (i = 0; i < nm; i++) {
        if ((m = getmcl()) == NULL) {
            error = ENOBUFS;
            nm = i;
            goto bad;
        }
        ifr[i].ifrw_mbuf = m;
        bus_dmamap_load(uh->uh_dmat, ifr[i].ifrw_map,
                        m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);

    }
    /*
     * Load the tx maps with DMA memory (common case).
     */
    for (i = 0; i < nw; i++) {
        ifw[i].ifw_vaddr = vaddr + size * i;
        ifw[i].ifw_size = size;
        bus_dmamap_load(uh->uh_dmat, ifw[i].ifw_map,
                        ifw[i].ifw_vaddr, ifw[i].ifw_size, NULL, BUS_DMA_NOWAIT);
    }
    return 0;
bad:
    while (--nm >= 0) {
        bus_dmamap_unload(uh->uh_dmat, ifr[nm].ifrw_map);
        m_freem(ifr[nm].ifrw_mbuf);
    }
    while (--nw >= 0)
        bus_dmamap_destroy(uh->uh_dmat, ifw[nw].ifw_map);
    while (--nr >= 0)
        bus_dmamap_destroy(uh->uh_dmat, ifr[nr].ifrw_map);
    bus_dmamem_unmap(uh->uh_dmat, vaddr, totsz);
    bus_dmamem_free(uh->uh_dmat, &seg, rseg);
    return (error);
}
Example #8
int
pdq_os_memalloc_contig(
    pdq_t *pdq)
{
    pdq_softc_t * const sc = pdq->pdq_os_ctx;
    bus_dma_segment_t db_segs[1], ui_segs[1], cb_segs[1];
    int db_nsegs = 0, ui_nsegs = 0;
    int steps = 0;
    int not_ok;

    not_ok = bus_dmamem_alloc(sc->sc_dmatag,
			 sizeof(*pdq->pdq_dbp), sizeof(*pdq->pdq_dbp),
			 sizeof(*pdq->pdq_dbp), db_segs, 1, &db_nsegs,
			 BUS_DMA_NOWAIT);
    if (!not_ok) {
	steps = 1;
	not_ok = bus_dmamem_map(sc->sc_dmatag, db_segs, db_nsegs,
				sizeof(*pdq->pdq_dbp), (caddr_t *) &pdq->pdq_dbp,
				BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 2;
	not_ok = bus_dmamap_create(sc->sc_dmatag, db_segs[0].ds_len, 1,
				   0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_dbmap);
    }
    if (!not_ok) {
	steps = 3;
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_dbmap,
				 pdq->pdq_dbp, sizeof(*pdq->pdq_dbp),
				 NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 4;
	pdq->pdq_pa_descriptor_block = sc->sc_dbmap->dm_segs[0].ds_addr;
	not_ok = bus_dmamem_alloc(sc->sc_dmatag,
			 PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE,
			 ui_segs, 1, &ui_nsegs, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 5;
	not_ok = bus_dmamem_map(sc->sc_dmatag, ui_segs, ui_nsegs,
			    PDQ_OS_PAGESIZE,
			    (caddr_t *) &pdq->pdq_unsolicited_info.ui_events,
			    BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 6;
	not_ok = bus_dmamap_create(sc->sc_dmatag, ui_segs[0].ds_len, 1,
				   PDQ_OS_PAGESIZE, 0, BUS_DMA_NOWAIT,
				   &sc->sc_uimap);
    }
    if (!not_ok) {
	steps = 7;
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_uimap,
				 pdq->pdq_unsolicited_info.ui_events,
				 PDQ_OS_PAGESIZE, NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 8;
	pdq->pdq_unsolicited_info.ui_pa_bufstart = sc->sc_uimap->dm_segs[0].ds_addr;
	cb_segs[0] = db_segs[0];
	cb_segs[0].ds_addr += offsetof(pdq_descriptor_block_t, pdqdb_consumer);
	cb_segs[0].ds_len = sizeof(pdq_consumer_block_t);
	not_ok = bus_dmamem_map(sc->sc_dmatag, cb_segs, 1,
				sizeof(*pdq->pdq_cbp), (caddr_t *) &pdq->pdq_cbp,
				BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    }
    if (!not_ok) {
	steps = 9;
	not_ok = bus_dmamap_create(sc->sc_dmatag, cb_segs[0].ds_len, 1,
				   0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_cbmap);
    }
    if (!not_ok) {
	steps = 10;
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_cbmap,
				 (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp),
				 NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	pdq->pdq_pa_consumer_block = sc->sc_cbmap->dm_segs[0].ds_addr;
	return not_ok;
    }

    switch (steps) {
	case 11: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_cbmap);
	    /* FALL THROUGH */
	}
	case 10: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cbmap);
	    /* FALL THROUGH */
	}
	case 9: {
	    bus_dmamem_unmap(sc->sc_dmatag,
			     (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp));
	    /* FALL THROUGH */
	}
	case 8: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_uimap);
	    /* FALL THROUGH */
	}
	case 7: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_uimap);
	    /* FALL THROUGH */
	}
	case 6: {
	    bus_dmamem_unmap(sc->sc_dmatag,
			     (caddr_t) pdq->pdq_unsolicited_info.ui_events,
			     PDQ_OS_PAGESIZE);
	    /* FALL THROUGH */
	}
	case 5: {
	    bus_dmamem_free(sc->sc_dmatag, ui_segs, ui_nsegs);
	    /* FALL THROUGH */
	}
	case 4: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_dbmap);
	    /* FALL THROUGH */
	}
	case 3: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_dbmap);
	    /* FALL THROUGH */
	}
	case 2: {
	    bus_dmamem_unmap(sc->sc_dmatag,
			     (caddr_t) pdq->pdq_dbp,
			     sizeof(*pdq->pdq_dbp));
	    /* FALL THROUGH */
	}
	case 1: {
	    bus_dmamem_free(sc->sc_dmatag, db_segs, db_nsegs);
	    /* FALL THROUGH */
	}
    }

    return not_ok;
}
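
Unlike the goto-label unwinding in the other examples, pdq_os_memalloc_contig() records how far setup progressed in a step counter and releases resources through a fall-through switch, newest acquisition first. A condensed sketch of the same idiom with three hypothetical acquire/release pairs; the struct and xx_ helpers are placeholders, not a real API.

/* Placeholders standing in for the real acquire/release steps. */
struct xx_softc;
static int  xx_acquire_a(struct xx_softc *), xx_acquire_b(struct xx_softc *),
            xx_acquire_c(struct xx_softc *);
static void xx_release_a(struct xx_softc *), xx_release_b(struct xx_softc *);

static int
xx_setup(struct xx_softc *sc)
{
	int steps = 0, err;

	/* `steps' records the highest fully completed step. */
	if ((err = xx_acquire_a(sc)) == 0) {
		steps = 1;
		err = xx_acquire_b(sc);
	}
	if (err == 0) {
		steps = 2;
		err = xx_acquire_c(sc);
	}
	if (err == 0)
		return 0;

	switch (steps) {	/* unwind, newest acquisition first */
	case 2:
		xx_release_b(sc);
		/* FALL THROUGH */
	case 1:
		xx_release_a(sc);
	}
	return err;
}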
Example #9
void
bmac_attach(struct device *parent, struct device *self, void *aux)
{
	struct confargs *ca = aux;
	struct bmac_softc *sc = (void *)self;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	u_char laddr[6];
	int nseg, error;

	timeout_set(&sc->sc_tick_ch, bmac_mii_tick, sc);

	sc->sc_flags = 0;
	if (strcmp(ca->ca_name, "ethernet") == 0) {
		sc->sc_flags |= BMAC_BMACPLUS;
	}

	ca->ca_reg[0] += ca->ca_baseaddr;
	ca->ca_reg[2] += ca->ca_baseaddr;
	ca->ca_reg[4] += ca->ca_baseaddr;

	sc->sc_regs = (vaddr_t)mapiodev(ca->ca_reg[0], NBPG);

	bmac_write_reg(sc, INTDISABLE, NoEventsMask);

	if (OF_getprop(ca->ca_node, "local-mac-address", laddr, 6) == -1 &&
	    OF_getprop(ca->ca_node, "mac-address", laddr, 6) == -1) {
		printf(": cannot get mac-address\n");
		return;
	}
	bcopy(laddr, sc->arpcom.ac_enaddr, 6);

	sc->sc_dmat = ca->ca_dmat;
	sc->sc_txdma = mapiodev(ca->ca_reg[2], 0x100);
	sc->sc_rxdma = mapiodev(ca->ca_reg[4], 0x100);
	sc->sc_txdbdma = dbdma_alloc(sc->sc_dmat, BMAC_TXBUFS);
	sc->sc_txcmd = sc->sc_txdbdma->d_addr;
	sc->sc_rxdbdma = dbdma_alloc(sc->sc_dmat, BMAC_RXBUFS + 1);
	sc->sc_rxcmd = sc->sc_rxdbdma->d_addr;

	error = bus_dmamem_alloc(sc->sc_dmat, BMAC_BUFSZ,
	    PAGE_SIZE, 0, sc->sc_bufseg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		printf(": cannot allocate buffers (%d)\n", error);
		return;
	}

	error = bus_dmamem_map(sc->sc_dmat, sc->sc_bufseg, nseg,
	    BMAC_BUFSZ, &sc->sc_txbuf, BUS_DMA_NOWAIT);
	if (error) {
		printf(": cannot map buffers (%d)\n", error);
		bus_dmamem_free(sc->sc_dmat, sc->sc_bufseg, 1);
		return;
	}

	error = bus_dmamap_create(sc->sc_dmat, BMAC_BUFSZ, 1, BMAC_BUFSZ, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_bufmap);
	if (error) {
		printf(": cannot create buffer dmamap (%d)\n", error);
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_txbuf, BMAC_BUFSZ);
		bus_dmamem_free(sc->sc_dmat, sc->sc_bufseg, 1);
		return;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_bufmap, sc->sc_txbuf,
	    BMAC_BUFSZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf(": cannot load buffers dmamap (%d)\n", error);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufmap);
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_txbuf, BMAC_BUFSZ);
		bus_dmamem_free(sc->sc_dmat, sc->sc_bufseg, nseg);
		return;
	}

	sc->sc_txbuf_pa = sc->sc_bufmap->dm_segs->ds_addr;
	sc->sc_rxbuf = sc->sc_txbuf + BMAC_BUFLEN * BMAC_TXBUFS;
	sc->sc_rxbuf_pa = sc->sc_txbuf_pa + BMAC_BUFLEN * BMAC_TXBUFS;

	printf(" irq %d,%d: address %s\n", ca->ca_intr[0], ca->ca_intr[2],
		ether_sprintf(laddr));

	mac_intr_establish(parent, ca->ca_intr[0], IST_LEVEL, IPL_NET,
	    bmac_intr, sc, sc->sc_dev.dv_xname);
	mac_intr_establish(parent, ca->ca_intr[2], IST_LEVEL, IPL_NET,
	    bmac_rint, sc, sc->sc_dev.dv_xname);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_ioctl = bmac_ioctl;
	ifp->if_start = bmac_start;
	ifp->if_flags =
		IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_watchdog = bmac_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	mii->mii_ifp = ifp;
	mii->mii_readreg = bmac_mii_readreg;
	mii->mii_writereg = bmac_mii_writereg;
	mii->mii_statchg = bmac_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, bmac_mediachange, bmac_mediastatus);
	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	/* Choose a default media. */
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_10_T);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);

	bmac_reset_chip(sc);

	if_attach(ifp);
	ether_ifattach(ifp);
}
Example #10
static int
gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
	     u32 memtype, struct nouveau_mem **pmem)
{
#if !defined(__NetBSD__)
	struct device *dev = nv_device_base(nv_device(pfb));
	int i;
#endif
	struct gk20a_mem *mem;
	u32 type = memtype & 0xff;
	u32 npages, order;

	nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__,
		 (unsigned long long)size, align, ncmin);

	npages = size >> PAGE_SHIFT;
	if (npages == 0)
		npages = 1;

	if (align == 0)
		align = PAGE_SIZE;
	align >>= PAGE_SHIFT;

	/* round alignment to the next power of 2, if needed */
#if defined(__NetBSD__)
	order = fls32(align);
#else
	order = fls(align);
#endif
	if ((align & (align - 1)) == 0)
		order--;
	align = BIT(order);

	/* ensure returned address is correctly aligned */
	npages = max(align, npages);

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->base.size = npages;
	mem->base.memtype = type;

#if defined(__NetBSD__)
	int ret, nsegs;

	if (align == 0)
		align = PAGE_SIZE;

	const bus_dma_tag_t dmat = nv_device(pfb)->platformdev->dmat;
	const bus_size_t dmasize = npages << PAGE_SHIFT;

	ret = -bus_dmamem_alloc(dmat, dmasize, align, 0,
	    &mem->dmaseg, 1, &nsegs, BUS_DMA_WAITOK);
	if (ret) {
fail0:		kfree(mem);
		return ret;
	}
	KASSERT(nsegs == 1);

	ret = -bus_dmamap_create(dmat, dmasize, nsegs, dmasize, 0,
	    BUS_DMA_WAITOK, &mem->base.pages);
	if (ret) {
fail1:		bus_dmamem_free(dmat, &mem->dmaseg, nsegs);
		goto fail0;
	}

	ret = -bus_dmamem_map(dmat, &mem->dmaseg, nsegs, dmasize,
	    &mem->cpuaddr, BUS_DMA_WAITOK | BUS_DMA_COHERENT);
	if (ret) {
fail2:		bus_dmamap_destroy(dmat, mem->base.pages);
		goto fail1;
	}
	memset(mem->cpuaddr, 0, dmasize);

	ret = -bus_dmamap_load(dmat, mem->base.pages, mem->cpuaddr,
	    dmasize, NULL, BUS_DMA_WAITOK);
	if (ret) {
fail3: __unused	bus_dmamem_unmap(dmat, mem->cpuaddr, dmasize);
		goto fail2;
	}

	nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %"PRIxPADDR
	   ", vaddr: %p\n", npages << PAGE_SHIFT, align,
	   mem->base.pages->dm_segs[0].ds_addr, mem->cpuaddr);

	mem->dmasize = dmasize;
	mem->base.offset = (u64)mem->base.pages->dm_segs[0].ds_addr;
	*pmem = &mem->base;
#else
	mem->base.pages = kzalloc(sizeof(dma_addr_t) * npages, GFP_KERNEL);
	if (!mem->base.pages) {
		kfree(mem);
		return -ENOMEM;
	}

	*pmem = &mem->base;

	mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
					  &mem->handle, GFP_KERNEL);
	if (!mem->cpuaddr) {
		nv_error(pfb, "%s: cannot allocate memory!\n", __func__);
		gk20a_ram_put(pfb, pmem);
		return -ENOMEM;
	}

	align <<= PAGE_SHIFT;

	/* alignment check */
	if (unlikely(mem->handle & (align - 1)))
		nv_warn(pfb, "memory not aligned as requested: %pad (0x%x)\n",
			&mem->handle, align);

	nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %pad, vaddr: %p\n",
		 npages << PAGE_SHIFT, align, &mem->handle, mem->cpuaddr);

	for (i = 0; i < npages; i++)
		mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);

	mem->base.offset = (u64)mem->base.pages[0];
#endif

	return 0;
}
Example #11
static int
drm_ati_alloc_pcigart_table(struct drm_device *dev,
			    struct drm_ati_pcigart_info *gart_info)
{
	struct drm_dma_handle *dmah;
	int flags, ret;
#if defined(__NetBSD__)
	int nsegs;
#endif

	dmah = malloc(sizeof(struct drm_dma_handle), DRM_MEM_DMA,
	    M_ZERO | M_NOWAIT);
	if (dmah == NULL)
		return ENOMEM;

#if defined(__FreeBSD__)
	DRM_UNLOCK();
	ret = bus_dma_tag_create(NULL, PAGE_SIZE, 0, /* tag, align, boundary */
	    gart_info->table_mask, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
	    NULL, NULL, /* filtfunc, filtfuncargs */
	    gart_info->table_size, 1, /* maxsize, nsegs */
	    gart_info->table_size, /* maxsegsize */
	    BUS_DMA_ALLOCNOW, NULL, NULL, /* flags, lockfunc, lockfuncargs */
	    &dmah->tag);
	if (ret != 0) {
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}

	flags = BUS_DMA_NOWAIT | BUS_DMA_ZERO;
	if (gart_info->gart_reg_if == DRM_ATI_GART_IGP)
	    flags |= BUS_DMA_NOCACHE;
	
	ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr, flags, &dmah->map);
	if (ret != 0) {
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}
	DRM_LOCK();

	ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr,
	    gart_info->table_size, drm_ati_alloc_pcigart_table_cb, dmah, 0);
	if (ret != 0) {
		bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}

#elif   defined(__NetBSD__)
	dmah->tag = dev->pa.pa_dmat;

	flags = BUS_DMA_NOWAIT;
	if (gart_info->gart_reg_if == DRM_ATI_GART_IGP)
		flags |= BUS_DMA_NOCACHE;

	ret = bus_dmamem_alloc(dmah->tag, gart_info->table_size, PAGE_SIZE,
	0, dmah->segs, 1, &nsegs, flags);
	if (ret != 0) {
		printf("drm: unable to allocate %zu bytes of DMA, error %d\n",
			(size_t)gart_info->table_size, ret);
		dmah->tag = NULL;
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}
	if (nsegs != 1) {
		printf("drm: bad segment count\n");
		bus_dmamem_free(dmah->tag, dmah->segs, 1);
		dmah->tag = NULL;
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}

	ret = bus_dmamem_map(dmah->tag, dmah->segs, nsegs,
			     gart_info->table_size, &dmah->vaddr,
			     flags | BUS_DMA_COHERENT);
	if (ret != 0) {
		printf("drm: Unable to map DMA, error %d\n", ret);
		bus_dmamem_free(dmah->tag, dmah->segs, 1);
		dmah->tag = NULL;
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}

	ret = bus_dmamap_create(dmah->tag, gart_info->table_size, 1,
				gart_info->table_size, 0,
				BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dmah->map);
	if (ret != 0) {
		printf("drm: Unable to create DMA map, error %d\n", ret);
		bus_dmamem_unmap(dmah->tag, dmah->vaddr, gart_info->table_size);
		bus_dmamem_free(dmah->tag, dmah->segs, 1);
		dmah->tag = NULL;
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}

	ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr,
			      gart_info->table_size, NULL, BUS_DMA_NOWAIT);
	if (ret != 0) {
		printf("drm: Unable to load DMA map, error %d\n", ret);
		bus_dmamap_destroy(dmah->tag, dmah->map);
		bus_dmamem_unmap(dmah->tag, dmah->vaddr, gart_info->table_size);
		bus_dmamem_free(dmah->tag, dmah->segs, 1);
		dmah->tag = NULL;
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}
	dmah->busaddr = dmah->map->dm_segs[0].ds_addr;
	dmah->size = gart_info->table_size;
	dmah->nsegs = 1;
#if 0
	/*
	 * Mirror here FreeBSD doing BUS_DMA_ZERO.
	 * But I see this same memset() is done in drm_ati_pcigart_init(),
	 * so maybe this is not needed.
	 */
	memset(dmah->vaddr, 0, gart_info->table_size);
#endif
#endif

	dev->sg->dmah = dmah;

	return 0;
}
Example #12
/*
 * Generic device.
 */
static void
plb_tft_attach(struct device *parent, struct device *self, void *aux)
{
	struct xcvbus_attach_args *vaa = aux;
	struct plb_tft_softc 	*psc = device_private(self);
	struct tft_softc 	*sc = &psc->psc_sc;
	int 			nseg, error;

	psc->psc_dmat 	= vaa->vaa_dmat;
	sc->sc_iot 	= vaa->vaa_iot;

	printf(": PLB_TFT\n");

	if ((error = bus_space_map(sc->sc_iot, vaa->vaa_addr, TFT_SIZE,
	    0, &sc->sc_ioh)) != 0) {
	    	printf("%s: could not map device registers\n",
		    device_xname(self));
	    	goto fail_0;
	}

	/* Fill in resolution, depth, size. */
	tft_mode(self);

	/* Allocate and map framebuffer control data. */
	if ((error = bus_dmamem_alloc(psc->psc_dmat, sc->sc_size, ADDR_ALIGN,
	    0, &psc->psc_seg, 1, &nseg, 0)) != 0) {
	    	printf("%s: could not allocate framebuffer\n",
	    	    device_xname(self));
		goto fail_1;
	}
	if ((error = bus_dmamem_map(psc->psc_dmat, &psc->psc_seg, nseg,
	    sc->sc_size, &sc->sc_image, BUS_DMA_COHERENT)) != 0) {
	    	printf("%s: could not map framebuffer\n",
	    	    device_xname(self));
		goto fail_2;
	}
	if ((error = bus_dmamap_create(psc->psc_dmat, sc->sc_size, 1,
	    sc->sc_size, 0, 0, &psc->psc_dmap)) != 0) {
	    	printf("%s: could not create framebuffer DMA map\n",
	    	    device_xname(self));
		goto fail_3;
	}
	if ((error = bus_dmamap_load(psc->psc_dmat, psc->psc_dmap,
	    sc->sc_image, sc->sc_size, NULL, 0)) != 0) {
	    	printf("%s: could not load framebuffer DMA map\n",
	    	    device_xname(self));
		goto fail_4;
	}
	/* XXX hack, we linear map whole RAM and we have single segment */
	/* sc->sc_image = (void *)psc->psc_dmap->dm_segs[0].ds_addr; */
	/* XXX hack, use predefined base addr */
	sc->sc_image = (void *)(uintptr_t)0x3c00000;

	/* XXX f**k the hack above, use "virtex-tft-framebuffer-base" prop */

	tft_attach(self, &plb_tft_accessops);

	printf("%s: video memory pa 0x%08x\n", device_xname(self),
	    (uint32_t)psc->psc_dmap->dm_segs[0].ds_addr);

#if 0
	/* XXX powerpc bus_dma doesn't support COHERENT. */
	bus_dmamap_sync(psc->psc_dmat, psc->psc_dmap, 0,
	    sc->sc_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#endif

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TFT_CTRL, CTRL_RESET);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TFT_CTRL, CTRL_ENABLE);

#if 0
	/* XXX how the f**k do we change framebuffer base? */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TFT_CTRL, CTRL_RESET);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TFT_CTRL, CTRL_ENABLE);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TFT_ADDR,
	    ADDR_MAKE(psc->psc_dmap->dm_segs[0].ds_addr));
#endif

	return;

 fail_4:
 	bus_dmamap_destroy(psc->psc_dmat, psc->psc_dmap);
 fail_3:
 	bus_dmamem_unmap(psc->psc_dmat, sc->sc_image, sc->sc_size);
 fail_2:
 	bus_dmamem_free(psc->psc_dmat, &psc->psc_seg, nseg);
 fail_1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, TFT_SIZE);
 fail_0:
	printf("%s: error %d\n", device_xname(self), error);
}
Example #13
void
ahci_attach(struct ahci_softc *sc)
{
	u_int32_t ahci_cap, ahci_rev, ahci_ports;
	int i, j, port;
	struct ahci_channel *achp;
	struct ata_channel *chp;
	int error;
	bus_dma_segment_t seg;
	int rseg;
	int dmasize;
	void *cmdhp;
	void *cmdtblp;

	if (ahci_reset(sc) != 0)
		return;

	ahci_cap = AHCI_READ(sc, AHCI_CAP);
	sc->sc_atac.atac_nchannels = (ahci_cap & AHCI_CAP_NPMASK) + 1;
	sc->sc_ncmds = ((ahci_cap & AHCI_CAP_NCS) >> 8) + 1;
	ahci_rev = AHCI_READ(sc, AHCI_VS);
	aprint_normal("%s: AHCI revision ", AHCINAME(sc));
	switch(ahci_rev) {
	case AHCI_VS_10:
		aprint_normal("1.0");
		break;
	case AHCI_VS_11:
		aprint_normal("1.1");
		break;
	case AHCI_VS_12:
		aprint_normal("1.2");
		break;
	default:
		aprint_normal("0x%x", ahci_rev);
		break;
	}

	aprint_normal(", %d ports, %d command slots, features 0x%x\n",
	    sc->sc_atac.atac_nchannels, sc->sc_ncmds,
	    ahci_cap & ~(AHCI_CAP_NPMASK|AHCI_CAP_NCS));
	sc->sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DMA | ATAC_CAP_UDMA;
	sc->sc_atac.atac_cap |= sc->sc_atac_capflags;
	sc->sc_atac.atac_pio_cap = 4;
	sc->sc_atac.atac_dma_cap = 2;
	sc->sc_atac.atac_udma_cap = 6;
	sc->sc_atac.atac_channels = sc->sc_chanarray;
	sc->sc_atac.atac_probe = ahci_probe_drive;
	sc->sc_atac.atac_bustype_ata = &ahci_ata_bustype;
	sc->sc_atac.atac_set_modes = ahci_setup_channel;
#if NATAPIBUS > 0
	sc->sc_atac.atac_atapibus_attach = ahci_atapibus_attach;
#endif

	dmasize =
	    (AHCI_RFIS_SIZE + AHCI_CMDH_SIZE) * sc->sc_atac.atac_nchannels;
	error = bus_dmamem_alloc(sc->sc_dmat, dmasize, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error("%s: unable to allocate command header memory"
		    ", error=%d\n", AHCINAME(sc), error);
		return;
	}
	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, dmasize,
	    &cmdhp, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error) {
		aprint_error("%s: unable to map command header memory"
		    ", error=%d\n", AHCINAME(sc), error);
		return;
	}
	error = bus_dmamap_create(sc->sc_dmat, dmasize, 1, dmasize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_cmd_hdrd);
	if (error) {
		aprint_error("%s: unable to create command header map"
		    ", error=%d\n", AHCINAME(sc), error);
		return;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmd_hdrd,
	    cmdhp, dmasize, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error("%s: unable to load command header map"
		    ", error=%d\n", AHCINAME(sc), error);
		return;
	}
	sc->sc_cmd_hdr = cmdhp;

	ahci_enable_intrs(sc);

	ahci_ports = AHCI_READ(sc, AHCI_PI);
	for (i = 0, port = 0; i < AHCI_MAX_PORTS; i++) {
		if ((ahci_ports & (1 << i)) == 0)
			continue;
		if (port >= sc->sc_atac.atac_nchannels) {
			aprint_error("%s: more ports than announced\n",
			    AHCINAME(sc));
			break;
		}
		achp = &sc->sc_channels[i];
		chp = (struct ata_channel *)achp;
		sc->sc_chanarray[i] = chp;
		chp->ch_channel = i;
		chp->ch_atac = &sc->sc_atac;
		chp->ch_queue = malloc(sizeof(struct ata_queue),
		    M_DEVBUF, M_NOWAIT);
		if (chp->ch_queue == NULL) {
			aprint_error("%s port %d: can't allocate memory for "
			    "command queue", AHCINAME(sc), i);
			break;
		}
		dmasize = AHCI_CMDTBL_SIZE * sc->sc_ncmds;
		error = bus_dmamem_alloc(sc->sc_dmat, dmasize, PAGE_SIZE, 0,
		    &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (error) {
			aprint_error("%s: unable to allocate command table "
			    "memory, error=%d\n", AHCINAME(sc), error);
			break;
		}
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, dmasize,
		    &cmdtblp, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
		if (error) {
			aprint_error("%s: unable to map command table memory"
			    ", error=%d\n", AHCINAME(sc), error);
			break;
		}
		error = bus_dmamap_create(sc->sc_dmat, dmasize, 1, dmasize, 0,
		    BUS_DMA_NOWAIT, &achp->ahcic_cmd_tbld);
		if (error) {
			aprint_error("%s: unable to create command table map"
			    ", error=%d\n", AHCINAME(sc), error);
			break;
		}
		error = bus_dmamap_load(sc->sc_dmat, achp->ahcic_cmd_tbld,
		    cmdtblp, dmasize, NULL, BUS_DMA_NOWAIT);
		if (error) {
			aprint_error("%s: unable to load command table map"
			    ", error=%d\n", AHCINAME(sc), error);
			break;
		}
		achp->ahcic_cmdh  = (struct ahci_cmd_header *)
		    ((char *)cmdhp + AHCI_CMDH_SIZE * port);
		achp->ahcic_bus_cmdh = sc->sc_cmd_hdrd->dm_segs[0].ds_addr +
		    AHCI_CMDH_SIZE * port;
		achp->ahcic_rfis = (struct ahci_r_fis *)
		    ((char *)cmdhp + 
		     AHCI_CMDH_SIZE * sc->sc_atac.atac_nchannels + 
		     AHCI_RFIS_SIZE * port);
		achp->ahcic_bus_rfis = sc->sc_cmd_hdrd->dm_segs[0].ds_addr +
		     AHCI_CMDH_SIZE * sc->sc_atac.atac_nchannels + 
		     AHCI_RFIS_SIZE * port;
		AHCIDEBUG_PRINT(("port %d cmdh %p (0x%x) rfis %p (0x%x)\n", i,
		   achp->ahcic_cmdh, (u_int)achp->ahcic_bus_cmdh,
		   achp->ahcic_rfis, (u_int)achp->ahcic_bus_rfis),
		   DEBUG_PROBE);
		    
		for (j = 0; j < sc->sc_ncmds; j++) {
			achp->ahcic_cmd_tbl[j] = (struct ahci_cmd_tbl *)
			    ((char *)cmdtblp + AHCI_CMDTBL_SIZE * j);
			achp->ahcic_bus_cmd_tbl[j] =
			     achp->ahcic_cmd_tbld->dm_segs[0].ds_addr +
			     AHCI_CMDTBL_SIZE * j;
			achp->ahcic_cmdh[j].cmdh_cmdtba =
			    htole32(achp->ahcic_bus_cmd_tbl[j]);
			achp->ahcic_cmdh[j].cmdh_cmdtbau = htole32(0);
			AHCIDEBUG_PRINT(("port %d/%d tbl %p (0x%x)\n", i, j,
			    achp->ahcic_cmd_tbl[j],
			    (u_int)achp->ahcic_bus_cmd_tbl[j]), DEBUG_PROBE);
			/* The xfer DMA map */
			error = bus_dmamap_create(sc->sc_dmat, MAXPHYS,
			    AHCI_NPRD, 0x400000 /* 4MB */, 0,
			    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
			    &achp->ahcic_datad[j]);
			if (error) {
				aprint_error("%s: couldn't alloc xfer DMA map, "
				    "error=%d\n", AHCINAME(sc), error);
				goto end;
			}
		}
		ahci_setup_port(sc, i);
		chp->ch_ndrive = 1;
		if (bus_space_subregion(sc->sc_ahcit, sc->sc_ahcih,
		    AHCI_P_SSTS(i), 1,  &achp->ahcic_sstatus) != 0) {
			aprint_error("%s: couldn't map channel %d "
			    "sata_status regs\n", AHCINAME(sc), i);
			break;
		}
		if (bus_space_subregion(sc->sc_ahcit, sc->sc_ahcih,
		    AHCI_P_SCTL(i), 1,  &achp->ahcic_scontrol) != 0) {
			aprint_error("%s: couldn't map channel %d "
			    "sata_control regs\n", AHCINAME(sc), i);
			break;
		}
		if (bus_space_subregion(sc->sc_ahcit, sc->sc_ahcih,
		    AHCI_P_SERR(i), 1,  &achp->ahcic_serror) != 0) {
			aprint_error("%s: couldn't map channel %d "
			    "sata_error regs\n", AHCINAME(sc), i);
			break;
		}
		ata_channel_attach(chp);
		port++;
end:
		continue;
	}
}
Example #14
static void
emac_init(struct emac_softc *sc)
{
	bus_dma_segment_t segs;
	void *addr;
	int rsegs, err, i;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	uint32_t u;
#if 0
	int mdcdiv = DEFAULT_MDCDIV;
#endif

	callout_init(&sc->emac_tick_ch, 0);

	// ok...
	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
	EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_TCR, 0);			// send nothing
//	(void)EMAC_READ(ETH_ISR);
	u = EMAC_READ(ETH_TSR);
	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
				  | ETH_TSR_IDLE | ETH_TSR_RLE
				  | ETH_TSR_COL|ETH_TSR_OVR)));
	u = EMAC_READ(ETH_RSR);
	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR|ETH_RSR_REC|ETH_RSR_BNA)));

	/* configure EMAC */
	EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);
#if 0
	if (device_cfdata(&sc->sc_dev)->cf_flags)
		mdcdiv = device_cfdata(&sc->sc_dev)->cf_flags;
#endif
	/* set ethernet address */
	EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24)
		   | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
		   | (sc->sc_enaddr[0]));
	EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8)
		   | (sc->sc_enaddr[4]));
	EMAC_WRITE(ETH_SA2L, 0);
	EMAC_WRITE(ETH_SA2H, 0);
	EMAC_WRITE(ETH_SA3L, 0);
	EMAC_WRITE(ETH_SA3H, 0);
	EMAC_WRITE(ETH_SA4L, 0);
	EMAC_WRITE(ETH_SA4H, 0);

	/* Allocate a page of memory for receive queue descriptors */
	sc->rbqlen = (ETH_RDSC_SIZE * (RX_QLEN + 1) * 2 + PAGE_SIZE - 1) / PAGE_SIZE;
	sc->rbqlen *= PAGE_SIZE;
	DPRINTFN(1,("%s: rbqlen=%i\n", __FUNCTION__, sc->rbqlen));

	err = bus_dmamem_alloc(sc->sc_dmat, sc->rbqlen, 0,
		MAX(16384, PAGE_SIZE),	// see EMAC errata why forced to 16384 byte boundary
		&segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamem_map\n", __FUNCTION__));
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, sc->rbqlen,
			&sc->rbqpage, (BUS_DMA_WAITOK|BUS_DMA_COHERENT));
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_create\n", __FUNCTION__));
		err = bus_dmamap_create(sc->sc_dmat, sc->rbqlen, 1,
			sc->rbqlen, MAX(16384, PAGE_SIZE), BUS_DMA_WAITOK,
			&sc->rbqpage_dmamap);
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_load\n", __FUNCTION__));
		err = bus_dmamap_load(sc->sc_dmat, sc->rbqpage_dmamap,
			sc->rbqpage, sc->rbqlen, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0) {
		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));
	}
	sc->rbqpage_dsaddr = sc->rbqpage_dmamap->dm_segs[0].ds_addr;

	bzero(sc->rbqpage, sc->rbqlen);

	/* Set up pointers to start of each queue in kernel addr space.
	 * Each descriptor queue or status queue entry uses 2 words
	 */
	sc->RDSC = (void*)sc->rbqpage;

	/* Populate the RXQ with mbufs */
	sc->rxqi = 0;
	for(i = 0; i < RX_QLEN; i++) {
		struct mbuf *m;

		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, PAGE_SIZE,
			BUS_DMA_WAITOK, &sc->rxq[i].m_dmamap);
		if (err) {
			panic("%s: dmamap_create failed: %i\n", __FUNCTION__, err);
		}
		MGETHDR(m, M_WAIT, MT_DATA);
		MCLGET(m, M_WAIT);
		sc->rxq[i].m = m;
		if (mtod(m, intptr_t) & 3) {
			m_adj(m, mtod(m, intptr_t) & 3);
		}
		err = bus_dmamap_load(sc->sc_dmat, sc->rxq[i].m_dmamap, 
			m->m_ext.ext_buf, MCLBYTES, NULL, 
			BUS_DMA_WAITOK);
		if (err) {
			panic("%s: dmamap_load failed: %i\n", __FUNCTION__, err);
		}
		sc->RDSC[i].Addr = sc->rxq[i].m_dmamap->dm_segs[0].ds_addr
			| (i == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
		sc->RDSC[i].Info = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->rxq[i].m_dmamap, 0,
			MCLBYTES, BUS_DMASYNC_PREREAD);
	}

	/* prepare transmit queue */
	for (i = 0; i < TX_QLEN; i++) {
		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
					(BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW),
					&sc->txq[i].m_dmamap);
		if (err)
			panic("%s: cannot create tx DMA map, error = %i",
			    __FUNCTION__, err);
		sc->txq[i].m = NULL;
	}

	/* Program each queue's start addr, cur addr, and len registers
	 * with the physical addresses. 
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen,
			 BUS_DMASYNC_PREREAD);
	EMAC_WRITE(ETH_RBQP, (uint32_t)sc->rbqpage_dmamap->dm_segs[0].ds_addr);

	/* Set up MII (the MDC = HCLK/32 divisor was set in ETH_CFG above) */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = emac_mii_readreg;
	sc->sc_mii.mii_writereg = emac_mii_writereg;
	sc->sc_mii.mii_statchg = emac_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, emac_mediachange,
		emac_mediastatus);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		MII_OFFSET_ANY, 0);
	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

#if 0
	// enable / disable interrupts
	EMAC_WRITE(ETH_IDR, -1);
	EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
		   | ETH_ISR_RBNA | ETH_ISR_ROVR);
//	(void)EMAC_READ(ETH_ISR); // why

	// enable transmitter / receiver
	EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
		   | ETH_CTL_CSR | ETH_CTL_MPE);
#endif
	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_ioctl = emac_ifioctl;
	ifp->if_start = emac_ifstart;
	ifp->if_watchdog = emac_ifwatchdog;
	ifp->if_init = emac_ifinit;
	ifp->if_stop = emac_ifstop;
	ifp->if_timer = 0;
	ifp->if_softc = sc;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
}
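
The sequence emac_init chains through its if (err == 0) ladder is the canonical bus_dma(9) skeleton that almost every example in this collection repeats: allocate DMA-safe memory, map it into kernel virtual address space, create a map object, and load the buffer into the map, unwinding in reverse order on failure. A minimal sketch of that skeleton, with invented names (example_dma_alloc, struct example_dma), assuming only the NetBSD bus_dma(9) calls already shown above:

struct example_dma {
	bus_dma_tag_t		ed_dmat;
	bus_dma_segment_t	ed_seg;
	bus_dmamap_t		ed_map;
	void			*ed_kva;
	bus_size_t		ed_size;
};

static int
example_dma_alloc(bus_dma_tag_t dmat, bus_size_t size, struct example_dma *ed)
{
	int error, nseg;

	ed->ed_dmat = dmat;
	ed->ed_size = size;

	/* 1: allocate DMA-safe physical memory */
	error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0,
	    &ed->ed_seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error)
		return error;

	/* 2: map it into kernel virtual address space */
	error = bus_dmamem_map(dmat, &ed->ed_seg, nseg, size,
	    &ed->ed_kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		goto fail_alloc;

	/* 3: create a map object describing the future transfer */
	error = bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &ed->ed_map);
	if (error)
		goto fail_map;

	/* 4: load the mapped buffer into the map */
	error = bus_dmamap_load(dmat, ed->ed_map, ed->ed_kva, size,
	    NULL, BUS_DMA_NOWAIT);
	if (error)
		goto fail_create;

	return 0;

fail_create:
	bus_dmamap_destroy(dmat, ed->ed_map);
fail_map:
	bus_dmamem_unmap(dmat, ed->ed_kva, size);
fail_alloc:
	bus_dmamem_free(dmat, &ed->ed_seg, 1);
	return error;
}

The same four calls, in the same order and with the same reverse-order unwinding, recur in nearly every attach function below.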
Example #15
/*
 * sonic_attach:
 *
 *	Attach a SONIC interface to the system.
 */
void
sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	size_t cdatasize;
	uint8_t *nullbuf;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if (sc->sc_32bit)
		cdatasize = sizeof(struct sonic_control_data32);
	else
		cdatasize = sizeof(struct sonic_control_data16);

	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
	     BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize;
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
	     &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		     MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * create and map the pad buffer
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create pad buffer DMA map, error = %d\n", error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load pad buffer DMA map, error = %d\n", error);
		goto fail_6;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Reset the chip to a known state.
	 */
	sonic_reset(sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sonic_ioctl;
	ifp->if_start = sonic_start;
	ifp->if_watchdog = sonic_watchdog;
	ifp->if_init = sonic_init;
	ifp->if_stop = sonic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sonic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(sc->sc_dev,
		    "WARNING: unable to establish shutdown hook\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16,
	    cdatasize + ETHER_PAD_LEN);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
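
sonic_attach adds one twist to the standard sequence: it co-allocates a permanently zeroed pad buffer right after the control data, then loads and PREWRITE-syncs its DMA map once, at attach time. Frames shorter than the Ethernet minimum can later borrow their padding from this shared segment at transmit time with no per-packet allocation or sync. A hedged sketch of the length arithmetic only (the helper name is invented; ETHER_PAD_LEN is assumed to be NetBSD's minimum frame length excluding the CRC):

/*
 * How many pad bytes a frame carrying pktlen bytes still needs; the
 * preloaded nullbuf supplies them.  Illustrative helper only.
 */
static inline u_int
example_pad_needed(u_int pktlen)
{

	return pktlen < ETHER_PAD_LEN ? ETHER_PAD_LEN - pktlen : 0;
}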
Example #16
void
mec_attach(struct device *parent, struct device *self, void *aux)
{
    struct mec_softc *sc = (void *)self;
    struct confargs *ca = aux;
    struct ifnet *ifp = &sc->sc_ac.ac_if;
    uint32_t command;
    char *macaddr;
    struct mii_softc *child;
    bus_dma_segment_t seg;
    int i, err, rseg;

    sc->sc_st = ca->ca_iot;
    if (bus_space_map(sc->sc_st, ca->ca_baseaddr, MEC_NREGS, 0,
                      &sc->sc_sh) != 0) {
        printf(": can't map i/o space\n");
        return;
    }

    /* set up DMA structures */
    sc->sc_dmat = ca->ca_dmat;

    /*
     * Allocate the control data structures, and create and load the
     * DMA map for it.
     */
    if ((err = bus_dmamem_alloc(sc->sc_dmat,
                                sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
                                &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
        printf(": unable to allocate control data, error = %d\n", err);
        goto fail_0;
    }

    /*
     * XXX needs re-think...
     * control data structures contain whole RX data buffer, so
     * BUS_DMA_COHERENT (which disables cache) may cause some performance
     * issue on copying data from the RX buffer to mbuf on normal memory,
     * though we have to make sure all bus_dmamap_sync(9) ops are called
     * properly in that case.
     */
    if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
                              sizeof(struct mec_control_data),
                              (caddr_t *)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
        printf(": unable to map control data, error = %d\n", err);
        goto fail_1;
    }
    memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));

    if ((err = bus_dmamap_create(sc->sc_dmat,
                                 sizeof(struct mec_control_data), 1,
                                 sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
        printf(": unable to create control data DMA map, error = %d\n",
               err);
        goto fail_2;
    }
    if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
                               sc->sc_control_data, sizeof(struct mec_control_data), NULL,
                               BUS_DMA_NOWAIT)) != 0) {
        printf(": unable to load control data DMA map, error = %d\n",
               err);
        goto fail_3;
    }

    /* create TX buffer DMA maps */
    for (i = 0; i < MEC_NTXDESC; i++) {
        if ((err = bus_dmamap_create(sc->sc_dmat,
                                     MCLBYTES, 1, MCLBYTES, 0, 0,
                                     &sc->sc_txsoft[i].txs_dmamap)) != 0) {
            printf(": unable to create tx DMA map %d, error = %d\n",
                   i, err);
            goto fail_4;
        }
    }

    timeout_set(&sc->sc_tick_ch, mec_tick, sc);

    /* get ethernet address from ARCBIOS */
    if ((macaddr = Bios_GetEnvironmentVariable("eaddr")) == NULL) {
        printf(": unable to get MAC address!\n");
        goto fail_4;
    }
    enaddr_aton(macaddr, sc->sc_ac.ac_enaddr);

    /* reset device */
    mec_reset(sc);

    command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);

    printf(": MAC-110 rev %d, address %s\n",
           (command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT,
           ether_sprintf(sc->sc_ac.ac_enaddr));

    /* Done, now attach everything */

    sc->sc_mii.mii_ifp = ifp;
    sc->sc_mii.mii_readreg = mec_mii_readreg;
    sc->sc_mii.mii_writereg = mec_mii_writereg;
    sc->sc_mii.mii_statchg = mec_statchg;

    /* Set up PHY properties */
    ifmedia_init(&sc->sc_mii.mii_media, 0, mec_mediachange,
                 mec_mediastatus);
    mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
               MII_OFFSET_ANY, 0);

    child = LIST_FIRST(&sc->sc_mii.mii_phys);
    if (child == NULL) {
        /* No PHY attached */
        ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
                    0, NULL);
        ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
    } else {
        ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
        sc->sc_phyaddr = child->mii_phy;
    }

    bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
    ifp->if_softc = sc;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = mec_ioctl;
    ifp->if_start = mec_start;
    ifp->if_watchdog = mec_watchdog;
    IFQ_SET_READY(&ifp->if_snd);

    if_attach(ifp);
    IFQ_SET_MAXLEN(&ifp->if_snd, MEC_NTXDESC - 1);
    ether_ifattach(ifp);

    /* establish interrupt */
    BUS_INTR_ESTABLISH(ca, NULL, ca->ca_intr, IST_EDGE, IPL_NET,
                       mec_intr, sc, sc->sc_dev.dv_xname);

    /* set shutdown hook to reset interface on powerdown */
    sc->sc_sdhook = shutdownhook_establish(mec_shutdown, sc);

    return;

    /*
     * Free any resources we've allocated during the failed attach
     * attempt.  Do this in reverse order and fall through.
     */
fail_4:
    for (i = 0; i < MEC_NTXDESC; i++) {
        if (sc->sc_txsoft[i].txs_dmamap != NULL)
            bus_dmamap_destroy(sc->sc_dmat,
                               sc->sc_txsoft[i].txs_dmamap);
    }
    bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
fail_3:
    bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
fail_2:
    bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
                     sizeof(struct mec_control_data));
fail_1:
    bus_dmamem_free(sc->sc_dmat, &seg, rseg);
fail_0:
    return;
}
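
mec_attach recovers the MAC address from the ARCBIOS "eaddr" environment variable as a colon-separated string and converts it with enaddr_aton. A standalone sketch of what such a parser does (illustrative only, not the kernel implementation):

#include <stdio.h>
#include <stdint.h>

static int
example_enaddr_aton(const char *s, uint8_t ea[6])
{
	unsigned v[6];
	int i;

	/* expect the firmware's "xx:xx:xx:xx:xx:xx" form */
	if (sscanf(s, "%x:%x:%x:%x:%x:%x",
	    &v[0], &v[1], &v[2], &v[3], &v[4], &v[5]) != 6)
		return -1;
	for (i = 0; i < 6; i++)
		ea[i] = (uint8_t)v[i];
	return 0;
}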
Example #17
int
siop_common_attach(struct siop_common_softc *sc)
{
	int error, i;
	bus_dma_segment_t seg;
	int rseg;

	/*
	 * Allocate DMA-safe memory for the script and map it.
	 */
	if ((sc->features & SF_CHIP_RAM) == 0) {
		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
		    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate script DMA memory, "
			    "error = %d\n", error);
			return error;
		}
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
		    (void **)&sc->sc_script,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "unable to map script DMA memory, "
			    "error = %d\n", error);
			return error;
		}
		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
		    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create script DMA map, "
			    "error = %d\n", error);
			return error;
		}
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
		    sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "unable to load script DMA map, "
			    "error = %d\n", error);
			return error;
		}
		sc->sc_scriptaddr =
		    sc->sc_scriptdma->dm_segs[0].ds_addr;
		sc->ram_size = PAGE_SIZE;
	}

	sc->sc_adapt.adapt_dev = sc->sc_dev;
	sc->sc_adapt.adapt_nchannels = 1;
	sc->sc_adapt.adapt_openings = 0;
	sc->sc_adapt.adapt_ioctl = siop_ioctl;
	sc->sc_adapt.adapt_minphys = minphys;

	memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
	sc->sc_chan.chan_adapter = &sc->sc_adapt;
	sc->sc_chan.chan_bustype = &scsi_bustype;
	sc->sc_chan.chan_channel = 0;
	sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
	sc->sc_chan.chan_ntargets =
	    (sc->features & SF_BUS_WIDE) ? 16 : 8;
	sc->sc_chan.chan_nluns = 8;
	sc->sc_chan.chan_id =
	    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
	if (sc->sc_chan.chan_id == 0 ||
	    sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
		sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;

	for (i = 0; i < 16; i++)
		sc->targets[i] = NULL;

	/* find min/max sync period for this chip */
	sc->st_maxsync = 0;
	sc->dt_maxsync = 0;
	sc->st_minsync = 255;
	sc->dt_minsync = 255;
	for (i = 0; i < __arraycount(scf_period); i++) {
		if (sc->clock_period != scf_period[i].clock)
			continue;
		if (sc->st_maxsync < scf_period[i].period)
			sc->st_maxsync = scf_period[i].period;
		if (sc->st_minsync > scf_period[i].period)
			sc->st_minsync = scf_period[i].period;
	}
	/* untouched sentinels mean no entry matched this clock_period */
	if (sc->st_maxsync == 0 || sc->st_minsync == 255)
		panic("siop: can't find my sync parameters");
	for (i = 0; i < __arraycount(dt_scf_period); i++) {
		if (sc->clock_period != dt_scf_period[i].clock)
			continue;
		if (sc->dt_maxsync < dt_scf_period[i].period)
			sc->dt_maxsync = dt_scf_period[i].period;
		if (sc->dt_minsync > dt_scf_period[i].period)
			sc->dt_minsync = dt_scf_period[i].period;
	}
	if (sc->dt_maxsync == 0 || sc->dt_minsync == 255)
		panic("siop: can't find my sync parameters");
	return 0;
}
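
The min/max search above depends on sentinel initial values: maxsync starts at 0 and minsync at 255, so a clock_period with no table entry leaves both untouched, which is exactly what the panic tests detect. A standalone illustration with an invented table:

#include <stdio.h>

struct entry { int clock, period; };

int
main(void)
{
	static const struct entry tab[] = { { 40, 25 }, { 40, 50 }, { 80, 12 } };
	int clock = 40, minp = 255, maxp = 0;
	size_t i;

	for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++) {
		if (tab[i].clock != clock)
			continue;
		if (tab[i].period > maxp)
			maxp = tab[i].period;
		if (tab[i].period < minp)
			minp = tab[i].period;
	}
	if (maxp == 0 || minp == 255)	/* sentinels untouched: no match */
		printf("no parameters for clock %d\n", clock);
	else
		printf("period range [%d, %d]\n", minp, maxp);
	return 0;
}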
Example #18
static void
ste_attach(device_t parent, device_t self, void *aux)
{
	struct ste_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	int ioh_valid, memh_valid;
	int i, rseg, error;
	const struct ste_product *sp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2];

	callout_init(&sc->sc_tick_ch, 0);

	sp = ste_lookup(pa);
	if (sp == NULL) {
		printf("\n");
		panic("ste_attach: impossible");
	}

	printf(": %s\n", sp->ste_name);

	/*
	 * Map the device.
	 */
	ioh_valid = (pci_mapreg_map(pa, STE_PCI_IOBA,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, STE_PCI_MMBA,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		aprint_error_dev(&sc->sc_dev, "unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Enable bus mastering. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(&sc->sc_dev, "cannot activate %d\n",
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(&sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", device_xname(&sc->sc_dev), intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ste_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct ste_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n",
		    error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ste_control_data), 1,
	    sizeof(struct ste_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ste_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < STE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    STE_NTXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to create tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < STE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to create rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ste_reset(sc, AC_GlobalReset | AC_RxReset | AC_TxReset | AC_DMA |
	    AC_FIFO | AC_Network | AC_Host | AC_AutoInit | AC_RstOut);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	for (i = 0; i < 3; i++) {
		ste_read_eeprom(sc, STE_EEPROM_StationAddress0 + i, &myea[i]);
		myea[i] = le16toh(myea[i]);
	}
	memcpy(enaddr, myea, sizeof(enaddr));

	printf("%s: Ethernet address %s\n", device_xname(&sc->sc_dev),
	    ether_sprintf(enaddr));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ste_mii_readreg;
	sc->sc_mii.mii_writereg = ste_mii_writereg;
	sc->sc_mii.mii_statchg = ste_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_start = ste_start;
	ifp->if_watchdog = ste_watchdog;
	ifp->if_init = ste_init;
	ifp->if_stop = ste_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Default the transmit threshold to 128 bytes.
	 */
	sc->sc_txthresh = 128;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
		sc->sc_DMACtrl |= DC_MWIDisable;

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ste_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(&sc->sc_dev));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < STE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < STE_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ste_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
Example #19
void
oosiop_attach(struct oosiop_softc *sc)
{
	bus_size_t scrsize;
	bus_dma_segment_t seg;
	struct oosiop_cb *cb;
	int err, i, nseg;

	/*
	 * Allocate DMA-safe memory for the script and map it.
	 */
	scrsize = sizeof(oosiop_script);
	err = bus_dmamem_alloc(sc->sc_dmat, scrsize, PAGE_SIZE, 0, &seg, 1,
	    &nseg, BUS_DMA_NOWAIT);
	if (err) {
		printf(": failed to allocate script memory, err=%d\n", err);
		return;
	}
	err = bus_dmamem_map(sc->sc_dmat, &seg, nseg, scrsize,
	    (caddr_t *)&sc->sc_scr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (err) {
		printf(": failed to map script memory, err=%d\n", err);
		return;
	}
	err = bus_dmamap_create(sc->sc_dmat, scrsize, 1, scrsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_scrdma);
	if (err) {
		printf(": failed to create script map, err=%d\n", err);
		return;
	}
	err = bus_dmamap_load(sc->sc_dmat, sc->sc_scrdma, sc->sc_scr, scrsize,
	    NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err) {
		printf(": failed to load script map, err=%d\n", err);
		return;
	}
	sc->sc_scrbase = sc->sc_scrdma->dm_segs[0].ds_addr;

	/* Initialize command block array */
	TAILQ_INIT(&sc->sc_free_cb);
	TAILQ_INIT(&sc->sc_cbq);
	if (oosiop_alloc_cb(sc, OOSIOP_NCB) != 0)
		return;

	/* Use the first cb for the reselection msgin buffer */
	cb = TAILQ_FIRST(&sc->sc_free_cb);
	sc->sc_reselbuf = cb->xferdma->dm_segs[0].ds_addr +
	    offsetof(struct oosiop_xfer, msgin[0]);

	for (i = 0; i < OOSIOP_NTGT; i++) {
		sc->sc_tgt[i].nexus = NULL;
		sc->sc_tgt[i].flags = 0;
	}

	/* Setup asynchronous clock divisor parameters */
	if (sc->sc_freq <= 25000000) {
		sc->sc_ccf = 10;
		sc->sc_dcntl = OOSIOP_DCNTL_CF_1;
	} else if (sc->sc_freq <= 37500000) {
		sc->sc_ccf = 15;
		sc->sc_dcntl = OOSIOP_DCNTL_CF_1_5;
	} else if (sc->sc_freq <= 50000000) {
		sc->sc_ccf = 20;
		sc->sc_dcntl = OOSIOP_DCNTL_CF_2;
	} else {
		sc->sc_ccf = 30;
		sc->sc_dcntl = OOSIOP_DCNTL_CF_3;
	}

	if (sc->sc_chip == OOSIOP_700)
		sc->sc_minperiod = oosiop_period(sc, 4, sc->sc_ccf);
	else
		sc->sc_minperiod = oosiop_period(sc, 4, 10);

	if (sc->sc_minperiod < 25)
		sc->sc_minperiod = 25;	/* limit to 10MB/s */

	printf(": NCR53C700%s rev %d, %dMHz, SCSI ID %d\n",
	    sc->sc_chip == OOSIOP_700_66 ? "-66" : "",
	    oosiop_read_1(sc, OOSIOP_CTEST7) >> 4,
	    sc->sc_freq / 1000000, sc->sc_id);
	/*
	 * Reset all
	 */
	oosiop_reset(sc);
	oosiop_reset_bus(sc);

	/*
	 * Start SCRIPTS processor
	 */
	oosiop_load_script(sc);
	sc->sc_active = 0;
	oosiop_write_4(sc, OOSIOP_DSP, sc->sc_scrbase + Ent_wait_reselect);

	/*
	 * Fill in the scsipi_adapter.
	 */
	sc->sc_adapter.adapt_dev = &sc->sc_dev;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = OOSIOP_NCB;
	sc->sc_adapter.adapt_max_periph = 1;
	sc->sc_adapter.adapt_ioctl = NULL;
	sc->sc_adapter.adapt_minphys = oosiop_minphys;
	sc->sc_adapter.adapt_request = oosiop_scsipi_request;

	/*
	 * Fill in the scsipi_channel.
	 */
	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = OOSIOP_NTGT;
	sc->sc_channel.chan_nluns = 8;
	sc->sc_channel.chan_id = sc->sc_id;

	/*
	 * Now try to attach all the sub devices.
	 */
	config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}
Example #20
static int
ld_virtio_alloc_reqs(struct ld_virtio_softc *sc, int qsize)
{
	int allocsize, r, rsegs, i;
	struct ld_softc *ld = &sc->sc_ld;
	void *vaddr;

	allocsize = sizeof(struct virtio_blk_req) * qsize;
	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
			     &sc->sc_reqs_seg, 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory allocation failed, size %d, "
				 "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(sc->sc_virtio->sc_dmat,
			   &sc->sc_reqs_seg, 1, allocsize,
			   &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory map failed, "
				 "error code %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
				      offsetof(struct virtio_blk_req, vr_bp),
				      1,
				      offsetof(struct virtio_blk_req, vr_bp),
				      0,
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
				      &vr->vr_cmdsts);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "command dmamap creation failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
				    &vr->vr_hdr,
				    offsetof(struct virtio_blk_req, vr_bp),
				    NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "command dmamap load failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
				      ld->sc_maxxfer,
				      (ld->sc_maxxfer / NBPG) +
				      VIRTIO_BLK_MIN_SEGMENTS,
				      ld->sc_maxxfer,
				      0,
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
				      &vr->vr_payload);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "payload dmamap creation failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
	}
	return 0;

err_reqs:
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
					   vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
					   vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, sc->sc_reqs, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_seg, 1);
err_none:
	return -1;
}
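
ld_virtio_alloc_reqs sizes the command/status map with offsetof(struct virtio_blk_req, vr_bp), so only the device-visible prefix of each request is exposed to DMA while the trailing driver-private fields never leave the host. A sketch of the idiom with an invented structure:

#include <stddef.h>
#include <stdint.h>

struct example_req {
	uint32_t	 r_type;	/* device-visible header ... */
	uint64_t	 r_sector;
	uint8_t		 r_status;	/* ... through the status byte */
	void		*r_bp;		/* driver-private, never DMA'd */
};

/* map only bytes [0, offsetof(struct example_req, r_bp)) */
static const size_t example_dma_len = offsetof(struct example_req, r_bp);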
Example #21
void
stge_attach(struct device *parent, struct device *self, void *aux)
{
	struct stge_softc *sc = (struct stge_softc *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	bus_size_t iosize;
	int ioh_valid, memh_valid;
	int i, rseg, error;

	timeout_set(&sc->sc_timeout, stge_tick, sc);

	sc->sc_rev = PCI_REVISION(pa->pa_class);

	/*
	 * Map the device.
	 */
	ioh_valid = (pci_mapreg_map(pa, STGE_PCI_IOBA,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, &iosize, 0) == 0);
	memh_valid = (pci_mapreg_map(pa, STGE_PCI_MMBA,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, &iosize, 0) == 0);

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Get it out of power save mode if needed. */
	pci_set_powerstate(pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": unable to map interrupt\n");
		goto fail_0;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, stge_intr, sc,
				       sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_0;
	}
	printf(": %s", intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct stge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct stge_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct stge_control_data), 1,
	    sizeof(struct stge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct stge_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.  Note that rev B.3
	 * and earlier seem to have a bug regarding multi-fragment
	 * packets.  We need to limit the number of Tx segments on
	 * such chips to 1.
	 */
	for (i = 0; i < STGE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat,
		    STGE_JUMBO_FRAMELEN, STGE_NTXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < STGE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers. For Sundance 1023 you can only read it
	 * from EEPROM.
	 */
	if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_SUNDANCE_ST1023) {
		sc->sc_arpcom.ac_enaddr[0] = CSR_READ_2(sc,
		    STGE_StationAddress0) & 0xff;
		sc->sc_arpcom.ac_enaddr[1] = CSR_READ_2(sc,
		    STGE_StationAddress0) >> 8;
		sc->sc_arpcom.ac_enaddr[2] = CSR_READ_2(sc,
		    STGE_StationAddress1) & 0xff;
		sc->sc_arpcom.ac_enaddr[3] = CSR_READ_2(sc,
		    STGE_StationAddress1) >> 8;
		sc->sc_arpcom.ac_enaddr[4] = CSR_READ_2(sc,
		    STGE_StationAddress2) & 0xff;
		sc->sc_arpcom.ac_enaddr[5] = CSR_READ_2(sc,
		    STGE_StationAddress2) >> 8;
		sc->sc_stge1023 = 0;
	} else {
Example #22
/* ARGSUSED */
STATIC void
gtmpscattach(device_t parent, device_t self, void *aux)
{
	struct gtmpsc_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	bus_dma_segment_t segs;
	struct tty *tp;
	int rsegs, err, unit;
	void *kva;

	aprint_naive("\n");
	aprint_normal(": Multi-Protocol Serial Controller\n");

	if (mva->mva_unit != MVA_UNIT_DEFAULT)
		unit = mva->mva_unit;
	else
		unit = (mva->mva_offset == GTMPSC_BASE(0)) ? 0 : 1;

#ifdef MPSC_CONSOLE
	if (cn_tab == &gtmpsc_consdev &&
	    cn_tab->cn_dev == makedev(0, unit)) {
		gtmpsc_cn_softc.sc_dev = self;
		memcpy(sc, &gtmpsc_cn_softc, sizeof(struct gtmpsc_softc));
		sc->sc_flags = GTMPSC_CONSOLE;
	} else
#endif
	{
		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
		    mva->mva_offset, mva->mva_size, &sc->sc_mpsch)) {
			aprint_error_dev(self, "Cannot map MPSC registers\n");
			return;
		}
		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
		    GTSDMA_BASE(unit), GTSDMA_SIZE, &sc->sc_sdmah)) {
			aprint_error_dev(self, "Cannot map SDMA registers\n");
			return;
		}
		sc->sc_dev = self;
		sc->sc_unit = unit;
		sc->sc_iot = mva->mva_iot;
		sc->sc_dmat = mva->mva_dmat;

		err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
		    &segs, 1, &rsegs, BUS_DMA_NOWAIT);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamem_alloc error 0x%x\n", err);
			goto fail0;
		}
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE, &kva,
		    BUS_DMA_NOWAIT);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamem_map error 0x%x\n", err);
			goto fail1;
		}
		memset(kva, 0, PAGE_SIZE);	/* paranoid/superfluous */
		sc->sc_poll_sdmapage = kva;

		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_polltx_t), 1,
		   sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT,
		   &sc->sc_txdma_map);
		if (err != 0) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamap_create error 0x%x\n", err);
			goto fail2;
		}
		err = bus_dmamap_load(sc->sc_dmat, sc->sc_txdma_map,
		    sc->sc_poll_sdmapage->tx, sizeof(gtmpsc_polltx_t),
		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
		if (err != 0) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamap_load tx error 0x%x\n", err);
			goto fail3;
		}
		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_pollrx_t), 1,
		   sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
		   &sc->sc_rxdma_map);
		if (err != 0) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamap_create rx error 0x%x\n", err);
			goto fail4;
		}
		err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxdma_map,
		    sc->sc_poll_sdmapage->rx, sizeof(gtmpsc_pollrx_t),
		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
		if (err != 0) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamap_load rx error 0x%x\n", err);
			goto fail5;
		}

		sc->sc_brg = unit;		/* XXXXX */
		sc->sc_baudrate = GT_MPSC_DEFAULT_BAUD_RATE;
	}
	aprint_normal_dev(self, "with SDMA offset 0x%04x-0x%04x\n",
	    GTSDMA_BASE(unit), GTSDMA_BASE(unit) + GTSDMA_SIZE - 1);

	sc->sc_rx_ready = 0;
	sc->sc_tx_busy = 0;
	sc->sc_tx_done = 0;
	sc->sc_tx_stopped = 0;
	sc->sc_heldchange = 0;

	gtmpsc_txdesc_init(sc);
	gtmpsc_rxdesc_init(sc);

	sc->sc_tty = tp = tty_alloc();
	tp->t_oproc = gtmpscstart;
	tp->t_param = gtmpscparam;
	tty_attach(tp);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);

	/*
	 * clear any pending SDMA interrupts for this unit
	 */
	(void) gt_sdma_icause(device_parent(sc->sc_dev),
	    SDMA_INTR_RXBUF(sc->sc_unit) |
	    SDMA_INTR_RXERR(sc->sc_unit) |
	    SDMA_INTR_TXBUF(sc->sc_unit) |
	    SDMA_INTR_TXEND(sc->sc_unit));

	sc->sc_si = softint_establish(SOFTINT_SERIAL, gtmpsc_softintr, sc);
	if (sc->sc_si == NULL)
		panic("mpscattach: cannot softint_establish IPL_SOFTSERIAL");

	shutdownhook_establish(gtmpsc_shutdownhook, sc);

	gtmpscinit_stop(sc);
	gtmpscinit_start(sc);

	if (sc->sc_flags & GTMPSC_CONSOLE) {
		int maj;

		/* locate the major number */
		maj = cdevsw_lookup_major(&gtmpsc_cdevsw);

		tp->t_dev = cn_tab->cn_dev =
		    makedev(maj, device_unit(sc->sc_dev));

		aprint_normal_dev(self, "console\n");
	}

#ifdef KGDB
	/*
	 * Allow kgdb to "take over" this port.  If this is
	 * the kgdb device, it has exclusive use.
	 */
	if (sc->sc_unit == gtmpsckgdbport) {
#ifdef MPSC_CONSOLE
		if (sc->sc_unit == MPSC_CONSOLE) {
			aprint_error_dev(self,
			    "(kgdb): cannot share with console\n");
			return;
		}
#endif

		sc->sc_flags |= GTMPSC_KGDB;
		aprint_normal_dev(self, "kgdb\n");

		gtmpsc_txflush(sc);

		kgdb_attach(gtmpsc_kgdb_getc, gtmpsc_kgdb_putc, NULL);
		kgdb_dev = 123;	/* unneeded, only to satisfy some tests */
		gtmpsc_kgdb_attached = 1;
		kgdb_connect(1);
	}
#endif /* KGDB */

	return;


fail5:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxdma_map);
fail4:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_txdma_map);
fail3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_txdma_map);
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
fail1:
	bus_dmamem_free(sc->sc_dmat, &segs, 1);
fail0:
	return;
}
Example #23
void
leattach_ledma(device_t parent, device_t self, void *aux)
{
	struct le_softc *lesc = device_private(self);
	struct lance_softc *sc = &lesc->sc_am7990.lsc;
	struct lsi64854_softc *lsi = device_private(parent);
	struct sbus_attach_args *sa = aux;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	int rseg, error;

	sc->sc_dev = self;
	lesc->sc_bustag = sa->sa_bustag;

	/* Establish link to `ledma' device */
	lesc->sc_dma = lsi;
	lesc->sc_dma->sc_client = lesc;

	/* Map device registers */
	if (sbus_bus_map(sa->sa_bustag,
			 sa->sa_slot,
			 sa->sa_offset,
			 sa->sa_size,
			 0, &lesc->sc_reg) != 0) {
		aprint_error(": cannot map registers\n");
		return;
	}

	/* Allocate buffer memory */
	sc->sc_memsize = MEMSIZE;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, MEMSIZE, 1, MEMSIZE,
					LEDMA_BOUNDARY, BUS_DMA_NOWAIT,
					&lesc->sc_dmamap)) != 0) {
		aprint_error(": DMA map create error %d\n", error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, MEMSIZE, 0, LEDMA_BOUNDARY,
				 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": DMA buffer alloc error %d\n",error);
		return;
	}

	/* Map DMA buffer into kernel space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, MEMSIZE,
			       (void **)&sc->sc_mem,
			       BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error(": DMA buffer map error %d\n", error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load DMA buffer */
	if ((error = bus_dmamap_load(dmatag, lesc->sc_dmamap, sc->sc_mem,
			MEMSIZE, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error(": DMA buffer map load error %d\n", error);
		bus_dmamem_free(dmatag, &seg, rseg);
		bus_dmamem_unmap(dmatag, sc->sc_mem, MEMSIZE);
		return;
	}

	lesc->sc_laddr = lesc->sc_dmamap->dm_segs[0].ds_addr;
	sc->sc_addr = lesc->sc_laddr & 0xffffff;
	sc->sc_conf3 = LE_C3_BSWP | LE_C3_ACON | LE_C3_BCON;


	/* Assume SBus is grandparent */
	lesc->sc_sd.sd_reset = (void *)lance_reset;
	sbus_establish(&lesc->sc_sd, parent);

	sc->sc_mediachange = lemediachange;
	sc->sc_mediastatus = lemediastatus;
	sc->sc_supmedia = lemedia;
	sc->sc_nsupmedia = NLEMEDIA;
	sc->sc_defaultmedia = IFM_ETHER|IFM_AUTO;

	prom_getether(sa->sa_node, sc->sc_enaddr);

	sc->sc_copytodesc = lance_copytobuf_contig;
	sc->sc_copyfromdesc = lance_copyfrombuf_contig;
	sc->sc_copytobuf = lance_copytobuf_contig;
	sc->sc_copyfrombuf = lance_copyfrombuf_contig;
	sc->sc_zerobuf = lance_zerobuf_contig;

	sc->sc_rdcsr = lerdcsr;
	sc->sc_wrcsr = lewrcsr;
	sc->sc_hwinit = lehwinit;
	sc->sc_nocarrier = lenocarrier;
	sc->sc_hwreset = lehwreset;

	/* Establish interrupt handler */
	if (sa->sa_nintr != 0)
		(void)bus_intr_establish(sa->sa_bustag, sa->sa_pri, IPL_NET,
					 am7990_intr, sc);

	am7990_config(&lesc->sc_am7990);

	/* now initialize DMA */
	lehwreset(sc);
}
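
The masking near the end of leattach_ledma is not decoration: the Am7990 LANCE core drives only 24 address bits, so sc_addr keeps the low 24 bits of the loaded segment address, while the LEDMA_BOUNDARY argument to bus_dmamap_create keeps the buffer from crossing a window the DMA engine cannot span. A one-line illustration (value invented):

#include <stdint.h>

/* the LANCE sees the bus address modulo 16 MB */
static inline uint32_t
lance_chip_addr(uint32_t busaddr)
{
	return busaddr & 0xffffff;	/* e.g. 0x0f234000 -> 0x00234000 */
}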
Example #24
void
njata32_attach(struct njata32_softc *sc)
{
	bus_addr_t dmaaddr;
	int i, devno, error;
	struct wdc_regs *wdr;

	/*
	 * allocate DMA resource
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct njata32_dma_page), PAGE_SIZE, 0,
	    &sc->sc_sgt_seg, 1, &sc->sc_sgt_nsegs, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to allocate sgt page, error = %d\n",
		    NJATA32NAME(sc), error);
		return;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_sgt_seg,
	    sc->sc_sgt_nsegs, sizeof(struct njata32_dma_page),
	    (void **)&sc->sc_sgtpg,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error("%s: unable to map sgt page, error = %d\n",
		    NJATA32NAME(sc), error);
		goto fail1;
	}
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct njata32_dma_page), 1,
	    sizeof(struct njata32_dma_page), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_sgt)) != 0) {
		aprint_error("%s: unable to create sgt DMA map, error = %d\n",
		    NJATA32NAME(sc), error);
		goto fail2;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_sgt,
	    sc->sc_sgtpg, sizeof(struct njata32_dma_page),
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to load sgt DMA map, error = %d\n",
		    NJATA32NAME(sc), error);
		goto fail3;
	}

	dmaaddr = sc->sc_dmamap_sgt->dm_segs[0].ds_addr;

	for (devno = 0; devno < NJATA32_NUM_DEV; devno++) {
		sc->sc_dev[devno].d_sgt = sc->sc_sgtpg->dp_sg[devno];
		sc->sc_dev[devno].d_sgt_dma = dmaaddr +
		    offsetof(struct njata32_dma_page, dp_sg[devno]);

		error = bus_dmamap_create(sc->sc_dmat,
		    NJATA32_MAX_XFER,		/* max total map size */
		    NJATA32_NUM_SG,		/* max number of segments */
		    NJATA32_SGT_MAXSEGLEN,	/* max size of a segment */
		    0,				/* boundary */
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_dev[devno].d_dmamap_xfer);
		if (error) {
			aprint_error("%s: failed to create DMA map "
			    "(error = %d)\n", NJATA32NAME(sc), error);
			goto fail4;
		}
	}

	/* device properties */
	sc->sc_wdcdev.sc_atac.atac_cap =
	    ATAC_CAP_DATA16 | ATAC_CAP_DATA32 | ATAC_CAP_PIOBM;
	sc->sc_wdcdev.irqack = njata32_irqack;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = NJATA32_NCHAN;	/* 1 */
	sc->sc_wdcdev.sc_atac.atac_pio_cap = NJATA32_MODE_MAX_PIO;
#if 0	/* ATA DMA is currently unused */
	sc->sc_wdcdev.sc_atac.atac_dma_cap = NJATA32_MODE_MAX_DMA;
#endif
	sc->sc_wdcdev.sc_atac.atac_set_modes = njata32_setup_channel;

	/* DMA control functions */
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = njata32_dma_init;
	sc->sc_wdcdev.piobm_start = njata32_piobm_start;
	sc->sc_wdcdev.dma_finish = njata32_dma_finish;
	sc->sc_wdcdev.piobm_done = njata32_piobm_done;

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_EXTRA_RESETS;

	sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;

	/* only one channel */
	sc->sc_wdc_chanarray[0] = &sc->sc_ch[0].ch_ata_channel;
	sc->sc_ch[0].ch_ata_channel.ch_channel = 0;
	sc->sc_ch[0].ch_ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	sc->sc_ch[0].ch_ata_channel.ch_queue = &sc->sc_wdc_chqueue;
	sc->sc_ch[0].ch_ata_channel.ch_ndrive = 2; /* max number of drives */

	/* map ATA registers */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(NJATA32_REGT(sc), NJATA32_REGH(sc),
		    NJATA32_OFFSET_WDCREGS + i,
		    i == wd_data ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error("%s: couldn't subregion cmd regs\n",
			    NJATA32NAME(sc));
			goto fail4;
		}
	}
	wdc_init_shadow_regs(&sc->sc_ch[0].ch_ata_channel);
	wdr->data32iot = NJATA32_REGT(sc);
	wdr->data32ioh = wdr->cmd_iohs[wd_data];

	/* map ATA ctl reg */
	wdr->ctl_iot = NJATA32_REGT(sc);
	if (bus_space_subregion(NJATA32_REGT(sc), NJATA32_REGH(sc),
	    NJATA32_REG_WD_ALTSTATUS, 1, &wdr->ctl_ioh) != 0) {
		aprint_error("%s: couldn't subregion ctl regs\n",
		    NJATA32NAME(sc));
		goto fail4;
	}

	sc->sc_flags |= NJATA32_CMDPG_MAPPED;

	/* use flags value as busmaster wait */
	if ((sc->sc_atawait =
	    (uint8_t)device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags))
		aprint_normal("%s: ATA wait = %#x\n",
		    NJATA32NAME(sc), sc->sc_atawait);

	njata32_init(sc, cold);

	wdcattach(&sc->sc_ch[0].ch_ata_channel);

	return;

	/*
	 * cleanup
	 */
fail4:	while (--devno >= 0) {
		bus_dmamap_destroy(sc->sc_dmat,
		    sc->sc_dev[devno].d_dmamap_xfer);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap_sgt);
fail3:	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_sgt);
fail2:	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_sgtpg,
	    sizeof(struct njata32_dma_page));
fail1:	bus_dmamem_free(sc->sc_dmat, &sc->sc_sgt_seg, sc->sc_sgt_nsegs);
}
Example #25
void
oosiop_attach(struct oosiop_softc *sc)
{
	struct scsibus_attach_args saa;
	bus_size_t scrsize;
	bus_dma_segment_t seg;
	struct oosiop_cb *cb;
	int err, i, nseg;

	/*
	 * Allocate DMA-safe memory for the script and map it.
	 */
	scrsize = round_page(sizeof(oosiop_script));
	err = bus_dmamem_alloc(sc->sc_dmat, scrsize, PAGE_SIZE, 0, &seg, 1,
	    &nseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (err) {
		printf(": failed to allocate script memory, err=%d\n", err);
		return;
	}
	err = bus_dmamem_map(sc->sc_dmat, &seg, nseg, scrsize,
	    (caddr_t *)&sc->sc_scr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (err) {
		printf(": failed to map script memory, err=%d\n", err);
		return;
	}
	err = bus_dmamap_create(sc->sc_dmat, scrsize, 1, scrsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_scrdma);
	if (err) {
		printf(": failed to create script map, err=%d\n", err);
		return;
	}
	err = bus_dmamap_load_raw(sc->sc_dmat, sc->sc_scrdma,
	    &seg, nseg, scrsize, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err) {
		printf(": failed to load script map, err=%d\n", err);
		return;
	}
	sc->sc_scrbase = sc->sc_scrdma->dm_segs[0].ds_addr;

	/* Initialize command block array */
	TAILQ_INIT(&sc->sc_free_cb);
	TAILQ_INIT(&sc->sc_cbq);
	if (oosiop_alloc_cb(sc, OOSIOP_NCB) != 0)
		return;

	/* Use the first cb for the reselection msgin buffer */
	cb = TAILQ_FIRST(&sc->sc_free_cb);
	sc->sc_reselbuf = cb->xferdma->dm_segs[0].ds_addr +
	    offsetof(struct oosiop_xfer, msgin[0]);

	for (i = 0; i < OOSIOP_NTGT; i++) {
		sc->sc_tgt[i].nexus = NULL;
		sc->sc_tgt[i].flags = 0;
	}

	/* Setup asynchronous clock divisor parameters */
	if (sc->sc_freq <= 25000000) {
		sc->sc_ccf = 10;
		sc->sc_dcntl = OOSIOP_DCNTL_CF_1;
	} else if (sc->sc_freq <= 37500000) {
		sc->sc_ccf = 15;
		sc->sc_dcntl = OOSIOP_DCNTL_CF_1_5;
	} else if (sc->sc_freq <= 50000000) {
		sc->sc_ccf = 20;
		sc->sc_dcntl = OOSIOP_DCNTL_CF_2;
	} else {
		sc->sc_ccf = 30;
		sc->sc_dcntl = OOSIOP_DCNTL_CF_3;
	}

	if (sc->sc_chip == OOSIOP_700)
		sc->sc_minperiod = oosiop_period(sc, 4, sc->sc_ccf);
	else
		sc->sc_minperiod = oosiop_period(sc, 4, 10);

	if (sc->sc_minperiod < 25)
		sc->sc_minperiod = 25;	/* limit to 10MB/s */

	mtx_init(&sc->sc_cb_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, oosiop_cb_alloc, oosiop_cb_free);

	printf(": NCR53C700%s rev %d, %dMHz\n",
	    sc->sc_chip == OOSIOP_700_66 ? "-66" : "",
	    oosiop_read_1(sc, OOSIOP_CTEST7) >> 4,
	    sc->sc_freq / 1000000);
	/*
	 * Reset all
	 */
	oosiop_reset(sc, TRUE);
	oosiop_reset_bus(sc);

	/*
	 * Start SCRIPTS processor
	 */
	oosiop_load_script(sc);
	sc->sc_active = 0;
	oosiop_write_4(sc, OOSIOP_DSP, sc->sc_scrbase + Ent_wait_reselect);

	/*
	 * Fill in the sc_link.
	 */
	sc->sc_link.adapter = &oosiop_adapter;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.openings = 1;	/* XXX */
	sc->sc_link.adapter_buswidth = OOSIOP_NTGT;
	sc->sc_link.adapter_target = sc->sc_id;
	sc->sc_link.pool = &sc->sc_iopool;
	sc->sc_link.quirks = ADEV_NODOORLOCK;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	/*
	 * Now try to attach all the sub devices.
	 */
	config_found(&sc->sc_dev, &saa, scsiprint);
}
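
Comparing this OpenBSD oosiop_attach with the one in Example #19 shows two refinements: the script allocation is rounded up to a page and pre-zeroed with BUS_DMA_ZERO, and the map is loaded with bus_dmamap_load_raw() from the raw segment list instead of through the kernel mapping. Side by side, using the calls exactly as they appear in the two examples:

/* Example #19: load through the kernel virtual mapping of the script */
err = bus_dmamap_load(sc->sc_dmat, sc->sc_scrdma, sc->sc_scr, scrsize,
    NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);

/* this version: load straight from the segments bus_dmamem_alloc returned */
err = bus_dmamap_load_raw(sc->sc_dmat, sc->sc_scrdma,
    &seg, nseg, scrsize, BUS_DMA_NOWAIT | BUS_DMA_WRITE);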
Example #26
/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
sgec_attach(struct ze_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ze_tdes *tp;
	struct ze_rdes *rp;
	bus_dma_segment_t seg;
	int i, rseg, error;

	/*
	 * Allocate DMA-safe memory for the descriptors and set it up.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ze_cdata),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct ze_cdata),
	    (void **)&sc->sc_zedata, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error) {
		aprint_error(
		    ": unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ze_cdata), 1,
	    sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT, &sc->sc_cmap);
	if (error) {
		aprint_error(
		    ": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap, sc->sc_zedata,
	    sizeof(struct ze_cdata), NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error(
		    ": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_zedata, 0, sizeof(struct ze_cdata));

	/*
	 * Create the transmit descriptor DMA maps.
	 */
	for (i = 0; error == 0 && i < TXDESCS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    TXDESCS - 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]);
	}
	if (error) {
		aprint_error(": unable to create tx DMA map %d, error = %d\n",
		    i, error);
		goto fail_4;
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; error == 0 && i < RXDESCS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rcvmap[i]);
	}
	if (error) {
		aprint_error(": unable to create rx DMA map %d, error = %d\n",
		    i, error);
		goto fail_5;
	}

	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; error == 0 && i < RXDESCS; i++) {
		error = ze_add_rxbuf(sc, i);
	}

	if (error) {
		aprint_error(
		    ": unable to allocate or map rx buffer %d, error = %d\n",
		    i, error);
		goto fail_6;
	}

	/* For vmstat -i. */
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(sc->sc_dev), "intr");
	evcnt_attach_dynamic(&sc->sc_rxintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "rx intr");
	evcnt_attach_dynamic(&sc->sc_txintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx intr");
	evcnt_attach_dynamic(&sc->sc_txdraincnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx drain");
	evcnt_attach_dynamic(&sc->sc_nobufintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "nobuf intr");
	evcnt_attach_dynamic(&sc->sc_nointrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "no intr");

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */
	sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;

	rp = sc->sc_zedata->zc_recv;
	rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
	rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
	rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;

	tp = sc->sc_zedata->zc_xmit;
	tp[TXDESCS].ze_tdr = ZE_TDR_OW;
	tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
	tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;

	if (zereset(sc))
		return;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = zestart;
	ifp->if_ioctl = zeioctl;
	ifp->if_watchdog = zetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	aprint_normal("\n");
	aprint_normal_dev(sc->sc_dev, "hardware address %s\n",
	    ether_sprintf(sc->sc_enaddr));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_zedata,
	    sizeof(struct ze_cdata));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
Example #27
/*
 * Allocate a drm dma handle, allocate memory fit for DMA, and map it.
 *
 * XXX This is called drm_pci_alloc for hysterical raisins; it is not
 * specific to PCI.
 *
 * XXX For now, we use non-blocking allocations because this is called
 * by ioctls with the drm global mutex held.
 *
 * XXX Error information is lost because this returns NULL on failure,
 * not even an error embedded in a pointer.
 */
struct drm_dma_handle *
drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
	int nsegs;
	int error;

	/*
	 * Allocate a drm_dma_handle record.
	 */
	struct drm_dma_handle *const dmah = kmem_alloc(sizeof(*dmah),
	    KM_NOSLEEP);
	if (dmah == NULL) {
		error = -ENOMEM;
		goto out;
	}
	dmah->dmah_tag = dev->dmat;

	/*
	 * Allocate the requested amount of DMA-safe memory.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamem_alloc(dmah->dmah_tag, size, align, 0,
	    &dmah->dmah_seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail0;
	KASSERT(nsegs == 1);

	/*
	 * XXX Old drm passed BUS_DMA_NOWAIT below but BUS_DMA_WAITOK
	 * above.  WTF?
	 */

	/*
	 * Map the DMA-safe memory into kernel virtual address space.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamem_map(dmah->dmah_tag, &dmah->dmah_seg, 1, size,
	    &dmah->vaddr,
	    (BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_NOCACHE));
	if (error)
		goto fail1;
	dmah->size = size;

	/*
	 * Create a map for DMA transfers.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamap_create(dmah->dmah_tag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dmah->dmah_map);
	if (error)
		goto fail2;

	/*
	 * Load the kva buffer into the map for DMA transfers.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamap_load(dmah->dmah_tag, dmah->dmah_map, dmah->vaddr,
	    size, NULL, (BUS_DMA_NOWAIT | BUS_DMA_NOCACHE));
	if (error)
		goto fail3;

	/* Record the bus address for convenient reference.  */
	dmah->busaddr = dmah->dmah_map->dm_segs[0].ds_addr;

	/* Zero the DMA buffer.  XXX Yikes!  Is this necessary?  */
	memset(dmah->vaddr, 0, size);

	/* Success!  */
	return dmah;

fail3:	bus_dmamap_destroy(dmah->dmah_tag, dmah->dmah_map);
fail2:	bus_dmamem_unmap(dmah->dmah_tag, dmah->vaddr, dmah->size);
fail1:	bus_dmamem_free(dmah->dmah_tag, &dmah->dmah_seg, 1);
fail0:	dmah->dmah_tag = NULL;	/* XXX paranoia */
	kmem_free(dmah, sizeof(*dmah));
out:	DRM_DEBUG("drm_pci_alloc failed: %d\n", error);
	return NULL;
}
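
As the comment block warns, every failure is flattened into a NULL return, so a caller can only test for NULL and substitute its own errno. A hedged usage sketch, assuming this port pairs the function with a drm_pci_free() release routine:

	struct drm_dma_handle *dmah;

	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
	if (dmah == NULL)
		return -ENOMEM;	/* the real cause was only logged */

	/* ... program the device with dmah->busaddr, touch dmah->vaddr ... */

	drm_pci_free(dev, dmah);	/* reverses the four steps above */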
Example #28
/*
 * Channel initialization/deinitialization per user device.
 */
struct dmac_channel_stat *
dmac_alloc_channel(device_t self, int ch, const char *name, int normalv,
    dmac_intr_handler_t normal, void *normalarg, int errorv,
    dmac_intr_handler_t error, void *errorarg)
{
	struct intio_softc *intio = device_private(self);
	struct dmac_softc *sc = device_private(intio->sc_dmac);
	struct dmac_channel_stat *chan = &sc->sc_channels[ch];
#ifdef DMAC_ARRAYCHAIN
	int r, dummy;
#endif

	aprint_normal_dev(sc->sc_dev, "allocating ch %d for %s.\n",
		ch, name);
	DPRINTF(3, ("dmamap=%p\n", (void *)chan->ch_xfer.dx_dmamap));
#ifdef DIAGNOSTIC
	if (ch < 0 || ch >= DMAC_NCHAN)
		panic("Invalid DMAC channel.");
	if (chan->ch_name[0])
		panic("DMAC: channel in use.");
	if (strlen(name) > 8)
		panic("DMAC: channel user name too long.");
#endif

#ifdef DMAC_ARRAYCHAIN
	/* allocate the DMAC arraychaining map */
	r = bus_dmamem_alloc(intio->sc_dmat,
			     sizeof(struct dmac_sg_array) * DMAC_MAPSIZE,
			     4, 0, &chan->ch_seg[0], 1, &dummy,
			     BUS_DMA_NOWAIT);
	if (r)
		panic("DMAC: cannot alloc DMA safe memory");
	r = bus_dmamem_map(intio->sc_dmat,
			   &chan->ch_seg[0], 1,
			   sizeof(struct dmac_sg_array) * DMAC_MAPSIZE,
			   (void **) &chan->ch_map,
			   BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (r)
		panic("DMAC: cannot map DMA safe memory");
#endif

	/* fill the channel status structure by the default values. */
	strcpy(chan->ch_name, name);
	chan->ch_dcr = (DMAC_DCR_XRM_CSWH | DMAC_DCR_OTYP_EASYNC |
			DMAC_DCR_OPS_8BIT);
	chan->ch_ocr = (DMAC_OCR_SIZE_BYTE | DMAC_OCR_REQG_EXTERNAL);
	chan->ch_normalv = normalv;
	chan->ch_errorv = errorv;
	chan->ch_normal = normal;
	chan->ch_error = error;
	chan->ch_normalarg = normalarg;
	chan->ch_errorarg = errorarg;
	chan->ch_xfer.dx_dmamap = 0;

	/* setup the device-specific registers */
	bus_space_write_1(sc->sc_bst, chan->ch_bht, DMAC_REG_CSR, 0xff);
	bus_space_write_1(sc->sc_bst, chan->ch_bht,
			   DMAC_REG_DCR, chan->ch_dcr);
	bus_space_write_1(sc->sc_bst, chan->ch_bht, DMAC_REG_CPR, 0);

	/*
	 * X68k physical user space is a subset of the kernel space;
	 * the memory is always included in the physical user space,
	 * while the device is not.
	 */
	bus_space_write_1(sc->sc_bst, chan->ch_bht,
			   DMAC_REG_BFCR, DMAC_FC_USER_DATA);
	bus_space_write_1(sc->sc_bst, chan->ch_bht,
			   DMAC_REG_MFCR, DMAC_FC_USER_DATA);
	bus_space_write_1(sc->sc_bst, chan->ch_bht,
			   DMAC_REG_DFCR, DMAC_FC_KERNEL_DATA);

	/* setup the interrupt handlers */
	bus_space_write_1(sc->sc_bst, chan->ch_bht, DMAC_REG_NIVR, normalv);
	bus_space_write_1(sc->sc_bst, chan->ch_bht, DMAC_REG_EIVR, errorv);

	intio_intr_establish_ext(normalv, name, "dma", dmac_done, chan);
	intio_intr_establish_ext(errorv, name, "dmaerr", dmac_error, chan);

	return chan;
}
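
A caller supplies its own interrupt vectors and completion handlers.  A
hypothetical usage sketch (the channel number, vector values, and the
fdc_dmadone/fdc_dmaerr handlers are invented for illustration; note the
name must be at most 8 characters, per the DIAGNOSTIC check above):

	chan = dmac_alloc_channel(parent, 0, "fdc",
	    0x64, fdc_dmadone, sc,	/* normal-completion vector/handler */
	    0x65, fdc_dmaerr, sc);	/* error vector/handler */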
Example #29
/*
 * Attach this instance, and then all the sub-devices
 */
static void
pcscp_attach(device_t parent, device_t self, void *aux)
{
	struct pcscp_softc *esc = device_private(self);
	struct ncr53c9x_softc *sc = &esc->sc_ncr53c9x;
	struct pci_attach_args *pa = aux;
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
	pci_intr_handle_t ih;
	const char *intrstr;
	pcireg_t csr;
	bus_dma_segment_t seg;
	int error, rseg;

	sc->sc_dev = self;
	pci_aprint_devinfo(pa, NULL);
	aprint_normal("%s", device_xname(sc->sc_dev));

	if (pci_mapreg_map(pa, IO_MAP_REG, PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL)) {
		aprint_error(": unable to map registers\n");
		return;
	}

	sc->sc_glue = &pcscp_glue;

	esc->sc_st = iot;
	esc->sc_sh = ioh;
	esc->sc_dmat = pa->pa_dmat;

	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE);

	/*
	 * XXX More of this should be in ncr53c9x_attach(), but
	 * XXX should we really poke around the chip that much in
	 * XXX the MI code?  Think about this more...
	 */

	/*
	 * Set up static configuration info.
	 */

	/*
	 * XXX should read configuration from EEPROM?
	 *
	 * MI ncr53c9x driver does not support configuration
	 * per each target device, though...
	 */
	sc->sc_id = 7;
	sc->sc_cfg1 = sc->sc_id | NCRCFG1_PARENB;
	sc->sc_cfg2 = NCRCFG2_SCSI2 | NCRCFG2_FE;
	sc->sc_cfg3 = NCRAMDCFG3_IDM | NCRAMDCFG3_FCLK;
	sc->sc_cfg4 = NCRAMDCFG4_GE12NS | NCRAMDCFG4_RADE;
	sc->sc_rev = NCR_VARIANT_AM53C974;
	sc->sc_features = NCR_F_FASTSCSI;
	sc->sc_cfg3_fscsi = NCRAMDCFG3_FSCSI;
	sc->sc_freq = 40; /* MHz */

	/*
	 * XXX minsync and maxxfer _should_ be set up in MI code,
	 * XXX but it appears to have some dependency on what sort
	 * XXX of DMA we're hooked up to, etc.
	 */

	/*
	 * This is the value used to start sync negotiations
	 * Note that the NCR register "SYNCTP" is programmed
	 * in "clocks per byte", and has a minimum value of 4.
	 * The SCSI period used in negotiation is one-fourth
	 * of the time (in nanoseconds) needed to transfer one byte.
	 * Since the chip's clock is given in MHz, we have the following
	 * formula: 4 * period = (1000 / freq) * 4
	 */

	sc->sc_minsync = 1000 / sc->sc_freq;
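
	/*
	 * Worked example (not in the original source): with the 40 MHz
	 * clock set above, sc_minsync = 1000 / 40 = 25.  The negotiated
	 * period being one-fourth of the per-byte time, that is
	 * 25 * 4 = 100 ns per byte, i.e. the 10 MB/s fast-SCSI rate.
	 */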

	/* Really no limit, but since we want to fit into the TCR... */
	sc->sc_maxxfer = 16 * 1024 * 1024;

	/*
	 * Create the DMA maps for the data transfers.
	 */

#define MDL_SEG_SIZE	0x1000 /* 4 KB per segment */
#define MDL_SEG_OFFSET	0x0FFF
#define MDL_SIZE	(MAXPHYS / MDL_SEG_SIZE + 1) /* no hardware limit? */
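
/*
 * Sizing note (assuming the common NetBSD MAXPHYS of 64 KB; the value
 * is platform-dependent): MDL_SIZE = 64 KB / 4 KB + 1 = 17 entries,
 * one per 4 KB page plus one in case the buffer is not page-aligned.
 */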

	if (bus_dmamap_create(esc->sc_dmat, MAXPHYS, MDL_SIZE, MDL_SEG_SIZE,
	    MDL_SEG_SIZE, BUS_DMA_NOWAIT, &esc->sc_xfermap)) {
		aprint_error(": can't create DMA maps\n");
		return;
	}

	/*
	 * Allocate and map memory for the MDL.
	 */

	if ((error = bus_dmamem_alloc(esc->sc_dmat,
	    sizeof(uint32_t) * MDL_SIZE, PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": unable to allocate memory for the MDL,"
		    " error = %d\n", error);
		goto fail_0;
	}
	if ((error = bus_dmamem_map(esc->sc_dmat, &seg, rseg,
	    sizeof(uint32_t) * MDL_SIZE, (void **)&esc->sc_mdladdr,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error(": unable to map the MDL memory, error = %d\n",
		    error);
		goto fail_1;
	}
	if ((error = bus_dmamap_create(esc->sc_dmat,
	    sizeof(uint32_t) * MDL_SIZE, 1, sizeof(uint32_t) * MDL_SIZE,
	    0, BUS_DMA_NOWAIT, &esc->sc_mdldmap)) != 0) {
		aprint_error(": unable to map_create for the MDL, error = %d\n",
		    error);
		goto fail_2;
	}
	if ((error = bus_dmamap_load(esc->sc_dmat, esc->sc_mdldmap,
	     esc->sc_mdladdr, sizeof(uint32_t) * MDL_SIZE,
	     NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": unable to load for the MDL, error = %d\n",
		    error);
		goto fail_3;
	}

	/* map and establish interrupt */
	if (pci_intr_map(pa, &ih)) {
		aprint_error(": couldn't map interrupt\n");
		goto fail_4;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	esc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    ncr53c9x_intr, esc);
	if (esc->sc_ih == NULL) {
		aprint_error(": couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail_4;
	}
	if (intrstr != NULL) {
		aprint_normal(": interrupting at %s\n", intrstr);
		aprint_normal("%s", device_xname(sc->sc_dev));
	}

	/* Do the common parts of attachment. */
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = ncr53c9x_scsipi_request;
	ncr53c9x_attach(sc);

	/* Turn on target selection using the `DMA' method */
	sc->sc_features |= NCR_F_DMASELECT;

	return;

 fail_4:
	bus_dmamap_unload(esc->sc_dmat, esc->sc_mdldmap);
 fail_3:
	bus_dmamap_destroy(esc->sc_dmat, esc->sc_mdldmap);
 fail_2:
	bus_dmamem_unmap(esc->sc_dmat, (void *)esc->sc_mdladdr,
	    sizeof(uint32_t) * MDL_SIZE);
 fail_1:
	bus_dmamem_free(esc->sc_dmat, &seg, rseg);
 fail_0:
	bus_dmamap_destroy(esc->sc_dmat, esc->sc_xfermap);
}
Example #30
static void
iavc_pci_attach(struct device * parent, struct device * self, void *aux)
{
	struct iavc_pci_softc *psc = (void *) self;
	struct iavc_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const struct iavc_pci_product *pp;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ret;

	pp = find_cardname(pa);
	if (pp == NULL)
		return;

	sc->sc_t1 = 0;
	sc->sc_dma = 0;
	sc->dmat = pa->pa_dmat;

	iavc_b1dma_reset(sc);

	if (pci_mapreg_map(pa, IAVC_PCI_IOBA, PCI_MAPREG_TYPE_IO, 0,
		&sc->sc_io_bt, &sc->sc_io_bh, &psc->io_base, &psc->io_size)) {
		aprint_error(": unable to map i/o registers\n");
		return;
	}

	if (pci_mapreg_map(pa, IAVC_PCI_MMBA, PCI_MAPREG_TYPE_MEM, 0,
	     &sc->sc_mem_bt, &sc->sc_mem_bh, &psc->mem_base, &psc->mem_size)) {
		aprint_error(": unable to map mem registers\n");
		return;
	}
	aprint_normal(": %s\n", pp->name);

	if (pp->npp_product == PCI_PRODUCT_AVM_T1) {
		aprint_error("%s: sorry, PRI not yet supported\n",
		    sc->sc_dev.dv_xname);
		return;

#if 0
		sc->sc_capi.card_type = CARD_TYPEC_AVM_T1_PCI;
		sc->sc_capi.sc_nbch = NBCH_PRI;
		ret = iavc_t1_detect(sc);
		if (ret) {
			if (ret < 6) {
				aprint_error("%s: no card detected?\n",
				    sc->sc_dev.dv_xname);
			} else {
				aprint_error("%s: black box not on\n",
				    sc->sc_dev.dv_xname);
			}
			return;
		} else {
			sc->sc_dma = 1;
			sc->sc_t1 = 1;
		}
#endif

	} else if (pp->npp_product == PCI_PRODUCT_AVM_B1) {
		sc->sc_capi.card_type = CARD_TYPEC_AVM_B1_PCI;
		sc->sc_capi.sc_nbch = NBCH_BRI;
		ret = iavc_b1dma_detect(sc);
		if (ret) {
			ret = iavc_b1_detect(sc);
			if (ret) {
				aprint_error("%s: no card detected?\n",
				    sc->sc_dev.dv_xname);
				return;
			}
		} else {
			sc->sc_dma = 1;
		}
	}
	if (sc->sc_dma)
		iavc_b1dma_reset(sc);

#if 0
	/*
         * XXX: should really be done this way, but this freezes the card
         */
	if (sc->sc_t1)
		iavc_t1_reset(sc);
	else
		iavc_b1_reset(sc);
#endif

	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: couldn't map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	psc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, iavc_pci_intr, psc);
	if (psc->sc_ih == NULL) {
		aprint_error("%s: couldn't establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	psc->sc_pc = pc;
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	memset(&sc->sc_txq, 0, sizeof(struct ifqueue));
	sc->sc_txq.ifq_maxlen = sc->sc_capi.sc_nbch * 4;

	sc->sc_intr = 0;
	sc->sc_state = IAVC_DOWN;
	sc->sc_blocked = 0;

	/* setup capi link */
	sc->sc_capi.load = iavc_load;
	sc->sc_capi.reg_appl = iavc_register;
	sc->sc_capi.rel_appl = iavc_release;
	sc->sc_capi.send = iavc_send;
	sc->sc_capi.ctx = (void *) sc;

	/* lock & load DMA for TX */
	if ((ret = bus_dmamem_alloc(sc->dmat, IAVC_DMA_SIZE, PAGE_SIZE, 0,
	    &sc->txseg, 1, &sc->ntxsegs, BUS_DMA_ALLOCNOW)) != 0) {
		aprint_error("%s: can't allocate tx DMA memory, error = %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto fail1;
	}

	if ((ret = bus_dmamem_map(sc->dmat, &sc->txseg, sc->ntxsegs,
	    IAVC_DMA_SIZE, &sc->sc_sendbuf, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't map tx DMA memory, error = %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto fail2;
	}

	if ((ret = bus_dmamap_create(sc->dmat, IAVC_DMA_SIZE, 1,
	    IAVC_DMA_SIZE, 0, BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
	    &sc->tx_map)) != 0) {
		aprint_error("%s: can't create tx DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto fail3;
	}

	if ((ret = bus_dmamap_load(sc->dmat, sc->tx_map, sc->sc_sendbuf,
	    IAVC_DMA_SIZE, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't load tx DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto fail4;
	}

	/* do the same for RX */
	if ((ret = bus_dmamem_alloc(sc->dmat, IAVC_DMA_SIZE, PAGE_SIZE, 0,
	    &sc->rxseg, 1, &sc->nrxsegs, BUS_DMA_ALLOCNOW)) != 0) {
		aprint_error("%s: can't allocate rx DMA memory, error = %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto fail5;
	}

	if ((ret = bus_dmamem_map(sc->dmat, &sc->rxseg, sc->nrxsegs,
	    IAVC_DMA_SIZE, &sc->sc_recvbuf, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't map rx DMA memory, error = %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto fail6;
	}

	if ((ret = bus_dmamap_create(sc->dmat, IAVC_DMA_SIZE, 1, IAVC_DMA_SIZE,
	    0, BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT, &sc->rx_map)) != 0) {
		aprint_error("%s: can't create rx DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto fail7;
	}

	if ((ret = bus_dmamap_load(sc->dmat, sc->rx_map, sc->sc_recvbuf,
	    IAVC_DMA_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't load rx DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, ret);
		goto fail8;
	}

	if (capi_ll_attach(&sc->sc_capi, sc->sc_dev.dv_xname, pp->name)) {
		aprint_error("%s: capi attach failed\n", sc->sc_dev.dv_xname);
		goto fail9;
	}
	return;

	/* release resources in case of failed attach */
fail9:
	bus_dmamap_unload(sc->dmat, sc->rx_map);
fail8:
	bus_dmamap_destroy(sc->dmat, sc->rx_map);
fail7:
	bus_dmamem_unmap(sc->dmat, sc->sc_recvbuf, IAVC_DMA_SIZE);
fail6:
	bus_dmamem_free(sc->dmat, &sc->rxseg, sc->nrxsegs);
fail5:
	bus_dmamap_unload(sc->dmat, sc->tx_map);
fail4:
	bus_dmamap_destroy(sc->dmat, sc->tx_map);
fail3:
	bus_dmamem_unmap(sc->dmat, sc->sc_sendbuf, IAVC_DMA_SIZE);
fail2:
	bus_dmamem_free(sc->dmat, &sc->txseg, sc->ntxsegs);
fail1:
	pci_intr_disestablish(psc->sc_pc, psc->sc_ih);

	return;
}
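
All four attachments follow the same bus_dma(9) life cycle:
bus_dmamem_alloc, bus_dmamem_map, bus_dmamap_create, bus_dmamap_load,
with error labels releasing the resources in exactly the reverse order.
A condensed sketch of the pattern (dmat, size, and the fail labels are
generic placeholders, not taken from any of the drivers above):

	bus_dma_segment_t seg;
	bus_dmamap_t map;
	void *kva;
	int nsegs, error;

	error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail0;
	error = bus_dmamem_map(dmat, &seg, nsegs, size, &kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		goto fail1;
	error = bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &map);
	if (error)
		goto fail2;
	error = bus_dmamap_load(dmat, map, kva, size, NULL,
	    BUS_DMA_NOWAIT);
	if (error)
		goto fail3;
	/* ... program the device with map->dm_segs[0].ds_addr ... */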