Code example #1
File: if_uba.c Project: MarginC/kame
/*
 * Init UNIBUS for interface whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this once for each read and once for each write buffer.  We also
 * allocate page frames in the mbuffer pool for these pages.
 *
 * Recent changes:
 *	No special "header pages" anymore.
 *	Recv packets are always put in clusters.
 *	"size" is the maximum buffer size, may not be bigger than MCLBYTES.
 */
int
if_ubaminit(struct ifubinfo *ifu, struct uba_softc *uh, int size,
            struct ifrw *ifr, int nr, struct ifxmt *ifw, int nw)
{
    struct mbuf *m;
    int totsz, i, error, rseg, nm = nr;
    bus_dma_segment_t seg;
    caddr_t vaddr;

#ifdef DIAGNOSTIC
    if (size > MCLBYTES)
        panic("if_ubaminit: size > MCLBYTES");
#endif
    ifu->iff_softc = uh;
    /*
     * Get DMA memory for transmit buffers.
     * Buffer sizes are rounded up to a multiple of the UBA page size,
     * then allocated contiguously.
     */
    size = (size + UBA_PGOFSET) & ~UBA_PGOFSET;
    totsz = size * nw;
    if ((error = bus_dmamem_alloc(uh->uh_dmat, totsz, NBPG, 0,
                                  &seg, 1, &rseg, BUS_DMA_NOWAIT)))
        return error;
    if ((error = bus_dmamem_map(uh->uh_dmat, &seg, rseg, totsz, &vaddr,
                                BUS_DMA_NOWAIT|BUS_DMA_COHERENT))) {
        bus_dmamem_free(uh->uh_dmat, &seg, rseg);
        return error;
    }

    /*
     * Create receive and transmit maps.
     * Alloc all resources now so we won't fail in the future.
     */

    for (i = 0; i < nr; i++) {
        if ((error = bus_dmamap_create(uh->uh_dmat, size, 1,
                                       size, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
                                       &ifr[i].ifrw_map))) {
            nr = i;
            nm = nw = 0;
            goto bad;
        }
    }
    for (i = 0; i < nw; i++) {
        if ((error = bus_dmamap_create(uh->uh_dmat, size, 1,
                                       size, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
                                       &ifw[i].ifw_map))) {
            nw = i;
            nm = 0;
            goto bad;
        }
    }
    /*
     * Preload the rx maps with mbuf clusters.
     */
    for (i = 0; i < nm; i++) {
        if ((m = getmcl()) == NULL) {
            error = ENOBUFS;
            nm = i;
            goto bad;
        }
        ifr[i].ifrw_mbuf = m;
        bus_dmamap_load(uh->uh_dmat, ifr[i].ifrw_map,
                        m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);

    }
    /*
     * Load the tx maps with DMA memory (common case).
     */
    for (i = 0; i < nw; i++) {
        ifw[i].ifw_vaddr = vaddr + size * i;
        ifw[i].ifw_size = size;
        bus_dmamap_load(uh->uh_dmat, ifw[i].ifw_map,
                        ifw[i].ifw_vaddr, ifw[i].ifw_size, NULL, BUS_DMA_NOWAIT);
    }
    return 0;
bad:
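    /*
     * Back out in reverse order: unload and free any preloaded receive
     * clusters, then destroy whatever transmit and receive maps were
     * created before the failure.
     */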
    while (--nm >= 0) {
        bus_dmamap_unload(uh->uh_dmat, ifr[nm].ifrw_map);
        m_freem(ifr[nm].ifrw_mbuf);
    }
    while (--nw >= 0)
        bus_dmamap_destroy(uh->uh_dmat, ifw[nw].ifw_map);
    while (--nr >= 0)
        bus_dmamap_destroy(uh->uh_dmat, ifr[nr].ifrw_map);
    return (error);
}
Code example #2
static usbd_status
usb_block_allocmem(bus_dma_tag_t tag, size_t size, size_t align,
		   usb_dma_block_t **dmap)
{
        usb_dma_block_t *p;
	int s;

	DPRINTFN(5, ("usb_block_allocmem: size=%lu align=%lu\n",
		     (u_long)size, (u_long)align));

#ifdef DIAGNOSTIC
	if (!curproc) {
		printf("usb_block_allocmem: in interrupt context, size=%lu\n",
		    (unsigned long) size);
	}
#endif

	s = splusb();
	/* First check the free list. */
	for (p = LIST_FIRST(&usb_blk_freelist); p; p = LIST_NEXT(p, next)) {
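		/*
		 * Reuse a cached block only if it is big enough, less
		 * than twice the requested size (to limit waste) and at
		 * least as strictly aligned.
		 */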
		if (p->tag == tag && p->size >= size && p->size < size * 2 &&
		    p->align >= align) {
			LIST_REMOVE(p, next);
			usb_blk_nfree--;
			splx(s);
			*dmap = p;
			DPRINTFN(6,("usb_block_allocmem: free list size=%lu\n",
				    (u_long)p->size));
			return (USBD_NORMAL_COMPLETION);
		}
	}
	splx(s);

#ifdef DIAGNOSTIC
	if (!curproc) {
		printf("usb_block_allocmem: in interrupt context, failed\n");
		return (USBD_NOMEM);
	}
#endif

	DPRINTFN(6, ("usb_block_allocmem: no free\n"));
	p = malloc(sizeof *p, M_USB, M_NOWAIT);
	if (p == NULL)
		return (USBD_NOMEM);

	if (bus_dma_tag_create(tag, align, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    size, sizeof(p->segs) / sizeof(p->segs[0]), size,
	    0, NULL, NULL, &p->tag) != 0)
	{
		goto free;
	}

	p->size = size;
	p->align = align;
	if (bus_dmamem_alloc(p->tag, &p->kaddr,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT, &p->map))
		goto tagfree;

	if (bus_dmamap_load(p->tag, p->map, p->kaddr, p->size,
	    usbmem_callback, p, 0))
		goto memfree;

	/* XXX - override the tag, ok since we never free it */
	p->tag = tag;
	*dmap = p;
	return (USBD_NORMAL_COMPLETION);

	/*
	 * XXX - do we need to _unload? is the order of _free and _destroy
	 * correct?
	 */
memfree:
	bus_dmamem_free(p->tag, p->kaddr, p->map);
tagfree:
	bus_dma_tag_destroy(p->tag);
free:
	free(p, M_USB);
	return (USBD_NOMEM);
}
Code example #3
File: pcscp.c Project: sofuture/bitrig
/*
 * Attach this instance, and then all the sub-devices
 */
void
pcscp_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct pcscp_softc *esc = (void *)self;
	struct ncr53c9x_softc *sc = &esc->sc_ncr53c9x;
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
	pci_intr_handle_t ih;
	const char *intrstr;
	bus_dma_segment_t seg;
	int error, rseg;

	if (pci_mapreg_map(pa, IO_MAP_REG, PCI_MAPREG_TYPE_IO, 0,
	     &iot, &ioh, NULL, NULL, 0)) {
		printf("%s: unable to map registers\n", sc->sc_dev.dv_xname);
		return;
	}

	sc->sc_glue = &pcscp_glue;

	esc->sc_st = iot;
	esc->sc_sh = ioh;
	esc->sc_dmat = pa->pa_dmat;

	/*
	 * XXX More of this should be in ncr53c9x_attach(), but
	 * XXX should we really poke around the chip that much in
	 * XXX the MI code?  Think about this more...
	 */

	/*
	 * Set up static configuration info.
	 */

	/*
	 * XXX should read configuration from EEPROM?
	 *
	 * MI ncr53c9x driver does not support configuration
	 * per each target device, though...
	 */
	sc->sc_id = 7;
	sc->sc_cfg1 = sc->sc_id | NCRCFG1_PARENB;
	sc->sc_cfg2 = NCRCFG2_SCSI2 | NCRCFG2_FE;
	sc->sc_cfg3 = NCRAMDCFG3_IDM | NCRAMDCFG3_FCLK;
	sc->sc_cfg4 = NCRAMDCFG4_GE12NS | NCRAMDCFG4_RADE;
	sc->sc_rev = NCR_VARIANT_AM53C974;
	sc->sc_features = NCR_F_FASTSCSI;
	sc->sc_cfg3_fscsi = NCRAMDCFG3_FSCSI;
	sc->sc_freq = 40; /* MHz */

	/*
	 * XXX minsync and maxxfer _should_ be set up in MI code,
	 * XXX but it appears to have some dependency on what sort
	 * XXX of DMA we're hooked up to, etc.
	 */

	/*
	 * This is the value used to start sync negotiations
	 * Note that the NCR register "SYNCTP" is programmed
	 * in "clocks per byte", and has a minimum value of 4.
	 * The SCSI period used in negotiation is one-fourth
	 * of the time (in nanoseconds) needed to transfer one byte.
	 * Since the chip's clock is given in MHz, we have the following
	 * formula: 4 * period = (1000 / freq) * 4
	 */

	sc->sc_minsync = 1000 / sc->sc_freq; 
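	/* e.g. sc_freq = 40 (MHz) gives sc_minsync = 1000 / 40 = 25 ns. */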

	/* Really no limit, but since we want to fit into the TCR... */
	sc->sc_maxxfer = 16 * 1024 * 1024;

	/*
	 * Create the DMA maps for the data transfers.
	 */

#define MDL_SEG_SIZE	0x1000 /* 4kbyte per segment */
#define MDL_SEG_OFFSET	0x0FFF
#define MDL_SIZE	(MAXPHYS / MDL_SEG_SIZE + 1) /* no hardware limit? */
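/* e.g. with a 64KB MAXPHYS this gives 17 MDL entries, one per 4KB segment */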

	if (bus_dmamap_create(esc->sc_dmat, MAXPHYS, MDL_SIZE, MDL_SEG_SIZE,
	    MDL_SEG_SIZE, BUS_DMA_NOWAIT, &esc->sc_xfermap)) {
		printf("%s: can't create dma maps\n", sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Allocate and map memory for the MDL.
	 */

	if ((error = bus_dmamem_alloc(esc->sc_dmat,
	    sizeof(u_int32_t) * MDL_SIZE, PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate memory for the MDL, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_0;
	}
	if ((error = bus_dmamem_map(esc->sc_dmat, &seg, rseg,
	    sizeof(u_int32_t) * MDL_SIZE , (caddr_t *)&esc->sc_mdladdr,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map the MDL memory, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}
	if ((error = bus_dmamap_create(esc->sc_dmat, 
	    sizeof(u_int32_t) * MDL_SIZE, 1, sizeof(u_int32_t) * MDL_SIZE,
	    0, BUS_DMA_NOWAIT, &esc->sc_mdldmap)) != 0) {
		printf("%s: unable to map_create for the MDL, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}
	if ((error = bus_dmamap_load(esc->sc_dmat, esc->sc_mdldmap,
	     esc->sc_mdladdr, sizeof(u_int32_t) * MDL_SIZE,
	     NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load for the MDL, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/* map and establish interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_4;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	esc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    ncr53c9x_intr, esc, sc->sc_dev.dv_xname);
	if (esc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_4;
	}
	if (intrstr != NULL)
		printf(": %s\n", intrstr);

	/* Do the common parts of attachment. */
	printf("%s", sc->sc_dev.dv_xname);

	ncr53c9x_attach(sc, &pcscp_adapter);

	/* Turn on target selection using the `dma' method */
	sc->sc_features |= NCR_F_DMASELECT;

	return;

fail_4:
	bus_dmamap_unload(esc->sc_dmat, esc->sc_mdldmap);
fail_3:
	bus_dmamap_destroy(esc->sc_dmat, esc->sc_mdldmap);
fail_2:
	bus_dmamem_unmap(esc->sc_dmat, (caddr_t)esc->sc_mdladdr,
	    sizeof(uint32_t) * MDL_SIZE);
fail_1:
	bus_dmamem_free(esc->sc_dmat, &seg, rseg);
fail_0:
	bus_dmamap_destroy(esc->sc_dmat, esc->sc_xfermap);
}
Code example #4
File: if_lnc_isa.c Project: vishesh/DragonFlyBSD
static int
le_isa_attach(device_t dev)
{
	struct le_isa_softc *lesc;
	struct lance_softc *sc;
	bus_size_t macstart, rap, rdp;
	int error, i, macstride;

	lesc = device_get_softc(dev);
	sc = &lesc->sc_am7990.lsc;

	lesc->sc_rrid = 0;
	switch (ISA_PNP_PROBE(device_get_parent(dev), dev, le_isa_ids)) {
	case 0:
		lesc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
		    &lesc->sc_rrid, RF_ACTIVE);
		rap = PCNET_RAP;
		rdp = PCNET_RDP;
		macstart = 0;
		macstride = 1;
		break;
	case ENOENT:
		for (i = 0; i < NELEM(le_isa_params); i++) {
			if (le_isa_probe_legacy(dev, &le_isa_params[i]) == 0) {
				lesc->sc_rres = bus_alloc_resource(dev,
				    SYS_RES_IOPORT, &lesc->sc_rrid, 0, ~0,
				    le_isa_params[i].iosize, RF_ACTIVE);
				rap = le_isa_params[i].rap;
				rdp = le_isa_params[i].rdp;
				macstart = le_isa_params[i].macstart;
				macstride = le_isa_params[i].macstride;
				goto found;
			}
		}
		/* FALLTHROUGH */
	case ENXIO:
	default:
		device_printf(dev, "cannot determine chip\n");
		error = ENXIO;
		goto fail_mtx;
	}

 found:
	if (lesc->sc_rres == NULL) {
		device_printf(dev, "cannot allocate registers\n");
		error = ENXIO;
		goto fail_mtx;
	}
	lesc->sc_regt = rman_get_bustag(lesc->sc_rres);
	lesc->sc_regh = rman_get_bushandle(lesc->sc_rres);
	lesc->sc_rap = rap;
	lesc->sc_rdp = rdp;

	lesc->sc_drid = 0;
	if ((lesc->sc_dres = bus_alloc_resource_any(dev, SYS_RES_DRQ,
	    &lesc->sc_drid, RF_ACTIVE)) == NULL) {
		device_printf(dev, "cannot allocate DMA channel\n");
		error = ENXIO;
		goto fail_rres;
	}

	lesc->sc_irid = 0;
	if ((lesc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &lesc->sc_irid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(dev, "cannot allocate interrupt\n");
		error = ENXIO;
		goto fail_dres;
	}

	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_24BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    BUS_DMA_WAITOK,		/* flags */
	    &lesc->sc_pdmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate parent DMA tag\n");
		goto fail_ires;
	}

	sc->sc_memsize = LE_ISA_MEMSIZE;
	/*
	 * For Am79C90, Am79C961 and Am79C961A the init block must be 2-byte
	 * aligned and the ring descriptors must be 8-byte aligned.
	 */
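	/*
	 * Like the parent tag, the buffer tag is limited to 24-bit ISA
	 * DMA addresses; it additionally forces a single contiguous
	 * segment of sc_memsize bytes with 8-byte alignment.
	 */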
	error = bus_dma_tag_create(
	    lesc->sc_pdmat,		/* parent */
	    8, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_24BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sc->sc_memsize,		/* maxsize */
	    1,				/* nsegments */
	    sc->sc_memsize,		/* maxsegsize */
	    BUS_DMA_WAITOK,		/* flags */
	    &lesc->sc_dmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate buffer DMA tag\n");
		goto fail_pdtag;
	}

	error = bus_dmamem_alloc(lesc->sc_dmat, (void **)&sc->sc_mem,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &lesc->sc_dmam);
	if (error != 0) {
		device_printf(dev, "cannot allocate DMA buffer memory\n");
		goto fail_dtag;
	}

	sc->sc_addr = 0;
	error = bus_dmamap_load(lesc->sc_dmat, lesc->sc_dmam, sc->sc_mem,
	    sc->sc_memsize, le_isa_dma_callback, sc, 0);
	if (error != 0 || sc->sc_addr == 0) {
                device_printf(dev, "cannot load DMA buffer map\n");
		goto fail_dmem;
	}

	isa_dmacascade(rman_get_start(lesc->sc_dres));

	sc->sc_flags = 0;
	sc->sc_conf3 = 0;

	/*
	 * Extract the physical MAC address from the ROM.
	 */
	for (i = 0; i < sizeof(sc->sc_enaddr); i++)
		sc->sc_enaddr[i] =  bus_space_read_1(lesc->sc_regt,
		    lesc->sc_regh, macstart + i * macstride);

	sc->sc_copytodesc = lance_copytobuf_contig;
	sc->sc_copyfromdesc = lance_copyfrombuf_contig;
	sc->sc_copytobuf = lance_copytobuf_contig;
	sc->sc_copyfrombuf = lance_copyfrombuf_contig;
	sc->sc_zerobuf = lance_zerobuf_contig;

	sc->sc_rdcsr = le_isa_rdcsr;
	sc->sc_wrcsr = le_isa_wrcsr;
	sc->sc_hwreset = NULL;
	sc->sc_hwinit = NULL;
	sc->sc_hwintr = NULL;
	sc->sc_nocarrier = NULL;
	sc->sc_mediachange = NULL;
	sc->sc_mediastatus = NULL;
	sc->sc_supmedia = NULL;

	error = am7990_config(&lesc->sc_am7990, device_get_name(dev),
	    device_get_unit(dev));
	if (error != 0) {
		device_printf(dev, "cannot attach Am7990\n");
		goto fail_dmap;
	}

	ifq_set_cpuid(&sc->ifp->if_snd, rman_get_cpuid(lesc->sc_ires));

	error = bus_setup_intr(dev, lesc->sc_ires, INTR_MPSAFE,
	    am7990_intr, sc, &lesc->sc_ih, sc->ifp->if_serializer);
	if (error != 0) {
		device_printf(dev, "cannot set up interrupt\n");
		goto fail_am7990;
	}

	return (0);

 fail_am7990:
	am7990_detach(&lesc->sc_am7990);
 fail_dmap:
	bus_dmamap_unload(lesc->sc_dmat, lesc->sc_dmam);
 fail_dmem:
	bus_dmamem_free(lesc->sc_dmat, sc->sc_mem, lesc->sc_dmam);
 fail_dtag:
	bus_dma_tag_destroy(lesc->sc_dmat);
 fail_pdtag:
	bus_dma_tag_destroy(lesc->sc_pdmat);
 fail_ires:
	bus_release_resource(dev, SYS_RES_IRQ, lesc->sc_irid, lesc->sc_ires);
 fail_dres:
	bus_release_resource(dev, SYS_RES_DRQ, lesc->sc_drid, lesc->sc_dres);
 fail_rres:
	bus_release_resource(dev, SYS_RES_IOPORT, lesc->sc_rrid, lesc->sc_rres);
 fail_mtx:
	return (error);
}
Code example #5
File: usb_busdma.c Project: ragunath3252/patch-rtems
/*------------------------------------------------------------------------*
 *	usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_dma_tag *utag;
	bus_dmamap_t map;
	void *ptr;
	int err;

	uptag = pc->tag_parent;

	if (align != 1) {
		/*
		 * The alignment must be greater than or equal to the
		 * "size"; otherwise the object can be split between two
		 * memory pages and we get a problem!
		 */
		while (align < size) {
			align *= 2;
			if (align == 0) {
				goto error;
			}
		}
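		/*
		 * e.g. a 3 KB request with 1 KB alignment leaves this
		 * loop with 4 KB alignment, so the buffer cannot
		 * straddle a 4 KB page boundary.
		 */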
#if 1
		/*
		 * XXX BUS-DMA workaround - FIXME later:
		 *
		 * We assume that the alignment at this point of the
		 * code is greater than or equal to the size and less
		 * than two times the size, so that if we double the
		 * size, the size will be greater than the alignment.
		 *
		 * The bus-dma system has a check for "alignment" being
		 * less than "size".  If that check fails we end up
		 * using contigmalloc, which is page based even for
		 * small allocations.  Try to avoid that to save
		 * memory; hence we sometimes do a large number of
		 * small allocations!
		 */
		if (size <= (USB_PAGE_SIZE / 2)) {
			size *= 2;
		}
#endif
	}
	/* get the correct DMA tag */
	utag = usb_dma_tag_find(uptag, size, align);
	if (utag == NULL) {
		goto error;
	}
	/* allocate memory */
	if (bus_dmamem_alloc(
	    utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
		goto error;
	}
	/* setup page cache */
	pc->buffer = ptr;
	pc->page_start = pg;
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->map = map;
	pc->tag = utag->tag;
	pc->ismultiseg = (align == 1);

	mtx_lock(uptag->mtx);

	/* load memory into DMA */
	err = bus_dmamap_load(
	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));

	if (err == EINPROGRESS) {
		cv_wait(uptag->cv, uptag->mtx);
		err = 0;
	}
	mtx_unlock(uptag->mtx);

	if (err || uptag->dma_error) {
		bus_dmamem_free(utag->tag, ptr, map);
		goto error;
	}
	memset(ptr, 0, size);

	usb_pc_cpu_flush(pc);

	return (0);

error:
	/* reset most of the page cache */
	pc->buffer = NULL;
	pc->page_start = NULL;
	pc->page_offset_buf = 0;
	pc->page_offset_end = 0;
	pc->map = NULL;
	pc->tag = NULL;
	return (1);
}
Code example #6
/*
 * Function name:	tw_osli_free_resources
 * Description:		Performs clean-up at the time of going down.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	None
 */
static TW_VOID
tw_osli_free_resources(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_INT32			error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Detach from CAM */
	tw_osli_cam_detach(sc);

	if (sc->req_ctx_buf)
		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
			NULL) {
			mtx_destroy(req->ioctl_wake_timeout_lock);

			if ((error = bus_dmamap_destroy(sc->dma_tag,
					req->dma_map)))
				tw_osli_dbg_dprintf(1, sc,
					"dmamap_destroy(dma) returned %d",
					error);
		}

	if ((sc->ioctl_tag) && (sc->ioctl_map))
		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
			tw_osli_dbg_dprintf(1, sc,
				"dmamap_destroy(ioctl) returned %d", error);

	/* Free all memory allocated so far. */
	if (sc->req_ctx_buf)
		free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);

	if (sc->non_dma_mem)
		free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);

	if (sc->dma_mem) {
		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
			sc->cmd_map);
	}
	if (sc->cmd_tag)
		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(cmd) returned %d", error);

	if (sc->dma_tag)
		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(dma) returned %d", error);

	if (sc->ioctl_tag)
		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(ioctl) returned %d", error);

	if (sc->parent_tag)
		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(parent) returned %d", error);


	/* Disconnect the interrupt handler. */
	if ((error = twa_teardown_intr(sc)))
			tw_osli_dbg_dprintf(1, sc,
				"teardown_intr returned %d", error);

	if (sc->irq_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(irq) returned %d", error);


	/* Release the register window mapping. */
	if (sc->reg_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(io) returned %d", error);


	/* Destroy the control device. */
	if (sc->ctrl_dev != (struct cdev *)NULL)
		destroy_dev(sc->ctrl_dev);

	if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
		tw_osli_dbg_dprintf(1, sc,
			"sysctl_ctx_free returned %d", error);

}
Code example #7
File: if_bce.c Project: repos-holder/openbsd-patches
void
bce_attach(struct device *parent, struct device *self, void *aux)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char     *intrstr = NULL;
	caddr_t         kva;
	bus_dma_segment_t seg;
	int             rseg;
	struct ifnet   *ifp;
	pcireg_t        memtype;
	bus_addr_t      memaddr;
	bus_size_t      memsize;
	int             pmreg;
	pcireg_t        pmode;
	int             error;
	int             i;

	sc->bce_pa = *pa;
	sc->bce_dmatag = pa->pa_dmat;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
	    &sc->bce_bhandle, &memaddr, &memsize, 0)) {
		printf(": unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf(": unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			printf(": waking up from power state D%d\n",
			       pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
	    self->dv_xname);
	if (sc->bce_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Allocate DMA-safe memory for ring descriptors.
	 * The receive and transmit rings cannot share the same
	 * 4k space; however, both are allocated at once here.
	 */
	/*
	 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but
	 * due to the limitation above. ??
	 */
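	/*
	 * 2 * PAGE_SIZE with PAGE_SIZE alignment and a 2 * PAGE_SIZE
	 * boundary gives the rx and tx rings a page each within one
	 * allocation that cannot cross a two-page boundary.
	 */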
	if ((error = bus_dmamem_alloc(sc->bce_dmatag,
	    2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
				      &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		printf(": unable to alloc space for ring descriptors, "
		       "error = %d\n", error);
		return;
	}

	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		printf(": unable to map DMA buffers, error = %d\n",
		    error);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag,
	    2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
				       &sc->bce_ring_map))) {
		printf(": unable to create ring DMA map, error = %d\n",
		    error);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf(": unable to load ring DMA map\n");
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* save the ring space in softc */
	sc->bce_rx_ring = (struct bce_dma_slot *) kva;
	sc->bce_tx_ring = (struct bce_dma_slot *) (kva + PAGE_SIZE);

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES,
		    BCE_NTXFRAGS, MCLBYTES, 0, 0, &sc->bce_cdata.bce_tx_map[i])) != 0) {
			printf(": unable to create tx DMA map, error = %d\n",
			    error);
		}
		sc->bce_cdata.bce_tx_chain[i] = NULL;
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) {
			printf(": unable to create rx DMA map, error = %d\n",
			    error);
		}
		sc->bce_cdata.bce_rx_chain[i] = NULL;
	}

	/* Set up ifnet structure */
	ifp = &sc->bce_ac.ac_if;
	strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_init = bce_init;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* MAC address */
	sc->bce_ac.ac_enaddr[0] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
	sc->bce_ac.ac_enaddr[1] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
	sc->bce_ac.ac_enaddr[2] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
	sc->bce_ac.ac_enaddr[3] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
	sc->bce_ac.ac_enaddr[4] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
	sc->bce_ac.ac_enaddr[5] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->bce_ac.ac_enaddr));

	/* Initialize our media structures and probe the MII. */
	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;
	ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
	    bce_mediastatus);
	mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_PHY) & 0x1f;

	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but not currently.
	 */
	bce_mii_write((struct device *) sc, 1, 26,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);	 /* MAGIC */

	/* enable traffic meter led mode */
	bce_mii_write((struct device *) sc, 1, 27,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));	 /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bce_timeout, bce_tick, sc);
}
Code example #8
File: if_malohal.c Project: coyizumi/cs111
/*
 * Setup for communication with the device.  We allocate
 * a command buffer and map it for bus dma use.  The pci
 * device id is used to identify whether the device has
 * SRAM on it (in which case f/w download must include a
 * memory controller reset).  All bus i/o operations happen
 * in BAR 1; the driver passes in the tag and handle we need.
 */
struct malo_hal *
malo_hal_attach(device_t dev, uint16_t devid,
    bus_space_handle_t ioh, bus_space_tag_t iot, bus_dma_tag_t tag)
{
	int error;
	struct malo_hal *mh;

	mh = malloc(sizeof(struct malo_hal), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mh == NULL)
		return NULL;

	mh->mh_dev = dev;
	mh->mh_ioh = ioh;
	mh->mh_iot = iot;

	snprintf(mh->mh_mtxname, sizeof(mh->mh_mtxname),
	    "%s_hal", device_get_nameunit(dev));
	mtx_init(&mh->mh_mtx, mh->mh_mtxname, NULL, MTX_DEF);

	/*
	 * Allocate the command buffer and map into the address
	 * space of the h/w.  We request "coherent" memory which
	 * will be uncached on some architectures.
	 */
	error = bus_dma_tag_create(tag,		/* parent */
		       PAGE_SIZE, 0,		/* alignment, bounds */
		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		       BUS_SPACE_MAXADDR,	/* highaddr */
		       NULL, NULL,		/* filter, filterarg */
		       MALO_CMDBUF_SIZE,	/* maxsize */
		       1,			/* nsegments */
		       MALO_CMDBUF_SIZE,	/* maxsegsize */
		       BUS_DMA_ALLOCNOW,	/* flags */
		       NULL,			/* lockfunc */
		       NULL,			/* lockarg */
		       &mh->mh_dmat);
	if (error != 0) {
		device_printf(dev, "unable to allocate memory for cmd tag, "
			"error %u\n", error);
		goto fail;
	}

	/* allocate descriptors */
	error = bus_dmamap_create(mh->mh_dmat, BUS_DMA_NOWAIT, &mh->mh_dmamap);
	if (error != 0) {
		device_printf(dev, "unable to create dmamap for cmd buffers, "
			"error %u\n", error);
		goto fail;
	}

	error = bus_dmamem_alloc(mh->mh_dmat, (void**) &mh->mh_cmdbuf,
				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 
				 &mh->mh_dmamap);
	if (error != 0) {
		device_printf(dev, "unable to allocate memory for cmd buffer, "
			"error %u\n", error);
		goto fail;
	}

	error = bus_dmamap_load(mh->mh_dmat, mh->mh_dmamap,
				mh->mh_cmdbuf, MALO_CMDBUF_SIZE,
				malo_hal_load_cb, &mh->mh_cmdaddr,
				BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(dev, "unable to load cmd buffer, error %u\n",
			error);
		goto fail;
	}

	return (mh);

fail:
	if (mh->mh_dmamap != NULL) {
		bus_dmamap_unload(mh->mh_dmat, mh->mh_dmamap);
		if (mh->mh_cmdbuf != NULL)
			bus_dmamem_free(mh->mh_dmat, mh->mh_cmdbuf,
			    mh->mh_dmamap);
		bus_dmamap_destroy(mh->mh_dmat, mh->mh_dmamap);
	}
	if (mh->mh_dmat)
		bus_dma_tag_destroy(mh->mh_dmat);
	free(mh, M_DEVBUF);

	return (NULL);
}
Code example #9
File: be.c Project: yazshel/netbsd-kernel
void
beattach(device_t parent, device_t self, void *aux)
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = device_private(parent);
	struct be_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int instance;
	int rseg, error;
	uint32_t v;

	sc->sc_dev = self;

	if (sa->sa_nreg < 3) {
		printf(": only %d register sets\n", sa->sa_nreg);
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(sa->sa_reg[0].oa_space, sa->sa_reg[0].oa_base),
	    (bus_size_t)sa->sa_reg[0].oa_size,
	    0, &sc->sc_cr) != 0) {
		printf(": cannot map registers\n");
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(sa->sa_reg[1].oa_space, sa->sa_reg[1].oa_base),
	    (bus_size_t)sa->sa_reg[1].oa_size,
	    0, &sc->sc_br) != 0) {
		printf(": cannot map registers\n");
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(sa->sa_reg[2].oa_space, sa->sa_reg[2].oa_base),
	    (bus_size_t)sa->sa_reg[2].oa_size,
	    0, &sc->sc_tr) != 0) {
		printf(": cannot map registers\n");
		return;
	}

	sc->sc_bustag = sa->sa_bustag;
	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_rev = prom_getpropint(node, "board-version", -1);
	printf(": rev %x,", sc->sc_rev);

	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_channel = prom_getpropint(node, "channel#", -1);
	if (sc->sc_channel == -1)
		sc->sc_channel = 0;

	sc->sc_burst = prom_getpropint(node, "burst-sizes", -1);
	if (sc->sc_burst == -1)
		sc->sc_burst = qec->sc_burst;

	/* Clamp at parent's burst sizes */
	sc->sc_burst &= qec->sc_burst;

	/* Establish interrupt handler */
	if (sa->sa_nintr)
		(void)bus_intr_establish(sa->sa_bustag, sa->sa_pri, IPL_NET,
		    beintr, sc);

	prom_getether(node, sc->sc_enaddr);
	printf(" address %s\n", ether_sprintf(sc->sc_enaddr));

	/*
	 * Allocate descriptor ring and buffers.
	 */

	/* for now, allocate as many bufs as there are ring descriptors */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

	size =
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    sc->sc_rb.rb_ntbuf * BE_PKT_BUF_SZ +
	    sc->sc_rb.rb_nrbuf * BE_PKT_BUF_SZ;
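	/*
	 * A single contiguous DMA allocation covers both descriptor
	 * rings plus all transmit and receive packet buffers.
	 */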

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(self, "DMA map create error %d\n", error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(sa->sa_dmatag, size, 0, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer alloc error %d\n", error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(sa->sa_dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self, "DMA buffer map error %d\n", error);
		bus_dmamem_free(sa->sa_dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer map load error %d\n", error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/*
	 * Initialize our media structures and MII info.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = be_mii_readreg;
	mii->mii_writereg = be_mii_writereg;
	mii->mii_statchg = be_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, be_ifmedia_upd, be_ifmedia_sts);

	/*
	 * Initialize transceiver and determine which PHY connection to use.
	 */
	be_mii_sync(sc);
	v = bus_space_read_4(sc->sc_bustag, sc->sc_tr, BE_TRI_MGMTPAL);

	instance = 0;

	if ((v & MGMT_PAL_EXT_MDIO) != 0) {

		mii_attach(self, mii, 0xffffffff, BE_PHY_EXTERNAL,
		    MII_OFFSET_ANY, 0);

		child = LIST_FIRST(&mii->mii_phys);
		if (child == NULL) {
			/* No PHY attached */
			ifmedia_add(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, instance),
			    0, NULL);
			ifmedia_set(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, instance));
		} else {
			/*
			 * Note: we support just one PHY on the external
			 * MII connector.
			 */
#ifdef DIAGNOSTIC
			if (LIST_NEXT(child, mii_list) != NULL) {
				aprint_error_dev(self,
				    "spurious MII device %s attached\n",
				    device_xname(child->mii_dev));
			}
#endif
			if (child->mii_phy != BE_PHY_EXTERNAL ||
			    child->mii_inst > 0) {
				aprint_error_dev(self,
				    "cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				       device_xname(child->mii_dev),
				       child->mii_phy, child->mii_inst);
			} else {
				sc->sc_phys[instance] = child->mii_phy;
			}

			/*
			 * XXX - we can really do the following ONLY if the
			 * phy indeed has the auto negotiation capability!!
			 */
			ifmedia_set(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, instance));

			/* Mark our current media setting */
			be_pal_gate(sc, BE_PHY_EXTERNAL);
			instance++;
		}

	}

	if ((v & MGMT_PAL_INT_MDIO) != 0) {
		/*
		 * The be internal phy looks vaguely like MII hardware,
		 * but not enough to be able to use the MII device
		 * layer. Hence, we have to take care of media selection
		 * ourselves.
		 */

		sc->sc_mii_inst = instance;
		sc->sc_phys[instance] = BE_PHY_INTERNAL;

		/* Use `ifm_data' to store BMCR bits */
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, instance),
		    0, NULL);
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, instance),
		    BMCR_S100, NULL);
		ifmedia_add(&sc->sc_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, instance),
		    0, NULL);

		printf("on-board transceiver at %s: 10baseT, 100baseTX, auto\n",
		    device_xname(self));

		be_mii_reset(sc, BE_PHY_INTERNAL);
		/* Only set default medium here if there's no external PHY */
		if (instance == 0) {
			be_pal_gate(sc, BE_PHY_INTERNAL);
			ifmedia_set(&sc->sc_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, instance));
		} else
			be_mii_writereg(self,
			    BE_PHY_INTERNAL, MII_BMCR, BMCR_ISO);
	}

	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = bestart;
	ifp->if_ioctl = beioctl;
	ifp->if_watchdog = bewatchdog;
	ifp->if_init = beinit;
	ifp->if_stop = bestop;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* claim 802.1q capability */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
}
Code example #10
void
leattach_ledma(device_t parent, device_t self, void *aux)
{
	struct le_softc *lesc = device_private(self);
	struct lance_softc *sc = &lesc->sc_am7990.lsc;
	struct lsi64854_softc *lsi = device_private(parent);
	struct sbus_attach_args *sa = aux;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	int rseg, error;

	sc->sc_dev = self;
	lesc->sc_bustag = sa->sa_bustag;

	/* Establish link to `ledma' device */
	lesc->sc_dma = lsi;
	lesc->sc_dma->sc_client = lesc;

	/* Map device registers */
	if (sbus_bus_map(sa->sa_bustag,
			 sa->sa_slot,
			 sa->sa_offset,
			 sa->sa_size,
			 0, &lesc->sc_reg) != 0) {
		aprint_error(": cannot map registers\n");
		return;
	}

	/* Allocate buffer memory */
	sc->sc_memsize = MEMSIZE;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, MEMSIZE, 1, MEMSIZE,
					LEDMA_BOUNDARY, BUS_DMA_NOWAIT,
					&lesc->sc_dmamap)) != 0) {
		aprint_error(": DMA map create error %d\n", error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, MEMSIZE, 0, LEDMA_BOUNDARY,
				 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(": DMA buffer alloc error %d\n",error);
		return;
	}

	/* Map DMA buffer into kernel space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, MEMSIZE,
			       (void **)&sc->sc_mem,
			       BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error(": DMA buffer map error %d\n", error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load DMA buffer */
	if ((error = bus_dmamap_load(dmatag, lesc->sc_dmamap, sc->sc_mem,
			MEMSIZE, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error(": DMA buffer map load error %d\n", error);
		bus_dmamem_free(dmatag, &seg, rseg);
		bus_dmamem_unmap(dmatag, sc->sc_mem, MEMSIZE);
		return;
	}

	lesc->sc_laddr = lesc->sc_dmamap->dm_segs[0].ds_addr;
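	/* The LANCE drives only 24 address lines, hence the 0xffffff mask. */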
	sc->sc_addr = lesc->sc_laddr & 0xffffff;
	sc->sc_conf3 = LE_C3_BSWP | LE_C3_ACON | LE_C3_BCON;


	/* Assume SBus is grandparent */
	lesc->sc_sd.sd_reset = (void *)lance_reset;
	sbus_establish(&lesc->sc_sd, parent);

	sc->sc_mediachange = lemediachange;
	sc->sc_mediastatus = lemediastatus;
	sc->sc_supmedia = lemedia;
	sc->sc_nsupmedia = NLEMEDIA;
	sc->sc_defaultmedia = IFM_ETHER|IFM_AUTO;

	prom_getether(sa->sa_node, sc->sc_enaddr);

	sc->sc_copytodesc = lance_copytobuf_contig;
	sc->sc_copyfromdesc = lance_copyfrombuf_contig;
	sc->sc_copytobuf = lance_copytobuf_contig;
	sc->sc_copyfrombuf = lance_copyfrombuf_contig;
	sc->sc_zerobuf = lance_zerobuf_contig;

	sc->sc_rdcsr = lerdcsr;
	sc->sc_wrcsr = lewrcsr;
	sc->sc_hwinit = lehwinit;
	sc->sc_nocarrier = lenocarrier;
	sc->sc_hwreset = lehwreset;

	/* Establish interrupt handler */
	if (sa->sa_nintr != 0)
		(void)bus_intr_establish(sa->sa_bustag, sa->sa_pri, IPL_NET,
					 am7990_intr, sc);

	am7990_config(&lesc->sc_am7990);

	/* now initialize DMA */
	lehwreset(sc);
}
Code example #11
File: zy7_devcfg.c Project: 2trill2spill/freebsd
static int
zy7_devcfg_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct zy7_devcfg_softc *sc = dev->si_drv1;
	void *dma_mem;
	bus_addr_t dma_physaddr;
	int segsz, err;

	DEVCFG_SC_LOCK(sc);

	/* First write?  Reset PL. */
	if (uio->uio_offset == 0 && uio->uio_resid > 0)	{
		zy7_devcfg_init_hw(sc);
		zy7_slcr_preload_pl();
		err = zy7_devcfg_reset_pl(sc);
		if (err != 0) {
			DEVCFG_SC_UNLOCK(sc);
			return (err);
		}
	}

	/* Allocate dma memory and load. */
	err = bus_dmamem_alloc(sc->dma_tag, &dma_mem, BUS_DMA_NOWAIT,
			       &sc->dma_map);
	if (err != 0) {
		DEVCFG_SC_UNLOCK(sc);
		return (err);
	}
	err = bus_dmamap_load(sc->dma_tag, sc->dma_map, dma_mem, PAGE_SIZE,
			      zy7_dma_cb2, &dma_physaddr, 0);
	if (err != 0) {
		bus_dmamem_free(sc->dma_tag, dma_mem, sc->dma_map);
		DEVCFG_SC_UNLOCK(sc);
		return (err);
	}

	while (uio->uio_resid > 0) {
		/* If DONE signal has been set, we shouldn't write anymore. */
		if ((RD4(sc, ZY7_DEVCFG_INT_STATUS) &
		     ZY7_DEVCFG_INT_PCFG_DONE) != 0) {
			err = EIO;
			break;
		}

		/* uiomove the data from user buffer to our dma map. */
		segsz = MIN(PAGE_SIZE, uio->uio_resid);
		DEVCFG_SC_UNLOCK(sc);
		err = uiomove(dma_mem, segsz, uio);
		DEVCFG_SC_LOCK(sc);
		if (err != 0)
			break;

		/* Flush the cache to memory. */
		bus_dmamap_sync(sc->dma_tag, sc->dma_map,
				BUS_DMASYNC_PREWRITE);

		/* Program devcfg's DMA engine.  The ordering of these
		 * register writes is critical.
		 */
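		/*
		 * The source length is programmed in 32-bit words, hence
		 * the (segsz + 3) / 4 round-up below; only the final
		 * chunk carries ZY7_DEVCFG_DMA_ADDR_WAIT_PCAP in its
		 * source address.
		 */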
		if (uio->uio_resid > segsz)
			WR4(sc, ZY7_DEVCFG_DMA_SRC_ADDR,
			    (uint32_t) dma_physaddr);
		else
			WR4(sc, ZY7_DEVCFG_DMA_SRC_ADDR,
			    (uint32_t) dma_physaddr |
			    ZY7_DEVCFG_DMA_ADDR_WAIT_PCAP);
		WR4(sc, ZY7_DEVCFG_DMA_DST_ADDR, ZY7_DEVCFG_DMA_ADDR_ILLEGAL);
		WR4(sc, ZY7_DEVCFG_DMA_SRC_LEN, (segsz+3)/4);
		WR4(sc, ZY7_DEVCFG_DMA_DST_LEN, 0);

		/* Now clear done bit and set up DMA done interrupt. */
		WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL);
		WR4(sc, ZY7_DEVCFG_INT_MASK, ~ZY7_DEVCFG_INT_DMA_DONE);

		/* Wait for DMA done interrupt. */
		err = mtx_sleep(sc->dma_map, &sc->sc_mtx, PCATCH,
				"zy7dma", hz);
		if (err != 0)
			break;

		bus_dmamap_sync(sc->dma_tag, sc->dma_map,
				BUS_DMASYNC_POSTWRITE);

		/* Check DONE signal. */
		if ((RD4(sc, ZY7_DEVCFG_INT_STATUS) &
		     ZY7_DEVCFG_INT_PCFG_DONE) != 0)
			zy7_slcr_postload_pl(zy7_en_level_shifters);
	}

	bus_dmamap_unload(sc->dma_tag, sc->dma_map);
	bus_dmamem_free(sc->dma_tag, dma_mem, sc->dma_map);
	DEVCFG_SC_UNLOCK(sc);
	return (err);
}
Code example #12
/* ARGSUSED */
void
ie_pcctwo_attach(device_t parent, device_t self, void *aux)
{
	struct pcctwo_attach_args *pa;
	struct ie_pcctwo_softc *ps;
	struct ie_softc *sc;
	bus_dma_segment_t seg;
	int rseg;

	pa = aux;
	ps = device_private(self);
	sc = &ps->ps_ie;
	sc->sc_dev = self;

	/* Map the MPU controller registers in PCCTWO space */
	ps->ps_bust = pa->pa_bust;
	bus_space_map(pa->pa_bust, pa->pa_offset, IE_MPUREG_SIZE,
	    0, &ps->ps_bush);

	/* Get contiguous DMA-able memory for the IE chip */
	if (bus_dmamem_alloc(pa->pa_dmat, ether_data_buff_size, PAGE_SIZE, 0,
		&seg, 1, &rseg,
		BUS_DMA_NOWAIT | BUS_DMA_ONBOARD_RAM | BUS_DMA_24BIT) != 0) {
		aprint_error_dev(self, "Failed to allocate ether buffer\n");
		return;
	}
	if (bus_dmamem_map(pa->pa_dmat, &seg, rseg, ether_data_buff_size,
	    (void **) & sc->sc_maddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
		aprint_error_dev(self, "Failed to map ether buffer\n");
		bus_dmamem_free(pa->pa_dmat, &seg, rseg);
		return;
	}
	sc->bt = pa->pa_bust;
	sc->bh = (bus_space_handle_t) sc->sc_maddr;	/* XXXSCW Better way? */
	sc->sc_iobase = (void *) seg.ds_addr;
	sc->sc_msize = ether_data_buff_size;
	memset(sc->sc_maddr, 0, ether_data_buff_size);

	sc->hwreset = ie_reset;
	sc->hwinit = ie_hwinit;
	sc->chan_attn = ie_atten;
	sc->intrhook = ie_intrhook;
	sc->memcopyin = ie_copyin;
	sc->memcopyout = ie_copyout;
	sc->ie_bus_barrier = NULL;
	sc->ie_bus_read16 = ie_read_16;
	sc->ie_bus_write16 = ie_write_16;
	sc->ie_bus_write24 = ie_write_24;
	sc->sc_mediachange = NULL;
	sc->sc_mediastatus = NULL;

	sc->scp = 0;
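	/*
	 * Shared-memory layout: SCP at offset 0, ISCP rounded up to the
	 * next 16-byte boundary via (IE_SCP_SZ + 15) & ~15, then the SCB,
	 * with the remainder used as the buffer area.
	 */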
	sc->iscp = sc->scp + ((IE_SCP_SZ + 15) & ~15);
	sc->scb = sc->iscp + IE_ISCP_SZ;
	sc->buf_area = sc->scb + IE_SCB_SZ;
	sc->buf_area_sz = sc->sc_msize - (sc->buf_area - sc->scp);

	/*
	 * BUS_USE -> Interrupt Active High (edge-triggered),
	 *            Lock function enabled,
	 *            Internal bus throttle timer triggering,
	 *            82586 operating mode.
	 */
	ie_write_16(sc, IE_SCP_BUS_USE(sc->scp), IE_BUS_USE);
	ie_write_24(sc, IE_SCP_ISCP(sc->scp), sc->iscp);
	ie_write_16(sc, IE_ISCP_SCB(sc->iscp), sc->scb);
	ie_write_24(sc, IE_ISCP_BASE(sc->iscp), sc->scp);

	/* This has the side-effect of resetting the chip */
	i82586_proberam(sc);

	/* Attach the MI back-end */
	i82586_attach(sc, "onboard", mvme_ea, NULL, 0, 0);

	/* Register the event counter */
	evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_INTR,
	    pcctwointr_evcnt(pa->pa_ipl), "ether", device_xname(self));

	/* Finally, hook the hardware interrupt */
	pcctwointr_establish(PCCTWOV_LANC_IRQ, i82586_intr, pa->pa_ipl, sc,
	    &ps->ps_evcnt);
}
Code example #13
File: if_le_pci.c Project: AmirAbrams/haiku
static int
le_pci_attach(device_t dev)
{
	struct le_pci_softc *lesc;
	struct lance_softc *sc;
	int error, i;

	lesc = device_get_softc(dev);
	sc = &lesc->sc_am79900.lsc;

	LE_LOCK_INIT(sc, device_get_nameunit(dev));

	pci_enable_busmaster(dev);

	i = PCIR_BAR(0);
	lesc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
	    &i, RF_ACTIVE);
	if (lesc->sc_rres == NULL) {
		device_printf(dev, "cannot allocate registers\n");
		error = ENXIO;
		goto fail_mtx;
	}

	i = 0;
	if ((lesc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &i, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(dev, "cannot allocate interrupt\n");
		error = ENXIO;
		goto fail_rres;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &lesc->sc_pdmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate parent DMA tag\n");
		goto fail_ires;
	}

	sc->sc_memsize = PCNET_MEMSIZE;
	/*
	 * For Am79C970A, Am79C971 and Am79C978 the init block must be 2-byte
	 * aligned and the ring descriptors must be 16-byte aligned when using
	 * a 32-bit software style.
	 */
	error = bus_dma_tag_create(
	    lesc->sc_pdmat,		/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sc->sc_memsize,		/* maxsize */
	    1,				/* nsegments */
	    sc->sc_memsize,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &lesc->sc_dmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate buffer DMA tag\n");
		goto fail_pdtag;
	}

	error = bus_dmamem_alloc(lesc->sc_dmat, (void **)&sc->sc_mem,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &lesc->sc_dmam);
	if (error != 0) {
		device_printf(dev, "cannot allocate DMA buffer memory\n");
		goto fail_dtag;
	}

	sc->sc_addr = 0;
	error = bus_dmamap_load(lesc->sc_dmat, lesc->sc_dmam, sc->sc_mem,
	    sc->sc_memsize, le_pci_dma_callback, sc, 0);
	if (error != 0 || sc->sc_addr == 0) {
		device_printf(dev, "cannot load DMA buffer map\n");
		goto fail_dmem;
	}

	sc->sc_flags = LE_BSWAP;
	sc->sc_conf3 = 0;

	sc->sc_mediastatus = NULL;
	switch (pci_get_device(dev)) {
	case AMD_PCNET_HOME:
		sc->sc_mediachange = le_pci_mediachange;
		sc->sc_supmedia = le_home_supmedia;
		sc->sc_nsupmedia = sizeof(le_home_supmedia) / sizeof(int);
		sc->sc_defaultmedia = le_home_supmedia[0];
		break;
	default:
		sc->sc_mediachange = le_pci_mediachange;
		sc->sc_supmedia = le_pci_supmedia;
		sc->sc_nsupmedia = sizeof(le_pci_supmedia) / sizeof(int);
		sc->sc_defaultmedia = le_pci_supmedia[0];
	}

	/*
	 * Extract the physical MAC address from the ROM.
	 */
	bus_read_region_1(lesc->sc_rres, 0, sc->sc_enaddr,
	    sizeof(sc->sc_enaddr));

	sc->sc_copytodesc = lance_copytobuf_contig;
	sc->sc_copyfromdesc = lance_copyfrombuf_contig;
	sc->sc_copytobuf = lance_copytobuf_contig;
	sc->sc_copyfrombuf = lance_copyfrombuf_contig;
	sc->sc_zerobuf = lance_zerobuf_contig;

	sc->sc_rdcsr = le_pci_rdcsr;
	sc->sc_wrcsr = le_pci_wrcsr;
	sc->sc_hwreset = le_pci_hwreset;
	sc->sc_hwinit = NULL;
	sc->sc_hwintr = NULL;
	sc->sc_nocarrier = NULL;

	error = am79900_config(&lesc->sc_am79900, device_get_name(dev),
	    device_get_unit(dev));
	if (error != 0) {
		device_printf(dev, "cannot attach Am79900\n");
		goto fail_dmap;
	}

	error = bus_setup_intr(dev, lesc->sc_ires, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, am79900_intr, sc, &lesc->sc_ih);
	if (error != 0) {
		device_printf(dev, "cannot set up interrupt\n");
		goto fail_am79900;
	}

	return (0);

 fail_am79900:
	am79900_detach(&lesc->sc_am79900);
 fail_dmap:
	bus_dmamap_unload(lesc->sc_dmat, lesc->sc_dmam);
 fail_dmem:
	bus_dmamem_free(lesc->sc_dmat, sc->sc_mem, lesc->sc_dmam);
 fail_dtag:
	bus_dma_tag_destroy(lesc->sc_dmat);
 fail_pdtag:
	bus_dma_tag_destroy(lesc->sc_pdmat);
 fail_ires:
	bus_release_resource(dev, SYS_RES_IRQ,
	    rman_get_rid(lesc->sc_ires), lesc->sc_ires);
 fail_rres:
	bus_release_resource(dev, SYS_RES_IOPORT,
	    rman_get_rid(lesc->sc_rres), lesc->sc_rres);
 fail_mtx:
	LE_LOCK_DESTROY(sc);
	return (error);
}
Code example #14
File: mrsas_ioctl.c Project: JabirTech/Source
/**
 * mrsas_passthru:        Handle pass-through commands
 * input:                 Adapter instance soft state
 *                        argument pointer
 *
 * This function is called from mrsas_ioctl() to handle pass-through and
 * ioctl commands to Firmware.
 */
int mrsas_passthru( struct mrsas_softc *sc, void *arg )
{
    struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
    union  mrsas_frame *in_cmd = (union mrsas_frame *) &(user_ioc->frame.raw);
    struct mrsas_mfi_cmd *cmd = NULL;
    bus_dma_tag_t ioctl_data_tag[MAX_IOCTL_SGE];
    bus_dmamap_t ioctl_data_dmamap[MAX_IOCTL_SGE];
    void *ioctl_data_mem[MAX_IOCTL_SGE];  // ioctl data virtual addr
    bus_addr_t ioctl_data_phys_addr[MAX_IOCTL_SGE]; // ioctl data phys addr
    bus_dma_tag_t ioctl_sense_tag = 0;
    bus_dmamap_t ioctl_sense_dmamap = 0;
    void *ioctl_sense_mem = 0;
    bus_addr_t ioctl_sense_phys_addr = 0;
    int i, adapter, ioctl_data_size, ioctl_sense_size, ret=0;
    struct mrsas_sge32 *kern_sge32;
    unsigned long *sense_ptr;

    /* For debug - uncomment the following line for debug output */
    //mrsas_dump_ioctl(sc, user_ioc);

    /*
     * Check for NOP from MegaCli... MegaCli can issue a DCMD of 0.  In this
     * case do nothing and return 0 to it as status.
     */
    if (in_cmd->dcmd.opcode == 0) {
        device_printf(sc->mrsas_dev, "In %s() Got a NOP\n", __func__);
        user_ioc->frame.hdr.cmd_status = MFI_STAT_OK;
        return (0);
    }

    /* Validate host_no */
    adapter = user_ioc->host_no;
    if (adapter != device_get_unit(sc->mrsas_dev)) {
        device_printf(sc->mrsas_dev, "In %s() IOCTL not for me!\n", __func__);
        return(ENOENT);
    }

    /* Validate SGL length */
    if (user_ioc->sge_count > MAX_IOCTL_SGE) {
        device_printf(sc->mrsas_dev, "In %s() SGL is too long (%d > 8).\n",
                      __func__, user_ioc->sge_count);
        return(ENOENT);
    }

    /* Get a command */
    cmd = mrsas_get_mfi_cmd(sc);
    if (!cmd) {
        device_printf(sc->mrsas_dev, "Failed to get a free cmd for IOCTL\n");
        return(ENOMEM);
    }

    /*
     * User's IOCTL packet has 2 frames (maximum). Copy those two
     * frames into our cmd's frames. cmd->frame's context will get
     * overwritten when we copy from user's frames. So set that value
     * alone separately
     */
    memcpy(cmd->frame, user_ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
    cmd->frame->hdr.context = cmd->index;
    cmd->frame->hdr.pad_0 = 0;
    cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
                               MFI_FRAME_SENSE64);

    /*
     * The management interface between applications and the fw uses
     * MFI frames. E.g, RAID configuration changes, LD property changes
     * etc are accomplishes through different kinds of MFI frames. The
     * driver needs to care only about substituting user buffers with
     * kernel buffers in SGLs. The location of SGL is embedded in the
     * struct iocpacket itself.
     */
    kern_sge32 = (struct mrsas_sge32 *)
                 ((unsigned long)cmd->frame + user_ioc->sgl_off);

    /*
     * For each user buffer, create a mirror buffer and copy in
     */
    for (i=0; i < user_ioc->sge_count; i++) {
        if (!user_ioc->sgl[i].iov_len)
            continue;
        ioctl_data_size = user_ioc->sgl[i].iov_len;
        if (bus_dma_tag_create( sc->mrsas_parent_tag,   // parent
                                1, 0,                   // algnmnt, boundary
                                BUS_SPACE_MAXADDR_32BIT,// lowaddr
                                BUS_SPACE_MAXADDR,      // highaddr
                                NULL, NULL,             // filter, filterarg
                                ioctl_data_size,        // maxsize
                                1,                      // nsegments
                                ioctl_data_size,        // maxsegsize
                                BUS_DMA_ALLOCNOW,       // flags
                                NULL, NULL,             // lockfunc, lockarg
                                &ioctl_data_tag[i])) {
            device_printf(sc->mrsas_dev, "Cannot allocate ioctl data tag\n");
            return (ENOMEM);
        }
        if (bus_dmamem_alloc(ioctl_data_tag[i], (void **)&ioctl_data_mem[i],
                             (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_data_dmamap[i])) {
            device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
            return (ENOMEM);
        }
        if (bus_dmamap_load(ioctl_data_tag[i], ioctl_data_dmamap[i],
                            ioctl_data_mem[i], ioctl_data_size, mrsas_alloc_cb,
                            &ioctl_data_phys_addr[i], BUS_DMA_NOWAIT)) {
            device_printf(sc->mrsas_dev, "Cannot load ioctl data mem\n");
            return (ENOMEM);
        }

        /* Save the physical address and length */
        kern_sge32[i].phys_addr = (u_int32_t)ioctl_data_phys_addr[i];
        kern_sge32[i].length = user_ioc->sgl[i].iov_len;

        /* Copy in data from user space */
        ret = copyin(user_ioc->sgl[i].iov_base, ioctl_data_mem[i],
                     user_ioc->sgl[i].iov_len);
        if (ret) {
            device_printf(sc->mrsas_dev, "IOCTL copyin failed!\n");
            goto out;
        }
    }

    ioctl_sense_size = user_ioc->sense_len;
    if (user_ioc->sense_len) {
        if (bus_dma_tag_create( sc->mrsas_parent_tag,   // parent
                                1, 0,                   // algnmnt, boundary
                                BUS_SPACE_MAXADDR_32BIT,// lowaddr
                                BUS_SPACE_MAXADDR,      // highaddr
                                NULL, NULL,             // filter, filterarg
                                ioctl_sense_size,       // maxsize
                                1,                      // nsegments
                                ioctl_sense_size,       // maxsegsize
                                BUS_DMA_ALLOCNOW,       // flags
                                NULL, NULL,             // lockfunc, lockarg
                                &ioctl_sense_tag)) {
            device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense tag\n");
            return (ENOMEM);
        }
        if (bus_dmamem_alloc(ioctl_sense_tag, (void **)&ioctl_sense_mem,
                             (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_sense_dmamap)) {
            device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
            return (ENOMEM);
        }
        if (bus_dmamap_load(ioctl_sense_tag, ioctl_sense_dmamap,
                            ioctl_sense_mem, ioctl_sense_size, mrsas_alloc_cb,
                            &ioctl_sense_phys_addr, BUS_DMA_NOWAIT)) {
            device_printf(sc->mrsas_dev, "Cannot load ioctl sense mem\n");
            return (ENOMEM);
        }
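        /*
         * The sense-address field inside the MFI frame must hold the
         * DMA address of the kernel sense buffer so the firmware can
         * deposit sense data there.
         */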
        sense_ptr = (unsigned long *)
            ((unsigned long)cmd->frame + user_ioc->sense_off);
        *sense_ptr = ioctl_sense_phys_addr;
    }

    /*
     * Set the sync_cmd flag so that the ISR knows not to complete this
     * cmd to the SCSI mid-layer
     */
    cmd->sync_cmd = 1;
    mrsas_issue_blocked_cmd(sc, cmd);
    cmd->sync_cmd = 0;

    /*
     * copy out the kernel buffers to user buffers
     */
    for (i = 0; i < user_ioc->sge_count; i++) {
        ret = copyout(ioctl_data_mem[i], user_ioc->sgl[i].iov_base,
                      user_ioc->sgl[i].iov_len);
        if (ret) {
            device_printf(sc->mrsas_dev, "IOCTL copyout failed!\n");
            goto out;
        }
    }

    /*
     * copy out the sense
     */
    if (user_ioc->sense_len) {
        /*
         * sense_buff points to the location that has the user
         * sense buffer address
         */
        sense_ptr = (unsigned long *) ((unsigned long)user_ioc->frame.raw +
                                       user_ioc->sense_off);
        ret = copyout(ioctl_sense_mem, (unsigned long*)*sense_ptr,
                      user_ioc->sense_len);
        if (ret) {
            device_printf(sc->mrsas_dev, "IOCTL sense copyout failed!\n");
            goto out;
        }
    }

    /*
     * Return command status to user space
     */
    memcpy(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status,
           sizeof(u_int8_t));

out:
    /*
     * Release sense buffer
     */
    if (ioctl_sense_phys_addr)
        bus_dmamap_unload(ioctl_sense_tag, ioctl_sense_dmamap);
    if (ioctl_sense_mem)
        bus_dmamem_free(ioctl_sense_tag, ioctl_sense_mem, ioctl_sense_dmamap);
    if (ioctl_sense_tag)
        bus_dma_tag_destroy(ioctl_sense_tag);

    /*
     * Release data buffers
     */
    for (i = 0; i < user_ioc->sge_count; i++) {
        if (!user_ioc->sgl[i].iov_len)
            continue;
        if (ioctl_data_phys_addr[i])
            bus_dmamap_unload(ioctl_data_tag[i], ioctl_data_dmamap[i]);
        if (ioctl_data_mem[i] != NULL)
            bus_dmamem_free(ioctl_data_tag[i], ioctl_data_mem[i],
                            ioctl_data_dmamap[i]);
        if (ioctl_data_tag[i] != NULL)
            bus_dma_tag_destroy(ioctl_data_tag[i]);
    }

    /* Free command */
    mrsas_release_mfi_cmd(cmd);

    return(ret);
}
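
Both mrsas excerpts on this page hand bus_dmamap_load() a callback named mrsas_alloc_cb to capture the DMA address of a single-segment buffer, but the callback itself is not part of the excerpt. A minimal sketch of such a load callback, assuming the standard FreeBSD busdma callback signature and a single-segment mapping (the function name here is illustrative, not the driver's actual mrsas_alloc_cb), looks like this:

/*
 * Sketch of a single-segment bus_dmamap_load() callback (kernel context;
 * the usual <sys/bus.h> / <machine/bus.h> declarations are assumed).
 * It copies the bus address of the only segment into the bus_addr_t the
 * caller passed as "arg", and leaves it zero on error so cleanup tests
 * such as "if (ioctl_data_phys_addr[i])" above keep working.
 */
static void
example_alloc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr = arg;

	if (error != 0 || nsegs != 1) {
		*addr = 0;
		return;
	}
	*addr = segs[0].ds_addr;
}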
Code Example #15
File: if_kr.c Project: edgar-pek/PerspicuOS
static void
kr_dma_free(struct kr_softc *sc)
{
	struct kr_txdesc	*txd;
	struct kr_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->kr_cdata.kr_tx_ring_tag) {
		if (sc->kr_cdata.kr_tx_ring_map)
			bus_dmamap_unload(sc->kr_cdata.kr_tx_ring_tag,
			    sc->kr_cdata.kr_tx_ring_map);
		if (sc->kr_cdata.kr_tx_ring_map &&
		    sc->kr_rdata.kr_tx_ring)
			bus_dmamem_free(sc->kr_cdata.kr_tx_ring_tag,
			    sc->kr_rdata.kr_tx_ring,
			    sc->kr_cdata.kr_tx_ring_map);
		sc->kr_rdata.kr_tx_ring = NULL;
		sc->kr_cdata.kr_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->kr_cdata.kr_tx_ring_tag);
		sc->kr_cdata.kr_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->kr_cdata.kr_rx_ring_tag) {
		if (sc->kr_cdata.kr_rx_ring_map)
			bus_dmamap_unload(sc->kr_cdata.kr_rx_ring_tag,
			    sc->kr_cdata.kr_rx_ring_map);
		if (sc->kr_cdata.kr_rx_ring_map &&
		    sc->kr_rdata.kr_rx_ring)
			bus_dmamem_free(sc->kr_cdata.kr_rx_ring_tag,
			    sc->kr_rdata.kr_rx_ring,
			    sc->kr_cdata.kr_rx_ring_map);
		sc->kr_rdata.kr_rx_ring = NULL;
		sc->kr_cdata.kr_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->kr_cdata.kr_rx_ring_tag);
		sc->kr_cdata.kr_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->kr_cdata.kr_tx_tag) {
		for (i = 0; i < KR_TX_RING_CNT; i++) {
			txd = &sc->kr_cdata.kr_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->kr_cdata.kr_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->kr_cdata.kr_tx_tag);
		sc->kr_cdata.kr_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->kr_cdata.kr_rx_tag) {
		for (i = 0; i < KR_RX_RING_CNT; i++) {
			rxd = &sc->kr_cdata.kr_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->kr_cdata.kr_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->kr_cdata.kr_rx_sparemap) {
			bus_dmamap_destroy(sc->kr_cdata.kr_rx_tag,
			    sc->kr_cdata.kr_rx_sparemap);
			sc->kr_cdata.kr_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->kr_cdata.kr_rx_tag);
		sc->kr_cdata.kr_rx_tag = NULL;
	}

	if (sc->kr_cdata.kr_parent_tag) {
		bus_dma_tag_destroy(sc->kr_cdata.kr_parent_tag);
		sc->kr_cdata.kr_parent_tag = NULL;
	}
}
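
kr_dma_free() above tears everything down strictly in reverse of how it was built: unload the map, free the DMA memory, destroy the map, destroy the tag, and only then the parent tag. The allocation side is not part of this excerpt; the following is a compressed sketch of one ring allocation under those assumptions, reusing a single-segment callback like the one sketched earlier. The macro and field names KR_RING_ALIGN, KR_TX_RING_SIZE and kr_tx_ring_paddr are illustrative assumptions, not taken from the driver.

/* Hypothetical one-ring allocation that kr_dma_free() would later unwind. */
static int
example_ring_alloc(struct kr_softc *sc)
{
	int error;

	/* Tag for one contiguous descriptor ring. */
	error = bus_dma_tag_create(sc->kr_cdata.kr_parent_tag,
	    KR_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,				/* filter, filterarg */
	    KR_TX_RING_SIZE, 1, KR_TX_RING_SIZE,
	    0, NULL, NULL,
	    &sc->kr_cdata.kr_tx_ring_tag);
	if (error != 0)
		return (error);

	/* Allocate the zeroed ring and its map in one call. */
	error = bus_dmamem_alloc(sc->kr_cdata.kr_tx_ring_tag,
	    (void **)&sc->kr_rdata.kr_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->kr_cdata.kr_tx_ring_map);
	if (error != 0)
		return (error);

	/* Load it; the callback records the single segment's bus address. */
	error = bus_dmamap_load(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map, sc->kr_rdata.kr_tx_ring,
	    KR_TX_RING_SIZE, example_alloc_cb,
	    &sc->kr_rdata.kr_tx_ring_paddr, BUS_DMA_NOWAIT);
	return (error);
}

On any failure the caller is expected to run the kr_dma_free()-style unwind above, which is why that routine checks each tag, map and ring pointer before releasing it.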
Code Example #16
/*
 * Allocate a drm dma handle, allocate memory fit for DMA, and map it.
 *
 * XXX This is called drm_pci_alloc for hysterical raisins; it is not
 * specific to PCI.
 *
 * XXX For now, we use non-blocking allocations because this is called
 * by ioctls with the drm global mutex held.
 *
 * XXX Error information is lost because this returns NULL on failure,
 * not even an error embedded in a pointer.
 */
struct drm_dma_handle *
drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
	int nsegs;
	int error;

	/*
	 * Allocate a drm_dma_handle record.
	 */
	struct drm_dma_handle *const dmah = kmem_alloc(sizeof(*dmah),
	    KM_NOSLEEP);
	if (dmah == NULL) {
		error = -ENOMEM;
		goto out;
	}
	dmah->dmah_tag = dev->dmat;

	/*
	 * Allocate the requested amount of DMA-safe memory.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamem_alloc(dmah->dmah_tag, size, align, 0,
	    &dmah->dmah_seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail0;
	KASSERT(nsegs == 1);

	/*
	 * XXX Old drm passed BUS_DMA_NOWAIT below but BUS_DMA_WAITOK
	 * above.  WTF?
	 */

	/*
	 * Map the DMA-safe memory into kernel virtual address space.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamem_map(dmah->dmah_tag, &dmah->dmah_seg, 1, size,
	    &dmah->vaddr,
	    (BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_NOCACHE));
	if (error)
		goto fail1;
	dmah->size = size;

	/*
	 * Create a map for DMA transfers.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamap_create(dmah->dmah_tag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dmah->dmah_map);
	if (error)
		goto fail2;

	/*
	 * Load the kva buffer into the map for DMA transfers.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamap_load(dmah->dmah_tag, dmah->dmah_map, dmah->vaddr,
	    size, NULL, (BUS_DMA_NOWAIT | BUS_DMA_NOCACHE));
	if (error)
		goto fail3;

	/* Record the bus address for convenient reference.  */
	dmah->busaddr = dmah->dmah_map->dm_segs[0].ds_addr;

	/* Zero the DMA buffer.  XXX Yikes!  Is this necessary?  */
	memset(dmah->vaddr, 0, size);

	/* Success!  */
	return dmah;

fail3:	bus_dmamap_destroy(dmah->dmah_tag, dmah->dmah_map);
fail2:	bus_dmamem_unmap(dmah->dmah_tag, dmah->vaddr, dmah->size);
fail1:	bus_dmamem_free(dmah->dmah_tag, &dmah->dmah_seg, 1);
fail0:	dmah->dmah_tag = NULL;	/* XXX paranoia */
	kmem_free(dmah, sizeof(*dmah));
out:	DRM_DEBUG("drm_pci_alloc failed: %d\n", error);
	return NULL;
}
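
The fail labels in drm_pci_alloc() already spell out the release order; the matching free routine is not included in this excerpt. A minimal sketch of that teardown, using only the drm_dma_handle fields set above and making no claim about the tree's actual drm_pci_free():

/*
 * Sketch of the teardown mirroring drm_pci_alloc() (NetBSD bus_dma(9)
 * and kmem(9) assumed).
 */
static void
example_drm_pci_free(struct drm_device *dev __unused,
    struct drm_dma_handle *dmah)
{

	if (dmah == NULL)
		return;
	bus_dmamap_unload(dmah->dmah_tag, dmah->dmah_map);
	bus_dmamap_destroy(dmah->dmah_tag, dmah->dmah_map);
	bus_dmamem_unmap(dmah->dmah_tag, dmah->vaddr, dmah->size);
	bus_dmamem_free(dmah->dmah_tag, &dmah->dmah_seg, 1);
	kmem_free(dmah, sizeof(*dmah));
}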
Code Example #17
int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(mpt->sc_dev, "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to allocate reply area, error = %d\n",
		    error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to map reply area, error = %d\n",
		    error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to create reply DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to load reply DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to allocate request area, "
		    "error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to map request area, error = %d\n",
		    error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to create request DMA map, "
		    "error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev, "unable to load request DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(mpt->sc_dev, "unable to create req %d DMA map, "
			    "error = %d\n", i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}
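
mpt_dma_mem_alloc() only sets the reply and request areas up; at I/O time the driver still has to keep the CPU caches and the adapter's view of that memory consistent. A sketch of the sync discipline such a driver typically follows around one request, assuming NetBSD's bus_dmamap_sync(9) signature and the per-request offsets computed above (the exact sync points in the real mpt driver may differ):

/* Illustrative only: cache maintenance around one request/reply cycle. */
static void
example_mpt_sync(mpt_softc_t *mpt, request_t *req, size_t req_len)
{

	/* CPU filled in the request; push it out before the adapter reads it. */
	bus_dmamap_sync(mpt->sc_dmat, mpt->request_dmap,
	    req->index * MPT_REQUEST_AREA, req_len, BUS_DMASYNC_PREWRITE);

	/* ... ring the doorbell and wait for the reply interrupt ... */

	/* Adapter wrote the reply page; invalidate before the CPU reads it. */
	bus_dmamap_sync(mpt->sc_dmat, mpt->reply_dmap, 0, PAGE_SIZE,
	    BUS_DMASYNC_POSTREAD);
}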
Code Example #18
File: if_ae.c Project: yazshel/netbsd-kernel
/*
 * ae_attach:
 *
 *	Attach an ae interface to the system.
 */
void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, error;

	sc->sc_dev = self;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address.
	 */
	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-address");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    device_xname(sc->sc_dev));
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", device_xname(sc->sc_dev), error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ae_mii_readreg;
	sc->sc_mii.mii_writereg = ae_mii_writereg;
	sc->sc_mii.mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb);

	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(sc->sc_dev));

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(device_xname(sc->sc_dev),
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    device_xname(sc->sc_dev));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}
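
ae_attach() above, and the mec, cas and sq attach routines further down this page, all repeat the same four-step control-data setup: bus_dmamem_alloc(), bus_dmamem_map(), bus_dmamap_create(), bus_dmamap_load(), each step with its own failure label. A hedged sketch of how that sequence could be factored into one helper with the same reverse-order unwinding (the helper is hypothetical, not code from any of these drivers):

/* Hypothetical helper: allocate, map, create and load one DMA-safe area. */
static int
example_alloc_ctrl(bus_dma_tag_t dmat, size_t size, bus_dma_segment_t *seg,
    int *nseg, void **kvap, bus_dmamap_t *mapp)
{
	int error;

	if ((error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0,
	    seg, 1, nseg, BUS_DMA_NOWAIT)) != 0)
		return error;
	if ((error = bus_dmamem_map(dmat, seg, *nseg, size, kvap,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0)
		goto fail_free;
	if ((error = bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0)
		goto fail_unmap;
	if ((error = bus_dmamap_load(dmat, *mapp, *kvap, size, NULL,
	    BUS_DMA_NOWAIT)) != 0)
		goto fail_destroy;
	return 0;

 fail_destroy:
	bus_dmamap_destroy(dmat, *mapp);
 fail_unmap:
	bus_dmamem_unmap(dmat, *kvap, size);
 fail_free:
	bus_dmamem_free(dmat, seg, *nseg);
	return error;
}

With such a helper, an attach routine keeps only the driver-specific failure handling (unmapping registers, destroying per-buffer maps) in its own fail labels.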
Code Example #19
File: if_mec.c Project: repos-holder/openbsd-patches
void
mec_attach(struct device *parent, struct device *self, void *aux)
{
	struct mec_softc *sc = (void *)self;
	struct confargs *ca = aux;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t command;
	struct mii_softc *child;
	bus_dma_segment_t seg;
	int i, err, rseg;

	sc->sc_st = ca->ca_iot;
	if (bus_space_map(sc->sc_st, ca->ca_baseaddr, MEC_NREGS, 0,
	    &sc->sc_sh) != 0) {
		printf(": can't map i/o space\n");
		return;
	}

	/* Set up DMA structures. */
	sc->sc_dmat = ca->ca_dmat;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((err = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	/*
	 * XXX needs re-think...
	 * control data structures contain whole RX data buffer, so
	 * BUS_DMA_COHERENT (which disables cache) may cause some performance
	 * issue on copying data from the RX buffer to mbuf on normal memory,
	 * though we have to make sure all bus_dmamap_sync(9) ops are called
	 * properly in that case.
	 */
	if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct mec_control_data),
	    (caddr_t *)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}
	memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));

	if ((err = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct mec_control_data), 1,
	    sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    err);
		goto fail_2;
	}
	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct mec_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    err);
		goto fail_3;
	}

	/* Create TX buffer DMA maps. */
	for (i = 0; i < MEC_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	timeout_set(&sc->sc_tick_ch, mec_tick, sc);

	/* Use the Ethernet address from the ARCBIOS. */
	enaddr_aton(bios_enaddr, sc->sc_ac.ac_enaddr);

	/* Reset device. */
	mec_reset(sc);

	command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);

	printf(": MAC-110 rev %d, address %s\n",
	    (command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/* Done, now attach everything. */

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mec_mii_readreg;
	sc->sc_mii.mii_writereg = mec_mii_writereg;
	sc->sc_mii.mii_statchg = mec_statchg;

	/* Set up PHY properties. */
	ifmedia_init(&sc->sc_mii.mii_media, 0, mec_mediachange,
	    mec_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child == NULL) {
		/* No PHY attached. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
		sc->sc_phyaddr = child->mii_phy;
	}

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = mec_ioctl;
	ifp->if_start = mec_start;
	ifp->if_watchdog = mec_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	IFQ_SET_MAXLEN(&ifp->if_snd, MEC_NTXDESC - 1);
	ether_ifattach(ifp);

	/* Establish interrupt handler. */
	macebus_intr_establish(NULL, ca->ca_intr, IST_EDGE, IPL_NET,
	    mec_intr, sc, sc->sc_dev.dv_xname);

	/* Set hook to stop interface on shutdown. */
	sc->sc_sdhook = shutdownhook_establish(mec_shutdown, sc);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_4:
	for (i = 0; i < MEC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct mec_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
Code Example #20
File: if_ae.c Project: yazshel/netbsd-kernel
/*
 * ae_detach:
 *
 *	Detach a device interface.
 */
int
ae_detach(device_t self, int flags)
{
	struct ae_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct ae_txsoft *txs;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & AE_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	if (sc->sc_tick)
		callout_stop(&sc->sc_tick_callout);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	rnd_detach_source(&sc->sc_rnd_source);
	ether_ifdetach(ifp);
	if_detach(ifp);

	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
	}
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);


	return (0);
}
Code Example #21
File: mrsas_ioctl.c Project: Lxg1582/freebsd
/*
 * mrsas_passthru:	Handle pass-through commands
 * input:			Adapter instance soft state argument pointer
 *
 * This function is called from mrsas_ioctl() to handle pass-through and ioctl
 * commands to Firmware.
 */
int
mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd)
{
	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;

#ifdef COMPAT_FREEBSD32
	struct mrsas_iocpacket32 *user_ioc32 = (struct mrsas_iocpacket32 *)arg;

#endif
	union mrsas_frame *in_cmd = (union mrsas_frame *)&(user_ioc->frame.raw);
	struct mrsas_mfi_cmd *cmd = NULL;
	bus_dma_tag_t ioctl_data_tag[MAX_IOCTL_SGE];
	bus_dmamap_t ioctl_data_dmamap[MAX_IOCTL_SGE];
	void *ioctl_data_mem[MAX_IOCTL_SGE];
	bus_addr_t ioctl_data_phys_addr[MAX_IOCTL_SGE];
	bus_dma_tag_t ioctl_sense_tag = 0;
	bus_dmamap_t ioctl_sense_dmamap = 0;
	void *ioctl_sense_mem = 0;
	bus_addr_t ioctl_sense_phys_addr = 0;
	int i, ioctl_data_size = 0, ioctl_sense_size, ret = 0;
	struct mrsas_sge32 *kern_sge32;
	unsigned long *sense_ptr;
	uint8_t *iov_base_ptrin = NULL;
	size_t iov_len = 0;

	/*
	 * Check for NOP from MegaCli... MegaCli can issue a DCMD of 0.  In
	 * this case do nothing and return 0 to it as status.
	 */
	if (in_cmd->dcmd.opcode == 0) {
		device_printf(sc->mrsas_dev, "In %s() Got a NOP\n", __func__);
		user_ioc->frame.hdr.cmd_status = MFI_STAT_OK;
		return (0);
	}
	/* Validate SGL length */
	if (user_ioc->sge_count > MAX_IOCTL_SGE) {
		device_printf(sc->mrsas_dev, "In %s() SGL is too long (%d > 8).\n",
		    __func__, user_ioc->sge_count);
		return (ENOENT);
	}
	/* Get a command */
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd for IOCTL\n");
		return (ENOMEM);
	}
	/*
	 * User's IOCTL packet has 2 frames (maximum). Copy those two frames
	 * into our cmd's frames. cmd->frame's context will get overwritten
	 * when we copy from user's frames. So set that value alone
	 * separately
	 */
	memcpy(cmd->frame, user_ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
	cmd->frame->hdr.context = cmd->index;
	cmd->frame->hdr.pad_0 = 0;
	cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
	    MFI_FRAME_SENSE64);

	/*
	 * The management interface between applications and the fw uses MFI
	 * frames. E.g, RAID configuration changes, LD property changes etc
	 * are accomplishes through different kinds of MFI frames. The driver
	 * needs to care only about substituting user buffers with kernel
	 * buffers in SGLs. The location of SGL is embedded in the struct
	 * iocpacket itself.
	 */
	kern_sge32 = (struct mrsas_sge32 *)
	    ((unsigned long)cmd->frame + user_ioc->sgl_off);

	/*
	 * For each user buffer, create a mirror buffer and copy in
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			if (!user_ioc->sgl[i].iov_len)
				continue;
			ioctl_data_size = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			if (!user_ioc32->sgl[i].iov_len)
				continue;
			ioctl_data_size = user_ioc32->sgl[i].iov_len;
#endif
		}
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    1, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    ioctl_data_size,
		    1,
		    ioctl_data_size,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &ioctl_data_tag[i])) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl data tag\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamem_alloc(ioctl_data_tag[i], (void **)&ioctl_data_mem[i],
		    (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_data_dmamap[i])) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamap_load(ioctl_data_tag[i], ioctl_data_dmamap[i],
		    ioctl_data_mem[i], ioctl_data_size, mrsas_alloc_cb,
		    &ioctl_data_phys_addr[i], BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load ioctl data mem\n");
			ret = ENOMEM;
			goto out;
		}
		/* Save the physical address and length */
		kern_sge32[i].phys_addr = (u_int32_t)ioctl_data_phys_addr[i];

		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			kern_sge32[i].length = user_ioc->sgl[i].iov_len;

			iov_base_ptrin = user_ioc->sgl[i].iov_base;
			iov_len = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			kern_sge32[i].length = user_ioc32->sgl[i].iov_len;

			iov_base_ptrin = PTRIN(user_ioc32->sgl[i].iov_base);
			iov_len = user_ioc32->sgl[i].iov_len;
#endif
		}

		/* Copy in data from user space */
		ret = copyin(iov_base_ptrin, ioctl_data_mem[i], iov_len);
		if (ret) {
			device_printf(sc->mrsas_dev, "IOCTL copyin failed!\n");
			goto out;
		}
	}

	ioctl_sense_size = user_ioc->sense_len;

	if (user_ioc->sense_len) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    1, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    ioctl_sense_size,
		    1,
		    ioctl_sense_size,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &ioctl_sense_tag)) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense tag\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamem_alloc(ioctl_sense_tag, (void **)&ioctl_sense_mem,
		    (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_sense_dmamap)) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense mem\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamap_load(ioctl_sense_tag, ioctl_sense_dmamap,
		    ioctl_sense_mem, ioctl_sense_size, mrsas_alloc_cb,
		    &ioctl_sense_phys_addr, BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load ioctl sense mem\n");
			ret = ENOMEM;
			goto out;
		}
		sense_ptr =
		    (unsigned long *)((unsigned long)cmd->frame + user_ioc->sense_off);
		*sense_ptr = (unsigned long)ioctl_sense_phys_addr;
	}
	/*
	 * Set the sync_cmd flag so that the ISR knows not to complete this
	 * cmd to the SCSI mid-layer
	 */
	cmd->sync_cmd = 1;
	mrsas_issue_blocked_cmd(sc, cmd);
	cmd->sync_cmd = 0;

	/*
	 * copy out the kernel buffers to user buffers
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			iov_base_ptrin = user_ioc->sgl[i].iov_base;
			iov_len = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			iov_base_ptrin = PTRIN(user_ioc32->sgl[i].iov_base);
			iov_len = user_ioc32->sgl[i].iov_len;
#endif
		}

		ret = copyout(ioctl_data_mem[i], iov_base_ptrin, iov_len);
		if (ret) {
			device_printf(sc->mrsas_dev, "IOCTL copyout failed!\n");
			goto out;
		}
	}

	/*
	 * copy out the sense
	 */
	if (user_ioc->sense_len) {
		/*
		 * sense_buff points to the location that has the user sense
		 * buffer address
		 */
		sense_ptr = (unsigned long *)((unsigned long)user_ioc->frame.raw +
		    user_ioc->sense_off);
		ret = copyout(ioctl_sense_mem, (unsigned long *)*sense_ptr,
		    user_ioc->sense_len);
		if (ret) {
			device_printf(sc->mrsas_dev, "IOCTL sense copyout failed!\n");
			goto out;
		}
	}
	/*
	 * Return command status to user space
	 */
	memcpy(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status,
	    sizeof(u_int8_t));

out:
	/*
	 * Release sense buffer
	 */
	if (ioctl_sense_phys_addr)
		bus_dmamap_unload(ioctl_sense_tag, ioctl_sense_dmamap);
	if (ioctl_sense_mem != NULL)
		bus_dmamem_free(ioctl_sense_tag, ioctl_sense_mem, ioctl_sense_dmamap);
	if (ioctl_sense_tag != NULL)
		bus_dma_tag_destroy(ioctl_sense_tag);

	/*
	 * Release data buffers
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			if (!user_ioc->sgl[i].iov_len)
				continue;
#ifdef COMPAT_FREEBSD32
		} else {
			if (!user_ioc32->sgl[i].iov_len)
				continue;
#endif
		}
		if (ioctl_data_phys_addr[i])
			bus_dmamap_unload(ioctl_data_tag[i], ioctl_data_dmamap[i]);
		if (ioctl_data_mem[i] != NULL)
			bus_dmamem_free(ioctl_data_tag[i], ioctl_data_mem[i],
			    ioctl_data_dmamap[i]);
		if (ioctl_data_tag[i] != NULL)
			bus_dma_tag_destroy(ioctl_data_tag[i]);
	}
	/* Free command */
	mrsas_release_mfi_cmd(cmd);

	return (ret);
}
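
On the user side this path is reached through an ioctl carrying a struct mrsas_iocpacket, whose sgl[] entries the loop above mirrors into kernel bounce buffers and copies back after mrsas_issue_blocked_cmd(). A rough sketch of such a caller, exercising only the fields the kernel code above dereferences; the device path, the header providing struct mrsas_iocpacket and MRSAS_IOC_FIRMWARE_PASS_THROUGH64, and the way the MFI frame itself is filled in are all assumptions, not taken from the driver:

/* Hypothetical user-space caller; the mrsas ioctl header is assumed included. */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

int
example_fw_passthru(const char *devpath, struct mrsas_iocpacket *ioc,
    void *buf, size_t len)
{
	int fd, ret;

	fd = open(devpath, O_RDWR);
	if (fd < 0)
		return (-1);

	/* One data buffer: copied in, handed to firmware, copied back out. */
	ioc->sge_count = 1;
	ioc->sgl[0].iov_base = buf;
	ioc->sgl[0].iov_len = len;

	ret = ioctl(fd, MRSAS_IOC_FIRMWARE_PASS_THROUGH64, ioc);
	close(fd);
	/* Firmware status is returned in ioc->frame.hdr.cmd_status. */
	return (ret);
}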
Code Example #22
File: tws.c Project: dcui/FreeBSD-9.3_kernel
static int
tws_detach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    int i;
    u_int32_t reg;

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_UNINIT_START);
    mtx_unlock(&sc->gen_lock);

    /* needs to disable interrupt before detaching from cam */
    tws_turn_off_interrupts(sc);
    /* clear door bell */
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
    TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0);
    sc->obfl_q_overrun = false;
    tws_init_connect(sc, 1);

    /* Teardown the state in our softc created in our attach routine. */
    /* Disconnect the interrupt handler. */
    tws_teardown_intr(sc);

    /* Release irq resource */
    for(i=0;i<sc->irqs;i++) {
        if ( sc->irq_res[i] ){
            if (bus_release_resource(sc->tws_dev,
                     SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus release irq resource", 
                                       i, sc->irq_res_id[i]);
        }
    }
    if ( sc->intr_type == TWS_MSI ) {
        pci_release_msi(sc->tws_dev);
    }

    tws_cam_detach(sc);

    /* Release memory resource */
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
    }

    free(sc->reqs, M_TWS);
    free(sc->sense_bufs, M_TWS);
    free(sc->scan_ccb, M_TWS);
    if (sc->ioctl_data_mem)
            bus_dmamem_free(sc->data_tag, sc->ioctl_data_mem, sc->ioctl_data_map);
    free(sc->aen_q.q, M_TWS);
    free(sc->trace_q.q, M_TWS);
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    destroy_dev(sc->tws_cdev);
    sysctl_ctx_free(&sc->tws_clist);
    return (0);
}
Code Example #23
File: tft_ll.c Project: lacombar/netbsd-alc
static void
ll_tft_attach(struct device *parent, struct device *self, void *aux)
{
	struct xcvbus_attach_args *vaa = aux;
	struct ll_dmac 		*tx = vaa->vaa_tx_dmac;
	struct ll_tft_softc 	*lsc = (struct ll_tft_softc *)self;
	struct tft_softc 	*sc = &lsc->lsc_sc;
	int 			nseg, error;

	KASSERT(tx);

	lsc->lsc_dma_iot 	= tx->dmac_iot;
	lsc->lsc_dmat 		= vaa->vaa_dmat;
	sc->sc_iot 		= vaa->vaa_iot;

	printf(": LL_TFT\n");

	if ((error = bus_space_map(sc->sc_iot, vaa->vaa_addr, TFT_SIZE,
	    0, &sc->sc_ioh)) != 0) {
	    	printf("%s: could not map device registers\n",
		    device_xname(self));
	    	goto fail_0;
	}
	if ((error = bus_space_map(lsc->lsc_dma_iot, tx->dmac_ctrl_addr,
	    CDMAC_CTRL_SIZE, 0, &lsc->lsc_dma_ioh)) != 0) {
		printf("%s: could not map dmac registers\n",
		    device_xname(self));
		goto fail_1;
	}

	/* Fill in resolution, depth, size. */
	tft_mode(&sc->sc_dev);

	/* Allocate and map framebuffer control data. */
	if ((error = bus_dmamem_alloc(lsc->lsc_dmat,
	    sizeof(struct ll_tft_control) + sc->sc_size, 8, 0,
	    &lsc->lsc_seg, 1, &nseg, 0)) != 0) {
	    	printf("%s: could not allocate framebuffer\n",
		    device_xname(self));
		goto fail_2;
	}
	if ((error = bus_dmamem_map(lsc->lsc_dmat, &lsc->lsc_seg, nseg,
	    sizeof(struct ll_tft_control) + sc->sc_size,
	    (void **)&lsc->lsc_cd, BUS_DMA_COHERENT)) != 0) {
	    	printf("%s: could not map framebuffer\n",
		    device_xname(self));
		goto fail_3;
	}
	if ((error = bus_dmamap_create(lsc->lsc_dmat,
	    sizeof(struct ll_tft_control) + sc->sc_size, 1,
	    sizeof(struct ll_tft_control) + sc->sc_size, 0, 0,
	    &lsc->lsc_dmap)) != 0) {
	    	printf("%s: could not create framebuffer DMA map\n",
		    device_xname(self));
		goto fail_4;
	}
	if ((error = bus_dmamap_load(lsc->lsc_dmat, lsc->lsc_dmap, lsc->lsc_cd,
	    sizeof(struct ll_tft_control) + sc->sc_size, NULL, 0)) != 0) {
	    	printf("%s: could not load framebuffer DMA map\n",
		    device_xname(self));
		goto fail_5;
	}

	/* Clear screen, setup descriptor. */
	memset(lsc->lsc_cd, 0x00, sizeof(struct ll_tft_control));
	sc->sc_image = lsc->lsc_cd->cd_img;

	lsc->lsc_cd->cd_dsc.desc_next = lsc->lsc_dmap->dm_segs[0].ds_addr;
	lsc->lsc_cd->cd_dsc.desc_addr = lsc->lsc_dmap->dm_segs[0].ds_addr +
	    offsetof(struct ll_tft_control, cd_img);
	lsc->lsc_cd->cd_dsc.desc_size = sc->sc_size;
	lsc->lsc_cd->cd_dsc.desc_stat = CDMAC_STAT_SOP;

	bus_dmamap_sync(lsc->lsc_dmat, lsc->lsc_dmap, 0,
	    sizeof(struct ll_tft_control) + sc->sc_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_sdhook = shutdownhook_establish(ll_tft_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(self));

	tft_attach(self, &ll_tft_accessops);

	printf("%s: video memory pa 0x%08x\n", device_xname(self),
	    (uint32_t)lsc->lsc_cd->cd_dsc.desc_addr);

	/* Timing sensitive... */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TFT_CTRL, CTRL_RESET);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TFT_CTRL, CTRL_ENABLE);
	bus_space_write_4(lsc->lsc_dma_iot, lsc->lsc_dma_ioh, CDMAC_CURDESC,
	    lsc->lsc_dmap->dm_segs[0].ds_addr);

	return ;

 fail_5:
 	bus_dmamap_destroy(lsc->lsc_dmat, lsc->lsc_dmap);
 fail_4:
 	bus_dmamem_unmap(lsc->lsc_dmat, (void *)lsc->lsc_cd,
 	    sizeof(struct ll_tft_control) + sc->sc_size);
 fail_3:
 	bus_dmamem_free(lsc->lsc_dmat, &lsc->lsc_seg, nseg);
 fail_2:
	bus_space_unmap(lsc->lsc_dma_iot, lsc->lsc_dma_ioh, CDMAC_CTRL_SIZE);
 fail_1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, TFT_SIZE);
 fail_0:
	printf("%s: error %d\n", device_xname(self), error);
}
Code Example #24
File: if_sq.c Project: MarginC/kame
static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_dmaoff,
				       HPC_ENET_REGS_SIZE,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_devoff,
				       HPC_ENET_DEVREGS_SIZE,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (caddr_t *)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
			"= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
			"= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_txmap[i])) != 0) {
		    printf(": unable to create tx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_4;
	    }
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_rxmap[i])) != 0) {
		    printf(": unable to create rx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_5;
	    }
	}

	/* Pre-allocate the receive buffers.  */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d\n,"
			       " error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
					      self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5);
	if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
					   ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sq_trace, 0, sizeof(sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if (sc->sc_rxmap[i] != NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if (sc->sc_txmap[i] !=  NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
				      sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}
Code Example #25
File: adv_isa.c Project: AhmadTux/freebsd
static int
adv_isa_probe(device_t dev)
{
	int	port_index;
	int	max_port_index;
	u_long	iobase, iocount, irq;
	int	user_iobase = 0;
	int	rid = 0;
	void	*ih;
	struct resource	*iores, *irqres;

	/*
	 * We don't know of any PnP ID's for these cards.
	 */
	if (isa_get_logicalid(dev) != 0)
		return (ENXIO);

	/*
	 * Default to scanning all possible device locations.
	 */
	port_index = 0;
	max_port_index = MAX_ISA_IOPORT_INDEX;

	if (bus_get_resource(dev, SYS_RES_IOPORT, 0, &iobase, &iocount) == 0) {
		user_iobase = 1;
		for (;port_index <= max_port_index; port_index++)
			if (iobase <= adv_isa_ioports[port_index])
				break;
		if ((port_index > max_port_index)
		 || (iobase != adv_isa_ioports[port_index])) {
			if (bootverbose)
			    printf("adv%d: Invalid baseport of 0x%lx specified. "
				"Nearest valid baseport is 0x%x.  Failing "
				"probe.\n", device_get_unit(dev), iobase,
				(port_index <= max_port_index) ?
					adv_isa_ioports[port_index] :
					adv_isa_ioports[max_port_index]);
			return ENXIO;
		}
		max_port_index = port_index;
	}

	/* Perform the actual probing */
	adv_set_isapnp_wait_for_key();
	for (;port_index <= max_port_index; port_index++) {
		u_int16_t port_addr = adv_isa_ioports[port_index];
		bus_size_t maxsegsz;
		bus_size_t maxsize;
		bus_addr_t lowaddr;
		int error;
		struct adv_softc *adv;

		if (port_addr == 0)
			/* Already been attached */
			continue;
		
		if (bus_set_resource(dev, SYS_RES_IOPORT, 0, port_addr, 1))
			continue;

		/* XXX what is the real portsize? */
		iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
					       RF_ACTIVE);
		if (iores == NULL)
			continue;

		if (adv_find_signature(rman_get_bustag(iores),
				       rman_get_bushandle(iores)) == 0) {
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			continue;
		}

		/*
		 * Got one.  Now allocate our softc
		 * and see if we can initialize the card.
		 */
		adv = adv_alloc(dev, rman_get_bustag(iores),
				rman_get_bushandle(iores));
		if (adv == NULL) {
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			break;
		}

		/*
		 * Stop the chip.
		 */
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
		ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
		/*
		 * Determine the chip version.
		 */
		adv->chip_version = ADV_INB(adv, ADV_NONEISA_CHIP_REVISION);
		if ((adv->chip_version >= ADV_CHIP_MIN_VER_VL)
		    && (adv->chip_version <= ADV_CHIP_MAX_VER_VL)) {
			adv->type = ADV_VL;
			maxsegsz = ADV_VL_MAX_DMA_COUNT;
			maxsize = BUS_SPACE_MAXSIZE_32BIT;
			lowaddr = ADV_VL_MAX_DMA_ADDR;
			bus_delete_resource(dev, SYS_RES_DRQ, 0);
		} else if ((adv->chip_version >= ADV_CHIP_MIN_VER_ISA)
			   && (adv->chip_version <= ADV_CHIP_MAX_VER_ISA)) {
			if (adv->chip_version >= ADV_CHIP_MIN_VER_ISA_PNP) {
				adv->type = ADV_ISAPNP;
				ADV_OUTB(adv, ADV_REG_IFC,
					 ADV_IFC_INIT_DEFAULT);
			} else {
				adv->type = ADV_ISA;
			}
			maxsegsz = ADV_ISA_MAX_DMA_COUNT;
			maxsize = BUS_SPACE_MAXSIZE_24BIT;
			lowaddr = ADV_ISA_MAX_DMA_ADDR;
			adv->isa_dma_speed = ADV_DEF_ISA_DMA_SPEED;
			adv->isa_dma_channel = adv_get_isa_dma_channel(adv);
			bus_set_resource(dev, SYS_RES_DRQ, 0,
					 adv->isa_dma_channel, 1);
		} else {
			panic("advisaprobe: Unknown card revision\n");
		}

		/*
		 * Allocate a parent dmatag for all tags created
		 * by the MI portions of the advansys driver
		 */
		error = bus_dma_tag_create(
				/* parent	*/ bus_get_dma_tag(dev),
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ lowaddr,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ maxsize,
				/* nsegments	*/ ~0,
				/* maxsegsz	*/ maxsegsz,
				/* flags	*/ 0,
				/* lockfunc	*/ busdma_lock_mutex,
				/* lockarg	*/ &Giant,
				&adv->parent_dmat); 

		if (error != 0) {
			printf("%s: Could not allocate DMA tag - error %d\n",
			       adv_name(adv), error); 
			adv_free(adv); 
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			break;
		}

		adv->init_level += 2;

		if (overrun_buf == NULL) {
			/* Need to allocate our overrun buffer */
			if (bus_dma_tag_create(
				/* parent	*/ adv->parent_dmat,
				/* alignment	*/ 8,
				/* boundary	*/ 0,
				/* lowaddr	*/ ADV_ISA_MAX_DMA_ADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ ADV_OVERRUN_BSIZE,
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&overrun_dmat) != 0) {
				adv_free(adv);
				bus_release_resource(dev, SYS_RES_IOPORT, 0,
						     iores);
				break;
			}
			if (bus_dmamem_alloc(overrun_dmat,
					     (void **)&overrun_buf,
					     BUS_DMA_NOWAIT,
					     &overrun_dmamap) != 0) {
				bus_dma_tag_destroy(overrun_dmat);
				adv_free(adv);
				bus_release_resource(dev, SYS_RES_IOPORT, 0,
						     iores);
				break;
			}
			/* And permanently map it in */  
			bus_dmamap_load(overrun_dmat, overrun_dmamap,
					overrun_buf, ADV_OVERRUN_BSIZE,
					adv_map, &overrun_physbase,
					/*flags*/0);
		}

		adv->overrun_physbase = overrun_physbase;

		if (adv_init(adv) != 0) {
			bus_dmamap_unload(overrun_dmat, overrun_dmamap);
			bus_dmamem_free(overrun_dmat, overrun_buf,
			    overrun_dmamap);
			bus_dma_tag_destroy(overrun_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			break;
		}

		switch (adv->type) {
		case ADV_ISAPNP:
			if (adv->chip_version == ADV_CHIP_VER_ASYN_BUG) {
				adv->bug_fix_control
				    |= ADV_BUG_FIX_ASYN_USE_SYN;
				adv->fix_asyn_xfer = ~0;
			}
			/* Fall Through */
		case ADV_ISA:
			adv->max_dma_count = ADV_ISA_MAX_DMA_COUNT;
			adv->max_dma_addr = ADV_ISA_MAX_DMA_ADDR;
			adv_set_isa_dma_settings(adv);
			break;

		case ADV_VL:
			adv->max_dma_count = ADV_VL_MAX_DMA_COUNT;
			adv->max_dma_addr = ADV_VL_MAX_DMA_ADDR;
			break;
		default:
			panic("advisaprobe: Invalid card type\n");
		}
			
		/* Determine our IRQ */
		if (bus_get_resource(dev, SYS_RES_IRQ, 0, &irq, NULL))
			bus_set_resource(dev, SYS_RES_IRQ, 0,
					 adv_get_chip_irq(adv), 1);
		else
			adv_set_chip_irq(adv, irq);

		irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						RF_ACTIVE);
		if (irqres == NULL ||
		    bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY,
		        NULL, adv_intr, adv, &ih)) {
			bus_dmamap_unload(overrun_dmat, overrun_dmamap);
			bus_dmamem_free(overrun_dmat, overrun_buf,
			    overrun_dmamap);
			bus_dma_tag_destroy(overrun_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			break;
		}

		/* Mark as probed */
		adv_isa_ioports[port_index] = 0;
		return 0;
	}

	if (user_iobase)
		bus_set_resource(dev, SYS_RES_IOPORT, 0, iobase, iocount);
	else
		bus_delete_resource(dev, SYS_RES_IOPORT, 0);

	return ENXIO;
}
Code Example #26
File: if_bce.c Project: eyberg/rumpkernel-netbsd-src
static void
bce_attach(device_t parent, device_t self, void *aux)
{
	struct bce_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct bce_product *bp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char     *intrstr = NULL;
	uint32_t	command;
	pcireg_t	memtype, pmode;
	bus_addr_t	memaddr;
	bus_size_t	memsize;
	void		*kva;
	bus_dma_segment_t seg;
	int             error, i, pmreg, rseg;
	struct ifnet   *ifp;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->bce_dev = self;

	bp = bce_lookup(pa);
	KASSERT(bp != NULL);

	sc->bce_pa = *pa;

	/* BCM440x can only address 30 bits (1GB) */
	if (bus_dmatag_subregion(pa->pa_dmat, 0, (1 << 30),
	    &(sc->bce_dmatag), BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(self,
		    "WARNING: failed to restrict dma range,"
		    " falling back to parent bus dma range\n");
		sc->bce_dmatag = pa->pa_dmat;
	}

	 aprint_naive(": Ethernet controller\n");
	 aprint_normal(": %s\n", bp->bp_name);

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(self, "failed to enable memory mapping!\n");
		return;
	}
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
		    &sc->bce_bhandle, &memaddr, &memsize) == 0)
			break;
	default:
		aprint_error_dev(self, "unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, NULL)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error_dev(self,
			    "unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			aprint_normal_dev(self,
			    "waking up from power state D%d\n", pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));

	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc);

	if (sc->bce_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Allocate DMA-safe memory for ring descriptors.
	 * The receive, and transmit rings can not share the same
	 * 4k space, however both are allocated at once here.
	 */
	/*
	 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but
	 * due to the limitation above. ??
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag,
	    2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		aprint_error_dev(self,
		    "unable to alloc space for ring descriptors, error = %d\n",
		    error);
		return;
	}
	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		aprint_error_dev(self,
		    "unable to map DMA buffers, error = %d\n", error);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag,
	    2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->bce_ring_map))) {
		aprint_error_dev(self,
		    "unable to create ring DMA map, error = %d\n", error);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}
	/* save the ring space in softc */
	sc->bce_rx_ring = (struct bce_dma_slot *) kva;
	sc->bce_tx_ring = (struct bce_dma_slot *) ((char *)kva + PAGE_SIZE);

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES,
		    BCE_NTXFRAGS, MCLBYTES, 0, 0, &sc->bce_cdata.bce_tx_map[i])) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map, error = %d\n", error);
		}
		sc->bce_cdata.bce_tx_chain[i] = NULL;
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map, error = %d\n", error);
		}
		sc->bce_cdata.bce_rx_chain[i] = NULL;
	}

	/* Set up ifnet structure */
	ifp = &sc->ethercom.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_init = bce_init;
	ifp->if_stop = bce_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize our media structures and probe the MII. */

	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;

	sc->ethercom.ec_mii = &sc->bce_mii;
	ifmedia_init(&sc->bce_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_FORCEANEG|MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);
	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_PHY) & 0x1f;
	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but not currently.
	 */
	bce_mii_write(sc->bce_dev, 1, 26,	 /* MAGIC */
	    bce_mii_read(sc->bce_dev, 1, 26) & 0x7fff);	 /* MAGIC */
	/* enable traffic meter led mode */
	bce_mii_write(sc->bce_dev, 1, 27,	 /* MAGIC */
	    bce_mii_read(sc->bce_dev, 1, 27) | (1 << 6));	 /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	sc->enaddr[0] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET0);
	sc->enaddr[1] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET1);
	sc->enaddr[2] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET2);
	sc->enaddr[3] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET3);
	sc->enaddr[4] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET4);
	sc->enaddr[5] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_MAGIC_ENET5);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->enaddr));
	ether_ifattach(ifp, sc->enaddr);
	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);
	callout_init(&sc->bce_timeout, 0);

	if (pmf_device_register(self, NULL, bce_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}
Code Example #27
File: if_cas.c Project: repos-holder/openbsd-patches
/*
 * cas_config:
 *
 *	Attach a Cassini interface to the system.
 */
void
cas_config(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	cas_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct cas_control_data), CAS_PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("\n%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct cas_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct cas_control_data), 1,
	    sizeof(struct cas_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("\n%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct cas_control_data), NULL,
	    0)) != 0) {
		printf("\n%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	bzero(sc->sc_control_data, sizeof(struct cas_control_data));

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < CAS_NRXDESC; i++) {
		bus_dma_segment_t seg;
		caddr_t kva;
		int rseg;

		if ((error = bus_dmamem_alloc(sc->sc_dmatag, CAS_PAGE_SIZE,
		    CAS_PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
			printf("\n%s: unable to alloc rx DMA mem %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_dmaseg = seg;

		if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
		    CAS_PAGE_SIZE, &kva, BUS_DMA_NOWAIT)) != 0) {
			printf("\n%s: unable to alloc rx DMA mem %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_kva = kva;

		if ((error = bus_dmamap_create(sc->sc_dmatag, CAS_PAGE_SIZE, 1,
		    CAS_PAGE_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("\n%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}

		if ((error = bus_dmamap_load(sc->sc_dmatag,
		   sc->sc_rxsoft[i].rxs_dmamap, kva, CAS_PAGE_SIZE, NULL,
		   BUS_DMA_NOWAIT)) != 0) {
			printf("\n%s: unable to load rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    CAS_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			printf("\n%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_6;
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 16 * 1024;

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = cas_start;
	ifp->if_ioctl = cas_ioctl;
	ifp->if_watchdog = cas_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, CAS_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = cas_mii_readreg;
	mii->mii_writereg = cas_mii_writereg;
	mii->mii_statchg = cas_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, cas_mediachange, cas_mediastatus);

	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_MII_DATAPATH_MODE, 0);

	cas_mifinit(sc);

	if (sc->sc_mif_config & CAS_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= CAS_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_memt, sc->sc_memh,
	            CAS_MIF_CONFIG, sc->sc_mif_config);
	}

	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL &&
	    sc->sc_mif_config & (CAS_MIF_CONFIG_MDI0|CAS_MIF_CONFIG_MDI1)) {
		/* 
		 * Try the external PCS SERDES if we didn't find any
		 * MII devices.
		 */
		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MII_DATAPATH_MODE, CAS_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		     CAS_MII_CONFIG, CAS_MII_CONFIG_ENABLE);

		mii->mii_readreg = cas_pcs_readreg;
		mii->mii_writereg = cas_pcs_writereg;

		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_NOISOLATE);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				       " at phy %d, instance %d\n",
				       sc->sc_dev.dv_xname,
				       child->mii_dev.dv_xname,
				       child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can only do the following if the PHY
		 * actually has autonegotiation capability.
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_sh = shutdownhook_establish(cas_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("cas_config: can't establish shutdownhook");

	timeout_set(&sc->sc_tick_ch, cas_tick, sc);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < CAS_NTXDESC; i++) {
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txd[i].sd_map);
	}
 fail_5:
	for (i = 0; i < CAS_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
	    sizeof(struct cas_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_0:
	return;
}
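
cas_config() above releases resources on failure by jumping to a numbered fail_N label and falling through the labels in reverse order of allocation. The fragment below is a stripped-down, self-contained sketch of that same idiom with placeholder resources, just to show the control flow in isolation; the allocator names are invented.

#include <stdlib.h>

/* Placeholder allocators standing in for the dmamem/dmamap setup steps. */
static void *step_a(void) { return malloc(16); }
static void *step_b(void) { return malloc(16); }
static void *step_c(void) { return malloc(16); }

int
example_attach(void **ap, void **bp, void **cp)
{
	if ((*ap = step_a()) == NULL)
		goto fail_0;
	if ((*bp = step_b()) == NULL)
		goto fail_1;
	if ((*cp = step_c()) == NULL)
		goto fail_2;

	/* From this point forward, the attachment cannot fail. */
	return 0;

	/*
	 * Free resources in reverse order of allocation and fall through,
	 * exactly like the fail_6 .. fail_0 chain above.
	 */
 fail_2:
	free(*bp);
 fail_1:
	free(*ap);
 fail_0:
	return -1;
}
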
Code example #28
0
static void
iavc_pci_attach(struct device * parent,
	struct device * self, void *aux)
{
	struct iavc_pci_softc *psc = (void *) self;
	struct iavc_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const struct iavc_pci_product *pp;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ret;

	pp = find_cardname(pa);
	if (pp == NULL)
		return;

	sc->sc_t1 = 0;
	sc->sc_dma = 0;
	sc->dmat = pa->pa_dmat;

	iavc_b1dma_reset(sc);

	if (pci_mapreg_map(pa, IAVC_PCI_IOBA, PCI_MAPREG_TYPE_IO, 0,
		&sc->sc_io_bt, &sc->sc_io_bh, &psc->io_base, &psc->io_size)) {
		aprint_error(": unable to map i/o registers\n");
		return;
	}

	if (pci_mapreg_map(pa, IAVC_PCI_MMBA, PCI_MAPREG_TYPE_MEM, 0,
	     &sc->sc_mem_bt, &sc->sc_mem_bh, &psc->mem_base, &psc->mem_size)) {
		aprint_error(": unable to map mem registers\n");
		return;
	}
	aprint_normal(": %s\n", pp->name);

	if (pp->npp_product == PCI_PRODUCT_AVM_T1) {
		aprint_error_dev(&sc->sc_dev, "sorry, PRI not yet supported\n");
		return;

#if 0
		sc->sc_capi.card_type = CARD_TYPEC_AVM_T1_PCI;
		sc->sc_capi.sc_nbch = NBCH_PRI;
		ret = iavc_t1_detect(sc);
		if (ret) {
			if (ret < 6) {
				aprint_error_dev(&sc->sc_dev, "no card detected?\n");
			} else {
				aprint_error_dev(&sc->sc_dev, "black box not on\n");
			}
			return;
		} else {
			sc->sc_dma = 1;
			sc->sc_t1 = 1;
		}
#endif

	} else if (pp->npp_product == PCI_PRODUCT_AVM_B1) {
		sc->sc_capi.card_type = CARD_TYPEC_AVM_B1_PCI;
		sc->sc_capi.sc_nbch = NBCH_BRI;
		ret = iavc_b1dma_detect(sc);
		if (ret) {
			ret = iavc_b1_detect(sc);
			if (ret) {
				aprint_error_dev(&sc->sc_dev, "no card detected?\n");
				return;
			}
		} else {
			sc->sc_dma = 1;
		}
	}
	if (sc->sc_dma)
		iavc_b1dma_reset(sc);

#if 0
	/*
	 * XXX: should really be done this way, but this freezes the card
	 */
	if (sc->sc_t1)
		iavc_t1_reset(sc);
	else
		iavc_b1_reset(sc);
#endif

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dev, "couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	psc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, iavc_pci_intr, psc);
	if (psc->sc_ih == NULL) {
		aprint_error_dev(&sc->sc_dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	psc->sc_pc = pc;
	aprint_normal("%s: interrupting at %s\n", device_xname(&sc->sc_dev), intrstr);

	memset(&sc->sc_txq, 0, sizeof(struct ifqueue));
	sc->sc_txq.ifq_maxlen = sc->sc_capi.sc_nbch * 4;

	sc->sc_intr = 0;
	sc->sc_state = IAVC_DOWN;
	sc->sc_blocked = 0;

	/* setup capi link */
	sc->sc_capi.load = iavc_load;
	sc->sc_capi.reg_appl = iavc_register;
	sc->sc_capi.rel_appl = iavc_release;
	sc->sc_capi.send = iavc_send;
	sc->sc_capi.ctx = (void *) sc;

	/* lock & load DMA for TX */
	if ((ret = bus_dmamem_alloc(sc->dmat, IAVC_DMA_SIZE, PAGE_SIZE, 0,
	    &sc->txseg, 1, &sc->ntxsegs, BUS_DMA_ALLOCNOW)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't allocate tx DMA memory, error = %d\n",
		    ret);
		goto fail1;
	}

	if ((ret = bus_dmamem_map(sc->dmat, &sc->txseg, sc->ntxsegs,
	    IAVC_DMA_SIZE, &sc->sc_sendbuf, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't map tx DMA memory, error = %d\n",
		    ret);
		goto fail2;
	}

	if ((ret = bus_dmamap_create(sc->dmat, IAVC_DMA_SIZE, 1,
	    IAVC_DMA_SIZE, 0, BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
	    &sc->tx_map)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't create tx DMA map, error = %d\n",
		    ret);
		goto fail3;
	}

	if ((ret = bus_dmamap_load(sc->dmat, sc->tx_map, sc->sc_sendbuf,
	    IAVC_DMA_SIZE, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't load tx DMA map, error = %d\n",
		    ret);
		goto fail4;
	}

	/* do the same for RX */
	if ((ret = bus_dmamem_alloc(sc->dmat, IAVC_DMA_SIZE, PAGE_SIZE, 0,
	    &sc->rxseg, 1, &sc->nrxsegs, BUS_DMA_ALLOCNOW)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't allocate rx DMA memory, error = %d\n",
		    ret);
		goto fail5;
	}

	if ((ret = bus_dmamem_map(sc->dmat, &sc->rxseg, sc->nrxsegs,
	    IAVC_DMA_SIZE, &sc->sc_recvbuf, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't map rx DMA memory, error = %d\n",
		    ret);
		goto fail6;
	}

	if ((ret = bus_dmamap_create(sc->dmat, IAVC_DMA_SIZE, 1, IAVC_DMA_SIZE,
	    0, BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT, &sc->rx_map)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't create rx DMA map, error = %d\n",
		    ret);
		goto fail7;
	}

	if ((ret = bus_dmamap_load(sc->dmat, sc->rx_map, sc->sc_recvbuf,
	    IAVC_DMA_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "can't load rx DMA map, error = %d\n",
		    ret);
		goto fail8;
	}

	if (capi_ll_attach(&sc->sc_capi, device_xname(&sc->sc_dev), pp->name)) {
		aprint_error_dev(&sc->sc_dev, "capi attach failed\n");
		goto fail9;
	}
	return;

	/* release resources in case of failed attach */
fail9:
	bus_dmamap_unload(sc->dmat, sc->rx_map);
fail8:
	bus_dmamap_destroy(sc->dmat, sc->rx_map);
fail7:
	bus_dmamem_unmap(sc->dmat, sc->sc_recvbuf, IAVC_DMA_SIZE);
fail6:
	bus_dmamem_free(sc->dmat, &sc->rxseg, sc->nrxsegs);
fail5:
	bus_dmamap_unload(sc->dmat, sc->tx_map);
fail4:
	bus_dmamap_destroy(sc->dmat, sc->tx_map);
fail3:
	bus_dmamem_unmap(sc->dmat, sc->sc_sendbuf, IAVC_DMA_SIZE);
fail2:
	bus_dmamem_free(sc->dmat, &sc->txseg, sc->ntxsegs);
fail1:
	pci_intr_disestablish(psc->sc_pc, psc->sc_ih);

	return;
}
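
The attach routine above loads the transmit buffer with BUS_DMA_WRITE and the receive buffer with BUS_DMA_READ, but it never touches the data itself; at I/O time the driver has to bracket CPU accesses with bus_dmamap_sync(). The sketch below shows what that bracketing might look like for the tx buffer. It is not the driver's actual send path: the helper name is invented and the step that programs the card is left as a placeholder.

/*
 * Hypothetical sketch: stage a message in the tx DMA buffer set up in
 * iavc_pci_attach() above and bracket the device transfer with syncs.
 */
static int
example_iavc_dma_send(struct iavc_softc *sc, const void *data, size_t len)
{
	if (len > IAVC_DMA_SIZE)
		return (EMSGSIZE);

	/* Stage the outgoing data in the mapped buffer. */
	memcpy(sc->sc_sendbuf, data, len);

	/* Flush CPU writes so the device sees current data. */
	bus_dmamap_sync(sc->dmat, sc->tx_map, 0, len, BUS_DMASYNC_PREWRITE);

	/* ... tell the card to fetch 'len' bytes from the tx buffer ... */

	/* Complete the write-side sync once the device has consumed it. */
	bus_dmamap_sync(sc->dmat, sc->tx_map, 0, len, BUS_DMASYNC_POSTWRITE);
	return (0);
}
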
Code example #29
0
File: ld_virtio.c Project: ryo/netbsd-src
static int
ld_virtio_alloc_reqs(struct ld_virtio_softc *sc, int qsize)
{
	int allocsize, r, rsegs, i;
	struct ld_softc *ld = &sc->sc_ld;
	void *vaddr;

	allocsize = sizeof(struct virtio_blk_req) * qsize;
	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
			     &sc->sc_reqs_seg, 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory allocation failed, size %d, "
				 "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(sc->sc_virtio->sc_dmat,
			   &sc->sc_reqs_seg, 1, allocsize,
			   &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory map failed, "
				 "error code %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
				      offsetof(struct virtio_blk_req, vr_bp),
				      1,
				      offsetof(struct virtio_blk_req, vr_bp),
				      0,
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
				      &vr->vr_cmdsts);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "command dmamap creation failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
				    &vr->vr_hdr,
				    offsetof(struct virtio_blk_req, vr_bp),
				    NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "command dmamap load failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
				      ld->sc_maxxfer,
				      (ld->sc_maxxfer / NBPG) +
				      VIRTIO_BLK_MIN_SEGMENTS,
				      ld->sc_maxxfer,
				      0,
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
				      &vr->vr_payload);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "payload dmamap creation failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
	}
	return 0;

err_reqs:
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
					   vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
					   vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, sc->sc_reqs, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_seg, 1);
err_none:
	return -1;
}
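
ld_virtio_alloc_reqs() above has no matching release routine in this excerpt. A teardown that mirrors its err_reqs/err_dmamem_alloc path might look like the sketch below; the function name is hypothetical, and a real driver would also make sure no requests are in flight first.

/*
 * Hypothetical sketch: undo ld_virtio_alloc_reqs() by destroying the
 * per-request maps, then unmapping and freeing the request array.
 */
static void
example_ld_virtio_free_reqs(struct ld_virtio_softc *sc, int qsize)
{
	int allocsize = sizeof(struct virtio_blk_req) * qsize;
	int i;

	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];

		if (vr->vr_cmdsts) {
			/* vr_cmdsts was loaded at alloc time, so unload it. */
			bus_dmamap_unload(sc->sc_virtio->sc_dmat, vr->vr_cmdsts);
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat, vr->vr_cmdsts);
		}
		if (vr->vr_payload)
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat, vr->vr_payload);
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, sc->sc_reqs, allocsize);
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_seg, 1);
}
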
Code example #30
0
File: dp83932.c Project: Tommmster/netbsd-avr32
/*
 * sonic_attach:
 *
 *	Attach a SONIC interface to the system.
 */
void
sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	size_t cdatasize;
	uint8_t *nullbuf;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if (sc->sc_32bit)
		cdatasize = sizeof(struct sonic_control_data32);
	else
		cdatasize = sizeof(struct sonic_control_data16);

	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
	     BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize;
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
	     &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		     MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create and map the pad buffer.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create pad buffer DMA map, error = %d\n", error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load pad buffer DMA map, error = %d\n", error);
		goto fail_6;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Reset the chip to a known state.
	 */
	sonic_reset(sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sonic_ioctl;
	ifp->if_start = sonic_start;
	ifp->if_watchdog = sonic_watchdog;
	ifp->if_init = sonic_init;
	ifp->if_stop = sonic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sonic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(sc->sc_dev,
		    "WARNING: unable to establish shutdown hook\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16, cdatasize);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
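
sonic_attach()'s fail_* chain also serves as a template for detach-time cleanup. The sketch below mirrors it as a hypothetical teardown helper; a real detach would first stop the chip, detach the interface, and unload any rx mbufs still held in the maps, none of which is shown here.

/*
 * Hypothetical sketch: DMA teardown mirroring sonic_attach()'s error path.
 */
static void
example_sonic_dma_teardown(struct sonic_softc *sc)
{
	size_t cdatasize = sc->sc_32bit ?
	    sizeof(struct sonic_control_data32) :
	    sizeof(struct sonic_control_data16);
	int i;

	/* Any rx mbufs still loaded would be unloaded and freed before this. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_nulldmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
	for (i = 0; i < SONIC_NRXDESC; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxsoft[i].ds_dmamap);
	for (i = 0; i < SONIC_NTXDESC; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txsoft[i].ds_dmamap);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16,
	    cdatasize + ETHER_PAD_LEN);
	/*
	 * The bus_dma_segment_t was a local variable in sonic_attach(), so
	 * the final bus_dmamem_free() step cannot be shown from the softc
	 * alone and is omitted from this sketch.
	 */
}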