Example #1
0
static int hpt_attach(device_t dev)
{
	PHBA hba = (PHBA)device_get_softc(dev);
	HIM *him = hba->ldm_adapter.him;
	PCI_ID pci_id;
	HPT_UINT size;
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));

	pci_enable_busmaster(dev);

	pci_id.vid = pci_get_vendor(dev);
	pci_id.did = pci_get_device(dev);
	pci_id.rev = pci_get_revid(dev);
	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);

	size = him->get_adapter_size(&pci_id);
	hba->ldm_adapter.him_handle = kmalloc(size, M_DEVBUF, M_WAITOK);
	if (!hba->ldm_adapter.him_handle)
		return ENXIO;

	hba->pcidev = dev;
	hba->pciaddr.tree = 0;
	hba->pciaddr.bus = pci_get_bus(dev);
	hba->pciaddr.device = pci_get_slot(dev);
	hba->pciaddr.function = pci_get_function(dev);

	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
		kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
		return ENXIO;
	}

	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
		hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));

	if (!ldm_register_adapter(&hba->ldm_adapter)) {
		size = ldm_get_vbus_size();
		vbus_ext = kmalloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK);
		if (!vbus_ext) {
			kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
			return ENXIO;
		}
		memset(vbus_ext, 0, sizeof(VBUS_EXT));
		vbus_ext->ext_type = EXT_TYPE_VBUS;
		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
		ldm_register_adapter(&hba->ldm_adapter);
	}

	ldm_for_each_vbus(vbus, vbus_ext) {
		if (hba->ldm_adapter.vbus==vbus) {
			hba->vbus_ext = vbus_ext;
			hba->next = vbus_ext->hba_list;
			vbus_ext->hba_list = hba;
			break;
		}
	}
	return 0;
}
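The attach routines collected in these examples mostly follow the shape seen in hpt_attach() above: enable bus mastering, map the device registers, hook an interrupt, and unwind on failure. The sketch below only distills that common skeleton; the mydrv_* names, the softc layout, and the interrupt handler are hypothetical placeholders rather than code from any of the drivers quoted here.

struct mydrv_softc {
	device_t	 dev;
	struct resource	*mem_res;	/* memory BAR */
	struct resource	*irq_res;	/* interrupt line */
	void		*intr_hdl;	/* cookie from bus_setup_intr() */
};

static void
mydrv_intr(void *arg)
{
	/* Interrupt handling would go here. */
}

static int
mydrv_pci_attach(device_t dev)
{
	struct mydrv_softc *sc = device_get_softc(dev);
	int rid, error;

	sc->dev = dev;

	/* Let the device master the bus (sets PCIM_CMD_BUSMASTEREN). */
	pci_enable_busmaster(dev);

	/* Map the first memory BAR. */
	rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not map registers\n");
		return (ENXIO);
	}

	/* Allocate and hook the interrupt line. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		error = ENXIO;
		goto fail;
	}
	error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, mydrv_intr, sc, &sc->intr_hdl);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt\n");
		goto fail;
	}

	return (0);

fail:
	/* A real driver would share this unwind path with its detach method. */
	if (sc->irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
	bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->mem_res);
	pci_disable_busmaster(dev);
	return (error);
}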
Example #2
0
static int
isci_attach(device_t device)
{
    int error;
    struct isci_softc *isci = DEVICE2SOFTC(device);

    g_isci = isci;
    isci->device = device;
    pci_enable_busmaster(device);

    isci_allocate_pci_memory(isci);

    error = isci_initialize(isci);

    if (error)
    {
        isci_detach(device);
        return (error);
    }

    isci_interrupt_setup(isci);
    isci_sysctl_initialize(isci);

    return (0);
}
Example #3
0
static int
iop_pci_attach(device_t dev)
{
    struct iop_softc *sc = device_get_softc(dev);
    int rid;

    bzero(sc, sizeof(struct iop_softc));

    /* get resources */
    rid = 0x10;
    sc->r_mem = 
	bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);

    if (!sc->r_mem)
	return 0;

    rid = 0x00;
    sc->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
				       RF_SHAREABLE | RF_ACTIVE);

    /* now setup the infrastructure to talk to the device */
    pci_enable_busmaster(dev);

    sc->ibase = rman_get_virtual(sc->r_mem);
    sc->reg = (struct i2o_registers *)sc->ibase;
    sc->dev = dev;
    mtx_init(&sc->mtx, "pst lock", NULL, MTX_DEF);

    if (!iop_init(sc))
	return 0;
    return bus_generic_attach(dev);
}
Example #4
0
static int
vga_pci_enable_busmaster(device_t dev, device_t child)
{

	device_printf(dev, "child %s requested pci_enable_busmaster\n",
	    device_get_nameunit(child));
	return (pci_enable_busmaster(dev));
}
Example #5
0
static int
rtw_pci_attach(device_t dev)
{
	struct rtw_softc *sc = device_get_softc(dev);
	struct rtw_regs *regs = &sc->sc_regs;
	int i, error;

	/*
	 * No power management hooks.
	 * XXX Maybe we should add some!
	 */
	sc->sc_flags |= RTW_F_ENABLED;

	sc->sc_rev = pci_get_revid(dev);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, port, irq;
						
		mem = pci_read_config(dev, RTW_PCI_MMBA, 4);
		port = pci_read_config(dev, RTW_PCI_IOBA, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, RTW_PCI_MMBA, mem, 4);
		pci_write_config(dev, RTW_PCI_IOBA, port, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable PCI bus master */
	pci_enable_busmaster(dev);

	/* Allocate IO memory/port */
	for (i = 0; i < NELEM(rtw_pci_regs); ++i) {
		regs->r_rid = rtw_pci_regs[i].reg_rid;
		regs->r_type = rtw_pci_regs[i].reg_type;
		regs->r_res = bus_alloc_resource_any(dev, regs->r_type,
						     &regs->r_rid, RF_ACTIVE);
		if (regs->r_res != NULL)
			break;
	}
	if (regs->r_res == NULL) {
		device_printf(dev, "can't allocate IO mem/port\n");
		return ENXIO;
	}
	regs->r_bh = rman_get_bushandle(regs->r_res);
	regs->r_bt = rman_get_bustag(regs->r_res);

	error = rtw_attach(dev);
	if (error)
		rtw_pci_detach(dev);
	return error;
}
Example #6
0
static int
ral_pci_attach(device_t dev)
{
	struct ral_pci_softc *psc = device_get_softc(dev);
	struct rt2560_softc *sc = &psc->u.sc_rt2560;
	int error;

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	}

	/* enable bus-mastering */
	pci_enable_busmaster(dev);

	psc->sc_opns = (pci_get_device(dev) == 0x0201) ? &ral_rt2560_opns :
	    &ral_rt2661_opns;

	psc->mem_rid = RAL_PCI_BAR0;
	psc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &psc->mem_rid,
	    RF_ACTIVE);
	if (psc->mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return ENXIO;
	}

	sc->sc_st = rman_get_bustag(psc->mem);
	sc->sc_sh = rman_get_bushandle(psc->mem);
	sc->sc_invalid = 1;
	
	psc->irq_rid = 0;
	psc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &psc->irq_rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (psc->irq == NULL) {
		device_printf(dev, "could not allocate interrupt resource\n");
		return ENXIO;
	}

	error = (*psc->sc_opns->attach)(dev, pci_get_device(dev));
	if (error != 0)
		return error;

	/*
	 * Hook our interrupt after all initialization is complete.
	 */
	error = bus_setup_intr(dev, psc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, psc->sc_opns->intr, psc, &psc->sc_ih);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt\n");
		return error;
	}
	sc->sc_invalid = 0;
	
	return 0;
}
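Examples #5 and #6 above (and Example #25 further down) share the same power-state fixup: if firmware left the chip in D1-D3, the driver forces it back to D0 before touching the hardware, and, where the BARs may be lost across the transition, saves and restores them as rtw_pci_attach() does. A minimal hedged sketch of just the D0 step, with a hypothetical helper name and no driver-specific register saving:

static void
mydrv_wake_to_d0(device_t dev)
{
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	}
}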
Example #7
0
static int
hdspe_attach(device_t dev)
{
    struct sc_info *sc;
    struct sc_pcminfo *scp;
    struct hdspe_channel *chan_map;
    uint32_t rev;
    int i, err;

#if 0
    device_printf(dev, "hdspe_attach()\n");
#endif

    sc = device_get_softc(dev);
    sc->lock = snd_mtxcreate(device_get_nameunit(dev),
                             "snd_hdspe softc");
    sc->dev = dev;

    pci_enable_busmaster(dev);
    rev = pci_get_revid(dev);
    switch (rev) {
    case PCI_REVISION_AIO:
        sc->type = AIO;
        chan_map = chan_map_aio;
        break;
    case PCI_REVISION_RAYDAT:
        sc->type = RAYDAT;
        chan_map = chan_map_rd;
        break;
    default:
        return ENXIO;
    }

    /* Allocate resources. */
    err = hdspe_alloc_resources(sc);
    if (err) {
        device_printf(dev, "Unable to allocate system resources.\n");
        return ENXIO;
    }

    if (hdspe_init(sc) != 0)
        return ENXIO;

    for (i = 0; i < HDSPE_MAX_CHANS && chan_map[i].descr != NULL; i++) {
        scp = malloc(sizeof(struct sc_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO);
        scp->hc = &chan_map[i];
        scp->sc = sc;
        scp->dev = device_add_child(dev, "pcm", -1);
        device_set_ivars(scp->dev, scp);
    }

    hdspe_map_dmabuf(sc);

    return (bus_generic_attach(dev));
}
Example #8
0
static int
mwl_pci_resume(device_t dev)
{
    struct mwl_pci_softc *psc = device_get_softc(dev);

    pci_enable_busmaster(dev);

    mwl_resume(&psc->sc_sc);

    return (0);
}
Example #9
0
static int
set_pci_config(device_t dev)
{
	uint32_t data;

	pci_enable_busmaster(dev);

	data = pci_read_config(dev, PCIR_COMMAND, 2);
	data |= PCIM_CMD_PORTEN;
	pci_write_config(dev, PCIR_COMMAND, data, 2);

	return 0;
}
Example #10
0
int
ata_pci_attach(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    device_t child;
    u_int32_t cmd;
    int unit;

    /* do chipset specific setups only needed once */
    ctlr->legacy = ata_legacy(dev);
    if (ctlr->legacy || pci_read_config(dev, PCIR_BAR(2), 4) & IOMASK)
	ctlr->channels = 2;
    else
	ctlr->channels = 1;
    ctlr->ichannels = -1;
    ctlr->ch_attach = ata_pci_ch_attach;
    ctlr->ch_detach = ata_pci_ch_detach;
    ctlr->dev = dev;

    /* if needed try to enable busmastering */
    pci_enable_busmaster(dev);
    cmd = pci_read_config(dev, PCIR_COMMAND, 2);

    /* if busmastering mode "stuck" use it */
    if ((cmd & PCIM_CMD_BUSMASTEREN) == PCIM_CMD_BUSMASTEREN) {
	ctlr->r_type1 = SYS_RES_IOPORT;
	ctlr->r_rid1 = ATA_BMADDR_RID;
	ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1, &ctlr->r_rid1,
					      RF_ACTIVE);
    }

    if (ctlr->chipinit(dev))
	return ENXIO;

    /* attach all channels on this controller */
    for (unit = 0; unit < ctlr->channels; unit++) {
	if ((ctlr->ichannels & (1 << unit)) == 0)
	    continue;
	child = device_add_child(dev, "ata",
	    ((unit == 0 || unit == 1) && ctlr->legacy) ?
	    unit : devclass_find_free_unit(ata_devclass, 2));
	if (child == NULL)
	    device_printf(dev, "failed to add ata child device\n");
	else
	    device_set_ivars(child, (void *)(intptr_t)unit);
    }
    bus_generic_attach(dev);
    return 0;
}
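ata_pci_attach() above re-reads PCIR_COMMAND after calling pci_enable_busmaster() and only uses bus-master DMA if PCIM_CMD_BUSMASTEREN actually "stuck"; aacraid_pci_attach() in Example #30 performs the same verification and fails the attach instead. A hedged helper capturing that check (the name is hypothetical):

static int
mydrv_busmaster_enabled(device_t dev)
{
	uint16_t cmd;

	pci_enable_busmaster(dev);
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	return ((cmd & PCIM_CMD_BUSMASTEREN) != 0);
}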
Example #11
0
static int
vtpci_attach(device_t dev)
{
	struct vtpci_softc *sc;
	device_t child;
	int rid;

	sc = device_get_softc(dev);
	sc->vtpci_dev = dev;

	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
	    RF_ACTIVE);
	if (sc->vtpci_res == NULL) {
		device_printf(dev, "cannot map I/O space\n");
		return (ENXIO);
	}

	if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
		sc->vtpci_flags |= VTPCI_FLAG_NO_MSI;

	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
		rid = PCIR_BAR(1);
		sc->vtpci_msix_res = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	}

	if (sc->vtpci_msix_res == NULL)
		sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX;

	vtpci_reset(sc);

	/* Tell the host we've noticed this device. */
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);

	if ((child = device_add_child(dev, NULL, -1)) == NULL) {
		device_printf(dev, "cannot create child device\n");
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		vtpci_detach(dev);
		return (ENOMEM);
	}

	sc->vtpci_child_dev = child;
	vtpci_probe_and_attach_child(sc);

	return (0);
}
Example #12
0
static int
mpr_pci_attach(device_t dev)
{
	struct mpr_softc *sc;
	struct mpr_ident *m;
	int error;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->mpr_dev = dev;
	m = mpr_find_ident(dev);
	sc->mpr_flags = m->flags;

	/* Twiddle basic PCI config bits for a sanity check */
	pci_enable_busmaster(dev);

	/* Allocate the System Interface Register Set */
	sc->mpr_regs_rid = PCIR_BAR(1);
	if ((sc->mpr_regs_resource = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &sc->mpr_regs_rid, RF_ACTIVE)) == NULL) {
		mpr_printf(sc, "Cannot allocate PCI registers\n");
		return (ENXIO);
	}
	sc->mpr_btag = rman_get_bustag(sc->mpr_regs_resource);
	sc->mpr_bhandle = rman_get_bushandle(sc->mpr_regs_resource);

	/* Allocate the parent DMA tag */
	if (bus_dma_tag_create( bus_get_dma_tag(dev),	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				BUS_SPACE_UNRESTRICTED,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mpr_parent_dmat)) {
		mpr_printf(sc, "Cannot allocate parent DMA tag\n");
		mpr_pci_free(sc);
		return (ENOMEM);
	}

	if ((error = mpr_attach(sc)) != 0)
		mpr_pci_free(sc);

	return (error);
}
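Like Examples #23, #26 and #30, mpr_pci_attach() above creates a deliberately permissive "parent" DMA tag rooted at the PCI bus tag; tighter per-purpose tags (command frames, data buffers) are derived from it later. A hedged sketch of that idiom, wrapped in a hypothetical helper:

static int
mydrv_create_parent_dmat(device_t dev, bus_dma_tag_t *tagp)
{
	return (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    tagp));
}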
Example #13
0
static int
ntb_attach(device_t device)
{
	struct ntb_softc *ntb;
	struct ntb_hw_info *p;
	int error;

	ntb = DEVICE2SOFTC(device);
	p = ntb_get_device_info(pci_get_devid(device));

	ntb->device = device;
	ntb->type = p->type;
	ntb->features = p->features;

	/* Heartbeat timer for NTB_SOC since there is no link interrupt */
	callout_init(&ntb->heartbeat_timer, 1);
	callout_init(&ntb->lr_timer, 1);

	if (ntb->type == NTB_SOC)
		error = ntb_detect_soc(ntb);
	else
		error = ntb_detect_xeon(ntb);
	if (error)
		goto out;

	ntb_detect_max_mw(ntb);

	error = ntb_map_pci_bars(ntb);
	if (error)
		goto out;
	if (ntb->type == NTB_SOC)
		error = ntb_setup_soc(ntb);
	else
		error = ntb_setup_xeon(ntb);
	if (error)
		goto out;
	error = ntb_setup_interrupts(ntb);
	if (error)
		goto out;

	pci_enable_busmaster(ntb->device);

out:
	if (error != 0)
		ntb_detach(device);
	return (error);
}
Example #14
0
static int
ioat_attach(device_t device)
{
	struct ioat_softc *ioat;
	int error;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;

	error = ioat_map_pci_bar(ioat);
	if (error != 0)
		goto err;

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {
		error = ENODEV;
		goto err;
	}

	error = ioat3_attach(device);
	if (error != 0)
		goto err;

	error = pci_enable_busmaster(device);
	if (error != 0)
		goto err;

	error = ioat_setup_intr(ioat);
	if (error != 0)
		goto err;

	error = ioat_reset_hw(ioat);
	if (error != 0)
		goto err;

	ioat_process_events(ioat);
	ioat_setup_sysctl(device);

	ioat->chan_idx = ioat_channel_index;
	ioat_channel[ioat_channel_index++] = ioat;
	ioat_test_attach();

err:
	if (error != 0)
		ioat_detach(device);
	return (error);
}
Example #15
0
static int
bhndb_pci_attach(device_t dev)
{
	struct bhndb_pci_softc	*sc;
	int			 error, reg;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Enable PCI bus mastering */
	pci_enable_busmaster(device_get_parent(dev));

	/* Determine our bridge device class */
	sc->pci_devclass = BHND_DEVCLASS_PCI;
	if (pci_find_cap(device_get_parent(dev), PCIY_EXPRESS, &reg) == 0)
		sc->pci_devclass = BHND_DEVCLASS_PCIE;

	/* Determine the basic set of applicable quirks. This will be updated
	 * in bhndb_pci_init_full_config() once the PCI device core has
	 * been enumerated. */
	sc->quirks = bhndb_pci_discover_quirks(sc, NULL);

	/* Using the discovered quirks, apply any WARs required for basic
	 * register access. */
	if ((error = bhndb_pci_wars_register_access(sc)))
		return (error);

	/* Use siba(4)-compatible regwin handling until we know
	 * what kind of bus is attached */
	sc->set_regwin = bhndb_pci_compat_setregwin;

	/* Perform full bridge attach. This should call back into our
	 * bhndb_pci_init_full_config() implementation once the bridged
	 * bhnd(4) bus has been enumerated, but before any devices have been
	 * probed or attached. */
	if ((error = bhndb_attach(dev, sc->pci_devclass)))
		return (error);

	/* If supported, switch to the faster regwin handling */
	if (sc->bhndb.chipid.chip_type != BHND_CHIPTYPE_SIBA) {
		atomic_store_rel_ptr((volatile void *) &sc->set_regwin,
		    (uintptr_t) &bhndb_pci_fast_setregwin);
	}

	return (0);
}
Example #16
0
static int
ata_kauai_attach(device_t dev)
{
	struct ata_kauai_softc *sc = device_get_softc(dev);
#if USE_DBDMA_IRQ
	int dbdma_irq_rid = 1;
	struct resource *dbdma_irq;
	void *cookie;
#endif

	pci_enable_busmaster(dev);

	/* Init DMA engine */

	sc->sc_ch.dbdma_rid = 1;
	sc->sc_ch.dbdma_regs = sc->sc_memr;
	sc->sc_ch.dbdma_offset = ATA_KAUAI_DBDMAOFFSET;

	ata_dbdma_dmainit(dev);

#if USE_DBDMA_IRQ
	/* Bind to DBDMA interrupt as well */
	if ((dbdma_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &dbdma_irq_rid, RF_SHAREABLE | RF_ACTIVE)) != NULL) {
		bus_setup_intr(dev, dbdma_irq, ATA_INTR_FLAGS, NULL,
		    (driver_intr_t *)ata_kauai_dma_interrupt, sc,&cookie);
	}
#endif

	/* Set up initial mode */
	sc->pioconf[0] = sc->pioconf[1] = 
	    bus_read_4(sc->sc_memr, PIO_CONFIG_REG) & 0x0f000fff;

	sc->udmaconf[0] = sc->udmaconf[1] = 0;
	sc->wdmaconf[0] = sc->wdmaconf[1] = 0;

	/* Magic FCR value from Apple */
	bus_write_4(sc->sc_memr, 0, 0x00000007);

	/* Set begin_transaction */
	sc->sc_ch.sc_ch.hw.begin_transaction = ata_kauai_begin_transaction;

	return ata_attach(dev);
}
Example #17
0
static void
init_pvscsi(void *data)
{
    struct pci_device *pci = data;
    void *iobase = pci_enable_membar(pci, PCI_BASE_ADDRESS_0);
    if (!iobase)
        return;
    pci_enable_busmaster(pci);

    dprintf(1, "found pvscsi at %pP, io @ %p\n", pci, iobase);

    pvscsi_write_cmd_desc(iobase, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);

    struct pvscsi_ring_dsc_s *ring_dsc = NULL;
    pvscsi_init_rings(iobase, &ring_dsc);
    int i;
    for (i = 0; i < 7; i++)
        pvscsi_scan_target(pci, iobase, ring_dsc, i);
}
Example #18
0
static int
atiixp_pci_resume(device_t dev)
{
	struct atiixp_info *sc = pcm_getdevinfo(dev);

	atiixp_lock(sc);
	/* power up pci bus */
	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	pci_enable_io(dev, SYS_RES_MEMORY);
	pci_enable_busmaster(dev);
	/* reset / power up aclink */
	atiixp_reset_aclink(sc);
	atiixp_unlock(sc);

	if (mixer_reinit(dev) == -1) {
		device_printf(dev, "unable to reinitialize the mixer\n");
		return ENXIO;
	}

	/*
	 * Resume channel activities. Reset channel format regardless
	 * of its previous state.
	 */
	if (sc->pch.channel) {
		if (sc->pch.fmt)
			atiixp_chan_setformat(NULL, &sc->pch, sc->pch.fmt);
		if (sc->pch.active)
			atiixp_chan_trigger(NULL, &sc->pch, PCMTRIG_START);
	}
	if (sc->rch.channel) {
		if (sc->rch.fmt)
			atiixp_chan_setformat(NULL, &sc->rch, sc->rch.fmt);
		if (sc->rch.active)
			atiixp_chan_trigger(NULL, &sc->rch, PCMTRIG_START);
	}

	/* enable interrupts */
	atiixp_lock(sc);
	atiixp_enable_interrupts(sc);
	atiixp_unlock(sc);

	return 0;
}
Example #19
0
static int
nvme_attach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);
	int			status;

	status = nvme_ctrlr_construct(ctrlr, dev);

	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	/*
	 * Reset controller twice to ensure we do a transition from cc.en==1
	 *  to cc.en==0.  This is because we don't really know what status
	 *  the controller was left in when boot handed off to OS.
	 */
	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0) {
		nvme_ctrlr_destruct(ctrlr, dev);
		return (status);
	}

	nvme_sysctl_initialize_ctrlr(ctrlr);

	pci_enable_busmaster(dev);

	ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
	ctrlr->config_hook.ich_arg = ctrlr;

	config_intrhook_establish(&ctrlr->config_hook);

	return (0);
}
Example #20
0
static int
ntb_attach(device_t device)
{
	struct ntb_softc *ntb = DEVICE2SOFTC(device);
	struct ntb_hw_info *p = ntb_get_device_info(pci_get_devid(device));
	int error;

	ntb->device = device;
	ntb->type = p->type;
	ntb->features = p->features;

	/* Heartbeat timer for NTB_SOC since there is no link interrupt */
	callout_init(&ntb->heartbeat_timer, CALLOUT_MPSAFE);
	callout_init(&ntb->lr_timer, CALLOUT_MPSAFE);

	DETACH_ON_ERROR(ntb_map_pci_bars(ntb));
	DETACH_ON_ERROR(ntb_initialize_hw(ntb));
	DETACH_ON_ERROR(ntb_setup_interrupts(ntb));

	pci_enable_busmaster(ntb->device);

	return (error);
}
Example #21
0
static int
siba_bwn_attach(device_t dev)
{
	struct siba_bwn_softc *ssc = device_get_softc(dev);
	struct siba_softc *siba = &ssc->ssc_siba;

	siba->siba_dev = dev;
	siba->siba_type = SIBA_TYPE_PCI;

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	siba->siba_mem_rid = SIBA_PCIR_BAR;
	siba->siba_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		&siba->siba_mem_rid, RF_ACTIVE);
	if (siba->siba_mem_res == NULL) {
		device_printf(dev, "cannot map register space\n");
		return (ENXIO);
	}
	siba->siba_mem_bt = rman_get_bustag(siba->siba_mem_res);
	siba->siba_mem_bh = rman_get_bushandle(siba->siba_mem_res);

	/* Get more PCI information */
	siba->siba_pci_did = pci_get_device(dev);
	siba->siba_pci_vid = pci_get_vendor(dev);
	siba->siba_pci_subvid = pci_get_subvendor(dev);
	siba->siba_pci_subdid = pci_get_subdevice(dev);
	siba->siba_pci_revid = pci_get_revid(dev);

	return (siba_core_attach(siba));
}
Example #22
0
static int
ehci_pci_attach(device_t self)
{
	ehci_softc_t *sc = device_get_softc(self);
	int err;
	int rid;

	/* initialise some bus fields */
	sc->sc_bus.parent = self;
	sc->sc_bus.devices = sc->sc_devices;
	sc->sc_bus.devices_max = EHCI_MAX_DEVICES;

	/* get all DMA memory */
	if (usb_bus_mem_alloc_all(&sc->sc_bus,
	    USB_GET_DMA_TAG(self), &ehci_iterate_hw_softc)) {
		return (ENOMEM);
	}

	pci_enable_busmaster(self);

	switch (pci_read_config(self, PCI_USBREV, 1) & PCI_USB_REV_MASK) {
	case PCI_USB_REV_PRE_1_0:
	case PCI_USB_REV_1_0:
	case PCI_USB_REV_1_1:
		/*
		 * NOTE: some EHCI USB controllers have the wrong USB
		 * revision number. It appears those controllers are
		 * fully compliant so we just ignore this value in
		 * some common cases.
		 */
		device_printf(self, "pre-2.0 USB revision (ignored)\n");
		/* fallthrough */
	case PCI_USB_REV_2_0:
		break;
	default:
		/* Quirk for Parallels Desktop 4.0 */
		device_printf(self, "USB revision is unknown. Assuming v2.0.\n");
		break;
	}

	rid = PCI_CBMEM;
	sc->sc_io_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->sc_io_res) {
		device_printf(self, "Could not map memory\n");
		goto error;
	}
	sc->sc_io_tag = rman_get_bustag(sc->sc_io_res);
	sc->sc_io_hdl = rman_get_bushandle(sc->sc_io_res);
	sc->sc_io_size = rman_get_size(sc->sc_io_res);

	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(self, "Could not allocate irq\n");
		goto error;
	}
	sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
	if (!sc->sc_bus.bdev) {
		device_printf(self, "Could not add USB device\n");
		goto error;
	}
	device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);

	/*
	 * ehci_pci_match will never return NULL if ehci_pci_probe
	 * succeeded
	 */
	device_set_desc(sc->sc_bus.bdev, ehci_pci_match(self));
	switch (pci_get_vendor(self)) {
	case PCI_EHCI_VENDORID_ACERLABS:
		sprintf(sc->sc_vendor, "AcerLabs");
		break;
	case PCI_EHCI_VENDORID_AMD:
		sprintf(sc->sc_vendor, "AMD");
		break;
	case PCI_EHCI_VENDORID_APPLE:
		sprintf(sc->sc_vendor, "Apple");
		break;
	case PCI_EHCI_VENDORID_ATI:
		sprintf(sc->sc_vendor, "ATI");
		break;
	case PCI_EHCI_VENDORID_CMDTECH:
		sprintf(sc->sc_vendor, "CMDTECH");
		break;
	case PCI_EHCI_VENDORID_INTEL:
		sprintf(sc->sc_vendor, "Intel");
		break;
	case PCI_EHCI_VENDORID_NEC:
		sprintf(sc->sc_vendor, "NEC");
		break;
	case PCI_EHCI_VENDORID_OPTI:
		sprintf(sc->sc_vendor, "OPTi");
		break;
	case PCI_EHCI_VENDORID_PHILIPS:
		sprintf(sc->sc_vendor, "Philips");
		break;
	case PCI_EHCI_VENDORID_SIS:
		sprintf(sc->sc_vendor, "SiS");
		break;
	case PCI_EHCI_VENDORID_NVIDIA:
	case PCI_EHCI_VENDORID_NVIDIA2:
		sprintf(sc->sc_vendor, "nVidia");
		break;
	case PCI_EHCI_VENDORID_VIA:
		sprintf(sc->sc_vendor, "VIA");
		break;
	default:
		if (bootverbose)
			device_printf(self, "(New EHCI DeviceId=0x%08x)\n",
			    pci_get_devid(self));
		sprintf(sc->sc_vendor, "(0x%04x)", pci_get_vendor(self));
	}

#if (__FreeBSD_version >= 700031)
	err = bus_setup_intr(self, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
	    NULL, (driver_intr_t *)ehci_interrupt, sc, &sc->sc_intr_hdl);
#else
	err = bus_setup_intr(self, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
	    (driver_intr_t *)ehci_interrupt, sc, &sc->sc_intr_hdl);
#endif
	if (err) {
		device_printf(self, "Could not setup irq, %d\n", err);
		sc->sc_intr_hdl = NULL;
		goto error;
	}
	ehci_pci_take_controller(self);

	/* Undocumented quirks taken from Linux */

	switch (pci_get_vendor(self)) {
	case PCI_EHCI_VENDORID_ATI:
		/* SB600 and SB700 EHCI quirk */
		switch (pci_get_device(self)) {
		case 0x4386:
			ehci_pci_ati_quirk(self, 0);
			break;
		case 0x4396:
			ehci_pci_ati_quirk(self, 1);
			break;
		default:
			break;
		}
		break;

	case PCI_EHCI_VENDORID_VIA:
		ehci_pci_via_quirk(self);
		break;

	default:
		break;
	}

	/* Dropped interrupts workaround */
	switch (pci_get_vendor(self)) {
	case PCI_EHCI_VENDORID_ATI:
	case PCI_EHCI_VENDORID_VIA:
		sc->sc_flags |= EHCI_SCFLG_LOSTINTRBUG;
		if (bootverbose)
			device_printf(self,
			    "Dropped interrupts workaround enabled\n");
		break;
	default:
		break;
	}

	/* Doorbell feature workaround */
	switch (pci_get_vendor(self)) {
	case PCI_EHCI_VENDORID_NVIDIA:
	case PCI_EHCI_VENDORID_NVIDIA2:
		sc->sc_flags |= EHCI_SCFLG_IAADBUG;
		if (bootverbose)
			device_printf(self,
			    "Doorbell workaround enabled\n");
		break;
	default:
		break;
	}

	err = ehci_init(sc);
	if (!err) {
		err = device_probe_and_attach(sc->sc_bus.bdev);
	}
	if (err) {
		device_printf(self, "USB init failed err=%d\n", err);
		goto error;
	}
	return (0);

error:
	ehci_pci_detach(self);
	return (ENXIO);
}
Example #23
0
static int
mfi_pci_attach(device_t dev)
{
	struct mfi_softc *sc;
	struct mfi_ident *m;
	int count, error;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->mfi_dev = dev;
	m = mfi_find_ident(dev);
	sc->mfi_flags = m->flags;

	/* Ensure busmastering is enabled */
	pci_enable_busmaster(dev);

	/* Allocate PCI registers */
	if ((sc->mfi_flags & MFI_FLAGS_1064R) ||
	    (sc->mfi_flags & MFI_FLAGS_1078)) {
		/* 1068/1078: Memory mapped BAR is at offset 0x10 */
		sc->mfi_regs_rid = PCIR_BAR(0);
	}
	else if ((sc->mfi_flags & MFI_FLAGS_GEN2) ||
		 (sc->mfi_flags & MFI_FLAGS_SKINNY) ||
		(sc->mfi_flags & MFI_FLAGS_TBOLT)) { 
		/* Gen2/Skinny: Memory mapped BAR is at offset 0x14 */
		sc->mfi_regs_rid = PCIR_BAR(1);
	}
	if ((sc->mfi_regs_resource = bus_alloc_resource_any(sc->mfi_dev,
	    SYS_RES_MEMORY, &sc->mfi_regs_rid, RF_ACTIVE)) == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		return (ENXIO);
	}
	sc->mfi_btag = rman_get_bustag(sc->mfi_regs_resource);
	sc->mfi_bhandle = rman_get_bushandle(sc->mfi_regs_resource);

	error = ENOMEM;

	/* Allocate parent DMA tag */
	if (bus_dma_tag_create(	bus_get_dma_tag(dev),	/* PCI parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				BUS_SPACE_UNRESTRICTED,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_parent_dmat)) {
		device_printf(dev, "Cannot allocate parent DMA tag\n");
		goto out;
	}

	/* Allocate IRQ resource. */
	sc->mfi_irq_rid = 0;
	count = 1;
	if (mfi_msi && pci_alloc_msi(sc->mfi_dev, &count) == 0) {
		device_printf(sc->mfi_dev, "Using MSI\n");
		sc->mfi_irq_rid = 1;
	}
	if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
	    &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
		error = EINVAL;
		goto out;
	}

	error = mfi_attach(sc);
out:
	if (error) {
		mfi_free(sc);
		mfi_pci_free(sc);
	}

	return (error);
}
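mfi_pci_attach() above defaults to the legacy INTx interrupt at rid 0 and only switches to rid 1 when a single MSI vector can be allocated; athp_pci_attach() in Example #24 extends the same fallback chain to multiple vectors. A hedged sketch of the single-vector case (the helper name is hypothetical):

static int
mydrv_choose_irq_rid(device_t dev)
{
	int count = 1;

	/* MSI vectors show up as SYS_RES_IRQ rids starting at 1. */
	if (pci_alloc_msi(dev, &count) == 0)
		return (1);

	/* Fall back to the shared legacy interrupt at rid 0. */
	return (0);
}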
Example #24
0
static int
athp_pci_attach(device_t dev)
{
	struct ath10k_pci *ar_pci = device_get_softc(dev);
	struct ath10k *ar = &ar_pci->sc_sc;
	int rid, i;
	int err = 0;
	int ret;

	ar->sc_dev = dev;
	ar->sc_invalid = 1;

	/* XXX TODO: initialize sc_debug from TUNABLE */
#if 0
	ar->sc_debug = ATH10K_DBG_BOOT | ATH10K_DBG_PCI | ATH10K_DBG_HTC |
	    ATH10K_DBG_PCI_DUMP | ATH10K_DBG_WMI | ATH10K_DBG_BMI | ATH10K_DBG_MAC |
	    ATH10K_DBG_WMI_PRINT | ATH10K_DBG_MGMT | ATH10K_DBG_DATA | ATH10K_DBG_HTT;
#endif
	ar->sc_psc = ar_pci;

	/* Load-time tunable/sysctl tree */
	athp_attach_sysctl(ar);

	/* Enable WMI/HTT RX for now */
	ar->sc_rx_wmi = 1;
	ar->sc_rx_htt = 1;

	/* Fetch pcie capability offset */
	ret = pci_find_cap(dev, PCIY_EXPRESS, &ar_pci->sc_cap_off);
	if (ret != 0) {
		device_printf(dev,
		    "%s: failed to find pci-express capability offset\n",
		    __func__);
		return (ret);
	}

	/*
	 * Initialise ath10k core bits.
	 */
	if (ath10k_core_init(ar) < 0)
		goto bad0;

	/*
	 * Initialise ath10k freebsd bits.
	 */
	sprintf(ar->sc_mtx_buf, "%s:def", device_get_nameunit(dev));
	mtx_init(&ar->sc_mtx, ar->sc_mtx_buf, MTX_NETWORK_LOCK,
	    MTX_DEF);

	sprintf(ar->sc_buf_mtx_buf, "%s:buf", device_get_nameunit(dev));
	mtx_init(&ar->sc_buf_mtx, ar->sc_buf_mtx_buf, "athp buf", MTX_DEF);

	sprintf(ar->sc_dma_mtx_buf, "%s:dma", device_get_nameunit(dev));
	mtx_init(&ar->sc_dma_mtx, ar->sc_dma_mtx_buf, "athp dma", MTX_DEF);

	sprintf(ar->sc_conf_mtx_buf, "%s:conf", device_get_nameunit(dev));
	mtx_init(&ar->sc_conf_mtx, ar->sc_conf_mtx_buf, "athp conf",
	    MTX_DEF | MTX_RECURSE);

	sprintf(ar_pci->ps_mtx_buf, "%s:ps", device_get_nameunit(dev));
	mtx_init(&ar_pci->ps_mtx, ar_pci->ps_mtx_buf, "athp ps", MTX_DEF);

	sprintf(ar_pci->ce_mtx_buf, "%s:ce", device_get_nameunit(dev));
	mtx_init(&ar_pci->ce_mtx, ar_pci->ce_mtx_buf, "athp ce", MTX_DEF);

	sprintf(ar->sc_data_mtx_buf, "%s:data", device_get_nameunit(dev));
	mtx_init(&ar->sc_data_mtx, ar->sc_data_mtx_buf, "athp data",
	    MTX_DEF);

	/*
	 * Initialise ath10k BMI/PCIDIAG bits.
	 */
	ret = athp_descdma_alloc(ar, &ar_pci->sc_bmi_txbuf, "bmi_msg_req",
	    4, 1024);
	ret |= athp_descdma_alloc(ar, &ar_pci->sc_bmi_rxbuf, "bmi_msg_resp",
	    4, 1024);
	if (ret != 0) {
		device_printf(dev, "%s: failed to allocate BMI TX/RX buffer\n",
		    __func__);
		goto bad0;
	}

	/*
	 * Initialise HTT descriptors/memory.
	 */
	ret = ath10k_htt_rx_alloc_desc(ar, &ar->htt);
	if (ret != 0) {
		device_printf(dev, "%s: failed to alloc HTT RX descriptors\n",
		    __func__);
		goto bad;
	}

	/* XXX here instead of in core_init because we need the lock init'ed */
	callout_init_mtx(&ar->scan.timeout, &ar->sc_data_mtx, 0);

	ar_pci->pipe_taskq = taskqueue_create("athp pipe taskq", M_NOWAIT,
	    NULL, ar_pci);
	(void) taskqueue_start_threads(&ar_pci->pipe_taskq, 1, PI_NET, "%s pipe taskq",
	    device_get_nameunit(dev));
	if (ar_pci->pipe_taskq == NULL) {
		device_printf(dev, "%s: couldn't create pipe taskq\n",
		    __func__);
		err = ENXIO;
		goto bad;
	}

	/*
	 * Look at the device/vendor ID and choose which register offset
	 * mapping to use.  This is used by a lot of the register access
	 * pieces to get the correct device-specific windows.
	 */
	ar_pci->sc_vendorid = pci_get_vendor(dev);
	ar_pci->sc_deviceid = pci_get_device(dev);
	if (athp_pci_hw_lookup(ar_pci) != 0) {
		device_printf(dev, "%s: hw lookup failed\n", __func__);
		err = ENXIO;
		goto bad;
	}

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	ar_pci->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (ar_pci->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		err = ENXIO;
		goto bad;
	}

	/* Driver copy; hopefully we can delete this */
	ar->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/* Local copy for bus operations */
	ar_pci->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar_pci->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/*
	 * Mark device invalid so any interrupts (shared or otherwise)
	 * that arrive before the HAL is setup are discarded.
	 */
	ar->sc_invalid = 1;

	printf("%s: msicount=%d, msixcount=%d\n",
	    __func__,
	    pci_msi_count(dev),
	    pci_msix_count(dev));

	/*
	 * Arrange interrupt line.
	 *
	 * XXX TODO: this is effectively ath10k_pci_init_irq().
	 * Refactor it out later.
	 *
	 * First - attempt MSI.  If we get it, then use it.
	 */
	i = MSI_NUM_REQUEST;
	if (pci_alloc_msi(dev, &i) == 0) {
		device_printf(dev, "%s: %d MSI interrupts\n", __func__, i);
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
	} else {
		i = 1;
		if (pci_alloc_msi(dev, &i) == 0) {
			device_printf(dev, "%s: 1 MSI interrupt\n", __func__);
			ar_pci->num_msi_intrs = 1;
		} else {
			device_printf(dev, "%s: legacy interrupts\n", __func__);
			ar_pci->num_msi_intrs = 0;
		}
	}
	err = ath10k_pci_request_irq(ar_pci);
	if (err != 0)
		goto bad1;

	/*
	 * Attach register ops - needed for the caller to do register IO.
	 */
	ar->sc_regio.reg_read = athp_pci_regio_read_reg;
	ar->sc_regio.reg_write = athp_pci_regio_write_reg;
	ar->sc_regio.reg_s_read = athp_pci_regio_s_read_reg;
	ar->sc_regio.reg_s_write = athp_pci_regio_s_write_reg;
	ar->sc_regio.reg_flush = athp_pci_regio_flush_reg;
	ar->sc_regio.reg_arg = ar_pci;

	/*
	 * TODO: abstract this out to be a bus/hif specific
	 * attach path.
	 *
	 * I'm not sure what USB/SDIO will look like here, but
	 * I'm pretty sure it won't involve PCI/CE setup.
	 * It'll still have WME/HIF/BMI, but it'll be done over
	 * USB endpoints.
	 */

	if (athp_pci_setup_bufs(ar_pci) != 0) {
		err = ENXIO;
		goto bad4;
	}

	/* HIF ops attach */
	ar->hif.ops = &ath10k_pci_hif_ops;
	ar->hif.bus = ATH10K_BUS_PCI;

	/* Alloc pipes */
	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		device_printf(ar->sc_dev, "%s: pci_alloc_pipes failed: %d\n",
		    __func__,
		    ret);
		/* XXX cleanup */
		err = ENXIO;
		goto bad4;
	}

	/* deinit ce */
	ath10k_pci_ce_deinit(ar);

	/* disable irq */
	ret = ath10k_pci_irq_disable(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: irq_disable failed: %d\n",
		    __func__,
		    ret);
		err = ENXIO;
		goto bad4;
	}

	/* init IRQ */
	ret = ath10k_pci_init_irq(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: init_irq failed: %d\n",
		    __func__,
		    ret);
		err = ENXIO;
		goto bad4;
	}

	/* Ok, gate open the interrupt handler */
	ar->sc_invalid = 0;

	/* pci_chip_reset */
	ret = ath10k_pci_chip_reset(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: chip_reset failed: %d\n",
		    __func__,
		    ret);
		err = ENXIO;
		goto bad4;
	}

	/* read SoC/chip version */
	ar->sc_chipid = athp_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS(ar->sc_regofs));

	/* Verify chip version is something we can use */
	device_printf(ar->sc_dev, "%s: chipid: 0x%08x\n", __func__, ar->sc_chipid);
	if (! ath10k_pci_chip_is_supported(ar_pci->sc_deviceid, ar->sc_chipid)) {
		device_printf(ar->sc_dev,
		    "%s: unsupported chip; chipid: 0x%08x\n", __func__,
		    ar->sc_chipid);
		err = ENXIO;
		goto bad4;
	}

	/* Call main attach method with given info */
	ar->sc_preinit_hook.ich_func = athp_attach_preinit;
	ar->sc_preinit_hook.ich_arg = ar;
	if (config_intrhook_establish(&ar->sc_preinit_hook) != 0) {
		device_printf(ar->sc_dev,
		    "%s: couldn't establish preinit hook\n", __func__);
		goto bad4;
	}

	return (0);

	/* Fallthrough for setup failure */
bad4:
	athp_pci_free_bufs(ar_pci);
	/* Ensure we disable interrupts from the device */
	ath10k_pci_deinit_irq(ar_pci);
	ath10k_pci_free_irq(ar_pci);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, ar_pci->sc_sr);
bad:

	ath10k_htt_rx_free_desc(ar, &ar->htt);

	athp_descdma_free(ar, &ar_pci->sc_bmi_txbuf);
	athp_descdma_free(ar, &ar_pci->sc_bmi_rxbuf);

	/* XXX disable busmaster? */
	mtx_destroy(&ar_pci->ps_mtx);
	mtx_destroy(&ar_pci->ce_mtx);
	mtx_destroy(&ar->sc_conf_mtx);
	mtx_destroy(&ar->sc_data_mtx);
	mtx_destroy(&ar->sc_buf_mtx);
	mtx_destroy(&ar->sc_dma_mtx);
	mtx_destroy(&ar->sc_mtx);
	if (ar_pci->pipe_taskq) {
		taskqueue_drain_all(ar_pci->pipe_taskq);
		taskqueue_free(ar_pci->pipe_taskq);
	}

	/* Shutdown ioctl handler */
	athp_ioctl_teardown(ar);

	ath10k_core_destroy(ar);
bad0:
	return (err);
}
Example #25
0
static int
cs4281_pci_attach(device_t dev)
{
    struct sc_info *sc;
    struct ac97_info *codec = NULL;
    char status[SND_STATUSLEN];

    sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
    sc->dev = dev;
    sc->type = pci_get_devid(dev);

    pci_enable_busmaster(dev);

#if __FreeBSD_version > 500000
    if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
	/* Reset the power state. */
	device_printf(dev, "chip is in D%d power mode "
		      "-- setting to D0\n", pci_get_powerstate(dev));

	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
    }
#else
    u_int32_t data = pci_read_config(dev, CS4281PCI_PMCS_OFFSET, 4);
    if (data & CS4281PCI_PMCS_PS_MASK) {
	    /* Reset the power state. */
	    device_printf(dev, "chip is in D%d power mode "
			  "-- setting to D0\n",
			  data & CS4281PCI_PMCS_PS_MASK);
	    pci_write_config(dev, CS4281PCI_PMCS_OFFSET,
			     data & ~CS4281PCI_PMCS_PS_MASK, 4);
    }
#endif

    sc->regid   = PCIR_BAR(0);
    sc->regtype = SYS_RES_MEMORY;
    sc->reg = bus_alloc_resource(dev, sc->regtype, &sc->regid,
				 0, ~0, CS4281PCI_BA0_SIZE, RF_ACTIVE);
    if (!sc->reg) {
	sc->regtype = SYS_RES_IOPORT;
	sc->reg = bus_alloc_resource(dev, sc->regtype, &sc->regid,
				     0, ~0, CS4281PCI_BA0_SIZE, RF_ACTIVE);
	if (!sc->reg) {
	    device_printf(dev, "unable to allocate register space\n");
	    goto bad;
	}
    }
    sc->st = rman_get_bustag(sc->reg);
    sc->sh = rman_get_bushandle(sc->reg);

    sc->memid = PCIR_BAR(1);
    sc->mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid, 0,
				 ~0, CS4281PCI_BA1_SIZE, RF_ACTIVE);
    if (sc->mem == NULL) {
	device_printf(dev, "unable to allocate fifo space\n");
	goto bad;
    }

    sc->irqid = 0;
    sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
				     RF_ACTIVE | RF_SHAREABLE);
    if (!sc->irq) {
	device_printf(dev, "unable to allocate interrupt\n");
	goto bad;
    }

    if (snd_setup_intr(dev, sc->irq, 0, cs4281_intr, sc, &sc->ih)) {
	device_printf(dev, "unable to setup interrupt\n");
	goto bad;
    }

    sc->bufsz = pcm_getbuffersize(dev, 4096, CS4281_DEFAULT_BUFSZ, 65536);

    if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2,
			   /*boundary*/0,
			   /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			   /*highaddr*/BUS_SPACE_MAXADDR,
			   /*filter*/NULL, /*filterarg*/NULL,
			   /*maxsize*/sc->bufsz, /*nsegments*/1,
			   /*maxsegz*/0x3ffff,
			   /*flags*/0, /*lockfunc*/busdma_lock_mutex,
			   /*lockarg*/&Giant, &sc->parent_dmat) != 0) {
	device_printf(dev, "unable to create dma tag\n");
	goto bad;
    }

    /* power up */
    cs4281_power(sc, 0);

    /* init chip */
    if (cs4281_init(sc) == -1) {
	device_printf(dev, "unable to initialize the card\n");
	goto bad;
    }

    /* create/init mixer */
    codec = AC97_CREATE(dev, sc, cs4281_ac97);
    if (codec == NULL)
        goto bad;

    mixer_init(dev, ac97_getmixerclass(), codec);

    if (pcm_register(dev, sc, 1, 1))
	goto bad;

    pcm_addchan(dev, PCMDIR_PLAY, &cs4281chan_class, sc);
    pcm_addchan(dev, PCMDIR_REC, &cs4281chan_class, sc);

    snprintf(status, SND_STATUSLEN, "at %s 0x%lx irq %ld %s",
	     (sc->regtype == SYS_RES_IOPORT)? "io" : "memory",
	     rman_get_start(sc->reg), rman_get_start(sc->irq),PCM_KLDSTRING(snd_cs4281));
    pcm_setstatus(dev, status);

    return 0;

 bad:
    if (codec)
	ac97_destroy(codec);
    if (sc->reg)
	bus_release_resource(dev, sc->regtype, sc->regid, sc->reg);
    if (sc->mem)
	bus_release_resource(dev, SYS_RES_MEMORY, sc->memid, sc->mem);
    if (sc->ih)
	bus_teardown_intr(dev, sc->irq, sc->ih);
    if (sc->irq)
	bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq);
    if (sc->parent_dmat)
	bus_dma_tag_destroy(sc->parent_dmat);
    free(sc, M_DEVBUF);

    return ENXIO;
}
Example #26
0
static int
amr_pci_attach(device_t dev)
{
    struct amr_softc	*sc;
    struct amr_ident	*id;
    int			rid, rtype, error;

    debug_called(1);

    /*
     * Initialise softc.
     */
    sc = device_get_softc(dev);
    bzero(sc, sizeof(*sc));
    sc->amr_dev = dev;

    /* assume failure is 'not configured' */
    error = ENXIO;

    /*
     * Determine board type.
     */
    if ((id = amr_find_ident(dev)) == NULL)
	return (ENXIO);

    if (id->flags & AMR_ID_QUARTZ) {
	sc->amr_type |= AMR_TYPE_QUARTZ;
    }

    if ((amr_force_sg32 == 0) && (id->flags & AMR_ID_DO_SG64) &&
	(sizeof(vm_paddr_t) > 4)) {
	device_printf(dev, "Using 64-bit DMA\n");
	sc->amr_type |= AMR_TYPE_SG64;
    }

    /* force the busmaster enable bit on */
    pci_enable_busmaster(dev);

    /*
     * Allocate the PCI register window.
     */
    rid = PCIR_BAR(0);
    rtype = AMR_IS_QUARTZ(sc) ? SYS_RES_MEMORY : SYS_RES_IOPORT;
    sc->amr_reg = bus_alloc_resource_any(dev, rtype, &rid, RF_ACTIVE);
    if (sc->amr_reg == NULL) {
	device_printf(sc->amr_dev, "can't allocate register window\n");
	goto out;
    }
    sc->amr_btag = rman_get_bustag(sc->amr_reg);
    sc->amr_bhandle = rman_get_bushandle(sc->amr_reg);

    /*
     * Allocate and connect our interrupt.
     */
    rid = 0;
    sc->amr_irq = bus_alloc_resource_any(sc->amr_dev, SYS_RES_IRQ, &rid,
        RF_SHAREABLE | RF_ACTIVE);
    if (sc->amr_irq == NULL) {
        device_printf(sc->amr_dev, "can't allocate interrupt\n");
	goto out;
    }
    if (bus_setup_intr(sc->amr_dev, sc->amr_irq,
	INTR_TYPE_BIO | INTR_ENTROPY | INTR_MPSAFE, NULL, amr_pci_intr,
	sc, &sc->amr_intr)) {
        device_printf(sc->amr_dev, "can't set up interrupt\n");
	goto out;
    }

    debug(2, "interrupt attached");

    /* assume failure is 'out of memory' */
    error = ENOMEM;

    /*
     * Allocate the parent bus DMA tag appropriate for PCI.
     */
    if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* PCI parent */
			   1, 0, 			/* alignment,boundary */
			   AMR_IS_SG64(sc) ?
			   BUS_SPACE_MAXADDR :
			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			   BUS_SPACE_MAXADDR, 		/* highaddr */
			   NULL, NULL, 			/* filter, filterarg */
			   BUS_SPACE_MAXSIZE,		/* maxsize */
			   BUS_SPACE_UNRESTRICTED,	/* nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			   0,				/* flags */
			   NULL, NULL,			/* lockfunc, lockarg */
			   &sc->amr_parent_dmat)) {
	device_printf(dev, "can't allocate parent DMA tag\n");
	goto out;
    }

    /*
     * Create DMA tag for mapping buffers into controller-addressable space.
     */
    if (bus_dma_tag_create(sc->amr_parent_dmat,		/* parent */
			   1, 0,			/* alignment,boundary */
			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			   BUS_SPACE_MAXADDR,		/* highaddr */
			   NULL, NULL,			/* filter, filterarg */
			   DFLTPHYS,			/* maxsize */
			   AMR_NSEG,			/* nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			   0,		/* flags */
			   busdma_lock_mutex,		/* lockfunc */
			   &sc->amr_list_lock,		/* lockarg */
			   &sc->amr_buffer_dmat)) {
        device_printf(sc->amr_dev, "can't allocate buffer DMA tag\n");
	goto out;
    }

    if (bus_dma_tag_create(sc->amr_parent_dmat,		/* parent */
			   1, 0,			/* alignment,boundary */
			   BUS_SPACE_MAXADDR,		/* lowaddr */
			   BUS_SPACE_MAXADDR,		/* highaddr */
			   NULL, NULL,			/* filter, filterarg */
			   DFLTPHYS,			/* maxsize */
			   AMR_NSEG,			/* nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			   0,		/* flags */
			   busdma_lock_mutex,		/* lockfunc */
			   &sc->amr_list_lock,		/* lockarg */
			   &sc->amr_buffer64_dmat)) {
        device_printf(sc->amr_dev, "can't allocate buffer DMA tag\n");
	goto out;
    }

    debug(2, "dma tag done");

    /*
     * Allocate and set up mailbox in a bus-visible fashion.
     */
    mtx_init(&sc->amr_list_lock, "AMR List Lock", NULL, MTX_DEF);
    mtx_init(&sc->amr_hw_lock, "AMR HW Lock", NULL, MTX_DEF);
    if ((error = amr_setup_mbox(sc)) != 0)
	goto out;

    debug(2, "mailbox setup");

    /*
     * Build the scatter/gather buffers.
     */
    if ((error = amr_sglist_map(sc)) != 0)
	goto out;
    debug(2, "s/g list mapped");

    if ((error = amr_ccb_map(sc)) != 0)
	goto out;
    debug(2, "ccb mapped");


    /*
     * Do bus-independent initialisation, bring controller online.
     */
    error = amr_attach(sc);

out:
    if (error)
	amr_pci_free(sc);
    return(error);
}
Example #27
0
static int
adv_pci_attach(device_t dev)
{
	struct		adv_softc *adv;
	u_int32_t	id;
	int		error, rid, irqrid;
	void		*ih;
	struct resource	*iores, *irqres;

	/*
	 * Determine the chip version.
	 */
	id = pci_get_devid(dev);
	pci_enable_busmaster(dev);

	/*
	 * Early chips can't handle non-zero latency timer settings.
	 */
	if (id == PCI_DEVICE_ID_ADVANSYS_1200A
	 || id == PCI_DEVICE_ID_ADVANSYS_1200B) {
		pci_write_config(dev, PCIR_LATTIMER, /*value*/0, /*bytes*/1);
	}

	rid = PCI_BASEADR0;
	iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
				       RF_ACTIVE);
	if (iores == NULL)
		return ENXIO;

	if (adv_find_signature(iores) == 0) {
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv = adv_alloc(dev, iores, 0);
	if (adv == NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	/* Allocate a dmatag for our transfer DMA maps */
	error = bus_dma_tag_create(
			/* parent	*/ bus_get_dma_tag(dev),
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ ADV_PCI_MAX_DMA_ADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* nsegments	*/ ~0,
			/* maxsegsz	*/ ADV_PCI_MAX_DMA_COUNT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adv->parent_dmat);
 
	if (error != 0) {
		device_printf(dev, "Could not allocate DMA tag - error %d\n",
		    error);
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv->init_level++;

	if (overrun_buf == NULL) {
		/* Need to allocate our overrun buffer */
		if (bus_dma_tag_create(
				/* parent	*/ adv->parent_dmat,
				/* alignment	*/ 8,
				/* boundary	*/ 0,
				/* lowaddr	*/ ADV_PCI_MAX_DMA_ADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ ADV_OVERRUN_BSIZE,
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&overrun_dmat) != 0) {
			bus_dma_tag_destroy(adv->parent_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
			return ENXIO;
       		}
		if (bus_dmamem_alloc(overrun_dmat,
				     &overrun_buf,
				     BUS_DMA_NOWAIT,
				     &overrun_dmamap) != 0) {
			bus_dma_tag_destroy(overrun_dmat);
			bus_dma_tag_destroy(adv->parent_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
			return ENXIO;
		}
		/* And permanently map it in */  
		bus_dmamap_load(overrun_dmat, overrun_dmamap,
				overrun_buf, ADV_OVERRUN_BSIZE,
				adv_map, &overrun_physbase,
				/*flags*/0);
	}

	adv->overrun_physbase = overrun_physbase;
			
	/*
	 * Stop the chip.
	 */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);

	adv->chip_version = ADV_INB(adv, ADV_NONEISA_CHIP_REVISION);
	adv->type = ADV_PCI;
	
	/*
	 * Setup active negation and signal filtering.
	 */
	{
		u_int8_t extra_cfg;

		if (adv->chip_version >= ADV_CHIP_VER_PCI_ULTRA_3150)
			adv->type |= ADV_ULTRA;
		if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050)
			extra_cfg = ADV_IFC_ACT_NEG | ADV_IFC_WR_EN_FILTER;
		else
			extra_cfg = ADV_IFC_ACT_NEG | ADV_IFC_SLEW_RATE;
		ADV_OUTB(adv, ADV_REG_IFC, extra_cfg);
	}

	if (adv_init(adv) != 0) {
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv->max_dma_count = ADV_PCI_MAX_DMA_COUNT;
	adv->max_dma_addr = ADV_PCI_MAX_DMA_ADDR;

#if defined(CC_DISABLE_PCI_PARITY_INT) && CC_DISABLE_PCI_PARITY_INT
	{
		u_int16_t config_msw;

		config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
		config_msw &= 0xFFC0;
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); 
	}
#endif
 
	if (id == PCI_DEVICE_ID_ADVANSYS_1200A
	 || id == PCI_DEVICE_ID_ADVANSYS_1200B) {
		adv->bug_fix_control |= ADV_BUG_FIX_IF_NOT_DWB;
		adv->bug_fix_control |= ADV_BUG_FIX_ASYN_USE_SYN;
		adv->fix_asyn_xfer = ~0;
	}

	irqrid = 0;
	irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irqrid,
					RF_SHAREABLE | RF_ACTIVE);
	if (irqres == NULL ||
	    bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE,
	    NULL, adv_intr, adv, &ih) != 0) {
		if (irqres != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, irqrid, irqres);
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	if (adv_attach(adv) != 0) {
		bus_teardown_intr(dev, irqres, ih);
		bus_release_resource(dev, SYS_RES_IRQ, irqrid, irqres);
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}
	return 0;
}
Example #28
0
static int
adw_pci_attach(device_t dev)
{
	struct		adw_softc *adw;
	struct		adw_pci_identity *entry;
	u_int16_t	command;
	struct		resource *regs;
	int		regs_type;
	int		regs_id;
	int		error;
	int		zero;
 
	entry = adw_find_pci_device(dev);
	if (entry == NULL)
		return (ENXIO);
	regs = NULL;
	regs_type = 0;
	regs_id = 0;
#ifdef ADW_ALLOW_MEMIO
	regs_type = SYS_RES_MEMORY;
	regs_id = ADW_PCI_MEMBASE;
	regs = bus_alloc_resource_any(dev, regs_type, &regs_id, RF_ACTIVE);
#endif
	if (regs == NULL) {
		regs_type = SYS_RES_IOPORT;
		regs_id = ADW_PCI_IOBASE;
		regs = bus_alloc_resource_any(dev, regs_type,
					      &regs_id, RF_ACTIVE);
	}

	if (regs == NULL) {
		device_printf(dev, "can't allocate register resources\n");
		return (ENOMEM);
	}

	adw = adw_alloc(dev, regs, regs_type, regs_id);
	if (adw == NULL)
		return(ENOMEM);

	/*
	 * Now that we have access to our registers, just verify that
	 * this really is an AdvanSys device.
	 */
	if (adw_find_signature(adw) == 0) {
		adw_free(adw);
		return (ENXIO);
	}

	adw_reset_chip(adw);

	error = entry->setup(dev, entry, adw);

	if (error != 0)
		return (error);

	/* Ensure busmastering is enabled */
	pci_enable_busmaster(dev);

	/* Allocate a dmatag for our transfer DMA maps */
	error = bus_dma_tag_create(
			/* parent	*/ bus_get_dma_tag(dev),
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ ADW_PCI_MAX_DMA_ADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* nsegments	*/ ~0,
			/* maxsegsz	*/ ADW_PCI_MAX_DMA_COUNT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->parent_dmat);

	adw->init_level++;
 
	if (error != 0) {
		device_printf(dev, "Could not allocate DMA tag - error %d\n",
		    error);
		adw_free(adw);
		return (error);
	}

	adw->init_level++;

	error = adw_init(adw);
	if (error != 0) {
		adw_free(adw);
		return (error);
	}

	/*
	 * If the PCI Configuration Command Register "Parity Error Response
	 * Control" Bit was clear (0), then set the microcode variable
	 * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode
	 * to ignore DMA parity errors.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, /*bytes*/2);
	if ((command & PCIM_CMD_PERRESPEN) == 0)
		adw_lram_write_16(adw, ADW_MC_CONTROL_FLAG,
				  adw_lram_read_16(adw, ADW_MC_CONTROL_FLAG)
				  | ADW_MC_CONTROL_IGN_PERR);

	zero = 0;
	adw->irq_res_type = SYS_RES_IRQ;
	adw->irq = bus_alloc_resource_any(dev, adw->irq_res_type, &zero,
					  RF_ACTIVE | RF_SHAREABLE);
	if (adw->irq == NULL) {
		adw_free(adw);
		return (ENOMEM);
	}

	error = adw_attach(adw);
	if (error != 0)
		adw_free(adw);
	return (error);
}
Example #29
0
static int
xhci_pci_attach(device_t self)
{
	struct xhci_softc *sc = device_get_softc(self);
	int err;
	int rid;

	/* XXX check for 64-bit capability */

	if (xhci_init(sc, self)) {
		device_printf(self, "Could not initialize softc\n");
		goto error;
	}

	pci_enable_busmaster(self);

	rid = PCI_XHCI_CBMEM;
	sc->sc_io_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->sc_io_res) {
		device_printf(self, "Could not map memory\n");
		goto error;
	}
	sc->sc_io_tag = rman_get_bustag(sc->sc_io_res);
	sc->sc_io_hdl = rman_get_bushandle(sc->sc_io_res);
	sc->sc_io_size = rman_get_size(sc->sc_io_res);

	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(self, "Could not allocate IRQ\n");
		goto error;
	}
	sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
	if (sc->sc_bus.bdev == NULL) {
		device_printf(self, "Could not add USB device\n");
		goto error;
	}
	device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);

	ksprintf(sc->sc_vendor, "0x%04x", pci_get_vendor(self));

	err = bus_setup_intr(self, sc->sc_irq_res, INTR_MPSAFE,
	    (driver_intr_t *)xhci_interrupt, sc, &sc->sc_intr_hdl, NULL);
	
	if (err) {
		device_printf(self, "Could not setup IRQ, err=%d\n", err);
		sc->sc_intr_hdl = NULL;
		goto error;
	}
	xhci_pci_take_controller(self);

	err = xhci_halt_controller(sc);

	if (err == 0)
		err = xhci_start_controller(sc);

	if (err == 0)
		err = device_probe_and_attach(sc->sc_bus.bdev);

	if (err) {
		device_printf(self, "XHCI halt/start/probe failed err=%d\n", err);
		goto error;
	}
	return (0);

error:
	xhci_pci_detach(self);
	return (ENXIO);
}
Example #30
0
/*
 * Allocate resources for our device, set up the bus interface.
 */
static int
aacraid_pci_attach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_ident *id;
	int error;
	u_int32_t command;

	fwprintf(NULL, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Initialise softc.
	 */
	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->aac_dev = dev;

	/* assume failure is 'not configured' */
	error = ENXIO;

	/* 
	 * Verify that the adapter is correctly set up in PCI space.
	 */
	pci_enable_busmaster(dev);
	command = pci_read_config(sc->aac_dev, PCIR_COMMAND, 2);
	if (!(command & PCIM_CMD_BUSMASTEREN)) {
		device_printf(sc->aac_dev, "can't enable bus-master feature\n");
		goto out;
	}

	/* 
	 * Detect the hardware interface version, set up the bus interface
	 * indirection.
	 */
	id = aac_find_ident(dev);
	sc->aac_hwif = id->hwif;
	switch(sc->aac_hwif) {
	case AAC_HWIF_SRC:
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "set hardware up for PMC SRC");
		sc->aac_if = aacraid_src_interface;
		break;
	case AAC_HWIF_SRCV:
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "set hardware up for PMC SRCv");
		sc->aac_if = aacraid_srcv_interface;
		break;
	default:
		sc->aac_hwif = AAC_HWIF_UNKNOWN;
		device_printf(sc->aac_dev, "unknown hardware type\n");
		error = ENXIO;
		goto out;
	}

	/* assume failure is 'out of memory' */
	error = ENOMEM;

	/*
	 * Allocate the PCI register window.
	 */
	sc->aac_regs_rid0 = PCIR_BAR(0);
	if ((sc->aac_regs_res0 = bus_alloc_resource_any(sc->aac_dev,
	    SYS_RES_MEMORY, &sc->aac_regs_rid0, RF_ACTIVE)) == NULL) {
		device_printf(sc->aac_dev,
		    "couldn't allocate register window 0\n");
		goto out;
	}
	sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
	sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);

	sc->aac_regs_rid1 = PCIR_BAR(2);
	if ((sc->aac_regs_res1 = bus_alloc_resource_any(sc->aac_dev,
	    SYS_RES_MEMORY, &sc->aac_regs_rid1, RF_ACTIVE)) == NULL) {
		device_printf(sc->aac_dev,
		    "couldn't allocate register window 1\n");
		goto out;
	}
	sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1);
	sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1);

	/*
	 * Allocate the parent bus DMA tag appropriate for our PCI interface.
	 * 
	 * Note that some of these controllers are 64-bit capable.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 	/* parent */
			       PAGE_SIZE, 0,		/* algnmnt, boundary */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_parent_dmat)) {
		device_printf(sc->aac_dev, "can't allocate parent DMA tag\n");
		goto out;
	}

	/* Set up quirks */
	sc->flags = id->quirks;

	/*
	 * Do bus-independent initialisation.
	 */
	error = aacraid_attach(sc);

out:
	if (error)
		aacraid_free(sc);
	return(error);
}