/*
 * Attach the i80321 Application Accelerator Unit (AAU).
 *
 * Subregions the AAU register window out of the parent i80321 softc's
 * bus space, creates a DMA tag and a ring of AAU_RING_SIZE descriptors,
 * then hooks the AAU up as the platform's accelerated bzero provider.
 *
 * All failures here are fatal (panic) -- this runs once at boot and the
 * rest of the driver assumes the ring exists.
 */
static int
i80321_aau_attach(device_t dev)
{
	struct i80321_aau_softc *softc = device_get_softc(dev);
	struct i80321_softc *sc = device_get_softc(device_get_parent(dev));
	struct i80321_aaudesc_s *aaudescs;

	/* Spin mutex: the AAU is driven from contexts that cannot sleep. */
	mtx_init(&softc->mtx, "AAU mtx", NULL, MTX_SPIN);
	softc->sc_st = sc->sc_st;
	/* Carve the AAU register window out of the parent's mapping. */
	if (bus_space_subregion(softc->sc_st, sc->sc_sh, VERDE_AAU_BASE,
	    VERDE_AAU_SIZE, &softc->sc_aau_sh) != 0)
		panic("%s: unable to subregion AAU registers",
		    device_get_name(dev));
	/*
	 * Descriptors must be aligned to their own size; the whole ring is
	 * allocated as one contiguous slab (maxsize = ring, nsegments = 1).
	 */
	if (bus_dma_tag_create(NULL, sizeof(i80321_aaudesc_t), 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    AAU_RING_SIZE * sizeof(i80321_aaudesc_t), 1,
	    sizeof(i80321_aaudesc_t), BUS_DMA_ALLOCNOW, busdma_lock_mutex,
	    &Giant, &softc->dmatag))
		panic("Couldn't create a dma tag");
	/* One allocation backs the whole ring; map 0 comes from the alloc. */
	if (bus_dmamem_alloc(softc->dmatag, (void **)&aaudescs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &softc->aauring[0].map))
		panic("Couldn't alloc dma memory");
	for (int i = 0; i < AAU_RING_SIZE; i++) {
		/* Entry 0 already has the map from bus_dmamem_alloc(). */
		if (i > 0)
			if (bus_dmamap_create(softc->dmatag, 0,
			    &softc->aauring[i].map))
				panic("Couldn't create dma map");
		softc->aauring[i].desc = &aaudescs[i];
		/* Record each descriptor's bus address for the hardware. */
		bus_dmamap_load(softc->dmatag, softc->aauring[i].map,
		    softc->aauring[i].desc, sizeof(i80321_aaudesc_t),
		    i80321_mapphys, &softc->aauring[i].phys_addr, 0);
		bzero(softc->aauring[i].desc, sizeof(i80321_aaudesc_t));
	}
	/* Publish the AAU as the platform bzero accelerator. */
	aau_softc = softc;
	_arm_bzero = aau_bzero;
	/* Below this size the setup overhead outweighs the hardware copy. */
	_min_bzero_size = 1024;
	return (0);
}
static int amr_ccb_map(struct amr_softc *sc) { int ccbsize, error; /* * Passthrough and Extended passthrough structures will share the same * memory. */ ccbsize = sizeof(union amr_ccb) * AMR_MAXCMD; error = bus_dma_tag_create(sc->amr_parent_dmat, /* parent */ 128, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ccbsize, /* maxsize */ 1, /* nsegments */ ccbsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->amr_ccb_dmat); if (error != 0) { device_printf(sc->amr_dev, "can't allocate ccb tag\n"); return (ENOMEM); } error = bus_dmamem_alloc(sc->amr_ccb_dmat, (void **)&sc->amr_ccb, BUS_DMA_NOWAIT, &sc->amr_ccb_dmamap); if (error) { device_printf(sc->amr_dev, "can't allocate ccb memory\n"); return (ENOMEM); } bus_dmamap_load(sc->amr_ccb_dmat, sc->amr_ccb_dmamap, sc->amr_ccb, ccbsize, amr_sglist_helper, &sc->amr_ccb_busaddr, 0); bzero(sc->amr_ccb, ccbsize); return (0); }
/*
 * Allocate a physically contiguous, cacheline-aligned DMA buffer described
 * by *memory (memory->size set by the caller).  On success, fills in
 * memory->dma_tag, memory->dma_map and memory->virtual_address; the
 * physical address is recorded by isci_allocate_dma_buffer_callback.
 *
 * Returns 0 on success or the bus_dma error code on failure.
 *
 * Fix: the old code compared each return value against a single errno
 * (== ENOMEM, == EINVAL).  Any other nonzero error slipped through, so the
 * function could proceed with an invalid tag/map and still return 0.
 * All results are now checked for != 0, and intermediate allocations are
 * released on failure instead of being leaked.
 */
int
isci_allocate_dma_buffer(device_t device, struct ISCI_MEMORY *memory)
{
	uint32_t status;

	status = bus_dma_tag_create(bus_get_dma_tag(device),
	    0x40 /* cacheline alignment */, 0x0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, memory->size,
	    0x1 /* we want physically contiguous */,
	    memory->size, 0, NULL, NULL, &memory->dma_tag);

	if (status != 0) {
		isci_log_message(0, "ISCI", "bus_dma_tag_create failed\n");
		return (status);
	}

	status = bus_dmamem_alloc(memory->dma_tag,
	    (void **)&memory->virtual_address, BUS_DMA_ZERO, &memory->dma_map);

	if (status != 0) {
		isci_log_message(0, "ISCI", "bus_dmamem_alloc failed\n");
		/* Don't leak the tag created above. */
		bus_dma_tag_destroy(memory->dma_tag);
		return (status);
	}

	status = bus_dmamap_load(memory->dma_tag, memory->dma_map,
	    (void *)memory->virtual_address, memory->size,
	    isci_allocate_dma_buffer_callback, memory, 0);

	if (status != 0) {
		isci_log_message(0, "ISCI", "bus_dmamap_load failed\n");
		bus_dmamem_free(memory->dma_tag, memory->virtual_address,
		    memory->dma_map);
		bus_dma_tag_destroy(memory->dma_tag);
		return (status);
	}

	return (0);
}
/*
 * PCI attach for the ICP Vortex / Intel RAID (iir) controller.
 *
 * Maps the DPMEM window, allocates the (shared) IRQ, runs the MPR-class
 * deinit/init handshake against the controller firmware, verifies the
 * interface protocol version, installs the per-class access methods,
 * creates the parent DMA tag, and finally initializes and attaches the
 * controller to CAM.
 *
 * Fix: the error path used to release the DPMEM memory resource with the
 * variable 'rid', but 'rid' had been reused (set to 0) to allocate the
 * IRQ.  The memory resource was therefore released with the wrong rid.
 * The IRQ now uses its own 'irqrid' so 'rid' keeps the DPMEM rid.
 */
static int
iir_pci_attach(device_t dev)
{
	struct gdt_softc *gdt;
	struct resource *irq = NULL;
	int retries, rid, irqrid, error = 0;
	void *ih;
	u_int8_t protocol;

	gdt = device_get_softc(dev);
	mtx_init(&gdt->sc_lock, "iir", NULL, MTX_DEF);

	/* map DPMEM */
	rid = PCI_DPMEM;
	gdt->sc_dpmem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (gdt->sc_dpmem == NULL) {
		device_printf(dev, "can't allocate register resources\n");
		error = ENOMEM;
		goto err;
	}

	/* get IRQ (use a separate rid so the DPMEM rid survives for err:) */
	irqrid = 0;
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irqrid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "can't find IRQ value\n");
		error = ENOMEM;
		goto err;
	}

	gdt->sc_devnode = dev;
	gdt->sc_init_level = 0;
	gdt->sc_hanum = device_get_unit(dev);
	gdt->sc_bus = pci_get_bus(dev);
	gdt->sc_slot = pci_get_slot(dev);
	gdt->sc_vendor = pci_get_vendor(dev);
	gdt->sc_device = pci_get_device(dev);
	gdt->sc_subdevice = pci_get_subdevice(dev);
	gdt->sc_class = GDT_MPR;
/* no FC ctr.
	if (gdt->sc_device >= GDT_PCI_PRODUCT_FC)
		gdt->sc_class |= GDT_FC;
*/

	/* initialize RP controller */
	/* check and reset interface area: write a magic value and read it
	 * back to make sure DPMEM is really there and not shadowed. */
	bus_write_4(gdt->sc_dpmem, GDT_MPR_IC, htole32(GDT_MPR_MAGIC));
	if (bus_read_4(gdt->sc_dpmem, GDT_MPR_IC) != htole32(GDT_MPR_MAGIC)) {
		device_printf(dev, "cannot access DPMEM at 0x%lx (shadowed?)\n",
		    rman_get_start(gdt->sc_dpmem));
		error = ENXIO;
		goto err;
	}
	bus_set_region_4(gdt->sc_dpmem, GDT_I960_SZ, htole32(0),
	    GDT_MPR_SZ >> 2);

	/* Disable everything */
	bus_write_1(gdt->sc_dpmem, GDT_EDOOR_EN,
	    bus_read_1(gdt->sc_dpmem, GDT_EDOOR_EN) | 4);
	bus_write_1(gdt->sc_dpmem, GDT_MPR_EDOOR, 0xff);
	bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS, 0);
	bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_CMD_INDEX, 0);

	/* DEINIT handshake: post our DPMEM base, ring the local doorbell
	 * and wait for the firmware to acknowledge with status 0xff. */
	bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO,
	    htole32(rman_get_start(gdt->sc_dpmem)));
	bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_CMD_INDX, 0xff);
	bus_write_1(gdt->sc_dpmem, GDT_MPR_LDOOR, 1);

	DELAY(20);
	retries = GDT_RETRIES;
	while (bus_read_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS) != 0xff) {
		if (--retries == 0) {
			device_printf(dev, "DEINIT failed\n");
			error = ENXIO;
			goto err;
		}
		DELAY(1);
	}

	protocol = (uint8_t)le32toh(bus_read_4(gdt->sc_dpmem,
	    GDT_MPR_IC + GDT_S_INFO));
	bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS, 0);
	if (protocol != GDT_PROTOCOL_VERSION) {
		device_printf(dev, "unsupported protocol %d\n", protocol);
		error = ENXIO;
		goto err;
	}

	/* special command to controller BIOS */
	bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO, htole32(0));
	bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO +
	    sizeof (u_int32_t), htole32(0));
	bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO +
	    2 * sizeof (u_int32_t), htole32(1));
	bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO +
	    3 * sizeof (u_int32_t), htole32(0));
	bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_CMD_INDX, 0xfe);
	bus_write_1(gdt->sc_dpmem, GDT_MPR_LDOOR, 1);

	DELAY(20);
	retries = GDT_RETRIES;
	while (bus_read_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS) != 0xfe) {
		if (--retries == 0) {
			device_printf(dev, "initialization error\n");
			error = ENXIO;
			goto err;
		}
		DELAY(1);
	}

	bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS, 0);

	/* Install the MPR-class access methods. */
	gdt->sc_ic_all_size = GDT_MPR_SZ;
	gdt->sc_copy_cmd = gdt_mpr_copy_cmd;
	gdt->sc_get_status = gdt_mpr_get_status;
	gdt->sc_intr = gdt_mpr_intr;
	gdt->sc_release_event = gdt_mpr_release_event;
	gdt->sc_set_sema0 = gdt_mpr_set_sema0;
	gdt->sc_test_busy = gdt_mpr_test_busy;

	/* Allocate a dmatag representing the capabilities of this attachment */
	if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev),
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
	    /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
	    /*nsegments*/BUS_SPACE_UNRESTRICTED,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
	    /*lockfunc*/busdma_lock_mutex, /*lockarg*/&gdt->sc_lock,
	    &gdt->sc_parent_dmat) != 0) {
		error = ENXIO;
		goto err;
	}
	gdt->sc_init_level++;

	if (iir_init(gdt) != 0) {
		iir_free(gdt);
		error = ENXIO;
		goto err;
	}

	/* Register with the XPT */
	iir_attach(gdt);

	/* associate interrupt handler */
	if (bus_setup_intr(dev, irq, INTR_TYPE_CAM | INTR_MPSAFE,
	    NULL, iir_intr, gdt, &ih)) {
		device_printf(dev, "Unable to register interrupt handler\n");
		error = ENXIO;
		goto err;
	}

	gdt_pci_enable_intr(gdt);
	return (0);

err:
	if (irq)
		bus_release_resource(dev, SYS_RES_IRQ, irqrid, irq);
	if (gdt->sc_dpmem)
		/* 'rid' still holds the DPMEM rid (fix: was clobbered). */
		bus_release_resource(dev, SYS_RES_MEMORY, rid, gdt->sc_dpmem);
	mtx_destroy(&gdt->sc_lock);
	return (error);
}
static int amr_sglist_map(struct amr_softc *sc) { size_t segsize; void *p; int error; debug_called(1); /* * Create a single tag describing a region large enough to hold all of * the s/g lists we will need. * * Note that we could probably use AMR_LIMITCMD here, but that may become * tunable. */ if (AMR_IS_SG64(sc)) segsize = sizeof(struct amr_sg64entry) * AMR_NSEG * AMR_MAXCMD; else segsize = sizeof(struct amr_sgentry) * AMR_NSEG * AMR_MAXCMD; error = bus_dma_tag_create(sc->amr_parent_dmat, /* parent */ 512, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ segsize, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->amr_sg_dmat); if (error != 0) { device_printf(sc->amr_dev, "can't allocate scatter/gather DMA tag\n"); return(ENOMEM); } /* * Allocate enough s/g maps for all commands and permanently map them into * controller-visible space. * * XXX this assumes we can get enough space for all the s/g maps in one * contiguous slab. We may need to switch to a more complex arrangement * where we allocate in smaller chunks and keep a lookup table from slot * to bus address. * * XXX HACK ALERT: at least some controllers don't like the s/g memory * being allocated below 0x2000. We leak some memory if * we get some below this mark and allocate again. We * should be able to avoid this with the tag setup, but * that does't seem to work. 
*/ retry: error = bus_dmamem_alloc(sc->amr_sg_dmat, (void **)&p, BUS_DMA_NOWAIT, &sc->amr_sg_dmamap); if (error) { device_printf(sc->amr_dev, "can't allocate s/g table\n"); return(ENOMEM); } bus_dmamap_load(sc->amr_sg_dmat, sc->amr_sg_dmamap, p, segsize, amr_sglist_helper, &sc->amr_sgbusaddr, 0); if (sc->amr_sgbusaddr < 0x2000) { debug(1, "s/g table too low (0x%x), reallocating\n", sc->amr_sgbusaddr); goto retry; } if (AMR_IS_SG64(sc)) sc->amr_sg64table = (struct amr_sg64entry *)p; sc->amr_sgtable = (struct amr_sgentry *)p; return(0); }
/*
 * Allocate resources for our device, set up the bus interface.
 *
 * PCI attach for the aacraid driver: verifies bus-mastering, selects the
 * hardware interface (PMC SRC or SRCv) from the ident table, maps BAR(0)
 * and BAR(2), creates the parent DMA tag and hands off to the
 * bus-independent aacraid_attach().
 */
static int
aacraid_pci_attach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_ident *id;
	int error;
	u_int32_t command;

	fwprintf(NULL, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Initialise softc.
	 */
	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->aac_dev = dev;

	/* assume failure is 'not configured' */
	error = ENXIO;

	/*
	 * Verify that the adapter is correctly set up in PCI space.
	 */
	pci_enable_busmaster(dev);
	command = pci_read_config(sc->aac_dev, PCIR_COMMAND, 2);
	if (!(command & PCIM_CMD_BUSMASTEREN)) {
		device_printf(sc->aac_dev, "can't enable bus-master feature\n");
		goto out;
	}

	/*
	 * Detect the hardware interface version, set up the bus interface
	 * indirection.
	 *
	 * NOTE(review): id is dereferenced without a NULL check; presumably
	 * aac_find_ident() already matched during probe -- confirm.
	 */
	id = aac_find_ident(dev);
	sc->aac_hwif = id->hwif;
	switch(sc->aac_hwif) {
	case AAC_HWIF_SRC:
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "set hardware up for PMC SRC");
		sc->aac_if = aacraid_src_interface;
		break;
	case AAC_HWIF_SRCV:
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "set hardware up for PMC SRCv");
		sc->aac_if = aacraid_srcv_interface;
		break;
	default:
		sc->aac_hwif = AAC_HWIF_UNKNOWN;
		device_printf(sc->aac_dev, "unknown hardware type\n");
		error = ENXIO;
		goto out;
	}

	/* assume failure is 'out of memory' */
	error = ENOMEM;

	/*
	 * Allocate the PCI register window.
	 */
	sc->aac_regs_rid0 = PCIR_BAR(0);
	if ((sc->aac_regs_res0 = bus_alloc_resource_any(sc->aac_dev,
	    SYS_RES_MEMORY, &sc->aac_regs_rid0, RF_ACTIVE)) == NULL) {
		device_printf(sc->aac_dev,
		    "couldn't allocate register window 0\n");
		goto out;
	}
	sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
	sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);

	sc->aac_regs_rid1 = PCIR_BAR(2);
	if ((sc->aac_regs_res1 = bus_alloc_resource_any(sc->aac_dev,
	    SYS_RES_MEMORY, &sc->aac_regs_rid1, RF_ACTIVE)) == NULL) {
		device_printf(sc->aac_dev,
		    "couldn't allocate register window 1\n");
		goto out;
	}
	sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1);
	sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1);

	/*
	 * Allocate the parent bus DMA tag appropriate for our PCI interface.
	 *
	 * Note that some of these controllers are 64-bit capable.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       PAGE_SIZE, 0,		/* algnmnt, boundary */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_parent_dmat)) {
		device_printf(sc->aac_dev, "can't allocate parent DMA tag\n");
		goto out;
	}

	/* Set up quirks */
	sc->flags = id->quirks;

	/*
	 * Do bus-independent initialisation.
	 */
	error = aacraid_attach(sc);

out:
	/* aacraid_free() tears down whatever was allocated above. */
	if (error)
		aacraid_free(sc);
	return(error);
}
/*
 * PCI attach for the AdvanSys narrow SCSI controller.
 *
 * Verifies the chip signature in I/O space, allocates the softc and the
 * parent DMA tag, sets up the (driver-global, shared) overrun buffer,
 * halts and configures the chip, then attaches the IRQ and registers with
 * the SCSI layer.
 */
static int
adv_pci_attach(device_t dev)
{
	struct		adv_softc *adv;
	u_int32_t	id;
	u_int32_t	command;
	int		error, rid, irqrid;
	void		*ih;
	struct resource	*iores, *irqres;

	/*
	 * Determine the chip version.
	 */
	id = pci_read_config(dev, PCIR_DEVVENDOR, /*bytes*/4);
	command = pci_read_config(dev, PCIR_COMMAND, /*bytes*/1);

	/*
	 * These cards do not allow memory mapped accesses, so we must
	 * ensure that I/O accesses are available or we won't be able
	 * to talk to them.
	 */
	if ((command & (PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN))
	 != (PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN)) {
		command |= PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, command, /*bytes*/1);
	}

	/*
	 * Early chips can't handle non-zero latency timer settings.
	 */
	if (id == PCI_DEVICE_ID_ADVANSYS_1200A
	 || id == PCI_DEVICE_ID_ADVANSYS_1200B) {
		pci_write_config(dev, PCIR_LATTIMER, /*value*/0, /*bytes*/1);
	}

	rid = PCI_BASEADR0;
	iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
				   RF_ACTIVE);
	if (iores == NULL)
		return ENXIO;

	/* Bail early if the registers don't look like an AdvanSys chip. */
	if (adv_find_signature(rman_get_bustag(iores),
			       rman_get_bushandle(iores)) == 0) {
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv = adv_alloc(dev, rman_get_bustag(iores), rman_get_bushandle(iores));
	if (adv == NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	/* Allocate a dmatag for our transfer DMA maps */
	/* XXX Should be a child of the PCI bus dma tag */
	error = bus_dma_tag_create(/*parent*/NULL, /*alignment*/1,
				   /*boundary*/0,
				   /*lowaddr*/ADV_PCI_MAX_DMA_ADDR,
				   /*highaddr*/BUS_SPACE_MAXADDR,
				   /*filter*/NULL, /*filterarg*/NULL,
				   /*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
				   /*nsegments*/BUS_SPACE_UNRESTRICTED,
				   /*maxsegsz*/ADV_PCI_MAX_DMA_COUNT,
				   /*flags*/0,
				   &adv->parent_dmat);

	if (error != 0) {
		kprintf("%s: Could not allocate DMA tag - error %d\n",
		       adv_name(adv), error);
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv->init_level++;

	/*
	 * The overrun buffer is shared by all adapters of this driver:
	 * only the first instance allocates and maps it.
	 */
	if (overrun_buf == NULL) {
		/* Need to allocate our overrun buffer */
		if (bus_dma_tag_create(adv->parent_dmat,
				       /*alignment*/8, /*boundary*/0,
				       ADV_PCI_MAX_DMA_ADDR, BUS_SPACE_MAXADDR,
				       /*filter*/NULL, /*filterarg*/NULL,
				       ADV_OVERRUN_BSIZE, /*nsegments*/1,
				       BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
				       &overrun_dmat) != 0) {
			bus_dma_tag_destroy(adv->parent_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
			return ENXIO;
		}
		if (bus_dmamem_alloc(overrun_dmat,
				     (void *)&overrun_buf,
				     BUS_DMA_NOWAIT,
				     &overrun_dmamap) != 0) {
			bus_dma_tag_destroy(overrun_dmat);
			bus_dma_tag_destroy(adv->parent_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
			return ENXIO;
		}
		/* And permanently map it in */
		bus_dmamap_load(overrun_dmat, overrun_dmamap,
				overrun_buf, ADV_OVERRUN_BSIZE,
				adv_map, &overrun_physbase,
				/*flags*/0);
	}

	adv->overrun_physbase = overrun_physbase;

	/*
	 * Stop the chip.
	 */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);

	adv->chip_version = ADV_INB(adv, ADV_NONEISA_CHIP_REVISION);
	adv->type = ADV_PCI;

	/*
	 * Setup active negation and signal filtering.
	 */
	{
		u_int8_t extra_cfg;

		if (adv->chip_version >= ADV_CHIP_VER_PCI_ULTRA_3150)
			adv->type |= ADV_ULTRA;
		if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050)
			extra_cfg = ADV_IFC_ACT_NEG | ADV_IFC_WR_EN_FILTER;
		else
			extra_cfg = ADV_IFC_ACT_NEG | ADV_IFC_SLEW_RATE;
		ADV_OUTB(adv, ADV_REG_IFC, extra_cfg);
	}

	if (adv_init(adv) != 0) {
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv->max_dma_count = ADV_PCI_MAX_DMA_COUNT;
	adv->max_dma_addr = ADV_PCI_MAX_DMA_ADDR;

#if CC_DISABLE_PCI_PARITY_INT
	/* Mask off PCI parity error interrupts in the config MSW. */
	{
		u_int16_t config_msw;

		config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
		config_msw &= 0xFFC0;
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	}
#endif

	/* Early 1200A/B boards need workarounds for async transfers. */
	if (id == PCI_DEVICE_ID_ADVANSYS_1200A
	 || id == PCI_DEVICE_ID_ADVANSYS_1200B) {
		adv->bug_fix_control |= ADV_BUG_FIX_IF_NOT_DWB;
		adv->bug_fix_control |= ADV_BUG_FIX_ASYN_USE_SYN;
		adv->fix_asyn_xfer = ~0;
	}

	irqrid = 0;
	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &irqrid, 0, ~0, 1,
				    RF_SHAREABLE | RF_ACTIVE);
	if (irqres == NULL ||
	    bus_setup_intr(dev, irqres, 0, adv_intr, adv, &ih, NULL)) {
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv_attach(adv);
	return 0;
}
/** * mrsas_passthru: Handle pass-through commands * input: Adapter instance soft state * argument pointer * * This function is called from mrsas_ioctl() to handle pass-through and * ioctl commands to Firmware. */ int mrsas_passthru( struct mrsas_softc *sc, void *arg ) { struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg; union mrsas_frame *in_cmd = (union mrsas_frame *) &(user_ioc->frame.raw); struct mrsas_mfi_cmd *cmd = NULL; bus_dma_tag_t ioctl_data_tag[MAX_IOCTL_SGE]; bus_dmamap_t ioctl_data_dmamap[MAX_IOCTL_SGE]; void *ioctl_data_mem[MAX_IOCTL_SGE]; // ioctl data virtual addr bus_addr_t ioctl_data_phys_addr[MAX_IOCTL_SGE]; // ioctl data phys addr bus_dma_tag_t ioctl_sense_tag = 0; bus_dmamap_t ioctl_sense_dmamap = 0; void *ioctl_sense_mem = NULL; bus_addr_t ioctl_sense_phys_addr = 0; int i, adapter, ioctl_data_size, ioctl_sense_size, ret=0; struct mrsas_sge32 *kern_sge32; unsigned long *sense_ptr; /* For debug - uncomment the following line for debug output */ //mrsas_dump_ioctl(sc, user_ioc); /* * Check for NOP from MegaCli... MegaCli can issue a DCMD of 0. In this * case do nothing and return 0 to it as status. */ if (in_cmd->dcmd.opcode == 0) { device_printf(sc->mrsas_dev, "In %s() Got a NOP\n", __func__); user_ioc->frame.hdr.cmd_status = MFI_STAT_OK; return (0); } /* Validate host_no */ adapter = user_ioc->host_no; if (adapter != device_get_unit(sc->mrsas_dev)) { device_printf(sc->mrsas_dev, "In %s() IOCTL not for me!\n", __func__); return(ENOENT); } /* Validate SGL length */ if (user_ioc->sge_count > MAX_IOCTL_SGE) { device_printf(sc->mrsas_dev, "In %s() SGL is too long (%d > 8).\n", __func__, user_ioc->sge_count); return(ENOENT); } /* Get a command */ cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Failed to get a free cmd for IOCTL\n"); return(ENOMEM); } /* * User's IOCTL packet has 2 frames (maximum). Copy those two * frames into our cmd's frames. 
cmd->frame's context will get * overwritten when we copy from user's frames. So set that value * alone separately */ memcpy(cmd->frame, user_ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); cmd->frame->hdr.context = cmd->index; cmd->frame->hdr.pad_0 = 0; cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 | MFI_FRAME_SENSE64); /* * The management interface between applications and the fw uses * MFI frames. E.g, RAID configuration changes, LD property changes * etc are accomplishes through different kinds of MFI frames. The * driver needs to care only about substituting user buffers with * kernel buffers in SGLs. The location of SGL is embedded in the * struct iocpacket itself. */ kern_sge32 = (struct mrsas_sge32 *) ((unsigned long)cmd->frame + user_ioc->sgl_off); /* * For each user buffer, create a mirror buffer and copy in */ for (i=0; i < user_ioc->sge_count; i++) { if (!user_ioc->sgl[i].iov_len) continue; ioctl_data_size = user_ioc->sgl[i].iov_len; if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent 1, 0, // algnmnt, boundary BUS_SPACE_MAXADDR_32BIT,// lowaddr BUS_SPACE_MAXADDR, // highaddr NULL, NULL, // filter, filterarg ioctl_data_size, // maxsize 1, // msegments ioctl_data_size, // maxsegsize BUS_DMA_ALLOCNOW, // flags &ioctl_data_tag[i])) { device_printf(sc->mrsas_dev, "Cannot allocate ioctl data tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(ioctl_data_tag[i], (void **)&ioctl_data_mem[i], (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_data_dmamap[i])) { device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n"); return (ENOMEM); } if (bus_dmamap_load(ioctl_data_tag[i], ioctl_data_dmamap[i], ioctl_data_mem[i], ioctl_data_size, mrsas_alloc_cb, &ioctl_data_phys_addr[i], BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load ioctl data mem\n"); return (ENOMEM); } /* Save the physical address and length */ kern_sge32[i].phys_addr = (u_int32_t)ioctl_data_phys_addr[i]; kern_sge32[i].length = user_ioc->sgl[i].iov_len; /* Copy in data from user space */ 
ret = copyin(user_ioc->sgl[i].iov_base, ioctl_data_mem[i], user_ioc->sgl[i].iov_len); if (ret) { device_printf(sc->mrsas_dev, "IOCTL copyin failed!\n"); goto out; } } ioctl_sense_size = user_ioc->sense_len; if (user_ioc->sense_len) { if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent 1, 0, // algnmnt, boundary BUS_SPACE_MAXADDR_32BIT,// lowaddr BUS_SPACE_MAXADDR, // highaddr NULL, NULL, // filter, filterarg ioctl_sense_size, // maxsize 1, // msegments ioctl_sense_size, // maxsegsize BUS_DMA_ALLOCNOW, // flags &ioctl_sense_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(ioctl_sense_tag, (void **)&ioctl_sense_mem, (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_sense_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n"); return (ENOMEM); } if (bus_dmamap_load(ioctl_sense_tag, ioctl_sense_dmamap, ioctl_sense_mem, ioctl_sense_size, mrsas_alloc_cb, &ioctl_sense_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load ioctl sense mem\n"); return (ENOMEM); } sense_ptr = (unsigned long *)((unsigned long)cmd->frame + user_ioc->sense_off); sense_ptr = ioctl_sense_mem; } /* * Set the sync_cmd flag so that the ISR knows not to complete this * cmd to the SCSI mid-layer */ cmd->sync_cmd = 1; mrsas_issue_blocked_cmd(sc, cmd); cmd->sync_cmd = 0; /* * copy out the kernel buffers to user buffers */ for (i = 0; i < user_ioc->sge_count; i++) { ret = copyout(ioctl_data_mem[i], user_ioc->sgl[i].iov_base, user_ioc->sgl[i].iov_len); if (ret) { device_printf(sc->mrsas_dev, "IOCTL copyout failed!\n"); goto out; } } /* * copy out the sense */ if (user_ioc->sense_len) { /* * sense_buff points to the location that has the user * sense buffer address */ sense_ptr = (unsigned long *) ((unsigned long)user_ioc->frame.raw + user_ioc->sense_off); ret = copyout(ioctl_sense_mem, (unsigned long*)*sense_ptr, user_ioc->sense_len); if (ret) { device_printf(sc->mrsas_dev, "IOCTL sense copyout failed!\n"); 
goto out; } } /* * Return command status to user space */ memcpy(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status, sizeof(u_int8_t)); out: /* * Release sense buffer */ if (ioctl_sense_phys_addr) bus_dmamap_unload(ioctl_sense_tag, ioctl_sense_dmamap); if (ioctl_sense_mem) bus_dmamem_free(ioctl_sense_tag, ioctl_sense_mem, ioctl_sense_dmamap); if (ioctl_sense_tag) bus_dma_tag_destroy(ioctl_sense_tag); /* * Release data buffers */ for (i = 0; i < user_ioc->sge_count; i++) { if (!user_ioc->sgl[i].iov_len) continue; if (ioctl_data_phys_addr[i]) bus_dmamap_unload(ioctl_data_tag[i], ioctl_data_dmamap[i]); if (ioctl_data_mem[i] != NULL) bus_dmamem_free(ioctl_data_tag[i], ioctl_data_mem[i], ioctl_data_dmamap[i]); if (ioctl_data_tag[i] != NULL) bus_dma_tag_destroy(ioctl_data_tag[i]); } /* Free command */ mrsas_release_mfi_cmd(cmd); return(ret); }
static usbd_status usb_block_allocmem(bus_dma_tag_t tag, size_t size, size_t align, usb_dma_block_t **dmap) { usb_dma_block_t *p; int s; DPRINTFN(5, ("usb_block_allocmem: size=%lu align=%lu\n", (u_long)size, (u_long)align)); #ifdef DIAGNOSTIC if (!curproc) { printf("usb_block_allocmem: in interrupt context, size=%lu\n", (unsigned long) size); } #endif s = splusb(); /* First check the free list. */ for (p = LIST_FIRST(&usb_blk_freelist); p; p = LIST_NEXT(p, next)) { if (p->tag == tag && p->size >= size && p->size < size * 2 && p->align >= align) { LIST_REMOVE(p, next); usb_blk_nfree--; splx(s); *dmap = p; DPRINTFN(6,("usb_block_allocmem: free list size=%lu\n", (u_long)p->size)); return (USBD_NORMAL_COMPLETION); } } splx(s); #ifdef DIAGNOSTIC if (!curproc) { printf("usb_block_allocmem: in interrupt context, failed\n"); return (USBD_NOMEM); } #endif DPRINTFN(6, ("usb_block_allocmem: no free\n")); p = malloc(sizeof *p, M_USB, M_NOWAIT); if (p == NULL) return (USBD_NOMEM); if (bus_dma_tag_create(tag, align, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, sizeof(p->segs) / sizeof(p->segs[0]), size, 0, NULL, NULL, &p->tag) == ENOMEM) { goto free; } p->size = size; p->align = align; if (bus_dmamem_alloc(p->tag, &p->kaddr, BUS_DMA_NOWAIT|BUS_DMA_COHERENT, &p->map)) goto tagfree; if (bus_dmamap_load(p->tag, p->map, p->kaddr, p->size, usbmem_callback, p, 0)) goto memfree; /* XXX - override the tag, ok since we never free it */ p->tag = tag; *dmap = p; return (USBD_NORMAL_COMPLETION); /* * XXX - do we need to _unload? is the order of _free and _destroy * correct? */ memfree: bus_dmamem_free(p->tag, p->kaddr, p->map); tagfree: bus_dma_tag_destroy(p->tag); free: free(p, M_USB); return (USBD_NOMEM); }
/*
 * Attach all the sub-devices we can find
 *
 * ISA attach for the BusLogic driver: allocates bus resources, cascades
 * the DMA channel for external (bus-master) control, builds the 24-bit
 * parent DMA tag, and -- because ISA bounce restrictions apply -- carves
 * out a permanently mapped sense-buffer area before handing off to
 * bt_attach().
 */
static int
bt_isa_attach(device_t dev)
{
	struct	bt_softc *bt = device_get_softc(dev);
	bus_dma_filter_t *filter;
	void		 *filter_arg;
	bus_addr_t	 lowaddr;
	int		 error, drq;

	/* Initialise softc */
	error = bt_isa_alloc_resources(dev, 0, ~0);
	if (error) {
		device_printf(dev, "can't allocate resources in bt_isa_attach\n");
		return error;
	}

	/* Program the DMA channel for external control */
	if ((drq = isa_get_drq(dev)) != -1)
		isa_dmacascade(drq);

	/* Allocate our parent dmatag */
	filter = NULL;
	filter_arg = NULL;
	/* ISA bus masters can only address the low 16MB. */
	lowaddr = BUS_SPACE_MAXADDR_24BIT;

	/* XXX Should be a child of the ISA bus dma tag */
	if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/1, /*boundary*/0,
			       lowaddr, /*highaddr*/BUS_SPACE_MAXADDR,
			       filter, filter_arg,
			       /*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
			       /*nsegments*/BUS_SPACE_UNRESTRICTED,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &bt->parent_dmat) != 0) {
		bt_isa_release_resources(dev);
		return (ENOMEM);
	}

	error = bt_init(dev);
	if (error) {
		bt_isa_release_resources(dev);
		return (ENOMEM);
	}

	/*
	 * With a restricted lowaddr the controller can't DMA sense data to
	 * arbitrary CCB addresses, so pre-allocate and permanently map a
	 * sense buffer per CCB.  (lowaddr is 24-bit here, so this branch is
	 * always taken in this attachment.)
	 */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/* DMA tag for our sense buffers */
		if (bus_dma_tag_create(bt->parent_dmat, /*alignment*/1,
				       /*boundary*/0,
				       /*lowaddr*/BUS_SPACE_MAXADDR,
				       /*highaddr*/BUS_SPACE_MAXADDR,
				       /*filter*/NULL, /*filterarg*/NULL,
				       bt->max_ccbs *
				       sizeof(struct scsi_sense_data),
				       /*nsegments*/1,
				       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
				       /*flags*/0, &bt->sense_dmat) != 0) {
			bt_isa_release_resources(dev);
			return (ENOMEM);
		}

		bt->init_level++;

		/* Allocation of sense buffers */
		if (bus_dmamem_alloc(bt->sense_dmat,
				     (void *)&bt->sense_buffers,
				     BUS_DMA_NOWAIT, &bt->sense_dmamap) != 0) {
			bt_isa_release_resources(dev);
			return (ENOMEM);
		}

		bt->init_level++;

		/* And permanently map them */
		bus_dmamap_load(bt->sense_dmat, bt->sense_dmamap,
				bt->sense_buffers,
				bt->max_ccbs * sizeof(*bt->sense_buffers),
				btmapsensebuffers, bt, /*flags*/0);

		/* init_level tracks progress so release can unwind partially. */
		bt->init_level++;
	}

	error = bt_attach(dev);
	if (error) {
		bt_isa_release_resources(dev);
		return (error);
	}

	return (0);
}
/*
 * AHB (SoC bus) attach for the Atheros wireless driver (AR9130).
 *
 * Maps the register window, locates and copies the EEPROM contents from a
 * hint-supplied physical address, wires up the interrupt, creates the DMA
 * descriptor tag and hands off to the bus-independent ath_attach().
 * Failures unwind through the 'badN' labels in reverse allocation order.
 */
static int
ath_ahb_attach(device_t dev)
{
	struct ath_ahb_softc *psc = device_get_softc(dev);
	struct ath_softc *sc = &psc->sc_sc;
	int error = ENXIO;
	int rid;
	long eepromaddr;
	uint8_t *p;

	sc->sc_dev = dev;

	rid = 0;
	psc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (psc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}

	/* The EEPROM's physical address comes from device hints. */
	if (resource_long_value(device_get_name(dev), device_get_unit(dev),
	    "eepromaddr", &eepromaddr) != 0) {
		device_printf(dev, "cannot fetch 'eepromaddr' from hints\n");
		goto bad0;
	}
	rid = 0;
	device_printf(sc->sc_dev, "eeprom @ %p\n", (void *) eepromaddr);
	psc->sc_eeprom = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    (uintptr_t) eepromaddr,
	    (uintptr_t) eepromaddr + (uintptr_t) ((ATH_EEPROM_DATA_SIZE * 2) - 1),
	    0, RF_ACTIVE);
	if (psc->sc_eeprom == NULL) {
		device_printf(dev, "cannot map eeprom space\n");
		goto bad0;
	}

	/* XXX uintptr_t is a bandaid for ia64; to be fixed */
	sc->sc_st = (HAL_BUS_TAG)(uintptr_t) rman_get_bustag(psc->sc_sr);
	sc->sc_sh = (HAL_BUS_HANDLE) rman_get_bushandle(psc->sc_sr);

	/*
	 * Mark device invalid so any interrupts (shared or otherwise)
	 * that arrive before the HAL is setup are discarded.
	 */
	sc->sc_invalid = 1;

	/* Copy the EEPROM data out */
	sc->sc_eepromdata = malloc(ATH_EEPROM_DATA_SIZE * 2, M_TEMP,
	    M_NOWAIT | M_ZERO);
	if (sc->sc_eepromdata == NULL) {
		device_printf(dev, "cannot allocate memory for eeprom data\n");
		goto bad1;
	}
	device_printf(sc->sc_dev, "eeprom data @ %p\n",
	    (void *) rman_get_bushandle(psc->sc_eeprom));
	/* XXX why doesn't this work? -adrian */
#if 0
	bus_space_read_multi_1(
	    rman_get_bustag(psc->sc_eeprom),
	    rman_get_bushandle(psc->sc_eeprom),
	    0, (u_int8_t *) sc->sc_eepromdata, ATH_EEPROM_DATA_SIZE * 2);
#endif
	/* Direct memcpy from the mapped handle instead of bus_space reads. */
	p = (void *) rman_get_bushandle(psc->sc_eeprom);
	memcpy(sc->sc_eepromdata, p, ATH_EEPROM_DATA_SIZE * 2);

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	psc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (psc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	if (bus_setup_intr(dev, psc->sc_irq,
	    INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ath_intr, sc, &psc->sc_ih)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    0x3ffff,			/* maxsize XXX */
	    ATH_MAX_SCATTER,		/* nsegments */
	    0x3ffff,			/* maxsegsize XXX */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad3;
	}

	ATH_LOCK_INIT(sc);

	error = ath_attach(AR9130_DEVID, sc);
	if (error == 0)					/* success */
		return 0;

	/* ath_attach() failed: unwind everything set up above. */
	ATH_LOCK_DESTROY(sc);
	bus_dma_tag_destroy(sc->sc_dmat);
bad3:
	bus_teardown_intr(dev, psc->sc_irq, psc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, psc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, 0, psc->sc_eeprom);
bad0:
	bus_release_resource(dev, SYS_RES_MEMORY, 0, psc->sc_sr);
bad:
	/* XXX?! */
	if (sc->sc_eepromdata)
		free(sc->sc_eepromdata, M_TEMP);
	return (error);
}
/*
 * Platform (BCM2835) initialization for the VCHIQ interface.
 *
 * Allocates one physically contiguous, page-aligned DMA region holding the
 * shared slot memory followed by the fragment pool, carves the fragments
 * into a free list, initializes the core VCHIQ state, and finally tells the
 * VideoCore firmware where the slots live via the mailbox.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * Fix: the return value of bus_dma_tag_create() was previously ignored, so
 * a failed tag creation would feed a garbage tag into bus_dmamem_alloc().
 * It is now checked before any allocation is attempted.
 */
int __init
vchiq_platform_init(VCHIQ_STATE_T *state)
{
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	int frag_mem_size;
	int err;
	int i;

	/* Allocate space for the channels in coherent memory */
	g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);

	err = bus_dma_tag_create(
	    NULL,
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    g_slot_mem_size + frag_mem_size, 1,	/* maxsize, nsegments */
	    g_slot_mem_size + frag_mem_size, 0,	/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &dma_tag);
	if (err) {
		/*
		 * Nothing has been allocated yet, so return directly
		 * instead of running the teardown ladder (which would
		 * operate on an invalid tag/map).
		 */
		vchiq_log_error(vchiq_core_log_level,
		    "Unable to create DMA tag");
		return (-ENOMEM);
	}

	err = bus_dmamem_alloc(dma_tag, (void **)&g_slot_mem,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK, &dma_map);
	if (err) {
		vchiq_log_error(vchiq_core_log_level,
		    "Unable to allocate channel memory");
		err = -ENOMEM;
		goto failed_alloc;
	}

	err = bus_dmamap_load(dma_tag, dma_map, g_slot_mem,
	    g_slot_mem_size + frag_mem_size, vchiq_dmamap_cb,
	    &g_slot_phys, 0);
	if (err) {
		vchiq_log_error(vchiq_core_log_level, "cannot load DMA map");
		err = -ENOMEM;
		goto failed_load;
	}

	/* The tag demands page alignment; this should never fire. */
	WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
	if (!vchiq_slot_zero) {
		err = -EINVAL;
		goto failed_init_slots;
	}

	/* Advertise the fragment pool (physical offset + count) to the peer. */
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
	    (int)g_slot_phys + g_slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
	    MAX_FRAGMENTS;

	g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
	g_slot_mem_size += frag_mem_size;

	/*
	 * Thread the free fragments into a singly linked list; the first
	 * pointer-sized word of each free fragment holds the next pointer.
	 */
	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(FRAGMENTS_T **)&g_fragments_base[i] =
		    &g_fragments_base[i + 1];
	}
	*(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
	_sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
	    VCHIQ_SUCCESS) {
		err = -EINVAL;
		goto failed_vchiq_init;
	}

	/* Hand the physical slot address to the VideoCore firmware. */
	bcm_mbox_write(BCM2835_MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);

	vchiq_log_info(vchiq_arm_log_level,
	    "vchiq_init - done (slots %x, phys %x)",
	    (unsigned int)vchiq_slot_zero, g_slot_phys);

	vchiq_call_connected_callbacks();

	return 0;

failed_vchiq_init:
failed_init_slots:
failed_load:
	bus_dmamap_unload(dma_tag, dma_map);
failed_alloc:
	/*
	 * NOTE(review): on the failed_alloc path the map was never
	 * successfully created by bus_dmamem_alloc(); destroying it here
	 * matches the original code but is worth confirming against
	 * busdma semantics.
	 */
	bus_dmamap_destroy(dma_tag, dma_map);
	bus_dma_tag_destroy(dma_tag);

	return err;
}
/*
 * Initialize the BCM2835 DMA controller.
 *
 * Disables/acks interrupts, creates a tag for per-channel control blocks,
 * then for every channel allocates a coherent, 32-byte-aligned control
 * block, loads it for DMA, records its virtual and bus addresses in the
 * channel state, and resets the engine.  Channels 2 and 3 are then marked
 * free for clients and enabled in the controller's enable register.
 *
 * Returns 0, or an errno if the control-block tag cannot be created.
 */
static int
bcm_dma_init(device_t dev)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	uint32_t mask;
	struct bcm_dma_ch *ch;
	void *cb_virt;
	vm_paddr_t cb_phys;
	int err;
	int i;

	/* disable and clear interrupt status */
	bus_write_4(sc->sc_mem, BCM_DMA_ENABLE, 0);
	bus_write_4(sc->sc_mem, BCM_DMA_INT_STATUS, 0);

	/* Allocate DMA chunks control blocks */
	/* p.40 of spec - control block should be 32-bit aligned */
	err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, sizeof(struct bcm_dma_cb), 1,
	    sizeof(struct bcm_dma_cb), BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_dma_tag);
	if (err) {
		device_printf(dev, "failed allocate DMA tag");
		return (err);
	}

	/*
	 * setup initial settings
	 *
	 * NOTE(review): a failure inside this loop only break's out;
	 * the function still returns 0 with some channels left
	 * uninitialized.  Presumably acceptable because only channels
	 * 2/3 are handed out below — confirm.
	 */
	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		ch = &sc->sc_dma_ch[i];

		err = bus_dmamem_alloc(sc->sc_dma_tag, &cb_virt,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
		    &ch->dma_map);
		if (err) {
			device_printf(dev, "cannot allocate DMA memory\n");
			break;
		}

		/*
		 * Least alignment for busdma-allocated stuff is cache
		 * line size, so just make sure nothing stupid happened
		 * and we got properly aligned address
		 */
		if ((uintptr_t)cb_virt & 0x1f) {
			device_printf(dev,
			    "DMA address is not 32-bytes aligned: %p\n",
			    (void*)cb_virt);
			break;
		}

		err = bus_dmamap_load(sc->sc_dma_tag, ch->dma_map, cb_virt,
		    sizeof(struct bcm_dma_cb), bcm_dmamap_cb, &cb_phys,
		    BUS_DMA_WAITOK);
		if (err) {
			device_printf(dev, "cannot load DMA memory\n");
			break;
		}

		bzero(ch, sizeof(struct bcm_dma_ch));
		ch->ch = i;
		ch->cb = cb_virt;	/* CPU view of the control block */
		ch->vc_cb = cb_phys;	/* bus address seen by the engine */
		ch->intr_func = NULL;
		ch->intr_arg = NULL;
		ch->flags = BCM_DMA_CH_UNMAP;

		ch->cb->info = INFO_WAIT_RESP;

		/* reset DMA engine */
		bcm_dma_reset(dev, i);
	}

	/* now use DMA2/DMA3 only */
	sc->sc_dma_ch[2].flags = BCM_DMA_CH_FREE;
	sc->sc_dma_ch[3].flags = BCM_DMA_CH_FREE;

	/* enable DMAs */
	mask = 0;
	for (i = 0; i < BCM_DMA_CH_MAX; i++)
		if (sc->sc_dma_ch[i].flags & BCM_DMA_CH_FREE)
			mask |= (1 << i);

	bus_write_4(sc->sc_mem, BCM_DMA_ENABLE, mask);

	return (0);
}
/*
 * Attach an AdvanSys ADW-series PCI SCSI controller.
 *
 * Maps registers (memory-mapped if ADW_ALLOW_MEMIO, else I/O port),
 * verifies the chip signature, runs the chip-specific setup hook, creates
 * the parent DMA tag, initializes the chip, and allocates/enables the IRQ
 * before completing with adw_attach().
 *
 * Fixes vs. original:
 *  - entry->setup() failure now releases the softc/resources via
 *    adw_free() instead of leaking them (every other error path after
 *    adw_alloc() already did so).
 *  - Repaired mojibake: "®s_id" was a mis-encoded "&regs_id".
 *
 * Returns 0 on success or an errno on failure.
 */
static int
adw_pci_attach(device_t dev)
{
	struct adw_softc *adw;
	struct adw_pci_identity *entry;
	u_int16_t command;
	struct resource *regs;
	int regs_type;
	int regs_id;
	int error;
	int zero;

	entry = adw_find_pci_device(dev);
	if (entry == NULL)
		return (ENXIO);
	regs = NULL;
	regs_type = 0;
	regs_id = 0;
#ifdef ADW_ALLOW_MEMIO
	regs_type = SYS_RES_MEMORY;
	regs_id = ADW_PCI_MEMBASE;
	regs = bus_alloc_resource_any(dev, regs_type, &regs_id, RF_ACTIVE);
#endif
	if (regs == NULL) {
		/* Fall back to (or start with) I/O port mapping. */
		regs_type = SYS_RES_IOPORT;
		regs_id = ADW_PCI_IOBASE;
		regs = bus_alloc_resource_any(dev, regs_type, &regs_id,
		    RF_ACTIVE);
	}

	if (regs == NULL) {
		device_printf(dev, "can't allocate register resources\n");
		return (ENOMEM);
	}

	adw = adw_alloc(dev, regs, regs_type, regs_id);
	if (adw == NULL)
		return(ENOMEM);

	/*
	 * Now that we have access to our registers, just verify that
	 * this really is an AdvanSys device.
	 */
	if (adw_find_signature(adw) == 0) {
		adw_free(adw);
		return (ENXIO);
	}

	adw_reset_chip(adw);

	error = entry->setup(dev, entry, adw);

	if (error != 0) {
		/* Was a leak: release the softc like all other paths do. */
		adw_free(adw);
		return (error);
	}

	/* Ensure busmastering is enabled */
	pci_enable_busmaster(dev);

	/* Allocate a dmatag for our transfer DMA maps */
	error = bus_dma_tag_create(
	    /* parent	*/ bus_get_dma_tag(dev),
	    /* alignment */ 1,
	    /* boundary	*/ 0,
	    /* lowaddr	*/ ADW_PCI_MAX_DMA_ADDR,
	    /* highaddr	*/ BUS_SPACE_MAXADDR,
	    /* filter	*/ NULL,
	    /* filterarg */ NULL,
	    /* maxsize	*/ BUS_SPACE_MAXSIZE_32BIT,
	    /* nsegments */ ~0,
	    /* maxsegsz	*/ ADW_PCI_MAX_DMA_COUNT,
	    /* flags	*/ 0,
	    /* lockfunc	*/ NULL,
	    /* lockarg	*/ NULL,
	    &adw->parent_dmat);

	/* init_level tracks how much adw_free() must tear down. */
	adw->init_level++;

	if (error != 0) {
		device_printf(dev, "Could not allocate DMA tag - error %d\n",
		    error);
		adw_free(adw);
		return (error);
	}

	adw->init_level++;

	error = adw_init(adw);
	if (error != 0) {
		adw_free(adw);
		return (error);
	}

	/*
	 * If the PCI Configuration Command Register "Parity Error Response
	 * Control" Bit was clear (0), then set the microcode variable
	 * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode
	 * to ignore DMA parity errors.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, /*bytes*/2);
	if ((command & PCIM_CMD_PERRESPEN) == 0)
		adw_lram_write_16(adw, ADW_MC_CONTROL_FLAG,
		    adw_lram_read_16(adw, ADW_MC_CONTROL_FLAG) |
		    ADW_MC_CONTROL_IGN_PERR);

	zero = 0;
	adw->irq_res_type = SYS_RES_IRQ;
	adw->irq = bus_alloc_resource_any(dev, adw->irq_res_type, &zero,
	    RF_ACTIVE | RF_SHAREABLE);
	if (adw->irq == NULL) {
		adw_free(adw);
		return (ENOMEM);
	}

	error = adw_attach(adw);
	if (error != 0)
		adw_free(adw);
	return (error);
}
int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller) { int error; device_t device = controller->isci->device; uint32_t max_segment_size = isci_io_request_get_max_io_size(); uint32_t status = 0; struct ISCI_MEMORY *uncached_controller_memory = &controller->uncached_controller_memory; struct ISCI_MEMORY *cached_controller_memory = &controller->cached_controller_memory; struct ISCI_MEMORY *request_memory = &controller->request_memory; POINTER_UINT virtual_address; bus_addr_t physical_address; controller->mdl = sci_controller_get_memory_descriptor_list_handle( controller->scif_controller_handle); uncached_controller_memory->size = sci_mdl_decorator_get_memory_size( controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS); error = isci_allocate_dma_buffer(device, uncached_controller_memory); if (error != 0) return (error); sci_mdl_decorator_assign_memory( controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS, uncached_controller_memory->virtual_address, uncached_controller_memory->physical_address); cached_controller_memory->size = sci_mdl_decorator_get_memory_size( controller->mdl, SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS ); error = isci_allocate_dma_buffer(device, cached_controller_memory); if (error != 0) return (error); sci_mdl_decorator_assign_memory(controller->mdl, SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS, cached_controller_memory->virtual_address, cached_controller_memory->physical_address); request_memory->size = controller->queue_depth * isci_io_request_get_object_size(); error = isci_allocate_dma_buffer(device, request_memory); if (error != 0) return (error); /* For STP PIO testing, we want to ensure we can force multiple SGLs * since this has been a problem area in SCIL. 
This tunable parameter * will allow us to force DMA segments to a smaller size, ensuring * that even if a physically contiguous buffer is attached to this * I/O, the DMA subsystem will pass us multiple segments in our DMA * load callback. */ TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size); /* Create DMA tag for our I/O requests. Then we can create DMA maps based off * of this tag and store them in each of our ISCI_IO_REQUEST objects. This * will enable better performance than creating the DMA maps everytime we get * an I/O. */ status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, isci_io_request_get_max_io_size(), SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL, &controller->buffer_dma_tag); sci_pool_initialize(controller->request_pool); virtual_address = request_memory->virtual_address; physical_address = request_memory->physical_address; for (int i = 0; i < controller->queue_depth; i++) { struct ISCI_REQUEST *request = (struct ISCI_REQUEST *)virtual_address; isci_request_construct(request, controller->scif_controller_handle, controller->buffer_dma_tag, physical_address); sci_pool_put(controller->request_pool, request); virtual_address += isci_request_get_object_size(); physical_address += isci_request_get_object_size(); } uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) + scif_remote_device_get_object_size(); controller->remote_device_memory = (uint8_t *) malloc( remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI, M_NOWAIT | M_ZERO); sci_pool_initialize(controller->remote_device_pool); uint8_t *remote_device_memory_ptr = controller->remote_device_memory; for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { struct ISCI_REMOTE_DEVICE *remote_device = (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr; controller->remote_device[i] = NULL; remote_device->index = i; remote_device->is_resetting = FALSE; remote_device->frozen_lun_mask = 0; 
sci_fast_list_element_init(remote_device, &remote_device->pending_device_reset_element); sci_pool_put(controller->remote_device_pool, remote_device); remote_device_memory_ptr += remote_device_size; } return (0); }
static int dma_setup(struct dwmmc_softc *sc) { int error; int nidx; int idx; /* * Set up TX descriptor ring, descriptors, and dma maps. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. */ 4096, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ DESC_SIZE, 1, /* maxsize, nsegments */ DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->desc_tag); if (error != 0) { device_printf(sc->dev, "could not create ring DMA tag.\n"); return (1); } error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->desc_map); if (error != 0) { device_printf(sc->dev, "could not allocate descriptor ring.\n"); return (1); } error = bus_dmamap_load(sc->desc_tag, sc->desc_map, sc->desc_ring, DESC_SIZE, dwmmc_get1paddr, &sc->desc_ring_paddr, 0); if (error != 0) { device_printf(sc->dev, "could not load descriptor ring map.\n"); return (1); } for (idx = 0; idx < sc->desc_count; idx++) { sc->desc_ring[idx].des0 = DES0_CH; sc->desc_ring[idx].des1 = 0; nidx = (idx + 1) % sc->desc_count; sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \ (nidx * sizeof(struct idmac_desc)); } error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. */ 4096, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->desc_count * MMC_SECTOR_SIZE, /* maxsize */ sc->desc_count, /* nsegments */ MMC_SECTOR_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->buf_tag); if (error != 0) { device_printf(sc->dev, "could not create ring DMA tag.\n"); return (1); } error = bus_dmamap_create(sc->buf_tag, 0, &sc->buf_map); if (error != 0) { device_printf(sc->dev, "could not create TX buffer DMA map.\n"); return (1); } return (0); }
/*
 * Attach the AT91 USART back-end for the uart(4) framework.
 *
 * Probes for the receive-TIMEOUT capability, creates TX (and, when
 * TIMEOUT is available, ping-pong RX) DMA resources, resets and enables
 * the transceiver, and programs the PDC for double-buffered receive so
 * an interrupt fires half-way through the software FIFO.
 *
 * Returns 0 on success or a busdma errno; on error, resources already
 * created are left for detach to reclaim (no unwinding here).
 */
static int
at91_usart_bus_attach(struct uart_softc *sc)
{
	int err;
	int i;
	uint32_t cr;
	struct at91_usart_softc *atsc;

	atsc = (struct at91_usart_softc *)sc;

	if (at91_usart_requires_rts0_workaround(sc))
		atsc->flags |= USE_RTS0_WORKAROUND;

	/*
	 * See if we have a TIMEOUT bit.  We disable all interrupts as
	 * a side effect.  Boot loaders may have enabled them.  Since
	 * a TIMEOUT interrupt can't happen without other setup, the
	 * apparent race here can't actually happen.
	 */
	WR4(&sc->sc_bas, USART_IDR, 0xffffffff);
	WR4(&sc->sc_bas, USART_IER, USART_CSR_TIMEOUT);
	if (RD4(&sc->sc_bas, USART_IMR) & USART_CSR_TIMEOUT)
		atsc->flags |= HAS_TIMEOUT;
	WR4(&sc->sc_bas, USART_IDR, 0xffffffff);

	/*
	 * Allocate transmit DMA tag and map.  We allow a transmit buffer
	 * to be any size, but it must map to a single contiguous physical
	 * extent.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
	    NULL, &atsc->tx_tag);
	if (err != 0)
		goto errout;
	err = bus_dmamap_create(atsc->tx_tag, 0, &atsc->tx_map);
	if (err != 0)
		goto errout;

	if (atsc->flags & HAS_TIMEOUT) {
		/*
		 * Allocate receive DMA tags, maps, and buffers.
		 * The receive buffers should be aligned to arm_dcache_align,
		 * otherwise partial cache line flushes on every receive
		 * interrupt are pretty much guaranteed.
		 */
		err = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
		    arm_dcache_align, 0, BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR, NULL, NULL, sc->sc_rxfifosz, 1,
		    sc->sc_rxfifosz, BUS_DMA_ALLOCNOW, NULL, NULL,
		    &atsc->rx_tag);
		if (err != 0)
			goto errout;
		for (i = 0; i < 2; i++) {
			err = bus_dmamem_alloc(atsc->rx_tag,
			    (void **)&atsc->ping_pong[i].buffer,
			    BUS_DMA_NOWAIT, &atsc->ping_pong[i].map);
			if (err != 0)
				goto errout;
			err = bus_dmamap_load(atsc->rx_tag,
			    atsc->ping_pong[i].map,
			    atsc->ping_pong[i].buffer, sc->sc_rxfifosz,
			    at91_getaddr, &atsc->ping_pong[i].pa, 0);
			if (err != 0)
				goto errout;
			bus_dmamap_sync(atsc->rx_tag, atsc->ping_pong[i].map,
			    BUS_DMASYNC_PREREAD);
		}
		atsc->ping = &atsc->ping_pong[0];
		atsc->pong = &atsc->ping_pong[1];
	}

	/* Turn on rx and tx */
	cr = USART_CR_RSTSTA | USART_CR_RSTRX | USART_CR_RSTTX;
	WR4(&sc->sc_bas, USART_CR, cr);
	WR4(&sc->sc_bas, USART_CR, USART_CR_RXEN | USART_CR_TXEN);

	/*
	 * Setup the PDC to receive data.  We use the ping-pong buffers
	 * so that we can more easily bounce between the two and so that
	 * we get an interrupt 1/2 way through the software 'fifo' we have
	 * to avoid overruns.
	 */
	if (atsc->flags & HAS_TIMEOUT) {
		WR4(&sc->sc_bas, PDC_RPR, atsc->ping->pa);
		WR4(&sc->sc_bas, PDC_RCR, sc->sc_rxfifosz);
		WR4(&sc->sc_bas, PDC_RNPR, atsc->pong->pa);
		WR4(&sc->sc_bas, PDC_RNCR, sc->sc_rxfifosz);
		WR4(&sc->sc_bas, PDC_PTCR, PDC_PTCR_RXTEN);

		/*
		 * Set the receive timeout to be 1.5 character times
		 * assuming 8N1.
		 */
		WR4(&sc->sc_bas, USART_RTOR, 15);
		WR4(&sc->sc_bas, USART_CR, USART_CR_STTTO);
		WR4(&sc->sc_bas, USART_IER, USART_CSR_TIMEOUT |
		    USART_CSR_RXBUFF | USART_CSR_ENDRX);
	} else {
		/* No timeout support: interrupt per received character. */
		WR4(&sc->sc_bas, USART_IER, USART_CSR_RXRDY);
	}
	WR4(&sc->sc_bas, USART_IER, USART_CSR_RXBRK | USART_DCE_CHANGE_BITS);

	/* Prime sc->hwsig with the initial hw line states. */
	at91_usart_bus_getsig(sc);

errout:
	return (err);
}
/*
 * Attach a Marvell 88W8363 PCI wireless device.
 *
 * Enables bus mastering, maps both register BARs, sets up the (shared)
 * interrupt, creates the descriptor DMA tag, and completes with the
 * bus-independent mwl_attach().  Failures unwind through the bad* ladder.
 *
 * Returns 0 on success or an errno (initially ENXIO) on failure.
 */
static int
mwl_pci_attach(device_t dev)
{
	struct mwl_pci_softc *psc = device_get_softc(dev);
	struct mwl_softc *sc = &psc->sc_sc;
	int rid, error = ENXIO;

	sc->sc_dev = dev;

	/*
	 * Enable memory mapping and bus mastering.
	 */
	if (!mwl_pci_setup(dev))
		return 0;

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR0;
	psc->sc_sr0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (psc->sc_sr0 == NULL) {
		device_printf(dev, "cannot map BAR0 register space\n");
		goto bad;
	}
	rid = BS_BAR1;
	psc->sc_sr1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (psc->sc_sr1 == NULL) {
		device_printf(dev, "cannot map BAR1 register space\n");
		goto bad1;
	}
	/* Discard interrupts that arrive before the attach completes. */
	sc->sc_invalid = 1;

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	psc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (psc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad2;
	}
	if (bus_setup_intr(dev, psc->sc_irq,
	    INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, mwl_intr, sc, &psc->sc_ih)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad3;
	}

	/*
	 * Setup DMA descriptor area.
	 *
	 * NOTE(review): BUS_SPACE_MAXADDR is passed for maxsize and
	 * maxsegsize, which are size parameters, not addresses —
	 * presumably intended as "unrestricted"; confirm against
	 * bus_dma_tag_create(9) before changing.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXADDR,		/* maxsize */
	    MWL_TXDESC,			/* nsegments */
	    BUS_SPACE_MAXADDR,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}

	/*
	 * Finish off the attach.
	 */
	MWL_LOCK_INIT(sc);
	sc->sc_io0t = rman_get_bustag(psc->sc_sr0);
	sc->sc_io0h = rman_get_bushandle(psc->sc_sr0);
	sc->sc_io1t = rman_get_bustag(psc->sc_sr1);
	sc->sc_io1h = rman_get_bushandle(psc->sc_sr1);
	if (mwl_attach(pci_get_device(dev), sc) == 0)
		return (0);

	MWL_LOCK_DESTROY(sc);
	bus_dma_tag_destroy(sc->sc_dmat);
bad4:
	bus_teardown_intr(dev, psc->sc_irq, psc->sc_ih);
bad3:
	bus_release_resource(dev, SYS_RES_IRQ, 0, psc->sc_irq);
bad2:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR1, psc->sc_sr1);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR0, psc->sc_sr0);
bad:
	return (error);
}
/*
 * Attach a WD7000 ISA SCSI controller.
 *
 * Allocates I/O port, DRQ, and IRQ resources, wires the interrupt,
 * creates a below-16M DMA buffer holding the request structures and data
 * fragments, initializes the hardware, and registers a CAM SIM/bus/path.
 * All failures funnel to 'bad', where wds_free_resources() releases
 * whatever was acquired.
 *
 * Returns 0 on success or an errno (ENXIO when the exact cause is
 * unknown).
 */
static int
wds_attach(device_t dev)
{
	struct wds *wp;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *pathp;
	int i;
	int error = 0;

	wp = (struct wds *)device_get_softc(dev);

	wp->port_rid = 0;
	wp->port_r = bus_alloc_resource(dev, SYS_RES_IOPORT, &wp->port_rid,
	    /*start*/ 0, /*end*/ ~0, /*count*/ 0, RF_ACTIVE);
	if (wp->port_r == NULL)
		return (ENXIO);

	/* We must now release resources on error. */

	wp->drq_rid = 0;
	wp->drq_r = bus_alloc_resource(dev, SYS_RES_DRQ, &wp->drq_rid,
	    /*start*/ 0, /*end*/ ~0, /*count*/ 0, RF_ACTIVE);
	if (wp->drq_r == NULL)
		goto bad;

	wp->intr_rid = 0;
	wp->intr_r = bus_alloc_resource(dev, SYS_RES_IRQ, &wp->intr_rid,
	    /*start*/ 0, /*end*/ ~0, /*count*/ 0, RF_ACTIVE);
	if (wp->intr_r == NULL)
		goto bad;
	error = bus_setup_intr(dev, wp->intr_r, INTR_TYPE_CAM | INTR_ENTROPY,
	    NULL, (driver_intr_t *)wds_intr, (void *)wp, &wp->intr_cookie);
	if (error)
		goto bad;

	/*
	 * now create the memory buffer; ISA DMA requires it below 16M
	 * (BUS_SPACE_MAXADDR_24BIT) in one contiguous segment.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /*alignment*/4,
	    /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_24BIT,
	    /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL,
	    /*filterarg*/ NULL, /*maxsize*/ sizeof(* wp->dx),
	    /*nsegments*/ 1, /*maxsegsz*/ sizeof(* wp->dx), /*flags*/ 0,
	    /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &wp->bustag);
	if (error)
		goto bad;

	error = bus_dmamem_alloc(wp->bustag, (void **)&wp->dx,
	    /*flags*/ 0, &wp->busmap);
	if (error)
		goto bad;

	/* The callback stores the bus address of the buffer in dx_p. */
	bus_dmamap_load(wp->bustag, wp->busmap, (void *)wp->dx,
	    sizeof(* wp->dx), wds_alloc_callback, (void *)&wp->dx_p,
	    /*flags*/0);

	/* initialize the wds_req structures on this unit */
	for (i = 0; i < MAXSIMUL; i++) {
		wp->dx->req[i].id = i;
		wp->wdsr_free |= 1 << i;
	}
	/*
	 * initialize the memory buffer allocation for this unit
	 *
	 * NOTE(review): fragsiz and nfrags are not declared locally —
	 * presumably file-scope globals; confirm they are per-driver
	 * rather than per-unit state.
	 */
	if (BUFSIZ / FRAGSIZ > 32) {
		fragsiz = (BUFSIZ / 32) & ~0x01; /* keep it word-aligned */
		device_printf(dev, "data buffer fragment size too small. "
		    "BUFSIZE / FRAGSIZE must be <= 32\n");
	} else
		fragsiz = FRAGSIZ & ~0x01;	/* keep it word-aligned */
	wp->data_free = 0;
	nfrags = 0;
	for (i = fragsiz; i <= BUFSIZ; i += fragsiz) {
		nfrags++;
		wp->data_free = (wp->data_free << 1) | 1;
	}

	/* complete the hardware initialization */
	if (wds_init(wp) != 0)
		goto bad;

	if (wds_getvers(wp) == -1)
		device_printf(dev, "getvers failed\n");
	device_printf(dev, "using %d bytes / %d frags for dma buffer\n",
	    BUFSIZ, nfrags);

	/* Register with CAM: queue, SIM, bus, wildcard path. */
	devq = cam_simq_alloc(MAXSIMUL);
	if (devq == NULL)
		goto bad;

	sim = cam_sim_alloc(wds_action, wds_poll, "wds", (void *) wp,
	    wp->unit, &Giant, 1, 1, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		goto bad;
	}
	wp->sim = sim;

	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) {
		cam_sim_free(sim, /* free_devq */ TRUE);
		goto bad;
	}
	if (xpt_create_path(&pathp, /* periph */ NULL, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, /* free_devq */ TRUE);
		goto bad;
	}
	wp->path = pathp;
	return (0);

bad:
	wds_free_resources(wp);
	if (error)
		return (error);
	else	/* exact error is unknown */
		return (ENXIO);
}
/*
 * Attach an LSI64854 DMA engine on the SBus (espdma/dma for SCSI,
 * ledma for Ethernet).
 *
 * Maps registers, selects the channel type from the OFW node name (for
 * ledma, also programs the TP/AUI cable selection from the
 * "cable-selection" property), creates the parent DMA tag, records the
 * supported burst size, and adds at most one child device per channel
 * from the OFW tree.
 *
 * Returns the result of bus_generic_attach() on success or an errno.
 */
static int
dma_attach(device_t dev)
{
	struct dma_softc *dsc;
	struct lsi64854_softc *lsc;
	struct dma_devinfo *ddi;
	device_t cdev;
	const char *name;
	char *cabletype;
	uint32_t csr;
	phandle_t child, node;
	int error, i;

	dsc = device_get_softc(dev);
	lsc = &dsc->sc_lsi64854;
	name = ofw_bus_get_name(dev);
	node = ofw_bus_get_node(dev);
	dsc->sc_ign = sbus_get_ign(dev);
	dsc->sc_slot = sbus_get_slot(dev);

	i = 0;
	lsc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i,
	    RF_ACTIVE);
	if (lsc->sc_res == NULL) {
		device_printf(dev, "cannot allocate resources\n");
		return (ENXIO);
	}

	if (strcmp(name, "espdma") == 0 || strcmp(name, "dma") == 0)
		lsc->sc_channel = L64854_CHANNEL_SCSI;
	else if (strcmp(name, "ledma") == 0) {
		/*
		 * Check to see which cable type is currently active and
		 * set the appropriate bit in the ledma csr so that it
		 * gets used. If we didn't netboot, the PROM won't have
		 * the "cable-selection" property; default to TP and then
		 * the user can change it via a "media" option to ifconfig.
		 */
		csr = L64854_GCSR(lsc);
		if ((OF_getprop_alloc(node, "cable-selection", 1,
		    (void **)&cabletype)) == -1) {
			/* assume TP if nothing there */
			csr |= E_TP_AUI;
		} else {
			if (strcmp(cabletype, "aui") == 0)
				csr &= ~E_TP_AUI;
			else
				csr |= E_TP_AUI;
			free(cabletype, M_OFWPROP);
		}
		L64854_SCSR(lsc, csr);
		DELAY(20000);	/* manual says we need a 20ms delay */
		lsc->sc_channel = L64854_CHANNEL_ENET;
	} else {
		device_printf(dev, "unsupported DMA channel\n");
		error = ENXIO;
		goto fail_lres;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* no locking */
	    &lsc->sc_parent_dmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate parent DMA tag\n");
		goto fail_lres;
	}

	/* Largest supported burst: 32, 16, or unknown (0). */
	i = sbus_get_burstsz(dev);
	lsc->sc_burst = (i & SBUS_BURST_32) ? 32 :
	    (i & SBUS_BURST_16) ? 16 : 0;
	lsc->sc_dev = dev;

	/* Attach children; the hardware supports one per channel. */
	i = 0;
	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		if ((ddi = dma_setup_dinfo(dev, dsc, child)) == NULL)
			continue;
		if (i != 0) {
			device_printf(dev,
			    "<%s>: only one child per DMA channel supported\n",
			    ddi->ddi_obdinfo.obd_name);
			dma_destroy_dinfo(ddi);
			continue;
		}
		if ((cdev = device_add_child(dev, NULL, -1)) == NULL) {
			device_printf(dev, "<%s>: device_add_child failed\n",
			    ddi->ddi_obdinfo.obd_name);
			dma_destroy_dinfo(ddi);
			continue;
		}
		device_set_ivars(cdev, ddi);
		i++;
	}
	return (bus_generic_attach(dev));

fail_lres:
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(lsc->sc_res),
	    lsc->sc_res);
	return (error);
}
/*
 * Attach the ADM5120 switch engine.
 *
 * Uses a hard-coded base MAC address (XXXMIPS), maps the memory and IRQ
 * resources, creates/loads the control-data DMA area, creates per-
 * descriptor TX/RX buffer maps, resets the switch, and registers one
 * ifnet per switch port (consecutive MAC addresses, 10/100 media).
 *
 * NOTE(review): error paths after resource allocation return without
 * releasing earlier resources/maps — presumably relying on detach or
 * treated as fatal; confirm before reusing this pattern.
 *
 * Returns 0 on success or an errno on failure.
 */
static int
admsw_attach(device_t dev)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	struct admsw_softc *sc = (struct admsw_softc *) device_get_softc(dev);
	struct ifnet *ifp;
	int error, i, rid;

	sc->sc_dev = dev;
	device_printf(dev, "ADM5120 Switch Engine, %d ports\n", SW_DEVS);
	sc->ndevs = 0;

	/* XXXMIPS: fix it */
	enaddr[0] = 0x00;
	enaddr[1] = 0x0C;
	enaddr[2] = 0x42;
	enaddr[3] = 0x07;
	enaddr[4] = 0xB2;
	enaddr[5] = 0x4E;

	memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));

	device_printf(sc->sc_dev, "base Ethernet address %s\n",
	    ether_sprintf(enaddr));
	callout_init(&sc->sc_watchdog, 1);

	rid = 0;
	if ((sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE)) == NULL) {
		device_printf(dev, "unable to allocate memory resource\n");
		return (ENXIO);
	}

	/* Hook up the interrupt handler. */
	rid = 0;
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(dev, "unable to allocate IRQ resource\n");
		return (ENXIO);
	}

	/* admsw_intr is installed as the filter (no ithread handler). */
	if ((error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET,
	    admsw_intr, NULL, sc, &sc->sc_ih)) != 0) {
		device_printf(dev,
		    "WARNING: unable to register interrupt handler\n");
		return (error);
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dma_tag_create(NULL, 4, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct admsw_control_data), 1,
	    sizeof(struct admsw_control_data), 0, NULL, NULL,
	    &sc->sc_control_dmat)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		return (error);
	}

	if ((error = bus_dmamem_alloc(sc->sc_control_dmat,
	    (void **)&sc->sc_control_data, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		return (error);
	}

	if ((error = bus_dmamap_load(sc->sc_control_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct admsw_control_data),
	    admsw_dma_map_addr, &sc->sc_cddma, 0)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		return (error);
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	if ((error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL,
	    &sc->sc_bufs_dmat)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		return (error);
	}

	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
		    &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create txh DMA map %d, error = %d\n",
			    i, error);
			return (error);
		}
		sc->sc_txhsoft[i].ds_mbuf = NULL;
	}

	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
		    &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create txl DMA map %d, error = %d\n",
			    i, error);
			return (error);
		}
		sc->sc_txlsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
		    &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create rxh DMA map %d, error = %d\n",
			    i, error);
			return (error);
		}
		sc->sc_rxhsoft[i].ds_mbuf = NULL;
	}

	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
		    &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create rxl DMA map %d, error = %d\n",
			    i, error);
			return (error);
		}
		sc->sc_rxlsoft[i].ds_mbuf = NULL;
	}

	admsw_init_bufs(sc);

	admsw_reset(sc);

	/* One ifnet per switch port, MAC addresses incrementing in byte 5. */
	for (i = 0; i < SW_DEVS; i++) {
		ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange,
		    admsw_mediastatus);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i],
		    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i],
		    IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);

		ifp = sc->sc_ifnet[i] = if_alloc(IFT_ETHER);

		/* Setup interface parameters */
		ifp->if_softc = sc;
		if_initname(ifp, device_get_name(dev), i);
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = admsw_ioctl;
		ifp->if_output = ether_output;
		ifp->if_start = admsw_start;
		ifp->if_init = admsw_init;
		ifp->if_mtu = ETHERMTU;
		ifp->if_baudrate = IF_Mbps(100);
		IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, ifqmaxlen));
		ifp->if_snd.ifq_drv_maxlen = max(ADMSW_NTXLDESC, ifqmaxlen);
		IFQ_SET_READY(&ifp->if_snd);
		ifp->if_capabilities |= IFCAP_VLAN_MTU;

		/* Attach the interface. */
		ether_ifattach(ifp, enaddr);
		enaddr[5]++;
	}

	/* XXX: admwdog_attach(sc); */

	/* leave interrupts and cpu port disabled */
	return (0);
}
/*
 * Check the card and initialize it: create the command and
 * scatter/gather DMA tags, reset/reinitialize the adapter firmware,
 * query adapter and drive configuration, size the command queue to the
 * firmware-reported concurrency (capped at 128, default 32), and expose
 * the control device node, logical disks, and watchdog timer.
 *
 * Returns 0 on success; on any failure, tears down via
 * ips_adapter_free() and returns ENXIO.
 */
int
ips_adapter_init(ips_softc_t *sc)
{
	int i;

	DEVICE_PRINTF(1, sc->dev, "initializing\n");
	/* Tag for the command buffer (command header + SG list), one segment. */
	if (bus_dma_tag_create(	/* parent    */	sc->adapter_dmatag,
			/* alignment */	1,
			/* boundary  */	0,
			/* lowaddr   */	BUS_SPACE_MAXADDR_32BIT,
			/* highaddr  */	BUS_SPACE_MAXADDR,
			/* filter    */	NULL,
			/* filterarg */	NULL,
			/* maxsize   */	IPS_COMMAND_LEN + IPS_MAX_SG_LEN,
			/* numsegs   */	1,
			/* maxsegsize*/	IPS_COMMAND_LEN + IPS_MAX_SG_LEN,
			/* flags     */	0,
			/* lockfunc  */ NULL,
			/* lockarg   */ NULL,
			&sc->command_dmatag) != 0) {
		device_printf(sc->dev, "can't alloc command dma tag\n");
		goto error;
	}
	/* Tag for I/O data, serialized by the queue mutex. */
	if (bus_dma_tag_create(	/* parent    */	sc->adapter_dmatag,
			/* alignment */	1,
			/* boundary  */	0,
			/* lowaddr   */	BUS_SPACE_MAXADDR_32BIT,
			/* highaddr  */	BUS_SPACE_MAXADDR,
			/* filter    */	NULL,
			/* filterarg */	NULL,
			/* maxsize   */	IPS_MAX_IOBUF_SIZE,
			/* numsegs   */	IPS_MAX_SG_ELEMENTS,
			/* maxsegsize*/	IPS_MAX_IOBUF_SIZE,
			/* flags     */	0,
			/* lockfunc  */	busdma_lock_mutex,
			/* lockarg   */	&sc->queue_mtx,
			&sc->sg_dmatag) != 0) {
		device_printf(sc->dev, "can't alloc SG dma tag\n");
		goto error;
	}
	/* create one command buffer until we know how many commands this card
	   can handle */
	sc->max_cmds = 1;
	ips_cmdqueue_init(sc);

	if (sc->ips_adapter_reinit(sc, 0))
		goto error;

	/* initialize ffdc values */
	microtime(&sc->ffdc_resettime);
	sc->ffdc_resetcount = 1;
	if ((i = ips_ffdc_reset(sc)) != 0) {
		device_printf(sc->dev,
		    "failed to send ffdc reset to device (%d)\n", i);
		goto error;
	}
	if ((i = ips_get_adapter_info(sc)) != 0) {
		device_printf(sc->dev,
		    "failed to get adapter configuration data from device (%d)\n",
		    i);
		goto error;
	}
	ips_update_nvram(sc); /* no error check as failure doesn't matter */
	if (sc->adapter_type > 0 && sc->adapter_type <= IPS_ADAPTER_MAX_T) {
		device_printf(sc->dev, "adapter type: %s\n",
		    ips_adapter_name[sc->adapter_type]);
	}
	if ((i = ips_get_drive_info(sc)) != 0) {
		device_printf(sc->dev,
		    "failed to get drive configuration data from device (%d)\n",
		    i);
		goto error;
	}

	/* Rebuild the queue at the size the firmware actually supports. */
	ips_cmdqueue_free(sc);
	if (sc->adapter_info.max_concurrent_cmds)
		sc->max_cmds = min(128, sc->adapter_info.max_concurrent_cmds);
	else
		sc->max_cmds = 32;
	if (ips_cmdqueue_init(sc)) {
		device_printf(sc->dev,
		    "failed to initialize command buffers\n");
		goto error;
	}
	sc->device_file = make_dev(&ips_cdevsw, device_get_unit(sc->dev),
	    UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "ips%d",
	    device_get_unit(sc->dev));
	sc->device_file->si_drv1 = sc;
	ips_diskdev_init(sc);
	callout_reset(&sc->timer, 10 * hz, ips_timeout, sc);
	return 0;

error:
	ips_adapter_free(sc);
	return ENXIO;
}
/*
 * Probe for AdvanSys ISA/VL SCSI controllers.
 *
 * Scans the table of well-known I/O port locations (or just the single
 * user-supplied port hint) for a card signature.  On a hit, the chip is
 * halted, its revision is used to classify it as VL / ISA / ISA-PnP (which
 * fixes the DMA address limits), DMA tags and the shared overrun buffer are
 * set up, the chip is initialized, and the interrupt is wired.  A probed
 * port is zeroed in adv_isa_ioports[] so a second probe pass skips it.
 *
 * Returns 0 once a card has been found and set up, ENXIO otherwise.
 */
static int
adv_isa_probe(device_t dev)
{
	int port_index;
	int max_port_index;
	u_long iobase, iocount, irq;
	int user_iobase = 0;
	int rid = 0;
	void *ih;
	struct resource *iores, *irqres;

	/*
	 * We don't know of any PnP ID's for these cards.
	 */
	if (isa_get_logicalid(dev) != 0)
		return (ENXIO);

	/*
	 * Default to scanning all possible device locations.
	 */
	port_index = 0;
	max_port_index = MAX_ISA_IOPORT_INDEX;

	if (bus_get_resource(dev, SYS_RES_IOPORT, 0, &iobase, &iocount) == 0) {
		user_iobase = 1;
		/* Narrow the scan to the one entry matching the hint. */
		for (; port_index <= max_port_index; port_index++)
			if (iobase <= adv_isa_ioports[port_index])
				break;
		if ((port_index > max_port_index)
		 || (iobase != adv_isa_ioports[port_index])) {
			if (bootverbose)
				printf("adv%d: Invalid baseport of 0x%lx specified. "
				    "Nearest valid baseport is 0x%x. Failing "
				    "probe.\n", device_get_unit(dev), iobase,
				    (port_index <= max_port_index) ?
					adv_isa_ioports[port_index] :
					adv_isa_ioports[max_port_index]);
			return ENXIO;
		}
		max_port_index = port_index;
	}

	/* Perform the actual probing */
	adv_set_isapnp_wait_for_key();
	for (; port_index <= max_port_index; port_index++) {
		u_int16_t port_addr = adv_isa_ioports[port_index];
		bus_size_t maxsegsz;
		bus_size_t maxsize;
		bus_addr_t lowaddr;
		int error;
		struct adv_softc *adv;

		if (port_addr == 0)
			/* Already been attached */
			continue;

		if (bus_set_resource(dev, SYS_RES_IOPORT, 0, port_addr, 1))
			continue;

		/* XXX what is the real portsize? */
		iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
		    RF_ACTIVE);
		if (iores == NULL)
			continue;

		if (adv_find_signature(rman_get_bustag(iores),
		    rman_get_bushandle(iores)) == 0) {
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			continue;
		}

		/*
		 * Got one.  Now allocate our softc
		 * and see if we can initialize the card.
		 */
		adv = adv_alloc(dev, rman_get_bustag(iores),
		    rman_get_bushandle(iores));
		if (adv == NULL) {
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			break;
		}

		/*
		 * Stop the chip.
		 */
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
		ADV_OUTW(adv, ADV_CHIP_STATUS, 0);

		/*
		 * Determine the chip version.  The revision range selects
		 * the bus flavor and therefore the DMA addressing limits.
		 */
		adv->chip_version = ADV_INB(adv, ADV_NONEISA_CHIP_REVISION);
		if ((adv->chip_version >= ADV_CHIP_MIN_VER_VL)
		 && (adv->chip_version <= ADV_CHIP_MAX_VER_VL)) {
			adv->type = ADV_VL;
			maxsegsz = ADV_VL_MAX_DMA_COUNT;
			maxsize = BUS_SPACE_MAXSIZE_32BIT;
			lowaddr = ADV_VL_MAX_DMA_ADDR;
			/* VL cards do not use an ISA DRQ line. */
			bus_delete_resource(dev, SYS_RES_DRQ, 0);
		} else if ((adv->chip_version >= ADV_CHIP_MIN_VER_ISA)
			&& (adv->chip_version <= ADV_CHIP_MAX_VER_ISA)) {
			if (adv->chip_version >= ADV_CHIP_MIN_VER_ISA_PNP) {
				adv->type = ADV_ISAPNP;
				ADV_OUTB(adv, ADV_REG_IFC,
				    ADV_IFC_INIT_DEFAULT);
			} else {
				adv->type = ADV_ISA;
			}
			maxsegsz = ADV_ISA_MAX_DMA_COUNT;
			maxsize = BUS_SPACE_MAXSIZE_24BIT;
			lowaddr = ADV_ISA_MAX_DMA_ADDR;
			adv->isa_dma_speed = ADV_DEF_ISA_DMA_SPEED;
			adv->isa_dma_channel = adv_get_isa_dma_channel(adv);
			bus_set_resource(dev, SYS_RES_DRQ, 0,
			    adv->isa_dma_channel, 1);
		} else {
			panic("advisaprobe: Unknown card revision\n");
		}

		/*
		 * Allocate a parent dmatag for all tags created
		 * by the MI portions of the advansys driver
		 */
		error = bus_dma_tag_create(
				/* parent	*/ bus_get_dma_tag(dev),
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ lowaddr,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ maxsize,
				/* nsegments	*/ ~0,
				/* maxsegsz	*/ maxsegsz,
				/* flags	*/ 0,
				/* lockfunc	*/ busdma_lock_mutex,
				/* lockarg	*/ &Giant,
				&adv->parent_dmat);
		if (error != 0) {
			printf("%s: Could not allocate DMA tag - error %d\n",
			    adv_name(adv), error);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			break;
		}

		adv->init_level += 2;

		/*
		 * The overrun buffer is a driver-wide global shared by all
		 * instances; only the first probed card allocates it.
		 */
		if (overrun_buf == NULL) {
			/* Need to allocate our overrun buffer */
			if (bus_dma_tag_create(
				/* parent	*/ adv->parent_dmat,
				/* alignment	*/ 8,
				/* boundary	*/ 0,
				/* lowaddr	*/ ADV_ISA_MAX_DMA_ADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ ADV_OVERRUN_BSIZE,
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&overrun_dmat) != 0) {
				adv_free(adv);
				bus_release_resource(dev, SYS_RES_IOPORT, 0,
				    iores);
				break;
			}
			if (bus_dmamem_alloc(overrun_dmat,
			    (void **)&overrun_buf, BUS_DMA_NOWAIT,
			    &overrun_dmamap) != 0) {
				bus_dma_tag_destroy(overrun_dmat);
				adv_free(adv);
				bus_release_resource(dev, SYS_RES_IOPORT, 0,
				    iores);
				break;
			}
			/* And permanently map it in */
			bus_dmamap_load(overrun_dmat, overrun_dmamap,
			    overrun_buf, ADV_OVERRUN_BSIZE, adv_map,
			    &overrun_physbase, /*flags*/0);
		}

		adv->overrun_physbase = overrun_physbase;

		if (adv_init(adv) != 0) {
			bus_dmamap_unload(overrun_dmat, overrun_dmamap);
			bus_dmamem_free(overrun_dmat, overrun_buf,
			    overrun_dmamap);
			bus_dma_tag_destroy(overrun_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			break;
		}

		switch (adv->type) {
		case ADV_ISAPNP:
			if (adv->chip_version == ADV_CHIP_VER_ASYN_BUG) {
				adv->bug_fix_control |=
				    ADV_BUG_FIX_ASYN_USE_SYN;
				adv->fix_asyn_xfer = ~0;
			}
			/* Fall Through */
		case ADV_ISA:
			adv->max_dma_count = ADV_ISA_MAX_DMA_COUNT;
			adv->max_dma_addr = ADV_ISA_MAX_DMA_ADDR;
			adv_set_isa_dma_settings(adv);
			break;
		case ADV_VL:
			adv->max_dma_count = ADV_VL_MAX_DMA_COUNT;
			adv->max_dma_addr = ADV_VL_MAX_DMA_ADDR;
			break;
		default:
			panic("advisaprobe: Invalid card type\n");
		}

		/* Determine our IRQ */
		if (bus_get_resource(dev, SYS_RES_IRQ, 0, &irq, NULL))
			bus_set_resource(dev, SYS_RES_IRQ, 0,
			    adv_get_chip_irq(adv), 1);
		else
			adv_set_chip_irq(adv, irq);

		irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_ACTIVE);
		/*
		 * NOTE(review): if irqres was allocated but bus_setup_intr()
		 * fails, irqres is not released here -- possible resource
		 * leak; confirm against the teardown conventions used
		 * elsewhere in this driver.
		 */
		if (irqres == NULL ||
		    bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY,
		    NULL, adv_intr, adv, &ih)) {
			bus_dmamap_unload(overrun_dmat, overrun_dmamap);
			bus_dmamem_free(overrun_dmat, overrun_buf,
			    overrun_dmamap);
			bus_dma_tag_destroy(overrun_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, 0, iores);
			break;
		}

		/* Mark as probed */
		adv_isa_ioports[port_index] = 0;
		return 0;
	}

	/* Restore (or remove) the port hint so a re-probe starts clean. */
	if (user_iobase)
		bus_set_resource(dev, SYS_RES_IOPORT, 0, iobase, iocount);
	else
		bus_delete_resource(dev, SYS_RES_IOPORT, 0);

	return ENXIO;
}
/*
 * Attach the OHCI FireWire controller: map its register BAR, allocate and
 * wire the (shareable) interrupt, create the controller-wide DMA tag, and
 * run the chip-level init before probing the firewire child bus.
 *
 * Returns 0 on success; ENXIO/ENOMEM/EIO on failure, normally after
 * releasing resources via fwohci_pci_detach().
 */
static int
fwohci_pci_attach(device_t self)
{
	fwohci_softc_t *sc = device_get_softc(self);
	int err;
	int rid;

#if defined(__DragonFly__) || __FreeBSD_version < 500000
	int intr;
	/* For the moment, put in a message stating what is wrong */
	intr = pci_read_config(self, PCIR_INTLINE, 1);
	if (intr == 0 || intr == 255) {
		device_printf(self, "Invalid irq %d\n", intr);
#ifdef __i386__
		device_printf(self, "Please switch PNP-OS to 'No' in BIOS\n");
#endif
	}
#endif

#if 0
	if (bootverbose)
		firewire_debug = bootverbose;
#endif

	mtx_init(FW_GMTX(&sc->fc), "firewire", NULL, MTX_DEF);
	fwohci_pci_init(self);

	/* Map the OHCI register window. */
	rid = PCI_CBMEM;
#if __FreeBSD_version >= 502109
	sc->bsr = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
#else
	sc->bsr = bus_alloc_resource(self, SYS_RES_MEMORY, &rid, 0, ~0, 1,
	    RF_ACTIVE);
#endif
	if (!sc->bsr) {
		device_printf(self, "Could not map memory\n");
		return ENXIO;
	}

	sc->bst = rman_get_bustag(sc->bsr);
	sc->bsh = rman_get_bushandle(sc->bsr);

	rid = 0;
#if __FreeBSD_version >= 502109
	sc->irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
#else
	sc->irq_res = bus_alloc_resource(self, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);
#endif
	if (sc->irq_res == NULL) {
		device_printf(self, "Could not allocate irq\n");
		fwohci_pci_detach(self);
		return ENXIO;
	}

	err = bus_setup_intr(self, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, (driver_intr_t *) fwohci_intr, sc, &sc->ih);
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	/* XXX splcam() should mask this irq for sbp.c*/
	err = bus_setup_intr(self, sc->irq_res, INTR_TYPE_CAM,
	    (driver_intr_t *) fwohci_dummy_intr, sc, &sc->ih_cam);
	/* XXX splbio() should mask this irq for physio()/fwmem_strategy() */
	err = bus_setup_intr(self, sc->irq_res, INTR_TYPE_BIO,
	    (driver_intr_t *) fwohci_dummy_intr, sc, &sc->ih_bio);
#endif
	if (err) {
		device_printf(self, "Could not setup irq, %d\n", err);
		fwohci_pci_detach(self);
		return ENXIO;
	}

	/*
	 * Controller-wide DMA tag; descendant tags inherit these limits.
	 * Deferred loads serialize on the firewire global mutex.
	 */
	err = bus_dma_tag_create(
#if defined(__FreeBSD__) && __FreeBSD_version >= 700020
				/*parent*/ bus_get_dma_tag(self),
#else
				/*parent*/ NULL,
#endif
				/*alignment*/ 1,
				/*boundary*/ 0,
#if BOUNCE_BUFFER_TEST
				/*lowaddr*/ BUS_SPACE_MAXADDR_24BIT,
#else
				/*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
#endif
				/*highaddr*/ BUS_SPACE_MAXADDR,
				/*filter*/ NULL,
				/*filterarg*/ NULL,
				/*maxsize*/ 0x100000,
				/*nsegments*/ 0x20,
				/*maxsegsz*/ 0x8000,
				/*flags*/ BUS_DMA_ALLOCNOW,
#if defined(__FreeBSD__) && __FreeBSD_version >= 501102
				/*lockfunc*/ busdma_lock_mutex,
				/*lockarg*/ FW_GMTX(&sc->fc),
#endif
				&sc->fc.dmat);
	if (err != 0) {
		/*
		 * NOTE(review): this failure path returns without calling
		 * fwohci_pci_detach(), unlike the other error paths above --
		 * confirm whether the BAR/irq resources are meant to leak
		 * here.
		 */
		printf("fwohci_pci_attach: Could not allocate DMA tag "
		    "- error %d\n", err);
		return (ENOMEM);
	}

	err = fwohci_init(sc, self);
	if (err) {
		device_printf(self, "fwohci_init failed with err=%d\n", err);
		fwohci_pci_detach(self);
		return EIO;
	}

	/* probe and attach a child device(firewire) */
	bus_generic_probe(self);
	bus_generic_attach(self);

	return 0;
}
/*
 * Attach the Cirrus Logic CS4281 PCI audio controller.
 *
 * Brings the chip to D0, maps BA0 (registers, memory with I/O-port
 * fallback) and BA1 (FIFO), wires the interrupt, creates the sample-buffer
 * DMA tag, powers up and initializes the chip, then registers the AC97
 * mixer and one play + one record pcm channel.
 *
 * Returns 0 on success; ENXIO after the `bad:` unwind on any failure.
 */
static int
cs4281_pci_attach(device_t dev)
{
	struct sc_info *sc;
	struct ac97_info *codec = NULL;
	char status[SND_STATUSLEN];

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
	sc->dev = dev;
	sc->type = pci_get_devid(dev);

	pci_enable_busmaster(dev);
#if __FreeBSD_version > 500000
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	}
#else
	/*
	 * NOTE(review): `data` is not declared in the visible portion of this
	 * function; presumably it is declared under a version #ifdef
	 * elsewhere in the file -- confirm the legacy branch still compiles.
	 */
	data = pci_read_config(dev, CS4281PCI_PMCS_OFFSET, 4);
	if (data & CS4281PCI_PMCS_PS_MASK) {
		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", data & CS4281PCI_PMCS_PS_MASK);
		pci_write_config(dev, CS4281PCI_PMCS_OFFSET,
		    data & ~CS4281PCI_PMCS_PS_MASK, 4);
	}
#endif

	/* BA0: control registers.  Prefer memory space, fall back to I/O. */
	sc->regid = PCIR_BAR(0);
	sc->regtype = SYS_RES_MEMORY;
	sc->reg = bus_alloc_resource(dev, sc->regtype, &sc->regid, 0, ~0,
	    CS4281PCI_BA0_SIZE, RF_ACTIVE);
	if (!sc->reg) {
		sc->regtype = SYS_RES_IOPORT;
		sc->reg = bus_alloc_resource(dev, sc->regtype, &sc->regid,
		    0, ~0, CS4281PCI_BA0_SIZE, RF_ACTIVE);
		if (!sc->reg) {
			device_printf(dev,
			    "unable to allocate register space\n");
			goto bad;
		}
	}
	sc->st = rman_get_bustag(sc->reg);
	sc->sh = rman_get_bushandle(sc->reg);

	/* BA1: FIFO memory. */
	sc->memid = PCIR_BAR(1);
	sc->mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid, 0, ~0,
	    CS4281PCI_BA1_SIZE, RF_ACTIVE);
	if (sc->mem == NULL) {
		device_printf(dev, "unable to allocate fifo space\n");
		goto bad;
	}

	sc->irqid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (!sc->irq) {
		device_printf(dev, "unable to allocate interrupt\n");
		goto bad;
	}

	if (snd_setup_intr(dev, sc->irq, 0, cs4281_intr, sc, &sc->ih)) {
		device_printf(dev, "unable to setup interrupt\n");
		goto bad;
	}

	sc->bufsz = pcm_getbuffersize(dev, 4096, CS4281_DEFAULT_BUFSZ, 65536);

	/* One contiguous segment per sample buffer, 16-bit aligned. */
	if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev),
			       /*alignment*/2, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/sc->bufsz, /*nsegments*/1,
			       /*maxsegsz*/0x3ffff, /*flags*/0,
			       /*lockfunc*/busdma_lock_mutex,
			       /*lockarg*/&Giant,
			       &sc->parent_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	/* power up */
	cs4281_power(sc, 0);

	/* init chip */
	if (cs4281_init(sc) == -1) {
		device_printf(dev, "unable to initialize the card\n");
		goto bad;
	}

	/* create/init mixer */
	codec = AC97_CREATE(dev, sc, cs4281_ac97);
	if (codec == NULL)
		goto bad;

	mixer_init(dev, ac97_getmixerclass(), codec);

	if (pcm_register(dev, sc, 1, 1))
		goto bad;

	pcm_addchan(dev, PCMDIR_PLAY, &cs4281chan_class, sc);
	pcm_addchan(dev, PCMDIR_REC, &cs4281chan_class, sc);

	snprintf(status, SND_STATUSLEN, "at %s 0x%lx irq %ld %s",
	    (sc->regtype == SYS_RES_IOPORT) ? "io" : "memory",
	    rman_get_start(sc->reg), rman_get_start(sc->irq),
	    PCM_KLDSTRING(snd_cs4281));
	pcm_setstatus(dev, status);

	return 0;

bad:
	/* Unwind in reverse order; each step guards against a NULL handle. */
	if (codec)
		ac97_destroy(codec);
	if (sc->reg)
		bus_release_resource(dev, sc->regtype, sc->regid, sc->reg);
	if (sc->mem)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->memid, sc->mem);
	if (sc->ih)
		bus_teardown_intr(dev, sc->irq, sc->ih);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq);
	if (sc->parent_dmat)
		bus_dma_tag_destroy(sc->parent_dmat);
	free(sc, M_DEVBUF);

	return ENXIO;
}
/*
 * Attach all the sub-devices we can find.
 *
 * Allocates the I/O port, IRQ and DRQ resources, cascades the ISA DMA
 * channel, creates the 24-bit-bounded parent DMA tag, initializes the
 * board, runs the bus-independent attach, and finally wires the interrupt
 * handler.  On any failure everything acquired so far is released
 * (bus_free_resource() tolerates NULL resources).
 *
 * Returns 0 on success, ENOMEM or the failing step's error otherwise.
 */
static int
aha_isa_attach(device_t dev)
{
	struct aha_softc *aha = device_get_softc(dev);
	int error = ENOMEM;

	aha->dev = dev;
	aha->portrid = 0;
	aha->port = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT,
	    &aha->portrid, AHA_NREGS, RF_ACTIVE);
	if (!aha->port) {
		device_printf(dev, "Unable to allocate I/O ports\n");
		goto fail;
	}

	aha->irqrid = 0;
	aha->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &aha->irqrid,
	    RF_ACTIVE);
	if (!aha->irq) {
		/* Fixed typo: "excluse" -> "exclusive". */
		device_printf(dev,
		    "Unable to allocate exclusive use of irq\n");
		goto fail;
	}

	aha->drqrid = 0;
	aha->drq = bus_alloc_resource_any(dev, SYS_RES_DRQ, &aha->drqrid,
	    RF_ACTIVE);
	if (!aha->drq) {
		device_printf(dev, "Unable to allocate drq\n");
		goto fail;
	}

#if 0				/* is the drq ever unset? */
	if (dev->id_drq != -1)
		isa_dmacascade(dev->id_drq);
#endif
	isa_dmacascade(rman_get_start(aha->drq));

	/*
	 * Allocate our parent dmatag.  ISA bus mastering is limited to
	 * 24-bit addresses, hence the BUS_SPACE_*_24BIT bounds.
	 */
	if (bus_dma_tag_create(	/* parent	*/ bus_get_dma_tag(dev),
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR_24BIT,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ BUS_SPACE_MAXSIZE_24BIT,
				/* nsegments	*/ ~0,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_24BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&aha->parent_dmat) != 0) {
		device_printf(dev, "dma tag create failed.\n");
		goto fail;
	}

	if (aha_init(aha)) {
		device_printf(dev, "init failed\n");
		goto fail;
	}
	/*
	 * The 1542A and B look the same.  So we guess based on
	 * the firmware revision.  It appears that only rev 0 is on
	 * the A cards.
	 */
	if (aha->boardid <= BOARD_1542 && aha->fw_major == 0) {
		device_printf(dev, "154xA may not work\n");
		aha->ccb_sg_opcode = INITIATOR_SG_CCB;
		aha->ccb_ccb_opcode = INITIATOR_CCB;
	}

	error = aha_attach(aha);
	if (error) {
		device_printf(dev, "attach failed\n");
		goto fail;
	}

	error = bus_setup_intr(dev, aha->irq, INTR_TYPE_CAM | INTR_ENTROPY |
	    INTR_MPSAFE, NULL, aha_intr, aha, &aha->ih);
	if (error) {
		device_printf(dev, "Unable to register interrupt handler\n");
		aha_detach(aha);
		goto fail;
	}

	return (0);
fail:
	aha_free(aha);
	bus_free_resource(dev, SYS_RES_IOPORT, aha->port);
	bus_free_resource(dev, SYS_RES_IRQ, aha->irq);
	bus_free_resource(dev, SYS_RES_DRQ, aha->drq);
	return (error);
}
/*
 * Attach a MegaRAID SAS (mfi) controller.
 *
 * Selects the register BAR from the identified board flavor, maps it,
 * creates the parent DMA tag, allocates the interrupt (MSI when enabled
 * and available, legacy INTx otherwise), then hands off to the
 * bus-independent mfi_attach().  Failures after the register mapping fall
 * through `out:` to mfi_free()/mfi_pci_free().
 */
static int
mfi_pci_attach(device_t dev)
{
	struct mfi_softc *sc;
	struct mfi_ident *m;
	int count, error;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->mfi_dev = dev;
	/*
	 * NOTE(review): m is dereferenced without a NULL check --
	 * presumably the probe routine guarantees a match; confirm.
	 */
	m = mfi_find_ident(dev);
	sc->mfi_flags = m->flags;

	/* Ensure busmastering is enabled */
	pci_enable_busmaster(dev);

	/* Allocate PCI registers */
	if ((sc->mfi_flags & MFI_FLAGS_1064R) ||
	    (sc->mfi_flags & MFI_FLAGS_1078)) {
		/* 1068/1078: Memory mapped BAR is at offset 0x10 */
		sc->mfi_regs_rid = PCIR_BAR(0);
	}
	else if ((sc->mfi_flags & MFI_FLAGS_GEN2) ||
	    (sc->mfi_flags & MFI_FLAGS_SKINNY) ||
	    (sc->mfi_flags & MFI_FLAGS_TBOLT)) {
		/* Gen2/Skinny: Memory mapped BAR is at offset 0x14 */
		sc->mfi_regs_rid = PCIR_BAR(1);
	}
	if ((sc->mfi_regs_resource = bus_alloc_resource_any(sc->mfi_dev,
	    SYS_RES_MEMORY, &sc->mfi_regs_rid, RF_ACTIVE)) == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		return (ENXIO);
	}
	sc->mfi_btag = rman_get_bustag(sc->mfi_regs_resource);
	sc->mfi_bhandle = rman_get_bushandle(sc->mfi_regs_resource);

	error = ENOMEM;

	/* Allocate parent DMA tag */
	if (bus_dma_tag_create(	bus_get_dma_tag(dev),	/* PCI parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				BUS_SPACE_UNRESTRICTED,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_parent_dmat)) {
		device_printf(dev, "Cannot allocate parent DMA tag\n");
		goto out;
	}

	/* Allocate IRQ resource.  rid 1 selects the MSI vector. */
	sc->mfi_irq_rid = 0;
	count = 1;
	if (mfi_msi && pci_alloc_msi(sc->mfi_dev, &count) == 0) {
		device_printf(sc->mfi_dev, "Using MSI\n");
		sc->mfi_irq_rid = 1;
	}
	if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
	    &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
		error = EINVAL;
		goto out;
	}

	error = mfi_attach(sc);
out:
	if (error) {
		mfi_free(sc);
		mfi_pci_free(sc);
	}

	return (error);
}
/*
 * Attach an AdvanSys PCI SCSI controller.
 *
 * Verifies the card signature in its I/O window, allocates the softc and
 * DMA tags (plus the driver-global overrun buffer on first attach), halts
 * and classifies the chip, configures active negation / signal filtering,
 * initializes the controller, wires the interrupt, and runs the
 * bus-independent attach.  Returns 0 on success, ENXIO on any failure.
 */
static int
adv_pci_attach(device_t dev)
{
	struct adv_softc *adv;
	u_int32_t id;
	int error, rid, irqrid;
	void *ih;
	struct resource *iores, *irqres;

	/*
	 * Determine the chip version.
	 */
	id = pci_get_devid(dev);
	pci_enable_busmaster(dev);

	/*
	 * Early chips can't handle non-zero latency timer settings.
	 */
	if (id == PCI_DEVICE_ID_ADVANSYS_1200A ||
	    id == PCI_DEVICE_ID_ADVANSYS_1200B) {
		pci_write_config(dev, PCIR_LATTIMER, /*value*/0, /*bytes*/1);
	}

	rid = PCI_BASEADR0;
	iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
	if (iores == NULL)
		return ENXIO;

	if (adv_find_signature(iores) == 0) {
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv = adv_alloc(dev, iores, 0);
	if (adv == NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	/* Allocate a dmatag for our transfer DMA maps */
	error = bus_dma_tag_create(
			/* parent	*/ bus_get_dma_tag(dev),
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ ADV_PCI_MAX_DMA_ADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* nsegments	*/ ~0,
			/* maxsegsz	*/ ADV_PCI_MAX_DMA_COUNT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adv->parent_dmat);
	if (error != 0) {
		device_printf(dev, "Could not allocate DMA tag - error %d\n",
		    error);
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv->init_level++;

	/*
	 * The overrun buffer is a driver-wide global shared by every
	 * instance; only the first card through here allocates it.
	 */
	if (overrun_buf == NULL) {
		/* Need to allocate our overrun buffer */
		if (bus_dma_tag_create(
				/* parent	*/ adv->parent_dmat,
				/* alignment	*/ 8,
				/* boundary	*/ 0,
				/* lowaddr	*/ ADV_PCI_MAX_DMA_ADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ ADV_OVERRUN_BSIZE,
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&overrun_dmat) != 0) {
			bus_dma_tag_destroy(adv->parent_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
			return ENXIO;
		}
		if (bus_dmamem_alloc(overrun_dmat,
				     &overrun_buf,
				     BUS_DMA_NOWAIT,
				     &overrun_dmamap) != 0) {
			bus_dma_tag_destroy(overrun_dmat);
			bus_dma_tag_destroy(adv->parent_dmat);
			adv_free(adv);
			bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
			return ENXIO;
		}
		/* And permanently map it in */
		bus_dmamap_load(overrun_dmat, overrun_dmamap, overrun_buf,
		    ADV_OVERRUN_BSIZE, adv_map, &overrun_physbase,
		    /*flags*/0);
	}

	adv->overrun_physbase = overrun_physbase;

	/*
	 * Stop the chip.
	 */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);

	adv->chip_version = ADV_INB(adv, ADV_NONEISA_CHIP_REVISION);
	adv->type = ADV_PCI;

	/*
	 * Setup active negation and signal filtering.
	 */
	{
		u_int8_t extra_cfg;

		if (adv->chip_version >= ADV_CHIP_VER_PCI_ULTRA_3150)
			adv->type |= ADV_ULTRA;
		if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050)
			extra_cfg = ADV_IFC_ACT_NEG | ADV_IFC_WR_EN_FILTER;
		else
			extra_cfg = ADV_IFC_ACT_NEG | ADV_IFC_SLEW_RATE;
		ADV_OUTB(adv, ADV_REG_IFC, extra_cfg);
	}

	if (adv_init(adv) != 0) {
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	adv->max_dma_count = ADV_PCI_MAX_DMA_COUNT;
	adv->max_dma_addr = ADV_PCI_MAX_DMA_ADDR;

#if defined(CC_DISABLE_PCI_PARITY_INT) && CC_DISABLE_PCI_PARITY_INT
	{
		u_int16_t config_msw;

		config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
		config_msw &= 0xFFC0;
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	}
#endif

	/* Early 1200A/B boards need several firmware workarounds. */
	if (id == PCI_DEVICE_ID_ADVANSYS_1200A ||
	    id == PCI_DEVICE_ID_ADVANSYS_1200B) {
		adv->bug_fix_control |= ADV_BUG_FIX_IF_NOT_DWB;
		adv->bug_fix_control |= ADV_BUG_FIX_ASYN_USE_SYN;
		adv->fix_asyn_xfer = ~0;
	}

	irqrid = 0;
	irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irqrid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (irqres == NULL ||
	    bus_setup_intr(dev, irqres,
	    INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, NULL, adv_intr, adv,
	    &ih) != 0) {
		if (irqres != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, irqrid,
			    irqres);
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}

	if (adv_attach(adv) != 0) {
		bus_teardown_intr(dev, irqres, ih);
		bus_release_resource(dev, SYS_RES_IRQ, irqrid, irqres);
		adv_free(adv);
		bus_release_resource(dev, SYS_RES_IOPORT, rid, iores);
		return ENXIO;
	}
	return 0;
}
/*
 * Attach an AMI MegaRAID controller (older driver revision).
 *
 * Distinguishes the Intel i960 "Quartz" boards (memory-mapped) from the
 * standard I/O-mapped boards, verifies the matching decode is enabled in
 * the PCI command register, maps the register window, creates the parent
 * DMA tag (old pre-lockfunc bus_dma_tag_create() signature), then runs the
 * bus-independent attach and starts the controller.
 */
static int
amr_pci_attach(device_t dev)
{
	struct amr_softc *sc;
	int rid, rtype, error;
	u_int32_t command;

	debug("called");

	/*
	 * Initialise softc.
	 */
	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->amr_dev = dev;

	/*
	 * Determine board type..
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 1);
	if ((pci_get_vendor(dev) == 0x8086) &&
	    (pci_get_device(dev) == 0x1960)) {
		sc->amr_type = AMR_TYPE_QUARTZ;

		/*
		 * Make sure we are going to be able to talk to this board.
		 */
		if ((command & PCIM_CMD_MEMEN) == 0) {
			device_printf(dev, "memory window not available\n");
			return(ENXIO);
		}
	} else {
		sc->amr_type = AMR_TYPE_STD;

		/*
		 * Make sure we are going to be able to talk to this board.
		 */
		if ((command & PCIM_CMD_PORTEN) == 0) {
			device_printf(dev, "I/O window not available\n");
			return(ENXIO);
		}
	}

	/*
	 * Allocate the PCI register window.
	 */
	rid = AMR_CFG_BASE;
	rtype = (sc->amr_type == AMR_TYPE_QUARTZ) ? SYS_RES_MEMORY :
	    SYS_RES_IOPORT;
	sc->amr_reg = bus_alloc_resource(dev, rtype, &rid, 0, ~0, 1,
	    RF_ACTIVE);
	if (sc->amr_reg == NULL) {
		device_printf(sc->amr_dev,
		    "couldn't allocate register window\n");
		amr_free(sc);
		return(ENXIO);
	}
	sc->amr_btag = rman_get_bustag(sc->amr_reg);
	sc->amr_bhandle = rman_get_bushandle(sc->amr_reg);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(NULL,		/* parent */
			1, 0,				/* alignment, boundary */
			BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			BUS_SPACE_MAXADDR,		/* highaddr */
			NULL, NULL,			/* filter, filterarg */
			MAXBSIZE, AMR_NSEG,		/* maxsize, nsegments */
			BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			BUS_DMA_ALLOCNOW,		/* flags */
			&sc->amr_parent_dmat);
	if (error != 0) {
		device_printf(dev, "can't allocate parent DMA tag\n");
		amr_free(sc);
		return(ENOMEM);
	}

	/*
	 * Do bus-independant initialisation.
	 */
	error = amr_attach(sc);
	if (error != 0) {
		amr_free(sc);
		return(error);
	}

	/*
	 * Start the controller.
	 */
	amr_startup(sc);
	return(0);
}
/*
 * Attach an AMI MegaRAID controller (newer driver revision).
 *
 * Identifies the board (Quartz vs. standard, optional 64-bit S/G support),
 * maps the register window, wires the interrupt, builds the parent and
 * buffer DMA tags, initializes the mailbox, scatter/gather lists and CCBs,
 * and finally calls the bus-independent amr_attach().  All failures funnel
 * through `out:` to amr_pci_free().
 */
static int
amr_pci_attach(device_t dev)
{
	struct amr_softc *sc;
	struct amr_ident *id;
	int rid, rtype, error;

	debug_called(1);

	/*
	 * Initialise softc.
	 */
	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->amr_dev = dev;

	/* assume failure is 'not configured' */
	error = ENXIO;

	/*
	 * Determine board type.
	 */
	if ((id = amr_find_ident(dev)) == NULL)
		return (ENXIO);

	if (id->flags & AMR_ID_QUARTZ) {
		sc->amr_type |= AMR_TYPE_QUARTZ;
	}

	/* 64-bit S/G only when the board supports it and paddrs are wide. */
	if ((amr_force_sg32 == 0) && (id->flags & AMR_ID_DO_SG64) &&
	    (sizeof(vm_paddr_t) > 4)) {
		device_printf(dev, "Using 64-bit DMA\n");
		sc->amr_type |= AMR_TYPE_SG64;
	}

	/* force the busmaster enable bit on */
	pci_enable_busmaster(dev);

	/*
	 * Allocate the PCI register window.
	 */
	rid = PCIR_BAR(0);
	rtype = AMR_IS_QUARTZ(sc) ? SYS_RES_MEMORY : SYS_RES_IOPORT;
	sc->amr_reg = bus_alloc_resource_any(dev, rtype, &rid, RF_ACTIVE);
	if (sc->amr_reg == NULL) {
		device_printf(sc->amr_dev,
		    "can't allocate register window\n");
		goto out;
	}
	sc->amr_btag = rman_get_bustag(sc->amr_reg);
	sc->amr_bhandle = rman_get_bushandle(sc->amr_reg);

	/*
	 * Allocate and connect our interrupt.
	 */
	rid = 0;
	sc->amr_irq = bus_alloc_resource_any(sc->amr_dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->amr_irq == NULL) {
		device_printf(sc->amr_dev, "can't allocate interrupt\n");
		goto out;
	}
	if (bus_setup_intr(sc->amr_dev, sc->amr_irq,
	    INTR_TYPE_BIO | INTR_ENTROPY | INTR_MPSAFE, NULL, amr_pci_intr,
	    sc, &sc->amr_intr)) {
		device_printf(sc->amr_dev, "can't set up interrupt\n");
		goto out;
	}

	debug(2, "interrupt attached");

	/* assume failure is 'out of memory' */
	error = ENOMEM;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* PCI parent */
			       1, 0,			/* alignment,boundary */
			       AMR_IS_SG64(sc) ?
			       BUS_SPACE_MAXADDR :
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       BUS_SPACE_MAXSIZE,	/* maxsize */
			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* lockfunc, lockarg */
			       &sc->amr_parent_dmat)) {
		device_printf(dev, "can't allocate parent DMA tag\n");
		goto out;
	}

	/*
	 * Create DMA tag for mapping buffers into controller-addressable
	 * space.
	 *
	 * NOTE(review): &sc->amr_list_lock is handed to these tags as the
	 * lockarg before mtx_init() runs below -- only the pointer is
	 * stored at tag-creation time, but confirm no deferred load can
	 * happen before the mutex is initialized.
	 */
	if (bus_dma_tag_create(sc->amr_parent_dmat,	/* parent */
			       1, 0,			/* alignment,boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       DFLTPHYS,		/* maxsize */
			       AMR_NSEG,		/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       busdma_lock_mutex,	/* lockfunc */
			       &sc->amr_list_lock,	/* lockarg */
			       &sc->amr_buffer_dmat)) {
		device_printf(sc->amr_dev, "can't allocate buffer DMA tag\n");
		goto out;
	}

	/* 64-bit-capable variant of the buffer tag. */
	if (bus_dma_tag_create(sc->amr_parent_dmat,	/* parent */
			       1, 0,			/* alignment,boundary */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       DFLTPHYS,		/* maxsize */
			       AMR_NSEG,		/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       busdma_lock_mutex,	/* lockfunc */
			       &sc->amr_list_lock,	/* lockarg */
			       &sc->amr_buffer64_dmat)) {
		device_printf(sc->amr_dev, "can't allocate buffer DMA tag\n");
		goto out;
	}

	debug(2, "dma tag done");

	/*
	 * Allocate and set up mailbox in a bus-visible fashion.
	 */
	mtx_init(&sc->amr_list_lock, "AMR List Lock", NULL, MTX_DEF);
	mtx_init(&sc->amr_hw_lock, "AMR HW Lock", NULL, MTX_DEF);
	if ((error = amr_setup_mbox(sc)) != 0)
		goto out;

	debug(2, "mailbox setup");

	/*
	 * Build the scatter/gather buffers.
	 */
	if ((error = amr_sglist_map(sc)) != 0)
		goto out;
	debug(2, "s/g list mapped");

	if ((error = amr_ccb_map(sc)) != 0)
		goto out;
	debug(2, "ccb mapped");

	/*
	 * Do bus-independant initialisation, bring controller online.
	 */
	error = amr_attach(sc);

out:
	if (error)
		amr_pci_free(sc);
	return(error);
}