/*
 * Device attach: allocate and initialize the HIM (hardware interface
 * module) instance for this PCI function, bring up the adapter, and hook
 * the HBA onto a virtual bus (VBUS).  Returns 0 on success, nonzero on
 * failure.
 */
static int hpt_attach(device_t dev)
{
	PHBA hba = (PHBA)device_get_softc(dev);
	HIM *him = hba->ldm_adapter.him;	/* selected by hpt_probe() */
	PCI_ID pci_id;
	HPT_UINT size;
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));

	pci_enable_busmaster(dev);

	/* Identify the chip so the HIM can size its private state. */
	pci_id.vid = pci_get_vendor(dev);
	pci_id.did = pci_get_device(dev);
	pci_id.rev = pci_get_revid(dev);
	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);

	size = him->get_adapter_size(&pci_id);
	/* M_WAITOK allocations normally do not fail; check kept for safety. */
	hba->ldm_adapter.him_handle = kmalloc(size, M_DEVBUF, M_WAITOK);
	if (!hba->ldm_adapter.him_handle)
		return ENXIO;

	hba->pcidev = dev;
	hba->pciaddr.tree = 0;
	hba->pciaddr.bus = pci_get_bus(dev);
	hba->pciaddr.device = pci_get_slot(dev);
	hba->pciaddr.function = pci_get_function(dev);

	/* Let the HIM bring up the hardware; release its state on failure. */
	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
		kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
		return -1;
	}

	os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));

	/*
	 * If no existing VBUS accepted this adapter, create a fresh VBUS
	 * (extension header plus LDM-sized body) and register again.
	 */
	if (!ldm_register_adapter(&hba->ldm_adapter)) {
		size = ldm_get_vbus_size();
		vbus_ext = kmalloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK);
		if (!vbus_ext) {
			kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
			return -1;
		}
		/*
		 * Only the VBUS_EXT header is zeroed here; presumably
		 * ldm_create_vbus() initializes the trailing vbus body —
		 * TODO confirm.
		 */
		memset(vbus_ext, 0, sizeof(VBUS_EXT));
		vbus_ext->ext_type = EXT_TYPE_VBUS;
		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
		ldm_register_adapter(&hba->ldm_adapter);
	}

	/* Link this HBA onto the list of the VBUS it registered with. */
	ldm_for_each_vbus(vbus, vbus_ext) {
		if (hba->ldm_adapter.vbus==vbus) {
			hba->vbus_ext = vbus_ext;
			hba->next = vbus_ext->hba_list;
			vbus_ext->hba_list = hba;
			break;
		}
	}
	return 0;
}
/*
 * Route a PCI interrupt pin through the Open Firmware interrupt map.
 * A reg property is fabricated from the child's bus/slot/function so
 * non-OFW children can be looked up too.  Returns the mapped interrupt,
 * the pin itself when it looks like a real interrupt number (> 4), or
 * PCI_INVALID_IRQ when no route exists.
 *
 * Fix: the '&' in two address-of expressions had been mangled into the
 * '®' mojibake (HTML &reg; residue), which does not compile.
 */
static int
ofw_pci_route_interrupt(device_t bus, device_t dev, int pin)
{
	struct ofw_pci_softc *sc;
	struct ofw_pci_register reg;
	uint32_t pintr, mintr[2];
	int intrcells;
	phandle_t iparent;

	sc = device_get_softc(bus);
	pintr = pin;

	/* Fabricate imap information in case this isn't an OFW device */
	bzero(&reg, sizeof(reg));
	reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) |
	    (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) |
	    (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT);

	intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev),
	    &sc->sc_pci_iinfo, &reg, sizeof(reg), &pintr, sizeof(pintr),
	    mintr, sizeof(mintr), &iparent);
	if (intrcells) {
		pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr);
		return (pintr);
	}

	/* Maybe it's a real interrupt, not an intpin */
	if (pin > 4)
		return (pin);

	device_printf(bus, "could not route pin %d for device %d.%d\n",
	    pin, pci_get_slot(dev), pci_get_function(dev));
	return (PCI_INVALID_IRQ);
}
/*
 * MCDI logging hook: format one MCDI request/response (header then
 * payload) into a buffer prefixed with the device's PCI address and emit
 * the result via printf().  No-op unless mcdi_logging is enabled.
 */
static void sfxge_mcdi_logger(void *arg, efx_log_msg_t type,
			      void *header, size_t header_size,
			      void *data, size_t data_size)
{
	struct sfxge_softc *sc = (struct sfxge_softc *)arg;
	char buffer[SFXGE_MCDI_LOG_BUF_SIZE];
	size_t pfxsize;		/* length of the "sfc ... REQ/RESP:" prefix */
	size_t start;

	if (!sc->mcdi_logging)
		return;

	/*
	 * NOTE(review): snprintf() returns the would-be length, so if the
	 * prefix ever exceeded the buffer pfxsize would point past its end;
	 * presumably sfxge_mcdi_do_log() bounds its writes — confirm.
	 */
	pfxsize = snprintf(buffer, sizeof(buffer),
	    "sfc %04x:%02x:%02x.%02x %s MCDI RPC %s:",
	    pci_get_domain(sc->dev),
	    pci_get_bus(sc->dev),
	    pci_get_slot(sc->dev),
	    pci_get_function(sc->dev),
	    device_get_nameunit(sc->dev),
	    type == EFX_LOG_MCDI_REQUEST ? "REQ" :
	    type == EFX_LOG_MCDI_RESPONSE ? "RESP" : "???");
	start = sfxge_mcdi_do_log(buffer, header, header_size,
	    pfxsize, pfxsize);
	start = sfxge_mcdi_do_log(buffer, data, data_size, pfxsize, start);
	/* Flush whatever is left after the last complete line. */
	if (start != pfxsize) {
		buffer[start] = '\0';
		printf("%s\n", buffer);
	}
}
/*
 * Route a PCI interrupt pin on the IXP425: a fixed per-slot rotation of
 * the four PCI_INT_{A..D} lines, with a fifth table row for the optional
 * USB controller on the Gateworks Avila.  Returns the routed line, or -1
 * when the slot/pin pair has no mapping.
 */
int
ixp425_md_route_interrupt(device_t bridge, device_t device, int pin)
{
	static int ixp425_pci_table[IXP425_MAX_DEV][IXP425_MAX_LINE] = {
		{PCI_INT_A, PCI_INT_B, PCI_INT_C, PCI_INT_D},
		{PCI_INT_B, PCI_INT_C, PCI_INT_D, PCI_INT_A},
		{PCI_INT_C, PCI_INT_D, PCI_INT_A, PCI_INT_B},
		{PCI_INT_D, PCI_INT_A, PCI_INT_B, PCI_INT_C},
		/* NB: for optional USB controller on Gateworks Avila */
		{PCI_INT_A, PCI_INT_B, PCI_INT_C, PCI_INT_D},
	};
	int dev;

	dev = pci_get_slot(device);
	if (bootverbose)
		device_printf(bridge, "routing pin %d for %s\n", pin,
		    device_get_nameunit(device));
	/* Both slot and pin are 1-based; the table is 0-based. */
	if (pin >= 1 && pin <= IXP425_MAX_LINE &&
	    dev >= 1 && dev <= IXP425_MAX_DEV) {
		return (ixp425_pci_table[dev - 1][pin - 1]);
	} else
		printf("ixppcib: no mapping for %d/%d/%d\n",
		    pci_get_bus(device), dev, pci_get_function(device));

	return (-1);
}
/*
 * Re-execute (repost) the video BIOS of the boot display adapter via the
 * x86 BIOS emulator.  Only supported on amd64/i386 (non-PC98); returns
 * EINVAL if the device is not the boot display, ENOTSUP when no shadowed
 * option ROM is present or on unsupported platforms, 0 on success.
 *
 * Fix: the '&' in two address-of expressions had been mangled into the
 * '®' mojibake (HTML &reg; residue), which does not compile.
 */
int
vga_pci_repost(device_t dev)
{
#if defined(__amd64__) || (defined(__i386__) && !defined(PC98))
	x86regs_t regs;

	if (!vga_pci_is_boot_display(dev))
		return (EINVAL);

	if (x86bios_get_orm(VGA_PCI_BIOS_SHADOW_ADDR) == NULL)
		return (ENOTSUP);

	x86bios_init_regs(&regs);
	/* AH = bus, AL = device(7:3)/function(2:0), per the POST convention. */
	regs.R_AH = pci_get_bus(dev);
	regs.R_AL = (pci_get_slot(dev) << 3) | (pci_get_function(dev) & 0x07);
	regs.R_DL = 0x80;

	device_printf(dev, "REPOSTing\n");
	/* Jump to the ROM entry point, 3 bytes past the ROM signature. */
	x86bios_call(&regs, X86BIOS_PHYSTOSEG(VGA_PCI_BIOS_SHADOW_ADDR + 3),
	    X86BIOS_PHYSTOOFF(VGA_PCI_BIOS_SHADOW_ADDR + 3));

	x86bios_get_intr(0x10);

	return (0);
#else
	return (ENOTSUP);
#endif
}
/*
 * Probe: match this PCI function against every device id supported by
 * any registered HIM.  On a match, set the device description and seed
 * the softc (HBA) with the owning HIM.  Returns 0 on match, ENXIO
 * otherwise.
 */
static int hpt_probe(device_t dev)
{
	PCI_ID pci_id;
	HIM *him;
	int i;
	PHBA hba;

	/* Some of supported chips are used not only by HPT. */
	if (pci_get_vendor(dev) != 0x1103 && !attach_generic)
		return (ENXIO);
	for (him = him_list; him; him = him->next) {
		/* Walk this HIM's supported-id table until exhausted. */
		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
			if ((pci_get_vendor(dev) == pci_id.vid) &&
				(pci_get_device(dev) == pci_id.did)){
				KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
					pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
				));
				device_set_desc(dev, him->name);
				hba = (PHBA)device_get_softc(dev);
				memset(hba, 0, sizeof(HBA));
				hba->ext_type = EXT_TYPE_HBA;
				hba->ldm_adapter.him = him;
				return 0;
			}
		}
	}
	return (ENXIO);
}
/*
 * Check for an 80-pin (UDMA-capable) cable on a HighPoint channel by
 * toggling the cable-detect enable bit in PCI config space, sampling the
 * status register at 0x5a, and restoring the original config value.
 * Modes above UDMA2 are downgraded to UDMA2 when only a 40-pin cable is
 * detected (and cable checking is enabled).
 */
static int
ata_highpoint_check_80pin(device_t dev, int mode)
{
	device_t parent = device_get_parent(dev);
	struct ata_pci_controller *ctlr = device_get_softc(parent);
	struct ata_channel *ch = device_get_softc(dev);
	u_int8_t reg, val, res;

	/* HPT374 function 1 uses different per-channel config registers. */
	if (ctlr->chip->cfg1 == HPT_374 && pci_get_function(parent) == 1) {
		reg = ch->unit ? 0x57 : 0x53;
		val = pci_read_config(parent, reg, 1);
		pci_write_config(parent, reg, val | 0x80, 1);
	}
	else {
		reg = 0x5b;
		val = pci_read_config(parent, reg, 1);
		pci_write_config(parent, reg, val & 0xfe, 1);
	}
	/* Sample this channel's cable-detect bit, then restore the register. */
	res = pci_read_config(parent, 0x5a, 1) & (ch->unit ? 0x1:0x2);
	pci_write_config(parent, reg, val, 1);

	if (ata_dma_check_80pin && mode > ATA_UDMA2 && res) {
		ata_print_cable(dev, "controller");
		mode = ATA_UDMA2;
	}
	return mode;
}
/*
 * Resolve the Open Firmware node for a PCI device from its
 * bus/slot/function address.
 */
phandle_t
ofw_pci_node(device_t dev)
{
	int bus, slot, func;

	bus = pci_get_bus(dev);
	slot = pci_get_slot(dev);
	func = pci_get_function(dev);

	return (ofw_pci_find_node(bus, slot, func));
}
/*
 * Route a PCI interrupt pin on the Octeon bridge: the four PCI INT lines
 * repeat every four (slot + pin) positions; Lanner boards wire slots 3
 * and 9 one position differently.
 */
static int
octopci_route_interrupt(device_t dev, device_t child, int pin)
{
	struct octopci_softc *sc;
	unsigned bus, slot, func;
	unsigned irq;

	sc = device_get_softc(dev);	/* NOTE(review): sc is currently unused */

	bus = pci_get_bus(child);	/* NOTE(review): bus/func are unused below */
	slot = pci_get_slot(child);
	func = pci_get_function(child);
#if defined(OCTEON_VENDOR_LANNER)
	if (slot < 32) {
		/* Slots 3 and 9 are wired one position off the usual swizzle. */
		if (slot == 3 || slot == 9)
			irq = pin;
		else
			irq = pin - 1;
		return (CVMX_IRQ_PCI_INT0 + (irq & 3));
	}
#endif
	irq = slot + pin - 3;

	return (CVMX_IRQ_PCI_INT0 + (irq & 3));
}
/*
 * Probe: match this PCI function against every device id supported by
 * any registered HIM.  Returns 0 on match (description set, softc
 * initialized with the owning HIM), ENXIO otherwise.
 */
static int hpt_probe(device_t dev)
{
	PCI_ID pci_id;
	HIM *him;
	int i;
	PHBA hba;

	for (him = him_list; him; him = him->next) {
		for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
			/* Give the HIM a chance to account this id; the
			 * return value is intentionally ignored here. */
			if (him->get_controller_count)
				him->get_controller_count(&pci_id,0,0);
			if ((pci_get_vendor(dev) == pci_id.vid) &&
				(pci_get_device(dev) == pci_id.did)){
				KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
					pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
				));
				device_set_desc(dev, him->name);
				hba = (PHBA)device_get_softc(dev);
				memset(hba, 0, sizeof(HBA));
				hba->ext_type = EXT_TYPE_HBA;
				hba->ldm_adapter.him = him;
				return 0;
			}
		}
	}
	return (ENXIO);
}
/*
 * Route PCI interrupts on the IQ31244/IQ80321 boards: devices on the
 * ATU's own bus map to fixed XINT lines by slot number.  Anything else
 * is reported as unmapped and 0 is returned.
 *
 * Fix: the "No mapping" printf printed the interrupt pin with %c, which
 * emits an unprintable control byte for pin values 1-4; use %d so the
 * pin number is legible.
 */
extern int
machdep_pci_route_interrupt(device_t pcib, device_t dev, int pin)
{
	int bus;
	int device;
	int func;
	uint32_t busno;
	struct i80321_pci_softc *sc = device_get_softc(pcib);

	bus = pci_get_bus(dev);
	device = pci_get_slot(dev);
	func = pci_get_function(dev);

	/* Read our bus number from the ATU; 0xff means "not yet assigned". */
	busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCIXSR);
	busno = PCIXSR_BUSNO(busno);
	if (busno == 0xff)
		busno = 0;

	/* Only devices directly on our bus have a fixed routing. */
	if (bus != busno)
		goto no_mapping;

	switch (device) {
	/* IQ31244 PCI */
	case 1: /* PCIX-PCIX bridge */
		/*
		 * The S-ATA chips are behind the bridge, and all of
		 * the S-ATA interrupts are wired together.
		 */
		return (ICU_INT_XINT(2));
	case 2: /* PCI slot */
		/* All pins are wired together. */
		return (ICU_INT_XINT(3));
	case 3: /* i82546 dual Gig-E */
		if (pin == 1 || pin == 2)
			return (ICU_INT_XINT(0));
		goto no_mapping;
	/* IQ80321 PCI */
	case 4: /* i82544 Gig-E */
	case 8:
		/*
		 * Apparently you can set the device for the ethernet adapter
		 * to 8 with a jumper, so handle that as well
		 */
		if (pin == 1)
			return (ICU_INT_XINT(0));
		goto no_mapping;
	case 6: /* S-PCI-X slot */
		if (pin == 1)
			return (ICU_INT_XINT(2));
		if (pin == 2)
			return (ICU_INT_XINT(3));
		goto no_mapping;
	default:
no_mapping:
		printf("No mapping for %d/%d/%d/%d\n", bus, device, func, pin);
	}
	return (0);
}
/*
 * Return a pointer to a pretty name for a PCI device.  If the device
 * has a driver attached its name/unit is used; otherwise a name is
 * generated from the device's PCI address into a static buffer (the
 * returned pointer is only valid until the next call).
 */
const char *
pcib_child_name(device_t child)
{
	static char buf[64];
	const char *nameunit;

	nameunit = device_get_nameunit(child);
	if (nameunit != NULL)
		return (nameunit);

	snprintf(buf, sizeof(buf), "pci%d:%d:%d:%d", pci_get_domain(child),
	    pci_get_bus(child), pci_get_slot(child), pci_get_function(child));
	return (buf);
}
/*
 * Route an interrupt pin via the BIOS $PIR table when the platform
 * provides one (__HAVE_PIR); otherwise no routing is possible and
 * PCI_INVALID_IRQ is returned.
 */
static int
legacy_pcib_route_interrupt(device_t pcib, device_t dev, int pin)
{

#ifdef __HAVE_PIR
	return (pci_pir_route_interrupt(pci_get_bus(dev), pci_get_slot(dev),
	    pci_get_function(dev), pin));
#else
	/* No routing possible */
	return (PCI_INVALID_IRQ);
#endif
}
/*
 * Fill in the generic (chip-independent) fields of the adw softc from
 * the matched identity entry.  Always succeeds and returns 0.
 */
static int
adw_generic_setup(device_t dev, struct adw_pci_identity *entry,
		  struct adw_softc *adw)
{
	/* PCI function 1 hosts the second ('B') channel. */
	if (pci_get_function(dev) == 1)
		adw->channel = 'B';
	else
		adw->channel = 'A';

	adw->chip = ADW_CHIP_NONE;
	adw->features = ADW_FENONE;
	adw->flags = ADW_FNONE;
	adw->mcode_data = entry->mcode_data;
	adw->default_eeprom = entry->default_eeprom;

	return (0);
}
/*
 * HighPoint chipset support functions
 */
/*
 * Probe for a supported HighPoint ATA controller, build its description
 * string ("HighPoint <chip> [channel] <mode> controller"), and install
 * the chip id and chipinit hook.  Returns BUS_PROBE_LOW_PRIORITY on a
 * match, ENXIO otherwise.
 *
 * Fix: the original built the final string with
 * sprintf(buffer, "%s %s controller", buffer, ...), i.e. with the
 * destination also appearing as a source — copying between overlapping
 * objects in sprintf() is undefined behavior.  Append with strcat()
 * instead; the pieces fit well within the 64-byte buffer.
 */
static int
ata_highpoint_probe(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(dev);
	const struct ata_chip_id *idx;
	static const struct ata_chip_id ids[] =
	    {{ ATA_HPT374, 0x07, HPT_374, 0,       ATA_UDMA6, "HPT374" },
	     { ATA_HPT372, 0x02, HPT_372, 0,       ATA_UDMA6, "HPT372N" },
	     { ATA_HPT372, 0x01, HPT_372, 0,       ATA_UDMA6, "HPT372" },
	     { ATA_HPT371, 0x01, HPT_372, 0,       ATA_UDMA6, "HPT371" },
	     { ATA_HPT366, 0x05, HPT_372, 0,       ATA_UDMA6, "HPT372" },
	     { ATA_HPT366, 0x03, HPT_370, 0,       ATA_UDMA5, "HPT370" },
	     { ATA_HPT366, 0x02, HPT_366, 0,       ATA_UDMA4, "HPT368" },
	     { ATA_HPT366, 0x00, HPT_366, HPT_OLD, ATA_UDMA4, "HPT366" },
	     { ATA_HPT302, 0x01, HPT_372, 0,       ATA_UDMA6, "HPT302" },
	     { 0, 0, 0, 0, 0, 0}};
	char buffer[64];

	if (pci_get_vendor(dev) != ATA_HIGHPOINT_ID)
		return ENXIO;

	if (!(idx = ata_match_chip(dev, ids)))
		return ENXIO;

	strcpy(buffer, "HighPoint ");
	strcat(buffer, idx->text);
	/* The HPT374 exposes two PCI functions, two channels each. */
	if (idx->cfg1 == HPT_374) {
		if (pci_get_function(dev) == 0)
			strcat(buffer, " (channel 0+1)");
		if (pci_get_function(dev) == 1)
			strcat(buffer, " (channel 2+3)");
	}
	strcat(buffer, " ");
	strcat(buffer, ata_mode2str(idx->max_dma));
	strcat(buffer, " controller");
	device_set_desc_copy(dev, buffer);
	ctlr->chip = idx;
	ctlr->chipinit = ata_highpoint_chipinit;
	return (BUS_PROBE_LOW_PRIORITY);
}
/*
 * Probe: ask hpt_match() whether any registered HIM claims this PCI
 * function.  Returns BUS_PROBE_DEFAULT on a match, ENXIO otherwise.
 */
static int hpt_probe(device_t dev)
{
	HIM *him;

	him = hpt_match(dev);
	if (him == NULL)
		return (ENXIO);

	KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
		pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
	));
	device_set_desc(dev, him->name);
	return (BUS_PROBE_DEFAULT);
}
/*
 * Set the SYSTEM_IDLE_TIMEOUT to 80 ns on nForce2 systems to work
 * around a hang that is triggered when the CPU generates a very fast
 * CONNECT/HALT cycle sequence.  Specifically, the hang can result in
 * the lapic timer being stopped.
 *
 * This requires changing the value for config register at offset 0x6c
 * for the Host-PCI bridge at bus/dev/function 0/0/0:
 *
 * Chip   Current Value   New Value
 * ----   ----------      ----------
 * C17    0x1F0FFF01      0x1F01FF01
 * C18D   0x9F0FFF01      0x9F01FF01
 *
 * We do this by always clearing the bits in 0x000e0000.
 *
 * See also: http://lkml.org/lkml/2004/5/3/157
 */
static void
fixc1_nforce2(device_t dev)
{
	uint32_t val;

	/* Only the Host-PCI bridge at 0/0/0 needs the workaround. */
	if (pci_get_bus(dev) != 0 || pci_get_slot(dev) != 0 ||
	    pci_get_function(dev) != 0)
		return;

	val = pci_read_config(dev, 0x6c, 4);
	if ((val & 0x000e0000) == 0)
		return;

	printf("Correcting nForce2 C1 CPU disconnect hangs\n");
	val &= ~0x000e0000;
	pci_write_config(dev, 0x6c, val, 4);
}
/*
 * Set the ARI_EN bit in the lowest-numbered PCI function with the SR-IOV
 * capability. This bit is only writeable on the lowest-numbered PF but
 * affects all PFs on the device.
 */
static int
pci_iov_set_ari(device_t bus)
{
	device_t lowest;
	device_t *devlist;
	int i, error, devcount, lowest_func, lowest_pos, iov_pos, dev_func;
	uint16_t iov_ctl;

	/* If ARI is disabled on the downstream port there is nothing to do. */
	if (!PCIB_ARI_ENABLED(device_get_parent(bus)))
		return (0);

	error = device_get_children(bus, &devlist, &devcount);
	if (error != 0)
		return (error);

	/*
	 * Scan children for the SR-IOV capability, remembering the PF with
	 * the lowest function number.  lowest_func/lowest_pos are only read
	 * once lowest != NULL, so the short-circuit keeps them from being
	 * used uninitialized.
	 */
	lowest = NULL;
	for (i = 0; i < devcount; i++) {
		if (pci_find_extcap(devlist[i], PCIZ_SRIOV, &iov_pos) == 0) {
			dev_func = pci_get_function(devlist[i]);
			if (lowest == NULL || dev_func < lowest_func) {
				lowest = devlist[i];
				lowest_func = dev_func;
				lowest_pos = iov_pos;
			}
		}
	}
	free(devlist, M_TEMP);

	/*
	 * If we called this function some device must have the SR-IOV
	 * capability.
	 */
	KASSERT(lowest != NULL,
	    ("Could not find child of %s with SR-IOV capability",
	    device_get_nameunit(bus)));

	/* Set ARI_EN, then read back to verify the bit actually stuck. */
	iov_ctl = pci_read_config(lowest, lowest_pos + PCIR_SRIOV_CTL, 2);
	iov_ctl |= PCIM_SRIOV_ARI_EN;
	pci_write_config(lowest, lowest_pos + PCIR_SRIOV_CTL, iov_ctl, 2);
	if ((pci_read_config(lowest, lowest_pos + PCIR_SRIOV_CTL, 2) &
	    PCIM_SRIOV_ARI_EN) == 0) {
		device_printf(lowest, "failed to enable ARI\n");
		return (ENXIO);
	}
	return (0);
}
static struct pptdev * ppt_find(int bus, int slot, int func) { device_t dev; int i, b, s, f; for (i = 0; i < num_pptdevs; i++) { dev = pptdevs[i].dev; b = pci_get_bus(dev); s = pci_get_slot(dev); f = pci_get_function(dev); if (bus == b && slot == s && func == f) return (&pptdevs[i]); } return (NULL); }
/*
 * Release every passthrough device currently assigned to the given VM.
 * Always returns 0.
 */
int
ppt_unassign_all(struct vm *vm)
{
	device_t dev;
	int i;

	for (i = 0; i < num_pptdevs; i++) {
		if (pptdevs[i].vm != vm)
			continue;
		dev = pptdevs[i].dev;
		vm_unassign_pptdev(vm, pci_get_bus(dev), pci_get_slot(dev),
		    pci_get_function(dev));
	}

	return (0);
}
/*
 * Point ACPI-CA's per-handle device data at the PCI-enumerated device_t,
 * first removing any placeholder device that acpi0 created for the same
 * handle during its namespace walk.
 */
static void
acpi_pci_update_device(ACPI_HANDLE handle, device_t pci_child)
{
	ACPI_STATUS status;
	device_t child;

	/*
	 * Lookup and remove the unused device that acpi0 creates when it walks
	 * the namespace creating devices.
	 */
	child = acpi_get_device(handle);
	if (child != NULL) {
		if (device_is_alive(child)) {
			/*
			 * The TabletPC TC1000 has a second PCI-ISA bridge
			 * that has a _HID for an acpi_sysresource device.
			 * In that case, leave ACPI-CA's device data pointing
			 * at the ACPI-enumerated device.
			 */
			device_printf(child,
			    "Conflicts with PCI device %d:%d:%d\n",
			    pci_get_bus(pci_child), pci_get_slot(pci_child),
			    pci_get_function(pci_child));
			return;
		}
		KASSERT(device_get_parent(child) ==
		    devclass_get_device(devclass_find("acpi"), 0),
		    ("%s: child (%s)'s parent is not acpi0", __func__,
		    acpi_name(handle)));
		device_delete_child(device_get_parent(child), child);
	}

	/*
	 * Update ACPI-CA to use the PCI enumerated device_t for this handle.
	 * Failures are logged but not fatal.
	 */
	status = AcpiDetachData(handle, acpi_fake_objhandler);
	if (ACPI_FAILURE(status))
		kprintf("WARNING: Unable to detach object data from %s - %s\n",
		    acpi_name(handle), AcpiFormatException(status));
	status = AcpiAttachData(handle, acpi_fake_objhandler, pci_child);
	if (ACPI_FAILURE(status))
		kprintf("WARNING: Unable to attach object data to %s - %s\n",
		    acpi_name(handle), AcpiFormatException(status));
}
/*
 * Route PCI interrupts on the EP80219 board: devices on the ATU's own
 * bus map to fixed XINT lines by slot number.  Anything else is reported
 * as unmapped and 0 is returned.
 *
 * Fix: the "No mapping" printf printed the interrupt pin with %c, which
 * emits an unprintable control byte for pin values 1-4; use %d so the
 * pin number is legible.
 */
extern int
machdep_pci_route_interrupt(device_t pcib, device_t dev, int pin)
{
	int bus;
	int device;
	int func;
	uint32_t busno;
	struct i80321_pci_softc *sc = device_get_softc(pcib);

	bus = pci_get_bus(dev);
	device = pci_get_slot(dev);
	func = pci_get_function(dev);

	/* Read our bus number from the ATU; 0xff means "not yet assigned". */
	busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCIXSR);
	busno = PCIXSR_BUSNO(busno);
	if (busno == 0xff)
		busno = 0;

	/* Only devices directly on our bus have a fixed routing. */
	if (bus != busno)
		goto no_mapping;

	switch (device) {
	/* EP80219 PCI */
	case 1: /* Ethernet i82555 10/100 */
		printf("Device %d routed to irq %d\n", device, ICU_INT_XINT(0));
		return (ICU_INT_XINT(0));
	case 2: /* UART */
		printf("Device %d routed to irq %d\n", device, ICU_INT_XINT(1));
		return (ICU_INT_XINT(1));
	case 3:
		/*
		 * The S-ATA chips are behind the bridge, and all of
		 * the S-ATA interrupts are wired together.
		 */
		printf("Device %d routed to irq %d\n", device, ICU_INT_XINT(2));
		return (ICU_INT_XINT(2));
	case 4: /* MINI-PIC_INT */
		printf("Device %d routed to irq %d\n", device, ICU_INT_XINT(3));
		return( ICU_INT_XINT(3));
	default:
no_mapping:
		printf("No mapping for %d/%d/%d/%d\n", bus, device, func, pin);
	}
	return (0);
}
/*
 * Route an interrupt pin through this bridge's OFW interrupt map when
 * one exists; otherwise apply the standard PCI swizzle, or fall through
 * to the grandparent bridge.
 *
 * Fix: the '&' in two address-of expressions had been mangled into the
 * '®' mojibake (HTML &reg; residue), which does not compile.
 */
static int
ofw_pcib_pci_route_interrupt(device_t bridge, device_t dev, int intpin)
{
	struct ofw_pcib_softc *sc;
	struct ofw_bus_iinfo *ii;
	struct ofw_pci_register reg;
	cell_t pintr, mintr;
	phandle_t iparent;
	uint8_t maskbuf[sizeof(reg) + sizeof(pintr)];

	sc = device_get_softc(bridge);
	ii = &sc->ops_iinfo;
	if (ii->opi_imapsz > 0) {
		pintr = intpin;

		/* Fabricate imap information if this isn't an OFW device */
		bzero(&reg, sizeof(reg));
		reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) |
		    (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) |
		    (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT);

		if (ofw_bus_lookup_imap(ofw_bus_get_node(dev), ii, &reg,
		    sizeof(reg), &pintr, sizeof(pintr), &mintr, sizeof(mintr),
		    &iparent, maskbuf)) {
			/*
			 * If we've found a mapping, return it and don't map
			 * it again on higher levels - that causes problems
			 * in some cases, and never seems to be required.
			 */
			return (ofw_bus_map_intr(dev, iparent, mintr));
		}
	} else if (intpin >= 1 && intpin <= 4) {
		/*
		 * When an interrupt map is missing, we need to do the
		 * standard PCI swizzle and continue mapping at the parent.
		 */
		return (pcib_route_interrupt(bridge, dev, intpin));
	}
	return (PCIB_ROUTE_INTERRUPT(device_get_parent(device_get_parent(
	    bridge)), bridge, intpin));
}
/*
 * Cypress chipset support functions
 */
/*
 * the Cypress chip is a mess, it contains two ATA functions, but
 * both channels are visible on the first one.
 * simply ignore the second function for now, as the right
 * solution (ignoring the second channel on the first function)
 * doesn't work with the crappy ATA interrupt setup on the alpha.
 */
static int
ata_cypress_probe(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(dev);

	if (pci_get_devid(dev) != ATA_CYPRESS_82C693)
		return ENXIO;
	if (pci_get_function(dev) != 1)
		return ENXIO;
	if (pci_get_subclass(dev) != PCIS_STORAGE_IDE)
		return ENXIO;

	device_set_desc(dev, "Cypress 82C693 ATA controller");
	ctlr->chipinit = ata_cypress_chipinit;
	return (BUS_PROBE_LOW_PRIORITY);
}
/*
 * Probe whether this PCI function may be claimed as a passthrough
 * device: it must be administratively allowed and must be a normal
 * (endpoint, type-0 header) device.  Returns 0 to claim, ENXIO to
 * decline.
 */
static int
ppt_probe(device_t dev)
{
	struct pci_devinfo *dinfo;
	int bus, slot, func;

	dinfo = (struct pci_devinfo *)device_get_ivars(dev);

	bus = pci_get_bus(dev);
	slot = pci_get_slot(dev);
	func = pci_get_function(dev);

	/*
	 * To qualify as a pci passthrough device a device must:
	 * - be allowed by administrator to be used in this role
	 * - be an endpoint device
	 */
	if (!vmm_is_pptdev(bus, slot, func))
		return (ENXIO);
	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
		return (ENXIO);

	return (0);
}
/*
 * Attach the per-port SR-IOV shim: ask the main (PF4) driver whether the
 * port backing this PF exists and, if so, register PF/VF schemas so VFs
 * can be created.  Always returns 0; a missing port or SR-IOV setup
 * failure leaves the shim unattached but is not fatal.
 */
static int
t4iov_attach_child(device_t dev)
{
	struct t4iov_softc *sc;
#ifdef PCI_IOV
	nvlist_t *pf_schema, *vf_schema;
#endif
	device_t pdev;
	int error;

	sc = device_get_softc(dev);
	MPASS(!sc->sc_attached);

	/*
	 * PF0-3 are associated with a specific port on the NIC (PF0
	 * with port 0, etc.). Ask the PF4 driver for the device for
	 * this function's associated port to determine if the port is
	 * present.
	 */
	error = T4_READ_PORT_DEVICE(sc->sc_main, pci_get_function(dev),
	    &pdev);
	if (error)
		return (0);

#ifdef PCI_IOV
	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	error = pci_iov_attach_name(dev, pf_schema, vf_schema, "%s",
	    device_get_nameunit(pdev));
	if (error) {
		device_printf(dev, "Failed to initialize SR-IOV: %d\n",
		    error);
		return (0);
	}
#endif
	sc->sc_attached = true;
	return (0);
}
/*
 * Route a PCI interrupt pin through the Open Firmware interrupt map.
 * Returns the mapped interrupt, the pin itself when it looks like a real
 * interrupt number (> 4), or PCI_INVALID_IRQ when no route exists.
 *
 * Fix: the '&' in one address-of expression had been mangled into the
 * '®' mojibake (HTML &reg; residue), which does not compile.
 */
static int
ofw_pci_route_interrupt(device_t bus, device_t dev, int pin)
{
	struct ofw_pci_softc *sc;
	struct ofw_pci_register reg;
	uint32_t pintr, mintr;
	phandle_t iparent;
	uint8_t maskbuf[sizeof(reg) + sizeof(pintr)];

	sc = device_get_softc(bus);
	pintr = pin;
	/*
	 * NOTE(review): unlike the sibling implementations in this file,
	 * 'reg' is passed to ofw_bus_lookup_imap() without being zeroed or
	 * filled from the child's bus/slot/function first — confirm the
	 * lookup does not consume uninitialized register data here.
	 */
	if (ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo,
	    &reg, sizeof(reg), &pintr, sizeof(pintr), &mintr, sizeof(mintr),
	    &iparent, maskbuf))
		return (MAP_IRQ(iparent, mintr));

	/* Maybe it's a real interrupt, not an intpin */
	if (pin > 4)
		return (pin);

	device_printf(bus, "could not route pin %d for device %d.%d\n",
	    pin, pci_get_slot(dev), pci_get_function(dev));
	return (PCI_INVALID_IRQ);
}
/*
 * Try to fetch the video BIOS via the ACPI ATRM method.  Only applies to
 * discrete cards (IGP is rejected); reads the ROM page by page into
 * rdev->bios and validates the 0x55AA option-ROM signature.  Returns
 * true on success.
 */
static bool radeon_atrm_get_bios(struct radeon_device *rdev)
{
	int ret;
	int size = 256 * 1024;	/* maximum BIOS image size to read */
	int i;
	device_t dev;
	ACPI_HANDLE dhandle, atrm_handle;
	ACPI_STATUS status;
	bool found = false;

	DRM_INFO("%s: ===> Try ATRM...\n", __func__);

	/* ATRM is for the discrete card only */
	if (rdev->flags & RADEON_IS_IGP) {
		DRM_INFO("%s: IGP card detected, skipping this method...\n",
		    __func__);
		return false;
	}

#ifdef FREEBSD_WIP
	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
#endif /* FREEBSD_WIP */
	/*
	 * NOTE(review): unlike the loop in the FREEBSD_WIP variant above,
	 * only the FIRST VGA-class device is examined here.
	 */
	if ((dev = pci_find_class(PCIC_DISPLAY, PCIS_DISPLAY_VGA)) != NULL) {
		DRM_INFO("%s: pci_find_class() found: %d:%d:%d:%d, vendor=%04x, device=%04x\n",
		    __func__,
		    pci_get_domain(dev),
		    pci_get_bus(dev), pci_get_slot(dev),
		    pci_get_function(dev),
		    pci_get_vendor(dev), pci_get_device(dev));
		DRM_INFO("%s: Get ACPI device handle\n", __func__);
		dhandle = acpi_get_handle(dev);
#ifdef FREEBSD_WIP
		if (!dhandle)
			continue;
#endif /* FREEBSD_WIP */
		if (!dhandle)
			return false;
		DRM_INFO("%s: Get ACPI handle for \"ATRM\"\n", __func__);
		status = AcpiGetHandle(dhandle, "ATRM", &atrm_handle);
		if (!ACPI_FAILURE(status)) {
			found = true;
#ifdef FREEBSD_WIP
			break;
#endif /* FREEBSD_WIP */
		} else {
			DRM_INFO("%s: Failed to get \"ATRM\" handle: %s\n",
			    __func__, AcpiFormatException(status));
		}
	}

	if (!found)
		return false;

	rdev->bios = malloc(size, DRM_MEM_DRIVER, M_NOWAIT);
	if (!rdev->bios) {
		DRM_ERROR("Unable to allocate bios\n");
		return false;
	}

	/* Pull the ROM one ATRM_BIOS_PAGE at a time; a short read ends it. */
	for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
		DRM_INFO("%s: Call radeon_atrm_call()\n", __func__);
		ret = radeon_atrm_call(atrm_handle,
		    rdev->bios,
		    (i * ATRM_BIOS_PAGE),
		    ATRM_BIOS_PAGE);
		if (ret < ATRM_BIOS_PAGE)
			break;
	}

	/* Reject empty or unsigned images (option ROMs start 0x55 0xAA). */
	if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
		if (i == 0) {
			DRM_INFO("%s: Incorrect BIOS size\n", __func__);
		} else {
			DRM_INFO("%s: Incorrect BIOS signature: 0x%02X%02X\n",
			    __func__, rdev->bios[0], rdev->bios[1]);
		}
		free(rdev->bios, DRM_MEM_DRIVER);
		return false;
	}
	return true;
}
#else
/* Stub used when ACPI/ATRM support is compiled out. */
static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
{
	return false;
}
#endif

/*
 * Read the BIOS on Northern Islands parts whose ROM access is normally
 * disabled: save the bus/VGA/ROM control registers, enable the ROM and
 * disable VGA mode, perform the read, then restore every register.
 */
static bool ni_read_disabled_bios(struct radeon_device *rdev)
{
	u32 bus_cntl;
	u32 d1vga_control;
	u32 d2vga_control;
	u32 vga_render_control;
	u32 rom_cntl;
	bool r;

	DRM_INFO("%s: ===> Try disabled BIOS (ni)...\n", __func__);

	/* Save everything we are about to clobber. */
	bus_cntl = RREG32(R600_BUS_CNTL);
	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
	rom_cntl = RREG32(R600_ROM_CNTL);

	/* enable the rom */
	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
	/* Disable VGA mode */
	WREG32(AVIVO_D1VGA_CONTROL,
	    (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
	    AVIVO_DVGA_CONTROL_TIMING_SELECT)));
	WREG32(AVIVO_D2VGA_CONTROL,
	    (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
	    AVIVO_DVGA_CONTROL_TIMING_SELECT)));
	WREG32(AVIVO_VGA_RENDER_CONTROL,
	    (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
	WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);

	r = radeon_read_bios(rdev);

	/* restore regs */
	WREG32(R600_BUS_CNTL, bus_cntl);
	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
	WREG32(R600_ROM_CNTL, rom_cntl);
	return r;
}
/****************************************************************************** agtiapi_InitResource() Purpose: Mapping PCI memory space Allocate and initialize per card based resource Parameters: ag_card_info_t *pCardInfo (IN) Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: ******************************************************************************/ STATIC agBOOLEAN agtiapi_InitResource( ag_card_info_t *thisCardInst ) { struct agtiapi_softc *pmsc = thisCardInst->pCard; device_t devx = thisCardInst->pPCIDev; //AGTIAPI_PRINTK( "agtiapi_InitResource: begin; pointer values %p / %p \n", // devx, thisCardInst ); // no IO mapped card implementation, we'll implement memory mapping if( agtiapi_typhAlloc( thisCardInst ) == AGTIAPI_FAIL ) { printf( "agtiapi_InitResource: failed call to agtiapi_typhAlloc \n" ); return AGTIAPI_FAIL; } AGTIAPI_PRINTK( "agtiapi_InitResource: dma alloc MemSpan %p -- %p\n", (void*) pmsc->typh_busaddr, (void*) ( (U32_64)pmsc->typh_busaddr + pmsc->typhn ) ); // logical BARs for SPC: // bar 0 and 1 - logical BAR0 // bar 2 and 3 - logical BAR1 // bar4 - logical BAR2 // bar5 - logical BAR3 // Skiping the assignments for bar 1 and bar 3 (making bar 0, 2 64-bit): U32 bar; U32 lBar = 0; // logicalBar for (bar = 0; bar < PCI_NUMBER_BARS; bar++) { if ((bar==1) || (bar==3)) continue; thisCardInst->pciMemBaseRIDSpc[lBar] = PCIR_BAR(bar); thisCardInst->pciMemBaseRscSpc[lBar] = bus_alloc_resource_any( devx, SYS_RES_MEMORY, &(thisCardInst->pciMemBaseRIDSpc[lBar]), RF_ACTIVE ); AGTIAPI_PRINTK( "agtiapi_InitResource: bus_alloc_resource_any rtn %p \n", thisCardInst->pciMemBaseRscSpc[lBar] ); if ( thisCardInst->pciMemBaseRscSpc[lBar] != NULL ) { thisCardInst->pciMemVirtAddrSpc[lBar] = (caddr_t)rman_get_virtual( thisCardInst->pciMemBaseRscSpc[lBar] ); thisCardInst->pciMemBaseSpc[lBar] = bus_get_resource_start( devx, SYS_RES_MEMORY, thisCardInst->pciMemBaseRIDSpc[lBar]); thisCardInst->pciMemSizeSpc[lBar] = bus_get_resource_count( devx, SYS_RES_MEMORY, 
thisCardInst->pciMemBaseRIDSpc[lBar] ); AGTIAPI_PRINTK( "agtiapi_InitResource: PCI: bar %d, lBar %d " "VirtAddr=%lx, len=%d\n", bar, lBar, (long unsigned int)thisCardInst->pciMemVirtAddrSpc[lBar], thisCardInst->pciMemSizeSpc[lBar] ); } else { thisCardInst->pciMemVirtAddrSpc[lBar] = 0; thisCardInst->pciMemBaseSpc[lBar] = 0; thisCardInst->pciMemSizeSpc[lBar] = 0; } lBar++; } thisCardInst->pciMemVirtAddr = thisCardInst->pciMemVirtAddrSpc[0]; thisCardInst->pciMemSize = thisCardInst->pciMemSizeSpc[0]; thisCardInst->pciMemBase = thisCardInst->pciMemBaseSpc[0]; // Allocate all TI data structure required resources. // tiLoLevelResource U32 numVal; ag_resource_info_t *pRscInfo; pRscInfo = &thisCardInst->tiRscInfo; pRscInfo->tiLoLevelResource.loLevelOption.pciFunctionNumber = pci_get_function( devx ); struct timeval tv; tv.tv_sec = 1; tv.tv_usec = 0; int ticksPerSec; ticksPerSec = tvtohz( &tv ); int uSecPerTick = 1000000/USEC_PER_TICK; if (pRscInfo->tiLoLevelResource.loLevelMem.count != 0) { //AGTIAPI_INIT("agtiapi_InitResource: loLevelMem count = %d\n", // pRscInfo->tiLoLevelResource.loLevelMem.count); // adjust tick value to meet Linux requirement pRscInfo->tiLoLevelResource.loLevelOption.usecsPerTick = uSecPerTick; AGTIAPI_PRINTK( "agtiapi_InitResource: " "pRscInfo->tiLoLevelResource.loLevelOption.usecsPerTick" " 0x%x\n", pRscInfo->tiLoLevelResource.loLevelOption.usecsPerTick ); for( numVal = 0; numVal < pRscInfo->tiLoLevelResource.loLevelMem.count; numVal++ ) { if( pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].totalLength == 0 ) { AGTIAPI_PRINTK("agtiapi_InitResource: skip ZERO %d\n", numVal); continue; } // check for 64 bit alignment if ( pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].alignment < AGTIAPI_64BIT_ALIGN ) { AGTIAPI_PRINTK("agtiapi_InitResource: set ALIGN %d\n", numVal); pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].alignment = AGTIAPI_64BIT_ALIGN; } if( ((pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].type & (BIT(0) | BIT(1))) == 
TI_DMA_MEM) || ((pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].type & (BIT(0) | BIT(1))) == TI_CACHED_DMA_MEM)) { if ( thisCardInst->dmaIndex >= sizeof(thisCardInst->tiDmaMem) / sizeof(thisCardInst->tiDmaMem[0]) ) { AGTIAPI_PRINTK( "Invalid dmaIndex %d ERROR\n", thisCardInst->dmaIndex ); return AGTIAPI_FAIL; } thisCardInst->tiDmaMem[thisCardInst->dmaIndex].type = #ifdef CACHED_DMA pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].type & (BIT(0) | BIT(1)); #else TI_DMA_MEM; #endif if( agtiapi_MemAlloc( thisCardInst, &thisCardInst->tiDmaMem[thisCardInst->dmaIndex].dmaVirtAddr, &thisCardInst->tiDmaMem[thisCardInst->dmaIndex].dmaPhysAddr, &pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].virtPtr, &pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal]. physAddrUpper, &pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal]. physAddrLower, pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].totalLength, thisCardInst->tiDmaMem[thisCardInst->dmaIndex].type, pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].alignment) != AGTIAPI_SUCCESS ) { return AGTIAPI_FAIL; } thisCardInst->tiDmaMem[thisCardInst->dmaIndex].memSize = pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].totalLength; //AGTIAPI_INIT("agtiapi_InitResource: LoMem %d dmaIndex=%d DMA virt" // " %p, phys 0x%x, length %d align %d\n", // numVal, pCardInfo->dmaIndex, // pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].virtPtr, // pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].physAddrLower, // pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].totalLength, // pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].alignment); thisCardInst->dmaIndex++; } else if ( (pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].type & (BIT(0) | BIT(1))) == TI_CACHED_MEM) { if (thisCardInst->cacheIndex >= sizeof(thisCardInst->tiCachedMem) / sizeof(thisCardInst->tiCachedMem[0])) { AGTIAPI_PRINTK( "Invalid cacheIndex %d ERROR\n", thisCardInst->cacheIndex ); return AGTIAPI_FAIL; } if ( agtiapi_MemAlloc( thisCardInst, 
&thisCardInst->tiCachedMem[thisCardInst->cacheIndex], (vm_paddr_t *)agNULL, &pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].virtPtr, (U32 *)agNULL, (U32 *)agNULL, pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].totalLength, TI_CACHED_MEM, pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].alignment) != AGTIAPI_SUCCESS ) { return AGTIAPI_FAIL; } //AGTIAPI_INIT("agtiapi_InitResource: LoMem %d cacheIndex=%d CACHED " // "vaddr %p / %p, length %d align %d\n", // numVal, pCardInfo->cacheIndex, // pCardInfo->tiCachedMem[pCardInfo->cacheIndex], // pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].virtPtr, // pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].totalLength, // pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].alignment); thisCardInst->cacheIndex++; } else if ( ((pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].type & (BIT(0) | BIT(1))) == TI_DMA_MEM_CHIP)) { // not expecting this case, print warning that should get attention printf( "RED ALARM: we need a BAR for TI_DMA_MEM_CHIP, ignoring!" ); } else { printf( "agtiapi_InitResource: Unknown required memory type %d " "ERROR!\n", pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].type); return AGTIAPI_FAIL; } } } // end: TI data structure resources ... 
// begin: tiInitiatorResource if ( pmsc->flags & AGTIAPI_INITIATOR ) { if ( pRscInfo->tiInitiatorResource.initiatorMem.count != 0 ) { //AGTIAPI_INIT("agtiapi_InitResource: initiatorMem count = %d\n", // pRscInfo->tiInitiatorResource.initiatorMem.count); numVal = (U32)( pRscInfo->tiInitiatorResource.initiatorOption.usecsPerTick / uSecPerTick ); if( pRscInfo->tiInitiatorResource.initiatorOption.usecsPerTick % uSecPerTick > 0 ) pRscInfo->tiInitiatorResource.initiatorOption.usecsPerTick = (numVal + 1) * uSecPerTick; else pRscInfo->tiInitiatorResource.initiatorOption.usecsPerTick = numVal * uSecPerTick; for ( numVal = 0; numVal < pRscInfo->tiInitiatorResource.initiatorMem.count; numVal++ ) { // check for 64 bit alignment if( pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal]. alignment < AGTIAPI_64BIT_ALIGN ) { pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal]. alignment = AGTIAPI_64BIT_ALIGN; } if( thisCardInst->cacheIndex >= sizeof( thisCardInst->tiCachedMem) / sizeof( thisCardInst->tiCachedMem[0])) { AGTIAPI_PRINTK( "Invalid cacheIndex %d ERROR\n", thisCardInst->cacheIndex ); return AGTIAPI_FAIL; } // initiator memory is cached, no check is needed if( agtiapi_MemAlloc( thisCardInst, (void *)&thisCardInst->tiCachedMem[thisCardInst->cacheIndex], (vm_paddr_t *)agNULL, &pRscInfo->tiInitiatorResource.initiatorMem. tdCachedMem[numVal].virtPtr, (U32 *)agNULL, (U32 *)agNULL, pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal]. totalLength, TI_CACHED_MEM, pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal]. alignment) != AGTIAPI_SUCCESS) { return AGTIAPI_FAIL; } // AGTIAPI_INIT("agtiapi_InitResource: IniMem %d cacheIndex=%d CACHED " // "vaddr %p / %p, length %d align 0x%x\n", // numVal, // pCardInfo->cacheIndex, // pCardInfo->tiCachedMem[pCardInfo->cacheIndex], // pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal]. // virtPtr, //pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal]. 
// totalLength, // pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal]. // alignment); thisCardInst->cacheIndex++; } } } // end: tiInitiatorResource // begin: tiTdSharedMem if (pRscInfo->tiSharedMem.tdSharedCachedMem1.totalLength != 0) { // check for 64 bit alignment if( pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment < AGTIAPI_64BIT_ALIGN ) { pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment = AGTIAPI_64BIT_ALIGN; } if( (pRscInfo->tiSharedMem.tdSharedCachedMem1.type & (BIT(0) | BIT(1))) == TI_DMA_MEM ) { if( thisCardInst->dmaIndex >= sizeof(thisCardInst->tiDmaMem) / sizeof(thisCardInst->tiDmaMem[0]) ) { AGTIAPI_PRINTK( "Invalid dmaIndex %d ERROR\n", thisCardInst->dmaIndex); return AGTIAPI_FAIL; } if( agtiapi_MemAlloc( thisCardInst, (void *)&thisCardInst-> tiDmaMem[thisCardInst->dmaIndex].dmaVirtAddr, &thisCardInst->tiDmaMem[thisCardInst->dmaIndex]. dmaPhysAddr, &pRscInfo->tiSharedMem.tdSharedCachedMem1.virtPtr, &pRscInfo->tiSharedMem.tdSharedCachedMem1. physAddrUpper, &pRscInfo->tiSharedMem.tdSharedCachedMem1. physAddrLower, pRscInfo->tiSharedMem.tdSharedCachedMem1. 
totalLength, TI_DMA_MEM, pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment) != AGTIAPI_SUCCESS ) return AGTIAPI_FAIL; thisCardInst->tiDmaMem[thisCardInst->dmaIndex].memSize = pRscInfo->tiSharedMem.tdSharedCachedMem1.totalLength + pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment; // printf( "agtiapi_InitResource: SharedMem DmaIndex=%d DMA " // "virt %p / %p, phys 0x%x, align %d\n", // thisCardInst->dmaIndex, // thisCardInst->tiDmaMem[thisCardInst->dmaIndex].dmaVirtAddr, // pRscInfo->tiSharedMem.tdSharedCachedMem1.virtPtr, // pRscInfo->tiSharedMem.tdSharedCachedMem1.physAddrLower, // pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment); thisCardInst->dmaIndex++; } else if( (pRscInfo->tiSharedMem.tdSharedCachedMem1.type & (BIT(0) | BIT(1))) == TI_CACHED_MEM ) { if( thisCardInst->cacheIndex >= sizeof(thisCardInst->tiCachedMem) / sizeof(thisCardInst->tiCachedMem[0]) ) { AGTIAPI_PRINTK( "Invalid cacheIndex %d ERROR\n", thisCardInst->cacheIndex); return AGTIAPI_FAIL; } if( agtiapi_MemAlloc( thisCardInst, (void *)&thisCardInst-> tiCachedMem[thisCardInst->cacheIndex], (vm_paddr_t *)agNULL, &pRscInfo->tiSharedMem.tdSharedCachedMem1.virtPtr, (U32 *)agNULL, (U32 *)agNULL, pRscInfo-> tiSharedMem.tdSharedCachedMem1.totalLength, TI_CACHED_MEM, pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment) != AGTIAPI_SUCCESS ) return AGTIAPI_FAIL; // printf( "agtiapi_InitResource: SharedMem cacheIndex=%d CACHED " // "vaddr %p / %p, length %d align 0x%x\n", // thisCardInst->cacheIndex, // thisCardInst->tiCachedMem[thisCardInst->cacheIndex], // pRscInfo->tiSharedMem.tdSharedCachedMem1.virtPtr, // pRscInfo->tiSharedMem.tdSharedCachedMem1.totalLength, // pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment); AGTIAPI_PRINTK( "agtiapi_InitResource: SharedMem cacheIndex=%d CACHED " "vaddr %p / %p, length %d align 0x%x\n", thisCardInst->cacheIndex, thisCardInst->tiCachedMem[thisCardInst->cacheIndex], pRscInfo->tiSharedMem.tdSharedCachedMem1.virtPtr, 
pRscInfo->tiSharedMem.tdSharedCachedMem1.totalLength, pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment ); thisCardInst->cacheIndex++; } else { AGTIAPI_PRINTK( "agtiapi_InitResource: " "Unknown required memory type ERROR!\n" ); return AGTIAPI_FAIL; } } // end: tiTdSharedMem DELAY( 200000 ); // or use AGTIAPI_INIT_MDELAY(200); return AGTIAPI_SUCCESS; } // agtiapi_InitResource() ends here
static void nvme_sim_action(struct cam_sim *sim, union ccb *ccb) { struct nvme_controller *ctrlr; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_sim_action: func= %#x\n", ccb->ccb_h.func_code)); ctrlr = sim2ctrlr(sim); mtx_assert(&ctrlr->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_CALC_GEOMETRY: /* Calculate Geometry Totally nuts ? XXX */ /* * Only meaningful for old-school SCSI disks since only the SCSI * da driver generates them. Reject all these that slip through. */ /*FALLTHROUGH*/ case XPT_ABORT: /* Abort the specified CCB */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: /* * NVMe doesn't really have different transfer settings, but * other parts of CAM think failure here is a big deal. */ ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; device_t dev = ctrlr->dev; /* * NVMe may have multiple LUNs on the same path. Current generation * of NVMe devives support only a single name space. Multiple name * space drives are coming, but it's unclear how we should report * them up the stack. */ cpi->version_num = 1; cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_misc = PIM_UNMAPPED | PIM_NOSCAN; cpi->hba_eng_cnt = 0; cpi->max_target = 0; cpi->max_lun = ctrlr->cdata.nn; cpi->maxio = ctrlr->max_xfer_size; cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = nvme_link_kBps(ctrlr); strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "NVMe", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_NVME; /* XXX XPORT_PCIE ? 
*/ cpi->transport_version = nvme_mmio_read_4(ctrlr, vs); cpi->protocol = PROTO_NVME; cpi->protocol_version = nvme_mmio_read_4(ctrlr, vs); cpi->xport_specific.nvme.nsid = xpt_path_lun_id(ccb->ccb_h.path); cpi->xport_specific.nvme.domain = pci_get_domain(dev); cpi->xport_specific.nvme.bus = pci_get_bus(dev); cpi->xport_specific.nvme.slot = pci_get_slot(dev); cpi->xport_specific.nvme.function = pci_get_function(dev); cpi->xport_specific.nvme.extra = 0; cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: /* Get transport settings */ { struct ccb_trans_settings *cts; struct ccb_trans_settings_nvme *nvmep; struct ccb_trans_settings_nvme *nvmex; device_t dev; uint32_t status, caps; dev = ctrlr->dev; cts = &ccb->cts; nvmex = &cts->xport_specific.nvme; nvmep = &cts->proto_specific.nvme; status = pcie_read_config(dev, PCIER_LINK_STA, 2); caps = pcie_read_config(dev, PCIER_LINK_CAP, 2); nvmex->valid = CTS_NVME_VALID_SPEC | CTS_NVME_VALID_LINK; nvmex->spec = nvme_mmio_read_4(ctrlr, vs); nvmex->speed = status & PCIEM_LINK_STA_SPEED; nvmex->lanes = (status & PCIEM_LINK_STA_WIDTH) >> 4; nvmex->max_speed = caps & PCIEM_LINK_CAP_MAX_SPEED; nvmex->max_lanes = (caps & PCIEM_LINK_CAP_MAX_WIDTH) >> 4; /* XXX these should be something else maybe ? */ nvmep->valid = 1; nvmep->spec = nvmex->spec; cts->transport = XPORT_NVME; cts->protocol = PROTO_NVME; cts->ccb_h.status = CAM_REQ_CMP; break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* * every driver handles this, but nothing generates it. Assume * it's OK to just say 'that worked'. */ /*FALLTHROUGH*/ case XPT_RESET_DEV: /* Bus Device Reset the specified device */ case XPT_RESET_BUS: /* Reset the specified bus */ /* * NVMe doesn't really support physically resetting the bus. It's part * of the bus scanning dance, so return sucess to tell the process to * proceed. 
*/ ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_NVME_IO: /* Execute the requested I/O operation */ case XPT_NVME_ADMIN: /* or Admin operation */ nvme_sim_nvmeio(sim, ccb); return; /* no done */ default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); }