static int ioat_dca_remove_requester(struct dca_provider *dca,
				     struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
				readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}
static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
				readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}
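/*
 * Both DCA callbacks above open with the same guard: refuse anything that
 * is not a PCI device, then downcast with to_pci_dev(). A minimal sketch
 * of that pattern follows; my_driver_requester() is a hypothetical helper,
 * not part of the driver above.
 */
#include <linux/pci.h>

static int my_driver_requester(struct device *dev)
{
	struct pci_dev *pdev;

	/* Only PCI(e) devices carry the requester ID we need. */
	if (!dev_is_pci(dev))
		return -ENODEV;

	/* Safe: dev_is_pci() guarantees dev is embedded in a pci_dev. */
	pdev = to_pci_dev(dev);
	dev_info(&pdev->dev, "requester %02x:%02x.%d\n",
		 pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));
	return 0;
}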
static u64 pnv_dma_get_required_mask(struct device *dev)
{
	if (dev_is_pci(dev))
		return pnv_pci_dma_get_required_mask(to_pci_dev(dev));

	return __dma_get_required_mask(dev);
}
static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
			      int polarity)
{
#ifdef CONFIG_X86_MPPARSE
	struct mpc_intsrc mp_irq;
	struct pci_dev *pdev;
	unsigned char number;
	unsigned int devfn;
	int ioapic;
	u8 pin;

	if (!acpi_ioapic)
		return 0;
	if (!dev || !dev_is_pci(dev))
		return 0;

	pdev = to_pci_dev(dev);
	number = pdev->bus->number;
	devfn = pdev->devfn;
	pin = pdev->pin;

	/* Build the entry exactly as it would appear in the mptable */
	mp_irq.type = MP_INTSRC;
	mp_irq.irqtype = mp_INT;
	mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
			 (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
	mp_irq.srcbus = number;
	mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
	ioapic = mp_find_ioapic(gsi);
	mp_irq.dstapic = mpc_ioapic_id(ioapic);
	mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);

	mp_save_irq(&mp_irq);
#endif
	return 0;
}
static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
			       int nvec, msi_alloc_info_t *info)
{
	struct pci_dev *pdev, *alias_dev;
	struct msi_domain_info *msi_info;
	int alias_count = 0;

	if (!dev_is_pci(dev))
		return -EINVAL;

	msi_info = msi_get_domain_info(domain->parent);
	pdev = to_pci_dev(dev);
	/*
	 * If pdev is downstream of any aliasing bridges, take an upper
	 * bound of how many other vectors could map to the same DevID.
	 */
	pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
	if (alias_dev != pdev && alias_dev->subordinate)
		pci_walk_bus(alias_dev->subordinate, its_pci_msi_vec_count,
			     &alias_count);

	/* ITS specific DeviceID, as the core ITS ignores dev. */
	info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);

	return msi_info->ops->msi_prepare(domain->parent,
					  dev, max(nvec, alias_count), info);
}
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
{
#ifdef CONFIG_PCI
	if (dev && dev_is_pci(dev))
		return octeon_pci_dma_ops->dma_to_phys(dev, daddr);
#endif
	return daddr;
}
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
#ifdef CONFIG_PCI
	if (dev && dev_is_pci(dev))
		return octeon_pci_dma_ops->phys_to_dma(dev, paddr);
#endif
	return paddr;
}
/*
 * get_node_path fills in @path with the firmware path to the device.
 * Note that if @dev is a parisc device, we don't fill in the 'mod' field.
 * This is because both callers pass the parent and fill in the mod
 * themselves. If @dev is a PCI device, we do fill it in, even though this
 * is inconsistent.
 */
static void get_node_path(struct device *dev, struct hardware_path *path)
{
	int i = 5;

	memset(&path->bc, -1, 6);

	if (dev_is_pci(dev)) {
		unsigned int devfn = to_pci_dev(dev)->devfn;

		path->mod = PCI_FUNC(devfn);
		path->bc[i--] = PCI_SLOT(devfn);
		dev = dev->parent;
	}

	while (dev != &root) {
		if (dev_is_pci(dev)) {
			unsigned int devfn = to_pci_dev(dev)->devfn;

			path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn) << 5);
		} else if (dev->bus == &parisc_bus_type) {
			path->bc[i--] = to_parisc_device(dev)->hw_path;
		}
		dev = dev->parent;
	}
}
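/*
 * get_node_path() packs a slot/function pair into one path byte with
 * PCI_SLOT(devfn) | (PCI_FUNC(devfn) << 5). For reference, devfn itself
 * keeps the slot in bits 7:3 and the function in bits 2:0. A standalone
 * demo of both encodings (the sample devfn value is made up):
 */
#include <stdio.h>

#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)	/* same as the kernel macro */
#define PCI_FUNC(devfn)	((devfn) & 0x07)	/* same as the kernel macro */

int main(void)
{
	unsigned int devfn = 0x92;	/* slot 0x12, function 2 */

	printf("devfn 0x%02x -> slot 0x%02x, func %u\n",
	       devfn, PCI_SLOT(devfn), PCI_FUNC(devfn));

	/* The bus-converter byte used by get_node_path(): prints 0x52. */
	printf("bc byte: 0x%02x\n", PCI_SLOT(devfn) | (PCI_FUNC(devfn) << 5));
	return 0;
}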
/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-threaded context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_table_opaque data;
	struct page *pages;
	size_t size, count;
	int ret, order;

	info = dev->archdata.iommu;
	if (WARN_ON(!info || !dev_is_pci(dev) ||
		    !info->pasid_supported || info->pasid_table))
		return -EINVAL;

	/* DMA alias device already has a pasid table, use it: */
	data.pasid_table = &pasid_table;
	ret = pci_for_each_dma_alias(to_pci_dev(dev),
				     &get_alias_pasid_table, &data);
	if (ret)
		goto attach_out;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_ATOMIC);
	if (!pasid_table)
		return -ENOMEM;
	INIT_LIST_HEAD(&pasid_table->dev);

	size = sizeof(struct pasid_entry);
	count = min_t(int, pci_max_pasids(to_pci_dev(dev)), intel_pasid_max_id);
	order = get_order(size * count);
	pages = alloc_pages_node(info->iommu->node,
				 GFP_ATOMIC | __GFP_ZERO, order);
	if (!pages) {
		/* Don't leak the table allocated above. */
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = page_address(pages);
	pasid_table->order = order;
	pasid_table->max_pasid = count;

attach_out:
	device_attach_pasid_table(info, pasid_table);

	return 0;
}
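/*
 * The sizing logic above rounds the PASID table up to whole pages: the
 * kernel's get_order(size) returns the smallest n such that
 * (PAGE_SIZE << n) >= size. A userspace sketch with assumed numbers
 * (8-byte entries and 64K PASIDs are illustrative, not taken from the
 * code above):
 */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */

/* Userspace stand-in for the kernel's get_order(). */
static int get_order_demo(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long size = 8;		/* assumed per-entry size in bytes */
	unsigned long count = 0x10000;	/* e.g. pci_max_pasids() == 65536 */

	/* 8 * 65536 = 512 KiB -> 128 pages -> order 7 */
	printf("order = %d\n", get_order_demo(size * count));
	return 0;
}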
void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;

	info = dev->archdata.iommu;
	if (!info || !dev_is_pci(dev) ||
	    !info->pasid_supported || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	device_detach_pasid_table(info, pasid_table);

	if (!list_empty(&pasid_table->dev))
		return;

	free_pages((unsigned long)pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr)
{
	const unsigned char *addr;
	struct device_node *dp;

	if (dev_is_pci(dev))
		dp = pci_device_to_OF_node(to_pci_dev(dev));
	else
		dp = dev->of_node;

	addr = NULL;
	if (dp)
		addr = of_get_mac_address(dp);
	if (!addr)
		addr = arch_get_platform_mac_address();

	if (!addr)
		return -ENODEV;

	ether_addr_copy(mac_addr, addr);
	return 0;
}
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_master *master = file_priv->master;
	int ret;

	if (master->unique != NULL)
		drm_unset_busid(dev, master);

	if (dev->dev && dev_is_pci(dev->dev)) {
		ret = drm_pci_set_busid(dev, master);
		if (ret) {
			drm_unset_busid(dev, master);
			return ret;
		}
	} else {
		WARN_ON(!dev->unique);
		master->unique = kstrdup(dev->unique, GFP_KERNEL);
		if (master->unique)
			master->unique_len = strlen(dev->unique);
	}

	return 0;
}
static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
			       int nvec, msi_alloc_info_t *info)
{
	struct pci_dev *pdev;
	struct its_pci_alias dev_alias;
	struct msi_domain_info *msi_info;

	if (!dev_is_pci(dev))
		return -EINVAL;

	msi_info = msi_get_domain_info(domain->parent);

	pdev = to_pci_dev(dev);
	dev_alias.pdev = pdev;
	dev_alias.count = nvec;

	pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);

	/* ITS specific DeviceID, as the core ITS ignores dev. */
	info->scratchpad[0].ul = dev_alias.dev_id;

	return msi_info->ops->msi_prepare(domain->parent,
					  dev, dev_alias.count, info);
}
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
	struct resource *res;
	int ret = 0;

	/* Use a bitmap to manage UAR indexes */
	ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->logic_idx);
	if (ret == -1)
		return -ENOMEM;

	if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
		uar->index = (uar->logic_idx - 1) %
			     (hr_dev->caps.phy_num_uars - 1) + 1;
	else
		uar->index = 0;

	if (!dev_is_pci(hr_dev->dev)) {
		res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
		if (!res) {
			dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
			return -EINVAL;
		}
		uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
	} else {
		uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
			   >> PAGE_SHIFT);
	}

	return 0;
}
static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (dev_is_pci(dev))
		return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask);

	return __dma_set_mask(dev, dma_mask);
}
static int __init match_pci_dev(struct device *dev, void *data)
{
	unsigned int devfn = *(unsigned int *)data;

	return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
}
static int iTCO_wdt_init(struct platform_device *pdev)
{
	int ret;
	u32 base_address;
	unsigned long val32;
	struct pci_dev *parent;
	struct resource *irq;

	if (!pdev->dev.parent || !dev_is_pci(pdev->dev.parent)) {
		pr_err("Unqualified parent device.\n");
		return -EINVAL;
	}

	parent = to_pci_dev(pdev->dev.parent);

	/*
	 * Find the ACPI/PM base I/O address which is the base
	 * for the TCO registers (TCOBASE = ACPIBASE + 0x60).
	 * ACPIBASE is bits [15:7] from 0x40-0x43.
	 */
	pci_read_config_dword(parent, 0x40, &base_address);
	base_address &= 0x0000ff80;
	if (base_address == 0x00000000) {
		/* Something's wrong here, ACPIBASE has to be set */
		pr_err("failed to get TCOBASE address, device disabled by hardware/BIOS\n");
		return -ENODEV;
	}
	iTCO_wdt_private.ACPIBASE = base_address;

	pci_read_config_dword(parent, 0x44, &pmc_base_address);
	pmc_base_address &= 0xFFFFFE00;

	/* Disable the watchdog on command-line demand */
	if (strstr(saved_command_line, "disable_kernel_watchdog=1")) {
		pr_warn("disable_kernel_watchdog=1 watchdog will not be started\n");
		iTCO_wdt_private.enable = false;
		/* Set the NO_REBOOT bit to prevent later reboots */
		iTCO_wdt_set_NO_REBOOT_bit();
		/* Ensure the watchdog is stopped in case the IAFW started it */
		iTCO_wdt_stop();
	} else {
		iTCO_wdt_private.enable = true;
		/* Check chipset's NO_REBOOT bit */
		if (iTCO_wdt_unset_NO_REBOOT_bit()) {
			pr_err("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
			ret = -ENODEV;	/* Cannot reset NO_REBOOT bit */
			goto out;
		}
	}

	/* The TCO logic uses the TCO_EN bit in the SMI_EN register */
	if (!request_region(SMI_EN, 4, "iTCO_wdt")) {
		pr_err("I/O address 0x%04lx already in use, device disabled\n",
		       SMI_EN);
		ret = -EIO;
		goto out;
	}
	if (!request_region(SMI_STS, 4, "iTCO_wdt")) {
		pr_err("I/O address 0x%04lx already in use, device disabled\n",
		       SMI_STS);
		ret = -EIO;
		goto unreg_smi_en;
	}

	/*
	 * The TCO I/O registers reside in a 32-byte range pointed to
	 * by the TCOBASE value.
	 */
	if (!request_region(TCOBASE, 0x20, "iTCO_wdt")) {
		pr_err("I/O address 0x%04lx already in use, device disabled\n",
		       TCOBASE);
		ret = -EIO;
		goto unreg_smi_sts;
	}

	pr_info("Found a TCO device (TCOBASE=0x%04lx)\n", TCOBASE);

	/* Check that the heartbeat value is within its range; if not, reset to the default */
	if (iTCO_wdt_set_heartbeat(heartbeat)) {
		iTCO_wdt_set_heartbeat(WATCHDOG_HEARTBEAT);
		pr_info("timeout value out of range, using %d\n", heartbeat);
	}

	ret = misc_register(&iTCO_wdt_miscdev);
	if (ret != 0) {
		pr_err("cannot register miscdev on minor=%d (err=%d)\n",
		       WATCHDOG_MINOR, ret);
		goto unreg_region;
	}

	pr_info("initialized. heartbeat=%d sec (nowayout=%d) policy=0x%x\n",
		heartbeat, nowayout, iTCO_wdt_get_current_ospolicy());

	/* Reset OS policy */
	iTCO_wdt_set_reset_type(TCO_POLICY_NORM);

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		pr_err("No warning interrupt resource found\n");
		ret = -ENODEV;
		goto misc_unreg;
	}

	ret = acpi_register_gsi(NULL, irq->start,
				irq->flags & (IORESOURCE_IRQ_HIGHEDGE |
					      IORESOURCE_IRQ_LOWEDGE) ?
					ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE,
				irq->flags & (IORESOURCE_IRQ_HIGHEDGE |
					      IORESOURCE_IRQ_HIGHLEVEL) ?
					ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW);
	if (ret < 0) {
		pr_err("failed to configure TCO warning IRQ %d\n", (int)irq->start);
		goto misc_unreg;
	}

	ret = request_irq(irq->start, tco_irq_handler, 0, "tco_watchdog", NULL);
	if (ret < 0) {
		pr_err("failed to request TCO warning IRQ %d\n", (int)irq->start);
		goto gsi_unreg;
	}

	/* Clear old TCO timeout status */
	val32 = TCO_TIMEOUT_BIT | SECOND_TO_STS_BIT;
	outl(val32, TCO1_STS);
	/* Clear the SMI status */
	outl(TCO_STS_BIT, SMI_STS);

	/* Enable SMI for TCO */
	val32 = inl(SMI_EN);
	val32 |= TCO_EN_BIT;
	outl(val32, SMI_EN);
	/* then ensure that the PMC is ready to handle the next SMI */
	val32 |= EOS_BIT;
	outl(val32, SMI_EN);

	reboot_notifier.notifier_call = TCO_reboot_notifier;
	reboot_notifier.priority = 1;
	ret = register_reboot_notifier(&reboot_notifier);
	if (ret)
		/* Continue; the reboot notifier is not critical for the watchdog */
		pr_err("cannot register reboot notifier %d\n", ret);

	return 0;

gsi_unreg:
	acpi_unregister_gsi((int)(irq->start));
misc_unreg:
	misc_deregister(&iTCO_wdt_miscdev);
unreg_region:
	release_region(TCOBASE, 0x20);
unreg_smi_sts:
	release_region(SMI_STS, 4);
unreg_smi_en:
	release_region(SMI_EN, 4);
out:
	iTCO_wdt_private.ACPIBASE = 0;
	return ret;
}
static void iwl_set_hw_address_family_8000(struct device *dev,
					   const struct iwl_cfg *cfg,
					   struct iwl_nvm_data *data,
					   const __le16 *mac_override,
					   const __le16 *nvm_hw)
{
	const u8 *hw_addr;

	if (mac_override) {
		hw_addr = (const u8 *)(mac_override +
				       MAC_ADDRESS_OVERRIDE_FAMILY_8000);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];

		if (is_valid_ether_addr(data->hw_addr))
			return;

		IWL_ERR_DEV(dev,
			    "mac address from nvm override section is not valid\n");
	}

	if (nvm_hw) {
		/* read the MAC address from OTP */
		if (!dev_is_pci(dev) || (data->nvm_version < 0xE08)) {
			/* read the mac address from the WFPM location */
			hw_addr = (const u8 *)(nvm_hw +
					       HW_ADDR0_WFPM_FAMILY_8000);
			data->hw_addr[0] = hw_addr[3];
			data->hw_addr[1] = hw_addr[2];
			data->hw_addr[2] = hw_addr[1];
			data->hw_addr[3] = hw_addr[0];

			hw_addr = (const u8 *)(nvm_hw +
					       HW_ADDR1_WFPM_FAMILY_8000);
			data->hw_addr[4] = hw_addr[1];
			data->hw_addr[5] = hw_addr[0];
		} else if ((data->nvm_version >= 0xE08) &&
			   (data->nvm_version < 0xE0B)) {
			/* read "reverse order" from the PCIe location */
			hw_addr = (const u8 *)(nvm_hw +
					       HW_ADDR0_PCIE_FAMILY_8000);
			data->hw_addr[5] = hw_addr[2];
			data->hw_addr[4] = hw_addr[1];
			data->hw_addr[3] = hw_addr[0];

			hw_addr = (const u8 *)(nvm_hw +
					       HW_ADDR1_PCIE_FAMILY_8000);
			data->hw_addr[2] = hw_addr[3];
			data->hw_addr[1] = hw_addr[2];
			data->hw_addr[0] = hw_addr[1];
		} else {
			/* read from the PCIe location */
			hw_addr = (const u8 *)(nvm_hw +
					       HW_ADDR0_PCIE_FAMILY_8000);
			data->hw_addr[5] = hw_addr[0];
			data->hw_addr[4] = hw_addr[1];
			data->hw_addr[3] = hw_addr[2];

			hw_addr = (const u8 *)(nvm_hw +
					       HW_ADDR1_PCIE_FAMILY_8000);
			data->hw_addr[2] = hw_addr[1];
			data->hw_addr[1] = hw_addr[2];
			data->hw_addr[0] = hw_addr[3];
		}

		if (!is_valid_ether_addr(data->hw_addr))
			IWL_ERR_DEV(dev,
				    "mac address from hw section is not valid\n");

		return;
	}

	IWL_ERR_DEV(dev, "mac address is not found\n");
}
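/*
 * The "214365" comment above means the stored address is a sequence of
 * little-endian 16-bit words, so each byte pair must be swapped to
 * recover the MAC. A standalone demo of the same transform; the sample
 * address is made up for illustration:
 */
#include <stdio.h>

int main(void)
{
	/* Address as stored in NVM: little-endian 16-bit words. */
	const unsigned char stored[6] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc };
	unsigned char mac[6];
	int i;

	/* Swap each byte pair, as the mac_override path above does. */
	for (i = 0; i < 6; i += 2) {
		mac[i] = stored[i + 1];
		mac[i + 1] = stored[i];
	}

	/* Prints 34:12:78:56:bc:9a - bytes 2,1,4,3,6,5 of the stored order. */
	for (i = 0; i < 6; i++)
		printf("%02x%c", mac[i], i == 5 ? '\n' : ':');
	return 0;
}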