static u32 pci_conf_read(pci_reg pcitag, u32 reg, size_t size) { u32 bus, dev, fun; u32 addr, type, val = 0xff; // 0xff means invalid sometimes bus = PCI_BUS(pcitag); dev = PCI_SLOT(pcitag); fun = PCI_FUNC(pcitag); /*range check is valueless*/ #ifdef CONFIG_CS5536 extern u32 cs5536_pci_conf_read(pci_reg, u32, size_t); if(dev == CS5536_IDSEL){ if(size != 4){ printf("must be 4 bytes to read!\n"); return 0xffffffff; } return cs5536_pci_conf_read(pcitag, reg, size); } #endif if(bus == 0) { addr = ((1 << (dev + 11)) | (fun << 8) | reg); type = 0; } else { addr = ((bus << 16) | (dev << 11) | (fun << 8) | reg); type = 0x10000; } printf("addr is %x\n", addr); *(volatile u32 *)(PHY_TO_UNCACHED(NB_PCICMD)) |= 0x28000000; *(volatile u32 *)(PHY_TO_UNCACHED(PCIMAP_CFG)) = (addr >> 16) | type; val = *(volatile u32 *)(PHY_TO_UNCACHED(PCICFG_SPACE) | (addr & 0xfffc)); if(size == 4) { return val; } else if (size == 1){ val = (val >> ((reg & 3) * 8)) & 0xff; } else if (size == 2){ //caller will ensure reg align is ok
/*
 * Configuration-mechanism #2 read (legacy pre-2.1 PCI chipsets).
 *
 * Returns 0 on success with *value filled in, -EINVAL for out-of-range
 * arguments (in which case *value is set to all-ones), or
 * PCIBIOS_DEVICE_NOT_FOUND for slots mechanism #2 cannot reach.
 */
static int pci_conf2_read(unsigned int seg, unsigned int bus,
			  unsigned int devfn, int reg, int len, u32 *value)
{
	unsigned long flags;
	int dev, fn;

	/* Mechanism #2 predates PCI domains; mirror pci_conf2_write() */
	WARN_ON(seg);
	if ((bus > 255) || (devfn > 255) || (reg > 255)) {
		*value = -1;
		return -EINVAL;
	}

	dev = PCI_SLOT(devfn);
	fn = PCI_FUNC(devfn);

	/* Mechanism #2 maps devices into I/O space; only 16 slots fit */
	if (dev & 0x10)
		return PCIBIOS_DEVICE_NOT_FOUND;

	raw_spin_lock_irqsave(&pci_config_lock, flags);

	/* Enable config space: function in bits 3:1 of 0xCF8, bus in 0xCFA */
	outb((u8)(0xF0 | (fn << 1)), 0xCF8);
	outb((u8)bus, 0xCFA);

	switch (len) {
	case 1:
		*value = inb(PCI_CONF2_ADDRESS(dev, reg));
		break;
	case 2:
		*value = inw(PCI_CONF2_ADDRESS(dev, reg));
		break;
	case 4:
		*value = inl(PCI_CONF2_ADDRESS(dev, reg));
		break;
	}

	/* Disable config space again */
	outb(0, 0xCF8);

	raw_spin_unlock_irqrestore(&pci_config_lock, flags);

	return 0;
}
/*
 * Compute the MMIO address through which the config-space register
 * @where of @bus/@devfn can be accessed on this SoC PCIe core.
 *
 * For bus 0 the root port's own (extended) config space is selected via
 * SOC_PCIE_EXT_CFG_ADDR; for downstream buses a type-1 config address is
 * programmed into SOC_PCIE_CFG_ADDR first.  Returns NULL when the target
 * is unreachable (no link, invalid slot, or function > 1 downstream).
 *
 * NOTE(review): the address register is written here and the caller is
 * expected to access the returned data window immediately — presumably
 * serialized by the generic PCI config lock; confirm at the call sites.
 */
static void __iomem *soc_pci_cfg_base(struct pci_bus *bus,
				      unsigned int devfn, int where)
{
	struct soc_pcie_port *port = soc_pcie_bus2port(bus);
	int busno = bus->number;
	int slot = PCI_SLOT(devfn);
	int fn = PCI_FUNC(devfn);
	void __iomem *base;
	int offset;
	int type;
	u32 addr_reg;

	base = port->reg_base;

	/* If there is no link, just show the PCI bridge. */
	if (!port->link && (busno > 0 || slot > 0))
		return NULL;

	if (busno == 0) {
		/* Root port itself: only slot 0 exists on bus 0 */
		if (slot >= 1)
			return NULL;
		type = slot;
		__raw_writel(where & 0x1ffc, base + SOC_PCIE_EXT_CFG_ADDR);
		offset = SOC_PCIE_EXT_CFG_DATA;
	} else {
		/* WAR for function num > 1 */
		if (fn > 1)
			return NULL;
		type = 1;
		/* bus[27:20] | device[19:15] | function[14:12] | reg | type */
		addr_reg = (busno & 0xff) << 20 |
			(slot << 15) |
			(fn << 12) |
			(where & 0xffc) |
			(type & 0x3);
		__raw_writel(addr_reg, base + SOC_PCIE_CFG_ADDR);
		offset = SOC_PCIE_CFG_DATA;
	}

	return base + offset;
}
/*
 * Find the second channel of a PDC20270: it sits at the next slot with
 * the same function number and the same vendor/device IDs.  If found,
 * also propagate the IRQ of the first function to it.
 *
 * Returns the second pci_dev (with a reference held by pci_get_slot())
 * or NULL when the neighbour is absent or a different device.
 */
static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
{
	struct pci_dev *dev2;

	dev2 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn) + 1,
						PCI_FUNC(dev->devfn)));

	if (dev2 && dev2->vendor == dev->vendor &&
	    dev2->device == dev->device) {

		if (dev2->irq != dev->irq) {
			dev2->irq = dev->irq;
			printk(KERN_INFO DRV_NAME " %s: PCI config space "
				"interrupt fixed\n", pci_name(dev));
		}

		return dev2;
	}

	/*
	 * BUG FIX: pci_get_slot() returns a referenced device; the
	 * reference was leaked when the neighbour did not match.
	 * pci_dev_put() handles NULL, so no extra check is needed.
	 */
	pci_dev_put(dev2);
	return NULL;
}
static int bonito64_config_access(unsigned char access_type, struct pci_dev *dev, unsigned char where, u32 *data) { unsigned char bus = dev->bus->number; unsigned char dev_fn = dev->devfn; unsigned char type; u32 intr, dummy; u64 pci_addr; if ((bus == 0) && (PCI_SLOT(dev_fn) == 0)) return -1; /* Clear cause register bits */ BONITO_PCICMD |= (BONITO_PCICMD_MABORT_CLR | BONITO_PCICMD_MTABORT_CLR); /* * Setup pattern to be used as PCI "address" for Type 0 cycle */ if (bus == 0) { /* IDSEL */ pci_addr = (u64)1 << (PCI_SLOT(dev_fn) + 10); } else { /* Bus number */ pci_addr = bus << PCI_CFG_TYPE1_BUS_SHF; /* Device number */ pci_addr |= PCI_SLOT(dev_fn) << PCI_CFG_TYPE1_DEV_SHF; } /* Function (same for Type 0/1) */ pci_addr |= PCI_FUNC(dev_fn) << PCI_CFG_TYPE0_FUNC_SHF; /* Register number (same for Type 0/1) */ pci_addr |= (where & ~0x3) << PCI_CFG_TYPE0_REG_SHF; if (bus == 0) { /* Type 0 */ BONITO_PCIMAP_CFG = pci_addr >> 16; } else {
static int stmmac_pci_find_phy_addr(struct pci_dev *pdev, const struct dmi_system_id *dmi_list) { const struct stmmac_pci_func_data *func_data; const struct stmmac_pci_dmi_data *dmi_data; const struct dmi_system_id *dmi_id; int func = PCI_FUNC(pdev->devfn); size_t n; dmi_id = dmi_first_match(dmi_list); if (!dmi_id) return -ENODEV; dmi_data = dmi_id->driver_data; func_data = dmi_data->func; for (n = 0; n < dmi_data->nfuncs; n++, func_data++) if (func_data->func == func) return func_data->phy_addr; return -ENODEV; }
/*
 * Publish one passed-through PCI device to xenstore as "vdev-<devid>"
 * in BDF form.  Returns 0 on success or a negative errno.
 */
static int xen_pcibk_publish_pci_dev(struct xen_pcibk_device *pdev,
				     unsigned int domain, unsigned int bus,
				     unsigned int devfn, unsigned int devid)
{
	char node[64];
	int written;

	written = snprintf(node, sizeof(node), "vdev-%d", devid);
	if (unlikely(written >= (sizeof(node) - 1)))
		return -ENOMEM;

	/* Note: The PV protocol uses %02x, don't change it */
	return xenbus_printf(XBT_NIL, pdev->xdev->nodename, node,
			     "%04x:%02x:%02x.%02x", domain, bus,
			     PCI_SLOT(devfn), PCI_FUNC(devfn));
}
/*
 * Configuration-mechanism #2 write (legacy pre-2.1 PCI chipsets).
 *
 * Returns 0 on success, -EINVAL for out-of-range arguments, or
 * PCIBIOS_DEVICE_NOT_FOUND for slots mechanism #2 cannot address.
 */
static int pci_conf2_write(unsigned int seg, unsigned int bus,
			   unsigned int devfn, int reg, int len, u32 value)
{
	unsigned long irqflags;
	int slot, func;

	/* Mechanism #2 predates PCI domains */
	WARN_ON(seg);
	if (bus > 255 || devfn > 255 || reg > 255)
		return -EINVAL;

	slot = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);

	/* Mechanism #2 can only address devices 0-15 */
	if (slot & 0x10)
		return PCIBIOS_DEVICE_NOT_FOUND;

	raw_spin_lock_irqsave(&pci_config_lock, irqflags);

	/* Enable config space: function in bits 3:1 of 0xCF8, bus in 0xCFA */
	outb((u8)(0xF0 | (func << 1)), 0xCF8);
	outb((u8)bus, 0xCFA);

	switch (len) {
	case 1:
		outb((u8)value, PCI_CONF2_ADDRESS(slot, reg));
		break;
	case 2:
		outw((u16)value, PCI_CONF2_ADDRESS(slot, reg));
		break;
	case 4:
		outl(value, PCI_CONF2_ADDRESS(slot, reg));
		break;
	}

	/* Disable config space again */
	outb(0, 0xCF8);

	raw_spin_unlock_irqrestore(&pci_config_lock, irqflags);

	return 0;
}
/*
 * Determine the highest usable UDMA mode class for a ServerWorks IDE
 * channel, based on chip family, revision and cable detection.
 */
static u8 svwks_ratemask (ide_drive_t *drive)
{
	struct pci_dev *dev = HWIF(drive)->pci_dev;
	u8 mode;

	if (!svwks_revision)
		pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision);

	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
		u32 reg = 0;
		if (isa_dev)
			/*
			 * BUG FIX: "&reg" had been mangled into the "(R)"
			 * glyph by an HTML-entity encoding error (&reg;),
			 * which does not compile.  Restored the address-of
			 * expression.
			 */
			pci_read_config_dword(isa_dev, 0x64, &reg);
		/*
		 * Don't enable UDMA on disk devices for the moment
		 */
		if (drive->media == ide_disk)
			return 0;
		/* Check the OSB4 DMA33 enable bit */
		return ((reg & 0x00004000) == 0x00004000) ? 1 : 0;
	} else if (svwks_revision < SVWKS_CSB5_REVISION_NEW) {
		return 1;
	} else if (svwks_revision >= SVWKS_CSB5_REVISION_NEW) {
		u8 btr = 0;
		pci_read_config_byte(dev, 0x5A, &btr);
		mode = btr & 0x3;
		/* Limit to UDMA66 without an 80-wire cable */
		if (!eighty_ninty_three(drive))
			mode = min(mode, (u8)1);
		/* If someone decides to do UDMA133 on CSB5 the same issue
		   will bite so be inclusive */
		if (mode > 2 && check_in_drive_lists(drive, svwks_bad_ata100))
			mode = 2;
	}
	/* CSB6 primary channel is capped at UDMA100 */
	if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
	     (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) &&
	    (!(PCI_FUNC(dev->devfn) & 1)))
		mode = 2;
	return mode;
}
static int serial_hsu_pci_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct hsu_dma_chan *dchan; int ret, share_irq = 0; resource_size_t start, len; start = pci_resource_start(pdev, 0); len = pci_resource_len(pdev, 0); dev_info(&pdev->dev, "FUNC: %d driver: %ld addr:%lx len:%lx\n", PCI_FUNC(pdev->devfn), ent->driver_data, (unsigned long) pci_resource_start(pdev, 0), (unsigned long) pci_resource_len(pdev, 0)); ret = pci_enable_device(pdev); if (ret) return ret; ret = pci_request_region(pdev, 0, "hsu dma"); if (ret) goto err; /* share irq with port? ANN all and TNG chip from B0 stepping */ if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER && pdev->revision >= 0x1) || intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE) share_irq = 1; ret = serial_hsu_dma_setup(&pdev->dev, start, len, pdev->irq, share_irq); if (ret) goto err; return 0; err: pci_disable_device(pdev); return ret; }
/*
 * Called by the AMD IOMMU driver when a Peripheral Page Request (PPR)
 * from @pdev is invalid.  Logs the faulting BDF/PASID/address and
 * forwards the event to the KFD device so user space can be notified.
 * Always answers the PRI request with "invalid".
 */
static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
		unsigned long address, u16 flags)
{
	struct kfd_dev *dev;

	/*
	 * BUG FIX: devfn encodes only slot and function; the bus number
	 * lives in pdev->bus->number.  PCI_BUS_NUM(pdev->devfn) printed
	 * a meaningless value.
	 */
	dev_warn(kfd_device,
			"Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
			pdev->bus->number,
			PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn),
			pasid,
			address,
			flags);

	dev = kfd_device_by_pci_dev(pdev);
	BUG_ON(dev == NULL);

	kfd_signal_iommu_event(dev, pasid, address,
			flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);

	return AMD_IOMMU_INV_PRI_RSP_INVALID;
}
/*
 * Map a PCI device/pin to its interrupt line.  Devices in the south
 * bridge slot use fixed per-function IRQs; everything else is routed
 * linearly off LOONGSON_IRQ_BASE by interrupt pin.  Functions in the
 * south bridge slot without a fixed mapping get IRQ 0 (none).
 */
int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	if (slot != sb_slot)
		return LOONGSON_IRQ_BASE + 25 + pin;

	switch (PCI_FUNC(dev->devfn)) {
	case 2:
		return 10;
	case 3:
		return 11;
	case 5:
		return 9;
	default:
		return 0;
	}
}
/*
 * Handle the QAT "get device status" ioctl: copy the request from user
 * space, look up the accelerator by id, fill in its static capabilities
 * and run state, and copy the result back.
 *
 * Returns 0 on success, -EFAULT on copy failures, -ENODEV when the
 * requested accel_id does not exist.
 */
static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
				    unsigned long arg)
{
	struct adf_hw_device_data *hw_data;
	struct adf_dev_status_info dev_info;
	struct adf_accel_dev *accel_dev;

	/* The user passes the accel_id in; the rest is output */
	if (copy_from_user(&dev_info, (void __user *)arg,
			   sizeof(struct adf_dev_status_info))) {
		pr_err("QAT: failed to copy from user.\n");
		return -EFAULT;
	}

	accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
	if (!accel_dev) {
		pr_err("QAT: Device %d not found\n", dev_info.accel_id);
		return -ENODEV;
	}
	hw_data = accel_dev->hw_device;
	dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
	dev_info.num_ae = hw_data->get_num_aes(hw_data);
	dev_info.num_accel = hw_data->get_num_accels(hw_data);
	dev_info.num_logical_accel = hw_data->num_logical_accel;
	dev_info.banks_per_accel = hw_data->num_banks /
					hw_data->num_logical_accel;
	strlcpy(dev_info.name, hw_data->dev_class->name,
		sizeof(dev_info.name));
	dev_info.instance_id = hw_data->instance_id;
	dev_info.type = hw_data->dev_class->type;
	/* Report the device's PCI location (bus/slot/function) */
	dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
	dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
	dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);

	if (copy_to_user((void __user *)arg, &dev_info,
			 sizeof(struct adf_dev_status_info))) {
		dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
		return -EFAULT;
	}
	return 0;
}
/**
 * Map a config-space access to a CPU virtual address on the iProc PCIe
 * controller.  Bus 0 accesses target the root complex through the
 * indirect CFG_IND_ADDR/DATA register pair; all other buses program a
 * type-1 config address into CFG_ADDR and use the CFG_DATA window.
 * Returns NULL for invalid devices or unavailable register offsets.
 *
 * Note access to the configuration registers are protected at the higher layer
 * by 'pci_lock' in drivers/pci/access.c
 */
static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
					    unsigned int devfn,
					    int where)
{
	struct iproc_pcie *pcie = iproc_data(bus);
	unsigned slot = PCI_SLOT(devfn);
	unsigned fn = PCI_FUNC(devfn);
	unsigned busno = bus->number;
	u32 val;
	u16 offset;

	if (!iproc_pcie_device_is_valid(pcie, slot, fn))
		return NULL;

	/* root complex access */
	if (busno == 0) {
		iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
				     where & CFG_IND_ADDR_MASK);
		offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
		if (iproc_pcie_reg_is_invalid(offset))
			return NULL;
		else
			return (pcie->base + offset);
	}

	/* EP device access */
	/* bus | slot | function | register, type-1 config cycle */
	val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
		(slot << CFG_ADDR_DEV_NUM_SHIFT) |
		(fn << CFG_ADDR_FUNC_NUM_SHIFT) |
		(where & CFG_ADDR_REG_NUM_MASK) |
		(1 & CFG_ADDR_CFG_TYPE_MASK);
	iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
	offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);
	if (iproc_pcie_reg_is_invalid(offset))
		return NULL;
	else
		return (pcie->base + offset);
}
/*
 * Build a 64-bit direct-mapped DMA address for an SN2 PCI device.
 *
 * Translates @paddr to the Crosstalk view when required, merges in the
 * requested attribute bits, and applies bus-mode and bridge-chipset
 * specific fixups.
 */
static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
			u64 dma_attributes, int dma_flags)
{
	struct pcibus_info *pcibus_info = (struct pcibus_info *)
	    ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
	u64 pci_addr;

	/* Translate to Crosstalk View of Physical Address */
	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
		pci_addr = IS_PIC_SOFT(pcibus_info) ?
		    PHYS_TO_DMA(paddr) :
		    PHYS_TO_TIODMA(paddr);
	else
		pci_addr = paddr;
	/*
	 * BUG FIX: "| dma_attributes" previously bound only to the
	 * second operand of each ?: expression (operator precedence),
	 * so the attribute bits were silently dropped on the PIC and
	 * raw-paddr paths.  OR them in unconditionally, matching the
	 * corrected sibling implementation.
	 */
	pci_addr |= dma_attributes;

	/* Handle Bus mode */
	if (IS_PCIX(pcibus_info))
		pci_addr &= ~PCI64_ATTR_PREF;

	/* Handle Bridge Chipset differences */
	if (IS_PIC_SOFT(pcibus_info)) {
		pci_addr |= ((u64) pcibus_info->
		    pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
	} else
		pci_addr |= (dma_flags & SN_DMA_MSI) ?
		    TIOCP_PCI64_CMDTYPE_MSI :
		    TIOCP_PCI64_CMDTYPE_MEM;

	/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
	if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
		pci_addr |= PCI64_ATTR_VIRTUAL;

	return pci_addr;
}
/** * pci_visit_dev - scans the pci buses. * Every bus and every function is presented to a custom * function that can act upon it. */ int pci_visit_dev(struct pci_visit *fn, struct pci_dev_wrapped *wrapped_dev, struct pci_bus_wrapped *wrapped_parent) { struct pci_dev* dev = wrapped_dev ? wrapped_dev->dev : NULL; int result = 0; if (!dev) return 0; if (fn->pre_visit_pci_dev) { result = fn->pre_visit_pci_dev(wrapped_dev, wrapped_parent); if (result) return result; } switch (dev->class >> 8) { case PCI_CLASS_BRIDGE_PCI: result = pci_visit_bridge(fn, wrapped_dev, wrapped_parent); if (result) return result; break; default: DBG("scanning device %02x, %02x\n", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); if (fn->visit_pci_dev) { result = fn->visit_pci_dev (wrapped_dev, wrapped_parent); if (result) return result; } } if (fn->post_visit_pci_dev) result = fn->post_visit_pci_dev(wrapped_dev, wrapped_parent); return result; }
/*
 * Probe every device on @current_bus and report whether the whole bus
 * is 66MHz capable.  The bus can only run at 66MHz if every present
 * device advertises PCI_STATUS_66MHZ, so the scan bails out at the
 * first device that does not.
 *
 * Returns 1 when at least one device was found and all of them are
 * 66MHz capable, 0 otherwise (including an empty bus, cap66 stays -1).
 */
int __init pci_is_66mhz_capable(struct pci_channel *hose,
				int top_bus, int current_bus)
{
	u32 pci_devfn;
	unsigned short vid;
	int cap66 = -1;
	u16 stat;

	printk(KERN_INFO "PCI: Checking 66MHz capabilities...\n");

	for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) {
		/* Only probe function 0 of each device */
		if (PCI_FUNC(pci_devfn))
			continue;
		if (early_read_config_word(hose, top_bus, current_bus,
					   pci_devfn, PCI_VENDOR_ID,
					   &vid) != PCIBIOS_SUCCESSFUL)
			continue;
		/* No device in this slot */
		if (vid == 0xffff)
			continue;

		/* check 66MHz capability */
		if (cap66 < 0)
			cap66 = 1;
		if (cap66) {
			early_read_config_word(hose, top_bus, current_bus,
					       pci_devfn, PCI_STATUS, &stat);
			if (!(stat & PCI_STATUS_66MHZ)) {
				printk(KERN_DEBUG
				       "PCI: %02x:%02x not 66MHz capable.\n",
				       current_bus, pci_devfn);
				cap66 = 0;
				break;
			}
		}
	}

	return cap66 > 0;
}
/*
 * Bring up one LPSS I2C controller before RAM is initialized, so that
 * romstage code can talk to devices on it.  The controller must be
 * marked for early init in the devicetree; its BAR is pointed at a
 * fixed pre-memory address (EARLY_I2C_BASE).
 *
 * Silently returns when the bus has no PCI function, is disabled in
 * the devicetree, or is not flagged for early init.
 */
static void i2c_early_init_bus(unsigned int bus)
{
	ROMSTAGE_CONST struct soc_intel_skylake_config *config;
	ROMSTAGE_CONST struct device *tree_dev;
	pci_devfn_t dev;
	int devfn;
	uintptr_t base;

	/* Find the PCI device for this bus controller */
	devfn = i2c_bus_to_devfn(bus);
	if (devfn < 0)
		return;

	/* Look up the controller device in the devicetree */
	dev = PCI_DEV(0, PCI_SLOT(devfn), PCI_FUNC(devfn));
	tree_dev = dev_find_slot(0, devfn);
	if (!tree_dev || !tree_dev->enabled)
		return;

	/* Skip if not enabled for early init */
	config = tree_dev->chip_info;
	if (!config)
		return;
	if (!config->i2c[bus].early_init)
		return;

	/* Prepare early base address for access before memory */
	base = EARLY_I2C_BASE(bus);
	pci_write_config32(dev, PCI_BASE_ADDRESS_0, base);
	pci_write_config32(dev, PCI_COMMAND,
			   PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Take device out of reset */
	lpss_reset_release(base);

	/* Initialize the controller */
	lpss_i2c_init(bus, &config->i2c[bus]);
}
uintptr_t dw_i2c_base_address(unsigned int bus) { int devfn; pci_devfn_t dev; uintptr_t base; /* Find device+function for this controller */ devfn = dw_i2c_soc_bus_to_devfn(bus); if (devfn < 0) return (uintptr_t)NULL; /* Form a PCI address for this device */ dev = PCI_DEV(0, PCI_SLOT(devfn), PCI_FUNC(devfn)); /* Read the first base address for this device */ base = ALIGN_DOWN(pci_read_config32(dev, PCI_BASE_ADDRESS_0), 16); /* Attempt to initialize bus if base is not set yet */ if (!base && !lpss_i2c_early_init_bus(bus)) base = ALIGN_DOWN(pci_read_config32(dev, PCI_BASE_ADDRESS_0), 16); return base; }
/****************************************************************************
PARAMETERS:
pcidev  - PCI device info for the video card on the bus to boot
VGAInfo - BIOS emulator VGA info structure

REMARKS:
This function executes the BIOS POST code on the controller. We assume that
at this stage the controller has its I/O and memory space enabled and
that all other controllers are in a disabled state.
****************************************************************************/
static void PCI_doBIOSPOST(pci_dev_t pcidev, BE_VGAInfo * VGAInfo)
{
	RMREGS regs;
	RMSREGS sregs;

	/* Determine the value to store in AX for BIOS POST. Per the PCI specs,
	   AH must contain the bus and AL must contain the devfn, encoded as
	   (dev << 3) | fn

	   BUG FIX: the "&regs" expressions below had been corrupted to the
	   (R) glyph by an HTML-entity mangling (&reg;), which does not
	   compile.  The address-of operators are restored. */
	memset(&regs, 0, sizeof(regs));
	memset(&sregs, 0, sizeof(sregs));
	regs.x.ax = ((int)PCI_BUS(pcidev) << 8) |
		    ((int)PCI_DEV(pcidev) << 3) | (int)PCI_FUNC(pcidev);

	/*Setup the X86 emulator for the VGA BIOS*/
	BE_setVGA(VGAInfo);

	/*Execute the BIOS POST code*/
	BE_callRealMode(0xC000, 0x0003, &regs, &sregs);

	/*Cleanup and exit*/
	BE_getVGA(VGAInfo);
}
static int tx4927_pcibios_write_config_dword(struct pci_dev *dev, int where, unsigned int val) { int flags; unsigned char bus, func_num; if (where & 3) return PCIBIOS_BAD_REGISTER_NUMBER; /* check if the bus is top-level */ if (dev->bus->parent != NULL) { bus = dev->bus->number; db_assert(bus != 0); } else { bus = 0; } func_num = PCI_FUNC(dev->devfn); if (mkaddr(bus, dev->devfn, where, &flags)) return -1; tx4927_pcicptr->g2pcfgdata = val; return check_abort(flags); }
int kvm_deassign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct iommu_domain *domain = kvm->arch.iommu_domain; struct pci_dev *pdev = NULL; /* check if iommu exists and in use */ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; iommu_detach_device(domain, &pdev->dev); printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n", assigned_dev->host_busnr, PCI_SLOT(assigned_dev->host_devfn), PCI_FUNC(assigned_dev->host_devfn)); return 0; }
/*
 * Write @value (of @size bytes) to config space register @offset of the
 * device @bdf behind the Intel FPGA PCIe root port.
 *
 * Root-port registers are written via the memory-mapped path; all other
 * devices go through a TLP config write with per-byte enables so that
 * sub-dword writes do not clobber neighbouring bytes.
 *
 * Returns 0 on success or a negative error from the underlying write.
 */
static int _pcie_intel_fpga_write_config(struct intel_fpga_pcie *pcie,
					 pci_dev_t bdf, uint offset,
					 ulong value, enum pci_size_t size)
{
	u32 data;
	u8 byte_en;

	dev_dbg(pcie->dev, "PCIE CFG write: (b.d.f)=(%02d.%02d.%02d)\n",
		PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));
	dev_dbg(pcie->dev, "(addr,size,val)=(0x%04x, %d, 0x%08lx)\n",
		offset, size, value);

	/* Uses memory mapped method to read rootport config registers */
	if (IS_ROOT_PORT(pcie, bdf))
		return intel_fpga_pcie_rp_wr_conf(pcie->bus, bdf,
						  offset, value, size);

	/* Byte enables select which lanes of the dword are written */
	byte_en = pcie_get_byte_en(offset, size);
	/* Position the value within its dword-aligned container */
	data = pci_conv_size_to_32(0, value, offset, size);

	return tlp_cfg_dword_write(pcie, bdf, offset & ~DWORD_MASK,
				   byte_en, data);
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct intel_device_info *intel_info = (struct intel_device_info *) ent->driver_data; if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) { DRM_INFO("This hardware requires preliminary hardware support.\n" "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); return -ENODEV; } /* Only bind to function 0 of the device. Early generations * used function 1 as a placeholder for multi-head. This causes * us confusion instead, especially on the systems where both * functions have the same PCI-ID! */ if (PCI_FUNC(pdev->devfn)) return -ENODEV; driver.driver_features &= ~(DRIVER_USE_AGP); return drm_get_pci_dev(pdev, ent, &driver); }
/**
 * ks_pcie_cfg_setup() - Set up configuration space address for a device
 *
 * @ks_pcie: ptr to keystone_pcie structure
 * @bus: Bus number the device is residing on
 * @devfn: device, function number info
 *
 * Forms and returns the address of configuration space mapped in PCIESS
 * address space 0. Also configures CFG_SETUP for remote configuration space
 * access.
 *
 * The address space has two regions to access configuration - local and remote.
 * We access local region for bus 0 (as RC is attached on bus 0) and remote
 * region for others with TYPE 1 access when bus > 1. As for device on bus = 1,
 * we will do TYPE 0 access as it will be on our secondary bus (logical).
 * CFG_SETUP is needed only for remote configuration access.
 */
static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
				       unsigned int devfn)
{
	u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
	struct pcie_port *pp = &ks_pcie->pp;
	u32 regval;

	/* Bus 0 is the RC itself: use the local (DBI) register space */
	if (bus == 0)
		return pp->dbi_base;

	/* CFG_SETUP: bus[23:16] | device[15:8] | function[7:0] */
	regval = (bus << 16) | (device << 8) | function;

	/*
	 * Since Bus#1 will be a virtual bus, we need to have TYPE0
	 * access only.
	 * TYPE 1
	 */
	if (bus != 1)
		regval |= BIT(24);

	writel(regval, ks_pcie->va_app_base + CFG_SETUP);
	return pp->va_cfg0_base;
}
/*
 * Monitor command handler: hot-add a PCI device of the given type
 * ("nic", "storage", or "host" with KVM device assignment) at the
 * requested PCI address ("auto" selects one automatically).  Prints
 * the resulting location, or an error, to the monitor.
 */
void pci_device_hot_add(Monitor *mon, const QDict *qdict)
{
	PCIDevice *dev = NULL;
	const char *pci_addr = qdict_get_str(qdict, "pci_addr");
	const char *type = qdict_get_str(qdict, "type");
	const char *opts = qdict_get_try_str(qdict, "opts");

	/* strip legacy tag */
	if (!strncmp(pci_addr, "pci_addr=", 9)) {
		pci_addr += 9;
	}

	if (!opts) {
		opts = "";
	}

	if (!strcmp(pci_addr, "auto")) {
		pci_addr = NULL;
	}

	if (!strcmp(type, "nic")) {
		dev = qemu_pci_hot_add_nic(mon, pci_addr, opts);
	} else if (!strcmp(type, "storage")) {
		dev = qemu_pci_hot_add_storage(mon, pci_addr, opts);
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
	} else if (!strcmp(type, "host")) {
		dev = qemu_pci_hot_assign_device(mon, pci_addr, opts);
#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
	} else {
		monitor_printf(mon, "invalid type: %s\n", type);
	}

	if (dev) {
		monitor_printf(mon, "OK domain %d, bus %d, slot %d, function %d\n",
			       0, pci_bus_num(dev->bus), PCI_SLOT(dev->devfn),
			       PCI_FUNC(dev->devfn));
	} else {
		monitor_printf(mon, "failed to add %s\n", opts);
	}
}
/*
 * Write a 32-bit value to PCI config space at @offset by seeking into
 * the device's /proc/bus/pci/BB/DD.F node.
 *
 * Returns OSAL_SUCCESS on success, OSAL_INVALID_HANDLE for a NULL
 * handle, OSAL_NOT_FOUND when the proc node cannot be opened, and
 * OSAL_ERROR for formatting, seek, or short-write failures.
 */
osal_result os_pci_write_config_32(
		os_pci_dev_t pci_dev,
		unsigned int offset,
		unsigned int val)
{
	int fDev;
	int len;
	char szDevAddr[64];

	if(NULL == pci_dev)
	{
		return OSAL_INVALID_HANDLE;
	}

	/*
	 * FIX: the result was previously compared against the magic
	 * length 21, which silently breaks if the path format ever
	 * changes.  Check for encoding error and truncation instead;
	 * for all valid bus/dev/func values the output is unchanged.
	 */
	len = snprintf(szDevAddr, sizeof(szDevAddr),
			"/proc/bus/pci/%2.2x/%2.2x.%1.1x",
			(unsigned int)PCI_BUS(((pci_dev_t*)pci_dev)->slot_address),
			(unsigned int)PCI_DEV(((pci_dev_t*)pci_dev)->slot_address),
			(unsigned int)PCI_FUNC(((pci_dev_t*)pci_dev)->slot_address));
	if(len < 0 || (size_t)len >= sizeof(szDevAddr))
	{
		return OSAL_ERROR;
	}

	if(-1 == (fDev = open(szDevAddr, O_WRONLY)))
	{
		return OSAL_NOT_FOUND;
	}

	if(-1 == lseek(fDev, offset, SEEK_SET))
	{
		close(fDev);
		return OSAL_ERROR;
	}

	if(4 != write(fDev, &val, 4))
	{
		close(fDev);
		return OSAL_ERROR;
	}

	close(fDev);
	return OSAL_SUCCESS;
}
/*
 * Map a PCI device/pin to its interrupt line on a Loongson board with a
 * CS5536 south bridge.  Regular slots use the board's irq_tab lookup;
 * the CS5536's sub-functions get fixed interrupt lines which are also
 * written back into the device's PCI_INTERRUPT_LINE register so the
 * OS and firmware agree.  Returns 0 when no interrupt is routed.
 */
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	int virq;

	if ((PCI_SLOT(dev->devfn) != PCI_IDSEL_CS5536)
	    && (PCI_SLOT(dev->devfn) < 32)) {
		/* Ordinary slot: table lookup by slot and pin */
		virq = irq_tab[slot][pin];
		printk(KERN_INFO "slot: %d, pin: %d, irq: %d\n", slot, pin,
		       virq + LOONGSON_IRQ_BASE);
		if (virq != 0)
			return LOONGSON_IRQ_BASE + virq;
		else
			return 0;
	} else if (PCI_SLOT(dev->devfn) == PCI_IDSEL_CS5536) {
		/* cs5536 */
		switch (PCI_FUNC(dev->devfn)) {
		case 2:
			pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
					      CS5536_IDE_INTR);
			return CS5536_IDE_INTR;	/* for IDE */
		case 3:
			pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
					      CS5536_ACC_INTR);
			return CS5536_ACC_INTR;	/* for AUDIO */
		case 4:	/* for OHCI */
		case 5:	/* for EHCI */
		case 6:	/* for UDC */
		case 7:	/* for OTG */
			/* USB functions all share one interrupt line */
			pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
					      CS5536_USB_INTR);
			return CS5536_USB_INTR;
		}
		/* Functions 0/1 keep whatever was assigned already */
		return dev->irq;
	} else {
		printk(KERN_INFO " strange pci slot number.\n");
		return 0;
	}
}
/*
 * Disable MSI on @dev and restore its default pin-assertion vector.
 *
 * Bails out silently when the device has no MSI capability or MSI is
 * not currently enabled.  If a driver is still holding the vector
 * (msi_attrib.state set), only a diagnostic is printed — the vector
 * cannot be reclaimed until free_irq() is called.  All bookkeeping is
 * done under msi_lock.
 */
void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int pos, default_vector;
	u16 control;
	unsigned long flags;

	if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->irq];
	/* Sanity: the vector must belong to this device's MSI entry */
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (entry->msi_attrib.state) {
		/* Vector still in use: driver unloaded without free_irq() */
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on vector->%d\n",
		       dev->bus->number, PCI_SLOT(dev->devfn),
		       PCI_FUNC(dev->devfn), dev->irq);
		BUG_ON(entry->msi_attrib.state > 0);
	} else {
		vector_irq[dev->irq] = 0; /* free it */
		nr_released_vectors++;
		default_vector = entry->msi_attrib.default_vector;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* Restore dev->irq to its default pin-assertion vector */
		dev->irq = default_vector;
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
				 PCI_CAP_ID_MSI);
	}
}
/*
 * Build a 64-bit direct-mapped DMA address for an SN2 PCI device:
 * translate @paddr to the Crosstalk view when requested, merge in the
 * attribute bits, then apply bus-mode and bridge-chipset fixups.
 */
static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
			u64 dma_attributes, int dma_flags)
{
	struct pcibus_info *bussoft = (struct pcibus_info *)
	    ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
	u64 dma_addr;

	/* Translate to the Crosstalk view of the physical address */
	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) {
		if (IS_PIC_SOFT(bussoft))
			dma_addr = PHYS_TO_DMA(paddr);
		else
			dma_addr = PHYS_TO_TIODMA(paddr);
	} else {
		dma_addr = paddr;
	}
	dma_addr |= dma_attributes;

	/* PCI-X mode does not support the prefetch attribute */
	if (IS_PCIX(bussoft))
		dma_addr &= ~PCI64_ATTR_PREF;

	/* Bridge-chipset specific target/command encoding */
	if (IS_PIC_SOFT(bussoft))
		dma_addr |= ((u64) bussoft->pbi_hub_xid
			     << PIC_PCI64_ATTR_TARG_SHFT);
	else if (dma_flags & SN_DMA_MSI)
		dma_addr |= TIOCP_PCI64_CMDTYPE_MSI;
	else
		dma_addr |= TIOCP_PCI64_CMDTYPE_MEM;

	/* In PCI mode, func zero uses VCHAN0; every other func VCHAN1 */
	if (!IS_PCIX(bussoft) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
		dma_addr |= PCI64_ATTR_VIRTUAL;

	return dma_addr;
}